From 62e989dbfa04a6b2bf3a457b02735979183aba3d Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Fri, 10 Jan 2020 11:24:09 +0800 Subject: [PATCH 1/2] Merge crate abci-rs into abci --- CHANGELOG.md | 13 +- Cargo.toml | 33 +- Makefile | 6 +- README.md | 47 +- build.rs | 4 +- examples/counter.rs | 179 ++++++ examples/counter_app.rs | 76 --- examples/empty_app.rs | 18 - protobuf/abci.proto | 36 +- .../tendermint/abci/types/types.proto | 36 +- .../tendermint/crypto/merkle/merkle.proto | 3 +- .../libs/{common => kv}/types.proto | 5 +- protobuf/google/protobuf/descriptor.proto | 140 ++-- protobuf/google/protobuf/duration.proto | 116 ++++ protobuf/google/protobuf/timestamp.proto | 11 +- src/application.rs | 136 ++++ src/codec.rs | 150 ----- src/lib.rs | 166 ++--- src/messages/mod.rs | 3 - src/proto.rs | 91 +++ src/{messages => proto}/abci.rs | 603 +++++++++--------- src/{messages => proto}/merkle.rs | 21 +- src/{messages => proto}/types.rs | 64 +- src/server.rs | 364 ++++++++--- src/types.rs | 53 ++ src/types/begin_block.rs | 51 ++ src/types/check_tx.rs | 92 +++ src/types/commit.rs | 15 + src/types/deliver_tx.rs | 62 ++ src/types/end_block.rs | 51 ++ src/types/info.rs | 47 ++ src/types/init_chain.rs | 65 ++ src/types/misc.rs | 484 ++++++++++++++ src/types/query.rs | 85 +++ src/types/set_option.rs | 47 ++ version.txt | 4 +- 36 files changed, 2437 insertions(+), 940 deletions(-) create mode 100644 examples/counter.rs delete mode 100644 examples/counter_app.rs delete mode 100644 examples/empty_app.rs rename protobuf/github.com/tendermint/tendermint/libs/{common => kv}/types.proto (86%) create mode 100644 protobuf/google/protobuf/duration.proto create mode 100644 src/application.rs delete mode 100644 src/codec.rs delete mode 100644 src/messages/mod.rs create mode 100644 src/proto.rs rename src/{messages => proto}/abci.rs (95%) rename src/{messages => proto}/merkle.rs (95%) rename src/{messages => proto}/types.rs (88%) create mode 100644 src/types.rs create mode 100644 src/types/begin_block.rs create mode 100644 src/types/check_tx.rs create mode 100644 src/types/commit.rs create mode 100644 src/types/deliver_tx.rs create mode 100644 src/types/end_block.rs create mode 100644 src/types/info.rs create mode 100644 src/types/init_chain.rs create mode 100644 src/types/misc.rs create mode 100644 src/types/query.rs create mode 100644 src/types/set_option.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index a30a308..a31ecc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,17 @@ # CHANGELOG -January 2, 2020_ +_January 24, 2020_ + +## v0.7.0 + +- Restructures `abci` to use latest `async`/`await` functionality (new design is based on https://github.com/devashishdxt/abci-rs, + read documentation for more information) +- Updates minimum supported tendermint version to `v0.33.0` +- [Issue #30](https://github.com/tendermint/rust-abci/issues/30): Adds support for unix sockets +- [Issue #107](https://github.com/tendermint/rust-abci/issues/107): Updates `tokio` to `v0.2` +- Adds support for `async-std` executor to drive `Future`s. 
+ +_January 2, 2020_ Special thanks to external contributors on this release: @devashishdxt diff --git a/Cargo.toml b/Cargo.toml index ffa247b..94eb708 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,25 +1,38 @@ [package] name = "abci" -version = "0.6.5" +version = "0.7.0" authors = ["Adrian Brink ", "Jackson Lewis ", "Dave Bryson", "Tomas Tauber"] edition = "2018" license = "MIT/Apache-2.0" description = "Tendermint ABCI server for Rust" homepage = "https://tendermint.com/docs/spec/abci/" repository = "https://github.com/tendermint/rust-abci" +categories = ["network-programming"] keywords = ["abci", "tendermint", "blockchain", "rust"] readme = "README.md" include = ["src/**/*", "Cargo.toml"] +[package.metadata.docs.rs] +features = ["doc"] +rustdoc-args = ["--cfg", "feature=\"doc\""] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + [dependencies] -bytes = "0.4" -protobuf = "= 2.10.0" -byteorder = "1.3.2" -integer-encoding = "1.0.5" -log = "0.4.8" -env_logger = "0.7.0" -tokio = { version = "0.1", default-features = false, features = ["codec", "io", "tcp", "rt-full"] } -futures = "0.3" +log = "0.4" +protobuf = "2.10" +integer-encoding = "1.0" +async-trait = "0.1" +async-std = { version = "1.5", optional = true } +tokio = { version = "0.2", optional = true, features = ["io-util", "sync", "tcp", "stream", "rt-core", "uds"] } + +[dev-dependencies] +env_logger = "0.7" +tokio = { version = "0.2", features = ["macros"] } [build-dependencies] -protobuf-codegen-pure = "= 2.10.0" \ No newline at end of file +protobuf-codegen-pure = "2.10" + +[features] +default = ["tokio"] +doc = [] diff --git a/Makefile b/Makefile index 52b57c0..893a49e 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,16 @@ # Origin -version_branch = v0.32.8 +version_branch = v0.33.0 tendermint = https://raw.githubusercontent.com/tendermint/tendermint/$(version_branch) # Outputs -tmkv = protobuf/github.com/tendermint/tendermint/libs/common/types.proto +tmkv = protobuf/github.com/tendermint/tendermint/libs/kv/types.proto tmmerkle = protobuf/github.com/tendermint/tendermint/crypto/merkle/merkle.proto tmabci = protobuf/github.com/tendermint/tendermint/abci/types/types.proto # You *only* need to run this to rebuild protobufs from the tendermint source update-proto: curl $(tendermint)/abci/types/types.proto > $(tmabci) - curl $(tendermint)/libs/common/types.proto > $(tmkv) + curl $(tendermint)/libs/kv/types.proto > $(tmkv) curl $(tendermint)/crypto/merkle/merkle.proto > $(tmmerkle) sed 's@package types;@package abci;@' $(tmabci) > protobuf/abci.proto curl $(tendermint)/version/version.go | grep -F -eTMCoreSem -eABCISemVer > version.txt diff --git a/README.md b/README.md index e47d43f..ce2380f 100644 --- a/README.md +++ b/README.md @@ -12,29 +12,41 @@ applications for [Tendermint](https://github.com/tendermint/tendermint/). ## Supported Version -- Tendermint 0.32.7 -- ABCI 0.16.0 +- Tendermint 0.33.0 +- ABCI 0.16.1 -## Installation +## Usage -### Dependencies +Add `abci` in your `Cargo.toml`'s `dependencies` section: -Make sure that you have Rust and Cargo installed. The easiest way is to follow the instructions on [rustup](https://rustup.rs/). +```toml +[dependencies] +abci = "0.7" +``` -To test the examples, please clone this repository. +Each ABCI application has to implement three core traits corresponding to all three ABCI connections, `Consensus`, +`Mempool` and `Info`. 
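For a sense of what such implementations look like, here is a minimal do-nothing application sketched against the API this patch itself introduces (trait and type names are taken from `examples/counter.rs` and `src/application.rs`; the `NoopConsensus`, `NoopMempool` and `NoopInfo` names are illustrative, and a `tokio` runtime with the `macros` feature is assumed for `#[tokio::main]`):

```rust
use std::net::SocketAddr;

use abci::{async_trait, types::*, Consensus, Info, Mempool, Server};

/// Consensus connection that accepts every block and transaction as-is.
struct NoopConsensus;

#[async_trait]
impl Consensus for NoopConsensus {
    async fn init_chain(&self, _req: InitChainRequest) -> InitChainResponse {
        Default::default()
    }

    async fn begin_block(&self, _req: BeginBlockRequest) -> BeginBlockResponse {
        Default::default()
    }

    async fn deliver_tx(&self, _req: DeliverTxRequest) -> Result<DeliverTxResponse, Error> {
        Ok(Default::default())
    }

    async fn end_block(&self, _req: EndBlockRequest) -> EndBlockResponse {
        Default::default()
    }

    async fn commit(&self) -> CommitResponse {
        CommitResponse { data: vec![] }
    }
}

/// Mempool connection that considers every transaction valid.
struct NoopMempool;

#[async_trait]
impl Mempool for NoopMempool {
    async fn check_tx(&self, _req: CheckTxRequest) -> Result<CheckTxResponse, Error> {
        Ok(Default::default())
    }
}

/// Info connection reporting an empty application state.
struct NoopInfo;

#[async_trait]
impl Info for NoopInfo {
    async fn info(&self, _req: InfoRequest) -> InfoResponse {
        InfoResponse {
            data: Default::default(),
            version: Default::default(),
            app_version: Default::default(),
            last_block_height: 0,
            last_block_app_hash: vec![],
        }
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let server = Server::new(NoopConsensus, NoopMempool, NoopInfo);

    // Listen on Tendermint's default ABCI port.
    server
        .run("127.0.0.1:26658".parse::<SocketAddr>().unwrap())
        .await
}
```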
-``` -git clone https://github.com/tendermint/rust-abci.git -``` +> Note: Implementations of these traits are expected to be `Send + Sync` and methods take immutable reference of `self`. +So, internal mutability must be handled using thread safe (`Arc`, `Mutex`, etc.) constructs. -The `empty_app` example, found under the `examples` folder, is a good demonstration/bare minimum foundation for a Rust ABCI app. +After implementing all three above mentioned `trait`s, you can create a `Server` object and use `Server::run()` to start +ABCI application. -To use this library to build your own ABCI apps in Rust you have to include the following in your `Cargo.toml` file. +`Server::run()` is an `async` function and returns a `Future`. So, you'll need an executor to drive `Future` returned +from `Server::run()`. `async-std` and `tokio` are two popular options. In `counter` example, we use `tokio`'s executor. -```toml -[dependencies] -abci = "0.6.4" -``` +To know more, go to `examples/` to see a sample ABCI application. + +### Features + +- `tokio`: Enables `tokio` backend for running ABCI TCP/UDS server + - **Enabled** by default. +- `async-std`: Enables `async-std` backend for running ABCI TCP/UDS server + - **Disabled** by default. + +> Features `tokio` and `async-std` are mutually exclusive, i.e., only one of them can be enabled at a time. Compilation +will fail if either both of them are enabled or none of them are enabled. ### Development @@ -50,9 +62,7 @@ To run either of the example apps you have to have Tendermint installed and init tendermint node ``` -After the node is online, you can run the `empty_app` example using `cargo run --example empty_app`. - -To run the `counter_app` run `cargo run --example counter_app` and send transaction to Tendermint via: +After the node is online, you can run the `counter` example using `cargo run --example counter`. 
``` curl localhost:26657/broadcast_tx_commit?tx=0x01 @@ -65,6 +75,7 @@ For a real life example of an ABCI application you can checkout [Cosmos SDK](htt | Tendermint | Rust-abci | | ---------- | :-------: | +| 0.33.0 | 0.7.0 | | 0.32.7 | 0.6.4 | | 0.31.7 | 0.5.4 | diff --git a/build.rs b/build.rs index d0ea35c..c0ce80a 100644 --- a/build.rs +++ b/build.rs @@ -2,10 +2,10 @@ extern crate protobuf_codegen_pure; fn main() { protobuf_codegen_pure::run(protobuf_codegen_pure::Args { - out_dir: "src/messages", + out_dir: "src/proto", input: &[ "protobuf/abci.proto", - "protobuf/github.com/tendermint/tendermint/libs/common/types.proto", + "protobuf/github.com/tendermint/tendermint/libs/kv/types.proto", "protobuf/github.com/tendermint/tendermint/crypto/merkle/merkle.proto", ], includes: &["protobuf"], diff --git a/examples/counter.rs b/examples/counter.rs new file mode 100644 index 0000000..92b3f2c --- /dev/null +++ b/examples/counter.rs @@ -0,0 +1,179 @@ +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use abci::{async_trait, types::*, Consensus, Info, Mempool, Server}; + +/// Simple counter +#[derive(Debug, Default, Clone)] +pub struct CounterState { + block_height: i64, + app_hash: Vec, + counter: u64, +} + +#[derive(Debug)] +pub struct ConsensusConnection { + committed_state: Arc>, + current_state: Arc>>, +} + +impl ConsensusConnection { + pub fn new( + committed_state: Arc>, + current_state: Arc>>, + ) -> Self { + Self { + committed_state, + current_state, + } + } +} + +#[async_trait] +impl Consensus for ConsensusConnection { + async fn init_chain(&self, _init_chain_request: InitChainRequest) -> InitChainResponse { + Default::default() + } + + async fn begin_block(&self, _begin_block_request: BeginBlockRequest) -> BeginBlockResponse { + let committed_state = self.committed_state.lock().unwrap().clone(); + + let mut current_state = self.current_state.lock().unwrap(); + *current_state = Some(committed_state); + + Default::default() + } + + async fn deliver_tx(&self, deliver_tx_request: DeliverTxRequest) -> Result { + let new_counter = parse_bytes_to_counter(&deliver_tx_request.tx)?; + + let mut current_state_lock = self.current_state.lock().unwrap(); + let mut current_state = current_state_lock.as_mut().unwrap(); + + if current_state.counter + 1 != new_counter { + return Err(Error { + code: 2, + codespace: "Validation error".to_owned(), + log: "Only consecutive integers are allowed".to_owned(), + info: "Numbers to counter app should be supplied in increasing order of consecutive integers staring from 1".to_owned(), + }); + } + + current_state.counter = new_counter; + + Ok(Default::default()) + } + + async fn end_block(&self, end_block_request: EndBlockRequest) -> EndBlockResponse { + let mut current_state_lock = self.current_state.lock().unwrap(); + let mut current_state = current_state_lock.as_mut().unwrap(); + + current_state.block_height = end_block_request.height; + current_state.app_hash = current_state.counter.to_be_bytes().to_vec(); + + Default::default() + } + + async fn commit(&self) -> CommitResponse { + let current_state = self.current_state.lock().unwrap().as_ref().unwrap().clone(); + let mut committed_state = self.committed_state.lock().unwrap(); + *committed_state = current_state; + + CommitResponse { + data: (*committed_state).app_hash.clone(), + } + } +} + +#[derive(Debug)] +pub struct MempoolConnection { + state: Arc>>, +} + +impl MempoolConnection { + pub fn new(state: Arc>>) -> Self { + Self { state } + } +} + +#[async_trait] +impl Mempool for MempoolConnection { + 
async fn check_tx(&self, check_tx_request: CheckTxRequest) -> Result { + let new_counter = parse_bytes_to_counter(&check_tx_request.tx)?; + + let state_lock = self.state.lock().unwrap(); + let state = state_lock.as_ref().unwrap(); + + if state.counter + 1 != new_counter { + Err(Error { + code: 2, + codespace: "Validation error".to_owned(), + log: "Only consecutive integers are allowed".to_owned(), + info: "Numbers to counter app should be supplied in increasing order of consecutive integers staring from 1".to_owned(), + }) + } else { + Ok(Default::default()) + } + } +} + +pub struct InfoConnection { + state: Arc>, +} + +impl InfoConnection { + pub fn new(state: Arc>) -> Self { + Self { state } + } +} + +#[async_trait] +impl Info for InfoConnection { + async fn info(&self, _info_request: InfoRequest) -> InfoResponse { + let state = self.state.lock().unwrap(); + + InfoResponse { + data: Default::default(), + version: Default::default(), + app_version: Default::default(), + last_block_height: (*state).block_height, + last_block_app_hash: (*state).app_hash.clone(), + } + } +} + +fn parse_bytes_to_counter(bytes: &[u8]) -> Result { + if bytes.len() != 8 { + return Err(Error { + code: 1, + codespace: "Parsing error".to_owned(), + log: "Transaction should be 8 bytes long".to_owned(), + info: "Transaction is big-endian encoding of 64-bit integer".to_owned(), + }); + } + + let mut counter_bytes = [0; 8]; + counter_bytes.copy_from_slice(bytes); + + Ok(u64::from_be_bytes(counter_bytes)) +} + +#[tokio::main] +async fn main() -> std::io::Result<()> { + env_logger::init(); + + let committed_state: Arc> = Default::default(); + let current_state: Arc>> = Default::default(); + + let consensus = ConsensusConnection::new(committed_state.clone(), current_state.clone()); + let mempool = MempoolConnection::new(current_state.clone()); + let info = InfoConnection::new(committed_state.clone()); + + let server = Server::new(consensus, mempool, info); + + server + .run("127.0.0.1:26658".parse::().unwrap()) + .await +} diff --git a/examples/counter_app.rs b/examples/counter_app.rs deleted file mode 100644 index 3d86ec9..0000000 --- a/examples/counter_app.rs +++ /dev/null @@ -1,76 +0,0 @@ -extern crate abci; -extern crate byteorder; -extern crate env_logger; - -use abci::*; -use byteorder::{BigEndian, ByteOrder}; -use env_logger::Env; - -// Simple counter application. Its only state is a u64 count -// We use BigEndian to serialize the data across transactions calls -struct CounterApp { - count: u64, -} - -impl CounterApp { - fn new() -> CounterApp { - CounterApp { count: 0 } - } -} - -// Convert incoming tx data to the proper BigEndian size. txs.len() > 8 will return 0 -fn convert_tx(tx: &[u8]) -> u64 { - if tx.len() < 8 { - let pad = 8 - tx.len(); - let mut x = vec![0; pad]; - x.extend_from_slice(tx); - return BigEndian::read_u64(x.as_slice()); - } - BigEndian::read_u64(tx) -} - -impl abci::Application for CounterApp { - // Validate transactions. Rule: Transactions must be incremental: 1,2,3,4... 
- fn check_tx(&mut self, req: &RequestCheckTx) -> ResponseCheckTx { - // Get the Tx [u8] and convert to u64 - let c = convert_tx(req.get_tx()); - let mut resp = ResponseCheckTx::new(); - - // Validation logic - if c != self.count + 1 { - resp.set_code(1); - resp.set_log(String::from("Count must be incremental!")); - return resp; - } - - // Update state to keep state correct for next check_tx call - self.count = c; - resp - } - - fn deliver_tx(&mut self, req: &RequestDeliverTx) -> ResponseDeliverTx { - // Get the Tx [u8] - let c = convert_tx(req.get_tx()); - // Update state - self.count = c; - // Return default code 0 == bueno - ResponseDeliverTx::new() - } - - fn commit(&mut self, _req: &RequestCommit) -> ResponseCommit { - // Create the response - let mut resp = ResponseCommit::new(); - // Convert count to bits - let mut buf = [0; 8]; - BigEndian::write_u64(&mut buf, self.count); - // Set data so last state is included in the block - resp.set_data(buf.to_vec()); - resp - } -} - -fn main() { - // Run on localhost using default Tendermint port - env_logger::from_env(Env::default().default_filter_or("info")).init(); - abci::run_local(CounterApp::new()); -} diff --git a/examples/empty_app.rs b/examples/empty_app.rs deleted file mode 100644 index 50ee62c..0000000 --- a/examples/empty_app.rs +++ /dev/null @@ -1,18 +0,0 @@ -extern crate abci; -extern crate env_logger; - -use env_logger::Env; - -// Simple example that responds with defaults to Tendermint -struct EmptyApp; - -// Implement the Application and use default responses -impl abci::Application for EmptyApp {} - -fn main() { - // Use default local addr and Tendermint ABCI port - let addr = "127.0.0.1:26658".parse().unwrap(); - // Fire it up! - env_logger::from_env(Env::default().default_filter_or("info")).init(); - abci::run(addr, EmptyApp); -} diff --git a/protobuf/abci.proto b/protobuf/abci.proto index c017b47..4483e68 100644 --- a/protobuf/abci.proto +++ b/protobuf/abci.proto @@ -1,12 +1,14 @@ syntax = "proto3"; -package abci; +package tendermint.abci.types; +option go_package = "github.com/tendermint/tendermint/abci/types"; // For more information on gogo.proto, see: // https://github.com/gogo/protobuf/blob/master/extensions.md import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto"; -import "github.com/tendermint/tendermint/libs/common/types.proto"; +import "github.com/tendermint/tendermint/libs/kv/types.proto"; import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; // This file is copied from http://github.com/tendermint/abci // NOTE: When using custom types, mind the warnings. @@ -165,7 +167,7 @@ message ResponseQuery { int64 index = 5; bytes key = 6; bytes value = 7; - merkle.Proof proof = 8; + tendermint.crypto.merkle.Proof proof = 8; int64 height = 9; string codespace = 10; } @@ -226,10 +228,10 @@ message BlockParams { int64 max_gas = 2; } -// EvidenceParams contains limits on the evidence. message EvidenceParams { // Note: must be greater than 0 - int64 max_age = 1; + int64 max_age_num_blocks = 1; + google.protobuf.Duration max_age_duration = 2 [(gogoproto.nullable)=false, (gogoproto.stdduration)=true]; } // ValidatorParams contains limits on validators. 
@@ -244,7 +246,7 @@ message LastCommitInfo { message Event { string type = 1; - repeated common.KVPair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"]; + repeated tendermint.libs.kv.Pair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"]; } //---------------------------------------- @@ -256,26 +258,24 @@ message Header { string chain_id = 2 [(gogoproto.customname)="ChainID"]; int64 height = 3; google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true]; - int64 num_txs = 5; - int64 total_txs = 6; // prev block info - BlockID last_block_id = 7 [(gogoproto.nullable)=false]; + BlockID last_block_id = 5 [(gogoproto.nullable)=false]; // hashes of block data - bytes last_commit_hash = 8; // commit from validators from the last block - bytes data_hash = 9; // transactions + bytes last_commit_hash = 6; // commit from validators from the last block + bytes data_hash = 7; // transactions // hashes from the app output from the prev block - bytes validators_hash = 10; // validators for the current block - bytes next_validators_hash = 11; // validators for the next block - bytes consensus_hash = 12; // consensus params for current block - bytes app_hash = 13; // state after txs from the previous block - bytes last_results_hash = 14;// root hash of all results from the txs from the previous block + bytes validators_hash = 8; // validators for the current block + bytes next_validators_hash = 9; // validators for the next block + bytes consensus_hash = 10; // consensus params for current block + bytes app_hash = 11; // state after txs from the previous block + bytes last_results_hash = 12;// root hash of all results from the txs from the previous block // consensus info - bytes evidence_hash = 15; // evidence included in the block - bytes proposer_address = 16; // original proposer of the block + bytes evidence_hash = 13; // evidence included in the block + bytes proposer_address = 14; // original proposer of the block } message Version { diff --git a/protobuf/github.com/tendermint/tendermint/abci/types/types.proto b/protobuf/github.com/tendermint/tendermint/abci/types/types.proto index 8f9dda8..4483e68 100644 --- a/protobuf/github.com/tendermint/tendermint/abci/types/types.proto +++ b/protobuf/github.com/tendermint/tendermint/abci/types/types.proto @@ -1,12 +1,14 @@ syntax = "proto3"; -package types; +package tendermint.abci.types; +option go_package = "github.com/tendermint/tendermint/abci/types"; // For more information on gogo.proto, see: // https://github.com/gogo/protobuf/blob/master/extensions.md import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto"; -import "github.com/tendermint/tendermint/libs/common/types.proto"; +import "github.com/tendermint/tendermint/libs/kv/types.proto"; import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; // This file is copied from http://github.com/tendermint/abci // NOTE: When using custom types, mind the warnings. @@ -165,7 +167,7 @@ message ResponseQuery { int64 index = 5; bytes key = 6; bytes value = 7; - merkle.Proof proof = 8; + tendermint.crypto.merkle.Proof proof = 8; int64 height = 9; string codespace = 10; } @@ -226,10 +228,10 @@ message BlockParams { int64 max_gas = 2; } -// EvidenceParams contains limits on the evidence. 
message EvidenceParams { // Note: must be greater than 0 - int64 max_age = 1; + int64 max_age_num_blocks = 1; + google.protobuf.Duration max_age_duration = 2 [(gogoproto.nullable)=false, (gogoproto.stdduration)=true]; } // ValidatorParams contains limits on validators. @@ -244,7 +246,7 @@ message LastCommitInfo { message Event { string type = 1; - repeated common.KVPair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"]; + repeated tendermint.libs.kv.Pair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"]; } //---------------------------------------- @@ -256,26 +258,24 @@ message Header { string chain_id = 2 [(gogoproto.customname)="ChainID"]; int64 height = 3; google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true]; - int64 num_txs = 5; - int64 total_txs = 6; // prev block info - BlockID last_block_id = 7 [(gogoproto.nullable)=false]; + BlockID last_block_id = 5 [(gogoproto.nullable)=false]; // hashes of block data - bytes last_commit_hash = 8; // commit from validators from the last block - bytes data_hash = 9; // transactions + bytes last_commit_hash = 6; // commit from validators from the last block + bytes data_hash = 7; // transactions // hashes from the app output from the prev block - bytes validators_hash = 10; // validators for the current block - bytes next_validators_hash = 11; // validators for the next block - bytes consensus_hash = 12; // consensus params for current block - bytes app_hash = 13; // state after txs from the previous block - bytes last_results_hash = 14;// root hash of all results from the txs from the previous block + bytes validators_hash = 8; // validators for the current block + bytes next_validators_hash = 9; // validators for the next block + bytes consensus_hash = 10; // consensus params for current block + bytes app_hash = 11; // state after txs from the previous block + bytes last_results_hash = 12;// root hash of all results from the txs from the previous block // consensus info - bytes evidence_hash = 15; // evidence included in the block - bytes proposer_address = 16; // original proposer of the block + bytes evidence_hash = 13; // evidence included in the block + bytes proposer_address = 14; // original proposer of the block } message Version { diff --git a/protobuf/github.com/tendermint/tendermint/crypto/merkle/merkle.proto b/protobuf/github.com/tendermint/tendermint/crypto/merkle/merkle.proto index 8a6c467..6685492 100644 --- a/protobuf/github.com/tendermint/tendermint/crypto/merkle/merkle.proto +++ b/protobuf/github.com/tendermint/tendermint/crypto/merkle/merkle.proto @@ -1,5 +1,6 @@ syntax = "proto3"; -package merkle; +package tendermint.crypto.merkle; +option go_package = "github.com/tendermint/tendermint/crypto/merkle"; // For more information on gogo.proto, see: // https://github.com/gogo/protobuf/blob/master/extensions.md diff --git a/protobuf/github.com/tendermint/tendermint/libs/common/types.proto b/protobuf/github.com/tendermint/tendermint/libs/kv/types.proto similarity index 86% rename from protobuf/github.com/tendermint/tendermint/libs/common/types.proto rename to protobuf/github.com/tendermint/tendermint/libs/kv/types.proto index 518e7ca..59940c8 100644 --- a/protobuf/github.com/tendermint/tendermint/libs/common/types.proto +++ b/protobuf/github.com/tendermint/tendermint/libs/kv/types.proto @@ -1,5 +1,6 @@ syntax = "proto3"; -package common; +package tendermint.libs.kv; +option go_package = 
"github.com/tendermint/tendermint/libs/kv"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; @@ -16,7 +17,7 @@ option (gogoproto.testgen_all) = true; // Abstract types // Define these here for compatibility but use tmlibs/common.KVPair. -message KVPair { +message Pair { bytes key = 1; bytes value = 2; } diff --git a/protobuf/google/protobuf/descriptor.proto b/protobuf/google/protobuf/descriptor.proto index ed08fcb..a2102d7 100644 --- a/protobuf/google/protobuf/descriptor.proto +++ b/protobuf/google/protobuf/descriptor.proto @@ -40,6 +40,7 @@ syntax = "proto2"; package google.protobuf; + option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; option java_package = "com.google.protobuf"; option java_outer_classname = "DescriptorProtos"; @@ -59,8 +60,8 @@ message FileDescriptorSet { // Describes a complete .proto file. message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. repeated string dependency = 3; @@ -100,8 +101,8 @@ message DescriptorProto { repeated EnumDescriptorProto enum_type = 4; message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. optional ExtensionRangeOptions options = 3; } @@ -115,8 +116,8 @@ message DescriptorProto { // fields or extension ranges in the same message. Reserved ranges may // not overlap. message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. } repeated ReservedRange reserved_range = 9; // Reserved field names, which may not be used by fields in the same message. @@ -137,42 +138,42 @@ message FieldDescriptorProto { enum Type { // 0 is reserved for errors. // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; + TYPE_INT64 = 3; + TYPE_UINT64 = 4; // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; // Tag-delimited aggregate. // Group type is deprecated and not supported in proto3. However, Proto3 // implementations should still be able to parse the group wire format and // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - }; + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. 
+ } enum Label { // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - }; + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } optional string name = 1; optional int32 number = 3; @@ -234,8 +235,8 @@ message EnumDescriptorProto { // is inclusive such that it can appropriately represent the entire int32 // domain. message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. } // Range of reserved numeric values. Reserved numeric values may not be used @@ -276,9 +277,9 @@ message MethodDescriptorProto { optional MethodOptions options = 4; // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default=false]; + optional bool client_streaming = 5 [default = false]; // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default=false]; + optional bool server_streaming = 6 [default = false]; } @@ -314,7 +315,6 @@ message MethodDescriptorProto { // If this turns out to be popular, a web service will be set up // to automatically assign option numbers. - message FileOptions { // Sets the Java package where classes generated from this .proto will be @@ -337,7 +337,7 @@ message FileOptions { // named by java_outer_classname. However, the outer class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default=false]; + optional bool java_multiple_files = 10 [default = false]; // This option does nothing. optional bool java_generate_equals_and_hash = 20 [deprecated=true]; @@ -348,17 +348,17 @@ message FileOptions { // Message reflection will do the same. // However, an extension field still accepts non-UTF-8 byte sequences. // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default=false]; + optional bool java_string_check_utf8 = 27 [default = false]; // Generated classes can be optimized for speed or code size. enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. } - optional OptimizeMode optimize_for = 9 [default=SPEED]; + optional OptimizeMode optimize_for = 9 [default = SPEED]; // Sets the Go package where structs generated from this .proto will be // placed. If omitted, the Go package will be derived from the following: @@ -369,6 +369,7 @@ message FileOptions { + // Should generic services be generated in each language? "Generic" services // are not specific to any particular RPC system. They are generated by the // main code generators in each language (without additional plugins). @@ -379,20 +380,20 @@ message FileOptions { // that generate code specific to your particular RPC system. Therefore, // these default to false. Old code which depends on generic services should // explicitly set them to true. 
- optional bool cc_generic_services = 16 [default=false]; - optional bool java_generic_services = 17 [default=false]; - optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 42 [default=false]; + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default=false]; + optional bool deprecated = 23 [default = false]; // Enables the use of arenas for the proto messages in this file. This applies // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default=false]; + optional bool cc_enable_arenas = 31 [default = false]; // Sets the objective c class prefix which is prepended to all objective c @@ -417,10 +418,9 @@ message FileOptions { // determining the namespace. optional string php_namespace = 41; - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. optional string php_metadata_namespace = 44; // Use this option to change the package of ruby generated classes. Default @@ -428,6 +428,7 @@ message FileOptions { // determining the ruby package. optional string ruby_package = 45; + // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. repeated UninterpretedOption uninterpreted_option = 999; @@ -458,18 +459,18 @@ message MessageOptions { // // Because this is an option, the above two restrictions are not enforced by // the protocol compiler. - optional bool message_set_wire_format = 1 [default=false]; + optional bool message_set_wire_format = 1 [default = false]; // Disables the generation of the standard "descriptor()" accessor, which can // conflict with a field of the same name. This is meant to make migration // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default=false]; + optional bool no_standard_descriptor_accessor = 2 [default = false]; // Is this message deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default=false]; + optional bool deprecated = 3 [default = false]; // Whether the message is an automatically generated map entry type for the // maps field. @@ -486,7 +487,7 @@ message MessageOptions { // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as + // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. // // NOTE: Do not set the option in .proto files. 
Always use the maps syntax @@ -497,6 +498,7 @@ message MessageOptions { reserved 8; // javalite_serializable reserved 9; // javanano_as_lite + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -576,16 +578,16 @@ message FieldOptions { // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. - optional bool lazy = 5 [default=false]; + optional bool lazy = 5 [default = false]; // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. - optional bool deprecated = 3 [default=false]; + optional bool deprecated = 3 [default = false]; // For Google-internal migration only. Do not use. - optional bool weak = 10 [default=false]; + optional bool weak = 10 [default = false]; // The parser stores options it doesn't recognize here. See above. @@ -615,7 +617,7 @@ message EnumOptions { // Depending on the target platform, this can emit Deprecated annotations // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. - optional bool deprecated = 3 [default=false]; + optional bool deprecated = 3 [default = false]; reserved 5; // javanano_as_lite @@ -631,7 +633,7 @@ message EnumValueOptions { // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default=false]; + optional bool deprecated = 1 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -651,7 +653,7 @@ message ServiceOptions { // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, // this is a formalization for deprecating services. - optional bool deprecated = 33 [default=false]; + optional bool deprecated = 33 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -671,18 +673,18 @@ message MethodOptions { // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default=false]; + optional bool deprecated = 33 [default = false]; // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. enum IdempotencyLevel { IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects } - optional IdempotencyLevel idempotency_level = - 34 [default=IDEMPOTENCY_UNKNOWN]; + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; // The parser stores options it doesn't recognize here. See above. 
repeated UninterpretedOption uninterpreted_option = 999; @@ -763,7 +765,7 @@ message SourceCodeInfo { // beginning of the "extend" block and is shared by all extensions within // the block. // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines + // does not mean that it is a descendant. For example, a "group" defines // both a type and a field in a single declaration. Thus, the locations // corresponding to the type and field and their components will overlap. // - Code which tries to interpret locations should probably be designed to @@ -794,14 +796,14 @@ message SourceCodeInfo { // [ 4, 3, 2, 7 ] // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). - repeated int32 path = 1 [packed=true]; + repeated int32 path = 1 [packed = true]; // Always has exactly three or four elements: start line, start column, // end line (optional, otherwise assumed same as start line), end column. // These are packed into a single field for efficiency. Note that line // and column numbers are zero-based -- typically you will want to add // 1 to each before displaying to a user. - repeated int32 span = 2 [packed=true]; + repeated int32 span = 2 [packed = true]; // If this SourceCodeInfo represents a complete declaration, these are any // comments appearing before and after the declaration which appear to be @@ -866,7 +868,7 @@ message GeneratedCodeInfo { message Annotation { // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed=true]; + repeated int32 path = 1 [packed = true]; // Identifies the filesystem path to the original source .proto. optional string source_file = 2; diff --git a/protobuf/google/protobuf/duration.proto b/protobuf/google/protobuf/duration.proto new file mode 100644 index 0000000..99cb102 --- /dev/null +++ b/protobuf/google/protobuf/duration.proto @@ -0,0 +1,116 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. 
For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/protobuf/google/protobuf/timestamp.proto b/protobuf/google/protobuf/timestamp.proto index 05d988a..cd35786 100644 --- a/protobuf/google/protobuf/timestamp.proto +++ b/protobuf/google/protobuf/timestamp.proto @@ -113,17 +113,18 @@ option objc_class_prefix = "GPB"; // 01:30 UTC on January 15, 2017. // // In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) // method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { - // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. diff --git a/src/application.rs b/src/application.rs new file mode 100644 index 0000000..52f09d7 --- /dev/null +++ b/src/application.rs @@ -0,0 +1,136 @@ +use async_trait::async_trait; + +use crate::types::*; + +/// Trait for initialization and for queries from the user. +#[async_trait] +pub trait Info: Send + Sync { + /// Echo a string to test abci client/server implementation. + async fn echo(&self, message: String) -> String { + message + } + + /// Return information about the application state. + /// + /// # Crash Recovery + /// + /// On startup, Tendermint calls the [`info`] method to get the **latest committed state** of the app. The app + /// **MUST** return information consistent with the last block it successfully completed [`commit`] for. + /// + /// If the app succesfully committed block `H` but not `H+1`, then + /// - `last_block_height = H` + /// - `last_block_app_hash = ` + /// + /// If the app failed during the [`commit`] of block `H`, then + /// - `last_block_height = H-1` + /// - `last_block_app_hash = ` + /// + /// [`info`]: trait.Info.html#tymethod.info + /// [`commit`]: trait.Consensus.html#tymethod.commit + async fn info(&self, info_request: InfoRequest) -> InfoResponse; + + /// Set non-consensus critical application specific options. + async fn set_option(&self, _set_option_request: SetOptionRequest) -> Result { + Ok(Default::default()) + } + + /// Query for data from the application at current or past height. + async fn query(&self, _query_request: QueryRequest) -> Result { + Ok(Default::default()) + } +} + +/// Trait for managing consensus of blockchain. +/// +/// # Details +/// +/// [_Consensus_] should maintain a `consensus_state` - the working state for block execution. 
It should be updated by +/// the calls to [`begin_block`], [`deliver_tx`], and [`end_block`] during block execution and committed to disk as the +/// **latest committed state** during [`commit`]. +/// +/// Updates made to the `consensus_state` by each method call must be readable by each subsequent method - ie. the +/// updates are linearizable. +/// +/// [_Consensus_]: trait.Consensus.html#details +/// [`begin_block`]: trait.Consensus.html#tymethod.begin_block +/// [`deliver_tx`]: trait.Consensus.html#tymethod.deliver_tx +/// [`end_block`]: trait.Consensus.html#tymethod.end_block +/// [`commit`]: trait.Consensus.html#tymethod.commit +#[async_trait] +pub trait Consensus: Send + Sync { + /// Called once upon genesis. Usually used to establish initial (genesis) state. + async fn init_chain(&self, init_chain_request: InitChainRequest) -> InitChainResponse; + + /// Signals the beginning of a new block. Called prior to any [`deliver_tx`](trait.Consensus.html#tymethod.deliver_tx)s. + async fn begin_block(&self, begin_block_request: BeginBlockRequest) -> BeginBlockResponse; + + /// Execute the transaction in full. The workhorse of the application. + async fn deliver_tx(&self, deliver_tx_request: DeliverTxRequest) -> Result; + + /// Signals the end of a block. Called after all transactions, prior to each [`commit`](trait.Commit.html#tymethod.commit). + async fn end_block(&self, end_block_request: EndBlockRequest) -> EndBlockResponse; + + /// Persist the application state. + /// + /// # Details + /// + /// Application state should only be persisted to disk during [`commit`]. + /// + /// Before [`commit`] is called, Tendermint locks and flushes the mempool so that no new messages will be received + /// on the mempool connection. This provides an opportunity to safely update all three states ([_Consensus_], + /// [_Mempool_] and [_Info_]) to the **latest committed state** at once. + /// + /// When [`commit`] completes, it unlocks the mempool. + /// + /// # Warning + /// + /// If the ABCI application logic processing the [`commit`] message sends a `/broadcast_tx_sync` or + /// `/broadcast_tx_commit` and waits for the response before proceeding, it will deadlock. Executing those + /// `broadcast_tx` calls involves acquiring a lock that is held during the [`commit`] call, so it's not possible. If + /// you make the call to the `broadcast_tx` endpoints concurrently, that's no problem, it just can't be part of the + /// sequential logic of the [`commit`] function. + /// + /// [`commit`]: trait.Commit.html#tymethod.commit + /// [_Consensus_]: trait.Consensus.html#details + /// [_Mempool_]: trait.Mempool.html#details + /// [_Info_]: trait.Info.html + async fn commit(&self) -> CommitResponse; + + /// Signals that messages queued on the client should be flushed to the server. + async fn flush(&self) {} +} + +/// Trait for managing tendermint's mempool. +/// +/// # Details +/// +/// [_Mempool_] should maintain a `mempool_state` to sequentially process pending transactions in the mempool that have +/// not yet been committed. It should be initialized to the latest committed state at the end of every [`commit`]. +/// +/// The `mempool_state` may be updated concurrently with the `consensus_state`, as messages may be sent concurrently on +/// [_Consensus_] and [_Mempool_] connections. However, before calling [`commit`], Tendermint will lock and flush the +/// mempool connection, ensuring that all existing [`check_tx`] are responded to and no new ones can begin. 
+/// +/// After [`commit`], [`check_tx`] is run again on all transactions that remain in the node's local mempool after +/// filtering those included in the block. To prevent the mempool from rechecking all transactions every time a block is +/// committed, set the configuration option `mempool.recheck=false`. +/// +/// Finally, the mempool will unlock and new transactions can be processed through [`check_tx`] again. +/// +/// Note that [`check_tx`] doesn't have to check everything that affects transaction validity; the expensive things can +/// be skipped. In fact, [`check_tx`] doesn't have to check anything; it might say that any transaction is a valid +/// transaction. Unlike [`deliver_tx`], [`check_tx`] is just there as a sort of weak filter to keep invalid transactions +/// out of the blockchain. It's weak, because a Byzantine node doesn't care about [`check_tx`]; it can propose a block +/// full of invalid transactions if it wants. +/// +/// [_Mempool_]: trait.Mempool.html#details +/// [`commit`]: trait.Consensus.html#tymethod.commit +/// [_Consensus_]: trait.Consensus.html#details +/// [`deliver_tx`]: trait.Consensus.html#tymethod.deliver_tx +/// [`check_tx`]: trait.Mempool.html#method.check_tx +#[async_trait] +pub trait Mempool: Send + Sync { + /// Guardian of the mempool: every node runs CheckTx before letting a transaction into its local mempool. + /// Technically optional - not involved in processing blocks + async fn check_tx(&self, check_tx_request: CheckTxRequest) -> Result; +} diff --git a/src/codec.rs b/src/codec.rs deleted file mode 100644 index b40ad75..0000000 --- a/src/codec.rs +++ /dev/null @@ -1,150 +0,0 @@ -use std::error::Error; - -use bytes::{BufMut, BytesMut}; -use integer_encoding::VarInt; -use protobuf::Message; -use tokio::codec::{Decoder, Encoder}; - -use crate::messages::abci::*; - -#[derive(Debug)] -pub struct ABCICodec; - -impl ABCICodec { - pub fn new() -> ABCICodec { - ABCICodec - } -} - -impl Decoder for ABCICodec { - type Item = Request; - type Error = Box; - - fn decode(&mut self, buf: &mut BytesMut) -> Result, Box> { - let length = buf.len(); - if length == 0 { - return Ok(None); - } - let varint: (i64, usize) = i64::decode_var(&buf[..]); - if varint.0 as usize + varint.1 > length { - return Ok(None); - } - let request = protobuf::parse_from_bytes(&buf[varint.1..(varint.0 as usize + varint.1)])?; - buf.split_to(varint.0 as usize + varint.1); - Ok(Some(request)) - } -} - -impl Encoder for ABCICodec { - type Item = Response; - type Error = Box; - - fn encode(&mut self, msg: Response, buf: &mut BytesMut) -> Result<(), Box> { - let msg_len = msg.compute_size(); - let varint = i64::encode_var_vec(i64::from(msg_len)); - - let remaining = buf.remaining_mut(); - let needed = msg_len as usize + varint.len(); - if remaining < needed { - buf.reserve(needed); - } - - buf.put(&varint); - msg.write_to_writer(&mut buf.writer())?; - trace!("Encode response! {:?}", &buf[..]); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn setup_echo_request_buf() -> Result> { - let buf = &mut BytesMut::new(); - - let mut r = Request::new(); - let mut echo = RequestEcho::new(); - echo.set_message(String::from("Helloworld")); - r.set_echo(echo); - - let msg_len = r.compute_size(); - let varint = i64::encode_var_vec(msg_len as i64); - buf.put(varint); - r.write_to_writer(&mut buf.writer())?; - - trace!("Encode response! 
{:?}", &buf[..]); - - Ok(buf.take()) - } - - fn setup_echo_large_request_buf() -> Result> { - let buf = &mut BytesMut::new(); - - let mut r = Request::new(); - let mut echo = RequestEcho::new(); - let st = (0..2 * 4096).map(|_| "X").collect::(); - echo.set_message(st); - r.set_echo(echo); - - let msg_len = r.compute_size(); - let varint = i64::encode_var_vec(msg_len as i64); - - let remaining = buf.remaining_mut(); - let needed = msg_len as usize + varint.len(); - if remaining < needed { - buf.reserve(needed); - } - - buf.put(varint); - r.write_to_writer(&mut buf.writer())?; - - trace!("Encode response! {:?}", &buf[..]); - - Ok(buf.take()) - } - - #[test] - fn should_decode() { - let mut codec = ABCICodec::new(); - let mut buf = setup_echo_request_buf().unwrap(); - let r = codec.decode(&mut buf); - assert!(r.is_ok()); - let v1 = r.ok(); - assert!(v1.is_some()); - let v2 = v1.unwrap(); - assert!(v2.is_some()); - let v3 = v2.unwrap(); - assert!(v3.has_echo()); - assert_eq!(v3.get_echo().get_message(), "Helloworld"); - } - - #[test] - fn should_decode_large_request() { - let mut codec = ABCICodec::new(); - let mut buf = setup_echo_large_request_buf().unwrap(); - let r = codec.decode(&mut buf); - assert!(r.is_ok()); - let v1 = r.ok(); - assert!(v1.is_some()); - let v2 = v1.unwrap(); - assert!(v2.is_some()); - let v3 = v2.unwrap(); - assert!(v3.has_echo()); - } - - #[test] - fn should_encode() { - let mut codec = ABCICodec::new(); - - let mut r = Response::new(); - let mut echo = ResponseEcho::new(); - echo.set_message(String::from("Helloworld")); - r.set_echo(echo); - - let buf = &mut BytesMut::new(); - - let v = codec.encode(r, buf); - assert!(v.is_ok()); - } -} diff --git a/src/lib.rs b/src/lib.rs index a0690e9..37a5fac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,121 +1,73 @@ -//! # Tendermint ABCI library for Rust +#![deny(missing_docs, unsafe_code)] +//! A Rust crate for creating ABCI applications. //! -//! This library provides an application Trait and TCP server for implementing Tendemint ABCI -//! application in Rust. The Application Trait provides default implementations for each callback -//! to simplify development. +//! ## ABCI Overview //! -//! ## Example +//! ABCI is the interface between Tendermint (a state-machine replication engine) and your application (the actual state +//! machine). It consists of a set of methods, where each method has a corresponding `Request` and `Response` message type. +//! Tendermint calls the ABCI methods on the ABCI application by sending the `Request` messages and receiving the `Response` +//! messages in return. //! -//! Here's a simple example that communicates with Tendermint. Defaults callbacks are handled by -//! the Trait. The app doesn't do any actual processing on a transaction. +//! ABCI methods are split across 3 separate ABCI connections: //! -//! ```rust,no_run -//! struct EmptyApp; +//! - `Consensus` Connection: `InitChain`, `BeginBlock`, `DeliverTx`, `EndBlock`, `Commit` +//! - `Mempool` Connection: `CheckTx` +//! - `Info` Connection: `Info`, `SetOption`, `Query` //! -//! impl abci::Application for EmptyApp {} +//! Additionally, there is a `Flush` method that is called on every connection, and an `Echo` method that is just for +//! debugging. //! -//! fn run_empty_app() { -//! abci::run_local(EmptyApp); -//! } -//!``` +//! To know more about ABCI protocol specifications, go to official ABCI [documentation](https://tendermint.com/docs/spec/abci/). //! 
-extern crate byteorder; -extern crate bytes; -extern crate env_logger; -extern crate futures; -extern crate integer_encoding; -#[macro_use] -extern crate log; -extern crate core; -extern crate protobuf; -extern crate tokio; +//! ## Usage +//! +//! Add `abci` in your `Cargo.toml`'s `dependencies` section: +//! +//! ```toml +//! [dependencies] +//! abci = "0.7" +//! ``` +//! +//! Each ABCI application has to implement three core traits corresponding to all three ABCI connections, `Consensus`, +//! `Mempool` and `Info`. +//! +//! > Note: Implementations of these traits are expected to be `Send + Sync` and methods take immutable reference of `self`. +//! So, internal mutability must be handled using thread safe (`Arc`, `Mutex`, etc.) constructs. +//! +//! After implementing all three above mentioned `trait`s, you can create a `Server` object and use `Server::run()`to start +//! ABCI application. +//! +//! `Server::run()` is an `async` function and returns a `Future`. So, you'll need an executor to drive `Future` returned +//! from `Server::run()`. `async-std` and `tokio` are two popular options. In `counter` example, we use `tokio`'s executor. +//! +//! To know more, go to `examples/` to see a sample ABCI application. +//! +//! ### Features +//! +//! - `tokio`: Enables `tokio` backend for running ABCI TCP/UDS server +//! - **Enabled** by default. +//! - `async-std`: Enables `async-std` backend for running ABCI TCP/UDS server +//! - **Disabled** by default. +//! +//! > Note: Features `tokio` and `async-std` are mutually exclusive, i.e., only one of them can be enabled at a time. Compilation +//! will fail if either both of them are enabled or none of them are enabled. +#![cfg_attr(feature = "doc", feature(doc_cfg))] -use std::net::SocketAddr; +#[cfg(all(feature = "async-std", feature = "tokio"))] +compile_error!("Features `async-std` and `tokio` are mutually exclusive"); -pub use crate::messages::abci::*; -pub use crate::messages::merkle::*; -pub use crate::messages::types::*; -use crate::server::serve; +#[cfg(not(any(feature = "async-std", feature = "tokio")))] +compile_error!("Either feature `async-std` or `tokio` must be enabled for this crate"); -mod codec; -mod messages; +mod application; +mod proto; mod server; -/// Main Trait for an ABCI application. Provides generic responses for all callbacks -/// Override desired callbacks as needed. Tendermint makes 3 TCP connections to the -/// application and does so in a synchonized manner. -pub trait Application { - /// Query Connection: Called on startup from Tendermint. The application should normally - /// return the last know state so Tendermint can determine if it needs to replay blocks - /// to the application. - fn info(&mut self, _req: &RequestInfo) -> ResponseInfo { - ResponseInfo::new() - } - - /// Query Connection: Set options on the application (rarely used) - fn set_option(&mut self, _req: &RequestSetOption) -> ResponseSetOption { - ResponseSetOption::new() - } - - /// Query Connection: Query your application. This usually resolves through a merkle tree holding - /// the state of the app. - fn query(&mut self, _req: &RequestQuery) -> ResponseQuery { - ResponseQuery::new() - } - - /// Mempool Connection: Used to validate incoming transactions. If the application reponds - /// with a non-zero value, the transaction is added to Tendermint's mempool for processing - /// on the deliver_tx call below. 
- fn check_tx(&mut self, _req: &RequestCheckTx) -> ResponseCheckTx { - ResponseCheckTx::new() - } - - /// Consensus Connection: Called once on startup. Usually used to establish initial (genesis) - /// state. - fn init_chain(&mut self, _req: &RequestInitChain) -> ResponseInitChain { - ResponseInitChain::new() - } - - /// Consensus Connection: Called at the start of processing a block of transactions - /// The flow is: - /// begin_block() - /// deliver_tx() for each transaction in the block - /// end_block() - /// commit() - fn begin_block(&mut self, _req: &RequestBeginBlock) -> ResponseBeginBlock { - ResponseBeginBlock::new() - } - - /// Consensus Connection: Actually processing the transaction, performing some form of a - /// state transistion. - fn deliver_tx(&mut self, _p: &RequestDeliverTx) -> ResponseDeliverTx { - ResponseDeliverTx::new() - } - - /// Consensus Connection: Called at the end of the block. Often used to update the validator set. - fn end_block(&mut self, _req: &RequestEndBlock) -> ResponseEndBlock { - ResponseEndBlock::new() - } - - /// Consensus Connection: Commit the block with the latest state from the application. - fn commit(&mut self, _req: &RequestCommit) -> ResponseCommit { - ResponseCommit::new() - } -} +pub mod types; -/// Setup the app and start the server using localhost and default tendermint port 26658 -pub fn run_local(app: A) -where - A: Application + 'static + Send + Sync, -{ - let addr = "127.0.0.1:26658".parse().unwrap(); - run(addr, app); -} +/// Utility macro for implementing [`Consensus`](trait.Consensus.html), [`Mempool`](trait.Mempool.html) and +/// [`Info`](trait.Info.html) traits. +pub use async_trait::async_trait; -/// Setup the application and start the server. Use this fn when setting different ip:port. 
-pub fn run<A>(listen_addr: SocketAddr, app: A)
-where
-    A: Application + 'static + Send + Sync,
-{
-    serve(app, listen_addr).unwrap();
-}
+pub use self::application::{Consensus, Info, Mempool};
+pub use self::server::{Address, Server};
diff --git a/src/messages/mod.rs b/src/messages/mod.rs
deleted file mode 100644
index 62f0c97..0000000
--- a/src/messages/mod.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-pub mod abci; // Core types
-pub mod merkle;
-pub mod types; // Common (KV Pairs)
diff --git a/src/proto.rs b/src/proto.rs
new file mode 100644
index 0000000..32cc51e
--- /dev/null
+++ b/src/proto.rs
@@ -0,0 +1,91 @@
+pub mod abci;
+pub mod merkle;
+pub mod types;
+
+use std::io::{Error, ErrorKind, Result};
+
+#[cfg(feature = "async-std")]
+use async_std::{
+    io::{Read, Write},
+    prelude::*,
+};
+#[cfg(feature = "tokio")]
+use tokio::io::{AsyncRead as Read, AsyncReadExt, AsyncWrite as Write, AsyncWriteExt};
+
+use integer_encoding::VarInt;
+use protobuf::{parse_from_bytes, Message};
+
+use self::abci::{Request, Response};
+
+const BUFLEN: usize = 10;
+const MSB: u8 = 0b1000_0000;
+
+/// Decodes a `Request` from stream
+pub async fn decode<R: Read + Unpin>(mut reader: R) -> Result<Option<Request>> {
+    let length: i64 = read_varint(&mut reader).await?;
+
+    if length == 0 {
+        return Ok(None);
+    }
+
+    let mut bytes = vec![0; length as usize];
+    reader.take(length as u64).read(&mut bytes).await?;
+
+    parse_from_bytes(&bytes)
+        .map(Some)
+        .map_err(|e| Error::new(ErrorKind::InvalidData, e))
+}
+
+/// Encodes a `Response` to stream
+pub async fn encode<W: Write + Unpin>(message: Response, mut writer: W) -> Result<()> {
+    write_varint(&mut writer, i64::from(message.compute_size())).await?;
+
+    let bytes = message
+        .write_to_bytes()
+        .map_err(|e| Error::new(ErrorKind::Other, e))?;
+
+    writer.write_all(&bytes).await
+}
+
+/// Reads a varint from `AsyncRead`. Implementation is based on original synchronous version of
+/// [`read_varint`](https://github.com/dermesser/integer-encoding-rs/blob/v1.0.7/src/reader.rs#L21)
+///
+/// There won't be any need for this once [this](https://github.com/dermesser/integer-encoding-rs/issues/4) is fixed
+async fn read_varint<VI: VarInt, R: Read + Unpin>(mut reader: R) -> Result<VI> {
+    let mut buf = [0 as u8; BUFLEN];
+    let mut i = 0;
+
+    loop {
+        if i >= BUFLEN {
+            return Err(Error::new(ErrorKind::InvalidData, "Unterminated varint"));
+        }
+
+        let read = reader.read(&mut buf[i..=i]).await?;
+
+        // EOF
+        if read == 0 && i == 0 {
+            return Err(Error::new(ErrorKind::UnexpectedEof, "Reached EOF"));
+        }
+
+        if buf[i] & MSB == 0 {
+            break;
+        }
+
+        i += 1;
+    }
+
+    let (result, _) = VI::decode_var(&buf[0..=i]);
+
+    Ok(result)
+}
+
+/// Writes a varint to `AsyncWrite`. Implementation is based on original synchronous version of
+/// [`write_varint`](https://github.com/dermesser/integer-encoding-rs/blob/v1.0.7/src/writer.rs#L12)
+///
+/// There won't be any need for this once [this](https://github.com/dermesser/integer-encoding-rs/issues/4) is fixed
+async fn write_varint<VI: VarInt, W: Write + Unpin>(mut writer: W, n: VI) -> Result<usize> {
+    let mut buf = [0 as u8; BUFLEN];
+    let used = n.encode_var(&mut buf[..]);
+
+    writer.write(&buf[0..used]).await
+}
diff --git a/src/messages/abci.rs b/src/proto/abci.rs
similarity index 95%
rename from src/messages/abci.rs
rename to src/proto/abci.rs
index 01e03ca..c065b94 100644
--- a/src/messages/abci.rs
+++ b/src/proto/abci.rs
@@ -1,7 +1,7 @@
-// This file is generated by rust-protobuf 2.10.0. Do not edit
+// This file is generated by rust-protobuf 2.10.2. 
Do not edit // @generated -// https://github.com/Manishearth/rust-clippy/issues/702 +// https://github.com/rust-lang/rust-clippy/issues/702 #![allow(unknown_lints)] #![allow(clippy::all)] @@ -24,7 +24,7 @@ use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions; /// Generated files are compatible only with the same version /// of protobuf runtime. -const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_10_0; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_10_2; #[derive(PartialEq,Clone,Default)] pub struct Request { @@ -61,7 +61,7 @@ impl Request { ::std::default::Default::default() } - // .abci.RequestEcho echo = 2; + // .tendermint.abci.types.RequestEcho echo = 2; pub fn get_echo(&self) -> &RequestEcho { @@ -110,7 +110,7 @@ impl Request { } } - // .abci.RequestFlush flush = 3; + // .tendermint.abci.types.RequestFlush flush = 3; pub fn get_flush(&self) -> &RequestFlush { @@ -159,7 +159,7 @@ impl Request { } } - // .abci.RequestInfo info = 4; + // .tendermint.abci.types.RequestInfo info = 4; pub fn get_info(&self) -> &RequestInfo { @@ -208,7 +208,7 @@ impl Request { } } - // .abci.RequestSetOption set_option = 5; + // .tendermint.abci.types.RequestSetOption set_option = 5; pub fn get_set_option(&self) -> &RequestSetOption { @@ -257,7 +257,7 @@ impl Request { } } - // .abci.RequestInitChain init_chain = 6; + // .tendermint.abci.types.RequestInitChain init_chain = 6; pub fn get_init_chain(&self) -> &RequestInitChain { @@ -306,7 +306,7 @@ impl Request { } } - // .abci.RequestQuery query = 7; + // .tendermint.abci.types.RequestQuery query = 7; pub fn get_query(&self) -> &RequestQuery { @@ -355,7 +355,7 @@ impl Request { } } - // .abci.RequestBeginBlock begin_block = 8; + // .tendermint.abci.types.RequestBeginBlock begin_block = 8; pub fn get_begin_block(&self) -> &RequestBeginBlock { @@ -404,7 +404,7 @@ impl Request { } } - // .abci.RequestCheckTx check_tx = 9; + // .tendermint.abci.types.RequestCheckTx check_tx = 9; pub fn get_check_tx(&self) -> &RequestCheckTx { @@ -453,7 +453,7 @@ impl Request { } } - // .abci.RequestDeliverTx deliver_tx = 19; + // .tendermint.abci.types.RequestDeliverTx deliver_tx = 19; pub fn get_deliver_tx(&self) -> &RequestDeliverTx { @@ -502,7 +502,7 @@ impl Request { } } - // .abci.RequestEndBlock end_block = 11; + // .tendermint.abci.types.RequestEndBlock end_block = 11; pub fn get_end_block(&self) -> &RequestEndBlock { @@ -551,7 +551,7 @@ impl Request { } } - // .abci.RequestCommit commit = 12; + // .tendermint.abci.types.RequestCommit commit = 12; pub fn get_commit(&self) -> &RequestCommit { @@ -1830,7 +1830,7 @@ impl RequestInitChain { ::std::mem::replace(&mut self.chain_id, ::std::string::String::new()) } - // .abci.ConsensusParams consensus_params = 3; + // .tendermint.abci.types.ConsensusParams consensus_params = 3; pub fn get_consensus_params(&self) -> &ConsensusParams { @@ -1863,7 +1863,7 @@ impl RequestInitChain { self.consensus_params.take().unwrap_or_else(|| ConsensusParams::new()) } - // repeated .abci.ValidatorUpdate validators = 4; + // repeated .tendermint.abci.types.ValidatorUpdate validators = 4; pub fn get_validators(&self) -> &[ValidatorUpdate] { @@ -2451,7 +2451,7 @@ impl RequestBeginBlock { ::std::mem::replace(&mut self.hash, ::std::vec::Vec::new()) } - // .abci.Header header = 2; + // .tendermint.abci.types.Header header = 2; pub fn get_header(&self) -> &Header { @@ -2484,7 +2484,7 @@ impl RequestBeginBlock { self.header.take().unwrap_or_else(|| Header::new()) } - // .abci.LastCommitInfo last_commit_info = 3; + // 
.tendermint.abci.types.LastCommitInfo last_commit_info = 3; pub fn get_last_commit_info(&self) -> &LastCommitInfo { @@ -2517,7 +2517,7 @@ impl RequestBeginBlock { self.last_commit_info.take().unwrap_or_else(|| LastCommitInfo::new()) } - // repeated .abci.Evidence byzantine_validators = 4; + // repeated .tendermint.abci.types.Evidence byzantine_validators = 4; pub fn get_byzantine_validators(&self) -> &[Evidence] { @@ -2781,7 +2781,7 @@ impl RequestCheckTx { ::std::mem::replace(&mut self.tx, ::std::vec::Vec::new()) } - // .abci.CheckTxType type = 2; + // .tendermint.abci.types.CheckTxType type = 2; pub fn get_field_type(&self) -> CheckTxType { @@ -3427,7 +3427,7 @@ impl Response { ::std::default::Default::default() } - // .abci.ResponseException exception = 1; + // .tendermint.abci.types.ResponseException exception = 1; pub fn get_exception(&self) -> &ResponseException { @@ -3476,7 +3476,7 @@ impl Response { } } - // .abci.ResponseEcho echo = 2; + // .tendermint.abci.types.ResponseEcho echo = 2; pub fn get_echo(&self) -> &ResponseEcho { @@ -3525,7 +3525,7 @@ impl Response { } } - // .abci.ResponseFlush flush = 3; + // .tendermint.abci.types.ResponseFlush flush = 3; pub fn get_flush(&self) -> &ResponseFlush { @@ -3574,7 +3574,7 @@ impl Response { } } - // .abci.ResponseInfo info = 4; + // .tendermint.abci.types.ResponseInfo info = 4; pub fn get_info(&self) -> &ResponseInfo { @@ -3623,7 +3623,7 @@ impl Response { } } - // .abci.ResponseSetOption set_option = 5; + // .tendermint.abci.types.ResponseSetOption set_option = 5; pub fn get_set_option(&self) -> &ResponseSetOption { @@ -3672,7 +3672,7 @@ impl Response { } } - // .abci.ResponseInitChain init_chain = 6; + // .tendermint.abci.types.ResponseInitChain init_chain = 6; pub fn get_init_chain(&self) -> &ResponseInitChain { @@ -3721,7 +3721,7 @@ impl Response { } } - // .abci.ResponseQuery query = 7; + // .tendermint.abci.types.ResponseQuery query = 7; pub fn get_query(&self) -> &ResponseQuery { @@ -3770,7 +3770,7 @@ impl Response { } } - // .abci.ResponseBeginBlock begin_block = 8; + // .tendermint.abci.types.ResponseBeginBlock begin_block = 8; pub fn get_begin_block(&self) -> &ResponseBeginBlock { @@ -3819,7 +3819,7 @@ impl Response { } } - // .abci.ResponseCheckTx check_tx = 9; + // .tendermint.abci.types.ResponseCheckTx check_tx = 9; pub fn get_check_tx(&self) -> &ResponseCheckTx { @@ -3868,7 +3868,7 @@ impl Response { } } - // .abci.ResponseDeliverTx deliver_tx = 10; + // .tendermint.abci.types.ResponseDeliverTx deliver_tx = 10; pub fn get_deliver_tx(&self) -> &ResponseDeliverTx { @@ -3917,7 +3917,7 @@ impl Response { } } - // .abci.ResponseEndBlock end_block = 11; + // .tendermint.abci.types.ResponseEndBlock end_block = 11; pub fn get_end_block(&self) -> &ResponseEndBlock { @@ -3966,7 +3966,7 @@ impl Response { } } - // .abci.ResponseCommit commit = 12; + // .tendermint.abci.types.ResponseCommit commit = 12; pub fn get_commit(&self) -> &ResponseCommit { @@ -5497,7 +5497,7 @@ impl ResponseInitChain { ::std::default::Default::default() } - // .abci.ConsensusParams consensus_params = 1; + // .tendermint.abci.types.ConsensusParams consensus_params = 1; pub fn get_consensus_params(&self) -> &ConsensusParams { @@ -5530,7 +5530,7 @@ impl ResponseInitChain { self.consensus_params.take().unwrap_or_else(|| ConsensusParams::new()) } - // repeated .abci.ValidatorUpdate validators = 2; + // repeated .tendermint.abci.types.ValidatorUpdate validators = 2; pub fn get_validators(&self) -> &[ValidatorUpdate] { @@ -5871,7 +5871,7 @@ impl ResponseQuery { 
::std::mem::replace(&mut self.value, ::std::vec::Vec::new()) } - // .merkle.Proof proof = 8; + // .tendermint.crypto.merkle.Proof proof = 8; pub fn get_proof(&self) -> &super::merkle::Proof { @@ -6228,7 +6228,7 @@ impl ResponseBeginBlock { ::std::default::Default::default() } - // repeated .abci.Event events = 1; + // repeated .tendermint.abci.types.Event events = 1; pub fn get_events(&self) -> &[Event] { @@ -6534,7 +6534,7 @@ impl ResponseCheckTx { self.gas_used = v; } - // repeated .abci.Event events = 7; + // repeated .tendermint.abci.types.Event events = 7; pub fn get_events(&self) -> &[Event] { @@ -6983,7 +6983,7 @@ impl ResponseDeliverTx { self.gas_used = v; } - // repeated .abci.Event events = 7; + // repeated .tendermint.abci.types.Event events = 7; pub fn get_events(&self) -> &[Event] { @@ -7304,7 +7304,7 @@ impl ResponseEndBlock { ::std::default::Default::default() } - // repeated .abci.ValidatorUpdate validator_updates = 1; + // repeated .tendermint.abci.types.ValidatorUpdate validator_updates = 1; pub fn get_validator_updates(&self) -> &[ValidatorUpdate] { @@ -7329,7 +7329,7 @@ impl ResponseEndBlock { ::std::mem::replace(&mut self.validator_updates, ::protobuf::RepeatedField::new()) } - // .abci.ConsensusParams consensus_param_updates = 2; + // .tendermint.abci.types.ConsensusParams consensus_param_updates = 2; pub fn get_consensus_param_updates(&self) -> &ConsensusParams { @@ -7362,7 +7362,7 @@ impl ResponseEndBlock { self.consensus_param_updates.take().unwrap_or_else(|| ConsensusParams::new()) } - // repeated .abci.Event events = 3; + // repeated .tendermint.abci.types.Event events = 3; pub fn get_events(&self) -> &[Event] { @@ -7755,7 +7755,7 @@ impl ConsensusParams { ::std::default::Default::default() } - // .abci.BlockParams block = 1; + // .tendermint.abci.types.BlockParams block = 1; pub fn get_block(&self) -> &BlockParams { @@ -7788,7 +7788,7 @@ impl ConsensusParams { self.block.take().unwrap_or_else(|| BlockParams::new()) } - // .abci.EvidenceParams evidence = 2; + // .tendermint.abci.types.EvidenceParams evidence = 2; pub fn get_evidence(&self) -> &EvidenceParams { @@ -7821,7 +7821,7 @@ impl ConsensusParams { self.evidence.take().unwrap_or_else(|| EvidenceParams::new()) } - // .abci.ValidatorParams validator = 3; + // .tendermint.abci.types.ValidatorParams validator = 3; pub fn get_validator(&self) -> &ValidatorParams { @@ -8231,7 +8231,8 @@ impl ::protobuf::reflect::ProtobufValue for BlockParams { #[derive(PartialEq,Clone,Default)] pub struct EvidenceParams { // message fields - pub max_age: i64, + pub max_age_num_blocks: i64, + pub max_age_duration: ::protobuf::SingularPtrField<::protobuf::well_known_types::Duration>, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -8248,24 +8249,62 @@ impl EvidenceParams { ::std::default::Default::default() } - // int64 max_age = 1; + // int64 max_age_num_blocks = 1; - pub fn get_max_age(&self) -> i64 { - self.max_age + pub fn get_max_age_num_blocks(&self) -> i64 { + self.max_age_num_blocks } - pub fn clear_max_age(&mut self) { - self.max_age = 0; + pub fn clear_max_age_num_blocks(&mut self) { + self.max_age_num_blocks = 0; } // Param is passed by value, moved - pub fn set_max_age(&mut self, v: i64) { - self.max_age = v; + pub fn set_max_age_num_blocks(&mut self, v: i64) { + self.max_age_num_blocks = v; + } + + // .google.protobuf.Duration max_age_duration = 2; + + + pub fn get_max_age_duration(&self) -> &::protobuf::well_known_types::Duration { + 
self.max_age_duration.as_ref().unwrap_or_else(|| ::protobuf::well_known_types::Duration::default_instance()) + } + pub fn clear_max_age_duration(&mut self) { + self.max_age_duration.clear(); + } + + pub fn has_max_age_duration(&self) -> bool { + self.max_age_duration.is_some() + } + + // Param is passed by value, moved + pub fn set_max_age_duration(&mut self, v: ::protobuf::well_known_types::Duration) { + self.max_age_duration = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_max_age_duration(&mut self) -> &mut ::protobuf::well_known_types::Duration { + if self.max_age_duration.is_none() { + self.max_age_duration.set_default(); + } + self.max_age_duration.as_mut().unwrap() + } + + // Take field + pub fn take_max_age_duration(&mut self) -> ::protobuf::well_known_types::Duration { + self.max_age_duration.take().unwrap_or_else(|| ::protobuf::well_known_types::Duration::new()) } } impl ::protobuf::Message for EvidenceParams { fn is_initialized(&self) -> bool { + for v in &self.max_age_duration { + if !v.is_initialized() { + return false; + } + }; true } @@ -8278,7 +8317,10 @@ impl ::protobuf::Message for EvidenceParams { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int64()?; - self.max_age = tmp; + self.max_age_num_blocks = tmp; + }, + 2 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.max_age_duration)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; @@ -8292,8 +8334,12 @@ impl ::protobuf::Message for EvidenceParams { #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; - if self.max_age != 0 { - my_size += ::protobuf::rt::value_size(1, self.max_age, ::protobuf::wire_format::WireTypeVarint); + if self.max_age_num_blocks != 0 { + my_size += ::protobuf::rt::value_size(1, self.max_age_num_blocks, ::protobuf::wire_format::WireTypeVarint); + } + if let Some(ref v) = self.max_age_duration.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); @@ -8301,8 +8347,13 @@ impl ::protobuf::Message for EvidenceParams { } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { - if self.max_age != 0 { - os.write_int64(1, self.max_age)?; + if self.max_age_num_blocks != 0 { + os.write_int64(1, self.max_age_num_blocks)?; + } + if let Some(ref v) = self.max_age_duration.as_ref() { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) @@ -8347,9 +8398,14 @@ impl ::protobuf::Message for EvidenceParams { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>( - "max_age", - |m: &EvidenceParams| { &m.max_age }, - |m: &mut EvidenceParams| { &mut m.max_age }, + "max_age_num_blocks", + |m: &EvidenceParams| { &m.max_age_num_blocks }, + |m: &mut EvidenceParams| { &mut m.max_age_num_blocks }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, 
::protobuf::types::ProtobufTypeMessage<::protobuf::well_known_types::Duration>>( + "max_age_duration", + |m: &EvidenceParams| { &m.max_age_duration }, + |m: &mut EvidenceParams| { &mut m.max_age_duration }, )); ::protobuf::reflect::MessageDescriptor::new::( "EvidenceParams", @@ -8373,7 +8429,8 @@ impl ::protobuf::Message for EvidenceParams { impl ::protobuf::Clear for EvidenceParams { fn clear(&mut self) { - self.max_age = 0; + self.max_age_num_blocks = 0; + self.max_age_duration.clear(); self.unknown_fields.clear(); } } @@ -8594,7 +8651,7 @@ impl LastCommitInfo { self.round = v; } - // repeated .abci.VoteInfo votes = 2; + // repeated .tendermint.abci.types.VoteInfo votes = 2; pub fn get_votes(&self) -> &[VoteInfo] { @@ -8773,7 +8830,7 @@ impl ::protobuf::reflect::ProtobufValue for LastCommitInfo { pub struct Event { // message fields pub field_type: ::std::string::String, - pub attributes: ::protobuf::RepeatedField, + pub attributes: ::protobuf::RepeatedField, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, @@ -8816,10 +8873,10 @@ impl Event { ::std::mem::replace(&mut self.field_type, ::std::string::String::new()) } - // repeated .common.KVPair attributes = 2; + // repeated .tendermint.libs.kv.Pair attributes = 2; - pub fn get_attributes(&self) -> &[super::types::KVPair] { + pub fn get_attributes(&self) -> &[super::types::Pair] { &self.attributes } pub fn clear_attributes(&mut self) { @@ -8827,17 +8884,17 @@ impl Event { } // Param is passed by value, moved - pub fn set_attributes(&mut self, v: ::protobuf::RepeatedField) { + pub fn set_attributes(&mut self, v: ::protobuf::RepeatedField) { self.attributes = v; } // Mutable pointer to the field. - pub fn mut_attributes(&mut self) -> &mut ::protobuf::RepeatedField { + pub fn mut_attributes(&mut self) -> &mut ::protobuf::RepeatedField { &mut self.attributes } // Take field - pub fn take_attributes(&mut self) -> ::protobuf::RepeatedField { + pub fn take_attributes(&mut self) -> ::protobuf::RepeatedField { ::std::mem::replace(&mut self.attributes, ::protobuf::RepeatedField::new()) } } @@ -8942,7 +8999,7 @@ impl ::protobuf::Message for Event { |m: &Event| { &m.field_type }, |m: &mut Event| { &mut m.field_type }, )); - fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( "attributes", |m: &Event| { &m.attributes }, |m: &mut Event| { &mut m.attributes }, @@ -8994,8 +9051,6 @@ pub struct Header { pub chain_id: ::std::string::String, pub height: i64, pub time: ::protobuf::SingularPtrField<::protobuf::well_known_types::Timestamp>, - pub num_txs: i64, - pub total_txs: i64, pub last_block_id: ::protobuf::SingularPtrField, pub last_commit_hash: ::std::vec::Vec, pub data_hash: ::std::vec::Vec, @@ -9022,7 +9077,7 @@ impl Header { ::std::default::Default::default() } - // .abci.Version version = 1; + // .tendermint.abci.types.Version version = 1; pub fn get_version(&self) -> &Version { @@ -9129,37 +9184,7 @@ impl Header { self.time.take().unwrap_or_else(|| ::protobuf::well_known_types::Timestamp::new()) } - // int64 num_txs = 5; - - - pub fn get_num_txs(&self) -> i64 { - self.num_txs - } - pub fn clear_num_txs(&mut self) { - self.num_txs = 0; - } - - // Param is passed by value, moved - pub fn set_num_txs(&mut self, v: i64) { - self.num_txs = v; - } - - // int64 total_txs = 6; - - - pub fn get_total_txs(&self) -> i64 
{ - self.total_txs - } - pub fn clear_total_txs(&mut self) { - self.total_txs = 0; - } - - // Param is passed by value, moved - pub fn set_total_txs(&mut self, v: i64) { - self.total_txs = v; - } - - // .abci.BlockID last_block_id = 7; + // .tendermint.abci.types.BlockID last_block_id = 5; pub fn get_last_block_id(&self) -> &BlockID { @@ -9192,7 +9217,7 @@ impl Header { self.last_block_id.take().unwrap_or_else(|| BlockID::new()) } - // bytes last_commit_hash = 8; + // bytes last_commit_hash = 6; pub fn get_last_commit_hash(&self) -> &[u8] { @@ -9218,7 +9243,7 @@ impl Header { ::std::mem::replace(&mut self.last_commit_hash, ::std::vec::Vec::new()) } - // bytes data_hash = 9; + // bytes data_hash = 7; pub fn get_data_hash(&self) -> &[u8] { @@ -9244,7 +9269,7 @@ impl Header { ::std::mem::replace(&mut self.data_hash, ::std::vec::Vec::new()) } - // bytes validators_hash = 10; + // bytes validators_hash = 8; pub fn get_validators_hash(&self) -> &[u8] { @@ -9270,7 +9295,7 @@ impl Header { ::std::mem::replace(&mut self.validators_hash, ::std::vec::Vec::new()) } - // bytes next_validators_hash = 11; + // bytes next_validators_hash = 9; pub fn get_next_validators_hash(&self) -> &[u8] { @@ -9296,7 +9321,7 @@ impl Header { ::std::mem::replace(&mut self.next_validators_hash, ::std::vec::Vec::new()) } - // bytes consensus_hash = 12; + // bytes consensus_hash = 10; pub fn get_consensus_hash(&self) -> &[u8] { @@ -9322,7 +9347,7 @@ impl Header { ::std::mem::replace(&mut self.consensus_hash, ::std::vec::Vec::new()) } - // bytes app_hash = 13; + // bytes app_hash = 11; pub fn get_app_hash(&self) -> &[u8] { @@ -9348,7 +9373,7 @@ impl Header { ::std::mem::replace(&mut self.app_hash, ::std::vec::Vec::new()) } - // bytes last_results_hash = 14; + // bytes last_results_hash = 12; pub fn get_last_results_hash(&self) -> &[u8] { @@ -9374,7 +9399,7 @@ impl Header { ::std::mem::replace(&mut self.last_results_hash, ::std::vec::Vec::new()) } - // bytes evidence_hash = 15; + // bytes evidence_hash = 13; pub fn get_evidence_hash(&self) -> &[u8] { @@ -9400,7 +9425,7 @@ impl Header { ::std::mem::replace(&mut self.evidence_hash, ::std::vec::Vec::new()) } - // bytes proposer_address = 16; + // bytes proposer_address = 14; pub fn get_proposer_address(&self) -> &[u8] { @@ -9468,47 +9493,33 @@ impl ::protobuf::Message for Header { ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.time)?; }, 5 => { - if wire_type != ::protobuf::wire_format::WireTypeVarint { - return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); - } - let tmp = is.read_int64()?; - self.num_txs = tmp; - }, - 6 => { - if wire_type != ::protobuf::wire_format::WireTypeVarint { - return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); - } - let tmp = is.read_int64()?; - self.total_txs = tmp; - }, - 7 => { ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.last_block_id)?; }, - 8 => { + 6 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.last_commit_hash)?; }, - 9 => { + 7 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.data_hash)?; }, - 10 => { + 8 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.validators_hash)?; }, - 11 => { + 9 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.next_validators_hash)?; }, - 12 => { + 10 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.consensus_hash)?; }, - 13 => { + 11 => { 
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.app_hash)?; }, - 14 => { + 12 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.last_results_hash)?; }, - 15 => { + 13 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.evidence_hash)?; }, - 16 => { + 14 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.proposer_address)?; }, _ => { @@ -9537,42 +9548,36 @@ impl ::protobuf::Message for Header { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; } - if self.num_txs != 0 { - my_size += ::protobuf::rt::value_size(5, self.num_txs, ::protobuf::wire_format::WireTypeVarint); - } - if self.total_txs != 0 { - my_size += ::protobuf::rt::value_size(6, self.total_txs, ::protobuf::wire_format::WireTypeVarint); - } if let Some(ref v) = self.last_block_id.as_ref() { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; } if !self.last_commit_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(8, &self.last_commit_hash); + my_size += ::protobuf::rt::bytes_size(6, &self.last_commit_hash); } if !self.data_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(9, &self.data_hash); + my_size += ::protobuf::rt::bytes_size(7, &self.data_hash); } if !self.validators_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(10, &self.validators_hash); + my_size += ::protobuf::rt::bytes_size(8, &self.validators_hash); } if !self.next_validators_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(11, &self.next_validators_hash); + my_size += ::protobuf::rt::bytes_size(9, &self.next_validators_hash); } if !self.consensus_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(12, &self.consensus_hash); + my_size += ::protobuf::rt::bytes_size(10, &self.consensus_hash); } if !self.app_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(13, &self.app_hash); + my_size += ::protobuf::rt::bytes_size(11, &self.app_hash); } if !self.last_results_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(14, &self.last_results_hash); + my_size += ::protobuf::rt::bytes_size(12, &self.last_results_hash); } if !self.evidence_hash.is_empty() { - my_size += ::protobuf::rt::bytes_size(15, &self.evidence_hash); + my_size += ::protobuf::rt::bytes_size(13, &self.evidence_hash); } if !self.proposer_address.is_empty() { - my_size += ::protobuf::rt::bytes_size(16, &self.proposer_address); + my_size += ::protobuf::rt::bytes_size(14, &self.proposer_address); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); @@ -9596,43 +9601,37 @@ impl ::protobuf::Message for Header { os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; } - if self.num_txs != 0 { - os.write_int64(5, self.num_txs)?; - } - if self.total_txs != 0 { - os.write_int64(6, self.total_txs)?; - } if let Some(ref v) = self.last_block_id.as_ref() { - os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; } if !self.last_commit_hash.is_empty() { - os.write_bytes(8, &self.last_commit_hash)?; + os.write_bytes(6, &self.last_commit_hash)?; } if !self.data_hash.is_empty() { - os.write_bytes(9, &self.data_hash)?; + os.write_bytes(7, &self.data_hash)?; } if !self.validators_hash.is_empty() { - os.write_bytes(10, &self.validators_hash)?; 
+ os.write_bytes(8, &self.validators_hash)?; } if !self.next_validators_hash.is_empty() { - os.write_bytes(11, &self.next_validators_hash)?; + os.write_bytes(9, &self.next_validators_hash)?; } if !self.consensus_hash.is_empty() { - os.write_bytes(12, &self.consensus_hash)?; + os.write_bytes(10, &self.consensus_hash)?; } if !self.app_hash.is_empty() { - os.write_bytes(13, &self.app_hash)?; + os.write_bytes(11, &self.app_hash)?; } if !self.last_results_hash.is_empty() { - os.write_bytes(14, &self.last_results_hash)?; + os.write_bytes(12, &self.last_results_hash)?; } if !self.evidence_hash.is_empty() { - os.write_bytes(15, &self.evidence_hash)?; + os.write_bytes(13, &self.evidence_hash)?; } if !self.proposer_address.is_empty() { - os.write_bytes(16, &self.proposer_address)?; + os.write_bytes(14, &self.proposer_address)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) @@ -9696,16 +9695,6 @@ impl ::protobuf::Message for Header { |m: &Header| { &m.time }, |m: &mut Header| { &mut m.time }, )); - fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>( - "num_txs", - |m: &Header| { &m.num_txs }, - |m: &mut Header| { &mut m.num_txs }, - )); - fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>( - "total_txs", - |m: &Header| { &m.total_txs }, - |m: &mut Header| { &mut m.total_txs }, - )); fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( "last_block_id", |m: &Header| { &m.last_block_id }, @@ -9782,8 +9771,6 @@ impl ::protobuf::Clear for Header { self.chain_id.clear(); self.height = 0; self.time.clear(); - self.num_txs = 0; - self.total_txs = 0; self.last_block_id.clear(); self.last_commit_hash.clear(); self.data_hash.clear(); @@ -10054,7 +10041,7 @@ impl BlockID { ::std::mem::replace(&mut self.hash, ::std::vec::Vec::new()) } - // .abci.PartSetHeader parts_header = 2; + // .tendermint.abci.types.PartSetHeader parts_header = 2; pub fn get_parts_header(&self) -> &PartSetHeader { @@ -10662,7 +10649,7 @@ impl ValidatorUpdate { ::std::default::Default::default() } - // .abci.PubKey pub_key = 1; + // .tendermint.abci.types.PubKey pub_key = 1; pub fn get_pub_key(&self) -> &PubKey { @@ -10881,7 +10868,7 @@ impl VoteInfo { ::std::default::Default::default() } - // .abci.Validator validator = 1; + // .tendermint.abci.types.Validator validator = 1; pub fn get_validator(&self) -> &Validator { @@ -11340,7 +11327,7 @@ impl Evidence { ::std::mem::replace(&mut self.field_type, ::std::string::String::new()) } - // .abci.Validator validator = 2; + // .tendermint.abci.types.Validator validator = 2; pub fn get_validator(&self) -> &Validator { @@ -11699,53 +11686,59 @@ impl ::protobuf::reflect::ProtobufValue for CheckTxType { } static file_descriptor_proto_data: &'static [u8] = b"\ - \n\nabci.proto\x12\x04abci\"\xf1\x03\n\x07Request\x12#\n\x04echo\x18\x02\ - \x20\x01(\x0b2\x11.abci.RequestEchoH\0B\0\x12%\n\x05flush\x18\x03\x20\ - \x01(\x0b2\x12.abci.RequestFlushH\0B\0\x12#\n\x04info\x18\x04\x20\x01(\ - \x0b2\x11.abci.RequestInfoH\0B\0\x12.\n\nset_option\x18\x05\x20\x01(\x0b\ - 2\x16.abci.RequestSetOptionH\0B\0\x12.\n\ninit_chain\x18\x06\x20\x01(\ - \x0b2\x16.abci.RequestInitChainH\0B\0\x12%\n\x05query\x18\x07\x20\x01(\ - \x0b2\x12.abci.RequestQueryH\0B\0\x120\n\x0bbegin_block\x18\x08\x20\x01(\ - \x0b2\x17.abci.RequestBeginBlockH\0B\0\x12*\n\x08check_tx\x18\t\x20\x01(\ - 
\x0b2\x14.abci.RequestCheckTxH\0B\0\x12.\n\ndeliver_tx\x18\x13\x20\x01(\ - \x0b2\x16.abci.RequestDeliverTxH\0B\0\x12,\n\tend_block\x18\x0b\x20\x01(\ - \x0b2\x15.abci.RequestEndBlockH\0B\0\x12'\n\x06commit\x18\x0c\x20\x01(\ - \x0b2\x13.abci.RequestCommitH\0B\0B\x07\n\x05value:\0\"\"\n\x0bRequestEc\ - ho\x12\x11\n\x07message\x18\x01\x20\x01(\tB\0:\0\"\x10\n\x0cRequestFlush\ - :\0\"R\n\x0bRequestInfo\x12\x11\n\x07version\x18\x01\x20\x01(\tB\0\x12\ - \x17\n\rblock_version\x18\x02\x20\x01(\x04B\0\x12\x15\n\x0bp2p_version\ - \x18\x03\x20\x01(\x04B\0:\0\"4\n\x10RequestSetOption\x12\r\n\x03key\x18\ - \x01\x20\x01(\tB\0\x12\x0f\n\x05value\x18\x02\x20\x01(\tB\0:\0\"\xdb\x01\ - \n\x10RequestInitChain\x122\n\x04time\x18\x01\x20\x01(\x0b2\x1a.google.p\ - rotobuf.TimestampB\x08\xc8\xde\x1f\0\x90\xdf\x1f\x01\x12\x12\n\x08chain_\ - id\x18\x02\x20\x01(\tB\0\x121\n\x10consensus_params\x18\x03\x20\x01(\x0b\ - 2\x15.abci.ConsensusParamsB\0\x12/\n\nvalidators\x18\x04\x20\x03(\x0b2\ - \x15.abci.ValidatorUpdateB\x04\xc8\xde\x1f\0\x12\x19\n\x0fapp_state_byte\ - s\x18\x05\x20\x01(\x0cB\0:\0\"S\n\x0cRequestQuery\x12\x0e\n\x04data\x18\ - \x01\x20\x01(\x0cB\0\x12\x0e\n\x04path\x18\x02\x20\x01(\tB\0\x12\x10\n\ - \x06height\x18\x03\x20\x01(\x03B\0\x12\x0f\n\x05prove\x18\x04\x20\x01(\ - \x08B\0:\0\"\xb3\x01\n\x11RequestBeginBlock\x12\x0e\n\x04hash\x18\x01\ - \x20\x01(\x0cB\0\x12\"\n\x06header\x18\x02\x20\x01(\x0b2\x0c.abci.Header\ - B\x04\xc8\xde\x1f\0\x124\n\x10last_commit_info\x18\x03\x20\x01(\x0b2\x14\ - .abci.LastCommitInfoB\x04\xc8\xde\x1f\0\x122\n\x14byzantine_validators\ - \x18\x04\x20\x03(\x0b2\x0e.abci.EvidenceB\x04\xc8\xde\x1f\0:\0\"C\n\x0eR\ - equestCheckTx\x12\x0c\n\x02tx\x18\x01\x20\x01(\x0cB\0\x12!\n\x04type\x18\ - \x02\x20\x01(\x0e2\x11.abci.CheckTxTypeB\0:\0\"\"\n\x10RequestDeliverTx\ - \x12\x0c\n\x02tx\x18\x01\x20\x01(\x0cB\0:\0\"%\n\x0fRequestEndBlock\x12\ - \x10\n\x06height\x18\x01\x20\x01(\x03B\0:\0\"\x11\n\rRequestCommit:\0\"\ - \xad\x04\n\x08Response\x12.\n\texception\x18\x01\x20\x01(\x0b2\x17.abci.\ - ResponseExceptionH\0B\0\x12$\n\x04echo\x18\x02\x20\x01(\x0b2\x12.abci.Re\ - sponseEchoH\0B\0\x12&\n\x05flush\x18\x03\x20\x01(\x0b2\x13.abci.Response\ - FlushH\0B\0\x12$\n\x04info\x18\x04\x20\x01(\x0b2\x12.abci.ResponseInfoH\ - \0B\0\x12/\n\nset_option\x18\x05\x20\x01(\x0b2\x17.abci.ResponseSetOptio\ - nH\0B\0\x12/\n\ninit_chain\x18\x06\x20\x01(\x0b2\x17.abci.ResponseInitCh\ - ainH\0B\0\x12&\n\x05query\x18\x07\x20\x01(\x0b2\x13.abci.ResponseQueryH\ - \0B\0\x121\n\x0bbegin_block\x18\x08\x20\x01(\x0b2\x18.abci.ResponseBegin\ - BlockH\0B\0\x12+\n\x08check_tx\x18\t\x20\x01(\x0b2\x15.abci.ResponseChec\ - kTxH\0B\0\x12/\n\ndeliver_tx\x18\n\x20\x01(\x0b2\x17.abci.ResponseDelive\ - rTxH\0B\0\x12-\n\tend_block\x18\x0b\x20\x01(\x0b2\x16.abci.ResponseEndBl\ - ockH\0B\0\x12(\n\x06commit\x18\x0c\x20\x01(\x0b2\x14.abci.ResponseCommit\ - H\0B\0B\x07\n\x05value:\0\"&\n\x11ResponseException\x12\x0f\n\x05error\ + \n\nabci.proto\x12\x15tendermint.abci.types\"\xac\x05\n\x07Request\x124\ + \n\x04echo\x18\x02\x20\x01(\x0b2\".tendermint.abci.types.RequestEchoH\0B\ + \0\x126\n\x05flush\x18\x03\x20\x01(\x0b2#.tendermint.abci.types.RequestF\ + lushH\0B\0\x124\n\x04info\x18\x04\x20\x01(\x0b2\".tendermint.abci.types.\ + RequestInfoH\0B\0\x12?\n\nset_option\x18\x05\x20\x01(\x0b2'.tendermint.a\ + bci.types.RequestSetOptionH\0B\0\x12?\n\ninit_chain\x18\x06\x20\x01(\x0b\ + 2'.tendermint.abci.types.RequestInitChainH\0B\0\x126\n\x05query\x18\x07\ + \x20\x01(\x0b2#.tendermint.abci.types.RequestQueryH\0B\0\x12A\n\x0bbegin\ + 
_block\x18\x08\x20\x01(\x0b2(.tendermint.abci.types.RequestBeginBlockH\0\ + B\0\x12;\n\x08check_tx\x18\t\x20\x01(\x0b2%.tendermint.abci.types.Reques\ + tCheckTxH\0B\0\x12?\n\ndeliver_tx\x18\x13\x20\x01(\x0b2'.tendermint.abci\ + .types.RequestDeliverTxH\0B\0\x12=\n\tend_block\x18\x0b\x20\x01(\x0b2&.t\ + endermint.abci.types.RequestEndBlockH\0B\0\x128\n\x06commit\x18\x0c\x20\ + \x01(\x0b2$.tendermint.abci.types.RequestCommitH\0B\0B\x07\n\x05value:\0\ + \"\"\n\x0bRequestEcho\x12\x11\n\x07message\x18\x01\x20\x01(\tB\0:\0\"\ + \x10\n\x0cRequestFlush:\0\"R\n\x0bRequestInfo\x12\x11\n\x07version\x18\ + \x01\x20\x01(\tB\0\x12\x17\n\rblock_version\x18\x02\x20\x01(\x04B\0\x12\ + \x15\n\x0bp2p_version\x18\x03\x20\x01(\x04B\0:\0\"4\n\x10RequestSetOptio\ + n\x12\r\n\x03key\x18\x01\x20\x01(\tB\0\x12\x0f\n\x05value\x18\x02\x20\ + \x01(\tB\0:\0\"\xfd\x01\n\x10RequestInitChain\x122\n\x04time\x18\x01\x20\ + \x01(\x0b2\x1a.google.protobuf.TimestampB\x08\x90\xdf\x1f\x01\xc8\xde\ + \x1f\0\x12\x12\n\x08chain_id\x18\x02\x20\x01(\tB\0\x12B\n\x10consensus_p\ + arams\x18\x03\x20\x01(\x0b2&.tendermint.abci.types.ConsensusParamsB\0\ + \x12@\n\nvalidators\x18\x04\x20\x03(\x0b2&.tendermint.abci.types.Validat\ + orUpdateB\x04\xc8\xde\x1f\0\x12\x19\n\x0fapp_state_bytes\x18\x05\x20\x01\ + (\x0cB\0:\0\"S\n\x0cRequestQuery\x12\x0e\n\x04data\x18\x01\x20\x01(\x0cB\ + \0\x12\x0e\n\x04path\x18\x02\x20\x01(\tB\0\x12\x10\n\x06height\x18\x03\ + \x20\x01(\x03B\0\x12\x0f\n\x05prove\x18\x04\x20\x01(\x08B\0:\0\"\xe6\x01\ + \n\x11RequestBeginBlock\x12\x0e\n\x04hash\x18\x01\x20\x01(\x0cB\0\x123\n\ + \x06header\x18\x02\x20\x01(\x0b2\x1d.tendermint.abci.types.HeaderB\x04\ + \xc8\xde\x1f\0\x12E\n\x10last_commit_info\x18\x03\x20\x01(\x0b2%.tenderm\ + int.abci.types.LastCommitInfoB\x04\xc8\xde\x1f\0\x12C\n\x14byzantine_val\ + idators\x18\x04\x20\x03(\x0b2\x1f.tendermint.abci.types.EvidenceB\x04\ + \xc8\xde\x1f\0:\0\"T\n\x0eRequestCheckTx\x12\x0c\n\x02tx\x18\x01\x20\x01\ + (\x0cB\0\x122\n\x04type\x18\x02\x20\x01(\x0e2\".tendermint.abci.types.Ch\ + eckTxTypeB\0:\0\"\"\n\x10RequestDeliverTx\x12\x0c\n\x02tx\x18\x01\x20\ + \x01(\x0cB\0:\0\"%\n\x0fRequestEndBlock\x12\x10\n\x06height\x18\x01\x20\ + \x01(\x03B\0:\0\"\x11\n\rRequestCommit:\0\"\xf9\x05\n\x08Response\x12?\n\ + \texception\x18\x01\x20\x01(\x0b2(.tendermint.abci.types.ResponseExcepti\ + onH\0B\0\x125\n\x04echo\x18\x02\x20\x01(\x0b2#.tendermint.abci.types.Res\ + ponseEchoH\0B\0\x127\n\x05flush\x18\x03\x20\x01(\x0b2$.tendermint.abci.t\ + ypes.ResponseFlushH\0B\0\x125\n\x04info\x18\x04\x20\x01(\x0b2#.tendermin\ + t.abci.types.ResponseInfoH\0B\0\x12@\n\nset_option\x18\x05\x20\x01(\x0b2\ + (.tendermint.abci.types.ResponseSetOptionH\0B\0\x12@\n\ninit_chain\x18\ + \x06\x20\x01(\x0b2(.tendermint.abci.types.ResponseInitChainH\0B\0\x127\n\ + \x05query\x18\x07\x20\x01(\x0b2$.tendermint.abci.types.ResponseQueryH\0B\ + \0\x12B\n\x0bbegin_block\x18\x08\x20\x01(\x0b2).tendermint.abci.types.Re\ + sponseBeginBlockH\0B\0\x12<\n\x08check_tx\x18\t\x20\x01(\x0b2&.tendermin\ + t.abci.types.ResponseCheckTxH\0B\0\x12@\n\ndeliver_tx\x18\n\x20\x01(\x0b\ + 2(.tendermint.abci.types.ResponseDeliverTxH\0B\0\x12>\n\tend_block\x18\ + \x0b\x20\x01(\x0b2'.tendermint.abci.types.ResponseEndBlockH\0B\0\x129\n\ + \x06commit\x18\x0c\x20\x01(\x0b2%.tendermint.abci.types.ResponseCommitH\ + \0B\0B\x07\n\x05value:\0\"&\n\x11ResponseException\x12\x0f\n\x05error\ \x18\x01\x20\x01(\tB\0:\0\"#\n\x0cResponseEcho\x12\x11\n\x07message\x18\ \x01\x20\x01(\tB\0:\0\"\x11\n\rResponseFlush:\0\"\x86\x01\n\x0cResponseI\ 
nfo\x12\x0e\n\x04data\x18\x01\x20\x01(\tB\0\x12\x11\n\x07version\x18\x02\ @@ -11753,80 +11746,84 @@ static file_descriptor_proto_data: &'static [u8] = b"\ \n\x11last_block_height\x18\x04\x20\x01(\x03B\0\x12\x1d\n\x13last_block_\ app_hash\x18\x05\x20\x01(\x0cB\0:\0\"D\n\x11ResponseSetOption\x12\x0e\n\ \x04code\x18\x01\x20\x01(\rB\0\x12\r\n\x03log\x18\x03\x20\x01(\tB\0\x12\ - \x0e\n\x04info\x18\x04\x20\x01(\tB\0:\0\"y\n\x11ResponseInitChain\x121\n\ - \x10consensus_params\x18\x01\x20\x01(\x0b2\x15.abci.ConsensusParamsB\0\ - \x12/\n\nvalidators\x18\x02\x20\x03(\x0b2\x15.abci.ValidatorUpdateB\x04\ - \xc8\xde\x1f\0:\0\"\xb8\x01\n\rResponseQuery\x12\x0e\n\x04code\x18\x01\ - \x20\x01(\rB\0\x12\r\n\x03log\x18\x03\x20\x01(\tB\0\x12\x0e\n\x04info\ - \x18\x04\x20\x01(\tB\0\x12\x0f\n\x05index\x18\x05\x20\x01(\x03B\0\x12\r\ - \n\x03key\x18\x06\x20\x01(\x0cB\0\x12\x0f\n\x05value\x18\x07\x20\x01(\ - \x0cB\0\x12\x1e\n\x05proof\x18\x08\x20\x01(\x0b2\r.merkle.ProofB\0\x12\ - \x10\n\x06height\x18\t\x20\x01(\x03B\0\x12\x13\n\tcodespace\x18\n\x20\ - \x01(\tB\0:\0\"M\n\x12ResponseBeginBlock\x125\n\x06events\x18\x01\x20\ - \x03(\x0b2\x0b.abci.EventB\x18\xea\xde\x1f\x10events,omitempty\xc8\xde\ - \x1f\0:\0\"\xc8\x01\n\x0fResponseCheckTx\x12\x0e\n\x04code\x18\x01\x20\ - \x01(\rB\0\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0\x12\r\n\x03log\x18\ - \x03\x20\x01(\tB\0\x12\x0e\n\x04info\x18\x04\x20\x01(\tB\0\x12\x14\n\nga\ - s_wanted\x18\x05\x20\x01(\x03B\0\x12\x12\n\x08gas_used\x18\x06\x20\x01(\ - \x03B\0\x125\n\x06events\x18\x07\x20\x03(\x0b2\x0b.abci.EventB\x18\xc8\ - \xde\x1f\0\xea\xde\x1f\x10events,omitempty\x12\x13\n\tcodespace\x18\x08\ - \x20\x01(\tB\0:\0\"\xca\x01\n\x11ResponseDeliverTx\x12\x0e\n\x04code\x18\ - \x01\x20\x01(\rB\0\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0\x12\r\n\x03\ - log\x18\x03\x20\x01(\tB\0\x12\x0e\n\x04info\x18\x04\x20\x01(\tB\0\x12\ - \x14\n\ngas_wanted\x18\x05\x20\x01(\x03B\0\x12\x12\n\x08gas_used\x18\x06\ - \x20\x01(\x03B\0\x125\n\x06events\x18\x07\x20\x03(\x0b2\x0b.abci.EventB\ - \x18\xc8\xde\x1f\0\xea\xde\x1f\x10events,omitempty\x12\x13\n\tcodespace\ - \x18\x08\x20\x01(\tB\0:\0\"\xbd\x01\n\x10ResponseEndBlock\x126\n\x11vali\ - dator_updates\x18\x01\x20\x03(\x0b2\x15.abci.ValidatorUpdateB\x04\xc8\ - \xde\x1f\0\x128\n\x17consensus_param_updates\x18\x02\x20\x01(\x0b2\x15.a\ - bci.ConsensusParamsB\0\x125\n\x06events\x18\x03\x20\x03(\x0b2\x0b.abci.E\ - ventB\x18\xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0:\0\"\"\n\x0eResp\ - onseCommit\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0:\0\"\x8d\x01\n\x0fC\ - onsensusParams\x12\"\n\x05block\x18\x01\x20\x01(\x0b2\x11.abci.BlockPara\ - msB\0\x12(\n\x08evidence\x18\x02\x20\x01(\x0b2\x14.abci.EvidenceParamsB\ - \0\x12*\n\tvalidator\x18\x03\x20\x01(\x0b2\x15.abci.ValidatorParamsB\0:\ - \0\"7\n\x0bBlockParams\x12\x13\n\tmax_bytes\x18\x01\x20\x01(\x03B\0\x12\ - \x11\n\x07max_gas\x18\x02\x20\x01(\x03B\0:\0\"%\n\x0eEvidenceParams\x12\ - \x11\n\x07max_age\x18\x01\x20\x01(\x03B\0:\0\",\n\x0fValidatorParams\x12\ - \x17\n\rpub_key_types\x18\x01\x20\x03(\tB\0:\0\"H\n\x0eLastCommitInfo\ - \x12\x0f\n\x05round\x18\x01\x20\x01(\x05B\0\x12#\n\x05votes\x18\x02\x20\ - \x03(\x0b2\x0e.abci.VoteInfoB\x04\xc8\xde\x1f\0:\0\"[\n\x05Event\x12\x0e\ - \n\x04type\x18\x01\x20\x01(\tB\0\x12@\n\nattributes\x18\x02\x20\x03(\x0b\ - 2\x0e.common.KVPairB\x1c\xc8\xde\x1f\0\xea\xde\x1f\x14attributes,omitemp\ - ty:\0\"\xd5\x03\n\x06Header\x12$\n\x07version\x18\x01\x20\x01(\x0b2\r.ab\ - ci.VersionB\x04\xc8\xde\x1f\0\x12\x1d\n\x08chain_id\x18\x02\x20\x01(\tB\ - 
\x0b\xe2\xde\x1f\x07ChainID\x12\x10\n\x06height\x18\x03\x20\x01(\x03B\0\ - \x122\n\x04time\x18\x04\x20\x01(\x0b2\x1a.google.protobuf.TimestampB\x08\ - \xc8\xde\x1f\0\x90\xdf\x1f\x01\x12\x11\n\x07num_txs\x18\x05\x20\x01(\x03\ - B\0\x12\x13\n\ttotal_txs\x18\x06\x20\x01(\x03B\0\x12*\n\rlast_block_id\ - \x18\x07\x20\x01(\x0b2\r.abci.BlockIDB\x04\xc8\xde\x1f\0\x12\x1a\n\x10la\ - st_commit_hash\x18\x08\x20\x01(\x0cB\0\x12\x13\n\tdata_hash\x18\t\x20\ - \x01(\x0cB\0\x12\x19\n\x0fvalidators_hash\x18\n\x20\x01(\x0cB\0\x12\x1e\ - \n\x14next_validators_hash\x18\x0b\x20\x01(\x0cB\0\x12\x18\n\x0econsensu\ - s_hash\x18\x0c\x20\x01(\x0cB\0\x12\x12\n\x08app_hash\x18\r\x20\x01(\x0cB\ - \0\x12\x1b\n\x11last_results_hash\x18\x0e\x20\x01(\x0cB\0\x12\x17\n\revi\ - dence_hash\x18\x0f\x20\x01(\x0cB\0\x12\x1a\n\x10proposer_address\x18\x10\ - \x20\x01(\x0cB\0:\0\"+\n\x07Version\x12\x0f\n\x05Block\x18\x01\x20\x01(\ - \x04B\0\x12\r\n\x03App\x18\x02\x20\x01(\x04B\0:\0\"L\n\x07BlockID\x12\ - \x0e\n\x04hash\x18\x01\x20\x01(\x0cB\0\x12/\n\x0cparts_header\x18\x02\ - \x20\x01(\x0b2\x13.abci.PartSetHeaderB\x04\xc8\xde\x1f\0:\0\"2\n\rPartSe\ - tHeader\x12\x0f\n\x05total\x18\x01\x20\x01(\x05B\0\x12\x0e\n\x04hash\x18\ - \x02\x20\x01(\x0cB\0:\0\"1\n\tValidator\x12\x11\n\x07address\x18\x01\x20\ - \x01(\x0cB\0\x12\x0f\n\x05power\x18\x03\x20\x01(\x03B\0:\0\"I\n\x0fValid\ - atorUpdate\x12#\n\x07pub_key\x18\x01\x20\x01(\x0b2\x0c.abci.PubKeyB\x04\ - \xc8\xde\x1f\0\x12\x0f\n\x05power\x18\x02\x20\x01(\x03B\0:\0\"S\n\x08Vot\ - eInfo\x12(\n\tvalidator\x18\x01\x20\x01(\x0b2\x0f.abci.ValidatorB\x04\ - \xc8\xde\x1f\0\x12\x1b\n\x11signed_last_block\x18\x02\x20\x01(\x08B\0:\0\ - \"*\n\x06PubKey\x12\x0e\n\x04type\x18\x01\x20\x01(\tB\0\x12\x0e\n\x04dat\ - a\x18\x02\x20\x01(\x0cB\0:\0\"\xaa\x01\n\x08Evidence\x12\x0e\n\x04type\ - \x18\x01\x20\x01(\tB\0\x12(\n\tvalidator\x18\x02\x20\x01(\x0b2\x0f.abci.\ - ValidatorB\x04\xc8\xde\x1f\0\x12\x10\n\x06height\x18\x03\x20\x01(\x03B\0\ - \x122\n\x04time\x18\x04\x20\x01(\x0b2\x1a.google.protobuf.TimestampB\x08\ - \x90\xdf\x1f\x01\xc8\xde\x1f\0\x12\x1c\n\x12total_voting_power\x18\x05\ - \x20\x01(\x03B\0:\0*%\n\x0bCheckTxType\x12\x07\n\x03New\x10\0\x12\x0b\n\ - \x07Recheck\x10\x01\x1a\0B\x1c\xc8\xe2\x1e\x01\xe0\xe2\x1e\x01\xf8\xe1\ - \x1e\x01\xc0\xe3\x1e\x01\xa8\xe2\x1e\x01\xb8\xe2\x1e\x01\xd0\xe2\x1e\x01\ - b\x06proto3\ + \x0e\n\x04info\x18\x04\x20\x01(\tB\0:\0\"\x9b\x01\n\x11ResponseInitChain\ + \x12B\n\x10consensus_params\x18\x01\x20\x01(\x0b2&.tendermint.abci.types\ + .ConsensusParamsB\0\x12@\n\nvalidators\x18\x02\x20\x03(\x0b2&.tendermint\ + .abci.types.ValidatorUpdateB\x04\xc8\xde\x1f\0:\0\"\xca\x01\n\rResponseQ\ + uery\x12\x0e\n\x04code\x18\x01\x20\x01(\rB\0\x12\r\n\x03log\x18\x03\x20\ + \x01(\tB\0\x12\x0e\n\x04info\x18\x04\x20\x01(\tB\0\x12\x0f\n\x05index\ + \x18\x05\x20\x01(\x03B\0\x12\r\n\x03key\x18\x06\x20\x01(\x0cB\0\x12\x0f\ + \n\x05value\x18\x07\x20\x01(\x0cB\0\x120\n\x05proof\x18\x08\x20\x01(\x0b\ + 2\x1f.tendermint.crypto.merkle.ProofB\0\x12\x10\n\x06height\x18\t\x20\ + \x01(\x03B\0\x12\x13\n\tcodespace\x18\n\x20\x01(\tB\0:\0\"^\n\x12Respons\ + eBeginBlock\x12F\n\x06events\x18\x01\x20\x03(\x0b2\x1c.tendermint.abci.t\ + ypes.EventB\x18\xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0:\0\"\xd9\ + \x01\n\x0fResponseCheckTx\x12\x0e\n\x04code\x18\x01\x20\x01(\rB\0\x12\ + \x0e\n\x04data\x18\x02\x20\x01(\x0cB\0\x12\r\n\x03log\x18\x03\x20\x01(\t\ + B\0\x12\x0e\n\x04info\x18\x04\x20\x01(\tB\0\x12\x14\n\ngas_wanted\x18\ + \x05\x20\x01(\x03B\0\x12\x12\n\x08gas_used\x18\x06\x20\x01(\x03B\0\x12F\ + 
\n\x06events\x18\x07\x20\x03(\x0b2\x1c.tendermint.abci.types.EventB\x18\ + \xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0\x12\x13\n\tcodespace\x18\ + \x08\x20\x01(\tB\0:\0\"\xdb\x01\n\x11ResponseDeliverTx\x12\x0e\n\x04code\ + \x18\x01\x20\x01(\rB\0\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0\x12\r\n\ + \x03log\x18\x03\x20\x01(\tB\0\x12\x0e\n\x04info\x18\x04\x20\x01(\tB\0\ + \x12\x14\n\ngas_wanted\x18\x05\x20\x01(\x03B\0\x12\x12\n\x08gas_used\x18\ + \x06\x20\x01(\x03B\0\x12F\n\x06events\x18\x07\x20\x03(\x0b2\x1c.tendermi\ + nt.abci.types.EventB\x18\xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0\ + \x12\x13\n\tcodespace\x18\x08\x20\x01(\tB\0:\0\"\xf0\x01\n\x10ResponseEn\ + dBlock\x12G\n\x11validator_updates\x18\x01\x20\x03(\x0b2&.tendermint.abc\ + i.types.ValidatorUpdateB\x04\xc8\xde\x1f\0\x12I\n\x17consensus_param_upd\ + ates\x18\x02\x20\x01(\x0b2&.tendermint.abci.types.ConsensusParamsB\0\x12\ + F\n\x06events\x18\x03\x20\x03(\x0b2\x1c.tendermint.abci.types.EventB\x18\ + \xc8\xde\x1f\0\xea\xde\x1f\x10events,omitempty:\0\"\"\n\x0eResponseCommi\ + t\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0:\0\"\xc0\x01\n\x0fConsensusP\ + arams\x123\n\x05block\x18\x01\x20\x01(\x0b2\".tendermint.abci.types.Bloc\ + kParamsB\0\x129\n\x08evidence\x18\x02\x20\x01(\x0b2%.tendermint.abci.typ\ + es.EvidenceParamsB\0\x12;\n\tvalidator\x18\x03\x20\x01(\x0b2&.tendermint\ + .abci.types.ValidatorParamsB\0:\0\"7\n\x0bBlockParams\x12\x13\n\tmax_byt\ + es\x18\x01\x20\x01(\x03B\0\x12\x11\n\x07max_gas\x18\x02\x20\x01(\x03B\0:\ + \0\"o\n\x0eEvidenceParams\x12\x1c\n\x12max_age_num_blocks\x18\x01\x20\ + \x01(\x03B\0\x12=\n\x10max_age_duration\x18\x02\x20\x01(\x0b2\x19.google\ + .protobuf.DurationB\x08\xc8\xde\x1f\0\x98\xdf\x1f\x01:\0\",\n\x0fValidat\ + orParams\x12\x17\n\rpub_key_types\x18\x01\x20\x03(\tB\0:\0\"Y\n\x0eLastC\ + ommitInfo\x12\x0f\n\x05round\x18\x01\x20\x01(\x05B\0\x124\n\x05votes\x18\ + \x02\x20\x03(\x0b2\x1f.tendermint.abci.types.VoteInfoB\x04\xc8\xde\x1f\0\ + :\0\"e\n\x05Event\x12\x0e\n\x04type\x18\x01\x20\x01(\tB\0\x12J\n\nattrib\ + utes\x18\x02\x20\x03(\x0b2\x18.tendermint.libs.kv.PairB\x1c\xc8\xde\x1f\ + \0\xea\xde\x1f\x14attributes,omitempty:\0\"\xcf\x03\n\x06Header\x125\n\ + \x07version\x18\x01\x20\x01(\x0b2\x1e.tendermint.abci.types.VersionB\x04\ + \xc8\xde\x1f\0\x12\x1d\n\x08chain_id\x18\x02\x20\x01(\tB\x0b\xe2\xde\x1f\ + \x07ChainID\x12\x10\n\x06height\x18\x03\x20\x01(\x03B\0\x122\n\x04time\ + \x18\x04\x20\x01(\x0b2\x1a.google.protobuf.TimestampB\x08\xc8\xde\x1f\0\ + \x90\xdf\x1f\x01\x12;\n\rlast_block_id\x18\x05\x20\x01(\x0b2\x1e.tenderm\ + int.abci.types.BlockIDB\x04\xc8\xde\x1f\0\x12\x1a\n\x10last_commit_hash\ + \x18\x06\x20\x01(\x0cB\0\x12\x13\n\tdata_hash\x18\x07\x20\x01(\x0cB\0\ + \x12\x19\n\x0fvalidators_hash\x18\x08\x20\x01(\x0cB\0\x12\x1e\n\x14next_\ + validators_hash\x18\t\x20\x01(\x0cB\0\x12\x18\n\x0econsensus_hash\x18\n\ + \x20\x01(\x0cB\0\x12\x12\n\x08app_hash\x18\x0b\x20\x01(\x0cB\0\x12\x1b\n\ + \x11last_results_hash\x18\x0c\x20\x01(\x0cB\0\x12\x17\n\revidence_hash\ + \x18\r\x20\x01(\x0cB\0\x12\x1a\n\x10proposer_address\x18\x0e\x20\x01(\ + \x0cB\0:\0\"+\n\x07Version\x12\x0f\n\x05Block\x18\x01\x20\x01(\x04B\0\ + \x12\r\n\x03App\x18\x02\x20\x01(\x04B\0:\0\"]\n\x07BlockID\x12\x0e\n\x04\ + hash\x18\x01\x20\x01(\x0cB\0\x12@\n\x0cparts_header\x18\x02\x20\x01(\x0b\ + 2$.tendermint.abci.types.PartSetHeaderB\x04\xc8\xde\x1f\0:\0\"2\n\rPartS\ + etHeader\x12\x0f\n\x05total\x18\x01\x20\x01(\x05B\0\x12\x0e\n\x04hash\ + \x18\x02\x20\x01(\x0cB\0:\0\"1\n\tValidator\x12\x11\n\x07address\x18\x01\ + 
\x20\x01(\x0cB\0\x12\x0f\n\x05power\x18\x03\x20\x01(\x03B\0:\0\"Z\n\x0fV\ + alidatorUpdate\x124\n\x07pub_key\x18\x01\x20\x01(\x0b2\x1d.tendermint.ab\ + ci.types.PubKeyB\x04\xc8\xde\x1f\0\x12\x0f\n\x05power\x18\x02\x20\x01(\ + \x03B\0:\0\"d\n\x08VoteInfo\x129\n\tvalidator\x18\x01\x20\x01(\x0b2\x20.\ + tendermint.abci.types.ValidatorB\x04\xc8\xde\x1f\0\x12\x1b\n\x11signed_l\ + ast_block\x18\x02\x20\x01(\x08B\0:\0\"*\n\x06PubKey\x12\x0e\n\x04type\ + \x18\x01\x20\x01(\tB\0\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0:\0\"\ + \xbb\x01\n\x08Evidence\x12\x0e\n\x04type\x18\x01\x20\x01(\tB\0\x129\n\tv\ + alidator\x18\x02\x20\x01(\x0b2\x20.tendermint.abci.types.ValidatorB\x04\ + \xc8\xde\x1f\0\x12\x10\n\x06height\x18\x03\x20\x01(\x03B\0\x122\n\x04tim\ + e\x18\x04\x20\x01(\x0b2\x1a.google.protobuf.TimestampB\x08\xc8\xde\x1f\0\ + \x90\xdf\x1f\x01\x12\x1c\n\x12total_voting_power\x18\x05\x20\x01(\x03B\0\ + :\0*%\n\x0bCheckTxType\x12\x07\n\x03New\x10\0\x12\x0b\n\x07Recheck\x10\ + \x01\x1a\0B\x1c\xf8\xe1\x1e\x01\xa8\xe2\x1e\x01\xb8\xe2\x1e\x01\xd0\xe2\ + \x1e\x01\xc0\xe3\x1e\x01\xc8\xe2\x1e\x01\xe0\xe2\x1e\x01b\x06proto3\ "; static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { diff --git a/src/messages/merkle.rs b/src/proto/merkle.rs similarity index 95% rename from src/messages/merkle.rs rename to src/proto/merkle.rs index 3d16e8f..f3412c3 100644 --- a/src/messages/merkle.rs +++ b/src/proto/merkle.rs @@ -1,7 +1,7 @@ -// This file is generated by rust-protobuf 2.10.0. Do not edit +// This file is generated by rust-protobuf 2.10.2. Do not edit // @generated -// https://github.com/Manishearth/rust-clippy/issues/702 +// https://github.com/rust-lang/rust-clippy/issues/702 #![allow(unknown_lints)] #![allow(clippy::all)] @@ -24,7 +24,7 @@ use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions; /// Generated files are compatible only with the same version /// of protobuf runtime. 
-const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_10_0; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_10_2; #[derive(PartialEq,Clone,Default)] pub struct ProofOp { @@ -299,7 +299,7 @@ impl Proof { ::std::default::Default::default() } - // repeated .merkle.ProofOp ops = 1; + // repeated .tendermint.crypto.merkle.ProofOp ops = 1; pub fn get_ops(&self) -> &[ProofOp] { @@ -456,12 +456,13 @@ impl ::protobuf::reflect::ProtobufValue for Proof { } static file_descriptor_proto_data: &'static [u8] = b"\ - \n;github.com/tendermint/tendermint/crypto/merkle/merkle.proto\x12\x06me\ - rkle\":\n\x07ProofOp\x12\x0e\n\x04type\x18\x01\x20\x01(\tB\0\x12\r\n\x03\ - key\x18\x02\x20\x01(\x0cB\0\x12\x0e\n\x04data\x18\x03\x20\x01(\x0cB\0:\0\ - \"-\n\x05Proof\x12\"\n\x03ops\x18\x01\x20\x03(\x0b2\x0f.merkle.ProofOpB\ - \x04\xc8\xde\x1f\0:\0B\x14\xf8\xe1\x1e\x01\xe0\xe2\x1e\x01\xa8\xe2\x1e\ - \x01\xd0\xe2\x1e\x01\xc8\xe2\x1e\x01b\x06proto3\ + \n;github.com/tendermint/tendermint/crypto/merkle/merkle.proto\x12\x18te\ + ndermint.crypto.merkle\":\n\x07ProofOp\x12\x0e\n\x04type\x18\x01\x20\x01\ + (\tB\0\x12\r\n\x03key\x18\x02\x20\x01(\x0cB\0\x12\x0e\n\x04data\x18\x03\ + \x20\x01(\x0cB\0:\0\"?\n\x05Proof\x124\n\x03ops\x18\x01\x20\x03(\x0b2!.t\ + endermint.crypto.merkle.ProofOpB\x04\xc8\xde\x1f\0:\0B\x14\xd0\xe2\x1e\ + \x01\xe0\xe2\x1e\x01\xf8\xe1\x1e\x01\xc8\xe2\x1e\x01\xa8\xe2\x1e\x01b\ + \x06proto3\ "; static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { diff --git a/src/messages/types.rs b/src/proto/types.rs similarity index 88% rename from src/messages/types.rs rename to src/proto/types.rs index a7a8980..47ce19f 100644 --- a/src/messages/types.rs +++ b/src/proto/types.rs @@ -1,7 +1,7 @@ -// This file is generated by rust-protobuf 2.10.0. Do not edit +// This file is generated by rust-protobuf 2.10.2. Do not edit // @generated -// https://github.com/Manishearth/rust-clippy/issues/702 +// https://github.com/rust-lang/rust-clippy/issues/702 #![allow(unknown_lints)] #![allow(clippy::all)] @@ -17,17 +17,17 @@ #![allow(unsafe_code)] #![allow(unused_imports)] #![allow(unused_results)] -//! Generated file from `github.com/tendermint/tendermint/libs/common/types.proto` +//! Generated file from `github.com/tendermint/tendermint/libs/kv/types.proto` use protobuf::Message as Message_imported_for_functions; use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions; /// Generated files are compatible only with the same version /// of protobuf runtime. 
-const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_10_0; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_10_2; #[derive(PartialEq,Clone,Default)] -pub struct KVPair { +pub struct Pair { // message fields pub key: ::std::vec::Vec, pub value: ::std::vec::Vec, @@ -36,14 +36,14 @@ pub struct KVPair { pub cached_size: ::protobuf::CachedSize, } -impl<'a> ::std::default::Default for &'a KVPair { - fn default() -> &'a KVPair { - ::default_instance() +impl<'a> ::std::default::Default for &'a Pair { + fn default() -> &'a Pair { + ::default_instance() } } -impl KVPair { - pub fn new() -> KVPair { +impl Pair { + pub fn new() -> Pair { ::std::default::Default::default() } @@ -100,7 +100,7 @@ impl KVPair { } } -impl ::protobuf::Message for KVPair { +impl ::protobuf::Message for Pair { fn is_initialized(&self) -> bool { true } @@ -175,8 +175,8 @@ impl ::protobuf::Message for KVPair { Self::descriptor_static() } - fn new() -> KVPair { - KVPair::new() + fn new() -> Pair { + Pair::new() } fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { @@ -189,16 +189,16 @@ impl ::protobuf::Message for KVPair { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( "key", - |m: &KVPair| { &m.key }, - |m: &mut KVPair| { &mut m.key }, + |m: &Pair| { &m.key }, + |m: &mut Pair| { &mut m.key }, )); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( "value", - |m: &KVPair| { &m.value }, - |m: &mut KVPair| { &mut m.value }, + |m: &Pair| { &m.value }, + |m: &mut Pair| { &mut m.value }, )); - ::protobuf::reflect::MessageDescriptor::new::( - "KVPair", + ::protobuf::reflect::MessageDescriptor::new::( + "Pair", fields, file_descriptor_proto() ) @@ -206,18 +206,18 @@ impl ::protobuf::Message for KVPair { } } - fn default_instance() -> &'static KVPair { - static mut instance: ::protobuf::lazy::Lazy = ::protobuf::lazy::Lazy { + fn default_instance() -> &'static Pair { + static mut instance: ::protobuf::lazy::Lazy = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, - ptr: 0 as *const KVPair, + ptr: 0 as *const Pair, }; unsafe { - instance.get(KVPair::new) + instance.get(Pair::new) } } } -impl ::protobuf::Clear for KVPair { +impl ::protobuf::Clear for Pair { fn clear(&mut self) { self.key.clear(); self.value.clear(); @@ -225,13 +225,13 @@ impl ::protobuf::Clear for KVPair { } } -impl ::std::fmt::Debug for KVPair { +impl ::std::fmt::Debug for Pair { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } -impl ::protobuf::reflect::ProtobufValue for KVPair { +impl ::protobuf::reflect::ProtobufValue for Pair { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } @@ -442,12 +442,12 @@ impl ::protobuf::reflect::ProtobufValue for KI64Pair { } static file_descriptor_proto_data: &'static [u8] = b"\ - \n8github.com/tendermint/tendermint/libs/common/types.proto\x12\x06commo\ - n\"*\n\x06KVPair\x12\r\n\x03key\x18\x01\x20\x01(\x0cB\0\x12\x0f\n\x05val\ - ue\x18\x02\x20\x01(\x0cB\0:\0\",\n\x08KI64Pair\x12\r\n\x03key\x18\x01\ - \x20\x01(\x0cB\0\x12\x0f\n\x05value\x18\x02\x20\x01(\x03B\0:\0B\x1c\xd0\ - \xe2\x1e\x01\xa8\xe2\x1e\x01\xc8\xe2\x1e\x01\xb8\xe2\x1e\x01\xf8\xe1\x1e\ - \x01\xe0\xe2\x1e\x01\xc0\xe3\x1e\x01b\x06proto3\ + \n4github.com/tendermint/tendermint/libs/kv/types.proto\x12\x12tendermin\ + 
t.libs.kv\"(\n\x04Pair\x12\r\n\x03key\x18\x01\x20\x01(\x0cB\0\x12\x0f\n\ + \x05value\x18\x02\x20\x01(\x0cB\0:\0\",\n\x08KI64Pair\x12\r\n\x03key\x18\ + \x01\x20\x01(\x0cB\0\x12\x0f\n\x05value\x18\x02\x20\x01(\x03B\0:\0B\x1c\ + \xc0\xe3\x1e\x01\xf8\xe1\x1e\x01\xe0\xe2\x1e\x01\xc8\xe2\x1e\x01\xd0\xe2\ + \x1e\x01\xa8\xe2\x1e\x01\xb8\xe2\x1e\x01b\x06proto3\ "; static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { diff --git a/src/server.rs b/src/server.rs index 6ea6453..251e714 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,105 +1,285 @@ -use std::net::SocketAddr; -use std::ops::DerefMut; -use std::sync::{Arc, Mutex}; - -use env_logger::Env; -use tokio; -use tokio::codec::Decoder; -use tokio::io; -use tokio::net::TcpListener; -use tokio::prelude::*; -use tokio::runtime; - -use crate::codec::ABCICodec; -use crate::messages::abci::*; -use crate::Application; - -/// Creates the TCP server and listens for connections from Tendermint -pub fn serve(app: A, addr: SocketAddr) -> io::Result<()> +#[cfg(unix)] +use std::path::PathBuf; +use std::{io::Result, net::SocketAddr, sync::Arc}; + +#[cfg(all(unix, feature = "async-std"))] +use async_std::os::unix::net::UnixListener; +#[cfg(feature = "async-std")] +use async_std::{ + io::{Read, Write}, + net::TcpListener, + prelude::*, + sync::Mutex, + task::spawn, +}; +#[cfg(all(unix, feature = "tokio"))] +use tokio::net::UnixListener; +#[cfg(feature = "tokio")] +use tokio::{ + io::{AsyncRead as Read, AsyncWrite as Write}, + net::TcpListener, + spawn, + stream::StreamExt, + sync::Mutex, +}; + +use crate::{ + proto::{abci::*, decode, encode}, + Consensus, Info, Mempool, +}; + +/// ABCI Server +pub struct Server where - A: Application + 'static + Send + Sync, + C: Consensus + 'static, + M: Mempool + 'static, + I: Info + 'static, { - env_logger::from_env(Env::default().default_filter_or("info")) - .try_init() - .ok(); - let listener = TcpListener::bind(&addr).unwrap(); - let incoming = listener.incoming(); - let app = Arc::new(Mutex::new(app)); - let server = incoming - .map_err(|err| panic!("Connection failed: {}", err)) - .for_each(move |socket| { - info!("Got connection! {:?}", socket); - let framed = ABCICodec::new().framed(socket); - let (_writer, reader) = framed.split(); - let app_instance = Arc::clone(&app); - - let responses = reader.map(move |request| { - debug!("Got Request! {:?}", request); - respond(&app_instance, &request) - }); - - let writes = responses.fold(_writer, |writer, response| { - debug!("Return Response! 
{:?}", response); - writer.send(response) - }); - tokio::spawn(writes.then(|_| Ok(()))) - }); - - let mut rt = runtime::Builder::new() - .panic_handler(|_err| { - std::process::exit(1); - // std::panic::resume_unwind(err); - }) - .build() - .unwrap(); - rt.block_on(server).unwrap(); - Ok(()) + pub(crate) consensus: Arc, + pub(crate) mempool: Arc, + pub(crate) info: Arc, + pub(crate) consensus_state: Arc>, } -fn respond(app: &Arc>, request: &Request) -> Response +impl Server where - A: Application + 'static + Send + Sync, + C: Consensus + 'static, + M: Mempool + 'static, + I: Info + 'static, { - let mut guard = app.lock().unwrap(); - let app = guard.deref_mut(); + /// Creates a new instance of [`Server`](struct.Server.html) + #[inline] + pub fn new(consensus: C, mempool: M, info: I) -> Self { + Self { + consensus: Arc::new(consensus), + mempool: Arc::new(mempool), + info: Arc::new(info), + consensus_state: Arc::new(Mutex::new(ConsensusState::default())), + } + } - let mut response = Response::new(); + /// Starts ABCI server + /// + /// # Note + /// + /// This is an `async` function and returns a `Future`. So, you'll need an executor to drive the `Future` returned + /// from this function. `async-std` and `tokio` are two popular options. + pub async fn run(&self, addr: T) -> Result<()> + where + T: Into
, + { + let addr = addr.into(); + + match addr { + Address::Tcp(addr) => { + #[cfg(feature = "async-std")] + let listener = TcpListener::bind(addr).await?; + + #[cfg(feature = "tokio")] + let mut listener = TcpListener::bind(addr).await?; + + log::info!("Started ABCI server at {}", addr); + + let mut incoming = listener.incoming(); + + while let Some(stream) = incoming.next().await { + self.handle_connection(stream?).await; + } + } + #[cfg(unix)] + Address::Uds(path) => { + #[cfg(feature = "async-std")] + let listener = UnixListener::bind(&path).await?; + + #[cfg(feature = "tokio")] + let mut listener = UnixListener::bind(&path)?; + + log::info!("Started ABCI server at {}", path.display()); + + let mut incoming = listener.incoming(); - match request.value { - // Info - Some(Request_oneof_value::info(ref r)) => response.set_info(app.info(r)), - // Init chain - Some(Request_oneof_value::init_chain(ref r)) => response.set_init_chain(app.init_chain(r)), - // Set option - Some(Request_oneof_value::set_option(ref r)) => response.set_set_option(app.set_option(r)), - // Query - Some(Request_oneof_value::query(ref r)) => response.set_query(app.query(r)), - // Check tx - Some(Request_oneof_value::check_tx(ref r)) => response.set_check_tx(app.check_tx(r)), - // Begin block - Some(Request_oneof_value::begin_block(ref r)) => { - response.set_begin_block(app.begin_block(r)) + while let Some(stream) = incoming.next().await { + self.handle_connection(stream?).await; + } + } } - // Deliver Tx - Some(Request_oneof_value::deliver_tx(ref r)) => response.set_deliver_tx(app.deliver_tx(r)), - // End block - Some(Request_oneof_value::end_block(ref r)) => response.set_end_block(app.end_block(r)), - // Commit - Some(Request_oneof_value::commit(ref r)) => response.set_commit(app.commit(r)), - // Flush - Some(Request_oneof_value::flush(_)) => response.set_flush(ResponseFlush::new()), - // Echo - Some(Request_oneof_value::echo(ref r)) => { - let echo_msg = r.get_message().to_string(); - let mut echo = ResponseEcho::new(); - echo.set_message(echo_msg); - response.set_echo(echo); + + Ok(()) + } + + async fn handle_connection(&self, mut stream: S) + where + S: Read + Write + Send + Unpin + 'static, + { + let consensus = self.consensus.clone(); + let mempool = self.mempool.clone(); + let info = self.info.clone(); + let consensus_state = self.consensus_state.clone(); + + spawn(async move { + while let Ok(request) = decode(&mut stream).await { + match request { + Some(request) => { + let response = process( + consensus.clone(), + mempool.clone(), + info.clone(), + consensus_state.clone(), + request, + ) + .await; + + if let Err(err) = encode(response, &mut stream).await { + log::error!("Error while writing to stream: {}", err); + } + } + None => log::debug!("Received empty request"), + } + } + + log::error!("Error while receiving ABCI request from socket"); + }); + } +} + +async fn process( + consensus: Arc, + mempool: Arc, + info: Arc, + consensus_state: Arc>, + request: Request, +) -> Response +where + C: Consensus + 'static, + M: Mempool + 'static, + I: Info + 'static, +{ + log::debug!("Received request: {:?}", request); + + let value = match request.value.unwrap() { + Request_oneof_value::echo(request) => { + let mut response = ResponseEcho::new(); + response.message = info.echo(request.message).await; + Response_oneof_value::echo(response) } - _ => { - let mut re = ResponseException::new(); - re.set_error(String::from("Unrecognized request")); - response.set_exception(re) + Request_oneof_value::flush(_) => { + 
consensus.flush().await; + Response_oneof_value::flush(ResponseFlush::new()) } - } + Request_oneof_value::info(request) => { + Response_oneof_value::info(info.info(request.into()).await.into()) + } + Request_oneof_value::set_option(request) => { + Response_oneof_value::set_option(info.set_option(request.into()).await.into()) + } + Request_oneof_value::init_chain(request) => { + consensus_state + .lock() + .await + .validate(ConsensusState::InitChain); + Response_oneof_value::init_chain(consensus.init_chain(request.into()).await.into()) + } + Request_oneof_value::query(request) => { + Response_oneof_value::query(info.query(request.into()).await.into()) + } + Request_oneof_value::begin_block(request) => { + consensus_state + .lock() + .await + .validate(ConsensusState::BeginBlock); + Response_oneof_value::begin_block(consensus.begin_block(request.into()).await.into()) + } + Request_oneof_value::check_tx(request) => { + Response_oneof_value::check_tx(mempool.check_tx(request.into()).await.into()) + } + Request_oneof_value::deliver_tx(request) => { + consensus_state + .lock() + .await + .validate(ConsensusState::DeliverTx); + Response_oneof_value::deliver_tx(consensus.deliver_tx(request.into()).await.into()) + } + Request_oneof_value::end_block(request) => { + consensus_state + .lock() + .await + .validate(ConsensusState::EndBlock); + Response_oneof_value::end_block(consensus.end_block(request.into()).await.into()) + } + Request_oneof_value::commit(_) => { + consensus_state + .lock() + .await + .validate(ConsensusState::Commit); + Response_oneof_value::commit(consensus.commit().await.into()) + } + }; + + let mut response = Response::new(); + response.value = Some(value); + + log::debug!("Sending response: {:?}", response); + response } + +#[derive(Debug, Clone, Copy)] +pub enum ConsensusState { + InitChain, + BeginBlock, + DeliverTx, + EndBlock, + Commit, +} + +impl Default for ConsensusState { + #[inline] + fn default() -> Self { + ConsensusState::InitChain + } +} + +impl ConsensusState { + pub fn validate(&mut self, mut next: ConsensusState) { + let is_valid = match (&self, next) { + (ConsensusState::InitChain, ConsensusState::InitChain) => true, + (ConsensusState::InitChain, ConsensusState::BeginBlock) => true, + (ConsensusState::BeginBlock, ConsensusState::DeliverTx) => true, + (ConsensusState::BeginBlock, ConsensusState::EndBlock) => true, + (ConsensusState::DeliverTx, ConsensusState::DeliverTx) => true, + (ConsensusState::DeliverTx, ConsensusState::EndBlock) => true, + (ConsensusState::EndBlock, ConsensusState::Commit) => true, + (ConsensusState::Commit, ConsensusState::BeginBlock) => true, + _ => false, + }; + + if is_valid { + std::mem::swap(self, &mut next); + } else { + panic!("{:?} cannot be called after {:?}", next, self); + } + } +} + +/// Address of ABCI Server +pub enum Address { + /// TCP Address + Tcp(SocketAddr), + /// UDS Address + #[cfg(unix)] + #[cfg_attr(feature = "doc", doc(cfg(unix)))] + Uds(PathBuf), +} + +impl From for Address { + fn from(addr: SocketAddr) -> Self { + Self::Tcp(addr) + } +} + +#[cfg(unix)] +impl From for Address { + fn from(path: PathBuf) -> Self { + Self::Uds(path) + } +} diff --git a/src/types.rs b/src/types.rs new file mode 100644 index 0000000..ce5f316 --- /dev/null +++ b/src/types.rs @@ -0,0 +1,53 @@ +#![allow(missing_docs)] +//! 
Types used in ABCI +use std::{error, fmt}; + +mod begin_block; +mod check_tx; +mod commit; +mod deliver_tx; +mod end_block; +mod info; +mod init_chain; +mod misc; +mod query; +mod set_option; + +pub use self::begin_block::*; +pub use self::check_tx::*; +pub use self::commit::*; +pub use self::deliver_tx::*; +pub use self::end_block::*; +pub use self::info::*; +pub use self::init_chain::*; +pub use self::misc::*; +pub use self::query::*; +pub use self::set_option::*; + +#[derive(Debug)] +/// ABCI Error +pub struct Error { + /// Error code + pub code: u32, + /// Namespace for error code + pub codespace: String, + /// Output of application's logger (may be non-deterministic) + pub log: String, + /// Additional information (may be non-deterministic) + pub info: String, +} + +/// ABCI Result +pub type Result = std::result::Result; + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Error: [Code: {}], [Codespace: {}], [Log: {}], [Info: {}]", + self.code, self.codespace, self.log, self.info + ) + } +} + +impl error::Error for Error {} diff --git a/src/types/begin_block.rs b/src/types/begin_block.rs new file mode 100644 index 0000000..ec10a52 --- /dev/null +++ b/src/types/begin_block.rs @@ -0,0 +1,51 @@ +use crate::proto::abci::{Event as ProtoEvent, RequestBeginBlock, ResponseBeginBlock}; +use crate::types::{Event, Evidence, Header, LastCommitInfo}; + +#[derive(Debug, Default)] +pub struct BeginBlockRequest { + /// Block's hash. This can be derived from the block header + pub hash: Vec, + /// Block header + pub header: Option
<Header>,
+    /// Info about the last commit, including the round, and the list of validators and which ones signed the last block
+    pub last_commit_info: Option<LastCommitInfo>,
+    /// List of evidence of validators that acted maliciously
+    pub byzantine_validators: Vec<Evidence>,
+}
+
+impl From<RequestBeginBlock> for BeginBlockRequest {
+    fn from(request_begin_block: RequestBeginBlock) -> BeginBlockRequest {
+        BeginBlockRequest {
+            hash: request_begin_block.hash,
+            header: request_begin_block.header.into_option().map(Into::into),
+            last_commit_info: request_begin_block
+                .last_commit_info
+                .into_option()
+                .map(Into::into),
+            byzantine_validators: request_begin_block
+                .byzantine_validators
+                .into_iter()
+                .map(Into::into)
+                .collect(),
+        }
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct BeginBlockResponse {
+    /// Events for filtering and indexing
+    pub events: Vec<Event>,
+}
+
+impl From<BeginBlockResponse> for ResponseBeginBlock {
+    fn from(begin_block_response: BeginBlockResponse) -> ResponseBeginBlock {
+        let mut response_begin_block = ResponseBeginBlock::new();
+        response_begin_block.events = begin_block_response
+            .events
+            .into_iter()
+            .map(Into::into)
+            .collect::<Vec<ProtoEvent>>()
+            .into();
+        response_begin_block
+    }
+}
diff --git a/src/types/check_tx.rs b/src/types/check_tx.rs
new file mode 100644
index 0000000..09f8288
--- /dev/null
+++ b/src/types/check_tx.rs
@@ -0,0 +1,92 @@
+use crate::proto::abci::{
+    CheckTxType as ProtoCheckTxType, Event as ProtoEvent, RequestCheckTx, ResponseCheckTx,
+};
+use crate::types::{Event, Result};
+
+#[derive(Debug, Default)]
+pub struct CheckTxRequest {
+    /// The request transaction bytes
+    pub tx: Vec<u8>,
+    /// Denotes whether this is a new request or a re-check request
+    pub check_type: CheckTxType,
+}
+
+impl From<RequestCheckTx> for CheckTxRequest {
+    fn from(request_check_tx: RequestCheckTx) -> CheckTxRequest {
+        CheckTxRequest {
+            tx: request_check_tx.tx,
+            check_type: request_check_tx.field_type.into(),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub enum CheckTxType {
+    /// Denotes that the transaction has never been checked
+    New,
+    /// Denotes that the transaction was already checked and certain expensive operations (like checking signatures) can
+    /// be skipped
+    Recheck,
+}
+
+impl Default for CheckTxType {
+    #[inline]
+    fn default() -> Self {
+        Self::New
+    }
+}
+
+impl From<ProtoCheckTxType> for CheckTxType {
+    fn from(proto_check_tx_type: ProtoCheckTxType) -> Self {
+        match proto_check_tx_type {
+            ProtoCheckTxType::New => Self::New,
+            ProtoCheckTxType::Recheck => Self::Recheck,
+        }
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct CheckTxResponse {
+    /// Result bytes, if any.
+ pub data: Vec, + /// Output of application's logger (may be non-deterministic) + pub log: String, + /// Additional information (may be non-deterministic) + pub info: String, + /// Amount of gas requested for transaction + pub gas_wanted: i64, + /// Amount of gas consumed by transaction + pub gas_used: i64, + /// Events for filtering and indexing + pub events: Vec, +} + +impl From> for ResponseCheckTx { + fn from(check_tx_response: Result) -> ResponseCheckTx { + let mut response_check_tx = ResponseCheckTx::new(); + + match check_tx_response { + Ok(check_tx_response) => { + response_check_tx.data = check_tx_response.data; + response_check_tx.log = check_tx_response.log; + response_check_tx.info = check_tx_response.info; + response_check_tx.gas_wanted = check_tx_response.gas_wanted; + response_check_tx.gas_used = check_tx_response.gas_used; + response_check_tx.events = check_tx_response + .events + .into_iter() + .map(Into::into) + .collect::>() + .into(); + } + Err(error) => { + response_check_tx.code = error.code; + response_check_tx.codespace = error.codespace; + response_check_tx.log = error.log; + response_check_tx.info = error.info; + } + } + + response_check_tx + } +} diff --git a/src/types/commit.rs b/src/types/commit.rs new file mode 100644 index 0000000..3369953 --- /dev/null +++ b/src/types/commit.rs @@ -0,0 +1,15 @@ +use crate::proto::abci::ResponseCommit; + +#[derive(Debug, Default)] +pub struct CommitResponse { + /// The Merkle root hash of the application state + pub data: Vec, +} + +impl From for ResponseCommit { + fn from(commit_response: CommitResponse) -> ResponseCommit { + let mut response_commit = ResponseCommit::new(); + response_commit.data = commit_response.data; + response_commit + } +} diff --git a/src/types/deliver_tx.rs b/src/types/deliver_tx.rs new file mode 100644 index 0000000..8a3269e --- /dev/null +++ b/src/types/deliver_tx.rs @@ -0,0 +1,62 @@ +use crate::proto::abci::{Event as ProtoEvent, RequestDeliverTx, ResponseDeliverTx}; +use crate::types::{Event, Result}; + +#[derive(Debug, Default)] +pub struct DeliverTxRequest { + /// The request transaction bytes + pub tx: Vec, +} + +impl From for DeliverTxRequest { + fn from(request_deliver_tx: RequestDeliverTx) -> DeliverTxRequest { + DeliverTxRequest { + tx: request_deliver_tx.tx, + } + } +} + +#[derive(Debug, Default)] +pub struct DeliverTxResponse { + /// Result bytes, if any. 
+ pub data: Vec, + /// Output of application's logger (may be non-deterministic) + pub log: String, + /// Additional information (may be non-deterministic) + pub info: String, + /// Amount of gas requested for transaction + pub gas_wanted: i64, + /// Amount of gas consumed by transaction + pub gas_used: i64, + /// Events for filtering and indexing + pub events: Vec, +} + +impl From> for ResponseDeliverTx { + fn from(deliver_tx_response: Result) -> ResponseDeliverTx { + let mut response_deliver_tx = ResponseDeliverTx::new(); + + match deliver_tx_response { + Ok(deliver_tx_response) => { + response_deliver_tx.data = deliver_tx_response.data; + response_deliver_tx.log = deliver_tx_response.log; + response_deliver_tx.info = deliver_tx_response.info; + response_deliver_tx.gas_wanted = deliver_tx_response.gas_wanted; + response_deliver_tx.gas_used = deliver_tx_response.gas_used; + response_deliver_tx.events = deliver_tx_response + .events + .into_iter() + .map(Into::into) + .collect::>() + .into(); + } + Err(error) => { + response_deliver_tx.code = error.code; + response_deliver_tx.codespace = error.codespace; + response_deliver_tx.log = error.log; + response_deliver_tx.info = error.info; + } + } + + response_deliver_tx + } +} diff --git a/src/types/end_block.rs b/src/types/end_block.rs new file mode 100644 index 0000000..0d97e91 --- /dev/null +++ b/src/types/end_block.rs @@ -0,0 +1,51 @@ +use crate::proto::abci::{ + Event as ProtoEvent, RequestEndBlock, ResponseEndBlock, ValidatorUpdate as ProtoValidatorUpdate, +}; +use crate::types::{ConsensusParams, Event, ValidatorUpdate}; + +#[derive(Debug, Default)] +pub struct EndBlockRequest { + /// Height of the block just executed + pub height: i64, +} + +impl From for EndBlockRequest { + fn from(request_end_block: RequestEndBlock) -> EndBlockRequest { + EndBlockRequest { + height: request_end_block.height, + } + } +} + +#[derive(Debug, Default)] +pub struct EndBlockResponse { + /// Changes to validator set (set voting power to 0 to remove) + pub validator_updates: Vec, + /// Changes to consensus-critical time, size, and other parameters + pub consensus_param_updates: Option, + /// Events for filtering and indexing + pub events: Vec, +} + +impl From for ResponseEndBlock { + fn from(end_block_response: EndBlockResponse) -> ResponseEndBlock { + let mut response_end_block = ResponseEndBlock::new(); + response_end_block.validator_updates = end_block_response + .validator_updates + .into_iter() + .map(Into::into) + .collect::>() + .into(); + response_end_block.consensus_param_updates = end_block_response + .consensus_param_updates + .map(Into::into) + .into(); + response_end_block.events = end_block_response + .events + .into_iter() + .map(Into::into) + .collect::>() + .into(); + response_end_block + } +} diff --git a/src/types/info.rs b/src/types/info.rs new file mode 100644 index 0000000..a1fef74 --- /dev/null +++ b/src/types/info.rs @@ -0,0 +1,47 @@ +use crate::proto::abci::{RequestInfo, ResponseInfo}; + +#[derive(Debug, Default)] +pub struct InfoRequest { + /// Tendermint software semantic version + pub version: String, + /// Tendermint block protocol version + pub block_version: u64, + /// Tendermint P2P protocol version + pub p2p_version: u64, +} + +impl From for InfoRequest { + fn from(request_info: RequestInfo) -> InfoRequest { + InfoRequest { + version: request_info.version, + block_version: request_info.block_version, + p2p_version: request_info.p2p_version, + } + } +} + +#[derive(Debug, Default)] +pub struct InfoResponse { + /// Some arbitrary 
information + pub data: String, + /// Application software semantic version + pub version: String, + /// Application protocol version + pub app_version: u64, + /// Latest block for which the app has called Commit + pub last_block_height: i64, + /// Latest result of Commit + pub last_block_app_hash: Vec, +} + +impl From for ResponseInfo { + fn from(info_response: InfoResponse) -> ResponseInfo { + let mut response_info = ResponseInfo::new(); + response_info.data = info_response.data; + response_info.version = info_response.version; + response_info.app_version = info_response.app_version; + response_info.last_block_height = info_response.last_block_height; + response_info.last_block_app_hash = info_response.last_block_app_hash; + response_info + } +} diff --git a/src/types/init_chain.rs b/src/types/init_chain.rs new file mode 100644 index 0000000..5135723 --- /dev/null +++ b/src/types/init_chain.rs @@ -0,0 +1,65 @@ +use std::time::Duration; + +use crate::proto::abci::{ + RequestInitChain, ResponseInitChain, ValidatorUpdate as ProtoValidatorUpdate, +}; +use crate::types::{ConsensusParams, ValidatorUpdate}; + +#[derive(Debug, Default)] +pub struct InitChainRequest { + /// Genesis time (duration since epoch) + pub time: Option, + /// ID of blockchain + pub chain_id: String, + /// Initial consensus-critical parameters + pub consensus_params: Option, + /// Initial genesis validators + pub validators: Vec, + /// Serialized initial application state (amino-encoded JSON bytes) + pub app_state_bytes: Vec, +} + +impl From for InitChainRequest { + fn from(request_init_chain: RequestInitChain) -> InitChainRequest { + InitChainRequest { + time: request_init_chain + .time + .into_option() + .map(|timestamp| Duration::new(timestamp.seconds as u64, timestamp.nanos as u32)), + chain_id: request_init_chain.chain_id, + consensus_params: request_init_chain + .consensus_params + .into_option() + .map(Into::into), + validators: request_init_chain + .validators + .into_iter() + .map(Into::into) + .collect(), + app_state_bytes: request_init_chain.app_state_bytes, + } + } +} + +#[derive(Debug, Default)] +pub struct InitChainResponse { + /// Initial consensus-critical parameters + pub consensus_params: Option, + /// Initial validator set (if non empty) + pub validators: Vec, +} + +impl From for ResponseInitChain { + fn from(init_chain_response: InitChainResponse) -> ResponseInitChain { + let mut response_init_chain = ResponseInitChain::new(); + response_init_chain.consensus_params = + init_chain_response.consensus_params.map(Into::into).into(); + response_init_chain.validators = init_chain_response + .validators + .into_iter() + .map(Into::into) + .collect::>() + .into(); + response_init_chain + } +} diff --git a/src/types/misc.rs b/src/types/misc.rs new file mode 100644 index 0000000..3b2077f --- /dev/null +++ b/src/types/misc.rs @@ -0,0 +1,484 @@ +use std::time::Duration; + +use protobuf::well_known_types::Duration as ProtoDuration; + +use crate::proto::abci::{ + BlockID as ProtoBlockId, BlockParams as ProtoBlockParams, + ConsensusParams as ProtoConsensusParams, Event as ProtoEvent, Evidence as ProtoEvidence, + EvidenceParams as ProtoEvidenceParams, Header as ProtoHeader, + LastCommitInfo as ProtoLastCommitInfo, PartSetHeader as ProtoPartSetHeader, + PubKey as ProtoPublicKey, Validator as ProtoValidator, ValidatorParams as ProtoValidatorParams, + ValidatorUpdate as ProtoValidatorUpdate, Version as ProtoVersion, VoteInfo as ProtoVoteInfo, +}; +use crate::proto::merkle::{Proof as ProtoProof, ProofOp as ProtoProofOp}; 
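As an aside, a minimal sketch of how an application might fill in one of the domain types introduced above. It assumes the new `types` module is re-exported as `abci::types` and that the `Info` connection's `info` handler returns an `InfoResponse` (as the dispatch in `server.rs` suggests); the function name and field values are purely illustrative:

```rust
// Illustrative sketch only; not part of this patch.
use abci::types::InfoResponse;

// Build the response handed back for `info` requests; the
// `From<InfoResponse> for ResponseInfo` conversion above turns it into the
// wire-level protobuf message.
fn make_info_response(last_height: i64, last_app_hash: Vec<u8>) -> InfoResponse {
    InfoResponse {
        data: "example-app".to_string(), // arbitrary, application-defined
        version: "0.1.0".to_string(),    // application semantic version
        app_version: 1,                  // application protocol version
        last_block_height: last_height,  // latest block for which the app has called `commit`
        last_block_app_hash: last_app_hash, // latest result of `commit`
    }
}
```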
+use crate::proto::types::Pair as ProtoKeyValuePair; + +#[derive(Debug, Default)] +pub struct ConsensusParams { + /// Parameters limiting the size of a block and time between consecutive blocks + pub block: Option, + /// Parameters limiting the validity of evidence of byzantine behavior + pub evidence: Option, + /// Parameters limiting the types of pubkeys validators can use + pub validator: Option, +} + +impl From for ProtoConsensusParams { + fn from(consensus_params: ConsensusParams) -> ProtoConsensusParams { + let mut proto_consensus_params = ProtoConsensusParams::new(); + proto_consensus_params.block = consensus_params.block.map(Into::into).into(); + proto_consensus_params.evidence = consensus_params.evidence.map(Into::into).into(); + proto_consensus_params.validator = consensus_params.validator.map(Into::into).into(); + proto_consensus_params + } +} + +impl From for ConsensusParams { + fn from(proto_consensus_params: ProtoConsensusParams) -> ConsensusParams { + ConsensusParams { + block: proto_consensus_params.block.into_option().map(Into::into), + evidence: proto_consensus_params + .evidence + .into_option() + .map(Into::into), + validator: proto_consensus_params + .validator + .into_option() + .map(Into::into), + } + } +} + +#[derive(Debug, Default)] +pub struct BlockParams { + /// Max size of a block, in bytes + pub max_bytes: i64, + /// Max sum of GasWanted in a proposed block + /// + /// # Note + /// + /// Blocks that violate this may be committed if there are Byzantine proposers. It's the application's + /// responsibility to handle this when processing a block! + pub max_gas: i64, +} + +impl From for ProtoBlockParams { + fn from(block_params: BlockParams) -> ProtoBlockParams { + let mut proto_block_params = ProtoBlockParams::new(); + proto_block_params.max_bytes = block_params.max_bytes; + proto_block_params.max_gas = block_params.max_gas; + proto_block_params + } +} + +impl From for BlockParams { + fn from(proto_block_params: ProtoBlockParams) -> BlockParams { + BlockParams { + max_bytes: proto_block_params.max_bytes, + max_gas: proto_block_params.max_gas, + } + } +} + +#[derive(Debug, Default)] +/// Tendermint adopts a hybrid approach to check validity of an evidence. User can provide both `max_age_num_blocks` and +/// `max_age_duration` and tendermint only rejects an evidence if it is older than `max_age_num_blocks` and also +/// `max_age_duration`. +/// +/// # Note +/// +/// - This should correspond with an app's "unbonding period" or other similar mechanism for handling +/// Nothing-At-Stake attacks. +pub struct EvidenceParams { + /// Max age of evidence, in blocks. Evidence older than this is considered stale and ignored + pub max_age_num_blocks: i64, + /// Max age of evidence, in time duration. 
Evidence older than this is considered stale and ignored + pub max_age_duration: Option, +} + +impl From for ProtoEvidenceParams { + fn from(evidence_params: EvidenceParams) -> ProtoEvidenceParams { + let mut proto_evidence_params = ProtoEvidenceParams::new(); + proto_evidence_params.max_age_num_blocks = evidence_params.max_age_num_blocks; + proto_evidence_params.max_age_duration = evidence_params + .max_age_duration + .map(|duration| { + let mut proto_duration = ProtoDuration::new(); + proto_duration.set_seconds(duration.as_secs() as i64); + proto_duration.set_nanos(duration.subsec_nanos() as i32); + proto_duration + }) + .into(); + proto_evidence_params + } +} + +impl From for EvidenceParams { + fn from(proto_evidence_params: ProtoEvidenceParams) -> EvidenceParams { + let max_age_duration = + proto_evidence_params + .max_age_duration + .into_option() + .map(|ref proto_duration| { + Duration::new( + proto_duration.get_seconds() as u64, + proto_duration.get_nanos() as u32, + ) + }); + + EvidenceParams { + max_age_num_blocks: proto_evidence_params.max_age_num_blocks, + max_age_duration, + } + } +} + +#[derive(Debug, Default)] +pub struct ValidatorParams { + /// List of accepted public key types (uses same naming as `PublicKey.public_key_type`) + pub public_key_types: Vec, +} + +impl From for ProtoValidatorParams { + fn from(validator_params: ValidatorParams) -> ProtoValidatorParams { + let mut proto_validator_params = ProtoValidatorParams::new(); + proto_validator_params.pub_key_types = validator_params.public_key_types.into(); + proto_validator_params + } +} + +impl From for ValidatorParams { + fn from(proto_validator_params: ProtoValidatorParams) -> ValidatorParams { + ValidatorParams { + public_key_types: proto_validator_params.pub_key_types.into_vec(), + } + } +} + +#[derive(Debug, Default)] +pub struct ValidatorUpdate { + /// Public key of the validator + pub public_key: Option, + /// Voting power of the validator + pub power: i64, +} + +impl From for ProtoValidatorUpdate { + fn from(validator_update: ValidatorUpdate) -> ProtoValidatorUpdate { + let mut proto_validator_update = ProtoValidatorUpdate::new(); + proto_validator_update.pub_key = validator_update.public_key.map(Into::into).into(); + proto_validator_update.power = validator_update.power; + proto_validator_update + } +} + +impl From for ValidatorUpdate { + fn from(proto_validator_update: ProtoValidatorUpdate) -> ValidatorUpdate { + ValidatorUpdate { + public_key: proto_validator_update.pub_key.into_option().map(Into::into), + power: proto_validator_update.power, + } + } +} + +#[derive(Debug, Default)] +pub struct PublicKey { + /// Type of the public key. A simple string like "ed25519" (in the future, may indicate a serialization algorithm to + /// parse the Data, for instance "amino") + pub public_key_type: String, + /// Public key data. For a simple public key, it's just the raw bytes. If the `public_key_type` indicates an + /// encoding algorithm, this is the encoded public key. 
+ pub data: Vec, +} + +impl From for ProtoPublicKey { + fn from(public_key: PublicKey) -> ProtoPublicKey { + let mut proto_public_key = ProtoPublicKey::new(); + proto_public_key.field_type = public_key.public_key_type; + proto_public_key.data = public_key.data; + proto_public_key + } +} + +impl From for PublicKey { + fn from(proto_public_key: ProtoPublicKey) -> PublicKey { + PublicKey { + public_key_type: proto_public_key.field_type, + data: proto_public_key.data, + } + } +} + +#[derive(Debug, Default)] +pub struct Proof { + /// List of chained Merkle proofs, of possibly different types + /// + /// # Note + /// + /// - The Merkle root of one op is the value being proven in the next op + /// - The Merkle root of the final op should equal the ultimate root hash being verified against + pub ops: Vec, +} + +impl From for ProtoProof { + fn from(proof: Proof) -> ProtoProof { + let mut proto_proof = ProtoProof::new(); + proto_proof.ops = proof + .ops + .into_iter() + .map(Into::into) + .collect::>() + .into(); + proto_proof + } +} + +#[derive(Debug, Default)] +pub struct ProofOp { + /// Type of Merkle proof and how it's encoded + pub proof_op_type: String, + /// Key in the Merkle tree that this proof is for + pub key: Vec, + /// Encoded Merkle proof for the key + pub data: Vec, +} + +impl From for ProtoProofOp { + fn from(proof_op: ProofOp) -> ProtoProofOp { + let mut proto_proof_op = ProtoProofOp::new(); + proto_proof_op.field_type = proof_op.proof_op_type; + proto_proof_op.key = proof_op.key; + proto_proof_op.data = proof_op.data; + proto_proof_op + } +} + +#[derive(Debug, Default)] +pub struct Version { + /// Protocol version of the blockchain data structures + pub block: u64, + /// Protocol version of the application + pub app: u64, +} + +impl From for Version { + fn from(proto_version: ProtoVersion) -> Version { + Version { + block: proto_version.Block, + app: proto_version.App, + } + } +} + +#[derive(Debug, Default)] +pub struct PartSetHeader { + pub total: i32, + pub hash: Vec, +} + +impl From for PartSetHeader { + fn from(proto_part_set_header: ProtoPartSetHeader) -> PartSetHeader { + PartSetHeader { + total: proto_part_set_header.total, + hash: proto_part_set_header.hash, + } + } +} + +#[derive(Debug, Default)] +pub struct BlockId { + pub hash: Vec, + pub parts_header: Option, +} + +impl From for BlockId { + fn from(proto_block_id: ProtoBlockId) -> BlockId { + BlockId { + hash: proto_block_id.hash, + parts_header: proto_block_id.parts_header.into_option().map(Into::into), + } + } +} + +#[derive(Debug, Default)] +pub struct Header { + /// Version of the blockchain and the application + pub version: Option, + /// ID of the blockchain + pub chain_id: String, + /// Height of the block in the chain + pub height: i64, + /// Time of the previous block. For heights > 1, it's the weighted median of the timestamps of the valid votes in + /// the `block.last_commit`. For height == 1, it's genesis time. 
(duration since epoch) + pub time: Option, + /// Hash of the previous (parent) block + pub last_block_id: Option, + /// Hash of the previous block's commit + pub last_commit_hash: Vec, + /// Hash if data in the block + pub data_hash: Vec, + /// Hash of the validator set for this block + pub validators_hash: Vec, + /// Hash of the validator set for the next block + pub next_validators_hash: Vec, + /// Hash of the consensus parameters for this block + pub consensus_hash: Vec, + /// Data returned by the last call to `Commit` - typically the Merkle root of the application state after executing + /// the previous block's transactions + pub app_hash: Vec, + /// Hash of the ABCI results returned by the last block + pub last_results_hash: Vec, + /// Hash of the evidence included in this block + pub evidence_hash: Vec, + /// Original proposer for the block + pub proposer_address: Vec, +} + +impl From for Header { + fn from(proto_header: ProtoHeader) -> Header { + Header { + version: proto_header.version.into_option().map(Into::into), + chain_id: proto_header.chain_id, + height: proto_header.height, + time: proto_header + .time + .into_option() + .map(|timestamp| Duration::new(timestamp.seconds as u64, timestamp.nanos as u32)), + last_block_id: proto_header.last_block_id.into_option().map(Into::into), + last_commit_hash: proto_header.last_commit_hash, + data_hash: proto_header.data_hash, + validators_hash: proto_header.validators_hash, + next_validators_hash: proto_header.next_validators_hash, + consensus_hash: proto_header.consensus_hash, + app_hash: proto_header.app_hash, + last_results_hash: proto_header.last_results_hash, + evidence_hash: proto_header.evidence_hash, + proposer_address: proto_header.proposer_address, + } + } +} + +#[derive(Debug, Default)] +pub struct Validator { + /// Address of the validator (hash of the public key) + pub address: Vec, + /// Voting power of the validator + pub power: i64, +} + +impl From for Validator { + fn from(proto_validator: ProtoValidator) -> Validator { + Validator { + address: proto_validator.address, + power: proto_validator.power, + } + } +} + +#[derive(Debug, Default)] +pub struct VoteInfo { + /// A validator + pub validator: Option, + /// Indicates whether or not the validator signed the last block + pub signed_last_block: bool, +} + +impl From for VoteInfo { + fn from(proto_vote_info: ProtoVoteInfo) -> VoteInfo { + VoteInfo { + validator: proto_vote_info.validator.into_option().map(Into::into), + signed_last_block: proto_vote_info.signed_last_block, + } + } +} + +#[derive(Debug, Default)] +pub struct LastCommitInfo { + /// Commit round + pub round: i32, + /// List of validators addresses in the last validator set with their voting power and whether or not they signed a + /// vote. + pub votes: Vec, +} + +impl From for LastCommitInfo { + fn from(proto_last_commit_info: ProtoLastCommitInfo) -> LastCommitInfo { + LastCommitInfo { + round: proto_last_commit_info.round, + votes: proto_last_commit_info + .votes + .into_iter() + .map(Into::into) + .collect(), + } + } +} + +#[derive(Debug, Default)] +pub struct Evidence { + /// Type of the evidence. A hierarchical path like "duplicate/vote". + pub evidence_type: String, + /// The offending validator + pub validator: Option, + /// Height when the offense was committed + pub height: i64, + /// Time of the block at height Height. 
It is the proposer's local time when block was created (duration since + /// epoch) + pub time: Option, + /// Total voting power of the validator set at `height` + pub total_voting_power: i64, +} + +impl From for Evidence { + fn from(proto_evidence: ProtoEvidence) -> Evidence { + Evidence { + evidence_type: proto_evidence.field_type, + validator: proto_evidence.validator.into_option().map(Into::into), + height: proto_evidence.height, + time: proto_evidence + .time + .into_option() + .map(|timestamp| Duration::new(timestamp.seconds as u64, timestamp.nanos as u32)), + total_voting_power: proto_evidence.total_voting_power, + } + } +} + +#[derive(Debug, Default)] +pub struct KeyValuePair { + /// Key + pub key: Vec, + /// Value + pub value: Vec, +} + +impl From for ProtoKeyValuePair { + fn from(pair: KeyValuePair) -> ProtoKeyValuePair { + let mut proto_pair = ProtoKeyValuePair::new(); + proto_pair.key = pair.key; + proto_pair.value = pair.value; + proto_pair + } +} + +#[derive(Debug, Default)] +pub struct Event { + /// Event type + pub event_type: String, + /// Attributes + pub attributes: Vec, +} + +impl From for ProtoEvent { + fn from(event: Event) -> ProtoEvent { + let mut proto_event = ProtoEvent::new(); + proto_event.field_type = event.event_type; + proto_event.attributes = event + .attributes + .into_iter() + .map(Into::into) + .collect::>() + .into(); + proto_event + } +} diff --git a/src/types/query.rs b/src/types/query.rs new file mode 100644 index 0000000..d079246 --- /dev/null +++ b/src/types/query.rs @@ -0,0 +1,85 @@ +use crate::proto::abci::{RequestQuery, ResponseQuery}; +use crate::types::{Proof, Result}; + +#[derive(Debug, Default)] +pub struct QueryRequest { + /// Raw query bytes (can be used with or in lieu of `path`) + pub data: Vec, + /// Path of request, like an HTTP GET path (can be used with or in lieu of `data`) + /// + /// # Note + /// + /// - Apps MUST interpret '/store' as a query by key on the underlying store. The key SHOULD be specified in the + /// `data` field + /// - Apps SHOULD allow queries over specific types like '/accounts/...' or '/votes/...' 
+ pub path: String, + /// Block height for which you want the query (default=0 returns data for the latest committed block) + /// + /// # Note + /// + /// This is the height of the block containing the application's Merkle root hash, which represents the state as it + /// was after committing the block at `height-1` + pub height: i64, + /// Return Merkle proof with response if possible + pub prove: bool, +} + +impl From for QueryRequest { + fn from(request_query: RequestQuery) -> QueryRequest { + QueryRequest { + data: request_query.data, + path: request_query.path, + height: request_query.height, + prove: request_query.prove, + } + } +} + +#[derive(Debug, Default)] +pub struct QueryResponse { + /// Output of application's logger (may be non-deterministic) + pub log: String, + /// Additional information (may be non-deterministic) + pub info: String, + /// Index of the key in the tree + pub index: i64, + /// Key of the matching data + pub key: Vec, + /// Value of the matching data + pub value: Vec, + /// Serialized proof for the value data, if requested, to be verified against the app_hash for the given height + pub proof: Option, + /// Block height from which data was derived + /// + /// # Note + /// + /// this is the height of the block containing the application's Merkle root hash, which represents the state as it + /// was after committing the block at `height-1` + pub height: i64, +} + +impl From> for ResponseQuery { + fn from(query_response: Result) -> ResponseQuery { + let mut response_query = ResponseQuery::new(); + + match query_response { + Ok(query_response) => { + response_query.log = query_response.log; + response_query.info = query_response.info; + response_query.index = query_response.index; + response_query.key = query_response.key; + response_query.value = query_response.value; + response_query.proof = query_response.proof.map(Into::into).into(); + response_query.height = query_response.height; + } + Err(error) => { + response_query.code = error.code; + response_query.codespace = error.codespace; + response_query.log = error.log; + response_query.info = error.info; + } + } + + response_query + } +} diff --git a/src/types/set_option.rs b/src/types/set_option.rs new file mode 100644 index 0000000..172489d --- /dev/null +++ b/src/types/set_option.rs @@ -0,0 +1,47 @@ +use crate::proto::abci::{RequestSetOption, ResponseSetOption}; +use crate::types::Result; + +#[derive(Debug, Default)] +pub struct SetOptionRequest { + /// Key to set + pub key: String, + /// Value to set for key + pub value: String, +} + +impl From for SetOptionRequest { + fn from(request_set_option: RequestSetOption) -> SetOptionRequest { + SetOptionRequest { + key: request_set_option.key, + value: request_set_option.value, + } + } +} + +#[derive(Debug, Default)] +pub struct SetOptionResponse { + /// Output of application's logger (may be non-deterministic) + pub log: String, + /// Additional information (may be non-deterministic) + pub info: String, +} + +impl From> for ResponseSetOption { + fn from(set_option_response: Result) -> ResponseSetOption { + let mut response_set_option = ResponseSetOption::new(); + + match set_option_response { + Ok(set_option_response) => { + response_set_option.log = set_option_response.log; + response_set_option.info = set_option_response.info; + } + Err(error) => { + response_set_option.code = error.code; + response_set_option.log = error.log; + response_set_option.info = error.info; + } + } + + response_set_option + } +} diff --git a/version.txt b/version.txt index 
c9130e6..c591106 100644 --- a/version.txt +++ b/version.txt @@ -1,6 +1,6 @@ - Version string = TMCoreSemVer + Version = TMCoreSemVer // TMCoreSemVer is the current version of Tendermint Core. - TMCoreSemVer = "0.32.8" + TMCoreSemVer = "0.33.0" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.16.1" ABCIVersion = ABCISemVer From 2c0f6cad8289da042de75924bc9a479b7185dd6e Mon Sep 17 00:00:00 2001 From: Devashish Dixit Date: Wed, 4 Mar 2020 11:10:05 +0800 Subject: [PATCH 2/2] Get initialization status of ABCI Server from user --- examples/counter.rs | 2 +- src/lib.rs | 2 +- src/proto/abci.rs | 40 ++++++++++++++++++++-------------------- src/proto/merkle.rs | 4 ++-- src/proto/types.rs | 6 +++--- src/server.rs | 33 +++++++++++++++++++-------------- 6 files changed, 46 insertions(+), 41 deletions(-) diff --git a/examples/counter.rs b/examples/counter.rs index 92b3f2c..6af1d60 100644 --- a/examples/counter.rs +++ b/examples/counter.rs @@ -171,7 +171,7 @@ async fn main() -> std::io::Result<()> { let mempool = MempoolConnection::new(current_state.clone()); let info = InfoConnection::new(committed_state.clone()); - let server = Server::new(consensus, mempool, info); + let server = Server::new(consensus, mempool, info, false); server .run("127.0.0.1:26658".parse::().unwrap()) diff --git a/src/lib.rs b/src/lib.rs index 37a5fac..6ffdc4d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,7 +25,7 @@ //! //! ```toml //! [dependencies] -//! abci = "0.7" +//! abci = "0.8" //! ``` //! //! Each ABCI application has to implement three core traits corresponding to all three ABCI connections, `Consensus`, diff --git a/src/proto/abci.rs b/src/proto/abci.rs index be433bb..30fb06f 100644 --- a/src/proto/abci.rs +++ b/src/proto/abci.rs @@ -11706,15 +11706,15 @@ static file_descriptor_proto_data: &'static [u8] = b"\ \x15\n\x0bp2p_version\x18\x03\x20\x01(\x04B\0:\0\"4\n\x10RequestSetOptio\ n\x12\r\n\x03key\x18\x01\x20\x01(\tB\0\x12\x0f\n\x05value\x18\x02\x20\ \x01(\tB\0:\0\"\xfd\x01\n\x10RequestInitChain\x122\n\x04time\x18\x01\x20\ - \x01(\x0b2\x1a.google.protobuf.TimestampB\x08\x90\xdf\x1f\x01\xc8\xde\ - \x1f\0\x12\x12\n\x08chain_id\x18\x02\x20\x01(\tB\0\x12B\n\x10consensus_p\ - arams\x18\x03\x20\x01(\x0b2&.tendermint.abci.types.ConsensusParamsB\0\ - \x12@\n\nvalidators\x18\x04\x20\x03(\x0b2&.tendermint.abci.types.Validat\ - orUpdateB\x04\xc8\xde\x1f\0\x12\x19\n\x0fapp_state_bytes\x18\x05\x20\x01\ - (\x0cB\0:\0\"S\n\x0cRequestQuery\x12\x0e\n\x04data\x18\x01\x20\x01(\x0cB\ - \0\x12\x0e\n\x04path\x18\x02\x20\x01(\tB\0\x12\x10\n\x06height\x18\x03\ - \x20\x01(\x03B\0\x12\x0f\n\x05prove\x18\x04\x20\x01(\x08B\0:\0\"\xe6\x01\ - \n\x11RequestBeginBlock\x12\x0e\n\x04hash\x18\x01\x20\x01(\x0cB\0\x123\n\ + \x01(\x0b2\x1a.google.protobuf.TimestampB\x08\xc8\xde\x1f\0\x90\xdf\x1f\ + \x01\x12\x12\n\x08chain_id\x18\x02\x20\x01(\tB\0\x12B\n\x10consensus_par\ + ams\x18\x03\x20\x01(\x0b2&.tendermint.abci.types.ConsensusParamsB\0\x12@\ + \n\nvalidators\x18\x04\x20\x03(\x0b2&.tendermint.abci.types.ValidatorUpd\ + ateB\x04\xc8\xde\x1f\0\x12\x19\n\x0fapp_state_bytes\x18\x05\x20\x01(\x0c\ + B\0:\0\"S\n\x0cRequestQuery\x12\x0e\n\x04data\x18\x01\x20\x01(\x0cB\0\ + \x12\x0e\n\x04path\x18\x02\x20\x01(\tB\0\x12\x10\n\x06height\x18\x03\x20\ + \x01(\x03B\0\x12\x0f\n\x05prove\x18\x04\x20\x01(\x08B\0:\0\"\xe6\x01\n\ + \x11RequestBeginBlock\x12\x0e\n\x04hash\x18\x01\x20\x01(\x0cB\0\x123\n\ \x06header\x18\x02\x20\x01(\x0b2\x1d.tendermint.abci.types.HeaderB\x04\ 
\xc8\xde\x1f\0\x12E\n\x10last_commit_info\x18\x03\x20\x01(\x0b2%.tenderm\ int.abci.types.LastCommitInfoB\x04\xc8\xde\x1f\0\x12C\n\x14byzantine_val\ @@ -11757,25 +11757,25 @@ static file_descriptor_proto_data: &'static [u8] = b"\ 2\x1f.tendermint.crypto.merkle.ProofB\0\x12\x10\n\x06height\x18\t\x20\ \x01(\x03B\0\x12\x13\n\tcodespace\x18\n\x20\x01(\tB\0:\0\"^\n\x12Respons\ eBeginBlock\x12F\n\x06events\x18\x01\x20\x03(\x0b2\x1c.tendermint.abci.t\ - ypes.EventB\x18\xc8\xde\x1f\0\xea\xde\x1f\x10events,omitempty:\0\"\xd9\ + ypes.EventB\x18\xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0:\0\"\xd9\ \x01\n\x0fResponseCheckTx\x12\x0e\n\x04code\x18\x01\x20\x01(\rB\0\x12\ \x0e\n\x04data\x18\x02\x20\x01(\x0cB\0\x12\r\n\x03log\x18\x03\x20\x01(\t\ B\0\x12\x0e\n\x04info\x18\x04\x20\x01(\tB\0\x12\x14\n\ngas_wanted\x18\ \x05\x20\x01(\x03B\0\x12\x12\n\x08gas_used\x18\x06\x20\x01(\x03B\0\x12F\ \n\x06events\x18\x07\x20\x03(\x0b2\x1c.tendermint.abci.types.EventB\x18\ - \xc8\xde\x1f\0\xea\xde\x1f\x10events,omitempty\x12\x13\n\tcodespace\x18\ + \xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0\x12\x13\n\tcodespace\x18\ \x08\x20\x01(\tB\0:\0\"\xdb\x01\n\x11ResponseDeliverTx\x12\x0e\n\x04code\ \x18\x01\x20\x01(\rB\0\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0\x12\r\n\ \x03log\x18\x03\x20\x01(\tB\0\x12\x0e\n\x04info\x18\x04\x20\x01(\tB\0\ \x12\x14\n\ngas_wanted\x18\x05\x20\x01(\x03B\0\x12\x12\n\x08gas_used\x18\ \x06\x20\x01(\x03B\0\x12F\n\x06events\x18\x07\x20\x03(\x0b2\x1c.tendermi\ - nt.abci.types.EventB\x18\xc8\xde\x1f\0\xea\xde\x1f\x10events,omitempty\ + nt.abci.types.EventB\x18\xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0\ \x12\x13\n\tcodespace\x18\x08\x20\x01(\tB\0:\0\"\xf0\x01\n\x10ResponseEn\ dBlock\x12G\n\x11validator_updates\x18\x01\x20\x03(\x0b2&.tendermint.abc\ i.types.ValidatorUpdateB\x04\xc8\xde\x1f\0\x12I\n\x17consensus_param_upd\ ates\x18\x02\x20\x01(\x0b2&.tendermint.abci.types.ConsensusParamsB\0\x12\ F\n\x06events\x18\x03\x20\x03(\x0b2\x1c.tendermint.abci.types.EventB\x18\ - \xea\xde\x1f\x10events,omitempty\xc8\xde\x1f\0:\0\"\"\n\x0eResponseCommi\ + \xc8\xde\x1f\0\xea\xde\x1f\x10events,omitempty:\0\"\"\n\x0eResponseCommi\ t\x12\x0e\n\x04data\x18\x02\x20\x01(\x0cB\0:\0\"\xc0\x01\n\x0fConsensusP\ arams\x123\n\x05block\x18\x01\x20\x01(\x0b2\".tendermint.abci.types.Bloc\ kParamsB\0\x129\n\x08evidence\x18\x02\x20\x01(\x0b2%.tendermint.abci.typ\ @@ -11789,15 +11789,15 @@ static file_descriptor_proto_data: &'static [u8] = b"\ ommitInfo\x12\x0f\n\x05round\x18\x01\x20\x01(\x05B\0\x124\n\x05votes\x18\ \x02\x20\x03(\x0b2\x1f.tendermint.abci.types.VoteInfoB\x04\xc8\xde\x1f\0\ :\0\"e\n\x05Event\x12\x0e\n\x04type\x18\x01\x20\x01(\tB\0\x12J\n\nattrib\ - utes\x18\x02\x20\x03(\x0b2\x18.tendermint.libs.kv.PairB\x1c\xc8\xde\x1f\ - \0\xea\xde\x1f\x14attributes,omitempty:\0\"\xcf\x03\n\x06Header\x125\n\ + utes\x18\x02\x20\x03(\x0b2\x18.tendermint.libs.kv.PairB\x1c\xea\xde\x1f\ + \x14attributes,omitempty\xc8\xde\x1f\0:\0\"\xcf\x03\n\x06Header\x125\n\ \x07version\x18\x01\x20\x01(\x0b2\x1e.tendermint.abci.types.VersionB\x04\ \xc8\xde\x1f\0\x12\x1d\n\x08chain_id\x18\x02\x20\x01(\tB\x0b\xe2\xde\x1f\ \x07ChainID\x12\x10\n\x06height\x18\x03\x20\x01(\x03B\0\x122\n\x04time\ - \x18\x04\x20\x01(\x0b2\x1a.google.protobuf.TimestampB\x08\x90\xdf\x1f\ - \x01\xc8\xde\x1f\0\x12;\n\rlast_block_id\x18\x05\x20\x01(\x0b2\x1e.tende\ - rmint.abci.types.BlockIDB\x04\xc8\xde\x1f\0\x12\x1a\n\x10last_commit_has\ - h\x18\x06\x20\x01(\x0cB\0\x12\x13\n\tdata_hash\x18\x07\x20\x01(\x0cB\0\ + 
\x18\x04\x20\x01(\x0b2\x1a.google.protobuf.TimestampB\x08\xc8\xde\x1f\0\ + \x90\xdf\x1f\x01\x12;\n\rlast_block_id\x18\x05\x20\x01(\x0b2\x1e.tenderm\ + int.abci.types.BlockIDB\x04\xc8\xde\x1f\0\x12\x1a\n\x10last_commit_hash\ + \x18\x06\x20\x01(\x0cB\0\x12\x13\n\tdata_hash\x18\x07\x20\x01(\x0cB\0\ \x12\x19\n\x0fvalidators_hash\x18\x08\x20\x01(\x0cB\0\x12\x1e\n\x14next_\ validators_hash\x18\t\x20\x01(\x0cB\0\x12\x18\n\x0econsensus_hash\x18\n\ \x20\x01(\x0cB\0\x12\x12\n\x08app_hash\x18\x0b\x20\x01(\x0cB\0\x12\x1b\n\ @@ -11822,7 +11822,7 @@ static file_descriptor_proto_data: &'static [u8] = b"\ e\x18\x04\x20\x01(\x0b2\x1a.google.protobuf.TimestampB\x08\xc8\xde\x1f\0\ \x90\xdf\x1f\x01\x12\x1c\n\x12total_voting_power\x18\x05\x20\x01(\x03B\0\ :\0*%\n\x0bCheckTxType\x12\x07\n\x03New\x10\0\x12\x0b\n\x07Recheck\x10\ - \x01\x1a\0B\x1c\xc0\xe3\x1e\x01\xd0\xe2\x1e\x01\xc8\xe2\x1e\x01\xe0\xe2\ + \x01\x1a\0B\x1c\xc0\xe3\x1e\x01\xc8\xe2\x1e\x01\xd0\xe2\x1e\x01\xe0\xe2\ \x1e\x01\xf8\xe1\x1e\x01\xa8\xe2\x1e\x01\xb8\xe2\x1e\x01b\x06proto3\ "; diff --git a/src/proto/merkle.rs b/src/proto/merkle.rs index ac05de4..c858cec 100644 --- a/src/proto/merkle.rs +++ b/src/proto/merkle.rs @@ -460,8 +460,8 @@ static file_descriptor_proto_data: &'static [u8] = b"\ \x07ProofOp\x12\x0e\n\x04type\x18\x01\x20\x01(\tB\0\x12\r\n\x03key\x18\ \x02\x20\x01(\x0cB\0\x12\x0e\n\x04data\x18\x03\x20\x01(\x0cB\0:\0\"?\n\ \x05Proof\x124\n\x03ops\x18\x01\x20\x03(\x0b2!.tendermint.crypto.merkle.\ - ProofOpB\x04\xc8\xde\x1f\0:\0B\x14\xa8\xe2\x1e\x01\xd0\xe2\x1e\x01\xe0\ - \xe2\x1e\x01\xc8\xe2\x1e\x01\xf8\xe1\x1e\x01b\x06proto3\ + ProofOpB\x04\xc8\xde\x1f\0:\0B\x14\xe0\xe2\x1e\x01\xc8\xe2\x1e\x01\xf8\ + \xe1\x1e\x01\xa8\xe2\x1e\x01\xd0\xe2\x1e\x01b\x06proto3\ "; static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { diff --git a/src/proto/types.rs b/src/proto/types.rs index 6136b77..1145569 100644 --- a/src/proto/types.rs +++ b/src/proto/types.rs @@ -445,9 +445,9 @@ static file_descriptor_proto_data: &'static [u8] = b"\ \n\x13libs/kv/types.proto\x12\x12tendermint.libs.kv\"(\n\x04Pair\x12\r\n\ \x03key\x18\x01\x20\x01(\x0cB\0\x12\x0f\n\x05value\x18\x02\x20\x01(\x0cB\ \0:\0\",\n\x08KI64Pair\x12\r\n\x03key\x18\x01\x20\x01(\x0cB\0\x12\x0f\n\ - \x05value\x18\x02\x20\x01(\x03B\0:\0B\x1c\xc0\xe3\x1e\x01\xc8\xe2\x1e\ - \x01\xf8\xe1\x1e\x01\xd0\xe2\x1e\x01\xa8\xe2\x1e\x01\xb8\xe2\x1e\x01\xe0\ - \xe2\x1e\x01b\x06proto3\ + \x05value\x18\x02\x20\x01(\x03B\0:\0B\x1c\xa8\xe2\x1e\x01\xc0\xe3\x1e\ + \x01\xc8\xe2\x1e\x01\xe0\xe2\x1e\x01\xd0\xe2\x1e\x01\xb8\xe2\x1e\x01\xf8\ + \xe1\x1e\x01b\x06proto3\ "; static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { diff --git a/src/server.rs b/src/server.rs index 251e714..8a72dbc 100644 --- a/src/server.rs +++ b/src/server.rs @@ -48,13 +48,22 @@ where I: Info + 'static, { /// Creates a new instance of [`Server`](struct.Server.html) - #[inline] - pub fn new(consensus: C, mempool: M, info: I) -> Self { + /// + /// # Note + /// + /// Set `is_initialized` to `true` if your application is already initialized by `init_chain`, `false` otherwise. 
+ pub fn new(consensus: C, mempool: M, info: I, is_initialized: bool) -> Self { + let consensus_state = if is_initialized { + ConsensusState::Initialized + } else { + ConsensusState::NotInitialized + }; + Self { consensus: Arc::new(consensus), mempool: Arc::new(mempool), info: Arc::new(info), - consensus_state: Arc::new(Mutex::new(ConsensusState::default())), + consensus_state: Arc::new(Mutex::new(consensus_state)), } } @@ -225,6 +234,8 @@ where #[derive(Debug, Clone, Copy)] pub enum ConsensusState { + NotInitialized, + Initialized, InitChain, BeginBlock, DeliverTx, @@ -232,17 +243,11 @@ pub enum ConsensusState { Commit, } -impl Default for ConsensusState { - #[inline] - fn default() -> Self { - ConsensusState::InitChain - } -} - impl ConsensusState { - pub fn validate(&mut self, mut next: ConsensusState) { - let is_valid = match (&self, next) { - (ConsensusState::InitChain, ConsensusState::InitChain) => true, + pub fn validate(&mut self, next: ConsensusState) { + let is_valid = match (*self, next) { + (ConsensusState::NotInitialized, ConsensusState::InitChain) => true, + (ConsensusState::Initialized, ConsensusState::BeginBlock) => true, (ConsensusState::InitChain, ConsensusState::BeginBlock) => true, (ConsensusState::BeginBlock, ConsensusState::DeliverTx) => true, (ConsensusState::BeginBlock, ConsensusState::EndBlock) => true, @@ -254,7 +259,7 @@ impl ConsensusState { }; if is_valid { - std::mem::swap(self, &mut next); + *self = next; } else { panic!("{:?} cannot be called after {:?}", next, self); }
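To show how the reworked constructor is meant to be driven, here is a minimal wiring sketch in the spirit of `examples/counter.rs`. `MyConsensus`, `MyMempool` and `MyInfo` are hypothetical placeholders for types implementing the `Consensus`, `Mempool` and `Info` traits, and the `abci::Server` import path and `#[tokio::main]` attribute are assumptions (any executor able to drive the returned `Future`, such as `async-std`, works equally well):

```rust
// Illustrative sketch only; not part of this patch.
use std::net::SocketAddr;

use abci::Server; // assumed re-export of the server type

#[tokio::main] // with the default `tokio` feature
async fn main() -> std::io::Result<()> {
    // `MyConsensus`, `MyMempool` and `MyInfo` are hypothetical application
    // types implementing the `Consensus`, `Mempool` and `Info` traits.
    let server = Server::new(
        MyConsensus::default(),
        MyMempool::default(),
        MyInfo::default(),
        // `false`: the chain has not been initialized by `init_chain` yet, so
        // the server starts in `ConsensusState::NotInitialized`; pass `true`
        // for an application restarting with existing state, which makes
        // `begin_block` the first accepted consensus call.
        false,
    );

    // `run` accepts anything convertible into `Address`: a `SocketAddr`
    // selects a TCP listener, while (on unix targets) a `PathBuf` selects a
    // unix-domain-socket listener.
    server
        .run("127.0.0.1:26658".parse::<SocketAddr>().unwrap())
        .await
}
```

Passing a `PathBuf` instead of a `SocketAddr` switches the same call site to the new unix-socket transport.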