From 91c6491aebe34fc09a3f9e48f1a94380d2b16fa9 Mon Sep 17 00:00:00 2001 From: chunfengSun <516108736@qq.com> Date: Mon, 18 Dec 2023 11:19:34 +0800 Subject: [PATCH] fork6 nodev0.4.1 (#45) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * improve: adding config param ReadLimit in bytes for websockets, and using it on the websocket connection. Signed-off-by: Nikolay Nedkov * build(deps): bump github.com/jackc/pgconn from 1.14.0 to 1.14.1 (#2312) Bumps [github.com/jackc/pgconn](https://github.com/jackc/pgconn) from 1.14.0 to 1.14.1. - [Changelog](https://github.com/jackc/pgconn/blob/master/CHANGELOG.md) - [Commits](https://github.com/jackc/pgconn/compare/v1.14.0...v1.14.1) --- updated-dependencies: - dependency-name: github.com/jackc/pgconn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump github.com/rubenv/sql-migrate from 1.5.1 to 1.5.2 (#2313) Bumps [github.com/rubenv/sql-migrate](https://github.com/rubenv/sql-migrate) from 1.5.1 to 1.5.2. - [Commits](https://github.com/rubenv/sql-migrate/compare/v1.5.1...v1.5.2) --- updated-dependencies: - dependency-name: github.com/rubenv/sql-migrate dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * remove unnecessary config values and added RPC.WebSockets.ReadLimit default value unit test * add jRPC ReadLimit log error handling and e2e test * Release/v0.2.0 to develop (#2322) Merge release/0.2.0 * build(deps): bump github.com/go-git/go-git/v5 from 5.7.0 to 5.8.1 (#2332) Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.7.0 to 5.8.1. 
- [Release notes](https://github.com/go-git/go-git/releases) - [Commits](https://github.com/go-git/go-git/compare/v5.7.0...v5.8.1) --- updated-dependencies: - dependency-name: github.com/go-git/go-git/v5 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump google.golang.org/grpc from 1.56.2 to 1.57.0 (#2331) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.56.2 to 1.57.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.56.2...v1.57.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * improve: Add IP and ZKCounter Validation, Refactor Batch Configs Signed-off-by: Nikolay Nedkov * fixing comments Signed-off-by: Nikolay Nedkov * fixing rebase errors Signed-off-by: Nikolay Nedkov * improve: moving BatchConfig to state package Signed-off-by: Nikolay Nedkov * Merge/v0.2.1 into develop (#2334) * do not add tx to the pool in case err != nil * do not add tx into the pool if a fatal error in the executor happens during pre execution * fix estimate gas nonce computation to deal with sequencer concurrency (#2204) * Update README diagram (#2303) Update README diagram * fixing state_db env vars Signed-off-by: Nikolay Nedkov * Merge Release/v0.2.2 into develop (#2353) * fix null effective_percentage * fix forkID calculation * fix script * generate json-schema + docs for node config file and network_custom * fix unittest * Hotfixv0.1.4 to v0.2.0 (#2255) * Hotfix v0.1.4 to main (#2250) * fix concurrent web socket writes * fix eth_syncing * fix custom trace internal tx call error handling and update prover * add test to 
custom tracer depth issue; fix internal call error and gas used * fix custom tracer for internal tx with error and no more steps after it * remove debug code * Make max grpc message size configurable (#2179) * make max grpc message size configurable * fix state tests * fix tests * fix tests * get SequencerNodeURI from SC if empty and not IsTrustedSequencer * Optimize trace (#2183) * optimize trace * fix memory reading * update docker image * update prover image * fix converter * fix memory * fix step memory * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * update prover image * fix struclogs * fix memory size * fix memory size * fix memory size * refactor memory resize * refactor memory resize * move log for the best fitting tx (#2192) * fix load zkCounters from pool * remove unnecessary log.info * add custom tracer support to CREATES opcode without depth increase (#2213) * logs * fix getting stateroot from previous batch (GetWIPBatch) * logs * Fix GetWipBatch when previous last batch is a forced batch * fix forcedBatch trusted state * Revert "fix getting stateroot from previous batch (GetWIPBatch)" This reverts commit 860f0e74016219daf81f96b76f6b25609e1c66fd. 
* force GHA * add pool limits (#2189) * Hotfix/batch l2 data (#2223) * Fix BatchL2Data * Force GHA * remove failed txs from the pool limit check (#2233) * debug trace by batch number via external rpc requests (#2235) * fix trace batch remote requests in parallel limitation (#2244) * Added RPC.TraceBatchUseHTTPS config parameter * fix executor version --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * fix test * fix test --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * Effective GasPrice refactor+fixes (#2247) * effective GasPrice refactor * bugs fixes and finalizer tests fixes * fix typo * fix calculate effective gasprice percentage * fix test gas price * Fix/#2257 effective gas price receipt (#2258) * effective gas price returned by the rpc in the receipt * linter * bugfix: fixing l2blocks timestamp for the fist batch (#2260) * bugfix: fixing l2blocks timestamp for the fist batch Signed-off-by: Nikolay Nedkov * fix finalizer unit test --------- Signed-off-by: Nikolay Nedkov * add more comments, and removed fields PrivateKeyPath and PrivateKeyPassword from etherman.Config that are not in use * add info to git action * add info to git action * fix github action * updated comments * updated comments * Fix/#2263 gas used (#2264) * fix fea2scalar and gas used * suggestion * fix fea2scalar * suggestion * Fix pending tx when duplicate nonce (#2270) * fix pending tx when duplicate nonce * set pool.transaction.failed_reason to NULL when updating an existing tx * add more log details when adding tx to AddrQueue * fix query to add tx to the 
pool. Fix lint errors * change failed_reason for tx discarded due duplicate nonce * Only return a tx from the pool if tx is in pending status (#2273) * Return a tx from the pool only if it is * fix TestGetTransactionByHash --------- Co-authored-by: agnusmor * fix documentation with config file * improve: adding check to skip appending effectivePercentage if current forkId is under 5. Signed-off-by: Nikolay Nedkov * Fiex effectiveGasprice unsigned txs with forkId lower than 5 (#2278) * feat: adding functionality to stop sequencer on specific batch num from config param. Signed-off-by: Nikolay Nedkov * patch: adding print for X-Real-IP in JSON-RPC Signed-off-by: Nikolay Nedkov * Fix checkIfSynced (#2289) * [Rehashing] Check logs order and fix blockhash and blockNumber in the log conversion (#2280) * fix and check order * linter * flushID synchronizer (#2287) * FlushID in synchronizer * linter * fix logs * commnets * executor error refactor (#2299) * handle invalid rlp ROM error (#2297) * add maxL2GasPrice (#2294) * add maxL2GasPrice * fix * fix * add test * document parameter * update description * Error refactor (#2302) * error refactor * refactor * Fix replaced tx as failed when duplicated nonce (#2308) * Fix UpdateTxStatus for replacedTx * Fix adding tx with same nonce on AddrQueue * log reprocess need (#2309) * log reprocess need * Update finalizer.go * Feature/2300 synchronizer detect if executor restart (#2306) * detect if executor restarts and stop synchonizer * Update prover images (#2311) * update prover image * update prover images * change executor param * Update testnet.prover.config.json * Update test.permissionless.prover.config.json * Update test.prover.config.json * Update public.prover.config.json * prover params * prover params * prover params * update prover images * add doc, and fix dockers to be able to use snap/restore feature (#2315) * add doc, and fix dockers to be able to use snap/restore feature * add doc for snap/restore feature --------- 
Co-authored-by: Toni Ramírez * Update docker-compose.yml * Update docker-compose.yml * do not add tx to the pool in case err != nil * do not add tx into the pool if a fatal error in the executor happens during pre execution * fix dbMultiWriteSinglePosition config value * workarround for the error error closing batch * workarround for the error error closing batch * workarround for the error error closing batch * workaround for the error of closing batch, another case * `Worker`'s `AddTxTracker` Bug Fix (#2343) * bugfix: Resolve Function Bug in Worker Module Signed-off-by: Nikolay Nedkov * improve: improving the wait for pending txs to be for only the txs for the current address. Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * rename config files (#2349) * fix closing batch + logs (#2348) * fix closing batch + logs * fix * log description * typo errors * fix error: failed to store transactions for batch due to duplicate key * test * typo * Update README.md * Update release.yml * fix conflict --------- Signed-off-by: Nikolay Nedkov Co-authored-by: joanestebanr Co-authored-by: Alonso Rodriguez Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Nikolay Nedkov * build(deps): bump golang.org/x/net from 0.12.0 to 0.13.0 (#2354) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.12.0 to 0.13.0. - [Commits](https://github.com/golang/net/compare/v0.12.0...v0.13.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump go.uber.org/zap from 1.24.0 to 1.25.0 (#2361) Bumps [go.uber.org/zap](https://github.com/uber-go/zap) from 1.24.0 to 1.25.0. - [Release notes](https://github.com/uber-go/zap/releases) - [Changelog](https://github.com/uber-go/zap/blob/master/CHANGELOG.md) - [Commits](https://github.com/uber-go/zap/compare/v1.24.0...v1.25.0) --- updated-dependencies: - dependency-name: go.uber.org/zap dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Merge v0.2.4 (includes v0.2.3) into Develop (#2376) * fix null effective_percentage * fix forkID calculation * fix script * generate json-schema + docs for node config file and network_custom * fix unittest * Hotfixv0.1.4 to v0.2.0 (#2255) * Hotfix v0.1.4 to main (#2250) * fix concurrent web socket writes * fix eth_syncing * fix custom trace internal tx call error handling and update prover * add test to custom tracer depth issue; fix internal call error and gas used * fix custom tracer for internal tx with error and no more steps after it * remove debug code * Make max grpc message size configurable (#2179) * make max grpc message size configurable * fix state tests * fix tests * fix tests * get SequencerNodeURI from SC if empty and not IsTrustedSequencer * Optimize trace (#2183) * optimize trace * fix memory reading * update docker image * update prover image * fix converter * fix memory * fix step memory * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * update prover image * fix struclogs * fix memory size * fix memory size * fix memory size * refactor memory resize * refactor memory resize * move log for the best fitting tx (#2192) * fix load zkCounters from pool * 
remove unnecessary log.info * add custom tracer support to CREATES opcode without depth increase (#2213) * logs * fix getting stateroot from previous batch (GetWIPBatch) * logs * Fix GetWipBatch when previous last batch is a forced batch * fix forcedBatch trusted state * Revert "fix getting stateroot from previous batch (GetWIPBatch)" This reverts commit 860f0e74016219daf81f96b76f6b25609e1c66fd. * force GHA * add pool limits (#2189) * Hotfix/batch l2 data (#2223) * Fix BatchL2Data * Force GHA * remove failed txs from the pool limit check (#2233) * debug trace by batch number via external rpc requests (#2235) * fix trace batch remote requests in parallel limitation (#2244) * Added RPC.TraceBatchUseHTTPS config parameter * fix executor version --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * fix test * fix test --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * Effective GasPrice refactor+fixes (#2247) * effective GasPrice refactor * bugs fixes and finalizer tests fixes * fix typo * fix calculate effective gasprice percentage * fix test gas price * Fix/#2257 effective gas price receipt (#2258) * effective gas price returned by the rpc in the receipt * linter * bugfix: fixing l2blocks timestamp for the fist batch (#2260) * bugfix: fixing l2blocks timestamp for the fist batch Signed-off-by: Nikolay Nedkov * fix finalizer unit test --------- Signed-off-by: Nikolay Nedkov * add more comments, and removed fields PrivateKeyPath and PrivateKeyPassword from etherman.Config that are not in use * add info to git action * add info to git action 
* fix github action * updated comments * updated comments * Fix/#2263 gas used (#2264) * fix fea2scalar and gas used * suggestion * fix fea2scalar * suggestion * Fix pending tx when duplicate nonce (#2270) * fix pending tx when duplicate nonce * set pool.transaction.failed_reason to NULL when updating an existing tx * add more log details when adding tx to AddrQueue * fix query to add tx to the pool. Fix lint errors * change failed_reason for tx discarded due duplicate nonce * Only return a tx from the pool if tx is in pending status (#2273) * Return a tx from the pool only if it is * fix TestGetTransactionByHash --------- Co-authored-by: agnusmor * fix documentation with config file * improve: adding check to skip appending effectivePercentage if current forkId is under 5. Signed-off-by: Nikolay Nedkov * Fiex effectiveGasprice unsigned txs with forkId lower than 5 (#2278) * feat: adding functionality to stop sequencer on specific batch num from config param. Signed-off-by: Nikolay Nedkov * patch: adding print for X-Real-IP in JSON-RPC Signed-off-by: Nikolay Nedkov * Fix checkIfSynced (#2289) * [Rehashing] Check logs order and fix blockhash and blockNumber in the log conversion (#2280) * fix and check order * linter * flushID synchronizer (#2287) * FlushID in synchronizer * linter * fix logs * commnets * executor error refactor (#2299) * handle invalid rlp ROM error (#2297) * add maxL2GasPrice (#2294) * add maxL2GasPrice * fix * fix * add test * document parameter * update description * Error refactor (#2302) * error refactor * refactor * Fix replaced tx as failed when duplicated nonce (#2308) * Fix UpdateTxStatus for replacedTx * Fix adding tx with same nonce on AddrQueue * log reprocess need (#2309) * log reprocess need * Update finalizer.go * Feature/2300 synchronizer detect if executor restart (#2306) * detect if executor restarts and stop synchonizer * Update prover images (#2311) * update prover image * update prover images * change executor param * Update 
testnet.prover.config.json * Update test.permissionless.prover.config.json * Update test.prover.config.json * Update public.prover.config.json * prover params * prover params * prover params * update prover images * add doc, and fix dockers to be able to use snap/restore feature (#2315) * add doc, and fix dockers to be able to use snap/restore feature * add doc for snap/restore feature --------- Co-authored-by: Toni Ramírez * Update docker-compose.yml * Update docker-compose.yml * do not add tx to the pool in case err != nil * do not add tx into the pool if a fatal error in the executor happens during pre execution * fix dbMultiWriteSinglePosition config value * workarround for the error error closing batch * workarround for the error error closing batch * workarround for the error error closing batch * workaround for the error of closing batch, another case * `Worker`'s `AddTxTracker` Bug Fix (#2343) * bugfix: Resolve Function Bug in Worker Module Signed-off-by: Nikolay Nedkov * improve: improving the wait for pending txs to be for only the txs for the current address. Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * rename config files (#2349) * fix closing batch + logs (#2348) * fix closing batch + logs * fix * log description * typo errors * fix error: failed to store transactions for batch due to duplicate key * test * typo * Update README.md * Update release.yml * bugfix: fixing place where we need to increment the wg per address for pending txs Signed-off-by: Nikolay Nedkov * Store batchL2Data when the batch is opened (#2358) * add GasPriceMarginFactor and MaxGasPrice to eth-tx-manager (#2360) * add GasPriceMarginFactor and MaxGasPrice to eth-tx-manager * add logs, fix config * update config file documentation --------- Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> * bugfix: attaching missing TxTracker.From to pending txs to store for forced batches. 
(#2365) Signed-off-by: Nikolay Nedkov * Update README.md * improve: adding logs (#2373) * improve: adding logs Signed-off-by: Nikolay Nedkov * adding more logs Signed-off-by: Nikolay Nedkov * adding more logs #2 Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * bugfix: fixing finalizer's handling. (#2375) Signed-off-by: Nikolay Nedkov * Update README.md --------- Signed-off-by: Nikolay Nedkov Co-authored-by: joanestebanr Co-authored-by: Alonso Rodriguez Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Nikolay Nedkov * Adaptation to new HashDB interface (#2367) * change hashdb go package * new hashdb interface * aggregator pb refactor * new prover image * change prover config * update prover image * update to latest proto and prover image * build(deps): bump golang.org/x/net from 0.13.0 to 0.14.0 (#2380) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.13.0 to 0.14.0. - [Commits](https://github.com/golang/net/compare/v0.13.0...v0.14.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix script (#2383) * avoid double unlock in sequencer AddTxTracker (#2285) * Fix typos (#2267) * fix http request instance null for websocket requests (#2385) * Feature/2362 state use field batch l2data of processing context v2 (#2390) * close #2390: removed field encodedTxs from func ProcessAndStoreClosedBatch * build(deps): bump github.com/ethereum/go-ethereum from 1.12.0 to 1.12.1 (#2402) Bumps [github.com/ethereum/go-ethereum](https://github.com/ethereum/go-ethereum) from 1.12.0 to 1.12.1. 
- [Release notes](https://github.com/ethereum/go-ethereum/releases) - [Commits](https://github.com/ethereum/go-ethereum/compare/v1.12.0...v1.12.1) --- updated-dependencies: - dependency-name: github.com/ethereum/go-ethereum dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * test: adding benchmark test script for uniswap Signed-off-by: Nikolay Nedkov * test: fixing things and adding make entry for the e2e test. Signed-off-by: Nikolay Nedkov * fix ws subscribe to get filtered log notifications (#2396) * bugfix: adding back the 'State' section in test.node.config.toml to fix env vars resolving for 'permisionless-node'. Signed-off-by: Nikolay Nedkov * Merge/v0.2.5 (#2413) * fix null effective_percentage * fix forkID calculation * fix script * generate json-schema + docs for node config file and network_custom * fix unittest * Hotfixv0.1.4 to v0.2.0 (#2255) * Hotfix v0.1.4 to main (#2250) * fix concurrent web socket writes * fix eth_syncing * fix custom trace internal tx call error handling and update prover * add test to custom tracer depth issue; fix internal call error and gas used * fix custom tracer for internal tx with error and no more steps after it * remove debug code * Make max grpc message size configurable (#2179) * make max grpc message size configurable * fix state tests * fix tests * fix tests * get SequencerNodeURI from SC if empty and not IsTrustedSequencer * Optimize trace (#2183) * optimize trace * fix memory reading * update docker image * update prover image * fix converter * fix memory * fix step memory * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * update prover image * fix struclogs * fix memory size * fix memory size * fix memory size * refactor memory resize * refactor memory resize * move log for the best 
fitting tx (#2192) * fix load zkCounters from pool * remove unnecessary log.info * add custom tracer support to CREATES opcode without depth increase (#2213) * logs * fix getting stateroot from previous batch (GetWIPBatch) * logs * Fix GetWipBatch when previous last batch is a forced batch * fix forcedBatch trusted state * Revert "fix getting stateroot from previous batch (GetWIPBatch)" This reverts commit 860f0e74016219daf81f96b76f6b25609e1c66fd. * force GHA * add pool limits (#2189) * Hotfix/batch l2 data (#2223) * Fix BatchL2Data * Force GHA * remove failed txs from the pool limit check (#2233) * debug trace by batch number via external rpc requests (#2235) * fix trace batch remote requests in parallel limitation (#2244) * Added RPC.TraceBatchUseHTTPS config parameter * fix executor version --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * fix test * fix test --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * Effective GasPrice refactor+fixes (#2247) * effective GasPrice refactor * bugs fixes and finalizer tests fixes * fix typo * fix calculate effective gasprice percentage * fix test gas price * Fix/#2257 effective gas price receipt (#2258) * effective gas price returned by the rpc in the receipt * linter * bugfix: fixing l2blocks timestamp for the fist batch (#2260) * bugfix: fixing l2blocks timestamp for the fist batch Signed-off-by: Nikolay Nedkov * fix finalizer unit test --------- Signed-off-by: Nikolay Nedkov * add more comments, and removed fields PrivateKeyPath and PrivateKeyPassword from etherman.Config that are not in 
use * add info to git action * add info to git action * fix github action * updated comments * updated comments * Fix/#2263 gas used (#2264) * fix fea2scalar and gas used * suggestion * fix fea2scalar * suggestion * Fix pending tx when duplicate nonce (#2270) * fix pending tx when duplicate nonce * set pool.transaction.failed_reason to NULL when updating an existing tx * add more log details when adding tx to AddrQueue * fix query to add tx to the pool. Fix lint errors * change failed_reason for tx discarded due duplicate nonce * Only return a tx from the pool if tx is in pending status (#2273) * Return a tx from the pool only if it is * fix TestGetTransactionByHash --------- Co-authored-by: agnusmor * fix documentation with config file * improve: adding check to skip appending effectivePercentage if current forkId is under 5. Signed-off-by: Nikolay Nedkov * Fiex effectiveGasprice unsigned txs with forkId lower than 5 (#2278) * feat: adding functionality to stop sequencer on specific batch num from config param. 
Signed-off-by: Nikolay Nedkov * patch: adding print for X-Real-IP in JSON-RPC Signed-off-by: Nikolay Nedkov * Fix checkIfSynced (#2289) * [Rehashing] Check logs order and fix blockhash and blockNumber in the log conversion (#2280) * fix and check order * linter * flushID synchronizer (#2287) * FlushID in synchronizer * linter * fix logs * commnets * executor error refactor (#2299) * handle invalid rlp ROM error (#2297) * add maxL2GasPrice (#2294) * add maxL2GasPrice * fix * fix * add test * document parameter * update description * Error refactor (#2302) * error refactor * refactor * Fix replaced tx as failed when duplicated nonce (#2308) * Fix UpdateTxStatus for replacedTx * Fix adding tx with same nonce on AddrQueue * log reprocess need (#2309) * log reprocess need * Update finalizer.go * Feature/2300 synchronizer detect if executor restart (#2306) * detect if executor restarts and stop synchonizer * Update prover images (#2311) * update prover image * update prover images * change executor param * Update testnet.prover.config.json * Update test.permissionless.prover.config.json * Update test.prover.config.json * Update public.prover.config.json * prover params * prover params * prover params * update prover images * add doc, and fix dockers to be able to use snap/restore feature (#2315) * add doc, and fix dockers to be able to use snap/restore feature * add doc for snap/restore feature --------- Co-authored-by: Toni Ramírez * Update docker-compose.yml * Update docker-compose.yml * do not add tx to the pool in case err != nil * do not add tx into the pool if a fatal error in the executor happens during pre execution * fix dbMultiWriteSinglePosition config value * workarround for the error error closing batch * workarround for the error error closing batch * workarround for the error error closing batch * workaround for the error of closing batch, another case * `Worker`'s `AddTxTracker` Bug Fix (#2343) * bugfix: Resolve Function Bug in Worker Module 
Signed-off-by: Nikolay Nedkov * improve: improving the wait for pending txs to be for only the txs for the current address. Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * rename config files (#2349) * fix closing batch + logs (#2348) * fix closing batch + logs * fix * log description * typo errors * fix error: failed to store transactions for batch due to duplicate key * test * typo * Update README.md * Update release.yml * bugfix: fixing place where we need to increment the wg per address for pending txs Signed-off-by: Nikolay Nedkov * Store batchL2Data when the batch is opened (#2358) * add GasPriceMarginFactor and MaxGasPrice to eth-tx-manager (#2360) * add GasPriceMarginFactor and MaxGasPrice to eth-tx-manager * add logs, fix config * update config file documentation --------- Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> * bugfix: attaching missing TxTracker.From to pending txs to store for forced batches. (#2365) Signed-off-by: Nikolay Nedkov * Update README.md * improve: adding logs (#2373) * improve: adding logs Signed-off-by: Nikolay Nedkov * adding more logs Signed-off-by: Nikolay Nedkov * adding more logs #2 Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * bugfix: fixing finalizer's handling. 
(#2375) Signed-off-by: Nikolay Nedkov * Update README.md * change hashdb go package * new hashdb interface * aggregator pb refactor * new prover image * change prover config * update prover image * update to latest proto and prover image * Refactor nonce calculation for addQueue (#2382) * refactor nonce * fix * fix * fix script * check to avoid data inconsistencies (#2387) * check to avoid data inconsistencies * check batchL2Data * names in the logs * Refactor: avoid delete addrQueue if it has pending txs to store (#2391) * refactor delete addrQueue only if not pending txs to store * fix finalizer test * fix olsStateRoot in handleForcedTxsProcessResp * Update sequencer/addrqueue.go Co-authored-by: Alonso Rodriguez --------- Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: Alonso Rodriguez * Sort txs in worker by gasPrice (remove efficiency sort) (#2392) * Sort txs in worker by GasPrice (remove efficiency sort) * update config docs --------- Co-authored-by: Toni Ramírez * use useMainExecGenerated (#2393) * Fix store forced batch tx (#2394) * l2coinbase (#2400) * l2coinbase * add default config * add support config fields that are common.Address * docs * prover image --------- Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> * merge v0.2.5 into develop --------- Signed-off-by: Nikolay Nedkov Co-authored-by: joanestebanr Co-authored-by: Alonso Rodriguez Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Nikolay Nedkov * build(deps): bump github.com/ethereum/go-ethereum from 1.12.1 to 1.12.2 (#2407) Bumps [github.com/ethereum/go-ethereum](https://github.com/ethereum/go-ethereum) from 1.12.1 to 1.12.2. 
- [Release notes](https://github.com/ethereum/go-ethereum/releases) - [Commits](https://github.com/ethereum/go-ethereum/compare/v1.12.1...v1.12.2) --- updated-dependencies: - dependency-name: github.com/ethereum/go-ethereum dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix safe and finalized l2 block to consider l1 safe and finalized blocks respectively (#2245) * improve pre-EIP155 check (#2327) * benmchmark: improving benchmark tests, adding main script to do all steps for setup, metrics collection and printing, and adding docs. Signed-off-by: Nikolay Nedkov * Makefile improvements (#2419) * config: updating RPC.Websockets.ReadLimit=80MB Signed-off-by: Nikolay Nedkov * docs: generating docs Signed-off-by: Nikolay Nedkov * fix: fixing config test values for 'RPC.WebSockets.ReadLimit' Signed-off-by: Nikolay Nedkov * improve: adding config params and check for batch requests for enabling/disabling (disabled by default) and max requests count limit. Signed-off-by: Nikolay Nedkov * updating bytes sent in Signed-off-by: Nikolay Nedkov * updating 'RPC.WebSockets.ReadLimit=100MB' Signed-off-by: Nikolay Nedkov * update doc (#2433) * refactoring and adding tests to batch requests * linter fixes * build(deps): bump github.com/google/uuid from 1.3.0 to 1.3.1 (#2436) Bumps [github.com/google/uuid](https://github.com/google/uuid) from 1.3.0 to 1.3.1. - [Release notes](https://github.com/google/uuid/releases) - [Changelog](https://github.com/google/uuid/blob/master/CHANGELOG.md) - [Commits](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) --- updated-dependencies: - dependency-name: github.com/google/uuid dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fixing check for pending txs and adding informational print for gas metrics. Signed-off-by: Nikolay Nedkov * fix and add tests for safe and finalized l2 blocks (#2424) * review code owners to match with the current team structure (#2435) * fix: fixing usage of num-ops param in 'benchmarks/sequencer/scripts' Signed-off-by: Nikolay Nedkov * fixing print in 'benchmarks/sequencer/../metrics' Signed-off-by: Nikolay Nedkov * Remove actual user names from PR template (#2470) * remove actual user names from PR template * remove actual user names from PR template * fix postgres version to 15 in docker compose files (#2471) * fix postgres version to 15 in docker compose files * fix postgres version to 15 in docker compose files * fix postgres version to 15 in docker compose files * Update production-setup.md * improve: improving scripts. Signed-off-by: Nikolay Nedkov * Merge v0.3.0 into develop (#2474) * fix null effective_percentage * fix forkID calculation * fix script * generate json-schema + docs for node config file and network_custom * fix unittest * Hotfixv0.1.4 to v0.2.0 (#2255) * Hotfix v0.1.4 to main (#2250) * fix concurrent web socket writes * fix eth_syncing * fix custom trace internal tx call error handling and update prover * add test to custom tracer depth issue; fix internal call error and gas used * fix custom tracer for internal tx with error and no more steps after it * remove debug code * Make max grpc message size configurable (#2179) * make max grpc message size configurable * fix state tests * fix tests * fix tests * get SequencerNodeURI from SC if empty and not IsTrustedSequencer * Optimize trace (#2183) * optimize trace * fix memory reading * update docker image * update prover image * fix converter * fix memory * fix step memory * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix 
structlogs * fix structlogs * update prover image * fix struclogs * fix memory size * fix memory size * fix memory size * refactor memory resize * refactor memory resize * move log for the best fitting tx (#2192) * fix load zkCounters from pool * remove unnecessary log.info * add custom tracer support to CREATES opcode without depth increase (#2213) * logs * fix getting stateroot from previous batch (GetWIPBatch) * logs * Fix GetWipBatch when previous last batch is a forced batch * fix forcedBatch trusted state * Revert "fix getting stateroot from previous batch (GetWIPBatch)" This reverts commit 860f0e74016219daf81f96b76f6b25609e1c66fd. * force GHA * add pool limits (#2189) * Hotfix/batch l2 data (#2223) * Fix BatchL2Data * Force GHA * remove failed txs from the pool limit check (#2233) * debug trace by batch number via external rpc requests (#2235) * fix trace batch remote requests in parallel limitation (#2244) * Added RPC.TraceBatchUseHTTPS config parameter * fix executor version --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * fix test * fix test --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * Effective GasPrice refactor+fixes (#2247) * effective GasPrice refactor * bugs fixes and finalizer tests fixes * fix typo * fix calculate effective gasprice percentage * fix test gas price * Fix/#2257 effective gas price receipt (#2258) * effective gas price returned by the rpc in the receipt * linter * bugfix: fixing l2blocks timestamp for the fist batch (#2260) * bugfix: fixing l2blocks timestamp for the fist batch Signed-off-by: 
Nikolay Nedkov * fix finalizer unit test --------- Signed-off-by: Nikolay Nedkov * add more comments, and removed fields PrivateKeyPath and PrivateKeyPassword from etherman.Config that are not in use * add info to git action * add info to git action * fix github action * updated comments * updated comments * Fix/#2263 gas used (#2264) * fix fea2scalar and gas used * suggestion * fix fea2scalar * suggestion * Fix pending tx when duplicate nonce (#2270) * fix pending tx when duplicate nonce * set pool.transaction.failed_reason to NULL when updating an existing tx * add more log details when adding tx to AddrQueue * fix query to add tx to the pool. Fix lint errors * change failed_reason for tx discarded due duplicate nonce * Only return a tx from the pool if tx is in pending status (#2273) * Return a tx from the pool only if it is * fix TestGetTransactionByHash --------- Co-authored-by: agnusmor * fix documentation with config file * improve: adding check to skip appending effectivePercentage if current forkId is under 5. Signed-off-by: Nikolay Nedkov * Fiex effectiveGasprice unsigned txs with forkId lower than 5 (#2278) * feat: adding functionality to stop sequencer on specific batch num from config param. 
Signed-off-by: Nikolay Nedkov * patch: adding print for X-Real-IP in JSON-RPC Signed-off-by: Nikolay Nedkov * Fix checkIfSynced (#2289) * [Rehashing] Check logs order and fix blockhash and blockNumber in the log conversion (#2280) * fix and check order * linter * flushID synchronizer (#2287) * FlushID in synchronizer * linter * fix logs * commnets * executor error refactor (#2299) * handle invalid rlp ROM error (#2297) * add maxL2GasPrice (#2294) * add maxL2GasPrice * fix * fix * add test * document parameter * update description * Error refactor (#2302) * error refactor * refactor * Fix replaced tx as failed when duplicated nonce (#2308) * Fix UpdateTxStatus for replacedTx * Fix adding tx with same nonce on AddrQueue * log reprocess need (#2309) * log reprocess need * Update finalizer.go * Feature/2300 synchronizer detect if executor restart (#2306) * detect if executor restarts and stop synchonizer * Update prover images (#2311) * update prover image * update prover images * change executor param * Update testnet.prover.config.json * Update test.permissionless.prover.config.json * Update test.prover.config.json * Update public.prover.config.json * prover params * prover params * prover params * update prover images * add doc, and fix dockers to be able to use snap/restore feature (#2315) * add doc, and fix dockers to be able to use snap/restore feature * add doc for snap/restore feature --------- Co-authored-by: Toni Ramírez * Update docker-compose.yml * Update docker-compose.yml * do not add tx to the pool in case err != nil * do not add tx into the pool if a fatal error in the executor happens during pre execution * fix dbMultiWriteSinglePosition config value * workarround for the error error closing batch * workarround for the error error closing batch * workarround for the error error closing batch * workaround for the error of closing batch, another case * `Worker`'s `AddTxTracker` Bug Fix (#2343) * bugfix: Resolve Function Bug in Worker Module 
Signed-off-by: Nikolay Nedkov * improve: improving the wait for pending txs to be for only the txs for the current address. Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * rename config files (#2349) * fix closing batch + logs (#2348) * fix closing batch + logs * fix * log description * typo errors * fix error: failed to store transactions for batch due to duplicate key * test * typo * Update README.md * Update release.yml * bugfix: fixing place where we need to increment the wg per address for pending txs Signed-off-by: Nikolay Nedkov * Store batchL2Data when the batch is opened (#2358) * add GasPriceMarginFactor and MaxGasPrice to eth-tx-manager (#2360) * add GasPriceMarginFactor and MaxGasPrice to eth-tx-manager * add logs, fix config * update config file documentation --------- Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> * bugfix: attaching missing TxTracker.From to pending txs to store for forced batches. (#2365) Signed-off-by: Nikolay Nedkov * Update README.md * improve: adding logs (#2373) * improve: adding logs Signed-off-by: Nikolay Nedkov * adding more logs Signed-off-by: Nikolay Nedkov * adding more logs #2 Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * bugfix: fixing finalizer's handling. 
(#2375) Signed-off-by: Nikolay Nedkov * Update README.md * change hashdb go package * new hashdb interface * aggregator pb refactor * new prover image * change prover config * update prover image * update to latest proto and prover image * Refactor nonce calculation for addQueue (#2382) * refactor nonce * fix * fix * fix script * check to avoid data inconsistencies (#2387) * check to avoid data inconsistencies * check batchL2Data * names in the logs * Refactor: avoid delete addrQueue if it has pending txs to store (#2391) * refactor delete addrQueue only if not pending txs to store * fix finalizer test * fix olsStateRoot in handleForcedTxsProcessResp * Update sequencer/addrqueue.go Co-authored-by: Alonso Rodriguez --------- Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: Alonso Rodriguez * Sort txs in worker by gasPrice (remove efficiency sort) (#2392) * Sort txs in worker by GasPrice (remove efficiency sort) * update config docs --------- Co-authored-by: Toni Ramírez * use useMainExecGenerated (#2393) * Fix store forced batch tx (#2394) * l2coinbase (#2400) * l2coinbase * add default config * add support config fields that are common.Address * docs * prover image --------- Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> * Check flushID != 0 (#2406) * Show tx.GasPrice in the worker logs (instead of tx.Cost) (#2416) * Check flushID != 0 in Sequencer (#2415) * check flushid != 0 in sequencer * Use f.halt instead of log.Fatal to report that flushid is 0 Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> * fix lint --------- Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> * update config params for Prover v2.1.0 (#2418) * cherry-pick #2385 and #2396 from develop into v0.2.6 (#2412) * fix http request instance null for websocket requests (#2385) * fix ws subscribe to get filtered log notifications (#2396) * new block endpoints and 
improvements to batch endpoint (#2411) * Add forced batches tx to addrQueue (#2398) * add forced batches tx to addrQueue * fix test * fix test * fix test * fix test * fix test * fix test * fix test * fix test * fix test * refactor * fix test * fix test * fix test * fix test * fix test * fixes * fixes * fixes * fixes * fixes * fixes * fixes * fixes * fixes * fixes * fixes * fixe hash and from * fixe hash and from * fixe hash and from * fixe hash and from * fix test * fix test * fix test * fix test * fix test * fix test * fix test * improve tests * improve tests * improve tests * improve tests * improve tests * refactor * refactor * improve logs * bugifx: adding missing tx.BreakEvenGasPrice nil check Signed-off-by: Nikolay Nedkov * Sync halt (#2428) * cherry-pick #2245 and #2424 from develop into v0.2.6 (#2447) * fix safe and finalized l2 block to consider l1 safe and finalized blocks respectively (#2245) * fix and add tests for safe and finalized l2 blocks (#2424) * New executor errors refactor (#2438) * wip * new errors * retry on executor db error * new prover images * fix comment * update hasdh proto and prover images * handle excutor db error * update test * update test * update test * update test * refactor error check in unsigned tx * Reprocess full batch in parallel (sanity check) (#2425) * reprocess full batch in parallel (sanity check) * update doc * update reprocessFullBatch logs * Speed up deleting batches from stateDB creating an index for state.receipt.block_num (#2457) * receipt deletion index * receipt deletion index * Feature/#2429 fork id improvement (#2450) * db table + tests * GetForks func modified to get them by range * Sync forkID * forkIDIntervals and forkID genesis * linter * docs * Avoid resetForkID in trustedNode * fix test group 9 * suggestions * doc and mocks * fix check storedFlushID (#2458) * remove stored flush id 0 (#2459) * Feature/#2403 snap (#2404) * Path snapshot command * restore * readme * options used by dbeaver * 
#2429_forkID_improvement: #2429_forkID_improvement: * fix * fix postgres version to v15 * fix permissionless init script * bugfix: removing measuring of metrics from async batch reprocessing f… (#2461) * bugfix: removing measuring of metrics from async batch reprocessing for executor. Signed-off-by: Nikolay Nedkov * fixing unit tests Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * update prover images (#2473) * Update production-setup.md * update doc * fix jsonrpc tests * fixes state.db * update doc again * remove obsolete config * docs one more time... --------- Signed-off-by: Nikolay Nedkov Co-authored-by: joanestebanr Co-authored-by: Alonso Rodriguez Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Nikolay Nedkov * Fix state configuration (#2478) * Fix state configuration * Remove ResourceWeights unnecessary configuration * linter * add http request content limit (#2448) * remove http call from trace by batch number; fix batch by number not found error (#2446) * reverting to working version of the scripts Signed-off-by: Nikolay Nedkov * improve: adding handling of odd-length hex strings in . Signed-off-by: Nikolay Nedkov * benchmarks: removing the need to get sequencer IP from aws. Signed-off-by: Nikolay Nedkov * tool to rerun batches (#2498) * tool to rerun batches * Cherrypick/fix convert process batch response (#2479) * remove need of decoded txs to process a batch response * fix * fix typo * fix * fix fb convert handling * handle invalid rlp error * improve logs * another fix * change IsStateRootChanged * fix INVALID_RLP * new rlp error --------- Co-authored-by: Alonso * build(deps): bump golang.org/x/net from 0.14.0 to 0.15.0 (#2513) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.14.0 to 0.15.0. 
- [Commits](https://github.com/golang/net/compare/v0.14.0...v0.15.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump google.golang.org/grpc from 1.57.0 to 1.58.0 (#2520) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.57.0 to 1.58.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.57.0...v1.58.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump github.com/invopop/jsonschema from 0.7.0 to 0.8.0 (#2519) * build(deps): bump github.com/invopop/jsonschema from 0.7.0 to 0.8.0 Bumps [github.com/invopop/jsonschema](https://github.com/invopop/jsonschema) from 0.7.0 to 0.8.0. - [Commits](https://github.com/invopop/jsonschema/compare/v0.7.0...v0.8.0) --- updated-dependencies: - dependency-name: github.com/invopop/jsonschema dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * new jsonschema-0.8.0 produce a difference for genesis[].storage because is a map --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> * Cherrypick/v0.3.1 to develop (#2516) * fix error detection (#2506) * fix error detection * linter * fix rom level error while syncing (#2507) * Fix forkID (#2511) * fix forkID reset * linter * sync logs * leave blockNum column * update on conflict * logs * fix unit test * Remove prover ports (aggregator) from docker-compose (#2525) * Fix rom ooc error detection (#2527) (#2529) * Fix rom ooc error detection * remove check * fix logger (#2530) * Use ZKEVM_NETWORK variable to specify location of config files in docker compose file (#2524) * Use ZKEVM_NETWORK variable to specify location of config files in docker-compose * fix config path in step 7.1 * rename ZKEVM_NODE_STATE_DB_DATA_DIR to ZKEVM_NODE_STATEDB_DATA_DIR * improve the way the jRPC server log errors when returning an error response to the user (#2485) * add jRPC support to eth_coinbase (#2500) * only RPC component will refresh blocked addresses (#2501) * fix merges (#2537) * build(deps): bump github.com/go-git/go-git/v5 from 5.8.1 to 5.9.0 (#2541) Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.8.1 to 5.9.0. - [Release notes](https://github.com/go-git/go-git/releases) - [Commits](https://github.com/go-git/go-git/compare/v5.8.1...v5.9.0) --- updated-dependencies: - dependency-name: github.com/go-git/go-git/v5 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * avoid halt syncronization in permissionless nodes (#2536) (#2545) * avoid halt syncronization in permissionless nodes * logs * logs with %d * print always the batchL2Data * Fix nil evaluation (#2544) (#2548) * fix nil evaluation * fix * fix * build(deps): bump google.golang.org/grpc from 1.58.0 to 1.58.1 (#2551) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.58.0 to 1.58.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.58.0...v1.58.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump github.com/ethereum/go-ethereum from 1.12.2 to 1.13.0 (#2540) * build(deps): bump github.com/ethereum/go-ethereum from 1.12.2 to 1.13.0 Bumps [github.com/ethereum/go-ethereum](https://github.com/ethereum/go-ethereum) from 1.12.2 to 1.13.0. - [Release notes](https://github.com/ethereum/go-ethereum/releases) - [Commits](https://github.com/ethereum/go-ethereum/compare/v1.12.2...v1.13.0) --- updated-dependencies: - dependency-name: github.com/ethereum/go-ethereum dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * return the gas price in base 16, not base 10 * change e2e call tracer test to conditionally validate revert reason --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Toni Ramírez Co-authored-by: tclemos * build(deps): bump go.uber.org/zap from 1.25.0 to 1.26.0 (#2550) Bumps [go.uber.org/zap](https://github.com/uber-go/zap) from 1.25.0 to 1.26.0. 
- [Release notes](https://github.com/uber-go/zap/releases) - [Changelog](https://github.com/uber-go/zap/blob/master/CHANGELOG.md) - [Commits](https://github.com/uber-go/zap/compare/v1.25.0...v1.26.0) --- updated-dependencies: - dependency-name: go.uber.org/zap dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * L1 synchronization in parallel (#2504) * L1 synchronization in parallel: merged feature/#2371-synchronizer_optimization * build(deps): bump github.com/ethereum/go-ethereum from 1.13.0 to 1.13.1 (#2554) Bumps [github.com/ethereum/go-ethereum](https://github.com/ethereum/go-ethereum) from 1.13.0 to 1.13.1. - [Release notes](https://github.com/ethereum/go-ethereum/releases) - [Commits](https://github.com/ethereum/go-ethereum/compare/v1.13.0...v1.13.1) --- updated-dependencies: - dependency-name: github.com/ethereum/go-ethereum dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * renamed test function to remove underscore (#2557) * add unit and e2e tests for big batch l2 data (#2539) * Enable gasless txs on L2 (#2560) (#2565) Enable gasless txs on L2 * Synchronizer L1 parallel: improve context usage (#2568) * + #2567: improve context, add unittest, remove unused code, improved logs * Fix Makefile: should start synchronizer before eth-tx-manager (#2553) * Fix Makefile: start synchronizer before eth-tx-manager * increase sleep seconds for db migration * fix trace default values and add condition to define which trace should be returned (#2576) * Synchronizer L1 parallel: fix stuck due error on request (#2579) * + limit pending results to give to consumer * improve: improving benchmark scripts. Signed-off-by: Nikolay Nedkov * bugfix: fixing L2 Reorg check. 
(#2588) Signed-off-by: Nikolay Nedkov * Synchronizer L1 parallel: add control between retries to avoid spamming (#2585) * +limit retries in time for RollupInfo * DB index (#2562) (#2570) * build(deps): bump google.golang.org/grpc from 1.58.1 to 1.58.2 (#2592) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.58.1 to 1.58.2. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.58.1...v1.58.2) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Data Streamer (#2569) * Data Streamer * fix encode functions * updates * build(deps): bump github.com/spf13/afero from 1.9.5 to 1.10.0 (#2594) Bumps [github.com/spf13/afero](https://github.com/spf13/afero) from 1.9.5 to 1.10.0. - [Release notes](https://github.com/spf13/afero/releases) - [Commits](https://github.com/spf13/afero/compare/v1.9.5...v1.10.0) --- updated-dependencies: - dependency-name: github.com/spf13/afero dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix deadlock on L1 synchronization when appears a ForkId that need a rewind (#2600) * build(deps): bump github.com/ethereum/go-ethereum from 1.13.1 to 1.13.2 (#2599) * build(deps): bump github.com/ethereum/go-ethereum from 1.13.1 to 1.13.2 Bumps [github.com/ethereum/go-ethereum](https://github.com/ethereum/go-ethereum) from 1.13.1 to 1.13.2. 
- [Release notes](https://github.com/ethereum/go-ethereum/releases) - [Commits](https://github.com/ethereum/go-ethereum/compare/v1.13.1...v1.13.2) --- updated-dependencies: - dependency-name: github.com/ethereum/go-ethereum dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * fix doc --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alonso * build(deps): bump github.com/prometheus/client_golang (#2598) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.16.0 to 1.17.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/v1.17.0/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.16.0...v1.17.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix error check returned by the executor when debug tracing a block (#2605) * remove deprecated TraceBatchUseHTTPS config (#2606) * build(deps): bump github.com/invopop/jsonschema from 0.8.0 to 0.10.0 (#2597) * build(deps): bump github.com/invopop/jsonschema from 0.8.0 to 0.10.0 Bumps [github.com/invopop/jsonschema](https://github.com/invopop/jsonschema) from 0.8.0 to 0.10.0. - [Commits](https://github.com/invopop/jsonschema/compare/v0.8.0...v0.10.0) --- updated-dependencies: - dependency-name: github.com/invopop/jsonschema dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * adapt code for jsonschema 0.10.0 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alonso Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> * race condition: reset trusted state memory var (#2602) (#2603) * race condition: reset trusted state memory var * linter * doc * comment * build(deps): bump github.com/invopop/jsonschema from 0.10.0 to 0.11.0 (#2611) Bumps [github.com/invopop/jsonschema](https://github.com/invopop/jsonschema) from 0.10.0 to 0.11.0. - [Commits](https://github.com/invopop/jsonschema/compare/v0.10.0...v0.11.0) --- updated-dependencies: - dependency-name: github.com/invopop/jsonschema dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump github.com/prometheus/client_model (#2612) Bumps [github.com/prometheus/client_model](https://github.com/prometheus/client_model) from 0.4.1-0.20230718164431-9a2bf3000d16 to 0.5.0. - [Release notes](https://github.com/prometheus/client_model/releases) - [Commits](https://github.com/prometheus/client_model/commits/v0.5.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_model dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Populate stream file on sequencer startup (#2613) (#2614) * populates stream file on startup * fix test * build(deps): bump github.com/invopop/jsonschema from 0.11.0 to 0.12.0 (#2617) Bumps [github.com/invopop/jsonschema](https://github.com/invopop/jsonschema) from 0.11.0 to 0.12.0. 
- [Commits](https://github.com/invopop/jsonschema/compare/v0.11.0...v0.12.0) --- updated-dependencies: - dependency-name: github.com/invopop/jsonschema dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Feature/2595 l1 sync deadlock (#2616) * changed that producer doesnt stop * renaming: renaming dbManager.checkIfReorg to checkStateInconsistency. Signed-off-by: Nikolay Nedkov * add random ContextId when calling executor.ProcessBatch (#2575) * add unit test to validate jRPC max request limit per ip and second (#2593) * Feature/jrpc log limit (#2572) * build(deps): bump golang.org/x/crypto from 0.13.0 to 0.14.0 (#2622) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.13.0 to 0.14.0. - [Commits](https://github.com/golang/crypto/compare/v0.13.0...v0.14.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump golang.org/x/net from 0.15.0 to 0.16.0 (#2623) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.15.0 to 0.16.0. - [Commits](https://github.com/golang/net/compare/v0.15.0...v0.16.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * migration + tests and queries (#2625) (#2626) * build(deps): bump golang.org/x/sync from 0.3.0 to 0.4.0 (#2621) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.3.0 to 0.4.0. 
- [Commits](https://github.com/golang/sync/compare/v0.3.0...v0.4.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump github.com/spf13/viper from 1.16.0 to 1.17.0 (#2628) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.16.0 to 1.17.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.16.0...v1.17.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Update mock files using mockery v2.22.1 to v2.32.0 (#2630) * Fix encodng tx in stream (#2629) (#2631) * Fixes Tx encoding in data stream * fix * Fix encode into stream (#2634) (#2636) * rebuild tool * wip * fix encoded * linter * Cherrypick/#2632 #2637 (#2639) * Delete duplicate values * Fix release docker-compose (#2638) * Cherrypick/2642 2647 (#2648) * Add bookmarks to streamer (#2642) * update tools * fix error * update data streamer version * refactor encoded * refactor encoded * gitignore * fix tool (#2647) * build(deps): bump golang.org/x/net from 0.16.0 to 0.17.0 (#2645) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.16.0 to 0.17.0. - [Commits](https://github.com/golang/net/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Fix debug tracer error message (#2345) * Improve WS subscription (#2635) * build(deps): bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#2644) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.58.2 to 1.58.3. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.58.2...v1.58.3) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * check for bookmark entry in data stream file (#2670) (#2671) * check for bookmark entry in data stream file * fix current l2 block * build(deps): bump github.com/0xPolygonHermez/zkevm-data-streamer (#2665) Bumps [github.com/0xPolygonHermez/zkevm-data-streamer](https://github.com/0xPolygonHermez/zkevm-data-streamer) from 0.0.10 to 0.0.12. - [Release notes](https://github.com/0xPolygonHermez/zkevm-data-streamer/releases) - [Commits](https://github.com/0xPolygonHermez/zkevm-data-streamer/compare/v0.0.10...v0.0.12) --- updated-dependencies: - dependency-name: github.com/0xPolygonHermez/zkevm-data-streamer dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Refactor data stream tool (#2653) (#2657) * Refactor data stream tool (#2653) * recover start in tool * fix timestamp * remove duplicate code * new tool * update data stream lib * restore go.mod * fix query * linter * build(deps): bump github.com/fatih/color from 1.14.1 to 1.15.0 (#2675) Bumps [github.com/fatih/color](https://github.com/fatih/color) from 1.14.1 to 1.15.0. 
- [Release notes](https://github.com/fatih/color/releases) - [Commits](https://github.com/fatih/color/compare/v1.14.1...v1.15.0) --- updated-dependencies: - dependency-name: github.com/fatih/color dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump github.com/prometheus/client_golang (#2674) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.16.0 to 1.17.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.16.0...v1.17.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * + fix behaviour on persistent errors and add logs to the creation of batches (#2672) * Upgrade to go 1.21 (#2676) * upgrade to go 1.21 * go.mod * fix gosec * + L1 synchronization parallel: add logs to batches, fix control of consumer block (#2677) * issue: #2672 * + add logs to batches * + consumer ignore no sequential blocks * build(deps): bump google.golang.org/grpc from 1.58.3 to 1.59.0 (#2679) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.58.3 to 1.59.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.58.3...v1.59.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Cherrypick/2680 (#2688) * prover v3.0.0-RC3-fork.6 * prover v3.0.0 * update prover to v3.0.0-RC1 * update prover to v0.3.1-RC1 * update tests to forkID6 * update tests to forkID6 * update prover image * udpate test * Changing the forced batch tests to use genesisBlockNumber from the test.genesis.config.json. Signed-off-by: Nikolay Nedkov * Fixing forced batch tests to be symlinks and fixing usage of SetForkID. Signed-off-by: Nikolay Nedkov * Fixing forced batch tests to be symlinks and fixing usage of SetForkID. Signed-off-by: Nikolay Nedkov * update prover image to v3.0.0-RC2 --------- Signed-off-by: Nikolay Nedkov Co-authored-by: Nikolay Nedkov * build(deps): bump github.com/prometheus/common from 0.44.0 to 0.45.0 (#2687) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.44.0 to 0.45.0. - [Release notes](https://github.com/prometheus/common/releases) - [Commits](https://github.com/prometheus/common/compare/v0.44.0...v0.45.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * add new endpoint zkevm_getNativeBlockHashesInRange (#2661) * update code owners (#2703) * Cherrypick/v0.3.3-RC6 (#2702) * Add UpdateGER entry to stream (#2695) * Add Update GER entry to stream * add entity definition * improve logs * fix logic * start metrics http server before node components (#2697) * start streamserver before update the file (#2698) * add entries decode (#2699) * add entries decode * linter * udpate streamer lib * update to streamer v0.1.2 * Fix/update ger open batch (#2701) * fix update GER on open batch * update stream lib * fix config file * change DSBatch query --------- Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> * Add pool.whitelisted table (#2704) * add pool.whitelisted table * Add empty line after DROP TABLE Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> --------- Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> * + Synchronizer L1 parallel: rollupinfo request for lastest block, add previous block to check L… (#2685) * + rollupinfo request for lastest block, add previous block to check L1-reogs - Request after justified block ask for previous block to be able to match that belongs to same chain * + add coverage report to sonarcloud (#2706) * + add coverge to sonarcloud * build(deps): bump github.com/go-git/go-git/v5 from 5.9.0 to 5.10.0 (#2711) Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.9.0 to 5.10.0. - [Release notes](https://github.com/go-git/go-git/releases) - [Commits](https://github.com/go-git/go-git/compare/v5.9.0...v5.10.0) --- updated-dependencies: - dependency-name: github.com/go-git/go-git/v5 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add online tooling for data stream (#2712) (#2715) * online tools * udpate streamer lib * upgrade to go 1.21 * restore go.mod * fix streamer tool logs (#2717) * fix streamer tool logs * fix streamer tool logs * Fix release process: version binaries, patch > 9, testnet.zip file * add LDFLAGS to goreleaser to include in the binary the right, version. Also add a new tag test* to be able to test this solution * fix error generating testnet.zip * build(deps): bump github.com/google/uuid from 1.3.1 to 1.4.0 (#2721) * revert changes on config file (#2725) * Update zkevm-data-streamer to v0.1.8 (#2723) * add config to enable or disable http logs (#2709) * add config to enable or disable http logs * update doc * review l2 block and batch tags: earliest, latest, pending, safe and finalized (#2673) * add batch pending, safe and finalized tags for jRPC queries * fix linter issues * review l2 block and batch tags to handle earliest, latest, pending, safe and finalized properly * fix unit tests * ethtxmanager improvements (#2664) * fix null effective_percentage * fix forkID calculation * fix script * generate json-schema + docs for node config file and network_custom * fix unittest * Hotfixv0.1.4 to v0.2.0 (#2255) * Hotfix v0.1.4 to main (#2250) * fix concurrent web socket writes * fix eth_syncing * fix custom trace internal tx call error handling and update prover * add test to custom tracer depth issue; fix internal call error and gas used * fix custom tracer for internal tx with error and no more steps after it * remove debug code * Make max grpc message size configurable (#2179) * make max grpc message size configurable * fix state tests * fix tests * fix tests * get SequencerNodeURI from SC if empty and not IsTrustedSequencer * Optimize trace (#2183) * optimize trace * fix memory reading * update docker image * update prover image * fix converter * fix memory 
* fix step memory * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * fix structlogs * update prover image * fix struclogs * fix memory size * fix memory size * fix memory size * refactor memory resize * refactor memory resize * move log for the best fitting tx (#2192) * fix load zkCounters from pool * remove unnecessary log.info * add custom tracer support to CREATES opcode without depth increase (#2213) * logs * fix getting stateroot from previous batch (GetWIPBatch) * logs * Fix GetWipBatch when previous last batch is a forced batch * fix forcedBatch trusted state * Revert "fix getting stateroot from previous batch (GetWIPBatch)" This reverts commit 860f0e74016219daf81f96b76f6b25609e1c66fd. * force GHA * add pool limits (#2189) * Hotfix/batch l2 data (#2223) * Fix BatchL2Data * Force GHA * remove failed txs from the pool limit check (#2233) * debug trace by batch number via external rpc requests (#2235) * fix trace batch remote requests in parallel limitation (#2244) * Added RPC.TraceBatchUseHTTPS config parameter * fix executor version --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * fix test * fix test --------- Co-authored-by: tclemos Co-authored-by: tclemos Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos * Effective GasPrice refactor+fixes (#2247) * effective GasPrice refactor * bugs fixes and finalizer tests fixes * fix typo * fix calculate effective gasprice percentage * fix test gas price * Fix/#2257 effective gas price receipt (#2258) * effective gas price returned by the rpc in the receipt * linter * bugfix: 
fixing l2blocks timestamp for the fist batch (#2260) * bugfix: fixing l2blocks timestamp for the fist batch Signed-off-by: Nikolay Nedkov * fix finalizer unit test --------- Signed-off-by: Nikolay Nedkov * add more comments, and removed fields PrivateKeyPath and PrivateKeyPassword from etherman.Config that are not in use * add info to git action * add info to git action * fix github action * updated comments * updated comments * Fix/#2263 gas used (#2264) * fix fea2scalar and gas used * suggestion * fix fea2scalar * suggestion * Fix pending tx when duplicate nonce (#2270) * fix pending tx when duplicate nonce * set pool.transaction.failed_reason to NULL when updating an existing tx * add more log details when adding tx to AddrQueue * fix query to add tx to the pool. Fix lint errors * change failed_reason for tx discarded due duplicate nonce * Only return a tx from the pool if tx is in pending status (#2273) * Return a tx from the pool only if it is * fix TestGetTransactionByHash --------- Co-authored-by: agnusmor * fix documentation with config file * improve: adding check to skip appending effectivePercentage if current forkId is under 5. Signed-off-by: Nikolay Nedkov * Fiex effectiveGasprice unsigned txs with forkId lower than 5 (#2278) * feat: adding functionality to stop sequencer on specific batch num from config param. 
Signed-off-by: Nikolay Nedkov * patch: adding print for X-Real-IP in JSON-RPC Signed-off-by: Nikolay Nedkov * Fix checkIfSynced (#2289) * [Rehashing] Check logs order and fix blockhash and blockNumber in the log conversion (#2280) * fix and check order * linter * flushID synchronizer (#2287) * FlushID in synchronizer * linter * fix logs * commnets * executor error refactor (#2299) * handle invalid rlp ROM error (#2297) * add maxL2GasPrice (#2294) * add maxL2GasPrice * fix * fix * add test * document parameter * update description * Error refactor (#2302) * error refactor * refactor * Fix replaced tx as failed when duplicated nonce (#2308) * Fix UpdateTxStatus for replacedTx * Fix adding tx with same nonce on AddrQueue * log reprocess need (#2309) * log reprocess need * Update finalizer.go * Feature/2300 synchronizer detect if executor restart (#2306) * detect if executor restarts and stop synchonizer * Update prover images (#2311) * update prover image * update prover images * change executor param * Update testnet.prover.config.json * Update test.permissionless.prover.config.json * Update test.prover.config.json * Update public.prover.config.json * prover params * prover params * prover params * update prover images * add doc, and fix dockers to be able to use snap/restore feature (#2315) * add doc, and fix dockers to be able to use snap/restore feature * add doc for snap/restore feature --------- Co-authored-by: Toni Ramírez * Update docker-compose.yml * Update docker-compose.yml * do not add tx to the pool in case err != nil * do not add tx into the pool if a fatal error in the executor happens during pre execution * fix dbMultiWriteSinglePosition config value * workarround for the error error closing batch * workarround for the error error closing batch * workarround for the error error closing batch * workaround for the error of closing batch, another case * `Worker`'s `AddTxTracker` Bug Fix (#2343) * bugfix: Resolve Function Bug in Worker Module 
Signed-off-by: Nikolay Nedkov * improve: improving the wait for pending txs to be for only the txs for the current address. Signed-off-by: Nikolay Nedkov --------- Signed-off-by: Nikolay Nedkov * rename config files (#2349) * fix closing batch + logs (#2348) * fix closing batch + logs * fix * log description * typo errors * fix error: failed to store transactions for batch due to duplicate key * test * typo * Update README.md * Update release.yml * bugfix: fixing place where we need to increment the wg per address for pending txs Signed-off-by: Nikolay Nedkov * add GasPriceMarginFactor and MaxGasPrice to eth-tx-manager * add logs, fix config * update config file documentation * refactoring how eth tx manager monitor txs to increase the throughput * monitoredTx log improvements * merge clean-up * merge clean-up * fix * ethTxManager gasOffset refactoring * update docs * add recover to monitor tx flow and unit test to failed gas estimation with forced gas --------- Signed-off-by: Nikolay Nedkov Co-authored-by: Toni Ramírez Co-authored-by: joanestebanr Co-authored-by: Alonso Rodriguez Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Nikolay Nedkov * fix nil pointer when fails etherman call (#2730) * add pool indexes (#2731) * add pool indexes * add pool indexes * add pool indexes * add pool indexes * add pool indexes * add pool indexes * add pool indexes * update GHA workflows to use latest docker package versions (#2718) * delete failed txs during pool cleanup (#2733) * delete failed txs during pool cleanup * delete failed txs during pool cleanup * improve logs * Improve effective gas price implementation (RPC filter, gasprice/balance opcodes flags, new formula) (#2690) * initial changes related to revision of effective gas price implementation * Use 
EffectiveGasPriceLog struct to log/store EGP calculation details * added egp tests and logs. Update executor proto * implementation EGP for RPC (#2652) * implementation EGP for RPC * added additional fields to EGPLog. Several fixes * use BreakEvenFactor instead or pct margin. RPC egp-log changed * fix white spaces * fix lint and doc * change pool egp logs * fix nil evaluation on pool.StoreTx * Revert "fix nil evaluation on pool.StoreTx" This reverts commit 958a41f88963d261f55e2a970c6e3a0bb986dde3. * fix pgstatestorage tests * fix config doc * added HasOpCode e2e test * Added HasOpCode e2e test. Improve logs. Fixes * Fix finalizer tests * change egp checks * fix PR review comments * fix new error handling (#2737) --------- Co-authored-by: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> * Fix GlobalQueue/AccountQueue pool config (#2738) * Properly handle sequenced empty batches with GER Update (#2742) (#2743) * handle GER update on empty forced batch * fix update current GER * full refactor * full refactor * update stream library (#2746) * Fix missing pool.Config parameter when creating a new Sequencer (#2741) * add gasprice update log * log finalizer L1/L2 gas prices * fix passing pool.Config to new sequencer * remove temporary logs * Update/websocketslib (#2757) * update stream library * update websockets lib * + renamed config vars for L1 parallel sync (#2768) * + renamed config vars for L1 parallel sync * Added L2GasPriceSuggesterFactor config parameter to simulate estimated L2 gas price (EGP disabled) (#2777) * added L2GasPriceSuggesterFactor config parameter to "simulate" estimated l2 gas price when egp is disabled * fix L2GasPriceSuggesterFactor value in test config file * fix L2GasPriceSuggesterFactor value in test config file * update doc * add queue to ws subscription filters (#2769) * fix trace internal tx gas used computation (#2778) * replace atomic wsConn by 
concurrentWsConn (#2782) (#2790) * Update GER on closed batch without txs (#2793) (#2795) * update GER on closed batch without txs * refactor * cherry pick 2785 (#2796) * cherry pick the latest WS improvements into v0.4.0 (#2800) * update prover images to v3.0.2 (#2802) * set synchronizer sequential mode as default (#2807) * set sequential mode as default * add response headers to all http responses (#2829) * Merge pull request #9 from okx/zjg/fork5-dac fork5 dac * fix * Add logs for sequencer (#14) * add log statistics * add reset and batch number * total duration with milliseconds * add batch gas used statistics * support innertx (#16) * add switch for pending transaction filter (#18) * Update endpoints_eth.go fix fix * suport free gas to claim asset (#22) * fix * Update worker.go * Update run.go * add gp adjust (#17) * add the fixed type for gp adjust * add ut and adjust the code about kafka * add fixed config to the local.node.config.toml * fix lint error * fix lint error * read rootca from path * fix conflict * fix kafka lint * fix * validium/rollup switch (#26) * update * update * update * update * update * update * update * update * update * update abi * update go mod file * add wait tick * fix config name * fix logs * modify is zero byte array function * fix * support nacos (#27) * support nacos * reorgnize nacos configuration * modify the nacos configure name * add example configure file * fix * sync fix ws debug (#28) * sync the code and fix the ws bug for the rpc method * adaptive the err judge * Support websocket to register nacos (#33) * add nacos support for websocket * lint code * support flatCallTracer and result limit for trace transaction (#40) * support flatCallTracer * trace result limit * fix make deploy-sc error (#38) * update * update * update * test xgon * 1 * add gas price dynamic adjust in follower mode (#34) * add dynamic adjust for follower * modify the config * fix lint and schema * modify the comment * modify the comment * make doc * 
fix compile add missing code * first commit * Update db.go * Update datacommittee_test.go * format * fmt * goimport * 1 * 1 * fix * Update start.go * 1 * a * 1 * a * Update Makefile * fix test * Update setup.go * a * update makefile * update makefile * fix ut * update * update * update * disabel * udpate * update * update * fix e2e * update permissionless-node * update --------- Signed-off-by: Nikolay Nedkov Signed-off-by: dependabot[bot] Co-authored-by: Nikolay Nedkov Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: tclemos Co-authored-by: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: Thiago Coimbra Lemos Co-authored-by: Arnau Bennassar Co-authored-by: joanestebanr Co-authored-by: Alonso Rodriguez Co-authored-by: tclemos Co-authored-by: agnusmor Co-authored-by: agnusmor <100322135+agnusmor@users.noreply.github.com> Co-authored-by: icydark <54430021+icydark@users.noreply.github.com> Co-authored-by: Alejandro Criado-Pérez Co-authored-by: Alonso Co-authored-by: Toni Ramírez Co-authored-by: bap2pecs <111917526+bap2pecs@users.noreply.github.com> Co-authored-by: KamiD <44460798+KamiD@users.noreply.github.com> Co-authored-by: zhangkai Co-authored-by: lyh169 <44960676+lyh169@users.noreply.github.com> Co-authored-by: chengzhinei Co-authored-by: JianGuo Co-authored-by: ylsGit Co-authored-by: root --- .github/CODEOWNERS | 27 +- .github/pull_request_template.md | 4 +- .github/workflows/jsonschema.yml | 12 +- .github/workflows/lint.yml | 2 +- .github/workflows/push-docker-develop.yml | 8 +- .github/workflows/push-docker-tagged.yml | 8 +- .github/workflows/release.yml | 7 +- .github/workflows/sonarqube.yml | 13 +- .github/workflows/test-e2e.yml | 2 +- .github/workflows/test-from-prover.yml | 4 +- .github/workflows/test-full-non-e2e.yml | 2 +- .github/workflows/updatedeps.yml | 2 +- .gitignore | 9 +- .goreleaser.yaml | 
5 + CONTRIBUTING.md | 8 +- Dockerfile | 2 +- Makefile | 100 +- README.md | 31 +- aggregator/aggregator.go | 16 +- aggregator/aggregator_test.go | 6 +- aggregator/config.go | 12 + aggregator/interfaces.go | 2 +- aggregator/mocks/mock_dbtx.go | 11 +- aggregator/mocks/mock_etherman.go | 11 +- aggregator/mocks/mock_ethtxmanager.go | 21 +- aggregator/mocks/mock_profitabilitychecker.go | 11 +- aggregator/mocks/mock_prover.go | 11 +- aggregator/mocks/mock_state.go | 11 +- aggregator/prover/aggregator.pb.go | 5 +- aggregator/prover/aggregator_grpc.pb.go | 1 + ci/e2e-group-dac/datacommittee_test.go | 1 + ci/e2e-group10/forced_batches_test.go | 239 +-- .../forced_batches_vector_group2_test.go | 229 +-- ci/e2e-group10/shared.go | 1 + .../forced_batches_vector_group3_test.go | 233 +-- ci/e2e-group11/shared.go | 1 + ci/e2e-group2/gasless_test.go | 1 + .../forced_batches_vector_group1_test.go | 235 +-- ci/e2e-group9/shared.go | 1 + cmd/dumpstate.go | 4 +- cmd/restore.go | 12 +- cmd/run.go | 68 +- cmd/snapshot.go | 10 +- config/config.go | 5 +- config/config_test.go | 188 +- config/default.go | 82 +- .../local/local.genesis.config.json | 2 +- .../environments/local/local.node.config.toml | 61 +- config/environments/mainnet/node.config.toml | 33 +- .../environments/mainnet/prover.config.json | 2 +- config/environments/testnet/node.config.toml | 32 +- config/gen_json_schema.go | 75 +- config/gen_json_schema_test.go | 8 +- config/network.go | 12 +- db/migrations/pool/0011.sql | 15 + db/migrations/pool/0011_test.go | 49 + db/migrations/pool/utils_test.go | 116 ++ db/migrations/state/0009.sql | 7 + db/migrations/state/0009_test.go | 95 + db/migrations/state/0010.sql | 21 + db/migrations/state/0010_test.go | 67 + db/migrations/state/0011.sql | 21 + db/migrations/state/0011_test.go | 73 + db/migrations/state/0012.sql | 8 + db/migrations/state/0012_test.go | 62 + docker-compose.yml | 10 +- docs/architecture.drawio.png | Bin 154305 -> 169540 bytes docs/ci/groups.md | 6 +- 
docs/components/aggregator.md | 4 +- docs/components/rpc.md | 4 +- docs/components/sequencer.md | 6 +- docs/components/synchronizer.md | 2 +- .../custom_network-config-doc.html | 2 +- docs/config-file/custom_network-config-doc.md | 13 +- .../custom_network-config-schema.json | 6 +- docs/config-file/node-config-doc.html | 30 +- docs/config-file/node-config-doc.md | 1643 ++++++++++++----- docs/config-file/node-config-schema.json | 549 ++++-- docs/configuration.md | 16 +- .../l1_sync_channels_flow_v2.drawio.png | Bin 0 -> 91154 bytes .../design/synchronizer/l1_synchronization.md | 62 + docs/json-rpc-endpoints.md | 1 + docs/modes.md | 12 +- docs/production-setup.md | 16 +- docs/snap_restore.md | 12 +- etherman/etherman_test.go | 2 +- etherman/mock_etherscan.go | 11 +- etherman/mock_ethgasstation.go | 11 +- etherman/smartcontracts/script.sh | 2 +- ethtxmanager/ethtxmanager.go | 477 ++--- ethtxmanager/ethtxmanager_test.go | 234 ++- ethtxmanager/mock_etherman_test.go | 11 +- ethtxmanager/mock_state_test.go | 11 +- ethtxmanager/monitoredtx.go | 5 +- ethtxmanager/monitoredtx_test.go | 18 +- ethtxmanager/pgstorage.go | 29 +- gasprice/mock_etherman.go | 11 +- gasprice/mock_pool.go | 11 +- go.mod | 99 +- go.sum | 216 ++- hex/hex.go | 6 + hex/hex_test.go | 36 + jsonrpc/client/client.go | 87 +- jsonrpc/config.go | 34 +- jsonrpc/dbtxmanager.go | 6 +- jsonrpc/endpoints_debug.go | 35 +- jsonrpc/endpoints_debug_innertx.go | 4 +- jsonrpc/endpoints_eth.go | 503 +++-- jsonrpc/endpoints_eth_test.go | 528 +++++- jsonrpc/endpoints_zkevm.go | 56 +- jsonrpc/endpoints_zkevm.openrpc.json | 38 + jsonrpc/endpoints_zkevm_test.go | 173 +- jsonrpc/handler.go | 23 +- jsonrpc/interfaces.go | 16 +- jsonrpc/metrics/metrics.go | 2 + jsonrpc/mock_storage.go | 72 +- jsonrpc/mocks/mock_dbtx.go | 11 +- jsonrpc/mocks/mock_etherman.go | 11 +- jsonrpc/mocks/mock_pool.go | 11 +- jsonrpc/mocks/mock_state.go | 117 +- jsonrpc/nacos/start.go | 21 +- jsonrpc/nacos/utils.go | 3 +- jsonrpc/query.go | 154 +- 
jsonrpc/server.go | 232 ++- jsonrpc/server_test.go | 499 ++++- jsonrpc/storage.go | 195 +- jsonrpc/types/codec.go | 82 +- jsonrpc/types/codec_test.go | 165 +- jsonrpc/types/errors.go | 10 + jsonrpc/types/interfaces.go | 9 +- jsonrpc/wsconn.go | 46 + log/log.go | 18 +- merkletree/hashdb/hashdb.pb.go | 5 +- merkletree/hashdb/hashdb_grpc.pb.go | 1 + pool/config.go | 31 +- pool/config_test.go | 63 + pool/effectivegasprice.go | 143 ++ pool/effectivegasprice_test.go | 271 +++ pool/errors.go | 12 + pool/interfaces.go | 1 + pool/pgpoolstorage/pgpoolstorage.go | 10 + pool/pool.go | 154 +- pool/pool_test.go | 352 +++- pool/validation.go | 8 + pool/validation_test.go | 27 + proto/src/proto/executor/v1/executor.proto | 38 +- sequencer/addrqueue.go | 2 +- sequencer/closingsignalsmanager_test.go | 4 +- sequencer/config.go | 70 +- sequencer/dbmanager.go | 192 +- sequencer/dbmanager_test.go | 4 +- sequencer/effective_gas_price.go | 138 -- sequencer/effective_gas_price_test.go | 96 - sequencer/errors.go | 4 - sequencer/finalizer.go | 362 +++- sequencer/finalizer_test.go | 130 +- sequencer/interfaces.go | 22 +- sequencer/mock_db_manager.go | 27 +- sequencer/mock_dbtx.go | 11 +- sequencer/mock_etherman.go | 76 +- sequencer/mock_pool.go | 43 +- sequencer/mock_state.go | 143 +- sequencer/mock_worker.go | 14 +- sequencer/sequencer.go | 115 +- sequencer/txtracker.go | 57 +- sequencer/worker.go | 32 +- sequencer/worker_test.go | 66 +- sequencesender/config.go | 22 +- sequencesender/interfaces.go | 2 +- sequencesender/sequencesender.go | 25 +- sonar-project.properties | 9 + state/batch.go | 46 +- state/config.go | 54 +- state/converters.go | 80 +- state/datastream.go | 489 +++++ state/errors.go | 12 + state/genesis.go | 7 +- state/helper.go | 10 +- state/infinite.go | 27 + state/l2block.go | 108 +- state/pgstatestorage.go | 502 ++++- state/pgstatestorage_test.go | 436 ++++- state/queue.go | 67 + state/queue_test.go | 52 + state/runtime/executor/errors.go | 48 + 
state/runtime/executor/executor.pb.go | 1218 ++++++------ state/runtime/executor/executor_grpc.pb.go | 3 +- state/runtime/instrumentation/js/goja.go | 7 +- .../internal/tracers/4byte_tracer_legacy.js | 2 +- .../js/internal/tracers/call_tracer_legacy.js | 2 +- .../tracers/native/gen_callframe_json.go | 6 +- state/runtime/runtime.go | 24 + state/state.go | 6 +- state/state_test.go | 107 +- state/test/datastream_test.go | 55 + state/transaction.go | 145 +- state/types.go | 31 +- synchronizer/block_range.go | 60 + synchronizer/config.go | 49 + synchronizer/control_flush_id.go | 133 ++ synchronizer/ext_control.go | 129 ++ synchronizer/generic_cache.go | 104 ++ synchronizer/generic_cache_test.go | 176 ++ synchronizer/interfaces.go | 10 +- synchronizer/l1_common.go | 61 + synchronizer/l1_data_message.go | 98 + ...er_send_orderer_results_to_synchronizer.go | 127 ++ ...nd_orderer_results_to_synchronizer_test.go | 332 ++++ synchronizer/l1_live_block_ranges.go | 99 + synchronizer/l1_live_block_ranges_test.go | 77 + synchronizer/l1_rollup_info_consumer.go | 284 +++ .../l1_rollup_info_consumer_statistics.go | 69 + ...l1_rollup_info_consumer_statistics_test.go | 117 ++ synchronizer/l1_rollup_info_consumer_test.go | 157 ++ synchronizer/l1_rollup_info_producer.go | 596 ++++++ .../l1_rollup_info_producer_statistics.go | 90 + ...l1_rollup_info_producer_statistics_test.go | 31 + synchronizer/l1_rollup_info_producer_test.go | 139 ++ synchronizer/l1_sync_orchestration.go | 189 ++ synchronizer/l1_sync_orchestration_test.go | 50 + synchronizer/l1_syncstatus.go | 332 ++++ synchronizer/l1_syncstatus_test.go | 281 +++ synchronizer/l1_worker_etherman.go | 390 ++++ synchronizer/l1_worker_etherman_test.go | 273 +++ synchronizer/l1_workers.go | 224 +++ ...workers_decorator_limit_retries_by_time.go | 75 + ...rs_decorator_limit_retries_by_time_test.go | 54 + synchronizer/mock_datacommitteeclient.go | 2 +- .../mock_datacommitteeclientfactory.go | 2 +- synchronizer/mock_dbtx.go | 11 +- 
synchronizer/mock_etherman.go | 33 +- synchronizer/mock_ethtxmanager.go | 11 +- .../mock_l1_rollup_consumer_interface.go | 77 + .../mock_l1_rollup_producer_interface.go | 57 + synchronizer/mock_l1_worker.go | 85 + synchronizer/mock_pool.go | 11 +- synchronizer/mock_state.go | 121 +- .../mock_synchronizer_process_block_range.go | 43 + synchronizer/mock_workers.go | 137 ++ synchronizer/mock_zkevmclient.go | 11 +- synchronizer/synchronizer.go | 312 +++- synchronizer/synchronizer_test.go | 168 +- synchronizer/time_provider.go | 19 + test/Makefile | 77 +- .../sequencer/common/metrics/metrics.go | 156 +- .../sequencer/common/params/constants.go | 4 +- .../sequencer/common/setup/setup.go | 35 +- .../common/transactions/transactions.go | 82 +- .../e2e/erc20-transfers/deployment.go | 43 + .../e2e/erc20-transfers/erc20_test.go | 80 + .../e2e/erc20-transfers/tx_sender.go | 47 + .../eth-transfers/eth_test.go} | 44 +- .../sequencer/e2e/eth-transfers/tx_sender.go | 56 + .../e2e/uniswap-transfers/tx_sender.go | 44 + .../e2e/uniswap-transfers/uniswap_test.go | 66 + .../pool_processing_erc20_test.go | 109 -- .../sequencer/erc20-transfers/tx_sender.go | 40 - .../sequencer/eth-transfers/tx_sender.go | 47 - .../benchmarks/sequencer/scripts/.env.example | 17 + test/benchmarks/sequencer/scripts/README.md | 51 + .../scripts/common/environment/constants.go | 35 - .../sequencer/scripts/common/results/print.go | 19 - .../scripts/environment/constants.go | 26 + .../scripts/{common => }/environment/init.go | 34 +- .../sequencer/scripts/erc20-transfers/main.go | 48 +- .../sequencer/scripts/eth-transfers/main.go | 37 +- test/benchmarks/sequencer/scripts/main.go | 348 ++++ .../scripts/uniswap-transfers/main.go | 61 + test/config/debug.node.config.toml | 64 +- test/config/test.genesis.config.json | 2 +- test/config/test.node.config.toml | 72 +- test/contracts/auto/ConstructorMap.sol | 14 + test/contracts/auto/FFFFFFFF.sol | 11 + test/contracts/auto/HasOpCode.sol | 24 + 
.../bin/ConstructorMap/ConstructorMap.go | 234 +++ test/contracts/bin/FFFFFFFF/FFFFFFFF.go | 203 ++ test/contracts/bin/HasOpCode/HasOpCode.go | 245 +++ test/docker-compose.yml | 66 +- test/e2e/debug_calltracer_test.go | 6 +- test/e2e/debug_shared.go | 42 + test/e2e/debug_test.go | 9 +- test/e2e/effectivegasprice_test.go | 63 + test/e2e/forced_batches_test.go | 23 +- ...o => forced_batches_vector_group1_test.go} | 121 +- test/e2e/forced_batches_vector_group2_test.go | 134 ++ test/e2e/forced_batches_vector_group3_test.go | 134 ++ test/e2e/gasless_test.go | 89 + test/e2e/jsonrpc2_test.go | 56 +- test/e2e/pool_test.go | 60 + test/e2e/preEIP155_test.go | 66 + test/e2e/shared.go | 104 +- test/e2e/state_test.go | 7 +- test/e2e/uniswap_test.go | 2 +- test/operations/manager.go | 22 +- test/operations/token.go | 10 +- test/scripts/uniswap/main.go | 262 +-- test/scripts/uniswap/pkg/setup.go | 237 +++ test/scripts/uniswap/pkg/swap.go | 74 + test/scripts/uniswap/pkg/types.go | 19 + test/vectors/vectors_v2.go | 2 +- tools/datastreamer/Makefile | 59 + tools/datastreamer/config/config.go | 101 + tools/datastreamer/config/default.go | 35 + tools/datastreamer/config/tool.config.toml | 30 + tools/datastreamer/main.go | 838 +++++++++ tools/executor/main.go | 10 +- tools/rlp/README.md | 2 +- tools/state/README.md | 56 + tools/state/control_flush_id.go | 115 ++ tools/state/estimated_time.go | 32 + tools/state/main.go | 99 + tools/state/output_interface.go | 13 + tools/state/output_pretty.go | 78 + tools/state/reprocess_action.go | 160 ++ tools/state/reprocess_cmd.go | 183 ++ tools/state/version.go | 13 + tools/zkevmprovermock/Dockerfile | 11 - 317 files changed, 21686 insertions(+), 5642 deletions(-) create mode 100644 ci/e2e-group-dac/datacommittee_test.go mode change 100644 => 120000 ci/e2e-group10/forced_batches_test.go mode change 100644 => 120000 ci/e2e-group10/forced_batches_vector_group2_test.go create mode 120000 ci/e2e-group10/shared.go mode change 100644 => 120000 
ci/e2e-group11/forced_batches_vector_group3_test.go create mode 120000 ci/e2e-group11/shared.go create mode 120000 ci/e2e-group2/gasless_test.go mode change 100644 => 120000 ci/e2e-group9/forced_batches_vector_group1_test.go create mode 120000 ci/e2e-group9/shared.go create mode 100644 db/migrations/pool/0011.sql create mode 100644 db/migrations/pool/0011_test.go create mode 100644 db/migrations/pool/utils_test.go create mode 100644 db/migrations/state/0009.sql create mode 100644 db/migrations/state/0009_test.go create mode 100644 db/migrations/state/0010.sql create mode 100644 db/migrations/state/0010_test.go create mode 100644 db/migrations/state/0011.sql create mode 100644 db/migrations/state/0011_test.go create mode 100644 db/migrations/state/0012.sql create mode 100644 db/migrations/state/0012_test.go create mode 100644 docs/design/synchronizer/l1_sync_channels_flow_v2.drawio.png create mode 100644 docs/design/synchronizer/l1_synchronization.md create mode 100644 jsonrpc/wsconn.go create mode 100644 pool/config_test.go create mode 100644 pool/effectivegasprice.go create mode 100644 pool/effectivegasprice_test.go create mode 100644 pool/validation.go create mode 100644 pool/validation_test.go delete mode 100644 sequencer/effective_gas_price.go delete mode 100644 sequencer/effective_gas_price_test.go create mode 100644 state/datastream.go create mode 100644 state/infinite.go create mode 100644 state/queue.go create mode 100644 state/queue_test.go create mode 100644 state/test/datastream_test.go create mode 100644 synchronizer/block_range.go create mode 100644 synchronizer/control_flush_id.go create mode 100644 synchronizer/ext_control.go create mode 100644 synchronizer/generic_cache.go create mode 100644 synchronizer/generic_cache_test.go create mode 100644 synchronizer/l1_common.go create mode 100644 synchronizer/l1_data_message.go create mode 100644 synchronizer/l1_filter_send_orderer_results_to_synchronizer.go create mode 100644 
synchronizer/l1_filter_send_orderer_results_to_synchronizer_test.go create mode 100644 synchronizer/l1_live_block_ranges.go create mode 100644 synchronizer/l1_live_block_ranges_test.go create mode 100644 synchronizer/l1_rollup_info_consumer.go create mode 100644 synchronizer/l1_rollup_info_consumer_statistics.go create mode 100644 synchronizer/l1_rollup_info_consumer_statistics_test.go create mode 100644 synchronizer/l1_rollup_info_consumer_test.go create mode 100644 synchronizer/l1_rollup_info_producer.go create mode 100644 synchronizer/l1_rollup_info_producer_statistics.go create mode 100644 synchronizer/l1_rollup_info_producer_statistics_test.go create mode 100644 synchronizer/l1_rollup_info_producer_test.go create mode 100644 synchronizer/l1_sync_orchestration.go create mode 100644 synchronizer/l1_sync_orchestration_test.go create mode 100644 synchronizer/l1_syncstatus.go create mode 100644 synchronizer/l1_syncstatus_test.go create mode 100644 synchronizer/l1_worker_etherman.go create mode 100644 synchronizer/l1_worker_etherman_test.go create mode 100644 synchronizer/l1_workers.go create mode 100644 synchronizer/l1_workers_decorator_limit_retries_by_time.go create mode 100644 synchronizer/l1_workers_decorator_limit_retries_by_time_test.go create mode 100644 synchronizer/mock_l1_rollup_consumer_interface.go create mode 100644 synchronizer/mock_l1_rollup_producer_interface.go create mode 100644 synchronizer/mock_l1_worker.go create mode 100644 synchronizer/mock_synchronizer_process_block_range.go create mode 100644 synchronizer/mock_workers.go create mode 100644 synchronizer/time_provider.go create mode 100644 test/benchmarks/sequencer/e2e/erc20-transfers/deployment.go create mode 100644 test/benchmarks/sequencer/e2e/erc20-transfers/erc20_test.go create mode 100644 test/benchmarks/sequencer/e2e/erc20-transfers/tx_sender.go rename test/benchmarks/sequencer/{eth-transfers/pool_processing_eth_test.go => e2e/eth-transfers/eth_test.go} (58%) create mode 100644 
test/benchmarks/sequencer/e2e/eth-transfers/tx_sender.go create mode 100644 test/benchmarks/sequencer/e2e/uniswap-transfers/tx_sender.go create mode 100644 test/benchmarks/sequencer/e2e/uniswap-transfers/uniswap_test.go delete mode 100644 test/benchmarks/sequencer/erc20-transfers/pool_processing_erc20_test.go delete mode 100644 test/benchmarks/sequencer/erc20-transfers/tx_sender.go delete mode 100644 test/benchmarks/sequencer/eth-transfers/tx_sender.go create mode 100644 test/benchmarks/sequencer/scripts/.env.example create mode 100644 test/benchmarks/sequencer/scripts/README.md delete mode 100644 test/benchmarks/sequencer/scripts/common/environment/constants.go delete mode 100644 test/benchmarks/sequencer/scripts/common/results/print.go create mode 100644 test/benchmarks/sequencer/scripts/environment/constants.go rename test/benchmarks/sequencer/scripts/{common => }/environment/init.go (63%) create mode 100644 test/benchmarks/sequencer/scripts/main.go create mode 100644 test/benchmarks/sequencer/scripts/uniswap-transfers/main.go create mode 100644 test/contracts/auto/ConstructorMap.sol create mode 100644 test/contracts/auto/FFFFFFFF.sol create mode 100644 test/contracts/auto/HasOpCode.sol create mode 100644 test/contracts/bin/ConstructorMap/ConstructorMap.go create mode 100644 test/contracts/bin/FFFFFFFF/FFFFFFFF.go create mode 100644 test/contracts/bin/HasOpCode/HasOpCode.go create mode 100644 test/e2e/effectivegasprice_test.go rename test/e2e/{forced_batches_vector_test.go => forced_batches_vector_group1_test.go} (53%) create mode 100644 test/e2e/forced_batches_vector_group2_test.go create mode 100644 test/e2e/forced_batches_vector_group3_test.go create mode 100644 test/e2e/gasless_test.go create mode 100644 test/scripts/uniswap/pkg/setup.go create mode 100644 test/scripts/uniswap/pkg/swap.go create mode 100644 test/scripts/uniswap/pkg/types.go create mode 100644 tools/datastreamer/Makefile create mode 100644 tools/datastreamer/config/config.go create mode 
100644 tools/datastreamer/config/default.go create mode 100644 tools/datastreamer/config/tool.config.toml create mode 100644 tools/datastreamer/main.go create mode 100644 tools/state/README.md create mode 100644 tools/state/control_flush_id.go create mode 100644 tools/state/estimated_time.go create mode 100644 tools/state/main.go create mode 100644 tools/state/output_interface.go create mode 100644 tools/state/output_pretty.go create mode 100644 tools/state/reprocess_action.go create mode 100644 tools/state/reprocess_cmd.go create mode 100644 tools/state/version.go delete mode 100644 tools/zkevmprovermock/Dockerfile diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 659c0b6bb1..14cbfb11ab 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,12 +1,19 @@ -*.md @arnaubennassar -aggregator/ @Mikelle @KonradIT -cmd/ @tclemos -db/ @Mikelle @tclemos -docs/ @arnaubennassar -etherman/ @ARR552 @cool-develope -jsonrpc/ @tclemos @KonradIT -pool/ @Mikelle @tclemos @KonradIT -sequencer/ @Mikelle @ToniRamirezM @KonradIT +*.md @agnusmor +aggregator/ @agnusmor +ci/ @agnusmor @tclemos @ToniRamirezM @ARR552 +cmd/ @tclemos @ToniRamirezM @ARR552 +config/ @agnusmor @tclemos @ToniRamirezM @ARR552 +db/ @tclemos @ToniRamirezM @ARR552 +docs/ @agnusmor @joanestebanr +etherman/ @ARR552 @joanestebanr +ethtxmanager/ @tclemos +gasprice/ @ARR552 +jsonrpc/ @tclemos +merkletree/ @ToniRamirezM +pool/ @tclemos +proto/ @ToniRamirezM +sequencer/ @ToniRamirezM @dpunish3r @agnusmor +sequencesender/ @ToniRamirezM @dpunish3r @agnusmor state/ @ToniRamirezM @tclemos -synchronizer/ @ARR552 @cool-develope +synchronizer/ @ARR552 @joanestebanr test/ @tclemos diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c9588d5d48..a801ce1ae3 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -18,5 +18,5 @@ Codeowner reviewers: -- @Alice -- @Bob \ No newline at end of file +- @-Alice +- @-Bob diff --git 
a/.github/workflows/jsonschema.yml b/.github/workflows/jsonschema.yml index 9caaa30aae..ddc25c65fe 100644 --- a/.github/workflows/jsonschema.yml +++ b/.github/workflows/jsonschema.yml @@ -14,7 +14,7 @@ jobs: json-schema: strategy: matrix: - go-version: [ 1.19.x ] + go-version: [ 1.21.x ] goarch: [ "amd64" ] runs-on: ubuntu-latest steps: @@ -32,11 +32,13 @@ jobs: env: GOARCH: ${{ matrix.goarch }} - - uses: actions/setup-python@v1 - - uses: BSFishy/pip-action@v1 + - uses: actions/setup-python@v4 with: - packages: | - json-schema-for-humans + python-version: '3.10' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install json-schema-for-humans - name: Check if JSON schema and generated doc is up to date run: | diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 5d2d399ac4..3b3807666d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -15,7 +15,7 @@ jobs: - name: Install Go uses: actions/setup-go@v3 with: - go-version: 1.19.x + go-version: 1.21.x - name: Checkout code uses: actions/checkout@v3 - name: Lint diff --git a/.github/workflows/push-docker-develop.yml b/.github/workflows/push-docker-develop.yml index c3a9a69527..aaa824348e 100644 --- a/.github/workflows/push-docker-develop.yml +++ b/.github/workflows/push-docker-develop.yml @@ -11,20 +11,20 @@ jobs: uses: actions/checkout@v3 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Login to DockerHub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push id: docker_build - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v5 with: platforms: linux/amd64,linux/arm64 push: true diff --git a/.github/workflows/push-docker-tagged.yml 
b/.github/workflows/push-docker-tagged.yml index 402e71e466..d8f6061507 100644 --- a/.github/workflows/push-docker-tagged.yml +++ b/.github/workflows/push-docker-tagged.yml @@ -11,20 +11,20 @@ jobs: uses: actions/checkout@v3 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Login to DockerHub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push id: docker_build - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v5 with: context: . platforms: linux/amd64,linux/arm64 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ee1810d11e..aef92e6d19 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -3,8 +3,7 @@ name: release on: push: tags: - - 'v[0-9]+.[0-9]+.[0-9]' # this action will only run on tags that follow semver - + - 'v[0-9]+.[0-9]+.[0-9]+' # this action will only run on tags that follow semver jobs: releaser: runs-on: ubuntu-latest @@ -17,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: 1.21 - name: Get packr run: go install github.com/gobuffalo/packr/v2/packr2@v2.8.3 @@ -41,6 +40,7 @@ jobs: mkdir -p testnet/db/scripts cp config/environments/testnet/* testnet/config/environments/testnet cp docker-compose.yml testnet + sed -i 's/\/config\/environments\/${ZKEVM_NETWORK}/\/config\/environments\/testnet/g' testnet/docker-compose.yml cp db/scripts/init_prover_db.sql testnet/db/scripts mv testnet/config/environments/testnet/example.env testnet sed -i -e "s/image: zkevm-node/image: hermeznetwork\/zkevm-node:$GIT_TAG_NAME/g" testnet/docker-compose.yml @@ -50,6 +50,7 @@ jobs: mkdir -p mainnet/db/scripts cp config/environments/mainnet/* mainnet/config/environments/mainnet cp 
docker-compose.yml mainnet + sed -i 's/\/config\/environments\/${ZKEVM_NETWORK}/\/config\/environments\/mainnet/g' mainnet/docker-compose.yml cp db/scripts/init_prover_db.sql mainnet/db/scripts mv mainnet/config/environments/mainnet/example.env mainnet sed -i -e "s/image: zkevm-node/image: hermeznetwork\/zkevm-node:$GIT_TAG_NAME/g" mainnet/docker-compose.yml diff --git a/.github/workflows/sonarqube.yml b/.github/workflows/sonarqube.yml index 30c260dbd9..6ea34936d8 100644 --- a/.github/workflows/sonarqube.yml +++ b/.github/workflows/sonarqube.yml @@ -4,16 +4,27 @@ on: push: branches: - develop + - feature/sonarcloud-coverage jobs: sonarqube: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: # Disabling shallow clone is recommended for improving relevancy of reporting. fetch-depth: 0 + - name: Compile SCs + run: make compile-scs + working-directory: test + + - name: Test + env: + ZKPROVER_URI: 127.0.0.1 + run: make test-full-non-e2e + working-directory: test + # Triggering SonarQube analysis as results of it are required by Quality Gate check. 
- name: SonarQube Scan uses: sonarsource/sonarqube-scan-action@master diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 4439845050..18207220c6 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - go-version: [ 1.19.x ] + go-version: [ 1.21.x ] goarch: [ "amd64" ] e2e-group: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, dac-1 ] runs-on: ubuntu-latest diff --git a/.github/workflows/test-from-prover.yml b/.github/workflows/test-from-prover.yml index f10f96ab1f..45a5671325 100644 --- a/.github/workflows/test-from-prover.yml +++ b/.github/workflows/test-from-prover.yml @@ -17,7 +17,7 @@ jobs: strategy: matrix: - go-version: [ 1.19.x ] + go-version: [ 1.21.x ] goarch: [ "amd64" ] e2e-group: [ 2 ] @@ -35,7 +35,7 @@ jobs: GOARCH: ${{ matrix.goarch }} - name: Login to DockerHub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/test-full-non-e2e.yml b/.github/workflows/test-full-non-e2e.yml index 723726cb8c..1be90483bb 100644 --- a/.github/workflows/test-full-non-e2e.yml +++ b/.github/workflows/test-full-non-e2e.yml @@ -14,7 +14,7 @@ jobs: test-full-non-e2e: strategy: matrix: - go-version: [ 1.19.x ] + go-version: [ 1.21.x ] goarch: [ "amd64" ] runs-on: ubuntu-latest steps: diff --git a/.github/workflows/updatedeps.yml b/.github/workflows/updatedeps.yml index 7f0896525c..e4a0b69884 100644 --- a/.github/workflows/updatedeps.yml +++ b/.github/workflows/updatedeps.yml @@ -14,7 +14,7 @@ jobs: - name: Install Go uses: actions/setup-go@v3 with: - go-version: "1.19.x" + go-version: "1.21.x" env: GOARCH: "amd64" diff --git a/.gitignore b/.gitignore index 4907cd4699..9df139c8cb 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,11 @@ /test/contracts/bin/**/*.bin /test/contracts/bin/**/*.abi +/tools/datastreamer/*.bin 
+/test/datastreamer/*.db/* +/test/*.bin +/test/*.db/* + **/.DS_Store .vscode .idea/ @@ -18,4 +23,6 @@ out.dat cmd/__debug_bin -.venv \ No newline at end of file +.venv + +*metrics.txt \ No newline at end of file diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 21264f08be..09519520d1 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -9,6 +9,11 @@ builds: - arm64 env: - CGO_ENABLED=0 + ldflags: + - -X github.com/0xPolygonHermez/zkevm-node.Version={{.Version}} + - -X github.com/0xPolygonHermez/zkevm-node.GitRev={{.Commit}} + - -X github.com/0xPolygonHermez/zkevm-node.BuildDate={{.Date}} + - -X github.com/0xPolygonHermez/zkevm-node.GitBranch={{.Branch}} release: # If set to auto, will mark the release as not ready for production # in case there is an indicator for this in the tag e.g. v1.0.0-rc1 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 872be190d5..3393d643e4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,22 +2,22 @@ This document addresses how we should create PRs, give and receive reviews. The motivation is to have better code, reduce the time from creation to merge while sharing knowledge and insights that help everyone becoming better developers. -Note that non of this is a hard rule, but suggestions / guidelines. Although everyone is encouraged to stick to this points as much as posible. Use your common sense if some of this do not apply well on a particular PR +Note that non of this is a hard rule, but suggestions / guidelines. Although everyone is encouraged to stick to this points as much as possible. Use your common sense if some of this do not apply well on a particular PR ## How to create a good PR - Follow the template, unless for some reason it doesn't fit the content of the PR - Try hard on doing small PRs (> ~400 lines), in general is better to have 2 small PRs rather than a big one - Indicate clearly who should review it, ideally 2 team mates -- Author of the PR is responsible for merging. 
Never do it until you have the aproval of the specified reviewers unless you have their explicit permision +- Author of the PR is responsible for merging. Never do it until you have the approval of the specified reviewers unless you have their explicit permission - Introduce the purpose of the PR, for example: `Fixes the handle of ...` - Give brief context on why this is being done and link it to any relevant issue - Feel free to ask to specific team mates to review specific parts of the PR ## How to do a good review -- In general it's hard to set a quality treshold for changes. A good measure for when to approve is to accept changes once the overall quality of the code has been improved (compared to the code base before the PR) -- Try hard to avoid taking things personaly. For instance avoid using `I`, `you`, `I (don't) like`, ... +- In general it's hard to set a quality threshold for changes. A good measure for when to approve is to accept changes once the overall quality of the code has been improved (compared to the code base before the PR) +- Try hard to avoid taking things personally. For instance avoid using `I`, `you`, `I (don't) like`, ... - Ask, don’t tell. ("What about trying...?" rather than "Don’t do...") - Try to use positive language. You can even use emoji to clarify tone. - Be super clear on how confident you are when requesting changes. 
One way to do it is by starting the message like this: diff --git a/Dockerfile b/Dockerfile index d16343fb3b..fc702a93fc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # CONTAINER FOR BUILDING BINARY -FROM golang:1.19 AS build +FROM golang:1.21 AS build # INSTALL DEPENDENCIES RUN go install github.com/gobuffalo/packr/v2/packr2@v2.8.3 diff --git a/Makefile b/Makefile index be2af6d9c1..1ad9666453 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ ARCH := $(shell arch) ifeq ($(ARCH),x86_64) ARCH = amd64 -else +else ifeq ($(ARCH),aarch64) ARCH = arm64 endif @@ -26,8 +26,53 @@ VENV_PYTHON = $(VENV)/bin/python SYSTEM_PYTHON = $(or $(shell which python3), $(shell which python)) PYTHON = $(or $(wildcard $(VENV_PYTHON)), "install_first_venv") GENERATE_SCHEMA_DOC = $(VENV)/bin/generate-schema-doc -GENERATE_DOC_PATH= "docs/config-file/" -GENERATE_DOC_TEMPLATES_PATH= "docs/config-file/templates/" +GENERATE_DOC_PATH = "docs/config-file/" +GENERATE_DOC_TEMPLATES_PATH = "docs/config-file/templates/" + +# Check dependencies +# Check for Go +.PHONY: check-go +check-go: + @which go > /dev/null || (echo "Error: Go is not installed" && exit 1) + +# Check for Docker +.PHONY: check-docker +check-docker: + @which docker > /dev/null || (echo "Error: docker is not installed" && exit 1) + +# Check for Docker-compose +.PHONY: check-docker-compose +check-docker-compose: + @which docker-compose > /dev/null || (echo "Error: docker-compose is not installed" && exit 1) + +# Check for Protoc +.PHONY: check-protoc +check-protoc: + @which protoc > /dev/null || (echo "Error: Protoc is not installed" && exit 1) + +# Check for Python +.PHONY: check-python +check-python: + @which python3 > /dev/null || which python > /dev/null || (echo "Error: Python is not installed" && exit 1) + +# Check for Curl +.PHONY: check-curl +check-curl: + @which curl > /dev/null || (echo "Error: curl is not installed" && exit 1) + +# Targets that require the checks +build: check-go +lint: check-go +build-docker: 
check-docker +build-docker-nc: check-docker +run-rpc: check-docker check-docker-compose +stop: check-docker check-docker-compose +install-linter: check-go check-curl +install-config-doc-gen: check-python +config-doc-node: check-go check-python +config-doc-custom_network: check-go check-python +update-external-dependencies: check-go +generate-code-from-proto: check-protoc .PHONY: build build: ## Builds the binary locally into ./dist @@ -42,7 +87,7 @@ build-docker-nc: ## Builds a docker image with the node binary - but without bui docker build --no-cache=true -t x1-node -f ./Dockerfile . .PHONY: run-rpc -run-rpc: ## Runs all the services need to run a local zkEMV RPC node +run-rpc: ## Runs all the services needed to run a local zkEVM RPC node docker-compose up -d x1-state-db x1-pool-db sleep 2 docker-compose up -d x1-prover @@ -57,13 +102,12 @@ stop: ## Stops all services .PHONY: install-linter install-linter: ## Installs the linter - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin v1.52.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin v1.54.2 .PHONY: lint lint: ## Runs the linter export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/golangci-lint run - $(VENV_PYTHON): rm -rf $(VENV) $(SYSTEM_PYTHON) -m venv $(VENV) @@ -76,38 +120,36 @@ $(GENERATE_SCHEMA_DOC): $(VENV_PYTHON) $(PYTHON) -m pip install --upgrade pip $(PYTHON) -m pip install json-schema-for-humans -PHONY: config-doc-gen -config-doc-gen: config-doc-node config-doc-custom_network ## Generate config file's json-schema for node and custom_network and documentation - # +.PHONY: config-doc-gen +config-doc-gen: config-doc-node config-doc-custom_network ## Generate config file's json-schema for node and custom_network and documentation .PHONY: config-doc-node config-doc-node: $(GENERATE_SCHEMA_DOC) ## Generate config file's json-schema for node and 
documentation go run ./cmd generate-json-schema --config-file=node --output=$(GENERATE_DOC_PATH)node-config-schema.json $(GENERATE_SCHEMA_DOC) --config show_breadcrumbs=true \ - --config footer_show_time=false \ - --config expand_buttons=true \ - --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/js/base.html \ - $(GENERATE_DOC_PATH)node-config-schema.json \ - $(GENERATE_DOC_PATH)node-config-doc.html + --config footer_show_time=false \ + --config expand_buttons=true \ + --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/js/base.html \ + $(GENERATE_DOC_PATH)node-config-schema.json \ + $(GENERATE_DOC_PATH)node-config-doc.html $(GENERATE_SCHEMA_DOC) --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/md/base.md \ - --config footer_show_time=false \ - $(GENERATE_DOC_PATH)node-config-schema.json \ - $(GENERATE_DOC_PATH)node-config-doc.md + --config footer_show_time=false \ + $(GENERATE_DOC_PATH)node-config-schema.json \ + $(GENERATE_DOC_PATH)node-config-doc.md .PHONY: config-doc-custom_network config-doc-custom_network: $(GENERATE_SCHEMA_DOC) ## Generate config file's json-schema for custom_network and documentation go run ./cmd generate-json-schema --config-file=custom_network --output=$(GENERATE_DOC_PATH)custom_network-config-schema.json $(GENERATE_SCHEMA_DOC) --config show_breadcrumbs=true --config footer_show_time=false \ - --config expand_buttons=true \ - --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/js/base.html \ - $(GENERATE_DOC_PATH)custom_network-config-schema.json \ - $(GENERATE_DOC_PATH)custom_network-config-doc.html + --config expand_buttons=true \ + --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/js/base.html \ + $(GENERATE_DOC_PATH)custom_network-config-schema.json \ + $(GENERATE_DOC_PATH)custom_network-config-doc.html $(GENERATE_SCHEMA_DOC) --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/md/base.md \ - --config footer_show_time=false \ - --config example_format=JSON \ - 
$(GENERATE_DOC_PATH)custom_network-config-schema.json \ - $(GENERATE_DOC_PATH)custom_network-config-doc.md - + --config footer_show_time=false \ + --config example_format=JSON \ + $(GENERATE_DOC_PATH)custom_network-config-schema.json \ + $(GENERATE_DOC_PATH)custom_network-config-doc.md .PHONY: update-external-dependencies update-external-dependencies: ## Updates external dependencies like images, test vectors or proto files @@ -130,6 +172,6 @@ generate-code-from-proto: ## Generates code from proto files .PHONY: help help: ## Prints this help - @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) \ - | sort \ - | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) \ + | sort \ + | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/README.md b/README.md index df3759f3a1..e375253ab0 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,6 @@ Glossary: - Consolidated state: state that is proven on-chain by submitting a ZKP (Zero Knowledge Proof) that proves the execution of a sequence of the last virtual batch. - Invalid transaction: a transaction that can't be processed and doesn't affect the state. Note that such a transaction could be included in a virtual batch. The reason for a transaction to be invalid could be related to the Ethereum protocol (invalid nonce, not enough balance, ...) or due to limitations introduced by the X1 (each batch can make use of a limited amount of resources such as the total amount of keccak hashes that can be computed) - Reverted transaction: a transaction that is executed, but is reverted (because of smart contract logic). The main difference with *invalid transaction* is that this transaction modifies the state, at least to increment nonce of the sender. 
-- Proof of Efficiency (PoE): name of the protocol used by the network, it's enforced by the [smart contracts](https://github.com/okx/x1-contracts) ## Architecture @@ -32,19 +31,21 @@ Glossary: The diagram represents the main components of the software and how they interact between them. Note that this reflects a single entity running a node, in particular a node that acts as the trusted sequencer. But there are many entities running nodes in the network, and each of these entities can perform different roles. More on this later. -- (JSON) RPC: an interface that allows users (metamask, etherscan, ...) to interact with the node. Fully compatible with Ethereum RPC + some extra endpoints specifics of the network. It interacts with the `state` to get data and process transactions and with the `pool` to store transactions +- (JSON) RPC: an HTTP interface that allows users (dApps, metamask, etherscan, ...) to interact with the node. Fully compatible with Ethereum RPC + some extra [custom endpoints]() specifics of the network. It interacts with the `state` (to get data and process transactions) as well as the `pool` (to store transactions). +- L2GasPricer: it fetches the L1 gas price and applies some formula to calculate the gas price that will be suggested for the users to use for paying fees on L2. The suggestions are stored on the `pool`, and will be consumed by the `rpc` - Pool: DB that stores transactions by the `RPC` to be selected/discarded by the `sequencer` later on -- Trusted Sequencer: get transactions from the `pool`, check if they are valid by processing them using the `state`, and create sequences. Once transactions are added into the state, they are immediately available through the `rpc`. Sequences are sent to L1 using the `etherman` -- Permissionless Sequencer: *coming soon* +- Sequencer: responsible for building the trusted state. To do so, it gets transactions from the pool and puts them in a specific order. 
It needs to take care of opening and closing batches while trying to make them as full as possible. To achieve this it needs to use the executor to actually process the transaction not only to execute the state transition (and update the hashDB) but also to check the consumed resources by the transactions and the remaining resources of the batch. After executing a transaction that fits into a batch, it gets stored on the `state`. Once transactions are added into the state, they are immediately available through the `rpc`. +- SequenceSender: gets closed batches from the `state`, tries to aggregate as many of them as possible, and at some point, decides that it's time to send those batches to L1, turning the state from trusted to virtualized. In order to send the L1 tx, it uses the `ethtxmanager` +- EthTxManager: handles requests to send L1 transactions from `sequencesender` and `aggregator`. It takes care of dealing with the nonce of the accounts, increasing the gas price, and other actions that may be needed to ensure that L1 transactions get mined - Etherman: abstraction that implements the needed methods to interact with the Ethereum network and the relevant smart contracts. -- Synchronizer: Updates the `state` by fetching data from Ethereum through the `etherman`. If the node is not a `trusted sequencer` it also updates the state with the data fetched from the `rpc` of the `trusted sequencer`. It also detects and handles reorgs that can happen if the `trusted sequencer` sends different data in the rpc vs the sequences sent to L1 (trusted vs virtual state) +- Synchronizer: Updates the `state` (virtual batches, verified batches, forced batches, ...) by fetching data from L1 through the `etherman`. If the node is not a `trusted sequencer` it also updates the state with the data fetched from the `rpc` of the `trusted sequencer`. 
It also detects and handles reorgs that can happen if the `trusted sequencer` sends different data in the rpc vs the sequences sent to L1 (trusted reorg aka L2 reorg). Also handles L1 reorgs (reorgs that happen on the L1 network) - State: Responsible for managing the state data (batches, blocks, transactions, ...) that is stored on the `state SB`. It also handles the integration with the `executor` and the `Merkletree` service -- State DB: persistence layer for the state data (except the Merkletree that is handled by the `Merkletree` service) -- Aggregator: consolidates batches by generating ZKPs (Zero Knowledge proofs). To do so it gathers the necessary data that the `prover` needs as input through the `state` and sends a request to it. Once the proof is generated it's sent to Ethereum through the `etherman` -- Prover/Executor: service that generates ZK proofs. Note that this component is not implemented in this repository, and it's treated as a "black box" from the perspective of the node. The prover/executor has two implementations: [JS reference implementation](https://github.com/0xPolygonHermez/zkevm-proverjs) and [C production-ready implementation](https://github.com/okx/x1-prover). Although it's the same software/service, it has two very different purposes: - - Provide an EVM implementation that allows processing transactions and getting all needed results metadata (state root, receipts, logs, ...) - - Generate ZKPs -- Merkletree: service that stores the Merkletree, containing all the account information (balances, nonces, smart contract code, and smart contract storage). This component is also not implemented in this repo and is consumed as an external service by the node. The implementation can be found [here](https://github.com/okx/x1-prover) +- State DB: persistence layer for the state data (except the Merkletree that is handled by the `HashDB` service), it stores informationrelated to L1 (blocks, global exit root updates, ...) 
and L2 (batches, L2 blocks, transactions, ...) +- Aggregator: consolidates batches by generating ZKPs (Zero Knowledge proofs). To do so it gathers the necessary data that the `prover` needs as input through the `state` and sends a request to it. Once the proof is generated it sends a request to send an L1 tx to verify the proof and move the state from virtual to verified to the `ethtxmanager`. Note that provers connect to the aggregator and not the other way arround. The aggregator can handle multiple connected provers at once and make them work concurrently in the generation of different proofs +- Prover/Executor/hashDB: service that generates ZK proofs. Note that this component is not implemented in this repository, and it's treated as a "black box" from the perspective of the node. The prover/executor has two implementations: [JS reference implementation](https://github.com/okx/x1-proverjs) and [C production-ready implementation](https://github.com/okx/x1-prover). Although it's the same software/binary, it implements three services: + - Executor: Provides an EVM implementation that allows processing batches as well as getting metadata (state root, transaction receipts, logs, ...) of all the needed results. + - Prover: Generates ZKPs for batches, batches aggregation, and final proofs. + - HashDB: service that stores the Merkletree, containing all the account information (balances, nonces, smart contract code, and smart contract storage) ## Roles of the network @@ -64,7 +65,7 @@ Required services and components: There must be only one synchronizer, and it's recommended that it has exclusive access to an executor instance, although it's not necessary. 
This role can perfectly be run in a single instance, however, the JSON RPC and executor services can benefit from running in multiple instances, if the performance decreases due to the number of requests received - [`X1 RPC endpoints`](./docs/json-rpc-endpoints.md) -- [`X1 RPC Custom endpoints documentation`](./docs/zkEVM-custom-endpoints.md) +- [`X1 RPC Custom endpoints documentation`]() ### Trusted sequencer @@ -80,10 +81,6 @@ Required services and components: Note that the JSON RPC is required to receive transactions. It's recommended that the JSON RPC runs on separated instances, and potentially more than one (depending on the load of the network). It's also recommended that the JSON RPC and the Sequencer don't share the same executor instance, to make sure that the sequencer has exclusive access to an executor -### Permissionless sequencer - -TBD - ### Aggregator This role can be performed by anyone. @@ -110,7 +107,7 @@ It's recommended to use `make` for building, and testing the code, ... 
Run `make ### Requirements -- Go 1.19 +- Go 1.21 - Docker - Docker Compose - Make diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 751000af1f..3764292ce6 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -275,10 +275,10 @@ func (a *Aggregator) sendFinalProof() { continue } monitoredTxID := buildMonitoredTxID(proof.BatchNumber, proof.BatchNumberFinal) - err = a.EthTxManager.Add(ctx, ethTxManagerOwner, monitoredTxID, sender, to, nil, data, nil) + err = a.EthTxManager.Add(ctx, ethTxManagerOwner, monitoredTxID, sender, to, nil, data, a.cfg.GasOffset, nil) if err != nil { - log := log.WithFields("tx", monitoredTxID) - log.Errorf("Error to add batch verification tx to eth tx manager: %v", err) + mTxLogger := ethtxmanager.CreateLogger(ethTxManagerOwner, monitoredTxID, sender, to) + mTxLogger.Errorf("Error to add batch verification tx to eth tx manager: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) continue } @@ -973,7 +973,7 @@ func (a *Aggregator) isSynced(ctx context.Context, batchNum *uint64) bool { func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.Batch) (*prover.InputProver, error) { previousBatch, err := a.State.GetBatchByNumber(ctx, batchToVerify.BatchNumber-1, nil) - if err != nil && err != state.ErrStateNotSynchronized { + if err != nil && err != state.ErrNotFound { return nil, fmt.Errorf("failed to get previous batch, err: %v", err) } @@ -1027,9 +1027,9 @@ func (hc *healthChecker) Watch(req *grpchealth.HealthCheckRequest, server grpche } func (a *Aggregator) handleMonitoredTxResult(result ethtxmanager.MonitoredTxResult) { - resLog := log.WithFields("owner", ethTxManagerOwner, "txId", result.ID) + mTxResultLogger := ethtxmanager.CreateMonitoredTxResultLogger(ethTxManagerOwner, result) if result.Status == ethtxmanager.MonitoredTxStatusFailed { - resLog.Fatal("failed to send batch verification, TODO: review this fatal and define what to do in this case") + 
mTxResultLogger.Fatal("failed to send batch verification, TODO: review this fatal and define what to do in this case") } // monitoredIDFormat: "proof-from-%v-to-%v" @@ -1037,13 +1037,13 @@ func (a *Aggregator) handleMonitoredTxResult(result ethtxmanager.MonitoredTxResu proofBatchNumberStr := idSlice[2] proofBatchNumber, err := strconv.ParseUint(proofBatchNumberStr, encoding.Base10, 0) if err != nil { - resLog.Errorf("failed to read final proof batch number from monitored tx: %v", err) + mTxResultLogger.Errorf("failed to read final proof batch number from monitored tx: %v", err) } proofBatchNumberFinalStr := idSlice[4] proofBatchNumberFinal, err := strconv.ParseUint(proofBatchNumberFinalStr, encoding.Base10, 0) if err != nil { - resLog.Errorf("failed to read final proof batch number final from monitored tx: %v", err) + mTxResultLogger.Errorf("failed to read final proof batch number final from monitored tx: %v", err) } log := log.WithFields("txId", result.ID, "batches", fmt.Sprintf("%d-%d", proofBatchNumber, proofBatchNumberFinal)) diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index d6e8166d04..9303c44454 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -54,7 +54,7 @@ func TestSendFinalProof(t *testing.T) { BatchNumberFinal: batchNumFinal, } finalProof := &prover.FinalProof{} - cfg := Config{SenderAddress: from.Hex()} + cfg := Config{SenderAddress: from.Hex(), GasOffset: uint64(10)} testCases := []struct { name string @@ -135,7 +135,7 @@ func TestSendFinalProof(t *testing.T) { assert.True(a.verifyingProof) }).Return(&to, data, nil).Once() monitoredTxID := buildMonitoredTxID(batchNum, batchNumFinal) - m.ethTxManager.On("Add", mock.Anything, ethTxManagerOwner, monitoredTxID, from, &to, value, data, nil).Return(errBanana).Once() + m.ethTxManager.On("Add", mock.Anything, ethTxManagerOwner, monitoredTxID, from, &to, value, data, cfg.GasOffset, nil).Return(errBanana).Once() m.stateMock.On("UpdateGeneratedProof", 
mock.Anything, recursiveProof, nil).Run(func(args mock.Arguments) { // test is done, stop the sendFinalProof method a.exit() @@ -160,7 +160,7 @@ func TestSendFinalProof(t *testing.T) { assert.True(a.verifyingProof) }).Return(&to, data, nil).Once() monitoredTxID := buildMonitoredTxID(batchNum, batchNumFinal) - m.ethTxManager.On("Add", mock.Anything, ethTxManagerOwner, monitoredTxID, from, &to, value, data, nil).Return(nil).Once() + m.ethTxManager.On("Add", mock.Anything, ethTxManagerOwner, monitoredTxID, from, &to, value, data, cfg.GasOffset, nil).Return(nil).Once() ethTxManResult := ethtxmanager.MonitoredTxResult{ ID: monitoredTxID, Status: ethtxmanager.MonitoredTxStatusConfirmed, diff --git a/aggregator/config.go b/aggregator/config.go index d654a1aaf3..fd966f2223 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -73,4 +73,16 @@ type Config struct { // which a proof in generating state is considered to be stuck and // allowed to be cleared. GeneratingProofCleanupThreshold string `mapstructure:"GeneratingProofCleanupThreshold"` + + // GasOffset is the amount of gas to be added to the gas estimation in order + // to provide an amount that is higher than the estimated one. This is used + // to avoid the TX getting reverted in case something has changed in the network + // state after the estimation which can cause the TX to require more gas to be + // executed. + // + // ex: + // gas estimation: 1000 + // gas offset: 100 + // final gas: 1100 + GasOffset uint64 `mapstructure:"GasOffset"` } diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 119ea48291..8f0bc17cdb 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -29,7 +29,7 @@ type proverInterface interface { // ethTxManager contains the methods required to send txs to // ethereum. 
type ethTxManager interface { - Add(ctx context.Context, owner, id string, from common.Address, to *common.Address, value *big.Int, data []byte, dbTx pgx.Tx) error + Add(ctx context.Context, owner, id string, from common.Address, to *common.Address, value *big.Int, data []byte, gasOffset uint64, dbTx pgx.Tx) error Result(ctx context.Context, owner, id string, dbTx pgx.Tx) (ethtxmanager.MonitoredTxResult, error) ResultsByStatus(ctx context.Context, owner string, statuses []ethtxmanager.MonitoredTxStatus, dbTx pgx.Tx) ([]ethtxmanager.MonitoredTxResult, error) ProcessPendingMonitoredTxs(ctx context.Context, owner string, failedResultHandler ethtxmanager.ResultHandler, dbTx pgx.Tx) diff --git a/aggregator/mocks/mock_dbtx.go b/aggregator/mocks/mock_dbtx.go index 8ad33d476e..fab84a2baa 100644 --- a/aggregator/mocks/mock_dbtx.go +++ b/aggregator/mocks/mock_dbtx.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -283,13 +283,12 @@ func (_m *DbTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResult return r0 } -type mockConstructorTestingTNewDbTxMock interface { +// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDbTxMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDbTxMock(t mockConstructorTestingTNewDbTxMock) *DbTxMock { +}) *DbTxMock { mock := &DbTxMock{} mock.Mock.Test(t) diff --git a/aggregator/mocks/mock_etherman.go b/aggregator/mocks/mock_etherman.go index 50831aac38..0850f326e0 100644 --- a/aggregator/mocks/mock_etherman.go +++ b/aggregator/mocks/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. 
DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -73,13 +73,12 @@ func (_m *Etherman) GetLatestVerifiedBatchNum() (uint64, error) { return r0, r1 } -type mockConstructorTestingTNewEtherman interface { +// NewEtherman creates a new instance of Etherman. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEtherman(t interface { mock.TestingT Cleanup(func()) -} - -// NewEtherman creates a new instance of Etherman. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEtherman(t mockConstructorTestingTNewEtherman) *Etherman { +}) *Etherman { mock := &Etherman{} mock.Mock.Test(t) diff --git a/aggregator/mocks/mock_ethtxmanager.go b/aggregator/mocks/mock_ethtxmanager.go index 8aeae6304a..17141e8287 100644 --- a/aggregator/mocks/mock_ethtxmanager.go +++ b/aggregator/mocks/mock_ethtxmanager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package mocks @@ -20,13 +20,13 @@ type EthTxManager struct { mock.Mock } -// Add provides a mock function with given fields: ctx, owner, id, from, to, value, data, dbTx -func (_m *EthTxManager) Add(ctx context.Context, owner string, id string, from common.Address, to *common.Address, value *big.Int, data []byte, dbTx pgx.Tx) error { - ret := _m.Called(ctx, owner, id, from, to, value, data, dbTx) +// Add provides a mock function with given fields: ctx, owner, id, from, to, value, data, gasOffset, dbTx +func (_m *EthTxManager) Add(ctx context.Context, owner string, id string, from common.Address, to *common.Address, value *big.Int, data []byte, gasOffset uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, owner, id, from, to, value, data, gasOffset, dbTx) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, common.Address, *common.Address, *big.Int, []byte, pgx.Tx) error); ok { - r0 = rf(ctx, owner, id, from, to, value, data, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, string, string, common.Address, *common.Address, *big.Int, []byte, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, owner, id, from, to, value, data, gasOffset, dbTx) } else { r0 = ret.Error(0) } @@ -89,13 +89,12 @@ func (_m *EthTxManager) ResultsByStatus(ctx context.Context, owner string, statu return r0, r1 } -type mockConstructorTestingTNewEthTxManager interface { +// NewEthTxManager creates a new instance of EthTxManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthTxManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewEthTxManager creates a new instance of EthTxManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewEthTxManager(t mockConstructorTestingTNewEthTxManager) *EthTxManager { +}) *EthTxManager { mock := &EthTxManager{} mock.Mock.Test(t) diff --git a/aggregator/mocks/mock_profitabilitychecker.go b/aggregator/mocks/mock_profitabilitychecker.go index 870e791f64..af64de9ada 100644 --- a/aggregator/mocks/mock_profitabilitychecker.go +++ b/aggregator/mocks/mock_profitabilitychecker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -38,13 +38,12 @@ func (_m *ProfitabilityCheckerMock) IsProfitable(_a0 context.Context, _a1 *big.I return r0, r1 } -type mockConstructorTestingTNewProfitabilityCheckerMock interface { +// NewProfitabilityCheckerMock creates a new instance of ProfitabilityCheckerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProfitabilityCheckerMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewProfitabilityCheckerMock creates a new instance of ProfitabilityCheckerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProfitabilityCheckerMock(t mockConstructorTestingTNewProfitabilityCheckerMock) *ProfitabilityCheckerMock { +}) *ProfitabilityCheckerMock { mock := &ProfitabilityCheckerMock{} mock.Mock.Test(t) diff --git a/aggregator/mocks/mock_prover.go b/aggregator/mocks/mock_prover.go index 0e7a01384b..0cb86c6059 100644 --- a/aggregator/mocks/mock_prover.go +++ b/aggregator/mocks/mock_prover.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -208,13 +208,12 @@ func (_m *ProverMock) WaitRecursiveProof(ctx context.Context, proofID string) (s return r0, r1 } -type mockConstructorTestingTNewProverMock interface { +// NewProverMock creates a new instance of ProverMock. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProverMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewProverMock creates a new instance of ProverMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProverMock(t mockConstructorTestingTNewProverMock) *ProverMock { +}) *ProverMock { mock := &ProverMock{} mock.Mock.Test(t) diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go index 75d5e26c26..7d46130a8a 100644 --- a/aggregator/mocks/mock_state.go +++ b/aggregator/mocks/mock_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -299,13 +299,12 @@ func (_m *StateMock) UpdateGeneratedProof(ctx context.Context, proof *state.Proo return r0 } -type mockConstructorTestingTNewStateMock interface { +// NewStateMock creates a new instance of StateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewStateMock creates a new instance of StateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewStateMock(t mockConstructorTestingTNewStateMock) *StateMock { +}) *StateMock { mock := &StateMock{} mock.Mock.Test(t) diff --git a/aggregator/prover/aggregator.pb.go b/aggregator/prover/aggregator.pb.go index 1b54fe910f..84392a69a2 100644 --- a/aggregator/prover/aggregator.pb.go +++ b/aggregator/prover/aggregator.pb.go @@ -7,10 +7,11 @@ package prover import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( diff --git a/aggregator/prover/aggregator_grpc.pb.go b/aggregator/prover/aggregator_grpc.pb.go index c2c1b6ed54..e02dc31091 100644 --- a/aggregator/prover/aggregator_grpc.pb.go +++ b/aggregator/prover/aggregator_grpc.pb.go @@ -8,6 +8,7 @@ package prover import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/ci/e2e-group-dac/datacommittee_test.go b/ci/e2e-group-dac/datacommittee_test.go new file mode 100644 index 0000000000..da9b4929e5 --- /dev/null +++ b/ci/e2e-group-dac/datacommittee_test.go @@ -0,0 +1 @@ +package e2e_group_dac diff --git a/ci/e2e-group10/forced_batches_test.go b/ci/e2e-group10/forced_batches_test.go deleted file mode 100644 index be8b835406..0000000000 --- a/ci/e2e-group10/forced_batches_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package e2e - -import ( - "context" - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/core/types" - - "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm" - "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevmglobalexitroot" - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/0xPolygonHermez/zkevm-node/test/constants" - 
"github.com/0xPolygonHermez/zkevm-node/test/operations" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/require" -) - -const ( - toAddressHex = "0x4d5Cf5032B2a844602278b01199ED191A86c93ff" - gerFinalityBlocks = uint64(2500) - forkID5 = 5 -) - -var ( - toAddress = common.HexToAddress(toAddressHex) -) - -func TestForcedBatches(t *testing.T) { - if testing.Short() { - t.Skip() - } - - defer func() { - require.NoError(t, operations.Teardown()) - }() - - var err error - nTxs := 10 - ctx := context.Background() - opsman, auth, client, amount, gasLimit, gasPrice, nonce := setupEnvironment(ctx, t) - - txs := make([]*types.Transaction, 0, nTxs) - for i := 0; i < nTxs; i++ { - tx := types.NewTransaction(nonce, toAddress, amount, gasLimit, gasPrice, nil) - nonce = nonce + 1 - txs = append(txs, tx) - } - - var l2BlockNumbers []*big.Int - l2BlockNumbers, err = operations.ApplyL2Txs(ctx, txs, auth, client, operations.VerifiedConfirmationLevel) - require.NoError(t, err) - - time.Sleep(2 * time.Second) - amount = big.NewInt(0).Add(amount, big.NewInt(10)) - unsignedTx := types.NewTransaction(nonce, toAddress, amount, gasLimit, gasPrice, nil) - signedTx, err := auth.Signer(auth.From, unsignedTx) - require.NoError(t, err) - encodedTxs, err := state.EncodeTransactions([]types.Transaction{*signedTx}, constants.EffectivePercentage, forkID5) - require.NoError(t, err) - forcedBatch, err := sendForcedBatch(t, encodedTxs, opsman) - require.NoError(t, err) - - // Checking if all txs sent before the forced batch were processed within previous closed batch - for _, l2blockNum := range l2BlockNumbers { - batch, err := opsman.State().GetBatchByL2BlockNumber(ctx, l2blockNum.Uint64(), nil) - require.NoError(t, err) - require.Less(t, batch.BatchNumber, forcedBatch.BatchNumber) - } -} - -func setupEnvironment(ctx context.Context, t 
*testing.T) (*operations.Manager, *bind.TransactOpts, *ethclient.Client, *big.Int, uint64, *big.Int, uint64) { - - err := operations.Teardown() - require.NoError(t, err) - opsCfg := operations.GetDefaultOperationsConfig() - opsCfg.State.MaxCumulativeGasUsed = 80000000000 - opsman, err := operations.NewManager(ctx, opsCfg) - require.NoError(t, err) - err = opsman.Setup() - require.NoError(t, err) - time.Sleep(5 * time.Second) - // Load account with balance on local genesis - auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL2ChainID) - require.NoError(t, err) - // Load eth client - client, err := ethclient.Dial(operations.DefaultL2NetworkURL) - require.NoError(t, err) - // Send txs - amount := big.NewInt(10000) - senderBalance, err := client.BalanceAt(ctx, auth.From, nil) - require.NoError(t, err) - senderNonce, err := client.PendingNonceAt(ctx, auth.From) - require.NoError(t, err) - - log.Infof("Receiver Addr: %v", toAddress.String()) - log.Infof("Sender Addr: %v", auth.From.String()) - log.Infof("Sender Balance: %v", senderBalance.String()) - log.Infof("Sender Nonce: %v", senderNonce) - - gasLimit, err := client.EstimateGas(ctx, ethereum.CallMsg{From: auth.From, To: &toAddress, Value: amount}) - require.NoError(t, err) - - gasPrice, err := client.SuggestGasPrice(ctx) - require.NoError(t, err) - - nonce, err := client.PendingNonceAt(ctx, auth.From) - require.NoError(t, err) - return opsman, auth, client, amount, gasLimit, gasPrice, nonce -} - -func sendForcedBatch(t *testing.T, txs []byte, opsman *operations.Manager) (*state.Batch, error) { - ctx := context.Background() - st := opsman.State() - // Connect to ethereum node - ethClient, err := ethclient.Dial(operations.DefaultL1NetworkURL) - require.NoError(t, err) - - initialGer, _, err := st.GetLatestGer(ctx, gerFinalityBlocks) - require.NoError(t, err) - - // Create smc client - zkEvmAddr := common.HexToAddress(operations.DefaultL1ZkEVMSmartContract) - zkEvm, err := 
polygonzkevm.NewPolygonzkevm(zkEvmAddr, ethClient) - require.NoError(t, err) - - auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL1ChainID) - require.NoError(t, err) - - log.Info("Using address: ", auth.From) - - num, err := zkEvm.LastForceBatch(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - log.Info("Number of forceBatches in the smc: ", num) - - // Get tip - tip, err := zkEvm.GetForcedBatchFee(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - managerAddress, err := zkEvm.GlobalExitRootManager(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - manager, err := polygonzkevmglobalexitroot.NewPolygonzkevmglobalexitroot(managerAddress, ethClient) - require.NoError(t, err) - - rootInContract, err := manager.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - rootInContractHash := common.BytesToHash(rootInContract[:]) - - disallowed, err := zkEvm.IsForcedBatchDisallowed(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - if disallowed { - tx, err := zkEvm.ActivateForceBatches(auth) - require.NoError(t, err) - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - } - - currentBlock, err := ethClient.BlockByNumber(ctx, nil) - require.NoError(t, err) - - log.Debug("currentBlock.Time(): ", currentBlock.Time()) - temp, _, err := st.GetLatestGer(ctx, gerFinalityBlocks) - log.Infof("temp: %v", temp.GlobalExitRoot.String()) - - // Send forceBatch - tx, err := zkEvm.ForceBatch(auth, txs, tip) - require.NoError(t, err) - - log.Info("TxHash: ", tx.Hash()) - time.Sleep(1 * time.Second) - - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - - query := ethereum.FilterQuery{ - FromBlock: currentBlock.Number(), - Addresses: []common.Address{zkEvmAddr}, - } - logs, err := ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - - var 
forcedBatch *state.Batch - for _, vLog := range logs { - if vLog.Topics[0] != constants.ForcedBatchSignatureHash { - logs, err = ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - continue - } - fb, err := zkEvm.ParseForceBatch(vLog) - if err != nil { - log.Errorf("failed to parse force batch log event, err: ", err) - } - log.Debugf("log decoded: %+v", fb) - ger := fb.LastGlobalExitRoot - log.Info("GlobalExitRoot: ", ger) - log.Info("Transactions: ", common.Bytes2Hex(fb.Transactions)) - fullBlock, err := ethClient.BlockByHash(ctx, vLog.BlockHash) - if err != nil { - log.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) - return nil, err - } - log.Info("MinForcedTimestamp: ", fullBlock.Time()) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - for err == state.ErrStateNotSynchronized { - time.Sleep(1 * time.Second) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - } - log.Info("ForcedBatchNum: ", forcedBatch.BatchNumber) - require.NoError(t, err) - require.NotNil(t, forcedBatch) - - log.Info("Waiting for batch to be virtualized...") - err = operations.WaitBatchToBeVirtualized(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - - log.Info("Waiting for batch to be consolidated...") - err = operations.WaitBatchToBeConsolidated(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - - if rootInContractHash != initialGer.GlobalExitRoot { - finalGer, _, err := st.GetLatestGer(ctx, gerFinalityBlocks) - require.NoError(t, err) - if finalGer.GlobalExitRoot != rootInContractHash { - log.Infof("initialGer.GlobalExitRoot: %v, finalGer.GlobalExitRoot: %v, rootInContractHash: %v", initialGer.GlobalExitRoot.String(), finalGer.GlobalExitRoot.String(), rootInContractHash.String()) - log.Fatal("global exit root is not updated") - } - } - } - - return forcedBatch, nil -} diff --git a/ci/e2e-group10/forced_batches_test.go 
b/ci/e2e-group10/forced_batches_test.go new file mode 120000 index 0000000000..8681813c2c --- /dev/null +++ b/ci/e2e-group10/forced_batches_test.go @@ -0,0 +1 @@ +../../test/e2e/forced_batches_test.go \ No newline at end of file diff --git a/ci/e2e-group10/forced_batches_vector_group2_test.go b/ci/e2e-group10/forced_batches_vector_group2_test.go deleted file mode 100644 index 0faf78a046..0000000000 --- a/ci/e2e-group10/forced_batches_vector_group2_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package e2e - -import ( - "context" - "math/big" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm" - "github.com/0xPolygonHermez/zkevm-node/hex" - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/0xPolygonHermez/zkevm-node/test/constants" - "github.com/0xPolygonHermez/zkevm-node/test/operations" - "github.com/0xPolygonHermez/zkevm-node/test/vectors" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/require" -) - -func TestForcedBatchesVectorFiles(t *testing.T) { - - if testing.Short() { - t.Skip() - } - vectorFilesDir := "./../vectors/src/state-transition/forced-tx/group2" - ctx := context.Background() - err := filepath.Walk(vectorFilesDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() && !strings.HasSuffix(info.Name(), "list.json") { - - t.Run(info.Name(), func(t *testing.T) { - - defer func() { - require.NoError(t, operations.Teardown()) - }() - - // Load test vectors - log.Info("=====================================================================") - log.Info(path) - log.Info("=====================================================================") - testCase, err := vectors.LoadStateTransitionTestCaseV2(path) - 
require.NoError(t, err) - - opsCfg := operations.GetDefaultOperationsConfig() - opsCfg.State.MaxCumulativeGasUsed = 80000000000 - opsman, err := operations.NewManager(ctx, opsCfg) - require.NoError(t, err) - - // Setting Genesis - log.Info("###################") - log.Info("# Setting Genesis #") - log.Info("###################") - genesisActions := vectors.GenerateGenesisActions(testCase.Genesis) - require.NoError(t, opsman.SetGenesis(genesisActions)) - require.NoError(t, opsman.Setup()) - - // Check initial root - log.Info("################################") - log.Info("# Verifying initial state root #") - log.Info("################################") - actualOldStateRoot, err := opsman.State().GetLastStateRoot(ctx, nil) - require.NoError(t, err) - require.Equal(t, testCase.ExpectedOldStateRoot, actualOldStateRoot.Hex()) - decodedData, err := hex.DecodeHex(testCase.BatchL2Data) - require.NoError(t, err) - _, txBytes, _, err := state.DecodeTxs(decodedData, forkID5) - forcedBatch, err := sendForcedBatchForVector(t, txBytes, opsman) - require.NoError(t, err) - actualNewStateRoot := forcedBatch.StateRoot - isClosed, err := opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) - require.NoError(t, err) - - // wait until is closed - for !isClosed { - time.Sleep(1 * time.Second) - isClosed, err = opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) - require.NoError(t, err) - } - - log.Info("#######################") - log.Info("# Verifying new leafs #") - log.Info("#######################") - merkleTree := opsman.State().GetTree() - for _, expectedNewLeaf := range testCase.ExpectedNewLeafs { - if expectedNewLeaf.IsSmartContract { - log.Info("Smart Contract Address: ", expectedNewLeaf.Address) - } else { - log.Info("Account Address: ", expectedNewLeaf.Address) - } - log.Info("Verifying Balance...") - actualBalance, err := merkleTree.GetBalance(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - 
require.Equal(t, expectedNewLeaf.Balance.String(), actualBalance.String()) - - log.Info("Verifying Nonce...") - actualNonce, err := merkleTree.GetNonce(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.Nonce, actualNonce.String()) - if expectedNewLeaf.IsSmartContract { - log.Info("Verifying Storage...") - for positionHex, expectedNewStorageHex := range expectedNewLeaf.Storage { - position, ok := big.NewInt(0).SetString(positionHex[2:], 16) - require.True(t, ok) - expectedNewStorage, ok := big.NewInt(0).SetString(expectedNewStorageHex[2:], 16) - require.True(t, ok) - actualStorage, err := merkleTree.GetStorageAt(ctx, common.HexToAddress(expectedNewLeaf.Address), position, actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewStorage, actualStorage) - } - - log.Info("Verifying HashBytecode...") - actualHashByteCode, err := merkleTree.GetCodeHash(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.HashBytecode, common.BytesToHash(actualHashByteCode).String()) - } - } - return - }) - - return nil - } - return nil - }) - require.NoError(t, err) -} - -func sendForcedBatchForVector(t *testing.T, txs []byte, opsman *operations.Manager) (*state.Batch, error) { - ctx := context.Background() - st := opsman.State() - // Connect to ethereum node - ethClient, err := ethclient.Dial(operations.DefaultL1NetworkURL) - require.NoError(t, err) - - // Create smc client - zkEvmAddr := common.HexToAddress(operations.DefaultL1ZkEVMSmartContract) - zkEvm, err := polygonzkevm.NewPolygonzkevm(zkEvmAddr, ethClient) - require.NoError(t, err) - - auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL1ChainID) - require.NoError(t, err) - - log.Info("Using address: ", auth.From) - num, err := zkEvm.LastForceBatch(&bind.CallOpts{Pending: false}) - 
require.NoError(t, err) - log.Info("Number of forceBatches in the smc: ", num) - - // Get tip - tip, err := zkEvm.GetForcedBatchFee(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - disallowed, err := zkEvm.IsForcedBatchDisallowed(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - if disallowed { - tx, err := zkEvm.ActivateForceBatches(auth) - require.NoError(t, err) - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - } - - // Send forceBatch - tx, err := zkEvm.ForceBatch(auth, txs, tip) - require.NoError(t, err) - - log.Info("Forced Batch Submit to L1 TxHash: ", tx.Hash()) - time.Sleep(1 * time.Second) - - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - - currentBlock, err := ethClient.BlockByNumber(ctx, nil) - require.NoError(t, err) - log.Debug("currentBlock.Time(): ", currentBlock.Time()) - - query := ethereum.FilterQuery{ - FromBlock: currentBlock.Number(), - Addresses: []common.Address{zkEvmAddr}, - } - logs, err := ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - - var forcedBatch *state.Batch - for _, vLog := range logs { - if vLog.Topics[0] != constants.ForcedBatchSignatureHash { - logs, err = ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - continue - } - fb, err := zkEvm.ParseForceBatch(vLog) - if err != nil { - log.Errorf("failed to parse force batch log event, err: ", err) - } - log.Debugf("log decoded: %+v", fb) - ger := fb.LastGlobalExitRoot - log.Info("GlobalExitRoot: ", ger) - log.Info("Transactions: ", common.Bytes2Hex(fb.Transactions)) - fullBlock, err := ethClient.BlockByHash(ctx, vLog.BlockHash) - if err != nil { - log.Errorf("error getting hashParent. BlockNumber: %d. 
Error: %v", vLog.BlockNumber, err) - return nil, err - } - log.Info("MinForcedTimestamp: ", fullBlock.Time()) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - for err == state.ErrStateNotSynchronized { - time.Sleep(1 * time.Second) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - } - require.NoError(t, err) - require.NotNil(t, forcedBatch) - - log.Info("Waiting Forced Batch to be virtualized ...") - err = operations.WaitBatchToBeVirtualized(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - - log.Info("Waiting Forced Batch to be consolidated ...") - err = operations.WaitBatchToBeConsolidated(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - } - - return forcedBatch, nil -} diff --git a/ci/e2e-group10/forced_batches_vector_group2_test.go b/ci/e2e-group10/forced_batches_vector_group2_test.go new file mode 120000 index 0000000000..e52931aaa9 --- /dev/null +++ b/ci/e2e-group10/forced_batches_vector_group2_test.go @@ -0,0 +1 @@ +../../test/e2e/forced_batches_vector_group2_test.go \ No newline at end of file diff --git a/ci/e2e-group10/shared.go b/ci/e2e-group10/shared.go new file mode 120000 index 0000000000..2762ace935 --- /dev/null +++ b/ci/e2e-group10/shared.go @@ -0,0 +1 @@ +../../test/e2e/shared.go \ No newline at end of file diff --git a/ci/e2e-group11/forced_batches_vector_group3_test.go b/ci/e2e-group11/forced_batches_vector_group3_test.go deleted file mode 100644 index 4bbf4b508b..0000000000 --- a/ci/e2e-group11/forced_batches_vector_group3_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package e2e - -import ( - "context" - "math/big" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm" - "github.com/0xPolygonHermez/zkevm-node/hex" - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/0xPolygonHermez/zkevm-node/test/constants" - 
"github.com/0xPolygonHermez/zkevm-node/test/operations" - "github.com/0xPolygonHermez/zkevm-node/test/vectors" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/require" -) - -const ( - forkID5 = 5 -) - -func TestForcedBatchesVectorFiles(t *testing.T) { - - if testing.Short() { - t.Skip() - } - vectorFilesDir := "./../vectors/src/state-transition/forced-tx/group3" - ctx := context.Background() - err := filepath.Walk(vectorFilesDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() && !strings.HasSuffix(info.Name(), "list.json") { - - t.Run(info.Name(), func(t *testing.T) { - - defer func() { - require.NoError(t, operations.Teardown()) - }() - - // Load test vectors - log.Info("=====================================================================") - log.Info(path) - log.Info("=====================================================================") - testCase, err := vectors.LoadStateTransitionTestCaseV2(path) - require.NoError(t, err) - - opsCfg := operations.GetDefaultOperationsConfig() - opsCfg.State.MaxCumulativeGasUsed = 80000000000 - opsman, err := operations.NewManager(ctx, opsCfg) - require.NoError(t, err) - - // Setting Genesis - log.Info("###################") - log.Info("# Setting Genesis #") - log.Info("###################") - genesisActions := vectors.GenerateGenesisActions(testCase.Genesis) - require.NoError(t, opsman.SetGenesis(genesisActions)) - require.NoError(t, opsman.Setup()) - - // Check initial root - log.Info("################################") - log.Info("# Verifying initial state root #") - log.Info("################################") - actualOldStateRoot, err := opsman.State().GetLastStateRoot(ctx, nil) - require.NoError(t, err) - require.Equal(t, testCase.ExpectedOldStateRoot, actualOldStateRoot.Hex()) - decodedData, err := 
hex.DecodeHex(testCase.BatchL2Data) - require.NoError(t, err) - _, txBytes, _, err := state.DecodeTxs(decodedData, forkID5) - forcedBatch, err := sendForcedBatchForVector(t, txBytes, opsman) - require.NoError(t, err) - actualNewStateRoot := forcedBatch.StateRoot - isClosed, err := opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) - require.NoError(t, err) - - // wait until is closed - for !isClosed { - time.Sleep(1 * time.Second) - isClosed, err = opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) - require.NoError(t, err) - } - - log.Info("#######################") - log.Info("# Verifying new leafs #") - log.Info("#######################") - merkleTree := opsman.State().GetTree() - for _, expectedNewLeaf := range testCase.ExpectedNewLeafs { - if expectedNewLeaf.IsSmartContract { - log.Info("Smart Contract Address: ", expectedNewLeaf.Address) - } else { - log.Info("Account Address: ", expectedNewLeaf.Address) - } - log.Info("Verifying Balance...") - actualBalance, err := merkleTree.GetBalance(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.Balance.String(), actualBalance.String()) - - log.Info("Verifying Nonce...") - actualNonce, err := merkleTree.GetNonce(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.Nonce, actualNonce.String()) - if expectedNewLeaf.IsSmartContract { - log.Info("Verifying Storage...") - for positionHex, expectedNewStorageHex := range expectedNewLeaf.Storage { - position, ok := big.NewInt(0).SetString(positionHex[2:], 16) - require.True(t, ok) - expectedNewStorage, ok := big.NewInt(0).SetString(expectedNewStorageHex[2:], 16) - require.True(t, ok) - actualStorage, err := merkleTree.GetStorageAt(ctx, common.HexToAddress(expectedNewLeaf.Address), position, actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, 
expectedNewStorage, actualStorage) - } - - log.Info("Verifying HashBytecode...") - actualHashByteCode, err := merkleTree.GetCodeHash(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.HashBytecode, common.BytesToHash(actualHashByteCode).String()) - } - } - return - }) - - return nil - } - return nil - }) - require.NoError(t, err) -} - -func sendForcedBatchForVector(t *testing.T, txs []byte, opsman *operations.Manager) (*state.Batch, error) { - ctx := context.Background() - st := opsman.State() - // Connect to ethereum node - ethClient, err := ethclient.Dial(operations.DefaultL1NetworkURL) - require.NoError(t, err) - - // Create smc client - zkEvmAddr := common.HexToAddress(operations.DefaultL1ZkEVMSmartContract) - zkEvm, err := polygonzkevm.NewPolygonzkevm(zkEvmAddr, ethClient) - require.NoError(t, err) - - auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL1ChainID) - require.NoError(t, err) - - log.Info("Using address: ", auth.From) - num, err := zkEvm.LastForceBatch(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - log.Info("Number of forceBatches in the smc: ", num) - - // Get tip - tip, err := zkEvm.GetForcedBatchFee(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - disallowed, err := zkEvm.IsForcedBatchDisallowed(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - if disallowed { - tx, err := zkEvm.ActivateForceBatches(auth) - require.NoError(t, err) - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - } - - // Send forceBatch - tx, err := zkEvm.ForceBatch(auth, txs, tip) - require.NoError(t, err) - - log.Info("Forced Batch Submit to L1 TxHash: ", tx.Hash()) - time.Sleep(1 * time.Second) - - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - - currentBlock, err := 
ethClient.BlockByNumber(ctx, nil) - require.NoError(t, err) - log.Debug("currentBlock.Time(): ", currentBlock.Time()) - - query := ethereum.FilterQuery{ - FromBlock: currentBlock.Number(), - Addresses: []common.Address{zkEvmAddr}, - } - logs, err := ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - - var forcedBatch *state.Batch - for _, vLog := range logs { - if vLog.Topics[0] != constants.ForcedBatchSignatureHash { - logs, err = ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - continue - } - fb, err := zkEvm.ParseForceBatch(vLog) - if err != nil { - log.Errorf("failed to parse force batch log event, err: ", err) - } - log.Debugf("log decoded: %+v", fb) - ger := fb.LastGlobalExitRoot - log.Info("GlobalExitRoot: ", ger) - log.Info("Transactions: ", common.Bytes2Hex(fb.Transactions)) - fullBlock, err := ethClient.BlockByHash(ctx, vLog.BlockHash) - if err != nil { - log.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) - return nil, err - } - log.Info("MinForcedTimestamp: ", fullBlock.Time()) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - for err == state.ErrStateNotSynchronized { - time.Sleep(1 * time.Second) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - } - require.NoError(t, err) - require.NotNil(t, forcedBatch) - - log.Info("Waiting Forced Batch to be virtualized ...") - err = operations.WaitBatchToBeVirtualized(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - - log.Info("Waiting Forced Batch to be consolidated ...") - err = operations.WaitBatchToBeConsolidated(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - } - - return forcedBatch, nil -} diff --git a/ci/e2e-group11/forced_batches_vector_group3_test.go b/ci/e2e-group11/forced_batches_vector_group3_test.go new file mode 120000 index 0000000000..2bcb4c322a --- /dev/null +++ b/ci/e2e-group11/forced_batches_vector_group3_test.go @@ -0,0 +1 @@ 
+../../test/e2e/forced_batches_vector_group3_test.go \ No newline at end of file diff --git a/ci/e2e-group11/shared.go b/ci/e2e-group11/shared.go new file mode 120000 index 0000000000..2762ace935 --- /dev/null +++ b/ci/e2e-group11/shared.go @@ -0,0 +1 @@ +../../test/e2e/shared.go \ No newline at end of file diff --git a/ci/e2e-group2/gasless_test.go b/ci/e2e-group2/gasless_test.go new file mode 120000 index 0000000000..280cb4bce2 --- /dev/null +++ b/ci/e2e-group2/gasless_test.go @@ -0,0 +1 @@ +../../test/e2e/gasless_test.go \ No newline at end of file diff --git a/ci/e2e-group9/forced_batches_vector_group1_test.go b/ci/e2e-group9/forced_batches_vector_group1_test.go deleted file mode 100644 index 81fd0db8ac..0000000000 --- a/ci/e2e-group9/forced_batches_vector_group1_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package e2e - -import ( - "context" - "math/big" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm" - "github.com/0xPolygonHermez/zkevm-node/hex" - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/0xPolygonHermez/zkevm-node/test/constants" - "github.com/0xPolygonHermez/zkevm-node/test/operations" - "github.com/0xPolygonHermez/zkevm-node/test/vectors" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/require" -) - -const ( - forkID5 uint64 = 5 -) - -func TestForcedBatchesVectorFiles(t *testing.T) { - - if testing.Short() { - t.Skip() - } - vectorFilesDir := "./../vectors/src/state-transition/forced-tx/group1" - ctx := context.Background() - err := filepath.Walk(vectorFilesDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() && !strings.HasSuffix(info.Name(), "list.json") { - - t.Run(info.Name(), func(t 
*testing.T) { - - defer func() { - require.NoError(t, operations.Teardown()) - }() - - // Load test vectors - log.Info("=====================================================================") - log.Info(path) - log.Info("=====================================================================") - testCase, err := vectors.LoadStateTransitionTestCaseV2(path) - require.NoError(t, err) - - opsCfg := operations.GetDefaultOperationsConfig() - opsCfg.State.MaxCumulativeGasUsed = 80000000000 - opsman, err := operations.NewManager(ctx, opsCfg) - require.NoError(t, err) - - // Setting Genesis - log.Info("###################") - log.Info("# Setting Genesis #") - log.Info("###################") - genesisActions := vectors.GenerateGenesisActions(testCase.Genesis) - require.NoError(t, opsman.SetGenesis(genesisActions)) - require.NoError(t, opsman.SetForkID(forkID5)) - require.NoError(t, opsman.Setup()) - - // Check initial root - log.Info("################################") - log.Info("# Verifying initial state root #") - log.Info("################################") - actualOldStateRoot, err := opsman.State().GetLastStateRoot(ctx, nil) - require.NoError(t, err) - require.Equal(t, testCase.ExpectedOldStateRoot, actualOldStateRoot.Hex()) - decodedData, err := hex.DecodeHex(testCase.BatchL2Data) - require.NoError(t, err) - _, txBytes, _, err := state.DecodeTxs(decodedData, forkID5) - require.NoError(t, err) - forcedBatch, err := sendForcedBatchForVector(t, txBytes, opsman) - require.NoError(t, err) - actualNewStateRoot := forcedBatch.StateRoot - isClosed, err := opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) - require.NoError(t, err) - - // wait until is closed - for !isClosed { - time.Sleep(1 * time.Second) - isClosed, err = opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) - require.NoError(t, err) - } - - log.Info("#######################") - log.Info("# Verifying new leafs #") - log.Info("#######################") - merkleTree := 
opsman.State().GetTree() - for _, expectedNewLeaf := range testCase.ExpectedNewLeafs { - if expectedNewLeaf.IsSmartContract { - log.Info("Smart Contract Address: ", expectedNewLeaf.Address) - } else { - log.Info("Account Address: ", expectedNewLeaf.Address) - } - log.Info("Verifying Balance...") - actualBalance, err := merkleTree.GetBalance(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.Balance.String(), actualBalance.String()) - - log.Info("Verifying Nonce...") - actualNonce, err := merkleTree.GetNonce(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.Nonce, actualNonce.String()) - if expectedNewLeaf.IsSmartContract { - log.Info("Verifying Storage...") - for positionHex, expectedNewStorageHex := range expectedNewLeaf.Storage { - position, ok := big.NewInt(0).SetString(positionHex[2:], 16) - require.True(t, ok) - expectedNewStorage, ok := big.NewInt(0).SetString(expectedNewStorageHex[2:], 16) - require.True(t, ok) - actualStorage, err := merkleTree.GetStorageAt(ctx, common.HexToAddress(expectedNewLeaf.Address), position, actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewStorage, actualStorage) - } - - log.Info("Verifying HashBytecode...") - actualHashByteCode, err := merkleTree.GetCodeHash(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) - require.NoError(t, err) - require.Equal(t, expectedNewLeaf.HashBytecode, common.BytesToHash(actualHashByteCode).String()) - } - } - return - }) - - return nil - } - return nil - }) - require.NoError(t, err) -} - -func sendForcedBatchForVector(t *testing.T, txs []byte, opsman *operations.Manager) (*state.Batch, error) { - ctx := context.Background() - st := opsman.State() - // Connect to ethereum node - ethClient, err := ethclient.Dial(operations.DefaultL1NetworkURL) - require.NoError(t, 
err) - - // Create smc client - zkEvmAddr := common.HexToAddress(operations.DefaultL1ZkEVMSmartContract) - zkEvm, err := polygonzkevm.NewPolygonzkevm(zkEvmAddr, ethClient) - require.NoError(t, err) - - auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL1ChainID) - require.NoError(t, err) - - log.Info("Using address: ", auth.From) - num, err := zkEvm.LastForceBatch(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - log.Info("Number of forceBatches in the smc: ", num) - - // Get tip - tip, err := zkEvm.GetForcedBatchFee(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - disallowed, err := zkEvm.IsForcedBatchDisallowed(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - if disallowed { - tx, err := zkEvm.ActivateForceBatches(auth) - require.NoError(t, err) - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - } - - // Send forceBatch - tx, err := zkEvm.ForceBatch(auth, txs, tip) - require.NoError(t, err) - - log.Info("Forced Batch Submit to L1 TxHash: ", tx.Hash()) - time.Sleep(1 * time.Second) - - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - - currentBlock, err := ethClient.BlockByNumber(ctx, nil) - require.NoError(t, err) - log.Debug("currentBlock.Time(): ", currentBlock.Time()) - - query := ethereum.FilterQuery{ - FromBlock: currentBlock.Number(), - Addresses: []common.Address{zkEvmAddr}, - } - logs, err := ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - - var forcedBatch *state.Batch - for _, vLog := range logs { - if vLog.Topics[0] != constants.ForcedBatchSignatureHash { - logs, err = ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - continue - } - fb, err := zkEvm.ParseForceBatch(vLog) - if err != nil { - log.Errorf("failed to parse force batch log event, err: ", err) - } - log.Debugf("log decoded: %+v", fb) - ger := 
fb.LastGlobalExitRoot - log.Info("GlobalExitRoot: ", ger) - log.Info("Transactions: ", common.Bytes2Hex(fb.Transactions)) - fullBlock, err := ethClient.BlockByHash(ctx, vLog.BlockHash) - if err != nil { - log.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) - return nil, err - } - log.Info("MinForcedTimestamp: ", fullBlock.Time()) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - for err == state.ErrStateNotSynchronized { - time.Sleep(1 * time.Second) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - } - require.NoError(t, err) - require.NotNil(t, forcedBatch) - - log.Info("Waiting Forced Batch to be virtualized ...") - err = operations.WaitBatchToBeVirtualized(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - - log.Info("Waiting Forced Batch to be consolidated ...") - err = operations.WaitBatchToBeConsolidated(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - } - - return forcedBatch, nil -} diff --git a/ci/e2e-group9/forced_batches_vector_group1_test.go b/ci/e2e-group9/forced_batches_vector_group1_test.go new file mode 120000 index 0000000000..dcba3a9097 --- /dev/null +++ b/ci/e2e-group9/forced_batches_vector_group1_test.go @@ -0,0 +1 @@ +../../test/e2e/forced_batches_vector_group1_test.go \ No newline at end of file diff --git a/ci/e2e-group9/shared.go b/ci/e2e-group9/shared.go new file mode 120000 index 0000000000..2762ace935 --- /dev/null +++ b/ci/e2e-group9/shared.go @@ -0,0 +1 @@ +../../test/e2e/shared.go \ No newline at end of file diff --git a/cmd/dumpstate.go b/cmd/dumpstate.go index 05b0bbea4e..350048a3eb 100644 --- a/cmd/dumpstate.go +++ b/cmd/dumpstate.go @@ -109,11 +109,11 @@ func dumpState(ctx *cli.Context) error { } // Connect to SQL - stateSqlDB, err := db.NewSQLDB(c.StateDB) + stateSqlDB, err := db.NewSQLDB(c.State.DB) if err != nil { return err } - stateDB := state.NewPostgresStorage(stateSqlDB) + stateDB := 
state.NewPostgresStorage(state.Config{}, stateSqlDB) dump := dumpedState{ Description: description, diff --git a/cmd/restore.go b/cmd/restore.go index 425b79a7d4..caa33fd18f 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -50,7 +50,7 @@ func restore(ctx *cli.Context) error { return errors.New("stateDB input file must end in .sql.tar.gz") } - d, err := db.NewSQLDB(c.StateDB) + d, err := db.NewSQLDB(c.State.DB) if err != nil { log.Error("error conecting to stateDB. Error: ", err) return err @@ -60,17 +60,17 @@ func restore(ctx *cli.Context) error { log.Error("error dropping state schema or migration table. Error: ", err) return err } - port, err := strconv.Atoi(c.StateDB.Port) + port, err := strconv.Atoi(c.State.DB.Port) if err != nil { log.Error("error converting port to int. Error: ", err) return err } restore, err := pg.NewRestore(&pg.Postgres{ - Host: c.StateDB.Host, + Host: c.State.DB.Host, Port: port, - DB: c.StateDB.Name, - Username: c.StateDB.User, - Password: c.StateDB.Password, + DB: c.State.DB.Name, + Username: c.State.DB.User, + Password: c.State.DB.Password, }) if err != nil { log.Error("error: ", err) diff --git a/cmd/run.go b/cmd/run.go index eaeb832628..41a4637599 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -14,6 +14,7 @@ import ( "time" dataCommitteeClient "github.com/0xPolygon/cdk-data-availability/client" + datastreamerlog "github.com/0xPolygonHermez/zkevm-data-streamer/log" "github.com/0xPolygonHermez/zkevm-node" "github.com/0xPolygonHermez/zkevm-node/aggregator" "github.com/0xPolygonHermez/zkevm-node/config" @@ -66,11 +67,11 @@ func start(cliCtx *cli.Context) error { if !cliCtx.Bool(config.FlagMigrations) { for _, comp := range components { if comp == SYNCHRONIZER { - runStateMigrations(c.StateDB) + runStateMigrations(c.State.DB) } } } - checkStateMigrations(c.StateDB) + checkStateMigrations(c.State.DB) var ( eventLog *event.EventLog @@ -102,7 +103,7 @@ func start(cliCtx *cli.Context) error { eventLog = event.NewEventLog(c.EventLog, 
eventStorage) // Core State DB - stateSqlDB, err := db.NewSQLDB(c.StateDB) + stateSqlDB, err := db.NewSQLDB(c.State.DB) if err != nil { log.Fatal(err) } @@ -132,7 +133,7 @@ func start(cliCtx *cli.Context) error { c.Aggregator.ForkId = currentForkID log.Infof("Chain ID read from POE SC = %v", l2ChainID) - ethTxManagerStorage, err := ethtxmanager.NewPostgresStorage(c.StateDB) + ethTxManagerStorage, err := ethtxmanager.NewPostgresStorage(c.State.DB) if err != nil { log.Fatal(err) } @@ -162,6 +163,11 @@ func start(cliCtx *cli.Context) error { } go runAggregator(cliCtx.Context, c.Aggregator, etherman, etm, st) case SEQUENCER: + c.Sequencer.StreamServer.Log = datastreamerlog.Config{ + Environment: datastreamerlog.LogEnvironment(c.Log.Environment), + Level: c.Log.Level, + Outputs: c.Log.Outputs, + } ev.Component = event.Component_Sequencer ev.Description = "Running sequencer" err := eventLog.LogEvent(cliCtx.Context, ev) @@ -169,9 +175,9 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.State.Batch.Constraints, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } - seq := createSequencer(*c, poolInstance, ethTxManagerStorage, st, eventLog) + seq := createSequencer(*c, poolInstance, st, eventLog) go seq.Start(cliCtx.Context) case SEQUENCE_SENDER: ev.Component = event.Component_Sequence_Sender @@ -181,7 +187,7 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.State.Batch.Constraints, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } seqSender := createSequenceSender(*c, poolInstance, ethTxManagerStorage, st, eventLog) go seqSender.Start(cliCtx.Context) @@ -193,12 +199,13 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if 
poolInstance == nil { - poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.State.Batch.Constraints, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } if c.RPC.EnableL2SuggestedGasPricePolling { // Needed for rejecting transactions with too low gas price poolInstance.StartPollingMinSuggestedGasPrice(cliCtx.Context) } + poolInstance.StartRefreshingBlockedAddressesPeriodically() apis := map[string]bool{} for _, a := range cliCtx.StringSlice(config.FlagHTTPAPI) { apis[a] = true @@ -212,9 +219,9 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.State.Batch.Constraints, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } - go runSynchronizer(*c, etherman, etm, st, poolInstance, eventLog) + go runSynchronizer(*c, etherman, ethTxManagerStorage, st, poolInstance, eventLog) case ETHTXMANAGER: ev.Component = event.Component_EthTxManager ev.Description = "Running eth tx manager service" @@ -232,7 +239,7 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } if poolInstance == nil { - poolInstance = createPool(c.Pool, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) + poolInstance = createPool(c.Pool, c.State.Batch.Constraints, c.NetworkConfig.L2BridgeAddr, l2ChainID, st, eventLog) } go runL2GasPriceSuggester(c.L2GasPriceSuggester, st, poolInstance, etherman) } @@ -282,7 +289,7 @@ func newEtherman(c config.Config) (*etherman.Client, error) { return etherman, nil } -func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManager *ethtxmanager.Client, st *state.State, pool *pool.Pool, eventLog *event.EventLog) { +func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerStorage *ethtxmanager.PostgresStorage, st *state.State, pool *pool.Pool, eventLog *event.EventLog) { var 
trustedSequencerURL string var err error if !cfg.IsTrustedSequencer { @@ -299,10 +306,21 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManager } zkEVMClient := client.NewClient(trustedSequencerURL) + etherManForL1 := []synchronizer.EthermanInterface{} + // If synchronizer are using sequential mode, we only need one etherman client + if cfg.Synchronizer.L1SynchronizationMode == synchronizer.ParallelMode { + for i := 0; i < int(cfg.Synchronizer.L1ParallelSynchronization.MaxClients+1); i++ { + eth, err := newEtherman(cfg) + if err != nil { + log.Fatal(err) + } + etherManForL1 = append(etherManForL1, eth) + } + } + etm := ethtxmanager.New(cfg.EthTxManager, etherman, ethTxManagerStorage, st) sy, err := synchronizer.NewSynchronizer( - cfg.IsTrustedSequencer, etherman, st, pool, ethTxManager, - zkEVMClient, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, - &dataCommitteeClient.ClientFactory{}, + cfg.IsTrustedSequencer, etherman, etherManForL1, st, pool, etm, + zkEVMClient, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, &dataCommitteeClient.ClientFactory{}, cfg.Log.Environment == "development", ) if err != nil { log.Fatal(err) @@ -315,7 +333,8 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManager func runJSONRPCServer(c config.Config, etherman *etherman.Client, chainID uint64, pool *pool.Pool, st *state.State, apis map[string]bool) { var err error storage := jsonrpc.NewStorage() - c.RPC.MaxCumulativeGasUsed = c.Sequencer.MaxCumulativeGasUsed + c.RPC.MaxCumulativeGasUsed = c.State.Batch.Constraints.MaxCumulativeGasUsed + c.RPC.L2Coinbase = c.SequenceSender.L2Coinbase if !c.IsTrustedSequencer { if c.RPC.SequencerNodeURI == "" { log.Debug("getting trusted sequencer URL from smc") @@ -375,15 +394,13 @@ func runJSONRPCServer(c config.Config, etherman *etherman.Client, chainID uint64 } } -func createSequencer(cfg config.Config, pool *pool.Pool, etmStorage *ethtxmanager.PostgresStorage, st *state.State, 
eventLog *event.EventLog) *sequencer.Sequencer { +func createSequencer(cfg config.Config, pool *pool.Pool, st *state.State, eventLog *event.EventLog) *sequencer.Sequencer { etherman, err := newEtherman(cfg) if err != nil { log.Fatal(err) } - ethTxManager := ethtxmanager.New(cfg.EthTxManager, etherman, etmStorage, st) - - seq, err := sequencer.New(cfg.Sequencer, pool, st, etherman, ethTxManager, eventLog) + seq, err := sequencer.New(cfg.Sequencer, cfg.State.Batch, cfg.Pool, pool, st, etherman, eventLog) if err != nil { log.Fatal(err) } @@ -460,7 +477,7 @@ func waitSignal(cancelFuncs []context.CancelFunc) { } func newState(ctx context.Context, c *config.Config, l2ChainID uint64, forkIDIntervals []state.ForkIDInterval, sqlDB *pgxpool.Pool, eventLog *event.EventLog, needsExecutor, needsStateTree bool) *state.State { - stateDb := state.NewPostgresStorage(sqlDB) + stateDb := state.NewPostgresStorage(c.State, sqlDB) // Executor var executorClient executor.ExecutorServiceClient @@ -476,26 +493,29 @@ func newState(ctx context.Context, c *config.Config, l2ChainID uint64, forkIDInt } stateCfg := state.Config{ - MaxCumulativeGasUsed: c.Sequencer.MaxCumulativeGasUsed, + MaxCumulativeGasUsed: c.State.Batch.Constraints.MaxCumulativeGasUsed, ChainID: l2ChainID, ForkIDIntervals: forkIDIntervals, MaxResourceExhaustedAttempts: c.Executor.MaxResourceExhaustedAttempts, WaitOnResourceExhaustion: c.Executor.WaitOnResourceExhaustion, ForkUpgradeBatchNumber: c.ForkUpgradeBatchNumber, ForkUpgradeNewForkId: c.ForkUpgradeNewForkId, + MaxLogsCount: c.RPC.MaxLogsCount, + MaxLogsBlockRange: c.RPC.MaxLogsBlockRange, + MaxNativeBlockHashBlockRange: c.RPC.MaxNativeBlockHashBlockRange, } st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog) return st } -func createPool(cfgPool pool.Config, l2BridgeAddr common.Address, l2ChainID uint64, st *state.State, eventLog *event.EventLog) *pool.Pool { +func createPool(cfgPool pool.Config, constraintsCfg state.BatchConstraintsCfg, 
l2BridgeAddr common.Address, l2ChainID uint64, st *state.State, eventLog *event.EventLog) *pool.Pool { runPoolMigrations(cfgPool.DB) poolStorage, err := pgpoolstorage.NewPostgresPoolStorage(cfgPool.DB) if err != nil { log.Fatal(err) } - poolInstance := pool.NewPool(cfgPool, poolStorage, st, l2BridgeAddr, l2ChainID, eventLog) + poolInstance := pool.NewPool(cfgPool, constraintsCfg, poolStorage, st, l2BridgeAddr, l2ChainID, eventLog) return poolInstance } diff --git a/cmd/snapshot.go b/cmd/snapshot.go index 34268fa788..e6111b14ed 100644 --- a/cmd/snapshot.go +++ b/cmd/snapshot.go @@ -26,17 +26,17 @@ func snapshot(ctx *cli.Context) error { } setupLog(c.Log) - port, err := strconv.Atoi(c.StateDB.Port) + port, err := strconv.Atoi(c.State.DB.Port) if err != nil { log.Error("error converting port to int. Error: ", err) return err } dump, err := pg.NewDump(&pg.Postgres{ - Host: c.StateDB.Host, + Host: c.State.DB.Host, Port: port, - DB: c.StateDB.Name, - Username: c.StateDB.User, - Password: c.StateDB.Password, + DB: c.State.DB.Name, + Username: c.State.DB.User, + Password: c.State.DB.Password, }) if err != nil { log.Error("error: ", err) diff --git a/config/config.go b/config/config.go index 9660faac75..acfb2186f8 100644 --- a/config/config.go +++ b/config/config.go @@ -18,6 +18,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/pool" "github.com/0xPolygonHermez/zkevm-node/sequencer" "github.com/0xPolygonHermez/zkevm-node/sequencesender" + "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/0xPolygonHermez/zkevm-node/synchronizer" "github.com/mitchellh/mapstructure" @@ -108,14 +109,14 @@ type Config struct { Executor executor.Config // Configuration of the merkle tree client service. 
Not use in the node, only for testing MTClient merkletree.Config - // Configuration of the state database connection - StateDB db.Config // Configuration of the metrics service, basically is where is going to publish the metrics Metrics metrics.Config // Configuration of the event database connection EventLog event.Config // Configuration of the hash database connection HashDB db.Config + // State service configuration + State state.Config } // Default parses the default configuration values. diff --git a/config/config_test.go b/config/config_test.go index f484917780..f22f047f8d 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -41,16 +41,20 @@ func Test_Defaults(t *testing.T) { expectedValue: uint64(100), }, { - path: "Sequencer.WaitPeriodPoolIsEmpty", - expectedValue: types.NewDuration(1 * time.Second), + path: "Synchronizer.L1SynchronizationMode", + expectedValue: "sequential", }, { - path: "Sequencer.MaxTxsPerBatch", - expectedValue: uint64(300), + path: "Synchronizer.L1ParallelSynchronization.MaxClients", + expectedValue: uint64(10), }, { - path: "Sequencer.MaxBatchBytesSize", - expectedValue: uint64(120000), + path: "Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks", + expectedValue: uint64(25), + }, + { + path: "Sequencer.WaitPeriodPoolIsEmpty", + expectedValue: types.NewDuration(1 * time.Second), }, { path: "Sequencer.BlocksAmountForTxsToBeDeleted", @@ -60,38 +64,6 @@ func Test_Defaults(t *testing.T) { path: "Sequencer.FrequencyToCheckTxsForDelete", expectedValue: types.NewDuration(12 * time.Hour), }, - { - path: "Sequencer.MaxCumulativeGasUsed", - expectedValue: uint64(30000000), - }, - { - path: "Sequencer.MaxKeccakHashes", - expectedValue: uint32(2145), - }, - { - path: "Sequencer.MaxPoseidonHashes", - expectedValue: uint32(252357), - }, - { - path: "Sequencer.MaxPoseidonPaddings", - expectedValue: uint32(135191), - }, - { - path: "Sequencer.MaxMemAligns", - expectedValue: uint32(236585), - }, - { - path: 
"Sequencer.MaxArithmetics", - expectedValue: uint32(236585), - }, - { - path: "Sequencer.MaxBinaries", - expectedValue: uint32(473170), - }, - { - path: "Sequencer.MaxSteps", - expectedValue: uint32(7570538), - }, { path: "Sequencer.TxLifetimeCheckTimeout", expectedValue: types.NewDuration(10 * time.Minute), @@ -145,33 +117,25 @@ func Test_Defaults(t *testing.T) { expectedValue: types.NewDuration(10 * time.Second), }, { - path: "Sequencer.EffectiveGasPrice.MaxBreakEvenGasPriceDeviationPercentage", - expectedValue: uint64(10), + path: "Sequencer.DBManager.PoolRetrievalInterval", + expectedValue: types.NewDuration(500 * time.Millisecond), }, { - path: "Sequencer.EffectiveGasPrice.L1GasPriceFactor", - expectedValue: float64(0.25), + path: "Sequencer.DBManager.L2ReorgRetrievalInterval", + expectedValue: types.NewDuration(5 * time.Second), }, { - path: "Sequencer.EffectiveGasPrice.ByteGasCost", - expectedValue: uint64(16), + path: "Sequencer.StreamServer.Port", + expectedValue: uint16(0), }, { - path: "Sequencer.EffectiveGasPrice.MarginFactor", - expectedValue: float64(1), + path: "Sequencer.StreamServer.Filename", + expectedValue: "", }, { - path: "Sequencer.EffectiveGasPrice.Enabled", + path: "Sequencer.StreamServer.Enabled", expectedValue: false, }, - { - path: "Sequencer.DBManager.PoolRetrievalInterval", - expectedValue: types.NewDuration(500 * time.Millisecond), - }, - { - path: "Sequencer.DBManager.L2ReorgRetrievalInterval", - expectedValue: types.NewDuration(5 * time.Second), - }, { path: "SequenceSender.WaitPeriodSendSequence", expectedValue: types.NewDuration(5 * time.Second), @@ -184,6 +148,10 @@ func Test_Defaults(t *testing.T) { path: "SequenceSender.MaxBatchesForL1", expectedValue: uint64(10), }, + { + path: "SequenceSender.GasOffset", + expectedValue: uint64(80000), + }, { path: "Etherman.URL", expectedValue: "http://localhost:8545", @@ -245,31 +213,31 @@ func Test_Defaults(t *testing.T) { expectedValue: "x1-prover:50061", }, { - path: "StateDB.User", + 
path: "State.DB.User", expectedValue: "state_user", }, { - path: "StateDB.Password", + path: "State.DB.Password", expectedValue: "state_password", }, { - path: "StateDB.Name", + path: "State.DB.Name", expectedValue: "state_db", }, { - path: "StateDB.Host", + path: "State.DB.Host", expectedValue: "x1-state-db", }, { - path: "StateDB.Port", + path: "State.DB.Port", expectedValue: "5432", }, { - path: "StateDB.EnableLog", + path: "State.DB.EnableLog", expectedValue: false, }, { - path: "StateDB.MaxConns", + path: "State.DB.MaxConns", expectedValue: 200, }, { @@ -305,6 +273,34 @@ func Test_Defaults(t *testing.T) { path: "Pool.GlobalQueue", expectedValue: uint64(1024), }, + { + path: "Pool.EffectiveGasPrice.Enabled", + expectedValue: false, + }, + { + path: "Pool.EffectiveGasPrice.L1GasPriceFactor", + expectedValue: float64(0.25), + }, + { + path: "Pool.EffectiveGasPrice.ByteGasCost", + expectedValue: uint64(16), + }, + { + path: "Pool.EffectiveGasPrice.ZeroByteGasCost", + expectedValue: uint64(4), + }, + { + path: "Pool.EffectiveGasPrice.NetProfit", + expectedValue: float64(1), + }, + { + path: "Pool.EffectiveGasPrice.BreakEvenFactor", + expectedValue: float64(1.1), + }, + { + path: "Pool.EffectiveGasPrice.FinalDeviationPct", + expectedValue: uint64(10), + }, { path: "Pool.DB.User", expectedValue: "pool_user", @@ -361,6 +357,30 @@ func Test_Defaults(t *testing.T) { path: "RPC.EnableL2SuggestedGasPricePolling", expectedValue: true, }, + { + path: "RPC.BatchRequestsEnabled", + expectedValue: false, + }, + { + path: "RPC.BatchRequestsLimit", + expectedValue: uint(20), + }, + { + path: "RPC.MaxLogsCount", + expectedValue: uint64(10000), + }, + { + path: "RPC.MaxLogsBlockRange", + expectedValue: uint64(10000), + }, + { + path: "RPC.MaxNativeBlockHashBlockRange", + expectedValue: uint64(60000), + }, + { + path: "RPC.EnableHttpLog", + expectedValue: true, + }, { path: "RPC.WebSockets.Enabled", expectedValue: true, @@ -373,6 +393,10 @@ func Test_Defaults(t *testing.T) { path: 
"RPC.WebSockets.Port", expectedValue: int(8546), }, + { + path: "RPC.WebSockets.ReadLimit", + expectedValue: int64(104857600), + }, { path: "Executor.URI", expectedValue: "x1-prover:50071", @@ -437,6 +461,46 @@ func Test_Defaults(t *testing.T) { path: "Aggregator.GeneratingProofCleanupThreshold", expectedValue: "10m", }, + { + path: "Aggregator.GasOffset", + expectedValue: uint64(0), + }, + { + path: "State.Batch.Constraints.MaxTxsPerBatch", + expectedValue: uint64(300), + }, + { + path: "State.Batch.Constraints.MaxBatchBytesSize", + expectedValue: uint64(120000), + }, + { + path: "State.Batch.Constraints.MaxCumulativeGasUsed", + expectedValue: uint64(30000000), + }, + { + path: "State.Batch.Constraints.MaxKeccakHashes", + expectedValue: uint32(2145), + }, + { + path: "State.Batch.Constraints.MaxPoseidonHashes", + expectedValue: uint32(252357), + }, + { + path: "State.Batch.Constraints.MaxPoseidonPaddings", + expectedValue: uint32(135191), + }, + { + path: "State.Batch.Constraints.MaxMemAligns", + expectedValue: uint32(236585), + }, + { + path: "State.Batch.Constraints.MaxArithmetics", + expectedValue: uint32(236585), + }, + { + path: "State.Batch.Constraints.MaxBinaries", + expectedValue: uint32(473170), + }, } file, err := os.CreateTemp("", "genesisConfig") require.NoError(t, err) diff --git a/config/default.go b/config/default.go index 223d7a5053..7df650a548 100644 --- a/config/default.go +++ b/config/default.go @@ -11,14 +11,27 @@ Environment = "development" # "production" or "development" Level = "info" Outputs = ["stderr"] -[StateDB] -User = "state_user" -Password = "state_password" -Name = "state_db" -Host = "x1-state-db" -Port = "5432" -EnableLog = false -MaxConns = 200 +[State] + [State.DB] + User = "state_user" + Password = "state_password" + Name = "state_db" + Host = "x1-state-db" + Port = "5432" + EnableLog = false + MaxConns = 200 + [State.Batch] + [State.Batch.Constraints] + MaxTxsPerBatch = 300 + MaxBatchBytesSize = 120000 + MaxCumulativeGasUsed = 
30000000 + MaxKeccakHashes = 2145 + MaxPoseidonHashes = 252357 + MaxPoseidonPaddings = 135191 + MaxMemAligns = 236585 + MaxArithmetics = 236585 + MaxBinaries = 473170 + MaxSteps = 7570538 [Pool] FreeClaimGasLimit = 150000 @@ -32,7 +45,16 @@ PollMinAllowedGasPriceInterval = "15s" AccountQueue = 64 GlobalQueue = 1024 FreeGasAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" - [Pool.DB] + [Pool.EffectiveGasPrice] + Enabled = false + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + ZeroByteGasCost = 4 + NetProfit = 1 + BreakEvenFactor = 1.1 + FinalDeviationPct = 10 + L2GasPriceSuggesterFactor = 0.5 + [Pool.DB] User = "pool_user" Password = "pool_password" Name = "pool_db" @@ -63,31 +85,41 @@ WriteTimeout = "60s" MaxRequestsPerIPAndSecond = 500 SequencerNodeURI = "" EnableL2SuggestedGasPricePolling = true -TraceBatchUseHTTPS = true +BatchRequestsEnabled = false +BatchRequestsLimit = 20 +MaxLogsCount = 10000 +MaxLogsBlockRange = 10000 +MaxNativeBlockHashBlockRange = 60000 +EnableHttpLog = true [RPC.WebSockets] Enabled = true Host = "0.0.0.0" Port = 8546 + ReadLimit = 104857600 [Synchronizer] SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +L1SynchronizationMode = "sequential" # "sequential" or "parallel" + [Synchronizer.L1ParallelSynchronization] + MaxClients = 10 + MaxPendingNoProcessedBlocks = 25 + RequestLastBlockPeriod = "5s" + RequestLastBlockTimeout = "5s" + RequestLastBlockMaxRetries = 3 + StatisticsPeriod = "5m" + TimeoutMainLoop = "5m" + RollupInfoRetriesSpacing= "5s" + FallbackToSequentialModeOnSynchronized = false + [Synchronizer.L1ParallelSynchronization.PerformanceWarning] + AceptableInacctivityTime = "5s" + ApplyAfterNumRollupReceived = 10 [Sequencer] WaitPeriodPoolIsEmpty = "1s" BlocksAmountForTxsToBeDeleted = 100 FrequencyToCheckTxsForDelete = "12h" -MaxTxsPerBatch = 300 -MaxBatchBytesSize = 120000 -MaxCumulativeGasUsed = 30000000 -MaxKeccakHashes = 2145 
-MaxPoseidonHashes = 252357 -MaxPoseidonPaddings = 135191 -MaxMemAligns = 236585 -MaxArithmetics = 236585 -MaxBinaries = 473170 -MaxSteps = 7570538 TxLifetimeCheckTimeout = "10m" MaxTxLifetime = "3h" [Sequencer.Finalizer] @@ -106,11 +138,9 @@ MaxTxLifetime = "3h" [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" - [Sequencer.EffectiveGasPrice] - MaxBreakEvenGasPriceDeviationPercentage = 10 - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - MarginFactor = 1 + [Sequencer.StreamServer] + Port = 0 + Filename = "" Enabled = false [SequenceSender] @@ -120,6 +150,7 @@ MaxBatchesForL1 = 10 L2Coinbase = "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266" PrivateKey = {Path = "/pk/sequencer.keystore", Password = "testonly"} UseValidium = true +GasOffset = 80000 [Aggregator] Host = "0.0.0.0" @@ -131,6 +162,7 @@ TxProfitabilityMinReward = "1.1" ProofStatePollingInterval = "5s" CleanupLockedProofsInterval = "2m" GeneratingProofCleanupThreshold = "10m" +GasOffset = 0 [L2GasPriceSuggester] Type = "follower" diff --git a/config/environments/local/local.genesis.config.json b/config/environments/local/local.genesis.config.json index 9ac4f77df0..a5007182e4 100644 --- a/config/environments/local/local.genesis.config.json +++ b/config/environments/local/local.genesis.config.json @@ -6,7 +6,7 @@ "polygonZkEVMGlobalExitRootAddress": "0xEd236da21Ff62bC7B62608AdB818da49E8549fa7" }, "root": "0xd88680f1b151dd67518f9aca85161424c0cac61df2f5424a3ddc04ea25adecc7", - "genesisBlockNumber": 102, + "genesisBlockNumber": 134, "genesis": [ { "contractName": "PolygonZkEVMDeployer", diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index a2c1a7ac56..89cac617a6 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -6,14 +6,27 @@ Environment = "development" # "production" or "development" Level = "debug" Outputs = ["stderr"] -[StateDB] -User = 
"state_user" -Password = "state_password" -Name = "state_db" -Host = "x1-state-db" -Port = "5432" -EnableLog = false -MaxConns = 200 +[State] + [State.DB] + User = "state_user" + Password = "state_password" + Name = "state_db" + Host = "x1-state-db" + Port = "5432" + EnableLog = false + MaxConns = 200 + [State.Batch] + [State.Batch.Constraints] + MaxTxsPerBatch = 300 + MaxBatchBytesSize = 120000 + MaxCumulativeGasUsed = 30000000 + MaxKeccakHashes = 2145 + MaxPoseidonHashes = 252357 + MaxPoseidonPaddings = 135191 + MaxMemAligns = 236585 + MaxArithmetics = 236585 + MaxBinaries = 473170 + MaxSteps = 7570538 [Pool] FreeClaimGasLimit = 1500000 @@ -25,6 +38,17 @@ DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" FreeGasAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" +AccountQueue = 64 +GlobalQueue = 1024 + [Pool.EffectiveGasPrice] + Enabled = false + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + ZeroByteGasCost = 4 + NetProfit = 1 + BreakEvenFactor = 1.1 + FinalDeviationPct = 10 + L2GasPriceSuggesterFactor = 0.5 [Pool.DB] User = "pool_user" Password = "pool_password" @@ -69,19 +93,6 @@ SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc [Sequencer] -WaitPeriodPoolIsEmpty = "1s" -BlocksAmountForTxsToBeDeleted = 100 -FrequencyToCheckTxsForDelete = "12h" -MaxTxsPerBatch = 300 -MaxBatchBytesSize = 120000 -MaxCumulativeGasUsed = 30000000 -MaxKeccakHashes = 2145 -MaxPoseidonHashes = 252357 -MaxPoseidonPaddings = 135191 -MaxMemAligns = 236585 -MaxArithmetics = 236585 -MaxBinaries = 473170 -MaxSteps = 7570538 TxLifetimeCheckTimeout = "10m" MaxTxLifetime = "3h" [Sequencer.Finalizer] @@ -99,11 +110,9 @@ MaxTxLifetime = "3h" [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" - [Sequencer.EffectiveGasPrice] - MaxBreakEvenGasPriceDeviationPercentage = 10 - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - MarginFactor = 1 + 
[Sequencer.StreamServer] + Port = 0 + Filename = "" Enabled = false [SequenceSender] diff --git a/config/environments/mainnet/node.config.toml b/config/environments/mainnet/node.config.toml index 4ecfe2c953..505e15d22f 100644 --- a/config/environments/mainnet/node.config.toml +++ b/config/environments/mainnet/node.config.toml @@ -3,14 +3,27 @@ Environment = "development" # "production" or "development" Level = "info" Outputs = ["stderr"] -[StateDB] -User = "state_user" -Password = "state_password" -Name = "state_db" -Host = "x1-state-db" -Port = "5432" -EnableLog = false -MaxConns = 200 +[State] + [State.DB] + User = "state_user" + Password = "state_password" + Name = "state_db" + Host = "x1-state-db" + Port = "5432" + EnableLog = false + MaxConns = 200 + [State.Batch] + [State.Batch.Constraints] + MaxTxsPerBatch = 300 + MaxBatchBytesSize = 120000 + MaxCumulativeGasUsed = 30000000 + MaxKeccakHashes = 2145 + MaxPoseidonHashes = 252357 + MaxPoseidonPaddings = 135191 + MaxMemAligns = 236585 + MaxArithmetics = 236585 + MaxBinaries = 473170 + MaxSteps = 7570538 [Pool] FreeClaimGasLimit = 1500000 @@ -20,6 +33,8 @@ DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" FreeGasAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" +AccountQueue = 64 +GlobalQueue = 1024 [Pool.DB] User = "pool_user" Password = "pool_password" @@ -87,4 +102,4 @@ Name = "prover_db" Host = "x1-state-db" Port = "5432" EnableLog = false -MaxConns = 200 \ No newline at end of file +MaxConns = 200 diff --git a/config/environments/mainnet/prover.config.json b/config/environments/mainnet/prover.config.json index cd53c38dd4..6a68f4f07e 100644 --- a/config/environments/mainnet/prover.config.json +++ b/config/environments/mainnet/prover.config.json @@ -113,5 +113,5 @@ "ECRecoverPrecalc": false, "ECRecoverPrecalcNThreads": 4, "stateManager": true, - "useAssociativeCache" : false + "useAssociativeCache" : false } diff --git 
a/config/environments/testnet/node.config.toml b/config/environments/testnet/node.config.toml index aed2f66b18..c2f9b0ce63 100644 --- a/config/environments/testnet/node.config.toml +++ b/config/environments/testnet/node.config.toml @@ -3,14 +3,28 @@ Environment = "development" # "production" or "development" Level = "info" Outputs = ["stderr"] -[StateDB] -User = "state_user" -Password = "state_password" -Name = "state_db" -Host = "x1-state-db" -Port = "5432" -EnableLog = false -MaxConns = 200 +[State] +AccountQueue = 64 + [State.DB] + User = "state_user" + Password = "state_password" + Name = "state_db" + Host = "x1-state-db" + Port = "5432" + EnableLog = false + MaxConns = 200 + [State.Batch] + [State.Batch.Constraints] + MaxTxsPerBatch = 300 + MaxBatchBytesSize = 120000 + MaxCumulativeGasUsed = 30000000 + MaxKeccakHashes = 2145 + MaxPoseidonHashes = 252357 + MaxPoseidonPaddings = 135191 + MaxMemAligns = 236585 + MaxArithmetics = 236585 + MaxBinaries = 473170 + MaxSteps = 7570538 [Pool] FreeClaimGasLimit = 1500000 @@ -88,4 +102,4 @@ Name = "prover_db" Host = "x1-state-db" Port = "5432" EnableLog = false -MaxConns = 200 \ No newline at end of file +MaxConns = 200 diff --git a/config/gen_json_schema.go b/config/gen_json_schema.go index dbaaff1d97..44b0aaa4e1 100644 --- a/config/gen_json_schema.go +++ b/config/gen_json_schema.go @@ -44,7 +44,7 @@ func NewNodeConfigJsonSchemaGenerater() ConfigJsonSchemaGenerater[Config] { config_default_values, err := Default() res.defaultValues = config_default_values if err != nil { - panic("can't create default values for config file") + log.Fatal("can't create default values for config file: " + err.Error()) } return res } @@ -201,37 +201,35 @@ func fillDefaultValuesPartial(schema *jsonschema.Schema, default_config interfac if schema.Properties == nil { return } - for _, key := range schema.Properties.Keys() { + for pair := schema.Properties.Oldest(); pair != nil; pair = pair.Next() { + key := pair.Key + value_schema := pair.Value 
log.Debugf("fillDefaultValuesPartial: key: %s", key) - value, ok := schema.Properties.Get(key) - if ok { - value_schema, _ := value.(*jsonschema.Schema) - default_value := getValueFromStruct(default_config, key) - if default_value.IsValid() && variantFieldIsSet(&value_schema.Default) { - switch value_schema.Type { - case "array": - if default_value.Kind() == reflect.ValueOf(common.Address{}).Kind() { - if !default_value.IsZero() { - def_value := default_value.Interface() - value_schema.Default = def_value - } - } else { - if !default_value.IsZero() && !default_value.IsNil() { - def_value := default_value.Interface() - value_schema.Default = def_value - } + default_value := getValueFromStruct(default_config, key) + if default_value.IsValid() && variantFieldIsSet(&value_schema.Default) { + switch value_schema.Type { + case "array": + if default_value.Kind() == reflect.ValueOf(common.Address{}).Kind() { + if !default_value.IsZero() { + def_value := default_value.Interface() + value_schema.Default = def_value + } + } else { + if !default_value.IsZero() && !default_value.IsNil() { + def_value := default_value.Interface() + value_schema.Default = def_value } - case "object": - fillDefaultValuesPartial(value_schema, default_value.Interface()) - default: // string, number, integer, boolean - if default_value.Type() == reflect.TypeOf(types.Duration{}) { - duration, ok := default_value.Interface().(types.Duration) - if ok { - value_schema.Default = duration.String() - } - } else { - value_schema.Default = default_value.Interface() + } + case "object": + fillDefaultValuesPartial(value_schema, default_value.Interface()) + default: // string, number, integer, boolean + if default_value.Type() == reflect.TypeOf(types.Duration{}) { + duration, ok := default_value.Interface().(types.Duration) + if ok { + value_schema.Default = duration.String() } + } else { + value_schema.Default = default_value.Interface() } } } @@ -243,17 +241,14 @@ func cleanRequiredFields(schema 
*jsonschema.Schema) { if schema.Properties == nil { return } - for _, key := range schema.Properties.Keys() { - value, ok := schema.Properties.Get(key) - if ok { - value_schema, _ := value.(*jsonschema.Schema) - value_schema.Required = []string{} - switch value_schema.Type { - case "object": - cleanRequiredFields(value_schema) - case "array": - cleanRequiredFields(value_schema.Items) - } + for pair := schema.Properties.Oldest(); pair != nil; pair = pair.Next() { + value_schema := pair.Value + value_schema.Required = []string{} + switch value_schema.Type { + case "object": + cleanRequiredFields(value_schema) + case "array": + cleanRequiredFields(value_schema.Items) } } } diff --git a/config/gen_json_schema_test.go b/config/gen_json_schema_test.go index 85958f16f5..97c9edc773 100644 --- a/config/gen_json_schema_test.go +++ b/config/gen_json_schema_test.go @@ -377,16 +377,10 @@ func getValueFromSchema(schema *jsonschema.Schema, keys []string) (*jsonschema.S for _, key := range keys { v, exist := subschema.Properties.Get(key) - if !exist { return nil, errors.New("key " + key + " doesnt exist in schema") } - - new_schema, ok := v.(*jsonschema.Schema) - if !ok { - return nil, errors.New("fails conversion for key " + key + " doesnt exist in schema") - } - subschema = new_schema + subschema = v } return subschema, nil } diff --git a/config/network.go b/config/network.go index 57f1ac8488..a0076a4a18 100644 --- a/config/network.go +++ b/config/network.go @@ -71,22 +71,23 @@ func (cfg *Config) loadNetworkConfig(ctx *cli.Context) { networkJSON = TestnetNetworkConfigJSON case string(custom): var err error - networkJSON, err = loadGenesisFileAsString(ctx) + cfgPath := ctx.String(FlagCustomNetwork) + networkJSON, err = LoadGenesisFileAsString(cfgPath) if err != nil { panic(err.Error()) } default: log.Fatalf("unsupported --network value. 
Must be one of: [%s, %s, %s]", mainnet, testnet, custom) } - config, err := loadGenesisFromJSONString(networkJSON) + config, err := LoadGenesisFromJSONString(networkJSON) if err != nil { panic(fmt.Errorf("failed to load genesis configuration from file. Error: %v", err)) } cfg.NetworkConfig = config } -func loadGenesisFileAsString(ctx *cli.Context) (string, error) { - cfgPath := ctx.String(FlagCustomNetwork) +// LoadGenesisFileAsString loads the genesis file as a string +func LoadGenesisFileAsString(cfgPath string) (string, error) { if cfgPath != "" { f, err := os.Open(cfgPath) //nolint:gosec if err != nil { @@ -109,7 +110,8 @@ func loadGenesisFileAsString(ctx *cli.Context) (string, error) { } } -func loadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) { +// LoadGenesisFromJSONString loads the genesis file from JSON string +func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) { var cfg NetworkConfig var cfgJSON GenesisFromJSON diff --git a/db/migrations/pool/0011.sql b/db/migrations/pool/0011.sql new file mode 100644 index 0000000000..5bd6181a68 --- /dev/null +++ b/db/migrations/pool/0011.sql @@ -0,0 +1,15 @@ +-- +migrate Up +CREATE TABLE IF NOT EXISTS pool.whitelisted ( + addr VARCHAR PRIMARY KEY +); + +CREATE INDEX IF NOT EXISTS idx_transaction_from_nonce ON pool.transaction (from_address, nonce); +CREATE INDEX IF NOT EXISTS idx_transaction_status ON pool.transaction (status); +CREATE INDEX IF NOT EXISTS idx_transaction_hash ON pool.transaction (hash); + +-- +migrate Down +DROP TABLE pool.whitelisted; + +DROP INDEX IF EXISTS pool.idx_transaction_from_nonce; +DROP INDEX IF EXISTS pool.idx_transaction_status; +DROP INDEX IF EXISTS pool.idx_transaction_hash; diff --git a/db/migrations/pool/0011_test.go b/db/migrations/pool/0011_test.go new file mode 100644 index 0000000000..6fede37ae1 --- /dev/null +++ b/db/migrations/pool/0011_test.go @@ -0,0 +1,49 @@ +package pool_migrations_test + +import ( + "database/sql" + "testing" + + 
"github.com/stretchr/testify/assert" +) + +// this migration changes length of the token name +type migrationTest0011 struct{} + +func (m migrationTest0011) InsertData(db *sql.DB) error { + return nil +} + +var indexes = []string{ + "idx_transaction_from_nonce", + "idx_transaction_status", + "idx_transaction_hash", +} + +func (m migrationTest0011) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + // Check indexes adding + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) + } +} + +func (m migrationTest0011) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + // Check indexes removing + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) + } +} + +func TestMigration0011(t *testing.T) { + runMigrationTest(t, 11, migrationTest0011{}) +} diff --git a/db/migrations/pool/utils_test.go b/db/migrations/pool/utils_test.go new file mode 100644 index 0000000000..e2fb037842 --- /dev/null +++ b/db/migrations/pool/utils_test.go @@ -0,0 +1,116 @@ +package pool_migrations_test + +import ( + "database/sql" + "fmt" + "testing" + + "github.com/0xPolygonHermez/zkevm-node/db" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/test/dbutils" + "github.com/gobuffalo/packr/v2" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" + migrate "github.com/rubenv/sql-migrate" + "github.com/stretchr/testify/require" +) + +/* + Considerations tricks and tips for migration file testing: + + - Functionality of the DB is tested by the rest of the packages, migration tests only have to check persistence across migrations (both UP and DOWN) + - It's recommended to use real 
data (from testnet/mainnet), but modifying NULL fields to check that those are migrated properly + - It's recommended to use some SQL tool (such as DBeaver) that generates insert queries from existing rows + - Any new migration file could be tested using the existing `migrationTester` interface. +*/ + +func init() { + log.Init(log.Config{ + Level: "debug", + Outputs: []string{"stderr"}, + }) +} + +type migrationTester interface { + // InsertData used to insert data in the affected tables of the migration that is being tested + // data will be inserted with the schema as it was previous the migration that is being tested + InsertData(*sql.DB) error + // RunAssertsAfterMigrationUp this function will be called after running the migration is being tested + // and should assert that the data inserted in the function InsertData is persisted properly + RunAssertsAfterMigrationUp(*testing.T, *sql.DB) + // RunAssertsAfterMigrationDown this function will be called after reverting the migration that is being tested + // and should assert that the data inserted in the function InsertData is persisted properly + RunAssertsAfterMigrationDown(*testing.T, *sql.DB) +} + +var ( + packrMigrations = map[string]*packr.Box{ + db.PoolMigrationName: packr.New(db.PoolMigrationName, "./migrations/pool"), + } +) + +func runMigrationTest(t *testing.T, migrationNumber int, miter migrationTester) { + // Initialize an empty DB + d, err := initCleanSQLDB(dbutils.NewPoolConfigFromEnv()) + require.NoError(t, err) + require.NoError(t, runMigrationsDown(d, 0, db.PoolMigrationName)) + // Run migrations until migration to test + require.NoError(t, runMigrationsUp(d, migrationNumber-1, db.PoolMigrationName)) + // Insert data into table(s) affected by migration + require.NoError(t, miter.InsertData(d)) + // Run migration that is being tested + require.NoError(t, runMigrationsUp(d, 1, db.PoolMigrationName)) + // Check that data is persisted properly after migration up + miter.RunAssertsAfterMigrationUp(t, 
d) + // Revert migration to test + require.NoError(t, runMigrationsDown(d, 1, db.PoolMigrationName)) + // Check that data is persisted properly after migration down + miter.RunAssertsAfterMigrationDown(t, d) +} + +func initCleanSQLDB(config db.Config) (*sql.DB, error) { + // run migrations + if err := db.RunMigrationsDown(config, db.PoolMigrationName); err != nil { + return nil, err + } + c, err := pgx.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s", config.User, config.Password, config.Host, config.Port, config.Name)) + if err != nil { + return nil, err + } + sqlDB := stdlib.OpenDB(*c) + return sqlDB, nil +} + +func runMigrationsUp(d *sql.DB, n int, packrName string) error { + box, ok := packrMigrations[packrName] + if !ok { + return fmt.Errorf("packr box not found with name: %v", packrName) + } + + var migrations = &migrate.PackrMigrationSource{Box: box} + nMigrations, err := migrate.ExecMax(d, "postgres", migrations, migrate.Up, n) + if err != nil { + return err + } + if nMigrations != n { + return fmt.Errorf("Unexpected amount of migrations: expected: %d, actual: %d", n, nMigrations) + } + return nil +} + +func runMigrationsDown(d *sql.DB, n int, packrName string) error { + box, ok := packrMigrations[packrName] + if !ok { + return fmt.Errorf("packr box not found with name: %v", packrName) + } + + var migrations = &migrate.PackrMigrationSource{Box: box} + nMigrations, err := migrate.ExecMax(d, "postgres", migrations, migrate.Down, n) + if err != nil { + return err + } + if nMigrations != n { + return fmt.Errorf("Unexpected amount of migrations: expected: %d, actual: %d", n, nMigrations) + } + return nil +} diff --git a/db/migrations/state/0009.sql b/db/migrations/state/0009.sql new file mode 100644 index 0000000000..518d3f5b93 --- /dev/null +++ b/db/migrations/state/0009.sql @@ -0,0 +1,7 @@ +-- +migrate Up +ALTER TABLE IF EXISTS state.fork_id DROP CONSTRAINT IF EXISTS fork_id_block_num_fkey; + +-- +migrate Down +ALTER TABLE IF EXISTS state.fork_id ADD 
CONSTRAINT fork_id_block_num_fkey +FOREIGN KEY(block_num) +REFERENCES state.block (block_num) ON DELETE CASCADE; diff --git a/db/migrations/state/0009_test.go b/db/migrations/state/0009_test.go new file mode 100644 index 0000000000..361947144b --- /dev/null +++ b/db/migrations/state/0009_test.go @@ -0,0 +1,95 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// this migration changes length of the token name +type migrationTest0009 struct{} + +const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + +func (m migrationTest0009) InsertData(db *sql.DB) error { + // Insert block to respect the FKey + if _, err := db.Exec(addBlock, 1, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + if _, err := db.Exec(addBlock, 2, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f2"); err != nil { + return err + } + + return nil +} + +func (m migrationTest0009) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + // Insert forkID + const insertForkID = `INSERT INTO state.fork_id ( + from_batch_num, to_batch_num, fork_id, version, block_num) VALUES ( + 1, 10, 1, 'First version', 1 + );` + _, err := db.Exec(insertForkID) + assert.NoError(t, err) + + const insertForkID2 = `INSERT INTO state.fork_id ( + from_batch_num, to_batch_num, fork_id, version, block_num) VALUES ( + 1, 10, 2, 'First version', 10 + );` + _, err = db.Exec(insertForkID2) + assert.NoError(t, err) + + const insertForkID3 = `INSERT INTO state.fork_id ( + from_batch_num, to_batch_num, fork_id, version) VALUES ( + 1, 10, 3, 'First version' + );` + _, err = db.Exec(insertForkID3) + assert.Error(t, err) + + const constrainQuery = `select count(*) from pg_constraint c join pg_class t on c.conrelid = t.oid + join pg_namespace n on t.relnamespace = n.oid where c.conname = 'fork_id_block_num_fkey';` + row := 
db.QueryRow(constrainQuery) + var count int + assert.NoError(t, row.Scan(&count)) + assert.Equal(t, 0, count) + + _, err = db.Exec(addBlock, 10, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f2") + assert.NoError(t, err) +} + +func (m migrationTest0009) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + // Insert forkID + const insertForkID = `INSERT INTO state.fork_id ( + from_batch_num, to_batch_num, fork_id, version, block_num) VALUES ( + 1, 10, 4, 'First version', 2 + );` + _, err := db.Exec(insertForkID) + assert.NoError(t, err) + + const insertForkID2 = `INSERT INTO state.fork_id ( + from_batch_num, to_batch_num, fork_id, version, block_num) VALUES ( + 1, 10, 5, 'First version', 20 + );` + _, err = db.Exec(insertForkID2) + assert.Error(t, err) + + const insertForkID3 = `INSERT INTO state.fork_id ( + from_batch_num, to_batch_num, fork_id, version) VALUES ( + 1, 10, 6, 'First version' + );` + _, err = db.Exec(insertForkID3) + assert.Error(t, err) + + const constrainQuery = `select count(*) from pg_constraint c join pg_class t on c.conrelid = t.oid + join pg_namespace n on t.relnamespace = n.oid where c.conname = 'fork_id_block_num_fkey';` + row := db.QueryRow(constrainQuery) + var count int + assert.NoError(t, row.Scan(&count)) + assert.Equal(t, 1, count) +} + +func TestMigration0009(t *testing.T) { + runMigrationTest(t, 9, migrationTest0009{}) +} diff --git a/db/migrations/state/0010.sql b/db/migrations/state/0010.sql new file mode 100644 index 0000000000..787610e1e3 --- /dev/null +++ b/db/migrations/state/0010.sql @@ -0,0 +1,21 @@ +-- +migrate Up +CREATE INDEX IF NOT EXISTS l2block_block_hash_idx ON state.l2block (block_hash); + +DELETE FROM state.sequences a USING ( + SELECT MIN(ctid) as ctid, from_batch_num + FROM state.sequences + GROUP BY from_batch_num HAVING COUNT(*) > 1 +) b +WHERE a.from_batch_num = b.from_batch_num +AND a.ctid <> b.ctid; + +ALTER TABLE state.sequences ADD PRIMARY KEY(from_batch_num); +ALTER 
TABLE state.trusted_reorg ADD PRIMARY KEY(timestamp); +ALTER TABLE state.sync_info ADD PRIMARY KEY(last_batch_num_seen, last_batch_num_consolidated, init_sync_batch); + +-- +migrate Down +DROP INDEX IF EXISTS state.l2block_block_hash_idx; + +ALTER TABLE state.sequences DROP CONSTRAINT sequences_pkey; +ALTER TABLE state.trusted_reorg DROP CONSTRAINT trusted_reorg_pkey; +ALTER TABLE state.sync_info DROP CONSTRAINT sync_info_pkey; diff --git a/db/migrations/state/0010_test.go b/db/migrations/state/0010_test.go new file mode 100644 index 0000000000..2452150cf9 --- /dev/null +++ b/db/migrations/state/0010_test.go @@ -0,0 +1,67 @@ +package migrations_test + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/assert" +) + +// this migration changes length of the token name +type migrationTest0010 struct{} + +func (m migrationTest0010) InsertData(db *sql.DB) error { + return nil +} + +func (m migrationTest0010) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + indexes := []string{"l2block_block_hash_idx"} + // Check indexes adding + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) + } + + constraints := []string{"sequences_pkey", "trusted_reorg_pkey", "sync_info_pkey"} + // Check constraint adding + for _, idx := range constraints { + // getConstraint + const getConstraint = ` SELECT count(*) FROM pg_constraint c WHERE c.conname = $1;` + row := db.QueryRow(getConstraint, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) + } +} + +func (m migrationTest0010) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + indexes := []string{"l2block_block_hash_idx"} + // Check indexes removing + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := 
db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) + } + + constraints := []string{"sequences_pkey", "trusted_reorg_pkey", "sync_info_pkey"} + // Check constraint adding + for _, idx := range constraints { + // getConstraint + const getConstraint = ` SELECT count(*) FROM pg_constraint c WHERE c.conname = $1;` + row := db.QueryRow(getConstraint, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) + } +} + +func TestMigration0010(t *testing.T) { + runMigrationTest(t, 10, migrationTest0010{}) +} diff --git a/db/migrations/state/0011.sql b/db/migrations/state/0011.sql new file mode 100644 index 0000000000..e4294cdf20 --- /dev/null +++ b/db/migrations/state/0011.sql @@ -0,0 +1,21 @@ +-- +migrate Up +CREATE INDEX IF NOT EXISTS l2block_created_at_idx ON state.l2block (created_at); + +CREATE INDEX IF NOT EXISTS log_log_index_idx ON state.log (log_index); +CREATE INDEX IF NOT EXISTS log_topic0_idx ON state.log (topic0); +CREATE INDEX IF NOT EXISTS log_topic1_idx ON state.log (topic1); +CREATE INDEX IF NOT EXISTS log_topic2_idx ON state.log (topic2); +CREATE INDEX IF NOT EXISTS log_topic3_idx ON state.log (topic3); + +ALTER TABLE state.transaction ADD COLUMN egp_log JSONB; + +-- +migrate Down +DROP INDEX IF EXISTS state.l2block_created_at_idx; + +DROP INDEX IF EXISTS state.log_log_index_idx; +DROP INDEX IF EXISTS state.log_topic0_idx; +DROP INDEX IF EXISTS state.log_topic1_idx; +DROP INDEX IF EXISTS state.log_topic2_idx; +DROP INDEX IF EXISTS state.log_topic3_idx; + +ALTER TABLE state.transaction DROP COLUMN egp_log; \ No newline at end of file diff --git a/db/migrations/state/0011_test.go b/db/migrations/state/0011_test.go new file mode 100644 index 0000000000..3c245e7d31 --- /dev/null +++ b/db/migrations/state/0011_test.go @@ -0,0 +1,73 @@ +package migrations_test + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/assert" +) + +// this migration changes 
length of the token name +type migrationTest0011 struct{} + +func (m migrationTest0011) InsertData(db *sql.DB) error { + return nil +} + +func (m migrationTest0011) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + indexes := []string{ + "l2block_created_at_idx", + "log_log_index_idx", + "log_topic0_idx", + "log_topic1_idx", + "log_topic2_idx", + "log_topic3_idx", + } + // Check indexes adding + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) + } + + // Check column egp_log exists in state.transactions table + const getFinalDeviationColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='transaction' and column_name='egp_log'` + row := db.QueryRow(getFinalDeviationColumn) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) +} + +func (m migrationTest0011) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + indexes := []string{ + "l2block_created_at_idx", + "log_log_index_idx", + "log_topic0_idx", + "log_topic1_idx", + "log_topic2_idx", + "log_topic3_idx", + } + // Check indexes removing + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) + } + + // Check column egp_log doesn't exists in state.transactions table + const getFinalDeviationColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='transaction' and column_name='egp_log'` + row := db.QueryRow(getFinalDeviationColumn) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) +} + +func TestMigration0011(t *testing.T) { + runMigrationTest(t, 11, migrationTest0011{}) +} diff --git a/db/migrations/state/0012.sql 
b/db/migrations/state/0012.sql new file mode 100644 index 0000000000..27d0173d8c --- /dev/null +++ b/db/migrations/state/0012.sql @@ -0,0 +1,8 @@ +-- +migrate Up +ALTER TABLE state.monitored_txs + ADD COLUMN gas_offset DECIMAL(78, 0) NOT NULL DEFAULT 0; +ALTER TABLE state.monitored_txs ALTER COLUMN gas_offset DROP DEFAULT; + +-- +migrate Down +ALTER TABLE state.monitored_txs + DROP COLUMN gas_offset; \ No newline at end of file diff --git a/db/migrations/state/0012_test.go b/db/migrations/state/0012_test.go new file mode 100644 index 0000000000..a3b46371c2 --- /dev/null +++ b/db/migrations/state/0012_test.go @@ -0,0 +1,62 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +// this migration changes length of the token name +type migrationTest0012 struct{} + +func (m migrationTest0012) InsertData(db *sql.DB) error { + addMonitoredTx := ` + INSERT INTO state.monitored_txs (owner, id, from_addr, to_addr, nonce, value, data, gas, gas_price, status, history, block_num, created_at, updated_at) + VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14);` + + args := []interface{}{ + "owner", "id1", common.HexToAddress("0x111").String(), common.HexToAddress("0x222").String(), 333, 444, + []byte{5, 5, 5}, 666, 777, "status", []string{common.HexToHash("0x888").String()}, 999, time.Now(), time.Now(), + } + if _, err := db.Exec(addMonitoredTx, args...); err != nil { + return err + } + + return nil +} + +func (m migrationTest0012) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + addMonitoredTx := ` + INSERT INTO state.monitored_txs (owner, id, from_addr, to_addr, nonce, value, data, gas, gas_price, status, history, block_num, created_at, updated_at, gas_offset) + VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15);` + + args := []interface{}{ + "owner", "id2", common.HexToAddress("0x111").String(), 
common.HexToAddress("0x222").String(), 333, 444, + []byte{5, 5, 5}, 666, 777, "status", []string{common.HexToHash("0x888").String()}, 999, time.Now(), time.Now(), + 101010, + } + _, err := db.Exec(addMonitoredTx, args...) + assert.NoError(t, err) + + gasOffset := 999 + + getGasOffsetQuery := `SELECT gas_offset FROM state.monitored_txs WHERE id = $1` + err = db.QueryRow(getGasOffsetQuery, "id1").Scan(&gasOffset) + assert.NoError(t, err) + assert.Equal(t, 0, gasOffset) + + err = db.QueryRow(getGasOffsetQuery, "id2").Scan(&gasOffset) + assert.NoError(t, err) + assert.Equal(t, 101010, gasOffset) +} + +func (m migrationTest0012) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + +} + +func TestMigration0012(t *testing.T) { + runMigrationTest(t, 12, migrationTest0012{}) +} diff --git a/docker-compose.yml b/docker-compose.yml index 61d17279ed..d0db5c9969 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -26,7 +26,7 @@ services: environment: - ZKEVM_NODE_ETHERMAN_URL=${ZKEVM_NODE_ETHERMAN_URL} volumes: - - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/testnet}/node.config.toml:/app/config.toml + - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/${ZKEVM_NETWORK}}/node.config.toml:/app/config.toml command: - "/bin/sh" - "-c" @@ -50,7 +50,7 @@ services: environment: - ZKEVM_NODE_ETHERMAN_URL=${ZKEVM_NODE_ETHERMAN_URL} volumes: - - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/testnet}/node.config.toml:/app/config.toml + - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/${ZKEVM_NETWORK}}/node.config.toml:/app/config.toml command: - "/bin/sh" - "-c" @@ -70,7 +70,7 @@ services: volumes: - ./db/scripts/init_prover_db.sql:/docker-entrypoint-initdb.d/init.sql - ${ZKEVM_NODE_STATEDB_DATA_DIR}:/var/lib/postgresql/data - - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/testnet}/postgresql.conf:/etc/postgresql.conf + - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/${ZKEVM_NETWORK}}/postgresql.conf:/etc/postgresql.conf environment: - 
POSTGRES_USER=state_user - POSTGRES_PASSWORD=state_password @@ -107,7 +107,7 @@ services: x1-prover: container_name: x1-prover restart: unless-stopped - image: hermeznetwork/zkevm-prover:v2.2.0 + image: hermeznetwork/zkevm-prover:v3.0.2 depends_on: x1-state-db: condition: service_healthy @@ -115,6 +115,6 @@ services: - 50061:50061 # MT - 50071:50071 # Executor volumes: - - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/testnet}/prover.config.json:/usr/src/app/config.json + - ${ZKEVM_ADVANCED_CONFIG_DIR:-./config/environments/${ZKEVM_NETWORK}}/prover.config.json:/usr/src/app/config.json command: > zkProver -c /usr/src/app/config.json diff --git a/docs/architecture.drawio.png b/docs/architecture.drawio.png index 6b6510c17f9d28fb015e17b0f2bcca4fbb5f020b..52f11986c3b92cf51ca111c9231c04d0a74b6ae6 100644 GIT binary patch delta 90371 zcmcG$2RPhY+dirc8PR44B8W0NQAY2*mn0IMXhV=h@5HFl>nKrz2oWK=L_*Zj2_Yeb z=v@dQdYmI?V4{iqFcw@eLZp}o zhV?QnW`^wo?lUHWk_$u0e;I=&4knNg!N8$(7w_75g2%ggIN4!BX*DqKArzQm0$u`P zQOpnlkD{o6h!}^6vb~F!hyM+G52pwAUiKbfBtqazPcIt}FC`BTS6}dji>r(MKi{c4 z-M|0m8cY=*4The80&_$30>+Jo42<$Jrjw5fH%i)TLIOfULZW|9@X!3NNnoC`$z!UB z^#~+HG3o>on0#8EzeXi5%#4xegF=J^B*p%ZkBwMJ04X9V`e!V*gxsDyn44EaF~v+U zjDpA|Oasgm1A|6m1cWIuSZ2f8Qz$h7wV33e#riWB5u{LNl@vY)#*IT2V?i#;Dt_M! 
z*vQs{11WwN_cv}Y4{uK|dlXmzU`#K29*hlC0`u*%7)GDk0HaT#0T&gK`Zx37tfPi` z#Q?)>QYd5Il2G9+g*l|mr4|$W_m_)_3t_m}@6~>xdPP)A&nSjZEhZ)L?+?2s@~@`_ z6Qc>lk(dP1%U68BGCTN#A=%n^**V(dc89Jo_~GRTM#lt}Lju#mY{dQ_t$qdlB?2*V zOdFp<<~K=vZUPAjum`D`TViL7a*&^@>{<`!Y zrk;a}+`!%jw|pE(b?_6@%~6TL5?sdQaI!G{zZwYhRG1$#%PEID2N(W22jYJ(h6R@s zmDskin0{Pldz9#U%f>l#k7QZ*a(QZphff>3>=4MJ4tUn|P=&+q@s7}dNSZ~N)lxY*pqz+vhbV^R1Y zPeg>VVED@eQ49aKO&}`v*Crqk7see0Do=ZNZ+jOzurvSL5lHwCC-LX}{nttS*W(}# zmIg{kX?Ne%^Dn3H_X)&UL$88k$BSX4CdDuT5C5MUkw8oebDiQ}{`ZoQfS8z+*xz>j zE z^fKjN9_HUdQQ3cHV)6fveIoJ4Oa9yNy}$zhTLkVT z*5VRUnQjvJCYcyne0lWdG4U5XJQh4HRi)d0cQ&7pG#HH@bkhy0h4WlVOT9#1C|XLE zbtSDdSTnqcN*S)rEt(a3qx41uCwat+;av4AqA%EEiyVg!(eft;G6&~Af!pu5-MS9S z&t&C3zhBv$YaE+6X&g5T{7#*CRW9Im9XCO2h?4dCr&6SKE8f}&x;ReMaai%@O-PNZ zbz2xwjpIkQqq!(%)bu;=8vB0QqqBn<`-81%kJ(mysksPh_Un1F1_g@vJQfXAxetF$ z7(CadfBU1IwAlIcOM~X1BmS?$7YwLFJLBj`k86XDJWLyXUfuuJG_m?&27P`lih?P1 zbE3Atmn)yYBtJMH`y2Xpxyj9!hGm?FFK=t{+|a*)Of}g3)W#l)r&?sw@o1#>?sfm; zwG#WjG#+iu)K(EgD>;wF=y-Y&b@|_ic1N4FLq(|GR0sDLH=c&&`hN2_X!3Kf^;;au zxAIu(|_q@1Thmbc=;2dQi-xgIq#$p)27Q zjX2ZzIqb58OYX-fPNUCz>F?&Uel9c4)K2Hsny9faR*j)rz6*mzLn9!zH4jgGo=Myr zD^IcS&**LP|2Z&J@&1?DGYN@luUoS_hDa(u_&VOEVk;XPExy({T4r)sIxGP%uB|{R zeDT$rZ>eVW&RImW@6a0+7?B}ak=OL3;|2tSLSarz;yeiR&p_p>3Pj1 zDD17ndUpkuWcbbv<@pP1elt;WfxmigU679k?}>Fo?FW99DWW;P7+wtDCD-Fb>j zE?_UY_JedkI{mjzr2p32mH5sgtCry1{PWZ9bf0?o32b$=BT>(-Z|^<2 znS38xoA8U7oyOtqs5aOVQ(om~lJ@%1^sM0|S|M!soAlPCs4y-hU4Bn~&id;Gqw@EmiY9 zv&L;snR=?PH0N&q!!f~W=?Z3T^&q}0-yc&$*>$*6AI`)GJ3aZus+i2G!87B(!gRu0 zU;Ug@FZ*#5I~L=(LrJho-d(S7I7=wCly>WMwE7&qzc{3Dp5`!^o%L)b7%@R>0v=c* zKFL}FtBTY7VwiMm;Zp#$iuLK?4(Dw0DKj_+I^|SRGn3VJQOSGj)4nt5d=(Atb7OVK zCa{obH)n^DBCTrKtu+ydt!H0XQUtpLQu+1)Fx=8cWs5ldd>vyPo>n68KXx7v3-lR! 
z|6_vOu9!ITqnSqI50c`b;m9om_T~7&>j}jIls2gYVae@RQcfzW* zMph&;^>k4Ys=PD7C$D7phE+Iwlgzun5+E$$NBiSeOQY8n(&?;%ZaR-wJfNC#+_^xY z55G{dzJI}aZk?zGyDuF0Yg3WzG>lXLoh&gEG&&$=)}$0-{WM+brB zu1>s}f5J>)5vI0F?>*sHLgfM{dNa90(fi3YOOnijIXLTatuiAiMTYa%>yPvD1SUl; zEoVMfj5B&(dyFScashQa0f$df+UhRNM-H`*sV+R_kcUK9h7Ke!$%r@8?K_#n+;UhN zrt_|`IB_^ZbG=5imZ@ZZQz*B6LhEY>AYGi@>F|+dws6*2snw+}23Jv9Dg~7DU|M{# zQX&I^m^yoKI^t18&gI^al1@uV6C3fJ`VE11rQ=uLtmlFKx$HB|LgSLbIcpRb2z7IQ ze!JA5%Y0VN!LU(7`?*p1A%pn+AEyF2y1hLlT)D6Y%1Kub7<5nNOh^dN8$X`t|E)_O6bLR&r=4|{1iu;O0-!Y$htp8CG6J}vC~U=ttO*} z@}_kx^uB7VPtk;0MTb}&&j}p2xg@xLzTOsO`qYW&1}}Z@MV^!3bFdjmN*)IvEzubZ zysxcX$r_I+t91NWxZ6Rg(1Qlva>1y!yMBQbsZ5YBv)wdW^l|Cft_;RkrqO#XaJ-iNZaFPTEl0Cqq7(6w=wQEK%l$+LUV8WIGglw(-IN0HX>WVx z{IU80h&}#4mlGyaC>2g`xV&A;c+rBWSHLeC%#j%S@vu?7n^+F5k;qsB9NLR54wvU$ zh7)gGts1lA8?Ra=!wm%A+@BbE%w&^ak{9rj9*T;PVVf6@;h~Q)zd&#vW(=VIgzDG{a%D`xUHdLpA!UT1gTU}jOEPk-dI47f&%;H|ea zbU`f#L;fU0&xnAr_K8^u`7*2FO|fO_*AQE);|=Z$y6Dum;H{?(#Mh9iH$T8f>|lqn zG!XxksP$AG0bwoV-4D4=BPZAMZc6tDgg)UpUZNZt7&%`Aw^2z#9gT>jV9_yHbj%}{%2#-BcF!q$+&q2B*71h zqy*UV2lSJBA@%%->vH>Pxq(}6AH9(MeYmhYaW}u0ud<14W9${RE{a1ZBkg3n#Y&+0 z(6(&~OG4YK@r6nWn(w@yg%dv7}n#Ln`Bu#{|J3v@i>a2B-D0SNBGS zx6$9;e{JyKKKi*Lq3eDA`z-7{4bhWkZ1vD^t8rWYXNm_8tla%emCds6;_r~X0;tO+ z4#bD2jCDlvj%VKb9>2>cj*ZTO?+M1Sd9ujZJh7S@vK>H(`xFlm0m%GDt!<`6#~JvT^KwUv;6Pdiki; z=yN@cx$FDS6#HaB{bO%?a2m#PIOfqxlDWIe8eH@B`7gvvvFBD_+#_cRdNEu{TJU_4yZYCa zq}^sis;L%IyV9|YFHl&yl-cE0UR%>Iqi+~V>DvN^q$9+_FKg`arN%3IxuGYjymDN;^jhTl?soB zu!r^7k9?8`>&LnNSdr0%J|aZ)-q--t6XcVej_Cygw%RU@-^G6huD71|f8-shLCpGm zpUfg5u31pOiL@2$GrhKm-XxCZsgXA*^S)QpI?!7b#y8Y|a=GS8)cY4s52{&U-_4gE z3*yINOKfNl3WYM^pETWANzG6nk2;xRV3|q-tXt=fo!M58(O(obX!U!ozjwdi{T8@Q zZ;Su7?TK>k1$QP`N_!+}4k2T?J63_sSA6y~(ObX>*)FqqvF%of8oK1p`+(&JX;p4M z3G%aLx#MLu0#PtK#p=XWW%X!^F4Sb{EiM&a>r9K8fhSU%9yaLB%iI!EZsDw)$#9qY zh|pl*SC)OJDxiD`%Lksyue@ujC_kIfx(&oZ{7jd1%DlZGyZ8(FqE4wRsEFWBS5=^I)z6^-W@m*ME%2=##Xl{v2RK(3tG zPI)<1nyU|eyuXp~&6MPeqr@{=8tE2E*3g9w_FpiA)lZh95tP=fPYdo1D4|sAgK4+9 zv$kKmOgr0N`FG8!#L$`2s@t4j< 
z9i5As&*kcduz7l`T}NjS2Vwnh`koVuh#Q#_5LdsW8p;SJyd>C%B}smSw;|AoviR5l z`EkUhT`>PWTs~izfPXO=tJpjf<&2}$Jjp6W9t|vwl68Q=a6{!U3@D~evWVMT?Qrc)XW4s zL1hhRP#E@9{{zCOz@meKr}Nkh)Vy|V8ZDsWwU=&pp{cpJ!||NNR#zXI7;O7F5^uq_ zOE+n&(_nthlYf6ri00cOBJCrs>-}!YigcOQI`o2)+?M{OyqyR^ zZ%`TRBEhrjij-)$F|vqow@vHvMJ^8(uH;LJzJ-Wnqxr`y$e1+Lajs^d4f-M1eaS$v zI~)j#V4FK6pW9g9nu(PZL{o585`0`VO$USMo)T&_Fp=%aj;^Ftc*ss4v+|g+%}Fm? zRCorSkDrdRDH z8a=d%N@@C39w|+$Oh7+}MH?QQkn1ZZcM2gLB6MC=H0^48dgUTouzqgmDu2I=mNuwA zcaM!mWSjzP0CohV-~#&W--XdiFCdhKQH;e1{fHp^G{2QRprVJ)9oen~}}t?oHxn+UtxJs=%)y0USb5CvhD zfj2TnEnXr#e~mE9vbzb1Q)}duGJ5)TKUbgDhxKTtiLn7`Tb+y@;Z=%Wy(o28sKep* zycFk!(q&DwB|n|sgj}9USNRYs{-2jfKQ_#Rzv)`+nxxH^MP#6_o=dmITnhW$pc-9! zf+|B?j$cR>5@j3MS@1MqMP7iALO6Yc)nS(-NqY1s% zwcZS8&OHHp`=5jFy38WV!>V3KauV_dd+Xj&&BiDtif&D*JDRHYfFkYThc6hlM3TH{ zuL||9eC8~|urGL91*|GeZBblLyi!T7EK-tDwaBgPxu4fLf9^x2BV4Rkho3I1cQ~Sr z#WcP(I7S(4plmM0c8|?zD2F8~XID?5yxY>4Ph4Bl!i??yHoadqD<_YwgYT$nVoaQw z)|IYm*LI?Yu!v#ESRqUKgQxFuh}%Qy13$gE*=4IV_lY2jzjp^9V}eD~&e7fwUs8W& zP0>qPgar5cG?*WvQcY5{sEK3IF}V@9lHpY5?nwMX<<~}W1&SWXcQ3Qp)qQB6@NcUA zJm0vu9(MIpuna;C{rSpYo98`|Q}#>W`c(c?(m_K|hpO#$FFk zP3qhUE=QOMb2sSp3=KV@6w(Sa>wT8rJnJ%dkix~ppGoh7ixcAN7BIIubXJpyc>^a$nP!CAy5rl*uk4^=6F^+GyWQs zJ~;`}p=Wn48DAf@o(@4v7Pa)mn8N*PiQK)y-$T$H>2Ki{mBW!5|06J! 
zsNV5o`;>>{(fT!{?c2~7;RF6AdYW(IbGV&S{pVc>7`z{=r3}(aXvl=wY#k6wu(L`tWI@E&2iLOFhud zG$ySg|4SYnLqc&io-sa#p5+Cd$H0|Tv!zdY23!6(o2*IWl^d0=F2M+EJpl zvFL3m%ql52$HqKBxNHHm_4?&u;Nsn$u?DB?uYx+XckkDo^4|^84ODm%ph;t4|8keeGz6Uca*s0F-iQ?gG>S>CR(Km58QdKm`Wnzn2g2l zeinYhU!&a9Jah73?!X+qsUj_QPkY%sL0x@dF`%6U8GP#J8Ia7x<-%~$lflJa-1>eE z)wSL|sgL3vr&k&7>{7##$)>FPkPU_s)zKOB>gIU;t9#=WoCxVWNM$S+3R~9pZn{FB zc8hI`j$k%3xFIgM%3SBK7ete`G3m;)kB}Gnjt=C>nd9K4utQQhfn1m$c*9d-mETks z5N=D<;?^2r`Yo+mxztyQr=nMKJrnOv8zARd_CW0cbs;tk)Ppp+%sj;2dWP;qHGg(Q zp?=J^aXyNWl(ZNHxi=}^^FR-7o2%E0*l|le>iauTMxvS}H(4t^ERtdyUwXs7H!Xe^ zVGfEScL54P8OF5Ny$J@T1{~KPx+%5wZ3PFYL7SY%T~6CgW&r&0V1AH_CmFs==RQB* z8cN8Cm){z|XNgzhB!os&D=4!*PiSb6@?3t55d%MUi%hki50Qwy5X?HvHz?N5x|P=> zw;E{z_lTP{n!jC^$#DzWexXY%6QqiM6Wxlh^`xyi^Q-N;iEGZ4w(GuPTfhGb6JT!H z&!5t0&@E@w+}RU+n3n>|a-CX-!6MI<56`n5{ndNIRmbiSY@n%mEcP^I3DEZd1VxY{ zMWQBqW4J#-;+vxZ%B?bE9QQKY;~qrPW!e`2)UwVgs*M_O+9Ae&0}V6(?iX$=X}0u< zFy>{=JL4WfM}E^9Y!QD|jmb%|mCf3P9r@e{HZ`>yq#2DLpK0{JnF-#AV+AnTDyym1 z4wpODNasEpn*0#n!bVS9?-v&`s@k@8{9a8^_Lb*kF+lU2#>iTdaxL@#ID&jDSl5t* zhe{57QI*W@Jnhjkem*k~DvJa)_KY~W>GiuAC@dR$6_-oblAqbJU5$(P*wA+vLb-+^ zsIAI@gY9=p#;u69(txvY3%`Icekq53o=lcYQ)pRK?XtMXwQnqn*2oI+uw)W-3hg6+ z#0#v5o#Z%%_TRp3wKX;0n#z(mea_yjLz;{ zk#M=MKj=0XI}@7Pt+$mL^n{M6GbU^m14v4h|nM+zHO9 zGyTzU=DoYv+eRSD{=U;bob&-8K$Z* zGuGD)-b&LxSZv|d?LMQ5Nu7wqi}@tk)&FW?>gt9uU8>~*xrf}CS`|uUzXM%umAIWM zmSMhmcB%Y|UYE9dSozW9K@s2+tO1{3@Z@9W!zeth{sv^;?JdZe|4nFo&}`_1h>dBf z4`rR_P`AxYvL=)e*MlayEVJHn?QCC{IVMrZ8!Wpk0P1gXxkt&Q%EDKe@_CzSGu!Ap zA*81goA4a+8$PveWJYK}goYJq-(Y`(O>NZFse-ZG;*%Z)We7(0Q$qiK!_R0`4k0m_ z(D6dlz{_fFy(J@c3CVEgd`){nF374Ox5yD z!Dj)Op7>K569-2LtLC8U=8B;sGJO3BYbJUrkD|Ad)u5J7G|{d?N6f~ueke68GwR}< z-?tRmiB;~^Y>Hm{$rGa(F#lAQouf1NQ$fQl1xCUhpPdO&5y7531;^;l=`&JsQc)_8 zhy|TIKGI3!PSLGDu1JC%u9f8LvgwAI^BGt0>*a`xk!(QI^y^>)1F~W^?GV#0F<+d|m`m*Ont`qEPStT+`vEdL1E-v|#O%Wc`nj$SW!>7tb zo?fX;JX)}yD?@^BJ(iH~e*<0#M^bXyus7N#hE|wVx3dpSHn=~t@EjCdu2cEBJh+6V zzP{J1S!M-%nqckb;G+cNGIqS#ldKK=e0ix&Yw^9A40tAD^($74<(Vf(RA?Jqp}o%aT+ 
zKjYdXFGYDt5|uesibY7OLAi;r)WMWzE51@=jEG~h?2-X&}) zDzdP_sG>;%!fA?ivyhx?OwuY{v$uJ*)eW?=rD@d^;Wr?c@YPsCpxpE%1;cgyu_0^) zP(bD?^0F47*wc$jLEaB8z+FcW9g>=YcTumg9Crckx*{rq|Az;m|9^YZfA%2s|H;Yy zFNTEv-b>uWJ!G(FZ! zOZpO-WN?%SGGNcqw88CZ({3N%NR>^;rEs$-%sSKyy)|X39)Fqj`((Yyu{p?~cucCS zUsB3cEt~+p5(Vha_9jtO56o%$U47H(Zht6Kl8*%(iX^B=f&0V(zthqL-ZlateKO%3 zS^t{4`x6Gl6pW5Od}f#MTM$*=8%A%}p63)PM_wGMv{K+;38phi`>mvV)qfFgT5TJN zj%Se2kG;f0)q2x0lO}YQZT2Gl9ATwNbs z@1wg@$+;eVJT+cx22v-RwF|+VPo?W9tRZ^pn_v=of*pr2gkXBvE7CMnLZD z4flP(I|Y^0SC6J)RlGm&SlAsg-Q@2{4Si9Ka91f2YzR?Ix{8)V0oGxp1&yXN8|9o* zEdZ%_s*JD8+UK&&oIz*vSVc!T7|U$9ef#pZAE~?_CuF~~J1$p#o0tF_ySFx~0f>wZ zb6J;5&n$JMAmDbE=Nf^x^j*Ca*$U$foy$V|B4P8=r91*Dd&ZPMDsbO>JH7lkkEiku) zc%l36xreOU$G@>hg$Nk+&aE80F>DUbO#UU94s%P7H)(x;yk3R`bb)D;U-jnH+glfD zc-eu5CIyiFnkD*A3`ne)bS@$a;tVg69-xJlO68Q9EWf~d()luTftbYOoBv7{pV<`& zv>(lRE(8Tct2F7ZArZxv2_&Hijl>?Hdf-$z{Sgk9 zvIC{Qf}C8*3)B~Am<@Zp?zYa#fzW3y+kY-gfSXJ7-*cHp!)Fc5YI4zgA2pVLxmSXG zu`+qwhcF4uDU{CUIpGcT(eu|q-d^WPt)EJ7f!ft2C6bgbz=8E9vhYeNlsy_;NJ2oD zJbt?wAD-qjN(-qF2@(UEjg z_3K$d$W`>IdvC_I9FY9>?%pY3zP+=r>8%Ms+(=UdTOiR4>#bu z$UtC&KqCSVw$eeOlvSiv^2BG-ef~G_JM4h=OqFo`_TuFa@Tt%A?h~5JVk9|1gow-E zU)VvDqawC_!LdoKPr2Ett!@z|`-}KybOM7!A0RHD%t4p~9~>7=Lm@WR@4zl08oh^N zU0~Bnta!K13kc>J_jjG!+s!8~Kckd0(2F?95I|oZ8x4w$TOc~3C^OmAVjt(<9OB5d zV9O6aF8w&}SnRzqHd5zw3$rf_<$&&z6C?(&FSLbU-~jA(w>QtT{FBmcm;Fw@_r}Qi zjEDut3sfUdRW9H%&D@~f-d*4(b5NqE3X_SVi7yAAQOf=ZnRx26F?Q{nFZ z1?PW}(MC3qf+04Mv@^!o15RfKOeQAQ#-nc6^Nh5`zcF4_K~fsf5V z>}G8A%8Ua9gLH86FUmXz5-^SE*H*O-2~$9F<8k=ICQHV*qLj`<#i2Wt@Pfma_xl^; zIhQ`2J{-~F>J%YEU(X{C3zEa{ib^Q#Udcx{1I3eh#x--=`ly!k-;KHMUG(ZJ2px3= zE*+j6B|+*?fBbsmfJzjW)amX=PE}NAEY0rklWm=y`A++3Is&V~9 z$ldR6X0!p7ulpI66h=ha2dY6qx;uVPKb09@H>$kzAUekI@r#+DpL##ZBzI(`E(*L` zWwXV>SzpM7#KZ)u7y?i3qWPrNjFK-Xrc*owWT@XYxpGbS-6fh14rXZJf`usBe z$UEVR7*4F_v7B|cFB*;*SiCOxV6rX+CjjFY#R;S)NHV!^w(~3E)C>M-5Wh#>dfuG? zh!&9j?C%2dJ}Z)_4BeW{s#4@O*X{vyF#X_Fn<7QI+aCb=Zd(rJ{}5C_DStRCT!bx4 z639%uz)IqfO4WjkA)$C9>+P|p$}>xUPL&L5;! 
z811~Q!>~S8Utw~ zP9jL|^oae|uuXYTj)bc?Y6;*GLK!LX!7k(vfbi^5?n3b9WPKS90&;o3ZzNKJ6fB%c zez4qsJBRE2NFWD7{-lcK0@!GoAVxEfio_MgUlHr8+8-&_(a99B91!8AWM+u$Pdt3D zhF1UbRN3W<_x}lJ143l7~OI4}ZIN!bA^;RnU~h*SL)a zpv*LJ5auWKan{|jB+Z}zdQ>3lG=dEZO1_kKJMJvpQgF;uXuGy7o1R zb@7V&z~mdUmI@`Q#>V_b(D#bZ8=SrTV5y<**<>T{29Z`u@v9hBJ17iM3&w$?Uymn)<3E|ZeQ(JT`4k!Et+ zJU8yw6uO33vr|vz%0TK4Hx!I?$E&UjHtO||vC|mbqX{#uu6t$KX!MAj!GZH`fBH`V zv@pCO$H+%qmSA?+0SsUkwWLZEg&t{;-mSeN+(hi75YLrOZQ*(+kEM6olpWe>4JoebqNS7HvUJB6aUqhw#tAlm*}P05KNufh zkagJSK5B!{Dg=9|M5jrgK%o>;U1%zdFd6lu2vb&MZM-pwD(M?~?K)$sVTiHRN(PrN>72|?5R#ul}rT)H> zV%NbNUE#C&<>UR+==^r)Qy?t9LU+xD+2Tv7`1OF|18w!wJ^SJfQh6=(V}%lwBwO!= z(9)(GPn9{DAuV^uvzo2d=KMIG@0wincF!gjxywG+3b}6XvnU=@%p4rZNwXXOC5A^v z$s?w~c&)Y^Y4a>@jL3YIv*i9T55M3gFDl!(ELmuG-}~&e?`c_M99=I5cvQUvYfyVX zd+xgJ+>j*6xT#g|KLYXvG_ugDj4hdk07OI*pYf05RbwC@9fT-Q&s=&{Rg3cIzI`>K zaF;wRtuDz2F`NF4kG|SG)y&Ef_GA6oK3{7Cq5FnCDkaW~^vIhm<;>uSSV?Y4H|lk6sQ=K7?_)pjjRFi<;5ks7tt8lH0!@R~a@}s9ZfHu58?r#z22e-m4_v zG-(Qmo7@>@o&BuAAYCALrK)cnF-tgAo|=MG&V5b(A$EC-hqvHWl%P2KWwX|e z&c59ooEF=1QiNi6KWb1{pRW#ZJk^{h_$)E40*a=Cs$Yf0EIqzJiI;lKTpZVSKnHWm zS%WqFKHeBi_>`Rbm*HuURIaj`O+yz9S0}fv5jb<){}eVFsL<~V5)YjB&HDP92PA7E zQ3N$+5a7)`JxTq*XY@nXXaI#~{brQ%P&H_~_8bhO-Xk)e-F7XyJ2mz2$G)B#i#z1B z$%I3m_u;5=Yvt}4@VUiaYa_b=QtZKHbl}J*k6WQF-+8UE@o(MDbyt6+d>`aUw=3^h zPQG#7CRJjV_R`GYH)X?>T59Oj?-5{q;9j}$?9^z&)Pcqrl|oluf@;>;@lM{?-!^WU zvd$VG{o0x~2dF(4D8EbGgc*qX+e7dPBRX-6@+E$*Co;a`8PvhQKpNExl+LWa)83gN zJzFv9Emq@l}O^b)P< z88aGnw|i3!)t|~uM{`kga#Kn|z>9u+ThqC;b0x(b!h>Ocu(ufO%ct)V>!NTkzEN&OQ)XzYrnv*1d`~p`O?}8F7 zCynCdU|XVP0YWao*@aKi?(BIm?VGc5B()fESN?2oR1~@U>DKiu=`)kGcyw-4DB&xXjp9?BiwZOkBu@(?ZA)RFi?uMEkK$+7k9PY76Nreyl&8@qa%{cOvjU2X zW9dNcKY7ZG+y%evx6ic9P*)E=-O-TRZu+?h^j_(?Ql5R^-UkvX_jr0P4+t+*Up)J< zF)gUAPIYd<$coU%21wq#J70zKTbfB%<>KGIMW6q^s1eH=axa{y$++@PGXp&;v>_RY zv4^p)V4)Sd0$R~2BU{srIeeS${nowKq3ES_V_n@Ya83kq?KFMqd^wjX%YMj@LCd+4 z$;)ZOFHnkO@iQr0sDJ+moRUn2mtU#7Rie*C51NMDKU#O{yV8e7R0$Iefxe-fFBnG@ z1nK?zXr&(zZ}%qsR-Z;K7Gd;L!4bJ^OkM-W1 
zQ&P_%^=0pYnj&g2Tg;)x$XL(WDSWBWw8}a(rsannNDXweHwZjhoi6z-MNu+~L!1wP z%;9>aJVzcOA0B(zow581Ct@;y`IBANAszo#36!;rCrhpt+$L(}hp?&;D6U9^5tOr{ zL1&dPE!n(6G?&(ObrzM=tH#UN80x`m-;?1`15lNZ@*|Sr7D5dklH?Kl;ph24Ew?)x zPI+-*`i*P;8bbv`4~Cg^k)$w!Kg_HtHoYc2UXp2M&g)Fc@ap$BzED_elomZz1&0{S zy81(oM5R0Y@GWept(1m#)6re0!4fU0ry^A5z5ZwimLK}LEp|sTVDE){y)})^RBj>b zHwYaWQBC+sgwG3VgJcDCu$>e@WaFwGixj4n2Q=RY>@NbxD>j7dL=#quDNgwX^-17| zWdrY+Jt`F}0QN>^B2ZMojrZme#HYBbfMun|@ZRUOQ4 z`R?`y;1+P`b36#(4LVfUXzAPDQ9TE&kGs=Xb@i$EHMiPV*r%T*I>PYY(m!qx4)?rd zaD5zdeRs2ogly@MR~VN21$wKkuU&85#0-?>g+UKZmht8l{(0TE5;gbB&J{lJ?@bZSGyeO5$_L?wtoe>f$2L;(tG`CT(+ zRCG)E`75439FjEPkfzeBQsKTkV`KkNO(BZOJQBH>FH%Q?`To8^@=-`C zyt^R5topy*f)`p=`TkdS@A>>eAvz-k%Ta`19%EPX&j2Lj!F^Hq`o{0g)TcVl&2Rn9 zYeDgF@25%qZw~|4`bMT9Q4!V%7yiZF;-2<|#Hqj^pjYT!T>0zge=}e(6G&Qo1K+ze zRs@6|js-xSCDF)Vb3ns2)gUIf?x4@SF|TK~)d)?fGn2pLmscmlx5E8EU)q2rVNvqR zJ7)u7W=S{2yr5mJqt$0g15d76Hu|IkiFE&5M^t3aO{o?+9LxafTd-`LJ2y9g7y65! z3sq#%L`B@Y+0a8{K=p}=w;zfvbeVpKL(@EkKhL1i=MPL=-?&VZ!PmaNsob1yG&XyE zpGU-DAbGO>o(>3Ziy(Oi?S1ch2q|S*h%R1&zAUew`SWmLHo*O<7u*lvzl|yaDTx7a zMRrWR;GDU=z5M2fLw|ZNy z3pZN!$IR_PuKXz8jno-1vQz2t|5$#O9=B?d^7cJFAD+m7rZfE%&}*Or!uZII#dlk8 z&@;hbbL&EH-v;fU&F8-ZF{VkEVdV(W@|Ox?1^tNf5$HKgsK+&Vf(8MzdhHJi=Rv+4 z&;Hq!evj}3PC*OTEzawCo2JaR4@~UTZPnHfSMF0<7n6kDbnlF3lrr#G9^estzn3Zt zwCXj08n-)>1aW)`BywnpOcFi7HMunHs2>5`jf@0Ga3stFoX4=@?eY~|(T_`FaEb4j z1PzX0h4TNk5iR>n)I;DEy#*BSY>@yh%<*O@yR zf>tiyz5?InpNKaOE>Og~(BA_zF=+CS9}E9%Qi{Je=}H@&XpqmdHOrKij`Es9HY(ztIbOZ#lD<}`PWpi|P=5!GZC`Tt^%+2E_6N-<4fNtf%E z^dzQH^Q2O+HhhxuMO+`PD;{EO2UPJMYsHyYbk2$BELf;Hb(6us9`9X(m9YR9r<9uU z%kXcyx>XpW8R9?bRFcJb8|;fz>g%!}`S)O{0EyKG&?M768T7YU5WobIxyo>zP$_@m z5(nU?Z!hrSO@;x-O{wtfbK_lqS^fs(G(s_7{$Y{t_P4Hf>vRBA_W+n`zQiTF8D;>R z5RC4zOOkUiFonDVEO|DzuI0};No0jNF+GJ-C@15Ih?GHTV`%t{9(Pb+bMK=k>w0qs z*h^^KV@=F@LL<$INQrl1P44V2_1~=t$PZXAHE3|N@m$BGQCVzhyOfn1XseoOtlqov zifzxbH`S5?4?2(~qTv8~tI$1^B(gc8PHf-|)v`)Kra<$=`$ww~5ER>W zuOWQyC5@H**`9t7%>XH!x=z_1*ARp>bn#qCBiC>uaq(_6$!s(`o0k_m)j&*(T-XNKk3Jf1_I)z5n@B3r7$@|f 
z8yJ-eTxXh>?;w9KuO}DK9G`(g?Qz-l9wn+G5TP_jMwhVX`L@?TgUqERfPdWsXyy|DDvZ{C zem!3Rh-K0MR9up+|9rqt(s+ns`DIxJRbqWQD0%I>G6ocHM`4tPrhN^graNV7$23ox zL7P5ChTTvHWN3pH6U`3;ttf@}P&joR(iRI{u*20Fy@keA)=E~S*9M*-%;$irYheG| zp^B7?MwQXyxOKOxqDtvaG^Kw^#e2V<`5Mrgqu3pdid+Vi!@%Tge=T3R#DByDKOL;B z^61QvVQi`F&;Lbbn>y7|qKArp19jeG8P*<>R?`z9n$L5yIx?Hmo5c{Ri~SON0JZR+ zAUB>|_Vd6o8Uj=;8N;b5SHzq(!->g@0uRtz!RZOV_|zdn^*{vhc?u4HUMQU_Jwj1N z5y+!7MYl*j*PMApg z4X-V1(=EHW^kEgU*Ogwq?AY1RfPI5vC<3n~0m>=+7UwBbMi|Wtpbu(ha#`(xPVE2M zo)J7dIXHNAqs}gdb-4bXF?9hrrj=M;Xpi~J4t)4+f@!ii3NE~a@0C@DDU1C487a7K z_whwOFBC)}5a+`{67ZVu4cch+8UfHnB7L=r0yd-9tKI}Gqc&_l+HeWS*SpXvj7UXr zI1~XsNn5>;yPHHqt|p0w`-Y;WXm#)_{J;-L&+W;2K5Mz8%0@8mIhvx9j>3-MWD-;n z@H|hY^g8SVu3l>tj*;=ofL9VTVnp|z7Qvz6K^;O}MKr9fG%wIW)9|a3)Dlz~vIrdE zl?qodRo_$bT~RrBnJ&~LL1wq7jXoW&=o@F^2@%<`x9M%21k@e}0z~zrhB}%QA>Bor z%XW{wO_w$j_par3ct|A7?Dv)Z_NNt@{~R)X)KvHPW5{Yb%Yvp;nj<|;T)yu&-tqse zz=7BbLc2b_*U1;@L%k{#|4~o&Y%Bx3DMoev6u=eQKu9%s(+ZiM!E4y}L|<~a55#NN zEwe^|fh1{duKB*-eTfFWt~Zol8BB}5By7`YEz^=Z?CF#UbCd+lx5hmS9Wx--S_jb_ z&pQ3>26R6GDogsgM({Xa z9Rx_b?l(4pte=5%Uj*pRp8z;!cR*xF^JsS|X`|+^sq)b8H2PeqL*L=4%W;deiFcV_ zd-M8s^Ohz%UoO}6y!-rQAl8YvB#pf?$NkF}oBAuZj`koh>;mKj-4D7~i*en^t%Q`C zK=+6Y5-Hp18ZLU4&hRm zBAos&%HA?As<79% zC?T=;@p<0eefPH?c0c&Z;mny6_kCUey1ukDEk2&svYX_&7x#G+Mtb-jln$KiuiL^5 z9SA2-&6RL11}~VgFW0AiJL8nQr2mdiLIB;^oe2boU;AH&_xI#>oWo}cW~JoOeg;S& zjf=D2CBP`BS8J#B#-#2okhqQhNlMcST{sK-<7O0y;kEZ*#W?vGY^!vCH>PeMqYG~W<^*YOy!Lz)$+e2?%ZQV;~+!|O{ZrU)m2-O2I4_IoOrG(JHt^!gY zeyu3LZg1F%o*WPAGXg)Yd{8k&H_BV7XCV22Jff0K0;hOEf0yZ>XGYw{ve1Ht!tsU1Fr$d=+n>(D{~dSOJ=t$_w?p7bQib22+tksb3y}{+obQ9X z-cYr=d%Pd`pBLx<^}YF@!{FwiVX=W%AH6*%lj_FUD;h{ZJ81_zYJEE2AMdL_&%d6@ z5?mIC-iLr5ybnqe&cl!sH0B_~V;*C`{aT1%91CT6XLv!xNgwc|ZWdOj*8pzyu*BoD z4OqKeh{e1vDQ~yozUuEaCjeNW$L5*SJE{e(rY5OAV23t@^%LQ*ONE(yfn9M$DFdLQv&hUqXCad`q5Bm8TdtSI!qcyGGC#Yw zFL727>UnCkxsbsIe(wVgbgy9EMMTAN|Bn6@);*QBi$A8YUwy<=h}(kwRa1AE!#7+a zniJ*awY?a|bv-eP1|zYM&e5lT^8LP~RGK%dm`?};ANM1L_2UFuVF{=wk|UnU*5z}3 
zh17wh(?{MnW=%EKNw?3oUmgG{Yt9ECKBm0IOU9(lqM36;O{n!q&s%JD{ppMp2FTXDkz%U7#HVt9=4`}*PdvmSu;FNeI-B<#S$puw)IIy0Cht79f%OpN zy-eqEJ_>ewMr#52ccQdMd7a-fN;Zp*8GZis*T40<<;uXoKne#C)+afFt2;1IXahnZ zPM*Uk-qzLa?x=In5@e0|CsUhI!h)Pd?{V3!5y@M36)~z*s%m>`x;E6=Qb>LR9zvq~ zEbE-@?#E-$5D#5m{2k@&+R#}w7gAr_4TRKG&wbR545`p!JWNf3}H>or7#3O_j<2cAH|mV&gUR>k@PRL?>wp z*iy~51Q{yKwFAKA{UB?tj85NH+D}3(EX^9 zQ;%4pQk&Qz(D_)6%>MY?aLbSR*Dhf8N2MNTHQQnp0v+BWXkg$r^HB5s87r0Mz>&>F zkpi&ge-pd!-Nx*P2(e=OT8V`q&&HaE(yI{3b)*qBLC0=sRXmfPwx39S74p*Uk-G)+ zQnmgHDq;+@X}AR8St74<+q^I3zO1Dy_J1Bn75I6hfiV|x3_2BJ%&J(az^B7_vYfW{ zbwpe4G#q8}x~JW@6!0|K(nM|g=$1O-@jTrCNM0S7j1ljSlS!1o`r)dV))LC{W#cdI z**b32zB*YaJv2XXE%M$j+AIwKw`X*`QTJMcS^|yWh_ZtV?~4$7-)}bTUcM%6$+rit ze_8^FY>rUGl`^e43Wde@ZLz7g6W?X7QT|WXEugis4JCd4l9gqv&U;L%KUvPq$4toY zld*mFDB0m85R9M}-aD$$KrIQ>JC)6QV!7`@gwUrE8#3ks8}I1K+y~(SC&N{}Hrh&+y6o@Ywn0QBY$S^}m!XtU*=~$VP??^n#D0>os&%}ltAl;#< ziD;=J@AMk}IhVXj5^&j$7$ePRLuwpdqJH;K$7c=CA`mX*GEd!w^c z$Z3d?M%$7f02v|#cKtD<71nk#PlPxs&kN*akUHI`%Fb^$T+kAr*ETP{oR-k^Ay$ znj!PDFsku7ismPIth+QGcZfXB^4aJQk4s(Z0+I&wRTF3$9A$(xJA)U4H?nS5#D=O; zMRl@s7-5z#SNq%CU%6{{OkN>J`6YhdW%gd_(OL)7 zSkzc?;XtF<4*n0wMymQu6o{iK?{>!&PBu2qhO4oguv*`a&wm}f?%2I!zrPtjzd!`)~k3?OJI&F&@h5E3Dk@LpX1AyW&U?BJ=;b&$^Os&OAU1e_oMa6KiR{x zbj{9urE&^oK@OgioM5SnWBPCRK}ZxGp|6;xlF_wY-Z&CE3dLkzH`?<>D$RGV9QBqy2NTK2`v zy*uE%j^eED`_jK3Ebc%JlSCHjFNBAI(4S?T35wtW>*N7=_uN1fic($+4INNbSzklE zNllr%ar-`VQpF#e!U(iH9)ckeIY0>I+xijgR}m`Z${YB0lj*rhDfb7kwBEgSiU-Rz zDIMZTtkv)7fZ#kQA(D0X0-OFZp`4$I0T^p}y!4&=^11>SCB%zVNHkh_!Ot#Jjys}h z`k>bY(-Yffiyvt?LTyx$e6u(*@mg_sBM~xpAfoV)B!jZdD1%%TLHpqmGsHm>6ClDs z-zhe-7)2sal0B;<{f*ORQn9Vke^PKur~FOcGxhfqetqg>&c#7}geV_|uS$P^d<^=}`Y%Tn`QdkK;h+Spb} z8()LBlgYU~gSU_$gIUQS0;GI$3i^BGw|g8MJ@z7<2MRWO#bj*bO2cE(v0mECx<(>d zW)8w&W1J!NZPq-eVm}5jYYMs#_mMS)HG9IMZNI>=(k7$3jD_6KExPIcAft6VQH592 zw5me=O4WoEbFqsR5kBZ8p)=sZzCd*=bOy$9;ig6u)KnRJ8bcdgoytT_Jf0>SjyVH= zPNkL~00G|e^<2#Is9$fMedN(DuU3P9$cj7xHWmN7a@H(nU!bU?S%wwBT@cs?5kHSJziw@qI|Ap 
zo1*y0uLa!Gfu@-f@}>jRU+0w{w|uwEKt^(eMHNNtO3z^H78w-_3b%Py{yK;Q7h&8emzUOh<>D+86o9eRHt%Jaah1&d`NX@ z;Wqr1OHPsq)eWT5Yb)q)qy93F|ApnJN1}S~CW0Vszmi$^0yY}Vr5Gz4>J8Gs;agDx zsqVUNE=_IDfh5Fd@UoyS;>&S7M5e%~F?eKXv>Gt0txYJ%c)Fm_9Ul0EA0T3*OmygSR}qi3tcIL0JD^Q8qa zh7?KgS0?;E{IXsPoO4#opnH#lT_4fCoTfW#x?~;9JYh7mG^mljv!2uDuFy44V)lO? zV~DX(Z1BUbkOlGFo~J{|Bsl{5hlPmjGyvG)Tg-pEFk5w%0WEo)TMCKr*SQ=^zL(?T zc#HieE^O@n-oT7d-g;|?XvQS@xXnFz2xaP`Ab4gQB!4NSGrgu#~_A2lQL_Z@$^>AadR;Qq7xAr(c z$tCG{yIe1eW&U^23XN=`bYWz>t3aJHb5r^g*4czIn-7M)RarDDaI6Zd6OwA z950WibpUpBAN&4rtcH9HR6*a&O?PLVCR+529(wh+^uUK<_3D#5?HCRH;5ODhFFR^M zlN*BA_0+$H^6eJu`ev{>Pm4X45XP};(yubeGmqp!mKX+ey}@pd)j8Ufu5a7^Jby2D zXx1?*pAG!72OUdJW>=lRS*a?|S(jvu9hwkiJF!0a^C1;#EFF%mp?9D@pSpgVzBBFG3 zV=+GXS(wr;6>~Q`Pv{!}oHc$Tvve!{-8AB}Gkx`<5WTSq;_1eg7Xm-?hF3TXEr|gT zx^-;G}Jo_)F!cn0=1r4aiyOt#ySU$>z4!Dq%)a=eBc}UK`-e`u|MrzbG#Yj}ogIDS07SuW zb)NU;F=SacoihOZFDUq<=I*G!MaB23uk0S>*Z?c-$3VZden-kn_KJv4LoN6I>nz%PA9TItXNtJ2F0(J5;T`BB0}b;Z>`xYgp%#ftKi7Z1g$hzq z09}%W694{o|9hH8srhhAE0DtTnbwz!C!Q1=%seVYbZxmssl~WP3qAkdn4I+4vs>?6 z?#o09In~p$JhfFBkfU0){b=ZEEg*@3kHaDW^coLB%ingt;R6Dds#!fww`7zJz@`tN z4+2RII;Dnt-7vDW?#i_w&pU{)TJ>|EC<=f4^NZ~L-6PpT)(+)|KxU$KYLs&1Q3il+ zOxJA{BcD0EZHmf>AnPg`Gr-gm%#uH&<oeazkE7cgpXDgyNbVn#WhieQD8;%CiwkF~ke&C#lzs!T8cKlAoa+_!=E z^{|Gq^8M(NBp_|oo0;d*Rn}cN-=liE2r@@OABRKf*d0w=@=v?4_K^J+i~WH`y-RkJ zCgCkppxV2;4#G6;XCB?UDRNp5XpH`eBnDRYMci87A0~YIZ=q+*`e!vHYuiSjCEUNb zV4Yu>DC($?UBe6qSZFHn)(is;4(#m>O*h>Bn|=oh<+E&SX>$6TB(tY_YBghoTPe*X zp!em@E9u?VYvtEhuA2g(`%0hcOoU8)6G-X(({}wuD!F9>&*@kI*UCmv8$aI;Rp&o?RB4ZU??{@IO=#_){(pp*JyxSiX+_3z`V< zXC&dtve(xD&@KRk>Cb=+5t`1ECh)p82qW9ptboDmk}rAp>$?mv(oFS#O%F3?D3GAU z!g&~Tmlaibts3xJiqSTgp}#uuL#E2+nibj${nrR6KQ6y{@M_NTs58wX_o zUX#Sg56CgC!D{{|j{uC__d|gpJWWS#{+8?#klVxNM_6Q+W)sJ82^=8@T32g7V&TK2 z_;aPN99j@acJ6(5j_>dUhbq(akLv(AQ7Awk9!%b2Q3JkgNUKJZCPe#9qxt3KU-vW{ zM-VhC28`m~s0JXSgY^sz&DKFd|9+MWt?dzED@Bz?_q3fNu&8+;qL{+iz~B8m=)`Bq 
z%HyoHAI|`iNe!S2DhB>fSg0@vTwFh1&eb_*BF!3gf!K~i+`wi9pH0SEB+Z1yhVK&Fs0!;J?viYc0&a*#Gx%rA7yES!G7Qf<~h-FlScD5K^3Xqn?o=hC>`e38Q~wpa17#r2ao1q`P80 zu)(J|A39l$f#aY_2VdJTmj1ipa~o+#55NuomgEe`(hrUg@%0dxe;%xHrK9V?!+WTxOCd%c{-}-DAJrDYwFF^14ZT(QaLx@->mjDy%Z8sa4S3Ifc z6u&jGeg$Bw^vr%-u&wBZmq+?g0qt>8S6R9Y8p8l0PufeRN;KnO`n`F@^jj8qp9V!= zRC8D#ykn9L{P@U`$r8wr%pw;5_bb77TSr9|M{7A|MXG_DlBQ9d$gH`ji93XcDN7D76PYRO&<;~8bHZlJ!PEs!6X3) zhvpK(#s<|@c4uCPeJqVLmdObFa6cc(7n%Y&05jd=&kh(>>q}ntm1oT zssJ1#DPRKzKmD9g8`xW8i6GJR9x#M%><>J3y)J$Re1x_mUx6{1hipR1crcbSa1;6~ zt-l{4%x?L(3hH(x^&)YrIhScnR_=9Aub(d5<&z_`f#M;$tFLZE&1CaP>tv2FM*BPDTfx@An@7$xUwPOr^OgAq|ojjw(k3ney53PhqgcCBtGD0G`o?(P{z{bZloV34O3?7gk}-=7A;ms~vfak%x;YBp_ye5emq}t)brzZcH@i z)7mmxa_7J99^O(a0wC%E_=~KObeIhRR_p9So7T>y?wdcz(m~0n0oLP9px2P?^SJ@m zV8_|YeAogFGC%}@Dza5-sbglhC1|Ijf#EJY((_tpW}nN zMXbkWcdi;1lfV)DQ%Gzh1|T!B&smdXrw!Rp?Aa+@y#SyO>Ls8TqPKNRwPlL_M@!%k z^R;>iW~ZWcdbp)XVQa1Z z1NP9u#7u-wY}Zd?r79-n?p3%9P{BEKizU+qUL{T;pZ<*zAxBuH28@azLVsP-kEasAxIesaQ&eGkaAT@i2?s>tpl?{@l>6b+*T><4fx<^(hcIoY# zJoro`jeO*4w^0q_2)v364+76p@5`gavUZw{IXbYO&V)-6(Abyb!CGdZL1h$+;KFRU zrWB4&>2a1fh;3hChAx$81nh8j;`OtlDm2rwjsG!f;oB<`YkxUPv8OOayYdzDd)~-j z13CIIhAztEM3tLI7p*45`t4BuvDrwEj&7L`vfCNUgVxL`Can_ zbsJLjEF7$bxPfR5yd6_O$pXvmND#ULtZ99zU5%_9Eq6b-E#Am0UNv3&YkeJqv>Zuh z+xF5ya!!D_LNJ~Dm(%2LnNo|@HD|FdRIeIlIURBYncPT~^^6w>>QEp&kxJlhZh+1F zB0p+=QR?<9hQjdxHYp%+x`|B;XTmcomEcAyT7+DMsG|%-Q)EZ60kgrQF8lE4;FZt) zE?*iwZKip+-F=e{mXMr{vOrDy`~3roqs+J<(1mr0svrn{dhv%37KF*t&KNslW-vx@8Fgnj}F(RuLv3hX{ z^q?bA{d)<@X%RD#xJ;CyC>(a|`&P8QV+qOyuY-LH&b8#3h@1k4Ro`Q@05w>4Agd^q zZ-r(T433XzQ%%WtMqO@ZTH2|qu=2ehmC@`%C$2v=@`Wdb+%*tsw#nv&9+aZLG0NN< zSupZqCCF=BaWCim)}IuxOd_Vp49us=(hD|*M3d90XNaQ0TF6Aol&}T=pB}iP_hHBdlsX{k4uxW`wL>avi zg-6MXJ$Fsv5{D4W07OLT8Yo5~h(S8s1L!=>AcIH!LpX_bT|*XwN(sL?TWxU!u8n&g zgma`Cxz8z^lz?TZk9G`)SRo?M>a@sxSLxdwJI_lvWS?Gus2UfXJ7X`Om|_wU^7fIe z*i&WXP{lY7E#}(8s-6^l9A;&fG`_s}ZuW@Y2mekS`d?=rrA%6eAZkfxU?jrar#7O> zqMLuS8r_+%z|;#;_L)HLMF1hJ1a$mVo;|<1?i9z#p_w{~^~-}C0QTm6Iyw`~G!zHy 
zD_d52Pv%y>ur<=WI0Paa4d4Mwq{h|@XN(IoMY1IGE%5M?M@+v^COU-VDvN+iV_9m; z@%&fv_23iK2wK7yc@L!U(Qw7)>Z@uhhde_s-Z&>2j-@73Llcw+#xfg`X~s2cXaN18 zQz{|tkhDnynQll@(tE?klN|IeNL0tEQsAuOU9o1f!-T{Q$gmz(ORp4`k{QL3%1((X z4mp4MO4?6Qt@!I2C&X^T-~w2Zuo&tM(8SzO$ZdpJNoW+h7CKYz2`Er;yDY3xVH8Id z-HC7@(0+oJm#-278+fmghGczgbIh^PE}Njy9S6!6J%hB`qGT(6AP$2uRldmjef6t5 zpJMe&gj1B*#l@^~l_)q>e+gx9A%#)PoV+Pk$^x(FDhBUDpFv98J8nMrdleP0__5xO z!@Iw0@GQ!(XGp%v486pMHSZHY@`F+^?5cHn_}W;s@k|T zP6LQ1FEfo)UvL?42;oeD0TVp7armO_IFzOBuQn4A@G7M9 z^wzFjSpDb29RKUX(EqPb|35xF5xFIex2L?efGLJ)Dy>;>BQ(shx(qCVOxvPPU%UTP z!YjVHczg1kDyRrwOYDx2FK4zD(AaBH+^LjhB@$JGE>EB{)5qAdRzHGY|EtPXUsHWy zDCZC5?Y(}Y;C<&Ka7AbNZen4tF<`hAY{g>;#>?xH!{xiGuFtJEp=E14xP@)IFtSUX9?+FM7Zistch|kC*2Qqz5mqle7J<)hu@ySf?h_Psw9sO(bM7# z0xzUf2>~vcNhYu3NE1M+RAbV8W6ZGNCxTqY9Lo0 zj%9>{E`bWoNp)iC5gs#Uep)&j$2^Aedpm!qV%0Yd680*PF_kUhngfR-{EM#u^>qpG z#&7)uLK(w&T45m&G9%H0jfF)Hg(u5vvhspTIt_N*0f`=pVR#gS&-ahj zy%!NOUqQQ{0b^M_g~8T{5kUR(!`%ez6oHgfcXTgc{{IFJtI3@ij7chCgi z+}X0$@B?0*#4}7RfE=a+>CZ4wKf$t4FW_bjik<;tFxy`%P$!JjegkP1U<_DCZJR@P z{J(Q~eoA;F$ksvWM{_tu>;iC>!QyMU`0mqM%Z$i<4S*=Uw-5Q?^c}60GVsl=|GwGN z^y_U`om#^{LWq|!CJMBU@?f}$?OyV}^XgbG7(k{9@-MFO0n0;U+`sRE+`i|2b#?rs zKCVobL*gM_3QpDwAdif**Iqr^8lq(%-v&{xk;dIh)7JKm2#{1WT`;&kNk;@iN3ehtbhsHaHDOL#V0ePd)VcGBfyc zQ*XZ6uiebBJwDI;{F>A%&gWcs*o7KUSB^O50(^CTIqcJGgc9q&vx;Wv0ev6}Alcma zFbC6oe}1W8d8$J`Po>8#IdT$QW>SDx?R$;bVhdNtZg3l!mL zp!dx|1c}{o+ULPSt!(u4fkzFV?X~qK71VU|h51^O7B?NIlnALIepDMH520JOmT72a z_HrdKSzjjq_H;0q3>Xs3P^&`!d(NGd)(PX9=R(Ni{d`+|VdxH^xw=NIF0 zr@mhpjGML~+WBVtb~cW`e=Y=WmSv`KC%zr#d$QZmqm=YyE)$G*(jtg^fJG9}3im-w zk@5gz)I4|zESoXs(+J?fJl+%JoX-IQwqN+`>*Omrg9Wn9HZi=dTmdz`OHhkdr1ly!c8PGrkS|e zhbP&V8cwWXxrrJxA8pssx2xqVELu%qG({8ps_gf8e$dABH;v6%S17v>Xh$wBKYTQv zDzy#a3szzCWX<@URejTK(FnFHfh@F#YpZ2ugR!aPkr+)uV3Ofv~C z^=HXIv*@5(HtMx`vIp=l+dsSWNjPODOI)u8B6@dhFa35#Jij=gqWn{K-c|6=nukteSy9xsy?`5AG zp9x);3s&L|tx*kKIQlEv9F>1GxH2Ik1M&!i zA1%(@CdYq%-r?o=lFJwL=dwMBTR^PqClSpEPivtI8%S@ear{Vc*ljL>Q)|sU`8kw1 
zMy7;u;-)INrzy`ad#+?8FD8DFi=8Tg$!6;(Kbmpgj-C9$H?9eO@G;3AMPGfr66#g< z+R>^5L8SQ_QFe8`uI)geTnBBjRNw1WG))-0udedAny~ZhJC?u$MQ(edOr(%y+wIe# zn%ApeEqw=028lLSo^ueSq~uA}4o4o}yh;15={}Io@&YYr%i#TDWpY&<%>0+|`meSz zOfV;N_MAt!3aq}_x&BRMy@OpB;txs%Ge+Cb1I)@}XWR@j$T!R{q4PDbar0RzUw17| zyv5#b;&yUDm z{t$YGjZbQO^4I_sMGKm4J}t)CWP$Gn!0q)|nmT6ymIsz|EFP38lgHBj_YC8X%TDk7 z8N)V#;;p}}{~nNvXMMpNfru~S#nZj^T1WYs)!L&!j|?=k#QE(;qz*?`-(+#{^CJjf z;M`A*-N;+_3;i*BOgy~XTH7rCxmHI1t4COnz4XV2hF8sRKfJ{#oPAR8`5uP>4lPbb z3a{g*U1jd+?|A_UT$@;Y$Xt>x2nwFmY@#)iW$G&_Ln z*KhHWfK=&Uiz%zL1>2kX)6h>KE#iOphZ$JPwSR064`FV0EHinOG+$NA-JE&xH9<-0 zHScGG=U(HO7j5$)*iI8n(D^?1y3<1(kb>itdW}Wuen!Ce@n@ebxvipn0aC_&b-SpQ zsmfC3`D=@3N-Ce=IegBalm<7L77Fe_ZOQ2YWfk>ejOL3DV5hQ!%uWYVC>xsoCibWA z8Hng1rtXK?jKs)4aHs}&=K-)w3;pi^ zH>N-jXb+Ka@&>Xocv!Gu0I(n&K9iccayo5izImF3qU|4x^o8$=;d1=?a4M0TJ(oiB zSt&ON#}Lv2sQCtf?$N!sJ}R7#hQnnVh!f<8_132lyoA*Z3#FA+et7$kCIrM7f~Zdp zwoFYqFep+e6_8AWfXH$^GjDJ+*DfMhq0Z1|OV00l`LJS)cJr>^-w4(^^u`V)Pg!9w z!4Vi=<{XX_oErpV=X5E2bk7rk3v@s(s3E1BWRJ{okKFAenI$%_ zemp+DNaIhy-rC8^{r>nsuQkx;c^G$0G$=exNo01b-eb!GHMuPWiovHXyv596xE%2I z+@!AKvu(BwK1HPb3Te?|1SfyM5hM>^>#}9C*&eyIJ@)%v^bur%*-ynpda+t!g#dra z0AP9eH#Uosne@#uxf+&ybE~Q!v~Pd`iy{eUAe1m2NdMKqNH1HE-M1Cx3#!Ipz<3JB zut1@WP3-Y?QBG6NpUOpFNqdECOb45S38&r((GO5mH!d66H_E_E5urRBaSI^TNR#(Q z;HvYK8J3JIug$V{mdhq>^EU4?ExFP@lW1@PnX&Z2+GnU@CRX!1)boSa!6?<-)V8XJ z)}wG_TXh}`z8}mOT4wtMf*G4b5Li83klDBITFZQ@DuE@)Mw0E9XU>NlpMKuI$86j7 z7o;NEkTB)CeRy+viw3uY?+k;8|13O`NIILrSfP0SU>u|Jz+-7(pL^+0zZ@C_Q!N@Dh=v>5 zlB-ZuOR(D36i7@NpvWJ!ta{jX{j3aKC?Y!%EV)!^R;{=-YRc>V6`34xNUPQao!Ys&3>m+S193&4Myarj0ex9xX+qSPp4*LlM0r@=)GHp5Gy2mC-N(I=|sbBJtNNtccU0wdY` z_RMvyPk{rhfux=p~?^A7Vr3R`|K?TmFwYViOM{OFq}PZA@rtUTT-eDmyH zgJB!!C7I}^O;TO>a|yesv}ZokMagjL2{xjr;Y*+f9GFj~Cl?#4`F?b>@OHc1N_ z(m&=+8XYNd_w)D5Ue1Ae9AVbcVMOJaWJRHXkcl9|&hGTmYYz>n<-YaVMI8IGnhdVj zndr_JZ@7%~q>%YxJ)&kCi#)kjPKbelD8q-c13ah2NSxI=FCvNXLgXQ%8>}RJE+=2I z$U8vdLVj1)piQ?l>m7Z%!*t&U&qo+4tL1M@|4Qq8O1B+#hbhg<($0Ip-@t);u{PV 
zUCrCrr{O7WA%AnGj%cOr40gqf=lpj9#uD|SlL)soqXGg6Z%|g{pYkJfAw_? zw&4t+TNz}Re^P1D&(4Ee>2&pK4Se2Q1ziNYiODc08O{12UN`^zYYdbMlG+Kben{g^h|m7GuvaRON%H5XAw*EzvyH^ z(lYV3RA28Yv$Dlp`E2Ejd|@;JlZ(hAnvWP%7&ULKaA-+qcQ93r)QmU`#^>5j?tEf+ zXUj>AUdidAz~1io9`D9kN}NF4`GF~njw6FnqcI)B8>~CQU+&mJay|`yUdq(sT`KFp zMFV-%$;fi4B4Q_26KY>=ef*jO#>N?7Of5y>{pLtU)G4pklS_ze#$}xzP_q^I5omuC zDa0b4lZ2)0Eahxu2`pS;d>KE@8R%9&cB>Ik3KJ%v3$kLWlZ&WCpKnF}QnPs>2a=xd z{zy{v?}qYtyF;Oqke8puF_LeV%??bJ4BK`_DA!mwL5oj>>GshzUM2B*FYRu=(JQHW zCSduc(OW|I#??s-lh=fh*UF`gS|S7ESzmY7E?BV~tub!g45nKgX(e7{=`kZqc*2F* z#h{2G;2eK5+Tu1aMI~O}A`3H(2o~+pdG9&FpPjIKZ3~CekN((xkvb?V0N#T z83I~o=u%F=jVOGi{|lSLNCxvA2F99J{gSUKc6>w_c^_#JTjjO4;smMD#s$2kXe3mWj8y#aB~MW^L7U6vv2e0q*xE-OB}-D_+xIZ_E!HLc;O9G zIBnKc_aq_VkFgY;D z)_5i|jdU&iS8>+7Hb@ggsAbl zp5P~Un0Ko@RgUz0-au~;vXj&+|6s9JsgpG;R>{!lk-T7%u6|u4FhSa=kE!^@3kSF> z`6u~|v^_YmlXfqodZVSVrmD6*$k>h2S%YG+u_^_S$VcA_G&vPJRk;_{56ds+LO&7V z5MtuI4wJZVhPcJ^C@N_VbE3?T1L{~FylO2~nGk%u8V2a}?hkuE?-LEtd(LqH}y^Wyh<_dR? zXe2m)T@TT0flg&4f@6y5(aS#s+l=jeuLi4}gBmHeU3vx@c`^jkfD1hfn!WGalTUU; z!@VDZ(cEYw$yUP8fFVBQnjlz@sQ9d$kU9L-@@Wb|`?$!#2fI-d<2tXB>L{ee`;Qo@ ztW;F!xq1%>=s1<%8oWVL3+Ri4dZDZ{^K*wqmyUrHsjF46C#Q#B3IKtu)06fGJ(wtx z{Jw?;{P!O#=O0Un%>9P?e^!V6x;ogrqP2-?Q;D%rP%3EIWB!8qjMZ=ZpeFX_(*oP+ zetz<29)&A4tA9q|N%M`EEMMeJiFjM2d*6lh4L5(ul~m{_-}J70?03J(5lM&nD2N~& zdzHcyj->HkP)@FTVMXy3XEZu7%*%E-m4yydJ}gHnz-FzxeHPFC`mZGQ!RPi$9(-S#$I~$qR(;Kz#LNhjp4H;;!v7)}E+SUa6*cm<+$K|-f z=Gr7?eM7|SSNK0-f|0@@>Jq0J{;9*Pv6}w9iel^7&hkYQtJ(yOwk+OF-Z!qMA`#cU zmA5S(%LV3R^To`+c2?aCs)R391i7ZA1;mb(lZavYa#jnQuG3mx@w$ZDd8UQ!7QTuD ztp=U}VJsc)&JNB)vOL@ss|Tx`=r|Jb>!E5Lh<)CylParZN6KH}QfZQu9SExdEP4t4 z^8$oy0)|z15r*$JWUk?z|b#vbZb4GJR0sSq{`j}?ymlR+^;FNka6 z{s`BI1VK42;_e^rcwDZNrLyxv@<_B$CKd*@`YMM8BCH!GmmbMJJ*xLhY?p?d)ys}3`!*X zZKBmawjds>s5T5mi?xrVb_lvhBSxApSlz$>)?5%Sd8o%dDj6a%-E=63kp!J2cIVvr zaW3H*efw(dtC-D}86`(5(I%ma%kt)MdfKQ&nK13~H#V8~ZVcZ?84T5L(6)VV9tv;t z?3j^Ozd^VErF0MnB+7OAU}gCOtFq>`cv^HaK{D98L)n{~;_W&SowZ@qSk@g;=Rk;& 
zwbSU$@#k(6x=I}B^8I@=Bpu|l3{-WtsMQy?Vs^V!r=dMK^fI9)D<@>_O);1%9@D!Y z>zf{b3A#6L`-(>Im75kpnM;m|%8OJZ*ISNoZ=FDawhC9}!n#qd3sv2(U0vrN#6x3n zW8h&RMtl}StsdB?^=n1iM#*eQ7$tmlO{Q9BuiON!@C^eBB-Nsz{R4Iirdx?DbZlc` zClsME*{O)fFRH)AOyBrKj#XBbWktn)nvW$n~8N~QnZr9y9v-A$X|H!mO$ zlFckL2Mgaks|=4hejVE5?~`CI3`Pl!y$_T{L&O9T+HqJsWKfDEHIlY5(4|yP25(WJ zCqW*Az*=`#E9aqz;vUg~ZL)mq&KojM;V`zXAy>vVLl6AbF1BitOU*?c432UMH1!${ z@G}KhhLLwrRp#;X8isG}$$H1VwPNZ>F=E{4)T^J7z@$G(#024R37>DEV~Xv=KvK+D z`pG#GC!L6a&wX=izBOHnS!`)M3^?RUH(2@Yc=!ntGD6jA66yf4y(e%f3#$T zn6m#aesWly$4sTE4iXy$K%jD{! z$GgE~T<`MaTw-7N)a4iNsSj2h#w^}x9%FGN>49R2IyI_aVss<6GDv;LK&wP0k)JC( zRNQVXGajNI)@c1ur-;p+W6wEYICv#~ip4Qsz63J}*hu^#7e3V|tE}Uw&k)tm%@@ty z^g3%DszM|2>&+5uq=S%4plv1X*Rn=a>4FXR3A0;UmvRvHiz*!Qo&+w$EM9>5Ie#bx zS|g9xjS?$>;DD>}T6jB#oRj5B0Y}M=Jux#=$YNMkXhvdi9*dPNn#Yt-23sK8u>B%5 z;$x+;zOO@T$-~McKYFk4ACczuiqTKASXn~OUXElpzt)`9(j7)Dx8CpmFtz@=74zYq;T87*(scF^KgY`OQ6NE>`*d%gN0F?~_Z3(84h&vA*%|XVhD$`9|wgvgh*c zK%bj^BOfkv>rj3p^Keaz9W6!mKZ&Q#@eHWg4Glyr?No@M$GS?AHe(iUzSmj6|0%wa zyEYZk+N)~DQZrnk?PZi=AB!9*HdW2cS7OGuw)BWwf%Fc`PU3J8RahRY#$qyF%nTk8j6|%8zeL;b zIwq@*5vh2+saN}Zb|OSo-We08?oSxlsi#y`t+JTX!k>fOLag9pW)ums{2T; z!!6+qzvU~7HfDoB%vM&bV!d8MFZs;;lCRZ&)grZ=GK~~JRagqI_vxhheR&zXY7I%P zAKgWj3Em=}`Cd}zSd-GYz0601E-vf&3j#?_Dh=f^9X}fnn-O-5j&x#^rKtUOeavO( zjfvIxPO1pQK~*ujEboZGqO82|de_D1^Kk<@yq*aX`G`gBZCgDt|H94-&8ZtCCS)NV zkreXEmwY4^jXzGCv{=Y53>T_m%pOC2Lh!XBIzC!@&op3AG%rtpx2Z0kK@vQQ^V50; z19hJxiHII3bwc*qm)k1xtWvM%qJuh!{JoR&;F}x#-LwE4O3h|

9o@6Ms3Q$>_|YL`jqEW1Fw_k z%^FJ|pjS(zX;mJwpv7Z+=tf){;wm6RQbk|eL}BHW7Wv4Q)&CjJ(=hUO^_E8K^G^al zbc3J%)?$-eoDO3arh^DfLOv6z>VR4{+7w9irdrw14yQ;wFyjew=HY--ZcO_&X7XL9k5;4WC=8hCFVxsc;XztuxgYW zz3I3JV{%6oCxhNCg@h`<2=3yfU@_??hi0U>l)0{{gX}fPrKyRGhVh^;irGpwq%YWp zIa?~|d6Ri$nO&J6uX17R;PlVJ% z8cWhQQO8RrgVnOq8r@nu*?!edg61C-xplJn3PbnP&c%G=186tv^{1n`iGyn^7&p7F zFZ)TQU}=j&s{*|V*uji$wAO5FZIM>)Zc`oy{clq$Dc&u`KA07t!O2Yz1Q{fKGuDC} z*#^saXpgGrE6hUIg)o!5<1}z3+|xGgq-CR=(}V9*t#S;Oj4nl$ML#AEBQN)@CluZ1 zlc?sZ7Q~>vbRT|-n#C z>ozUJeM&C*PN`>6xDr{Ko#1^LBSe6Rl_)#?kIXEeGQAlF{OyY%HVR;pJKBkGwyV2uVw4 zG*{3!pBklX*kWq@z*3{#dxR&SmW~i?sjaq~EU3Dj}OXM~Ym-?-OYp!PiAWkd~r% zmj9oJ*G7oRi@4_*iN`3Wr=??}J7u;nDiz0AroK-$ zUQ!}of3^45 zmTp0-{@~8uK`G}l&vARe-ElddG+c`7JV-0EmFv4LQQA7Fmt%I_BWkEmW&F;?1$B1O zojVHoyzaW~y6NG%4|-?)=8`Q|72nr7u$^GgmOxvgEg-)bdFgdqu(4rg?7 zLO7W;dd!UgTzBd2L&3pRHNza&L93%$axuNr*O2^BaslHoE4(@9BPP_!hI6>I&m{SD zu4J&tE+5&W%G&SAKN~nZew8VCTqiFwSImo%!w}rPc6lJmK&eP&;Gu2R;bSa6-P*)8 zR6tv{IuR!}iK}FkC!eLsf{r0w^bBq(Ze_X+us&?zt#_!c_y6p+*AoP?N=iW{9zT>X z=USF#D9=HI|Jl3OZFxY7TktY(TDmP6{+Wmfk<2&TcVPm)J19}&eDQ*gFw3_--m^*L z+cQtT4LHzWdPRJD@51qSrhM7pnx@Ty6a&G5pX)9Ml}$3&D6L{Xs13a|RCpnEK6P5h z=G7GD?A=dR3ct*#0y#{&xhbW~0CK_vBGsl(a(V4Rx*ck_+7uy2TL&8sJrYviOG#)O z2RJ#J zTLUXY<(FKH?+! zLi|~MEFC0Of5qvJy(y=BSeb9F)nP=*BvM?rW9_FNlod@wz*l3F@t$0p>6TKsz|C0t z_b$cB8D%HZI?&hfUtz!ta)CEgqfNIvfnhN^;I&>wj_oC7gAWI=7uLY58bWP0*&}`X^icYnoIme5pW$L;tJTw`f?2#b}VNVRs^U!DT8gz7_ebw8{N&(vE7A zTZg?q%>1I5doKBFmnS9cIK^yek>duDR`1_Il0!s&6zYaZACI}mnFp~(arA@ZN!(U< zOgGsZH(#$CXj{s>(bmI%)%ttVIA0;+tM-=?6kp@Ddas%$O!!%Ps7R>M8)}&<(+5}e zl+(zF;QqEWJ9nxk2q)7|AY9@#|LxtPq;7#B?M?rqA;M0H<$#j_mVkjCd3n3s^PvY& z!&FzQxJ}Eq=lOxylE~A0V=MRhVm&KN=(6vNT)$&+34E{EzrMF|`Q8mUa0@oHTNE|> z9XX`r3v9%IO2%TbtiU^Ax{owiX}r>Xw)M?Zqt}Cy&6gBt{NuppdoTR#C-c=ck+Kyc z?PnIx#d|s5H2i3?72|Qf0cFVnX^h!YR_%A27{SX%Lgiire5BgsMCfzDtD_or@>=x= z&lBB%VzK9b6+I_WA&^@(fy@|~O;vVs%lwu9gh%FsYfB5? 
zVU==SIS1Uo;7~C*TEN86_Ol`L-=TZi>dA00Y|1B(|j8ieCUdEq{N8Rg89{;rzE)IyjQDtQPVy zzl2`j%Dgr($FmTtqF|4PnF0O=zl>@ZC~b8LU9%HfKxZ}`jN-zxryk8DOaJTwnZ35S}?+9`#+ zvwxIK7$vQp#@b)lHb6#aq3U7NFZNPEhdTSoYlRKk%5%;=QTu;Ac4$~5=kJqV?mFg_Q#U5%Jnr)E44qnA^3KdOx+Z?XzNPwgAyp6=OUDyUM!Ta z8T#3$;Wqo%pkVh#keZx z%(2~@Tggc*f66Oun=Jv(f7`qYBb_1D_VbI`mGcwk-zKmVy4ID+x0H&om)dpb8S%T{ zftORV3GlcK{iFnz)Th|zK1ne|0?xuWUE1R3Hg9^!n?U8~NcH8F&W}M@_9wdY{pT0> zOUiuJVQR}I3DQbl3RXEN4G2#!1FuDxa^}IImxaIRQ_HV?4z!+K0g*zkAHTTDm9A&# ziTuTk79cVnKvAg{kqmcWpViaEq2L+Br_FK-C9WX0&<38W0^nVgnXsBrF%@@EEZi%~ z83wj{?lN=Qb{mRJl+}`bUE%!#x!0fPW0;?AxpgB6An>x)(%#n$@4vxNvtYHZ%M;n( z!6vKt03iT-PlA_t2>*7^ZQ}YjzEoa=mJ+u$na{= zSn)`K;>gLv{a-Fw_d!}%!$^49zzkyTf3F;ni)?}f5h*5&Z`9LS4Xts&TCtLRd*aU% zq|$IBD+_hB*cMAdXJC|JWe(4UQ7L1(i9QXy$BK}GGDa*9^kJ5(xeAR&4Bl| ztY8F``G$fnm0<`n1o`B7nQah7X4Xit2aFZ-`ELVKtQ-Wv9FbZ;v}7Uc17tn+8=R|i z8H31)eo}2(kyFvb_>&(5@@v3S^lQl>*SVXOCxDJ41E>V~z>NXPiJ->C1)ak)R`?QV z<~~*u@A-0AVGOv-)5X08><7m2`$5(HOStbY1bp#Xkqjr7A`c$tzjC9Iym#d(-4R6j z#0auPXY_R!ML+Btv6su76z|a56A9ua)plah-%I^@$)b$si+ZPR!@%Hn$Ww`>T+}@+ zrMD-OUw6SC-S5HDpIXvg$;5Dy7eUQogr&UB_^qvSjEF1_^(>Pii9W6Ij-olk$=^?= z!k>g%CJT4}8QZRypY)m`ff^*prFl1CRw2%_{{nh|Q+fB_1#|N{24w0cqRP(8KCDh5qP%O;c0K#Z1(*mc4$j z4KWYOC#qub){5Q)xDWXvzQKU^ zFaS4hfDw6Sj)ZN_C!+3l;Y&TU8gZ{a1Fdl_Y*FtHag|?LW^(JE0K}1gm}y1&`-nzh z^{IvbV!Dp^{yKOPwScvL<VuZ3e=MUmsn(dq2m39_s?)+)ZnowA_LBAd=hsD}^cHc{8+TyEJJcFiQ|-gs?6^ z@xKd8J->It!JzknTOeEVdj7S-bifdr#R-?rFXKeF=FeL73Lb#Tq;aPC8$B%4TEMk;q3QwH!oI5h0nW4o;GOpQ zH0|BI;qnvuKEYQ zef6Kc#dJ7=viv^4w!3A9k@kT91(;)3`k4rmDltu_aB$7u8Q?E~vqo6u(fKV3cBRpi zo~lfU}X)%LjQ7Pt$a z4}9aN(bJ>8$m)303|N1lSnfrpjbJ6>3@HKv=|0ceM(KAd>*6TVsjeI2PaN9_ybiuk z40=x4D-HwGU)U%(T0d%c)ibYA`S9}#sX{>Xt8hPl^}#3;c64oYhe^@!xdHf@LYZzc zOZgyH=$;hm6m}2Xh%btWzbUj<_MzQFWTR-o z?9_@8L#^W@2!rur!*Eb}G>y1Kql)j*wyHYeuiFj|!he8N#TE|FdWDUhq7+vEEMvJ5 z>ew&{XRcChX|f(wOVo70FSI6;{3I<*`bi0Sx(6Fzi9 zStDh@+JRPe2d2G98KA4a(a#J)m5}R-TL@t;PW+QqkqktGl5&Bq$_m+!4PsP_6$%+4Jq2A?zZO8As*{Sjy0yoxP 
zAq3U;mT4%tfp@*Ayr9^1+uxXP0pL0KqZruDBxDgbT~eKZpa!g05tmzC9_s9bY{HW% z;J#1dCp-Dg$;|>|0|zvmm&BTgtfF$_nWFcND2b+_XL{RPiem1_z(H*7FJ3&a|0GJy z{7pNVqLBk+CQqYV@R{L|2IK{B29m=%0@7p|gpvHKDmBak@}bvVGI)*HO6%Ch?&$7V z1>?zNWe zu`4g7+K7u%w>;vp?oP4QwKeX)37AkF@fTrgX%aa#zJWN6e|$kQ_jiegnK8L(uu3G0 z^EDnFzC+kK1s&FhD-uIz7aIo=bA2hZ%)~N;ud1wTI{Mh};{67de}sPY(|G;IT$A)w z7beL!HKo4Ybcv|BiKxdpPfAK^wn^Wm>Vc);_pHSc3OStfWr%^Km@_j8z9a(9+B+eI zbt(4TRb;KTgi6)E%<4uYwAUzj)*H!5EOmtDHe+)cleRf0b)dr}iBV;17_w8Tg9G`FvF^#!kl}>^-@uM|=x}R!OLhJ6__WZ1GMW4f!2kq6t>_ zB*Smh=S6B0L(h876CiJL0Rk7QdOGU^m_PcOci&Rx+*r7Qn|mTDo7mr2FeMv8=(d!E~jm}+IZ zVEnXNWgnP==&ki$Er)KtC}c!Gsle94n!TFT(f!+bb-cEyiY;m!NigT6tW#ArCs@7) z5LZE42UlHVJ45Ikd16?AW2NL`7lb@OQ9VY9;6&?)GcI)3cCapSRPrH$yzlNLb1yqs zrUlCgG&f$`@c58?&dP2_t6euIL*sG?y;4qki4A_Oi{1M-%PVC(U+;#nVieb+6G_?x zy@@{(>}Vqh6@tXQx{T)-V;f2C{Olytp4ukab#tzdbsb$^LXwX%D)u3U*PtHt$_Wq) z z9LUl7K_BM6wkNx2pcc9(NUGZiJoZQ(DY_z1y7QKtlDwZUocXAe6A`u%&l2j>E}%k? zmII`_k}8iTNR$(kA)GQd_laUwOw-NsGX{4kWBS&y{Zeutrf<-=S_chgg}BPc-lYI>nq! 
zPQ>}F6|a9>{@MNjg?{PHs?O*qy`AC`*LZWaKpJr`PjOS=@Caxha=Mbz0;wy=BK%)I z2d^%|sVJ46naEt|7(!cxn+)m}GNx5f-7jAgc+IAU-i4L0N5O-< zLy}551V5X>FyTl@waVNDUcqiR)q1W%?6nzA9Bm;1JyHq5YRyM1ew&#t!m+gN8SxWd z!35>)fDkv5H2k=>4Ln`G)dKY!2v%%t4feE+HD>*Gn->)QrRMeAJty|Yq3aK8uo1OZ zVKwaUnKtD^;AihKL5r@?3ze$?*H5^)_Q4b`cxSO=k?q}{wVqWomGd`=3uNaAJ`z+B zR8l|PVfm^ZdUwwJEw`^)%5gH!m)spfvzI%lN@B*Po4H($@{T`}m~N0%WsYI;h6;wd zFr`uwkUHBOPC&3ODT)InX$FkidOve*GKrhb#EK<8rredRfKDUM0H_Qu;jC1W@cgs6 z-Z{Qi>MRs2l=%|?7li4TBnjct_U40E{_4uqW}vFIMAMqRPr7L>diZ|V$~LtSCEEFZ z&pND+i^#l?A!+f-C#UgD(#WI>cPJqNG0G?8*SZxVM9TiwRR8V={~vu0`nRw2|IrWr zm*0c__fzwK^MfVX^*al_gFrgD1kCBuVQutdt;AiS{7+`u?EeMG%K*i&+kRxtg-RKW z669d%+W5n#bQhzLI=_3#diMuu$*D5{hSonw%RY?x{-Hco6)r2AgMDC;G|&uN0)-!4 z?pEc3?zc7&l?2OsWv-8MhyJMPU4-N9kSId_dsjp|Z~r{9xTF8aUUZ~CYbAX1*q-tn z5g+X11jMWQKrjsF=2{^jQ|nzH?L^Zp5dN3MdEN#NA_TRvAMzdVF;4~gwUOq=mi$1L zgGi$6l}26HgM~h=LOL>S&}*ks-XFcV1eSw$g?qo<#Fa*0IH-k!gVYkJv68z(cIAn) zzlHm${Eo~J?X-XQOjSqf+R=9&wooH}=63Q%&uKulvl69*DCgV&o!7<*qafPZ&C~Gp zD!<{KHvv|mmZ3)M%sl94K&JvySm=X>>C;b~IXz)NFgAtC)fgL#JBhl!=gsso$ivgS_hlZ$s|vl7F>Su-74)9I ze5T;De4_^=NQNnqJqfb`SAQIu$=mr=>d9{9?tR_CII(?;mU+e(`2ydL1szj%GM=5J z2)fTIy6+9YkZanKSp7xu8#4vT`sZBhnuzUmE2>`^iQ*5R`KeoJ{FjDoK z;&(*M1*QX4`lQ%l6R#`naRcfmzHQm2H@jO(hCebqwEM}ne@qx5`6E=AnWUijQ(-hs zcGTlJoLK3PgiyVL7h)HhzbpPTusvfmt%6S7c>+FBlvX>^>hd(hUWS5$Rrt*bpV2M0 z{if=bM_7JOD8(a;RNI^|AsSVcRV%hE;MH1n_~Y4Z(X8TXfaA!;cHJrVi39qiC*<@5 zL>LiQt0DbQ;dVRI7H>7)`i!o06yA+auU3Ax0XkDG!laaqrUO-Zxv!31&%dmQ{AcdY zQ~jT&1o~gh$$$Tpgr-F;4dxC4GBxA^axO+L`t<+kfTww*aVQu(iKbN;<)^j{RbrO( zuKyaSo9W~BKG?DmPTCF!3MwEAY>plNZ2Y-E#x(&>VoCu{Z^oaKpGjNcaZH4tdBE!$GQ4I>HOmS@$36< z*N-BB_|YGLFM}X|({pOH##x7N9oWeR_2)cZJNM5$PSO)+hU1Aq%HQlZh_)P&OXPmH z4+>vh&|X>u@LB2}TEA!ca?xHtJ5vD27Hxn{FgKgGE&#kSftyL_zAqHr0yI;*BhZ=b z!w*s~UjVz^et=4N;Ua;$Y9RcAH&x?dt7Pb$e=B3-%H|4gy7=AdJIcKv%g#FBBBUnE=L8 z2fCX1b1Bug1tHxqKbTX@%?_*sg;(weq+r4}>I6Q(am^)Q#&>~=j+I3t{vyB^ zGvQ7iVKqCz$>4CMK2WAfh*?&mYdg2N7vtrtflsh9Seq%(FjStt_vPkiLs)PIylgLv z1@x(`6CM2lgDC;cy_EU%Dv? 
zA3lFt-D;P16B9nmy^!#oC3o)>Tu=#+74HIJWj~PQ-zbX{qdmPlqu@QG^!z^J7O)M` z4h2t7DKHtR1%874z?~$ywe&fBmR*k6N59?$mz@nls7t)0pa-gZ?^fRzFIB#LCv#(c z^deO@kUI8TpH7KMHqPPkt$@HrdF}+i>fn;9;wP;RS2zAHV`3F=4WNhC=-n64slEbG zniXd$0=e@MF!xR_(j@)>o-JhfQZJPv;oY+DKg^qI<} z(4Vx6l+vV)K!N8!AX47<_Rpygs3>o|gXy;)`qjF;JDg!IAD;( zABDs}WZAU_L1x88M=TKJn>#lK%LYO4x4m=%+q5g4dK6!|C!w%;RtPLu1(m$Q7Hqsy z3!vGu>?Tm-e%%l1q)^laFg}I#JQX4)64;%6q)i)9gMAxA0d77tTX`DS?k#Kte>np;RkdNu-5wWKm(XW{PzO1?r^Hno-(N4F=pq>N-Yqcn> z&w{%;#l|bZA{TDrt+HsKl0bIu*nL_e3RahpBmxwHXFD?6O09+tL?n0~Mp_$V-*OEHM$ z8Mi5@@mG)fCB#-=XaOzF9@;1^WCi{Ja~r9!x*s0tkEa1U^WDo= zUG81G3k**cAmX>D<4?^f&>HcdLQ=nRGg7){0)lSVmRJ~FAZ9uZ<+rmWIM zI&r`-f6~cVmHyS6I}w8!O~TODtyV+|xS8)(t;MOJQ2BKJH;FQ zswx(K>u}YJ2;S!Zc|UZ~(P`0z8}YP@LwXtAxPDdwWzZ<;)Ta&uEQaKHLK0^h2x+cv z&5NG6iiW*cfoGEMVT_Y*BRI&<|idkY+REtP4j%8<~ zsQXiQbFBKTq;!e#P;N`@@Q8keHz6#|V-PiWsAA__33;QO@g`F0cS zszI#UN9fw#d7T^b+;P)*?#sBf;K|CXf@P@nUK$;Cp1gZCbB;CD?NO7M;d6{hMXSK{ zm}tT3ZZ)IR8$@7O_H}m|q9ToUTQMw<&XBFbP2pWqMD&!W??)_6EV1|>;E(M3(Joki zZnji><@k76G_|R9%fHpSz50nC_dc!ndZHB6YTdY5U}OX^HnxRlJ$S^Rd7~u*{6=U8 zh9XkTq#7pYfMcu!R_y_eOLm$14W`M2UPp#)Y&w>m35u!K+$O+ zLr0{IE`8KT(&vn7h{fxbh;iyJA*Y^`(={N8CNowyaX#qztJZCs0Egp6>+ z!mW`8@#6OaH}=F6GiqowDCe&kG&&Q%#yhOmMLTYS&T^TT6CpF7MvbSoIDalFrd_xC zcJ8p5utudTzARs6qs`$Ye)+sC5zhB@C6G_oR61%B9_A2gU#BC`7pL8%i7qpFEgbTK zmY}*MQ~9?9SrXP=74QRZoAK{JLe!J0GeyF??J-2T2(kc*{qdNIuJ5Vdy@03F#7J)Z#ICs>soW=rWj;;tM238* zyPI;5c@6ifF6kjKq9f#YyuFBs>n7M9>L#jG>dGD^x&=mn?l)j()@X-l-llta@J-^~ zepjcb5IPj%`8#hG!C)Nkb zhhtp!n}bh-uxy13?-4?qSTT`2^W1f|3d#*cU`T3S1|^4zr(6owiTcb;lEL=y<&Mw$ zZg!F~!NX6l8Ch8TwITKE;K?3VH;HSstDdL1m_eC@xD^b#a0|{W%S^yW@N3Px8XCka zo|CyXmv>Z2h?$)~w!eThshLq+aXN6jqImrIAWb64jufI~Tt557);)gpjisk#p$7+4 z#@mD;crAYC{HLPF`blzBT{s67x_Ic&h8PAvy*{BXhLC+(NF8+}`1Qe9zwGYSmBk&W4H=Su5F+;PFHch!Z@sFXTX@aEO-;jaaThQ zZ2Yh84Sw9RBY^^2mvz*tmgCs5ceJaBF@o4n`ZK$l3J8=2iL8>hUYoZ&efAKt?dpxL zCSO}|%Ej-;ukVGiFO&J!s~2=Sh-M24^$G5hwVi@aC^>V~=p$;&41-Vlzc`rry!_cd z=;PNrg$fl)rG%RNQ=mzd1MRv}!sA{{TsG2-%Mt?Mdi1B~iFfDJhk}Y63<9|~i_C3~ 
zH9iZ+t^fk(g;2T%Wv67m5UL6`Zw7Jq%hLhe-m{9w`S@Wkt(Oyx4x-bsFQv}P|M~J^ zX#&0r>`UYEpHUylNgs&WP4423SpKN2GMmXOj^@tZJm0b~SvL&pV*2RI>^q zz)0YK7g_3wYK)kjaWgyk+%4mP$+y;PBrn=mqs0GPGZr-bQ!`#unuE{sXP`uV2(nHb z^o~JO+-GJ{;``1&e;C&CKNXk%Mj-|LcNUlbMr{fGcNUlbMr{fGf3COx>y$wM*Y&n6 z8b(0!LD2~gK|(*2-NRCp;kpC1Y6s=r=#K8=4SQE5o3q!R=S5_GEY%FW)Q?ZB-!fQK z_QSfQo^wTZqt1>!d8{2fUI|u_u!(p>WKa5?K9?rb;anMV5l(MoY z+!J#?`XNCF<3ax!-dWr$xb6@-*9Nvd;EiaW4$1}*sy9vRARV4#t9Vlx{_Wy{fN|5M zs?&dd8xTF^4jTlLFrz^f0^8_bd5TjP^Ven#HAq#t`Og}=W#H$Zgc+kgqi zX3)G4Y-oqZ5x@7-frCsxATTiIZj~+crK3PEEeCj!^aG}Zzvol5y%mdO(pFZ#aHe3V z{nsa|*LX#4?|7C0APC)2-gN~I6Bq!lFLulhmOA-4C~&6A1Rb`9RmfS(l%+@pqngP3 zROZKNG(hWeR=T3o0hvP|q>jKP6kLR_Z`{Mf5al6(vA7tBLd^jsvK?^sz~El?MfQny zPVY%d`Uu+Hc{8iAq%3Kgf2!o44!(tE(0eEbq>#VDngWN&zu;k%-a#$%Q33Js6O&>W zz&3$;hHrwphrdTYGo?wiGdLVE5_XtyFDElD_t=(t@KE96cJ?>FbBK@ zoPziO87$!h$xfkm(Qq`%U})^jYB01bo@ej z#Q|R@qg?8H!u!yBfX3Sb8s`3>c?b;v=N^wrL%^N(R8D(UmnrdZ|4m519H0U*ImN)v zD#HOwdrvXX1Fo@H+r?LiX*b`KIt55au|GK3^j~`z0UYt<{FLc8nsLf$U3F${5B2YE zb|NJ&!_gqSAktYMz>8Trq}nt6>ZS~EBa|1kyHb$+pyCVqg@j(ML@%kEel%*2L~|Sx zvU79#fo?Zp+<1l6m8Q%)sH({(8*N(t4B#SF6FFr53k1nSvz*e(i9_`Epq9RS`2j=F*n1ii@l1q3fR zt{1_C1_H&%T-43FccA~&c=Da{Sa5J2*sBP^jKTXslUFHq9@{aI!WXRhEj2}!lRX5K z^Gh9MQgTtxKrFYfDxrmXu`l>IAi1T*o;M$BKJg0AqmfhG@o4)~*S%|4uig;F4{*-ZKfz#Yj;hk?eM@J}`s)Eux|Q5qj+y0|HR zfdVtX5|SVBHNnHvJWJ~yoq^rI{!*Snd^dAhaR6~VfJV)AJ3qQ5#G*h#Pc)nlBzU0& z5Vo~sDh>@gCseG^v|KE4)B>w7|AR{~p-ZWD;yZn7{kdx!4q3yROWDH#?utmT>}26W zPZS)?u?f1qDCm+((0BTVZw%cbnCE+4QR$D<7$3y?w;|y_CNi6l%tpm8;VObS!_c)L zF)3d)wAK?K>1E#^n@+RGC0wJQTDSu87zQ{m5nicucxw;vaA5!y^v_8|$iY5tCA;UZ zY)JiKw+1lb2eBrT)q3GiL%q3vb?=-hv^;Dv<&L^GI!6AE1K z@!k|uHGRX02`stQZ-&7E91$-nC|P7K9e|+fKEg(@SuGGgYQtQ(e((dDUaszcR3e_> z;R&3C8ew2nuv^1eRR7vzq4(|pe6b%Dbl$lSCcqZ`*9{Iy#LmqHozw)bjcLJgJ}_RF z#HMHj@Rp^;VtLOgktQH*Jge%>c`o1XCpIi0a-@?VTHuBhgEjr5O~m z=ZC?&DGGhX#Cr$g(!mzyY3^4KsB7sqG~iWK%)A`&{;}43!*?J21+o0kYFts;17rE5 z+A?$xr=rsVP0b2W01A@$KaBa3bHyFkOcrI*j`c%fH9LfR4Kqr`VnE(1=5h6KP}_~V&)h>VnNxX0p|_lcuimx 
zEd^NEa{esz=S#!$p((D-57WEkt)VOR{elHq_a^`_|cM3J-{`Yr3I_&~DId2R9A>ayoG%aR8`ipSPb(0aX(Sj{U$XwL& zDOl4j;TBn|&@4ve54$ls!=VtRJsPDr7n|=$F!P05zQAKD74rN^ecs zXaNW+NQX|>9sX_%@Ku40K?Fz!KwF%6{S8=F%Q$||omV?=TZL(&=vgeMV!ozg2O0cE zQ04+!nf#5DbREv=KAByx^%O{-fw$RzvuWiX$_N})6q!E*&!(Z4(PL|W;WVq(@X!F< z`5)DtLynB6CX?(4zEx(-3_$B-Pt)x=(W{CLILz_0NYj?2X7^s7bwys%Ibj^bHM>ZsF94C zkueyUD;oTP$%OX-x~cudx>DQ*qZR43WaqES;Wj8+)eD8L+(B#V=SpS+HyY-T-!%d z*qIFsq~Jx0b8&Co&?o29+MLbSWAB&Cqw(x`+^te`2z`RP_zJF0O(*-}>oc?kPaa+X zfU%B^-B3F)VZ50E=dlAI@>(G7qhX9uFJ4z3xYrWkSn(Pu1t#Ze|CG``TIzu00DqtS zXI)56F!Bb>i98Cv8le6QLNijD|KQ>`i_!TZ6d9oZg(^jLSJ#Vjs{1NNHl*9 zrhp3FjLkZ?@#@B~zj$jPe7DZtc49*AEcK2o>dwnDz6?tAyQisa(|WYiw_e8c?rtVY znMJDrPHKW9M$@In<_0k%tr0bK0s+d*PG7m!vNTWHBFx(AG@0YePq(xu330DFp$NDLTf4pSX2mS8dYyX#0%>}1NCjU6@g9B)em?3J=OC?@ zrnN)-wJ>;}oS~|o^r!B6e!IKyZ4H!l0x)|*?shg}1QAxl>UbyX4fzFZw#F}HCB~%# z98sJ}v=4oYbS@CCj?+d@0Vi^lb~T1O90;q$kIqZdQwx)|kt#^CRT*jUy z%ZqTNwqRok2p*}f3>RD@-37EeW>nE%0By_fmgXa*b{eWe7_+O?AJJ@Ts<#xXqL{oy zBxzU)XLct}XI(uPd5a{MG`?fhm0E>zzR0niSkf3rIoC_3{rg~Jz2e$NRD1nOIh8oG z-b6Rdh($8)cS4_*v2jbh09Val;_h#pZmD4``XgaHe~sNOPqFJ8ap6^T$D{NDNCOx< zw6orzJO9{6&>#;RVmlexcA=y3NBEq8H(#a2SA=Dy$PfALDW9$Aa>V83{-{(`Q=+xX z-!&yRH3YC+Sw=NfiG-Qw(vim#rkjwR+Xg6B3_1HnK0Y<+PQC;+HXe zdAu6mw^cY&t$oeUy3G?3uK`0q?HymuIa@H3)sfZIV8K!-E<#v|dt@a>)8uCS44$om zva*izn^}--VnSZ~MY+~7jgs$!*y|g={Q_kEyrAXs1z$C1p0=?;m7Za8ZtR|$bC*2M zHRECo;6GJ0ULMVjyv6vyQRKlp10wCpX5t31omw72>~1x9?aGDoSI^Do+s);*S&0<28_< z79T4yA4kzFFDY^^lPjTAhx4R6!(}z;^YkzDPpG6yFt=xQ^LAyl*GUc8cZq}E-$1x7 z+gyx0#V8I+8KyV?UY9h7SB=a?u$*MRLS8LG*$~{HD)9{Z7m!C(LkZ!m!4hf% zvpo&AMWN%m>?@D-HpFZTA8l9TPdCxXUF+`&+a?d!lt#>r?GOvNN4($;59#e#4BO!> z&o53b^L+4#+;-ndfv1aR@g_r22@sM;y`?3?6j7YW6&j?M_VWR zv}|JBTz0UDWkFEi9)usOY0Y=F8AqcKNQpt@!fil&)3WbZecqJ^s?ae>5M3gZX?dO3 zC_691Oto`;|A7Zfd)=kca|P$y;*`CcmU0A~`Ow*O7J;Y*4WCCJ;wv0cAmoN@wYzp1 zah;p{A&Pm^RZ_BuDpaj65_f#BlXb6M^U4(BuJ8f-jizr!5FvH@n`K2#%9&L?btdQD z=L?S!MbYVU6&$`%BVNF%Hv>e@`0UdcAus76+^F#+0yNuHuiETNG74BC3m=_=)?z^> 
z&uI95NE9h)A9J*Y;`G3*^@bXo17n#qXcoP&%pADUY~sDLlBK+bjR+ zlGPlco{r}zJL&sBIi|fm?YC_;RWol5_I~zGQ>OlSN1j(+_d;J}TQh=X%dtu>eus^U zJN622kVe&HsHX&<t_(XV}!;(6!@*G_I={(@3wPM&V7;Yk9Nd+&Nx zPk;Zd=}L~~XC%LoveyxM5}vo}Fu8u}&yja-7b8TTDD#|<-SX|HcYppCPl~>s$bynI zv36IBClo3jo|Dof?>#U7to=JB`R~}=`6*n6ewtQ+^au+PC89Ee zd?(uXe)5ot_`oYxU+YtkbEMiC!YE9+z(?Y%dRKeluN)}$ob&^yU)^JkLLMFBpM$#N zd2*)cj3p+a0PcQwUCWCUY(`5i^L*=t4P#zS$PLy09$t8}(BxmmmX0E$hcE#StMZgESSSAvhzwqQRdST zO{hbj)N4%Ws{MP{9NkxzmdBA6olkujT*&t?N{VK@nck(>ODDBjZ<&+etN+CNG?|7| zLAJI&gSppsE!*H>B!lDAhWTFs88g)+8F^*K89t8fj(a-N)%}?kY5VI>E#j4zN-SUV z|5Rc5qw7nU-cBu`9<5aSAw%Zg_$d8zUX%nBA3A0+R_DzsL;2gnSewN-K2vUDKfadF zgu3mnin;wi9d*`ZPK%yR@i`M+ipozI~kw z?g}jd104hdT_D{3Ad$6+h%=T-x~cpmZSyOcw(sYxiB^XR-!`o@X#~`gN5*1?#OU#Y zK?=7KYkDO%oj;t5DYM}%_xCBaGJSJw5xYdc^($bE*}2fb(1iWkBmAT`qA?~}Z}oB5 zJb8@%^RuosQ9IN8+{zf7f)NJ-Wkh4G(T9Ki!>i&F-HV3CKCdsoev0L!f z__K-9c*3V`#BCyRkh}c5*)nE8Qqf~TZIOCWB1pyA&D(ER77%iLdmcBq4Ej`4L#Lvn zw?6u*eCgo1Ew_pznbEQ~HEWN1zF%PTO+j~Jo7=f)pV*R3eDf(o0@;SGn~MEEgAoex z;7}iDY8&ZY`)oVHZ>MEFLlBwmM8=SW4i2yUs%79O0@-FO*_G;xQzIA)#`12_LO%90aRYPr4mhQ-vX$Hdc-m8y_~lSgMc$jf zUQz0Y_(fcxQ0H^f7I$73h&(7G5crrEkqp=0sB=H?Zl};M4WhKLZhR`=snLCd#nHgE z?qQV&6p|)wd_Vh>^(xnT^yP#u0JCTzwW~crN&61pFnU?Kf(DxVBL2Z5w$#7Gq14(S z^Y(QXp5=uMeMxDp(zs)0pcj#rw?R9h0?fqhYPS*m=KHbWP;9l}^vIauPb6R?0LE^3 z`~BD_%9&)$?J{`8j-L1lJEw>&>)`6;Immx~s-`$_b+jEvV@ttYRJ;fHD!>l#J0X<6_sNx|LoSwln-l+`(&{btkif) zdUZ+IM-tCZ$DZ8@3(|WnJ@=_cV7PtI=57Wr4OoCpz&>Xuj%-A!@52Z9coekbowFv| zQFg4n+@y)mhj~h%OM~CD{O+fVkA)LsmP4t&?e(;4HAd9T%Wp?)8J&XIg!hx4sAm{ExdfiVGnKU)U9o{#j2MSpj3{f-N zpi$TOap$?xjfz`zn0K$Cx+b-1Z+~3-v;Gm0Sa;OQ-$zY4heqv-rY(Q~U%X%G%6Rk{ zR2sG&w=tH-YtQZkJgLp@{*W9MIeNSCv?fC+9soCw5quamKI&|ISI?d`VLPoUr>FFZ z7D!i(+ZgZGEXGxqS7W710!8>*7n_?h2ylp2Ab-oRowL`3uA+0tcfu-=D=LH$Y5_2q zmY-@q+n0?ppXwQia%Fg6V`JeHdG~^zVMU4U7R&W`||BhAXL7+_q>By zWXE-;tP|S3FW2$&615cLy^{A}GdEntdqjCu5>3R1(dy>aY;zrp2X?+#EK9ts-}erp zBsI&YzCeav;@fJ$%qI{+pQ}e3B&K-MH%+f|50nJjj2@NNfRBU@Q7i?siopBIf!E}1Kyc6A}m 
zLwBE-zklk_U-owS^f_~_Cr22q`8*1M;zE+~hLX`NKb!G0BalxzIBjW{l~fS1WWcw@FB4m zEl%D}8Qu41MC}zR));|D0uWJtpcRTP_97UagWBb-Zfs3;wc zvE=s6Q?<4>6KCdW=eX)eN1#o7xr3^2_VUg*MBn|BzoDGEb|QrU5~3a1qW$?qsmrg9 zi#y}ip2YXoy3=H3@VLQIrlk)rWrHxSCOuLeo5a9;uTV~7X@Gf|?Ip5{HTLO!ph7o- zbOXa{)^DB!K+Aw@V%^9bB@lZ6Gv1tec3!(*k7+@7u{e^1f}Gssi2^pmNkdfMHxBXn zQJ`;i9aB$RQZT{Bx+r>vEY=z)FcDc}fkFJ+v`S*Lhrk)C2+JQi%m1Aw-61wpQ9bt6 zkIS3Nb1P--yc(@c7)AcG{lR<57APwcJ?R?%G)O$_zDc83wWTp1IqX?Pv8qdfU8*H! zWUgdg%zrF}kpYJ8)&~9`(kAXBtkv~JnR)sc*D2ciXh-lOX$iEq)7!bvEEBkh`Z`Ft zku}Bm+4I2FfN$qFr%4Loo=Vg+Px{7dS@WGpHMAzR7>oP+N7ig7wQiZ&VL%v7i1vk0 zcP`~xYYWHevM}HXj_h>pgMsaWCqi4eX%~?3Sga^R9ej7v%(H3`#)lU@iKjh|VvY%a zH3J`0i|YGW=||>dISRYFjyyb1+3Q9VIFoS9UWDgy#FI3mk4#mS(Jp<|C|zpr(OT1R zcw#Qh@(!e9-B#M}6!OoRh}g43)!0~OYMF5x-Vyp7N*VKG*ZL(^$<1v;h3PH?X|w@S z$Xc||ZO|S-kcQ3zkBqqy?X=jt_r)-wf}0t^ekAV&MOb~8kV?c+bH8v5kvcUOaTL>a zBDK0Lq|ycBa=hEw)gPx1E$E@|f7 z-MqB`&()epuD9i82Q>j5P|D@-R1>N(Tp}l->Aifr9qGZECVqG8$zlq;T{p8ZK_fHa z+;KGA(PXxIaAhKdm&9#_&~p$6waXqbfL4(99)&v{w|rU=A32bHn4=Al0>G#u-nLbKF@=h# zPg;YLrpo10Wx)>1gOo0L;7W1ngkWl@QS!1e|Hs@c(o~cSHGY~gW(}l(2)B@wut@nU@OpWpKi~7r1HDLdNY-?+GD_g*U_?!B%VWk-qY8rfTfB1!fv zDe^Kd*{)FuWhAtWQc-qhA)}H}gfwMDsifa|y}zH|zwh4HYux93KF@ia#{dZU8l&M# z+<0-=Gq)Ym@@N*eMvW16n)FnrJ4-T{i8JxBN&j3y)pP27^Wb>R9M;n1lvA8EN+gV4 zK3Kf!I0^f+r0F%pIKx6YVml_EH4m%~-me0AxuCx@;ME#)$Z69E9S4$9+Kh+@4Si?z zSvijicHVM6Am1xUXFC3o{Wcb6n7pfIUyDh|wyS%4eoG=;hX;KOgUugxC`0ZSeaOG%nT+ACPZ%j)jyD}TMQHL@$Y#c`9E(!{ z%JUl#RLEf>-T#x+tnyOouw6?Vc0I3`V)5p@a&n20B2nOmC*OQHsqmP1Tk?+Q5b0vG z;9y$Rxp&v15VkEX-qYu{dhn3ss38{A*LH zGY``iKalM3U@AlQdO)=ydn1e>?0UPbt7DjW-d_k^VSGC%FCjjoK#mF4FjsBEMS zdqIhLo0rHu=J^c)bBs_5r0YZ%uKjxKOU`E;(G|~MP!hd(?l-OZXNm5(eK?Y%A=diElbMR=sE1uKyr1Yg8wtDr7$y zX|g6MXqvjd+!!(6ts$n-hA^8S0M`$*3d^Vl>6;EWJQdHqxHV&4>2A9?{VTGOX5YaqsN43w2*Sgmrm^PRU8=ej>)e>>1 zJMzJ8fdo_#`4kb;Q=>T9c5fb!qkqpFJCtw!XzMcnOsc3&G`CsioSD>py;s=eP2Gkh zP}fSxJ!{iCla$Z?`|jL$*fXUJjhwKB&Uz2u+0#eNz<(%ak@+?gQJ~Lj(euK);CPpJ 
z+wSk(o+iylMCV<`#I8H#C*JPk&}byk^y;LF)R9<^$@hOx{{7Bdt2>i)G$xiiD8MG7^u+V4(#MXync216UCAU@HyGKD zZDXez#O}T*7>nOkHKHh`x$$c9Dw~wJ1k)ZNafHP!S(>%zj6PpUYDq+_JBgq}lL*oC zB@NLw{B>asH94i=yBgVU&N5KT_gd(3aM>5iJa`1aVYCuUtaPl0nnzfBuyUT-azrTGP z+2nEJTT$Ii&yW!2`^Fx7URvSA#Of^O%q&-_`)#Fwr$8YdxLOy<951=O6rPsf-AJsT z*?-$Bo`u1RK^<&<7mY^TRVR5p4&VLcryW!W>zxJPT_~W0hyS{e{tWA615_T% zXs%aQ-3!vO*l#m53+C8$USfQ73333K50_D!i{zC+&#-QtXTF0MwFmPq|BAjksTH7# zW~ZWpk>Kh1++XtN@Y~vM>@Tj=5Aq(~T^S)p9gykq=B{E&$8pE`$GRSl6|}moDMLR! z@h~`eN;Keh(d>Bi_z-7*3{WjHxLMOMV%lvSY1rrEx4vL5hn>>6upJRLjOx6y+>d5e z3=n>E;11oAx?jM=7htiw-|7F830_4QSaTRNQ9u2b-4NP>5_>XMYIb+Bekr1e?~G+q zpD7eh_b~O`+HB`~8p#yYAIB?nIQ9$HKW*&2N6im{US9S7?x%ZZmd1`m4LDz^t!COT zlD3CEJzqFh4Dl!*!cuu~pT=t3eI8bGWpM0$JEhHtRPK1GHmjf9yY0(w&u_)jk=&T! z(7hkZPk{#_2Vz^ybAff9SV~so(k`a&Od8-7rt?zR(nDP7`IuN%;-q;4G5M0oKxNz{ z(@ySJIcS>hz@>~U4f0ejA#8F3LDM7VA~&-xllf(&d&+A>Z}C{^O85P8vGC84wL}%o zSnur^V!}_?!22)RUzICQM7(6LJ~o?L%Oe5$pz5U3ze3C$1YYCiopX903{$ip+oTli z@#1)2e&H-@sm%umpSkXMCL-wuziikguXe0%G&rO=h_{{Ul8nW+w?K8_rp0hpj&CjG zhx~KhbRAM-G+p`adrgeDw-Qy5ea}7rgj{vg;f6YWC9)IUk#$VU$sbmUZ3xrD?GJ01 z$ko*k52)-89Nob|;55dDJlQm}EBBO&=TQc06%eR4S$Tct3;J~YrC5Dnrp`7)gK=ZmpZG%rTAoh7NdGX4-3h-Uu~2 zkhhZqdX$h`vGY~}TMF}E9wXy1KGlE0&91s)XbcMDqCmCZNBG2v$M^L$phM+o%p?f z2Osv%m+jU;{o)~Q%$dfeZ)6hDB3y2$OXC^M!bj*LuFN6%=nNgH2EeOmS7WIPy;8o7rrWxAouFh|vDB{o>K zPvQ6=^HYluS9T)phYP&;7vge$%CrnRjGhaPE}ZQ6V4lHy=lGlz!Q`G5^S)IEC7VfV zN*(-%?wO8dBGUBwj}6v!Z+|(ymzLgFSnA&8swh={x{^s!zK>bP0h<)mynZlu@Nm1y zLXPqJv5`?$L%UmoC*Bscl!{}!;~>(=VY*8pKII|KZvgYhZCuYtNi694=? zpB2s{?=)#ON16#D*pDA7N>`PMsD;3fJ5RYIpEQ4VLRKz*EuL?jc|{PCO9Q}OF=qY-+dH!(_h49~48|_ng`Ue;mFE*w4gw?7! 
znK%XAOm`GhR#~MSdS$|uf;vw+o(X-p;n>uLf{39~UPV`czN9^HEO8cBb}`$7kaU3y zaUu&A)Tu+?u{sb}hLOsTuW6X&I#%I0!C=jAQaS1$VRJeMMGsg_LL|W(>E#>h>eRhv zAuhQGYhbHm21cXr;Zzikzw`IzYQ?Qc?(LYD#wl!_#Z4aD^4r znAhr(8Rt2?9n!<`f&?b z*c*=Tm2S-{J@jLIf(YyUq+N~kgkz+47c=b6^4Z7<5Lp-oClr8{7+UhN4V%V&3+vjz4Ff-BC-Q@kX<98)LargU?KaZDCGr_CTtEO-RW;gAAh_{`k zeZ{7{tkwKv$yjUZB*drjIF-J#etKkKZD3i;#=(P5)yf-!XQc0#~+!Ns>DBx$<^gc)^aJ zn{Pp=KBx8b?YY7IAHIG%SNuUIb^q;kcmJAo_oW7}KEj>C+4vP{m9dUQ?nK72{>`HF zSo^d*_a0_`q0Fy75IC(cKNjTIs*@CJKm7}_Vj!t1f+_cHp{i2>%S65FTZuj06#0Sc zgP1|YqheNvWj>;9iTQ-3ptMv<0V^ez(kV_xd8;y?HGaDp5&bZpZ+>2jv3QhuEU2f5spz1B@nZWuk~pLy*= z@h58=E??*N`{6F8{7ITmYUaghiPNebvz97%mkICr=DvI$J9e|La8Iz=Rd%O6n#2sL zMQv>p)O%_a4CG?h_AUnPg32+baF%sn$+e8cbs z^=;Lc(pd?N2@^Z1y|@x2vEO|24U|Itvcsw#hejzmmXXNwLbKNJGz0(pIBM-jFbaQl z=tQGPu+#lTsMZ-FBcK<1eDm+{Db#0xEyZZI-*Qq`z}Qa{YzhXJ@2xSX=*P|mdkrKqWB3xce?D6`lbZ zS@VmNfw#!S|6bFDbOy^+B`ByJz7os_woJ&Pu ziPs239V(pG%elum{OV3$^-?Ee%Sn+;Sode#`mka@fkSniH48B!{wKHGvG=4hB}sZ0-6?EQv%S}2Y9>bwjmb7jl-h8%o7hmU6OuI*`5V=QL{ zGEpOaP(Cc6=EY;*H;?X%sCp&);Hq#F6_#Q*cv5#1k78qlHT+-YC6)qNe|7uKyJc_M zzO{mTguFlBFe+W2%9XX2SP2I&(56nNY57v}=&PULRm-FI^b;38|~;5%27kWx+8q1jamMnk#Q$@+HvWV zRPc$nHsJd(*)$?j=b7K_E!){}@n$()HMbM1suDO+pI@o{t{J6j*B|H-!zw%V7CejQ zj)xyoqsSy^T`$ApOu2S*;b7Qz6W__N|GV{gV`h$Oi_5Cxi$6`r691$xXZg^GObi6q zixuVBG$CTZzWH&$TcKZdpHtkTSGvlMzkYnWMmuai8OPui(@c)QcqaT{ZX3=lv2EwP zu)0zdun`>1OjOMf)yOIH#j?G)xa+V&%np=U=M(!nnW!*Nhyd>j7BcPSq72Apo^zwTz-rF1_H)l_#RL{IX=*-YQ8av$(mUTsc2 z^5H}4_w=P=@+Dlh^T5d9fXlgiWX4_=t8*VhrKMJe(&>NFof(I`ygn_ukI>2XvesGZE5P99Ye%}L>xH49U3O@)JOa=Ftz z*?@2Bn2(yH0J9$H|CS|TTQUBz7ls@+Cr^ntDFipm)JNi@Jmo&XG?aG?Sve+eQxcnh zr1tw(5Odo|kYFmPm`m&{DJKVI=|Z@kbt3D(+4W`nn$ zCqL9XVX|w>lD=71`u!TZ$k@!Ey47|ID7mN`Oh0n{eQnVOL;l~5A3-_vIQ5SZzmkyIexWEL$15+qQ`L zA!d1UM;8VPWycO}$piUYI7+1hak}kn+fS`aH6HCuEsFXk)hGC!n-(uW96bAi{GMH0 zh2vO-bmb6iL5oQ;zG4D#bmITLmYiW7sXC&0lEbF<@?5OW>ul69vwwF{$X!Tq{;af7 
zUBdhc0pA<91nb^ccU&#A$wqcLwMS(-^w@IfKco+$A18mP{p|FqqSn=^PYB*Q8CKsZuA8R#LO#^8%c)r{vm}_Dh?O&-+UzKEdaTz(CP`+uQ}sG9Q7_Zycw-Zjzyo=c4JAW z+KbLC_3bLI!P&lp!*}RT3xU(HSuEPxVKb5Gm^mL|llM`?;^$9;+V@SQxz#&1LLCM! zAUiM3r`y_M;*-&9PxYC=#wxw<#w*`iy$tz!qjp`t>8g!w%;Z`krE2R+mF|w+hF9Qt zdAU`8)j;NOSn`Y*ZHI!O5wYt37cMmkLx(h-1f#Ijx2p>SI}0zQ>fuYondzv_Z&H=o zZuk{Bl+~3j2#Df4A`)znV{*fhpT9R}{NLq)dL#x;yG9=!zLH`S#QVnNnkr}AF}+Ob z-HclIocS}B@cIZT{MUC7$uA!Hfl(6K{rdBR7@fHPeclIz3~o2S*sWPf2V&$N7*Tjn z1vzYHF+pa|-jfV*iUaZ^2Yy7-TvEVc=WMq{E~w%aQe(*;70~_~q`rv8ZHaV0IBT{}pW%W5Ygjz7N>&+y8Yz9A`iEBwQt(y79#vVnh5Dt?)wcHlIE%P=uE z(`pDCs<%z0vg4NpMVPsZ)@rl%fazkw(8kY*7oCP9sHMx+V)+k6N6e6Bz~qC0mvhsw zpU5kofykP?7zmo~I{EeWunkwcITHU_r)S5Oid8RTU#li>= z8{soz7y{vGCU2bMO{8MK4g1g>es7`yPOe(r{;4-#a%oN~XsAOnX93 zLOF2e7r?^z^z9utnzbDE&1&4i#%SZRA<(|N9&Bt*z4l8GV17uNF;`{j-N}SUnEu@aQAzSYtP5ce z=LCNB9Ye2|yM>IaghGlFfAlZ>=meOf5cJOt{RIxax$!~I-wC^_()${Izp%Gv+rhPn zU@%qIhAg{iF*}{Azlx?SH`?914vV6CkCNwS@(ppra+7Rwb-RsN88(uc**zD}9y`kU z>0&~_!yWt2);q8o?JO(9mM1Y4$~7tzDOwNqD&evVH2j~IvJm!%FIbEba%2$8f_!Lo zx$!IU>^-0AUjE$4@(#OkHQnYOFa%W>z*ZM6FEKBv91-I~YOs|}@w9tOLEL9jT9LVqFSY9}QTBt`Lx z?%q6f$3?^Ra7`Ijj-^fthgD_K0ec?76R$u`)026z=i6;C0?@kiNF$5hGsEK$e&cZc zneW~Pr^da^&eElt5$l7ltsAr4=n9Lyfc`GMc;b*Y2Hu1Gg|faYzQjtJC2p^TzT3R%sChZZkW^le{U_rh&Oc-lY1Oq)QF3GceBH`pzS+>th zwzs6&SGu#EI6PwGuZwTZ3|akc6xKA`l~xGe2C{3{hY9IEHCaw`2QoyZ1rtLZVE*aX zgh2*P#r4QXL`9c`5Dw zHnd>|Y+8_Pch{@WS@POXd3eJZ42!8Tq<9U0!k72H%O)=M{5^&Hk%QVF#5D#!R!TTw zO!5!d)$cm|C?9NiI@iZVq!9s;=INjlsQTztjr(pxR1I`5s_;l9ykku~R#AjmvdHDe zYMSNLv!ku_rJR@og{IH9{lXW%9JeLadUkOQGtx!Wx~D(@V*93WqHBdrSUGD|9s$*s zgSaDniMRXs=Ttxc#u*V+S>j}*sI=D*R8hwkrJ^F*vipuJ^6;WrQ_yXtGRdn5j`&#a z=xzF@oya~ZLNE({>BI2DEBj8TUQ_sW#)P*7{n}Fuh|69WAJg<#4+34ohmC|&u3Z)O z8!y>=AP}J&dK zh&7&X=a59y!Jxx~C0 zzkijUhBZqp@uKi(-26*3GubeFx3h@$hKy)UqVg62u(2~qJ~(~qSudt$2T0Y@(R>Ag zB&lF)kPvkSbB3SrBPiJ>3%>MuYfz;p{{j4jgwf7R4X9r#QL|Mpus=E%mi)I2^00Cu zrfkYW>K2-YQ!}Pn?kV>1dRPsE*f}B$M2mGzQgx-W?w>b-cF}o5(7iP>rRce#N?%yvkEAqO+#jm 
zk*eX`@bb6~j)HEhc?{WkAemoclUP3uyOjlW1YH&AS2}sSSQ>iOy7yti@WbXW)>q>qY*AL&|Zy}?$F|0>RG`vxs@z|9o#K$TV z9DB4~T`?)?&*lZoZfeNLlKp}JGa3$pr%a7igAazXpFVesJY$Zzo!%WJMSdrm1C77Q z#};(IZD57p*SGB)fr;dqGngsw0#jbZrlx-MJ3nn-3Dz(U9#*bx#9R>WL6zmmCV17xj=O!f-=)6VX8y#ghd?U1kni-d98Zm~x9-|ybtD7SQiwTyfex4{U+ zeZ((MvVVKHWvEwn_B?q;k;O`$xE->%E|tZo$&3= zzJ$Z0ljtKJ!D0dv5Iz@VPQ!XlJC%AH;K`-RPii?D{s-liPfz$(c1`SVwuWUeOQ+iG zSt$9j&uY|I@*4KF@yy1`oSoNqSZ)u_SvT#n$W0eebRLQZkFg5am{!gf>mGoQc~XfV z#lnXENIuCMb`^-L!fya)!UD}00X%T~db3C~$r4e8k@&-W5b0b7H-+PC`_Hz*7uA^@ z8`Sxa)haYOl;u+(`m0|Uyy7o+?}A@=Wh=8k>dsK9=sdAK__@&hz+ z11L=a4Tlgr-SCAlk;QLQb%uLgxD~p?{XrS;_nW{Eb@qqFImTn0$r^jp3FNuNx7@I^t0!11hOutFyMv@0Gcj* z5I9+(vjvuKh{}vM7;jD1pyJ8@!ipellYiwZs5r$eeBtzY@YkYwXdjjmHuo2}LH!^@ z<&hln3}V{3!!E*$|s zLDS%f0x$Y2@a*V;bf@v!b{Q9Eq6#NcM+L>{%S{l2Er9-21uO`aq1dxnUkAN!GwQd| z4`oz<23|D`%`}2z_4Om&iL_ZGV>Cp=_R{CK6$B^SGW>- zlY87EXw1eq07?HTby0iV)*Cox=C_UECknPa-TXfwj|_Q?>Ni4a-w}rZnl@BFP<#U$ zDG`VIqbWMqwt<^EN9LOJVO3Ope$*5tZ8w69pD@u2)&S%41BGK2SN3YW-~agZk|yK{ zy1gQ8dFdH5Tn+>obxW{4*PQ(tynm=EW3+; zB^}(Q!S7Ws7FfUC;4Nw`@Er^oMOw7y!4fZLDK$8D3GHqw;A~1_Wf=^}_Y(#&zCW{9 z3&3V!!5>epWoQpfU)XinrFQ_VNOlB%4{#4gIAl$*fghrb|61swo9~PlX5%8@b`s+U zYMNHf4$PQdN*_|VRT|Kc@`6Fdfy!Mvjr8Cv9I4l1J1R^Y@Kk`W_9do+CD?(jZ;*nQ zV|n$2bH~E7zKnsN;JzU&sQz^+PM}!-|8og|Yj*Jq-~bB$2KSxO$P=N{D`f7^71 zRzciebPypO$qwnrO8nB86)_}il4%x(V+u}Ip1)8R%afE#*e4cX?8=?}A!-Bk;K#4( z$L@5znK!$q<7d10{Nr~_9S_@nz67%lHE<~YwFTa7Qlb`L_AcmPq)!YMtsX8NPMG>S z`Otc6mu-&d5uyM1#73j6c+@B~dJb*-myz8pUkw24w@B?EDNO}t<0WE~=!xO$pI_e1 z4pZ=aa_m^f0SSVTj}VW#pkizj$7(`t-}B3AJpoeZ6I}h2{68kY^VEjq{G5GGcy+w*o1jG{|$V)m?%;F&PMNRVeJ4`Q=2>JKZo^RrF$C-(G-^HYaMMHDOckmvMFSdcuj2k$amq zANecCK!Bq|wwf`1E#x9%g6rI=RLu0(V=jwnUbrKfN;5F!oa>&dYl+)v{W~qY1W{2IIg=2_jwkXk^LJ2hWyb~Ggh3>|D~L`^H35+Ys4&>GN0E&MlBVtAIeiP(eu zN=M1kc?{~!A-XdK`aY>uzlpwgG$TB8VPEh0jEp}Ym;CiWJ79T0>pH%!`-!GVT@Y2O zt*;;hMwTtP89BT=c#VwdUED8siBT*D2O8T&_Jb(J}VcrqzA!XWCn7oiK-PJlO+W8eb z-T;*@0Ju1=yQ-e5@1qjb&*x@C^_mi)>^8I)73nt19N(C3PhS!qY5z?>=ztb;R>m{w 
zYJ)hZWHenRZ@2rAS36{j5J#yD^A}!}?1X3W$@cQ)tPhf864=53*eVl=qoUe@vPKW) z59L`AtOINXJVLLBl%0geA!X^=EaytYyCgBg+_(pECtyTlVHI4ou)#l#(>$q(5%N%1 zrO%d)BR6}DQ147465$Y0s;w|FqTVx%m2o==1$Mp9y<|BP^}{&?Z4XLS3pq zl>4XQ10$sP4z@v%xt@MoJ4nze8}DGkz^e;uC5I9GU)Pp08)`Z|`$Tw2NF(t@1iY$k zhMko29rfi7g@6sMkrkb2{h^EWq#t)TXzI??L+T_NS_tQt+PL4N-YW^4DBJI2MKsvl zGgwlUunpSiK-WhnZ|HC+aV~b3z91wE(b-NRnm+=P`=N!U>$Kb0W{g@3oixFT@)X%N zvbPF|FL(Tj`zv<-&`uf?InkSqeE#WvaoOG-i=FDMp$2cRezb3Jm2Kf)>^Ol@D!XvL zTfP`?!V)eJejKb}6LAJVRJy1yhPU2z^0s>f9h)$)$Lgc1kgmMmX*KsdU-F}(_4<)e zll@y1T+r2Qll7Y4=~^ne>sL~#oA?kxk-A6cl|=Qp0-obCLXOTYhKxYr-y0AF; zFpZu5VPwy0iHC0K;z8QYwckh4lV<`Ev3k7GT1DB${LpZ8=vcj!54%!atU*1=99to3 zv0E#V|LBv7ENYv|IhtRek>_EKL+v~%dY7erIPQFqW;woZutn4|7dyXx`M;Z%7D++& ztf!>Le#bUQKTl4?%n6dCBE3aEq8h7%TPY`DG8ZO>Hq9^ zKAHaG!1r`5y?^)FQmHP`MhyQd#Wo1U`RsAK6e0C(z|L)!?}n}oK13Wkh|FzqnpC{W z4(2zJ*4@S{Lp4VDWm#wwH?^xLc1n^;#FqcD+lMX>m%pl(V>yccG5Jz#BvV{TaHsxK zD@R3@rY*h~2Om(yKqTC4v`Z=p>eXL7z3P3j#S|YR0oNGJihDrC*JeR0$P9Ud=>iYk3bv|1p~0xklW|hlvrXW8~(nMoVL|Tik)RFJg^jlad>pRZMa$B8JpH z6L#ho&{jMo&b;J(b89i;S%d#Rn2(?h(za_==WH-rS+Qf95jFRnWDB6er?A6ArnEaE z#W%59NwH3vk~mQ&VJe zecX-TwfieL_X>Qn*f`nq^{7z^i_o4mXVCe8cd6oxwXPB!q7%-H>a=yE? 
zKPsYeIkFwO(MS&H-Ao#u6B7PQ(Vz_^w$(hxd# zGV1twZ22Dsj~j0B56Oi=*mNgKdr=?1ix*f z(LTSrBPOUjOo*?NxZ2$%a%Ksjz{I?Mv|P)Ze_pJ2#B%h590Jd4j)UjTi5v2kW68#8 zK|_IKpd#my>*m~?;EL|o?{S>Iy>oRWvN;$k3;7snKn@9(ebBv#nCGrZ7*X*dBv^oM zT5=NaT_YVYJDp24Hc!mL&m0KQq1*@sh z5e@}_9mC-MbHt0*l~Sn%Sk!>s2C|C||K3x*1h zW`^RF<#f3ssLzeQW1A1ccP}`8g$~14{h=gCpX|fi%o-7A*Nf8Q zYfpZ%BT1J(d1%tdChW!-?)m&T6#;DicefYQ+j@o<}Uz^YdONLHIE!=1{w2I&N zPF`ErITnswD zWiTd#9xGiuAia3iC&j3KO!kfx{zzw}bNM_N!3o$A0%x^kCu^1tVm8rDMlY-S#{eGl zV`;jWMnf?f66wfIWB*GYubT(6^-VrpH&oKM*xLFNOp>PC-a^ruaCF@wd{O7_J;!ap z^hDZTp@4yMolQ(&#-cO+Y%SUN?7wdptmaI3kPzE0LC8EQC7=2Uv6{bTy*y0VO?+<~&0p0S^)(s{BDc#I1_7 z-d%3t@5|d0d_)cW;PN!>*DtX|L1q@=zy92tooBm;T+;y-HzYhKH z6_{~|Kn%-x)0um)5>iDV%b@S-{>N@TYAWYunsk7TY2Y9zw#N|Vvj7+GstUN#3l9*+ z%HraNbir6r#;Mw^0$4C%yeZ>E3jk3r`hNixF0u#OG+){0yI}5T=mXWb`-nFZb8&eI z?5?vvlxmP3a-9j1lS@8`U5!6D4)AT~p{;ze;&C*JWS1?fEcP9=+wXo3tM%g~iH<_VO;?bgnCF z1c0M6pzEBEc}Q>fHU$Sc*CBYAT;=q$ET4E&siJ6c^Qja|Cx(0+D$Nrofo3IPgYNTS z@E`NMv??uL&F`=CmJ$bf~x#t+CbGE`$x z@qv)u{8$J~Sjo%y4TwGA_BSx#Qwe+nHiqo7F1?&GEwWc5$?qp%%2^035wT6Z5oi>I z)m8Ety8sUvwz=+I!6_@{V;$n~{x?kJDj5}hxw71x+9)^2{7JEo=`sQ$^Gi4p^CTV- z@)BJgfzeqbud?WUn9-%xV3D8OMAc2y2^$47 zuJPT>o*l)8ziU7JT%2^W(5vSlbf~FZC_i8vd8EPJyuq8Iq{{NCLox9adpPgwk`-ot z^C9-qzxui5WmpOqTF=oA7DsWhfOVbv3k;{=*P+NykCjZF{f_49OB}4p##?AG@Zt_B zEbV9R!w}jt)noJolF)TVpvR_{Z9Hs6vfUz%Rkm{$W-8VqtH2gbyU3c${yetcW3|0C zfj4@lEpGKH?1P6b$N_JgSXbkvl#ugT?0jWD{vjprmq_OQNRe-7*?SSKrm$8c;~_KZ zL86LJFmbyfW~B*`SJCLhigD>4L4Lr=#z~~5TNJ39BK73b0(>uaOHm-GUYR9fNYo6? 
zrp^zfP2=a6sDG|*M-OLqr*nQ7jG}6XY1+p)7>_(P&CT_yQh38VbKN`Zw83PNU&2^# za^ffyDZcsPm)ycqpl6D6^Kjvm?u3@cJj4>-8F}nQh1J zmCn{;Kan_vVlWzhFK{_PsMQ)sO0uL~wV5=4bqI)GXGQV8(?PapP^QgRCf(&opM~f% zFGp1Zx8K<;IO<>P!zsNZZ>^w#=#icM=+u|gCqz1?BW`iWg*}D|^OipHwbll-*PHlc z%;zA4pl!r;&CoUOAWJ2Uu549=wLO3AnXD)CpZ$EP^rhL7#Amib)Ocgq zctzD2%VuHwN?c@%F)sMzZO@5kJD!($bw0m#>zqBN$OtZeZ-e9WVs^iJDHGKgVlri{ z&QJnqQE}lCw0dS;8M?nQ*EKqQCjIo{{V}9MC+9=S{S7d|lm1GOrHW%n^7U-t+Xl}X z?x&tq95A7%t^zinhH#fBM7_=@Y2^DyYR)Hq%cM0U$vc#ReY)_aF0x)pm3q18oKIenV*m&nk}hGV?3F?R9z zV2-OH3M>)QU~}r6y=A>kzl~3%eQB;+EI7}EUDc@?YVn(QXaYojL*5y8f`l>W(KZ2t z7>tG5M)FJ)?KODco(z%>H&m~I0QT2tgfbp+`&0e4QC|Z zy;5uX_IxlmDLdv;2?I}Vl*>79uhwEnF}b#D7@agX>e}&FcTr66`zqPOEb5)A#z#JB ze)=OAR}VE=*>?kYBLO6w006L<0kQP}x%BfhZRm6-&2+cjb@zH1pFR^4c3)2Eh8Iok z^~ix!?)5SB`+7h*q7|tuq2u1@(jhLweZd=MyG4b2>)tG%Drz)!Gj>f@NE3J)sjm16CSUmB>C_j*sc$~XAC|fdkd&o)5r@)`jc?v$M3$(kJk08(+AI=f9PuK zcceHM#)@bQuXpUzv%}_QL$h&Wp$o;@lgL79-geTH)vz8_DMxVHNMy(GBV0% zV8;jVyp}`w4Pn^h_KLZkXYva(!TR1OkzNeggi6H0S8@0I5g3Ept&9<{EHXlyS!BoG z@291*@nw&zhWs5rNt69y{Ndf1j4zYdH_+dE#yok1c8>rTs}P4L(+Y9*!pOcddAB(D zTrLpk7G8!QVxIx&takx@q5t9z(xX*zp6cO-5$~Sqe=9J# z5YyMfE%HtRn;`8Ty^=U}@6?xrzfKdDj;iWUc5GH)ShP_v}2=)s!1gFpC&hBNivru z55#^S2xUgIfVrTJM?2$bFa7VXCtS#f>bl;Y%LOV12T2LZL#(t>_BhjNp<; zrzrEL7EwqmH))~d-L#bBZ)t%mt+H-Ygw=B%7wfoGBEC%>Qm4#6t)q9O`>1?9_&Gx+ z`pww`e(x5)T?w4HTY>@?K2qO85L*KSnh7rPi3GgIlaHSajE+^tQHOd>bYxiR?3P$v zIUhacESp~Z;`#q5iLhtjuke~8`mxa*@CH5$ahfWPJKKqt?@keg6<}*1vqxMfa%g5B z&B(9F{z;l@_Ya3=bNsR}oHm;pSF6A9mS!+;s}yGnJ^iM_TLaj3e|9U{rEwa>0AHZ| zA|dl3s#ez9ThY9%4xiE4AN?&*r>~euqGZO?>%pz2IpgKUaRlw$mGX7M@qCXMglHS0 z^k#i##)+ior+FP!XCISJJuoojq|#JR3@TC#tq%`o9?+St_M%+%dGz6s)@z!XF8Eao z*v%$+BH#2)TyuEPYJv4VQ&U104}r$!R%8d&x=8F1j$3JP+xy>oCB5G#^Qq*zeCfpf zR5F7HgP{Na@#2=bp;7}Hb>m$=2j>m9#=F*0x8B&P64JG)Z*#My4sfZIo>l3jH%SA- zmy9+XvL`vmo|uLM<0PlB&CJX2Z5&82M;S-+&f>H(Z13lmxl*X)^UR5LChK1>giXKr zSZji1>mZU!56RA9Zs>L&M|S#(e(%*lZb z^}~g0S-Psl=YCu+&)jvSsbQWl^BjnCW=~JhHi#1F0;bcqiO17aDFh^YIeAJ9D54BK 
z(;bJen+cic(y?;0TC^v=y^{H2625tN6HX5KXEpQSL>RuPl%x-V26D?JF#@_X;}j1j1O#F0JR?@hoj`NHmbAP@;Z()x?dOZziJ< zN2XuEx!EC%{N+8tR~b2gO%^$1$v&u@Xx&-mfA<`{>GbuSZ1wK1;9ROM&^XbGv_=OE z6z(}4KzD~6gSu(EpeAT679b?e#MxEu2OB4dbCAajoRF9x)KNB=4m`xdWiSY-SeJqv z!-_}SJ{O47FIRd|I^^b(Cm;d#9?%JIy!|BeN6xzpRrH}u0$_ERiMuayb={SFop|y5 zpC^eoOBpx|n|Z7+bW?8*d^x3Sgwi#V+#^f1iX)sJ3rew2tP=7&m7v+iV2})w@`2h`0QK{uh>hZ?Z`!}AZm@94*Zn7f zIpJF5oiblH|NfFYV3tpmDCNN9h>)lpYhYqlog!QoRKN{@2SK;=7bRU5%3Uc}+4x-I z0*x$K)fhh>G!4&>GT6L9BUg6c;B`7`e2(%auB-D_21aKP_h|VCzj>rlDyl2YV?9$q z{ktPAuY`0|g6y_&uUOxRu{Gvh72A;r4^ox+DtX6Jh+Cxt$q zG3=hr(q`irtuG56S}wuse~Ihn2+tC{8U<&vIWisE?YJVuA|yg85?4L5_R&_&cZ8x+ zqoPMMJ)yXtTUH7;4Q=#gz-0>AxP_9xYZ@lvmLO}yG#ZNoMM?$0Ir25vN^3)j2n7W36_WJEWZe(tJsbNL>?*CU*e%9Rk9qyfEK*alM^ru2 zOG5;+cgDp^k-_lNnmfyDUsFE@ z2h0vd-acUN@_uSM)K4l1;9_OxQD($dSQZuls3Jxt67jfTO>9TsFTSCZHcnO6#kwr* zf=+HEOwUMq$b|Gnnu`HKX-Jq?{YiY7zip*v_N;kzfF*W&)Qz)-YV6*Dn-5(hoRHhLalpO*{OFLo}aJN|Zqk6P`!-C&#c6=uR#Wj_Df4*UOqG66|Zm1USnRs`#Jnj$K_ zSV_AHnlMy!+mE+UlQzp^46w4Wh>SOcUMmW^1g7%J{5i)sPMomV9c|!qOtI_<5>6yB z1Q(?6N8uTp=~V^?eU<<#Q$uq+Cl63-3=)m-`$QRu0mc- zSDhKhri@r4V>zH(vKH<~DhTT2fYtn2NQ?j!INFLJB$~pOJ`;Hr6xgW;DGvF!`Vm0*jP`k2f!u@2ui`}zcOdH;UM>nK)uYe)pTL6fqfYV9ur;t+#=(Asz| z);F^(i7^HO+k618oj1$ntYZ&tv*cho`o5JUjy}dwY2|T;@F45)cFg1e6lrb@cb%Kkvtx!|b!q*?T|hdA_SX?8U~IOVd5Ebb6EkTV#B()$6hD zx~L7xm8pgf9&lV{@nA`k$%XBiH~8ohlBAyJIH2>=Opju~DB^^y>FgMW0l_qy#=}OF z+15ylC=c=5@goIk6AY#rF3c`0vJ^B!IbNscj-i}kjeC3U*QZQPW2JrQ&}&wh$pZI! 
zDNZ-0`OUAF?!(+$_L17ocII1TL}}DGSHuf)yy@Ap*7){zhmhIW1@eT9bS8Op(x+7R z^i5t}t*D|1*T$`WOk;UFN~e>Lzd#IZW%)lYdGPzPxY87J3um4#dk9t@C+^4^hm5W^ zqN{!OB&Hfo2-A8oZiPX!Q}iC@tBbetb1N%G&a0lpG>=i`+%ja=WhZ2!Cg|Q@s?3e` z-}O-QwTnTQ7(_~qrh<`Mf9i5nB!RZh*n&e+pjA6nmK%noU*zK5@psXDpg`Y?k2kLP zSiECH2=P!*>SM^x6lpO3seKu}mphWO*(RD0 z+MzlYLUPk6D4gokyC|@<7py3)&n0~&bHCimh{{%0B?LpqUDlu8zm!8Cz!4p4F52p2 zF1DFkn6GAN9I9}#GM}7d$A9m74JBbpj1rmY9mOZT9u2DY8)lGF&_T1qmw%L3zWs_M zD#?MP|K55Z)@}6mtf%eW0QAL3ZHvmk?&iy3&KgD=3D}5Qylz2YpwaQIAS6mKM&N#i zb1zB~YsGNS^Tj=*k*#;fMM%x!0sR(6U43i;WAZ^>8SR4YHInizuxK~-7UJd!wHij_ znTKjT^h&FK<}U}WY`23yl>SCiX1;Zm_YeHThu;3`IMlwj$im{%w^qEJlKz+9pmliH zgf&L~>N7aE$jN26V*)OI%s)B zPXB~M5w!?aq!iZTRfYInvdS1vuhd??s2gFW@_{c`4f9G9@m|7y)u(c&8--fVD>!Hx zk)Nn=!CJ9<=-`B^7}y9C%}TgR-qa=XIUrh5knuZjKHg$)|ATG5W$(t*gL317(i+8V zCJDgpr~goHC5+K$q+tgs=3#b-!%ysLSCaX| z#+sCqeEk&{@&OHlI&%1c%V7W?&btxvIx%gP+SM`7-~AyFDb|(97@2opnR#@Dgc<%0 z4X*v9U2FKadnZDSesZyi!dB9JLE=hn0p>Rw-H3Qq?fJ`Nka0P|M7SO#2Yb^5Qi=AD znt#8Y7rwm?huWoqekWdaC#t8USs>#1Yh5C@`L=u6_*8Y(xi#xJsmnDegHq< zERY*%qse~3(Tq4TB4|nyKC;I5XzVK7B+lP9sBd{|95O$^be9Xc2anRH%U@sdP7LKp zI}czW(!$a6IrM@tvU$x)Pr0NPXq3X$kA@$?%MZCIc>t(p=(A@p)1gbt!S-1WB*f9r zl~>3R!8MZoJJl_Vv4wG7s(IuR@1P@FCZgnn3^?u{q4)g=dXFHkxB;k=ur>67UsYy* z`|FF(NF*d5)XE7Ava0+%7wWi)T)$0vSU>fFT_&K;9AU%fb`eTYNjLci!$SH&73B-N zB=EPPHuY!=4T+!8Pii*a&zsP0)dNb+`AZAUU`o=7W{^Z$HUmfAsawcrIr@dqlpE5` z1*(ySl{3F!mCbE*eVYgN{`|X=Eh3ot-}PR;NC(X)UpszHhq-lQ$@AE!S**I+_3`?? 
z{i^p0r))Dregb$?aPcfe6@1@44%VIPju`~P+sQyi-#6|OP!!|2)O=qaC?lbw7}~2M zkd6AV+N~W`0M=mAKhzf_+cq^loq=04i0H1EbAI#M(aK`a+MU0=#w3>m(7+A8&?LR| z-=JR``Ri9YBN3ToUTwaT)go`$rV{c)``ny!vm?~k%OUH(1l>#G4}vroN1Uj5R8{^9 z3z6hu!7PC69wY|tqJJnhksj1%mh>>vW7b zR32@UEGkZYU%02q*STjLqiF8=(F*u&)*DK2Mf0ks^TG6+th*ay91)}n+`wc zDj2NolkFC~NznL8Tq-9qwGCZ^eSizUZ`=~ObMCpxr9J@b2RHhvJ*{4?hd$m z8J;MRNqrm!n_oOWfA2hUI2 zu>8LLvWFUx>V!HSy`gZ48@o)=4?p8`C;vtzA)L}xN1{NB@8V_VScE=Q*VyGTURkA9 zw}Dcw-5%c&_uo(N+GgE&ikb(s<0+yjGI3SOi>)v&B1ESWPc@c=c;tw?oVl2~C!W!z z3p0=Nyzcdm-#;_edjz`6hpk8ntH8aSL&?is6XUUBc?t0=$7vgGewcI`a=IRw1bUV+ zO&*jh$I`Ms$nkr~j;IGlXIU z40!09sEJ}m+~n3cmDctDJ=Lv4fpp0q2IM{pL}I1pPW{+|Qo&mn+LJ;O+C;ndFNql$ z)v}T{w>mR=@F*U#%+LNKPDENlXl0MHd-X^jHbfpnLOf6Ct>>^}cpXP~(axo)kN#wX zszvK0P3zvoe%cN@hL^6I%j8LX$7_Z3_5?ShhA*<+bF>m9LIEGo8h)<151C;WGb(R> z61IxVO+nx`W|2)^JW15hRny}rBQMjbXVIl@$GyafxL>ss#3QE;$i3 z?xm&JX6M$p+YY4jp>pDtiXg~jGbLZJ%EU;;pZYY&^Jr?4IAH9P^ zS3yJHDvg%jv9~G(McW{U$c`BMGfHj)0N1!c2}=%1vkrq&Tt5zWbKG3VpJzV6D=W zLi3LcG&s(GiTX`{b{UK+3+)A(CiL?k5>M-kusLRY@$lMDniV+V7g=mM6idf{&^hB5 z)#)gHj=c1a%sIoInM=Buh!8xU*81f^;__O3bi`-*2^uyH;-ndFZ=vJhng-72mX!OM zyC-FFP(tTl65mV##S8 zyY3VFoG5%(@>5j^RCyYcN))20Y$tfI6yzu|JIr8T78^y@Z*t;LcK^|l3aRk)AkEc; ziEF_{lRvJ`YuFZDFU>0NlSxaR6{jv3Fby(NY24E(HFzb#&7S>N<|jXB3| z?ccMS?%_1KZWyaS*6GVDwii4r%_Ua3;#-JuWr(FH=j!IoZ7U&qA8TVtd6p#RM2CJl zpQliNmm@_f$yvQk@~-HDL*I9rHG8+>X(kJI1mdOFR~#9@wl1CBm(lzK#^CKra&x=| zwXn*PKspJ8NSNoT@w!|?HeZ^RE$jIxO_&~vExie)J#qRnAQ`=YqRtH-sr*c)`Vlt)fcz-{iTI9(U$64(NUJ_QkFZ) zT=T}sndpi$*Cqxe;+1)qRcO00gH=qXNfJCh+}l}$%ODNiN0I%QR(CRwidLul=yG&H zs%z6-V6HxXTTF;UPAEw5-_!FxkZ$c_W3WxXZd3phja52HCav8})ql0*9m4T}bo0_Q zb|IvWFDo!0RSy%+v{ro+CVc`Ja=1RcQg6eGyNL|%+m7CY8O zqr7FTv<&sCvVV4-N-d(qs<4y)-Bp(|^F7S{CT!2qOOf92aPh&}Ez$Yb&XnX1MgV_( ze$v8@UnBSOD%eL=Equ_E5iNN6(6%gd!tKtL*K{ND%0Wkx zddG`+v1vU5GK@zzjyz-`-@!2;^Sx*}m!@1oRA&XbyY}Ox$X}yl+?L1*;?M^5_Ydi= zcg_^jerrT|W9_}P@-2U-N3}*Cv4ab3S7#V*kf7(WZ1Xrh-m|4mpCqx)4A~Jq;wBW> zuE)Ucu+4nP^Y-7t2pf_fleKmW0gX)?lw;I0PE-Y-B&3K*w~kfYCv6jI%CA2!cgBLn 
zv2IsmDohMcq?3evcVXe9)rpZB26DUa7e=OPz5RD&(SOfpsGgy2!=|yvZEOVdxydoXZCuE26%3_Au9rT9hv>t!I00CzhZ1Md+Hlxuuwp`L; z;8QBW%A`SPzs;}4RQ*u>)7H-oDNI4pwykz}9L)Il`quxhAN$}`s`dxD8`GbTPoc)M z7lJ#zr-p^SJ7`2p>gg~HZ|)!3qAr!2$_p-BZo3;%i(g5#9T{`6&qzlyB4XdXh?+jA z5oCNb?pb<1b^XF0@lkP`7=k6r9|^`AeH%nqh0e#{u&&1FfCzqmZ_**n}5@<(2VfAqTPB1!I)~)EkHIkx-UfO?-Krl7zEDd+88pO%q`}rb;Y6QA*Vc5-rT`-L3cX6a6k& z$Nq~3ytQy<3_TQ2ab8901Xab=ulW2(X)o!xyPxig8ymD$GpnE4+VH z`C?Y{j?@fqKV~ZlfY@P{%nAe$)0U%)--3uJBCjs8YS(gerMPEq;#f;pe};#t2r+bs zv2CqgJJ&qdB2<(V2inT~oHxKeOg1vo13^qy-7j$7Aaz~9e~+8AhJjJn1-bxZjt9F8 z2^s#>;^$n?y2TcuKX)^0@q~OB{+rC}`Y=-!>- zCKX8b8moQa`LQPV*4a{tOBw_+QRQfnXP!03-i5gkmu>y5xa2&H9S+uhrC8+$7+3#v0KhN*C&26Yh|n5x7zEFQVDY-3l$_!N znym6^ix8Pd{rhFFSscoeZnY3phpTPB#mvtLhCNzh+?qk@yJMp-%U%Cdy~ozk~lOvH{+XsQMdc& zw}DSUISZrX6yOXMzyEk5QkfIwv3kf56)G8^lH zLYhOt+fcVGH;uXS;442RX5E^Uo)2tQ2FJu5Vp?HSx$>~`hIrhc5lmMi5hWKxVy(Wyu zilJob26qGTRpgDFax6+{udx{d5l~^$WpEl9H(P{aRZWpu1m{u3yi+kgSk`QoyEWz81Y;~!-@x2AqzGE?o1DM zn2({O;l_GjL1v35$0hA3mY@7)R0~?I%M~iEckCs=vGDpPL;f_k0}(#33A9fkZfzG2nN{_GyVV zx(`dfG5{km{X9`5^d-Jv2v23q_-q>c=>&Jom;Z$BT>OEo<{_46_k{%c=|?IW1$YQd z$l*2hQYofI$M?A)RmN=w?Ta3}wl9(le@xOh4Jv#5Hkvm4F_eGP=;x4S-=wJ^zbE<^ zgc}m^q=$-NWD)l}kJ%(+gBDr^y3&K6pquqYd7YV6DI$%jg6NO-aTWI;+t+zFK8_RC zN;Z>mB33Q+@n!f811u0No=y%2#;1PnuO;_E$r#KMH4`xW1@C&MuU4h5=21v+=xgz= z;`q}kyNy7j=W&_$Yr#;^=V8*wC5wlHcVBwyazyoI%;UspJCz?w4n_S9o0*qXrJ?Sa zArg16yLvYl^;MHuhkzvNxiZ<4jx%r9`HP{-UGpg%s@$x4s7p7-_hBx%AsxSS`RiJ~ ziS08AzXB(3qO(1~7X{e~od_el*wREu7)u` z0z>nQ=uwkPoK43EA>V}>Z?p&rGkoqL@5XZlpZv2k(gl)9c+;^aLVR83 z87;+ie&on7V_=-`8_L=%!ePdhq0)$jma(<2>%8e#@MVyDWazC!sYf_GWAo>>_rGxn zFKoTOVhse0L2zssK{eM95Wt_`)u_MF!s}$z9rCOKPL2L!%f&KI6_K^mTu{505fap_y9r?e32>#Ym>J6&C!yT}o zX9`|m!8bjbr2iVH3s-p^kywTJ5cwQIo6x2EH*{_;*O%Kp`RS^e8*0thBs~`iq&1^VYkU!?ng zArT3FV+CUG!on-dnR{X&<7X%-`{wXX(Cm8ogZkR5xcc}A8}VRpAJStPZ3;~G+6Wox z`492hm)!{I3>^LgNb?cTZl_-@AeIh8z4P?qRS+hKgm3jg1waCGiPVV`B~c=228wAP zCKdyrjYktSXMgX$-}AC(NF^{y2AHWV&?wIzz1^XH4!XP(W%-`1c}Mu20ECqRYWnMq 
zl7ljJ-n8i@!*cs-H4ISk%b{Pl8xk8|HhoFYAmjX+>OB~0MVE8#jkjNi4N$Dg8S_$I zL{Wa7!X>`Z<(Gf{V;VgUV**qn!EM1`O`h*{8yT)t8f)P7i{0wCA3U*mk{7bp&~FG! zN5PGx=j!l>WGo~^cx@ty3#hmo{K(%84A(%PlaUEO3f{T^4wX85w)p*&e?%``I=0JpKz6qJyG zd|W~%`W3{B2O!lW+DjrM1DbsRJn(Pi!RYi9wvinmDx|c@N*L62gtJ{)*Yp1yT{6EE zr;>SS(8q=Nn;bZ6;jJX=JF9+mGf{}OgDLEHbYT%;TOok@suo>$4UeQ%&Z35 z8sOIlbN8Mb4;fOk^IR7G+8d>B<|i5w2|#TM8FN@>^S`r z%^0e(SQ0a2v)zIY_=*|I{#xlKFnP6jtUr<9)%m_e=FB`fvG0h7TFcp_<))`WqZvr_ zaaP53yJAEu>GTZB55f;tguvJKP6tOIKBeZN02R|#IyTOltzY%E;A?p!KM?{0`HGji zBeqUzaEC0=CzNYwbwB$UxEF8yqDQ7GO+OiHa=98~E}i}(Nxdm-XW3D*!`PiN;{z(RnA*)wuUb+ku*z{tUBl;~2cQ1O2~ z4IIfuiGO3dQGUD|L?LLLepT*OTPd{U4b8v%p0UOE!vTYbdtXuo$OD?;S-ZJl@_Ge1RGCTtW`!9*wxXT5v%=extsX08>cT{q6xzJ zJ~;42>Gu6bZsHv4+}3B{$88@UZeT7HqTqX(B8z>7G*;_|ekSp&`v_)irF+Mf28(@1 z0*4p+<9%lUFQH$q{w)?ZI4uAFH>|K}Meqe9XSe<3hpA;O8HtL>genM0Yco08@X3xq)Bh85tmxomXPN# zEQ{YFExZ{SV+eYgd^G63dCw}!hwgZoG(+YyovwIt2dNiJeNAVdgT1|RF-8S(R?<>W zg3-{0u{7%}PtkcP`c7q9r}HIeI^Ke^c|**+5-lHKj*KXrLtXL|Z%8)hZZex}Aysx2 zg`ljU3X~NbLj>>KE>sCo`AF<@U6(#09wAa)n{2`Z+D_?VYQXw$kccylkfV&j z-l`-suXC~aR`x8~xZ)xgUQ!uij5C24_NY6s0my~-`hM+=$&zvUU?|2?lQkf3_L~p` z{Ykd+pcWbD)mVW8QXp{;AgG;04TMn|8Q&PsE%@iN-|`5r8T1r1YYX<=>~-UwXX_WqA3@@AhHq82QnAzFwWJHhnWFI?ou{ zZFsNs1$}qrVW_Tt?lIb(C1TQz02XJ6E83sNbV3GMc$NK?Kj+I2ioGn?3fuJY@xpNu zySh-kv^vrU2b24mTi>rC7^R|J<5os;!q;tA>g;5G+HnR_F%eM#E^ScrLS)QLWTFGQ8+8lNQY!Vd%k z0DfJ47fo0IZ%?l}+`0jk{8Gm~3^-SN*CJ1kUUh%4$Zq`ep8()s2ZmipzY=OK4Zw|V zF26IGw!mKxcvxBg1^ooNh0PDI?t}$EEnldE_dvHFxz(&Y_&d^bzI#l?Kihy%CGgN2 zM6}E))E#S6`c8Ajpf$^?|1^>x{*wWOKYVt|fZ?x~;TM*!Nc11G3Mv9+Z5dR?X_^dKT&B}v2-hm!s{|$Qz+>`@FYBO}Pc9@5{ z)X2Un4=6cPh+o1FQ=vM-fik{<8f3Lf11sd)E|gO>!!^V*6MO_wEm_fL=fEOsR%dta zJ+!u%I0ZM4K!~ARfR_w5h0=SuSwNU#&@)&wbBO1TRPUFT00@vs`I8DL0(d4vnH}_0n zbyodsXHV+>!%`IXZ)Bv$0Oi}*H<8Wr?Kd1~*=6ob-`*Zf%=&9;Tj7hSdq>7|8||hs z(6J}x_j1(h%Zld7Wl9*k(7pHHRSYUE&C9_xA?(I!zyBHB&O#V?%_bK7{d9)JEtr08 zuxoLOLYh4Y$V$brHt|A?>Hqy!(nyFG>GSA1ioEd+VY@&0&^= z9)O#+s0jGlfGM9Bc-|Aju7-hdL~4SiCxJz}eHzNpQm(wn8qr(QfB{CC)0?hgY-TzV 
z%h)4}1jmNHIg=XajGvc~Jb@aG;5Xv=qK2Sq;q$r$n$vn4ef#uhr78b|gFez@!3bV= zi&uc4YgVRD&$DaMT8i4^A>=DX5myLNbwJA9y0ld6O6{q*6ofkT2kk+dnFwjIvTm-V z`%f$zG%+kvEVALCEK4ELVQk1f@hd|%w>}3lh*cxJ)ggX|MQjO{qyr31=gXKvJof0r z!xpozoS8B9iL66b=+(JEy{{-X*GT6Q7|PruoJLn25)04su1zmOev$wSVLVw}43Oh2 z!d+A!aZ}+p_WX=zJB{ldx_T8b+C{Y}EQDCJE41IyEBPXJ(=Lacu^P z&5>{7VnwcFrmD(;W)WIFN0r`hA-!uZi--K%-q7CFID%J-f03pT!8=EE5Uvrish;S% z^_1ljiz(-#F}>Mfc2TzR1%t$koI0I32$c@lT_)jsA(kEBc*7X*wScmUqjj=J{5s+0 zkk&ysj>!*8ea>q{90_?CS=wCn@VmFDcP<+?xi*|D`sLb6CA*L*jpM6C?U52T&M;VY zKr#TI;r)B$Hr-6;Cura61ny?mleQDKc^er6wa>Uysu>_(A1JvkRkQ`DZ1r5Z{p zFuaDRoOwyw#PcKVJT>{(uiDH@SfWx|+W$G_j=%2!fOfGLH*7U>b@oYL;fOQm4tTJ- zv+N^@PG~!2<#93)Y=i6k;3owq7f61!t;TW+S) zd0s$E>`e;1S@{L_9tDK=_ALD6?9TWTYsDV0gt_pLx@~%75 z-8d)pzyKz5)2@CzN#F6Q4CmGIKla)rtOji(Z+^qhaP(82zXWDDLSMb$2~QvMsA7RW;kTpj#yH#YcdexE%-s6gV)KYK}m)H}0Yf!h4bHQZ6WBzm|T z4l0XR&8r2+i13AQ&dPSgnoVIvP5a4newq|ZW+lxM7z&v3m4VNUA@(yopk~~E0I;14 z9{S-X%=OP_Rv==|N-`kX)qq1evn3XB4Iq*ixy{=_=d@`KkA4a~RmH;u>lkekoXp#*dqy#(kp=!*&-f8U_4WuH z@3FWh@3EnJ4f!j3A`cRll&vdHL7*H1k4ylk9yENRp3 zSfXIFm@P7?egcRNCh-ivv{Dd#d_oFfk79WoV&C2=P-LfzzJN8}v?P~0?T_xZX2-H{ zvm;Ja8dl1xb2~`+DR$7fOEFR*CN{jhf(^roP41K?-Ltl?!NF%K6@kH0 zfx${4OhNa;h0b#POhbY*OX)>2mxmy8x$SMvHKsR~>_Qqo9yRRH6I1VSj1uMtK|}^R z$VPnljbgr=v`HgYYKo&PA{wBeBR|exCcn5&CpZ~-HBN4tPEf&b5-2{-TMZ!)@s1JE z5DOmTPgUXLP|LsR;eUot!2@GfsT>>hNKbc}IK(YjEl*e>bW+u+H$GlTo#-V32u;U{ zCDxWC&v%F_MkrAFN9!h78L+UyIWo0rIKx;*lkHId&esk8c1m}X0u5}A<;~<+uPx+U8CQZ$j4Q4=O3O!yZO*ER=-MPW(TuW_RDOL55ar-sVxH`Y zO>*WB7xGXQ@xqBd_@n!!D>h|kb!>QNW5!+~ssPb<YA_`{c%#ke&PCj@8aKyrwTc`QODBAY2*sBiM~GsGZt1P}LzQrN3UT zF%^L?g|?)cXAo3uW`W(r!XEFnwyRthu5gDAI?a;L8K z&M{Q8Ic_gDr4%>Mv0cmC%XN4YA*WF$)#KprARAY2sregS;J$UY^L7Rcn})S!*z&E) z5#fQ89`rRhnnI+<-T6U2_?3Kbx0u%UN_Z2o#CfQLlwk@Jle9`-nW)@rTl0NO$C$&7 zOP&;Cv0mw#^86O%Gn=orkM%bw1o_({dT-Arx!uK8_^zj2s(2N2fnJ{-UgH_0&tBmk zqqO<>ua#jLzdY?CMffjyaonN$3i@)41I<6Vf=jLCMhhWsBHC<38h~<^#DOvVCPqnd_4Eh6&*o%)Jv=zlOOh#7; z*^Iwz=AkAhp&~Qm^Yo4CJ0fQ5sv*M9Dhte&a(TG&%@KHdPPa85jURPEkip~jh-&2N 
z1xiC{!4lrNJ2^S0$OZX#)J$i(l;5>N;~Vxe;G3|b=qB0Z9A@<}Whuxa-HWI`TU{na z^Ufq_eiTQZAn;8ylrGH&M9QupzNc4I+Gc;5CK}PCKW<&ySvcp&@BZ|46Tww9d)yI& zlB!d6SCaFiMP2Le*tig3(|IxRru+4aaIC_&>qAXLTekc4z_!7kY;iFK=x3V_I4UqBJs1Wi8oG{|sI4 zPTe3l*o~Uc@@u+KS`qo{YjXs7oivkE6j-CU^dNPZa>G!(jLC@fcc*~k6|~7hkOn_? z12Iv2HTH65b<4@}vZG5`Po delta 75296 zcmce;1z6Nw*FMS&AOnaD4I(fj-JOynC5^OnODkRSBLz_;hEf^@6i`x1kWlGRLQ+H; z6)6Ep&mJFrzj)90zs|X?bH3-A>k)^U{o8x(wbowizVGdua+3BJ#O!zUbkxX5&XeHb z;gMZYSH6me2gl&y;a^9b0q^jw#4O^$WS=C9_*08Wh_H)*mykmcyQnhwQ9?u{x%uFX z>hJ%sFJT!l2`r3A5}VI-KI4KY>>SpPkO%vYge(KP1Vd%W%fV={H)zbw zcNd6=_=N<7q=lrhHxOdjeA4sSaeO+YxX?e3`STPJtN;NAHlJJ@dlmi=YelCF2B5-v zp+v9&`2JLqLjUd4;=(BeOmImdY&$b8R-E7k_6R|fF(?gF&ae=K$!GM)!^rVPL?yA= zNLMTcNgQsX4*xN?ymM4o#WRF8x}@!}3^oxM5?@qIIHOz;#*Tf+smvq%=VAV~?tYH{ z7{S2N(*fAT&eqrdI{5R3x7Q6XA6s{lv2D!ca4|{jdu9%- zHMJ=A9+_B%mL!Z3*ozUXNkN*CEeSKlGBdM-@2J70gn?1yu_$UHtStFeU>I5|VX^y={FR!MgOky}X>TpUK=aZd`&1Du6{Rd3$>W z02_FEc>+^lz&olg?(X0XA@DaJUt4eAzn`#oxApOHvH$xv_!w3miN;nkqTnK6pQ!Y( zNgUi_f1K+-J^r$nr;nGri^K03NVxlg0qnflMI@Ym|AaF*_9k2f>qW$sfs}&@fE99J zTNwYC+X(=TfAKES|72wftUEy%R{BC7<$rTQG0ER1tubUG zfWiLmB@!lD^PYJShVx+&X(^FE10itRTyuaDgyFU@8|g;>?>+;J>F?-`6&A3>+VBcz z97q8_lMs=_>T-&29`S?2Ge)7Sh`Dm~#gyI=Vk}z@XEZhhtjm}6IeZ0ZFD;9_UtqQcns6jWd$H&Ft(y%H7@$8Gvu7SVrqcwOJ?j=;l&JiQzoK?GA_ z+t{Tq;&uWoPgGg>w+DNBxjA0;a`*BE?>IO*+4{NrVv{eZ{+r?cY-r6aryV@wPzpv~ zqswD}PbDt($I<`notT6$&b~Fw0u8^;i~gHYRB$u$u=UKiAP5Wl@Amlf-Hb7QST$Ts zy#9^=i~c`|-qp1h7FXQ5?g zQ-njWkb%EW_8k|ylfAz_tJf&e$q9WL71Np5_HDAp@%v2U_R0RIzU`$x$AO39)h;6( zlZ`$l-=^zz@5_AQ;lx z_RQNe55K?MaG0!itacsa5zqBspJFFEcOl6RdK-VG(R=B!Mj8*H{Zsu%s@}^FoHei< z+7{zfX;g`Y$aoEt_k&L;!VzC)8f#`kHd;g+x{(iLgGAoyjmijX?lH^ws&^)`2yVPO zdajitW27UyrMye&I#F4W9sK3#)kmsiyD2=@ubNZ&%&urLZb ziiKFh^aPu{ULC0JeVb%ulJQL<6=+aw@?U>gWL!1nPMIlnv+%sGL9tfy!u?A+4<)?> z5*Q`ZoBY0NJiGQ{bO;rOibUN;#Y=e3Jn0-BF4A;tk0jjQ9k(va^ZPcTpHTGu<4fn7 zE4gFZ=tTw%T4nez)_j#DT7|)1SQ=usi{Tm2;6kE;fMD}xWUc$;)t2jnmo{XjPk%0m z!p{f}6S0J%ejTqUq!G=<^FB}dTG1-M;4LZ=Pj{p<*Tn**I_CzdcHU>=Ey?kJc)X?y 
zU1hP~q%RI>3yEG~=o~&i*eMFwnDt(NG2VNHGbhzE*{tcMIOeZ09Y z6f&Os@nt~YTzk|~RTne%^yH{deB6SSGcU9{fQFu(-=aY;H{`o?(BWFiQr)yCHspJw z8rZBQO-nwpO9aLhR{Z4hKl6_lSbinR-#Y9zGO;X@Rj^{&q?Y@2ytBQC)$RRsx+mEP zte09Z;z|^wHyV6z@AgAQo+|8@{Q^(uLc&f=8(5hD{+Y9S2F6im$WlHXFH61}UErgh zEg@nKR1At|4*O}__+wZT|7k|rPfeSjo1a}Tpam!}^>%OLPmQ@QPS<%D*dw-*_CG*-+m~r6565(CKIG#qA#R|N_8jVS$&?=c8Fq5mqsDTJ?U7ny`ri7qbJ18J6!f`_ zdCUE$Wa4$oOi8cYnH#Ukcs>x#q7-BmpLVxhCa6Hp#|ga+0n0CGVxw*?L6WFQzKeT8 zRY@aRx1yFqzOInc)*eM+ydr!wNUv^`NiJ8K5VXBNNJl};FCb*yLfiJLMOYU z2D&|d`${rb}#x1Wmqcg>YjE zbBbd8h>DQ??`h*!A^ciS@6$1e88C{gI6b^DnFr0+l`ZXKh!Qu>AY+u^<8xxKjlyt^ ze!RTUp@T9I#;)z!u3SC})5dErGktix-6t4Jsax;)(Qp?1hI|Vl34`vjKfaPFWE*K% zYRK)|>G`qADEP-}!BzQa?H^f{T+tXw-|KpYh(LC`W*$zS8Slx#`cGlzgw+>Yb>A1N zT)t!1m(Cx#7*x@`9bfAIPG8b9Op8&^7qjm?+jy{?WzoCY)ynk$Uycvq0s%aO=Xi72c0@WtsUEf7vy;EcZWvlCQt>f{x6C_9mUj zYVN89B2Yqy@6}k0&czAo#Im!-xQIxdN#f*nhmE=rqvP_KF8SZ^i+I}-ae--qD9)}M zehs0!YhM1-G7OQteD8{SifL=ut@e}yt_$5nQT&&fgBFvyGP9^8?C!`m$8VmA;TD?W z4<*oh%^Te^*il08NpqPF$LMeGlXN zOm&8=IIH6aoa-KktjXq2`w9`}&nUc&8OIDu+IaTuHOltkLlf&;Mgv?iOO>=3I!oW= z;AGbCF|karHwH@TuXaC}`_pZIE2lzS%O+H9zNV4R$M8Zn72<$ST?u2qWOC)1)$f%9+f z@H|}t{_&&CyB67z&VNT)wL1PaTeQtW<4P`Xchy)+$o@T3NAH_YVvy9l@HawUXYqmggHt>|zcn_hx|0pLPaaLwL+KoB4gu z6t-6;$1}KoAh*aJeaw+TD>ym zzfC4`(A@j|Mn~<$Ory`(%if^Jl$aaTZAWqIe4DdJYbAN3NAK1sI*uqHA_AFN?4ewI z9$J=g0q@)y=Z5&56Sv!Wt0G=4yB{zmoqAoRMrDs(MvBdHm`PiZ!Si=G9ePu_UGGyz zI)kc>M-@T*pubUVov9;~H6J7O`Pv(T0{q=y1J!ecm@fN_rTk`0Xr;q`d&-l&C-L!5 zrkGw)An&Oxw~)V2$S|LP{1-Ykk%And$m_aza+vme+W8zrbqjv<8R2v zq3_9^$cx1!`JZ216BjKeY8}zRf5YfPPLYlH3E6Jw1jZ(@b|)Y)FOv;cmolu4r(+!! 
z;-Zl}plpg*ZTIX(VnEt7{PH22Wo)*PN>_F(Zti*sa`^itvS#RV?70ZSs|E;1`%KOX z(JyuvzwE`jiT!j-2->heBp%WEN&mWzkzFHovwoYpXaf|U0Nm)9o^KwCVtF<=Jt$vhbvLO zyp}JXl)8wHhQxe*xs=Z}%RdN=&NuYy*MIkpJ35ks*_z_9xvDUAuCP%i`ewU2I?9cclDn9!% zk*6$-k=Y}Pd|FA5K1<@0D;gK>B5zu~ZzJsH&&r(r+~!ux$SrrB*G^@Ao>N%Y#4YVO-AlTU-;v5++?f)SNzDT^!; zN5Ao&Se7s@a@o!BnV^-ND?11m$|s~O@Sm3$)vm|rN!SqRbF%O$>FHQ?;Y#bAO9B4S z3z_~-*65s6&;l$+IzE!gMD8pyO1LL24`h|mky~{4q-T15dSxk>S()v7dVD|#JFdJ8 z4HRu(yL#3!Ae6@VWmX8Z)SDLR0{IsFK?`+Tt@%B?kZ=3Ou=1kUgL6zK16H66Tbuc`r(lwMK8zq9|I`7=Ga~mkZf_=K37x>L*8E&_9$7s?LT^4<_*kA10z~ zd^YSgW40E$%6^^;b!3&Gw0mj$`nLC%rzNFKFUt&{UR>(S$Vg$=EDhfK{*m_*6!1M2 zlwrjFyuUujvN(M1h!F|dbARD`-uerj#f=CH?;dtnb9~9zgaOaq1?lg^)PhzPgcWqW z#{DNM6mmOl5%{j3+d$J$GYlsw2(Q{{rxd#Js7c}YGx>0xhq*Z6=j9u`(f#bj#c}P; zp|<8Pr7Xu}iplCegAQxz>C$5vh)!LEE~v0BKfyZepeXe8V8!!cuMZrR)YC#P97Ur% zk?(!5y;SWqaGpI76ZD*9clBJ zW@x$}|5Qo8dH>OMAB)@MAQ$ek#pvBAH_{HRBse<*HbfmPC54?SO0An3vs`Od%3d7& zmZ({=Tfj|}|293B)?F3!Wq*IQ%at$Gwq_Fx2io^ap%=oc-3oPsDVNdF*$g5rRy{Pn zd#fEyiR9^XX9B7p&Oq_#dJIAsW4cUIHGhvngClx|Px$Q*`bcxq=_^z2kEZ<{TZz0! 
zBDrRd^lPOB9`TNqR^?AIyg3qGF-eflc=O0-OQfAt*(>F|VJwk8`ET#GjAeXp<2h5N z`Nk=-o8VrWrRl>`ACFZd`{K9&c}1=E4W~O#2k^VKL#`Tph8V&qtUD-jfAAa%Yh!?R|AEZmL%Doq^cmW*x0uvFlGv2R{MaJ(i+|2@6Jq1z6{2* znQ-C~RvmoU<3S_oVmidy+bT^txao^)x9f=*j0Tv#>gS4@nrA&WNC*kC-)f&{*WT!j zfA}QsA=EOvYv`*P|9cA_UBZNUNE_d~2vOIytNYogDmy8UlQ{3$GN*T`PRI=+#yc`aF~Z{TOHArJlRBTy-nKDH08x8#o|0UJ0SG48erVU743?l{9lp`bXpA-3lm;mjBw5mX|Zk z`XP|r=bm!2nniG=`%fzm4fQo4HEN>-qBZm6Z>5fRNq4zC=MpVxt-YkjYvX?H$pUZx z!ALTz8c!V3s(6K%nQdpqlrn)SP+ENXAd}QD!j_J~wq9^FC(Cg`go0-I+~@1J!zQ6M z4>L-BnVVKvogFZZoWsBY&>G6)GcMX08QI1%u-vUm=2PVY>-QOwGueFADWNhcQ!Nnv z?#9Y!YVP51M6c86%Y&E7eRrw&y8_On>~4IrCKI^OW1Mgx%zyjvr%lfXy#Q-3)3>Uo z>)yH&^+WxOv$|}qT3h(lPIkJ-pCFDCOm%M0o8_7B9vjyZ*Mz@Wn@)5vt&}?NoX{-q zbGpC&fSJsuHP~3fL1uMpBD3@owPCje4jelLpOn4kkm+#J)jL6!1OuZ|5u<%b2y%T& zkltBg1wx$T@#pY(4RB=V)eCz1LmL@M<(cxpo5Ycdt+sS`WosMKrQ6y$=H0 zL?9Aw*Ktw4!%hva+ul-zWB(N+I(qGxkd&5MNbEKKFk#l6fwd1kU+=SR0??>*z-Roq zB`*!H@jbF!kLfz%V*je-O&riO=C6++yq8Cw9v4^039SV_=JCC#I zB|uVk;jFl#WwZNbjEAYe_1DP;#y+O7gS<+qn<;v+Qyg*~?QWXPq4KcKm&q5v()y8&$7r{I1T0UPcWf>x3OFkc{k_UA_}g@zd&}}Jq9?IL2oK( zjk)F^P%xMhUtxaFicFOP&Gzc9VlBVt=#%8B%KaZB04h(7v7!<%NA<3c_q&W9K2^|xz$I}3|RoYU~Rm@n(h1!XfS)`-`*v?nxNdVn#o9f zbL#OGswD={;+QR7yW~80VTrh754{$O{5dvP8=XXZSY`m<&(qXo6 zMHBapsD`XUmAl1e@vRw5vVnTYq38xSf*0EePMG`Um@x-X9$8% zN1z@erpn~HfdfMEjqlp93WxT?)DP5{;&HVz)MWgqaJq@-mM^QDdK@6zj}Zy-eQCUO zQyH|e!@m$Vd?&z&uGWJ>I#__r+>zIn+a1+Qr1y{yw_ zJUWrfvb{Vas|nA9!a6*si!?KgHNR-4a@m?i)|cR#9R9SfQ^$!d+42$p**Y2dB6Rp+ zY1#9uW)ELi)-r1@CX*e3ASAc6@h!9}LB{g%54 zkdj!}JE6Agp#6S49>h;9g?LOU{WMHBzfleEV4{*#60Sj^&NRBGqP`Ikew|?2&e>;C zW`W`syjwBhrY~R-N*BA7+(+Zd43qf+L5u3D6~k^{J3adt>8%;Rk{qrH?|E}hB}p?} zI&caC0J-DlTzfBTlq|3MtHA9Z&Lhmc!moowjg&YGu}$@Y*=onW!>e?p1Lt7n98o5# zEWdunqP~-kIE4kq&A~c;&>2Uatb0KQv-v{bn4C0oFx~0&L!2-=g4$I;6IA!2RVWtW z=XI0Qcuh*V3EzaG3efX$NtLuAVW37&ghbf_RF26hWiA#l73;f>m8M;(YL-|e2s_?V zOY5eK82uNvUS>}5B=38KO|b0H>A?`x3*dDlMKS*@skO)0VSbUXT{4+>6!MrMf 
z)IH^T_x=omN8G@nh+aHTqs#WsATY`}Mi`h4^k1j*zdjr2zfR|WeKyc?NJmOKlc30PhE17%PIvt0`Kezu}Y+S3ddok~$>ya%OM)Q58A ziU3I>trI=K8S#xWF6K8Nx0n+!o9xwkVqE7j%>gR1RB+y?1DfTr*QZyL?EO<$+MMAh zhr8G+`>x(G6Kj5eJfwFfF!l*|K^GQ$p7F_B!f~u5sPwuQd$3CYW5t4#qt8B3F5UBH z^L+w!)Ma=wEhaE&#&<+}xZF&%@mqD@VtLJA3Lr57WMaMuz~U4f{tqY(RzPnV3nG}= zkeqOUDqul|)db0A3H0~HKkwMAV5mR(PkU6~_>6fVbrUKyEHmbhe3swChV+0Z(_k{s zG;DP;E)`{1r3hFyv8~4dq_YQf00Q=%_w+rlgHh?=0F6xoICKxFgqSiomfSDhKF7qf z(faFE|3h(JXNJnpY)Sm^WNOUMU{_E{^=C^P1VvnB+C29HP@J!hzF*qi?T;cT)WI9E z4T6G#TwP<-WDz#5HChiuvN`Um1ys{eTtr?$`@UD9NaPJB0VvnY5GNnQbAVtmf^d{lv=Ev$x&uhmfoK$mj$>Qkwr(gq;nD7#08*l>H)1M zhk3o{3s0{ijWh=U<6NzA(*Eie1S5u`W7_hSq8z}!irBr0tISlsBzsZFrVWqVs5Es3 z`gOvb)@=uJ(^ZPNY$V1V8Tq&?ktMIxqxJBFJ;S0fvY3I_A>lqt)NOJW0Z@pO zB^!yoC57Yg3_wOERbi~!92nT@R(&48v*CWv3cmc~u)ovp$40^BSVP6;Q|y;tQi3@? z!!3hmCQVC^LvpeXUz_?jNw7B!_Ekf}OAp8xD_xIQA0?&(e>Vol1?xbTq{+0qZOxi* zVmAp|EHmqQ@$`DLKrc8N3THx|dZ_)P<-V%CS%1(qFf^0BP#2oMt8nUR+8`bKjg)1? zlK~U%F;S^RD`>@M9l90M4E$LGiDO5fm45?KbeO#ThDx{l(nd4ur!eT~nfr8|Hji;R zm-a)kWDwd{KE1kSpb!*zhFCccFGep~hL_A>veiX#fn8d64#bc#!{n#X9qqM=sz)@5 zrU-P++t4`ET?O6yDA@fig#W{so^RHqrm|@SXW>A`kfJHrVu%Y$pg0H^xQ*!+=gC!v ze?|c-NgcKe@Vabk-jf!*0YRt)NBPC^K7wd^)lFYBlCK6yU;ex$MzR|i)mC!AC zpPkQb#r3Hdjvdj&xEx^OQH=DCxx<;P7>EEnn)_<=HRWCfi=mm507b*j;H9GL?+baH zx(6sU&|-j+V~`E3WIU3SBl4RCDvI3no(}akH#iikAFM5Au~Y@U9%HHH2qusRWuwyO z55WJ=_x{u)L7Sv=O5Y(*)>_?~;zG{RCt?JPyjYJUZ}~T;5GdLJj%oYo(=RKa16U_P z#3b=*$=)I8xD2Mf`6GrUy%IXf?-lX6gSUM)O~CS6Qt0~uCG_HL${}XA;{@|nb7bD#HNdtL&2GSlD0VOd9Um~OX2zC;2M zws2W`knCQh@|pbJ-9tke02$S+uxc3{67oFyv1WkE{jmY^5L1va_3kOzLgpiFs8OwLSsql{EnE8U z?3zQZf|h97KY^?!@RPFZZfEF~C85G{v0Fb@-;3OsQ+$yn<>`dLGeAzx%$d0;#%oX! 
z7l7uWG?igO5*cfN`0U(p?#{44^Csr)8}8tK5uw~i!OAVrquw0dziRdvr7N z>fMHrXv(*HY&*uT+epk01fl3TGg4yZG{r&E0xn%f0`b*s!7NZdKPd$g97|MEigtn$ zf}JF?bJSD>{dhrv@~vkkb2x)2itXK^X5Y4kp1fNZ{_dDdiLAHa*dLbKR_tSN)%OPLg}w zq~3-Pmym2A8mo#6C$5C>&kTO~{a6Sn%>G|KcienVX2a0-DNSXtv2&l=A;~SaqoA3Z ztdME^N$!DV`S>*C;dtvQJ5Gbq40IRXoea(nvGQ*0usq_4nK}8aj`>@tw$oi&9%a%x z2)~2*7LXH!bU0+yVY(m4elWjl_K*&&>X9jNL9uq02B;eyx8~nQe(RD|N8SIu#5#rV z9(KQir(bs)IWen$iGSViar=THApVRZae0lf$8jLfO*i=gF-zlfpiJWeAu6CwOFBgP zB^2;L${SDVGA@|R?%iuGnuKQ5swgn(KV$Kaj34~kqZ8NUjF`hsv)#N42p`){2U6#L zJ@2ZW=uT!OoLo<)U^!xZ_~6@%aHWf;hyt8E>wELgAl&v5bS`2sTV$liUZ1hfA1B87 zjYHj~8!f7*>DU<1E@^RN(&V`Zk`Og1`Ul9=#HX27s6Z(8`Kv?M-B=>xH+RAZ$e2s~ zS2WDuwZ?*i@@Eu()FyONf8ce>CryW74!kA*a}NK-Epp}eKwQLYVii<$USqklCD`@s zgI%we>k$NY6LN4QsQj)tJR0t{4nWhV z{d>n+Uo?C@B))dlQoo7M%^iM?KTY&G=W`Kq*qo-oU2gaGVK$=%0CI+~VcPY1?^tk* zBH*ig0K~QkQX-CBQ?K;va*V6q(tCIM6It2VzA*A-#)bGuGT>0g zo&N+Bn*{A6xD*RbXw}qpfuK!AZg-Sn?@RVH5e34Cf2!wWVo0jJmo%i8GwnmmV^o}@ z+JIybN8{pBox9Bl($NwI?(SpqnxgImzLiFC=~$> zI^A6tahsE46Le9%fG~9gc&@TCJc3HGx(j#yb_1axykhz*i9L68^N+>A&t}_ABc{y6 z4ea4XO@Vp|^uitM?6QIv3C^6QM}ht&Uge&ua?*$v5sVQ=;S?yb9`|N6lch>7ER|td zNojiiGM=>7pzMy8PU71VO(V@8we;SNZ}|JX`kQz@3{+7dZO_<_+`nq7dfa0&w)(M@sH0Y{V0YI3dY*#Y_dXn6R!Lpmob6 zBKEi|4|_x6_hs}-(o~*WE%8|<8N*rV*R9SoDlZxBZCet;5k65KQxvXf>gcXc-9&>w z-znb&py*36_X$1zpe3GaXmyvn_;uyA@xBmJsrFeJye`Sr`5{Bl+RDQ{2z2>R zw51v7nHC1`+x#9P%X#F!9!UYH{WLQKEEn&I4kGS}&c{^Z3w~Kv`U7P(s<){sMn6c@ zzJ%e9?l_xKm;4WU1>iuO^+YROfb=ypB;{Z4hZBI=g*!oJCXOTLNYHN0&Huu#uKXS< zT>BDNbjJ*~HsKF824}NjMZv}Tns~DO^$=zYON4*$FQhJ90)DZdz9|Yws-^8&wIgc(_y+Z zLlzPwdw~e*es^0i(1~k;;9RQi6R0`!hV&0&7fukS{d>ZB|ECGR!64e*MnST9k&Kqe z&K9!8MC>DSAB8|bPOS9CN^29fw6dc4>-2;$i)DTWWO8)?Ydllhf&W4M;;3+IS@xf< zW!=WCf{M(ruoy_)H2QJqg8b1pQ3kQ=NzI2}%0zA*`UU}d$PpZb8hmmGi`ZenqWEb8 z0%|Ns_Fr>W#`#9jWG%$%k5Lb}w|7_z8V1yT^)4EhEr(NqekP6SzlbCPXI(_d@Uz#& zOv!YZ%r8^bj&~_98I$BIYE}me$zx_J0$))sm}2>f<~t@n;Og z?}+Fv_We@jqP7~7M9c3aOa4X&aIuE@J3zV}60N5}%bJ>zQPQl6R2Xb1^hNEQ|NDH# 
z-`1-G)&tMae?dPyVlkLzi&S_90d4H9!jeuNljr59UeC|JuBDHBv_?U)vsv`VjV3Pw zx2tx1lobqOkr=&L{9ZQpUudl;&Nej|xhiE}Z})c=!H0569A zzj5RL*^8+$9|8f+^A8UE28+uoqA*+l#WZrdFcL%m{DJ@%z696P{fs2m^O4n^eSf_h zm5YnhOyUWgth!(B%D=oq5w~*1n)0%05ZOuMR#yKY+KTWkT$pxfwc!k6`~Aw4>Yoe& z!^Irzp`&FQ&Lrhq+rM2K0wEd4`8$)tagQeP(fyAdPr}Xi9pBedTx2^WWYGOVvlV53 zCl#vyDXIM5aD#BOjN$JsX$OyTw6|ZE!pid=vXi=z4jH?Nk+KE{1v7I3)Q1@p{;J|qbM2Q0RW*e_zkp|7=%K$H>C3ekp zoFYkOm@!&a4?(DK-<0*w?kT5-uhGOeNycHgs$VDF#{S8${)kMgf+rzST+!8{w@rEf ze)u6?GsaZqwvnBF0@*NA+tVs_a+UMCejnbbY>!u|F?#L^?=@f>0QBPS8loDuHJNhUK$=l zy}qwy;4Ta(2r@v^rLQBsME%sd_0?#ZiD2z|u^Lyc<>l9Z(n^~(y!&Kl4Jyg&1x29) zs0l>8W@TMw8mcG-!Ks>j?S(71KK{+9Y$Y%O6Y1!9I>G10<~a8A8EU#Eq~0Bf(v~Q% zwT86-iH2&};Tq=wPQV2^g>68iYXWMfC}?MSxLBL<0Nl-ZLSRS_$QPj>aF4%dKsdtY z=}|O9O!XVQejw_sakRI-^sQzny=G8G6A+smpkUpdNdeN$Zg9omH^MCV>L-S{0!Pz> z%7+8j;82=Q$^gTwr*sH9-N_rSan@~|kLOLT=UN3}c747)@n2`25U4tX0FvMUAZ%{{$dsX0 zggjy*)@lKze&zIZzqQOo>YM$U2-;C__))uj_{%N9C>5KBlF*lNf^%1c(bnT7LD+{} zE2E0m(0upGdD{KT4+?2Mv6zOXV6ALqRX&`=meTlF8DU_^_3k10`{XhwnM_SgjTS3h5mf zBUvLHhY-w5N^lOCdjtr3GP62&v&Z(4S9iaFdt_riZs0V&T59;zu;k?_5WfFr{0cSk z$Tc7~T{m!7OZIs4HMIi8H;uT$Y0sD1>-gd~=arv=Vw=ER344MHUs`VH!jma@eEA2Xez`PN6?{$_rP%N?9?(}GMt_wxhocQ337d81WVaq`(Uw&ow<`iLUo)j(om{;U4}Q;^&Og|fQw9TN&`NL;}Im*j;tk*#-UFA zC$I&)D!ZFQd>?`9xlGkQMeQXU*hBjC4{DPMFp2;&M*2o3asZ?|nz5j8S1NAD%D#Ta z`!1jLD~s7%`<7e5Gt;R7qqc(VrqO2@wAmw`P=dbh(Nw2JUDiofU(2u7y015p;~!VN z8a|(SAvh0g%<0&dUWA;l1YvqZH(XF-zq~Mz<47{TWw~WwS!&{CK!pVa6g{reaw0-N zOErB&g3m)CtTbUm)3;82rejyZT0O`{q0J5@OCP)+TD{<0Tjx94_woAp6~a^F3AA0gvs^a|S>alrPFpRvBX z!awtX26{T%aSb=K!)7^Zg<67->}YGst>Wp7^QHqj~wa5(@?sOtilLDyqPzBYqChjgR+=rYmiuZ1S1_bEpZfR2~blKNAT6_bfrXYIKAK zv1C-KU8h3SlvtPBgs{B|!gBgN*GuOlJf_&FIJA-ipxGcbr@?IQ_38SI znjv}fL7A_7xa*_T{MSZ(t`U(lr%L-QcLT7W9Y^Ja9&dFGgV|(0LW; zuF~E%LclIpTY~+>9b?AKanfiJ$6j@Ch>{w6t$%8@-rnAJSQ*T5eQ~~c?!$A-vbuex zy035V()C3Wk{ULX?);xcD&$UX1ug0XC|}ncYn% zLs9SKZrUYq1n5RQz;Vod0x!$;YOHVVLPj)?@1|*nu^b#6xH=jFuHdm%OYkr_V{m*8 zK(i#*OVP+?E}}a0>V7Y8MSpj0$NBjsskZH-7*e`kAmlcLUygg=_w_>$Ncm~qO0wk2 
zxG`3(I>bW-M|`GguA&4^hw3aqueFgg@=!U9-Of6)5!PKUoYQ5eS3xKS=(IE zBI|Nix^Ng3m@>XZ!dQ?ZD(*A}(J?NUE((0+8VU(6y510ak^HUAPb!iY6Eo=$6}mQD zHI-rz63B9yQV z>XTm3W0|1RwT7^JGU(fcy9)^_&0jl@M0pj>AjQsu{!nW}zNO;eR{c0sOrwirO zpRg=}^tON?PubbuywUa*rcV@7i7zvC2Wn{v-uw3Si^RG}EPU`62)C=j??f2n!xE#k z+;Hj> zM{=0ahyn^$RW9^RP`1zpW}s+)7(SHdC)6*Bi4BtPy^X{KNfE*7fy}@GgN6tgZ_I-0 zT}I}!VMptc5OlO&Pq>t2;8Lvi1YB2g4JHi=Oj1&&tAr~B6^Tx+RK3d^6vH81px*A*_w~4nTV%{Dpwr!mHk)Y#7sC zRRBP$^F;Sgpb>;@(9r9leknGKvlZ{|aORq#0+F$Wgk8>gw~Y0|3hB}76s{keUQn7h z@&CP4Q3&Po(RNFV7t2}4T^`=jpJGkxXN-3H-t<*~0qFRqBHBG~Cwy9HOCVFqUGNLR zWj>PX5;HpDKt+)H_VZ(#a7|b+W5E6e1106GcOSp&wIPT*F7iCl zgG5s=jm_V)yoQjr?pVe*0o$y1^RJkIf38@xMJ->pI z!AkD{njpkB#0?$wGU9WyRrV8v(PiC7G4gUAhM@jNdxR?gjy8 zwMeYXp9NR;bs)LN#aa)p3Vq6ZLKgGvgZijM==+E}o>X6=c^THtbPA+#jsW{)?lN1sl|}K#+{ zuKon1Mw*P3g4BEBU>>q@2cBrXT%R-ys69xPQ^D`XZaZMqTV=C7`V0{?PEg|aaTI*v zm@QK*qJ8R31%(Y3;s`z+sc};14r;(cFhkf+45I7qO&`A%z|{t%NP^&s*MVv9{IxN<(1-Q_q__n zqrm%;1W4BCNooQuQB|%*3?erl9D+T#3N%N?Z`+ALO5;jxD07KoxYBW^V6gKCedo$r zX%DAPhBZZd(cPx|3gLR*jDUTfFzEVn{Zh^raqxM>2?(SXaDl7Xo@hxXyYlL$&e z&a8$PYELTLoFLI88jB=tTF+~uZsiQM9y8a9AQF@I0QoN4JNBb3 z#iG)#S7Er_bf%o7i#(7jHPc)z`gX(Tt5nGemF2?n1xd)S$mg6%0@IZNT$^E~k@ybu z_b8wu+kH!wIym=PoGs}qsF`jkm`a!YU#z`lTvXxL_bWK)fC@7pEy&P~f}|)gbVxTy zNGKsnsDR24gVLSSAuZif0!oL3l8Q(x5&{Y$oHgG6`#kqK&-3EEIIsNR%$}Kj?Y*z- zT5ElOzm=3)T+<(kUfxK(eCMF!gx}af08uC_=p1fF4GWogfyntciz;Vrnv9m{?2pr} z`gW(rTjfYG)A}qfx({=21%1vZYq>SLHu`Fj8Hh)!+|5vBh{iKn|09l^it{6s6^CgKWdHZ(Q zzh#Y@7k~7-OQr~~1%U>|8NMkwVo5DRD2DSgMk7%A$Bk(0V7PvC^giKU^KjQ2<-`A$ z(L*+<>c!k~XTE=J*$0X2c-IlCTVytZ=?b#oB3D zZi5gCP(&YhjTur7YoOIH1Kfjv$b1N$EIlVMJ z8<)|zq~skPF+*G<=hQ9}G0AfgCg6uwR|Q62+mc~MOHw0h%yk{OLd zWsf1!k%Bz0@BT{Lorzx{btq;+?tBVc+LKi|7sEY3=EJ95~Q0gVZV})>c`KlkIOxOd3<^G)A0g3CAMI)e_Ve;sV8`Q z#z6=7;*QLq$DjV>65_PvMRKFJFSF&kxrHO>?Mu|UwL%~&OR6)wO<8}5j>G; z4G#4pt%=D9YQ6lPy`O#fB%^iEq36>6(sAH=sNV@vNIl;#G*@{Mvl}oU{ z$7!ic%#ukvYVwF=8^>@gH`g;(Mpfc3Y;)r$g`WuIC;~>ZD6#2w0o(-!uaNB^CEs1V zr}8O^9d(_!NvPu^mxXEP(Qb^rN8h$1;XMbxKw&Cu*t8b3Lau142_Mvk`y>41>HJH@ 
z@0LOiN_G73Yk4sUthwkz$SB#$zb%dk`k|e>_`iQ(Z;k65VRz4WrKQqFw zqVF%e>Vv)yrh8=q6`tuCPG8%;?M*>wetzh2(5sNkL_o~Fqq{YIW$&Kz>kmcN4PxZD ze-F)htf71?SFG$F;ohCBOQ4ycv^t)m3|-!bxBTZFTTRa=is+~cw6pw6%o}tgpfFEY zCoG;)`}EJlLI8h3u*N1%R6b6EaJE=*ZCm=U*dwF^d5h646?!@Z<SpM1BXvj-_+^g(P-*^BbWmeqP^LHdj z>)D3OEM81$< z3WPh+mdGjD!C0ndQkL#tU@h0c_{2ar!uzK;QE5^{uyLAXtkiv1pze zr2sDa^XmxeDA{@>i?8=gy({R3GP*${M#$ED5svzB9>oo&UtmHe3-$a9tCLk+Cvzml zVkq06^D5qSMz0RqU6Q-zJ$wTyXCQLRT=cc`%ZLQY5zyG4?lU3f%HP{5~x%6z+< zEwZz;@PB3_ho7+A_s_O(zAUA}HXA~(>%4t*1 z-Az-r*BG;8&qIgQ^Yc$49O(os-dL@sMZ+Q zm9rA+e;$M(AK}UP7$qA=`oESYSAFlZ#o+Zn&XV6s zs&Y-L>~(R~cXWnQ9&gUcK?N-02^5&N-_lzCGxz|A_x!@Cwf8yC^$T_1{*;=fx$|=- z6Etk`%K&%S2EvF7@SUpEr9{&E!F!Q=8!yD}zL;tycUA0`fQktrz4$+~iz?f}=WwC4 zLV^cA)2+DHsR{ZXlM0;aIlcQp?!^=2Y0e+vWkdQA^hUr_is@>HoIS;27)0(n|IQBk z-tLM&NI~df$QgK`+;;flM8anqnb%M;EkHa&iA=QHKyFcOfhGNP_UZZ4qh?8l0o?u) z*co)^h-(*9er=m?5Z1p7R5K`KUu7rY zdcvo+e|TICq`(~T6BJjn{}Cprf5*7?6ZV@g2;(<~JD24OxQZY8-zy{^1WJP76qyKL zV$ zxc(hPhQp0u|5G|+Avyy>GMaKvSbP6ds%%1PN8r2Is#*!{h(+&%iUkvLW3#CMg7sr=Ew z&Z?nZW06E#?C>+t0d+%lk+>w@0PYvx6M?8MBGQFRL_udS9FT+^+f*qqX2sx;8Ae!c z85zriUmHTeY38ANzqt11#rkG|*x@j@uTqtz6EPEX7MVjZ-mAFeXwoRpN14o_S%){ z)bHM&H5#u68gD!drptOVdqnWTYGaFAX+%-5y-*^K=90yn-xst!slxJ<7+9%zNYZyh zraS`0!Tmn9^zt=nV66;B&3<3Q_xuM8$Dt3+wRE0B)^h|_4GxP`JsZ?jlq6^|B-^J? 
z%DsnV|2W;vUr-7)y{`$laXczuW#r=?R7it4f*8jF<)^>qS={98m76Rl!XkFR4)1>X z9Q@s7s@g&BDd5-q<MZoQ@DY8rILsUI{C@WIVb z38KRJdMJ)-BdWQukA*ab@Je9*M=s!5t!xL{@oi9p(`r~#i;3n1DuLlkB+88YkYt;W?&8#5rN&zDuq1gf3)ezh_6e!7Yy2V0ocT?1LT_a=0dd z_h!Lcczz+F7q$(eWb-CF+d4X`kQCzPjb}+0PG)$G1L^m8HKp_WQ463Jgf^pCjHTX> z(fOoz%g1*EhrZU&!(XKl6y6T$&zqJLV@XTMKOWPY_nI9CQ?HL}>`%7f!NAjUr$$eD4cp2=N;;q%*qns+|ebe>1{OrFY{%nu-6MRxCsSanyKXA91$@ zoC=80ZpodE63HX=7l^*({thYEtq-*-(4pk8`M<1b6ZskoIZv*}mU1hHS?2rWqc(ZZ z@4zk$xIImdBNoBk?R|xhi#sfKUkNI^9pwJmq_?oJ*=G2zu3r8cI%Ar!y6f`oixRvUNtbj24Ja~6HV?}=`VDu|MX(s=rg9cpj4e@U{(mTj6QrG?ApU7Oh<%0eOXY-l zvCl!-X#pelD@=jJ|20pMm6#c$;eApFXXZPraYIh< z(mZH92n$~8(d|$b7DK`-6^;^&fGR}c#0Z}ks6CbBg@L0`gxBOII;7yJb_h@jM;#sI z1`oZkB(u+x-6zzX3=15&vI*gB}9>4ABjLe!RAW@W5^KhIp_~W;pXT z(^KRZxM9kSH*;y=$WJP-fU$lE^VfIJEoWHYQRs;pr$j69eNZN=0xV|>XrFGOE=Lat zWN;@>lNTH(g5E`eX7oI~xg4HDGPB5GW|+45hzU*bfZiJ2zZp!)v9OeKF9FP( zhKu;qHQ=W|hOcoe8=AaD8AMIw83>30D|lrJ+%f8j;jn|P-Oj1EMfsC-l$*~%+ACvj zLI}Kk5gKKvW;r7N+Xj$-I0=u;e1H=aYE^)Bns9RX1UjWu8ghrr9 zSM`E>|7Eus*RCE?u2dGlIx5G?EG-$tUHHM1mreaNY=+56Vd%Q4{!cEs3yy$%&VJl9Hz$oeDJR>}F!OZ_H+z!eCydKb%Mse#c-5xa4He}QTr2M!-`o{a=cKfp?30pM6*a2dCN(fj^T z>;+<8?!Sy`m5-Nn`R4Y$AhNQBg=_?-s)mIa^@U%p$ITl^uTx4Lur%yV zImPF5Cj#+gzIL}Z9&t;7C4NG6_YYl`l^(0ciLV}=L1@6Z$s3Mv2j3Qt|F#|{)_gbw z3(my+bZ<3Y{Q?#MFuIdh?Z~o9&Pf^)ZZtunI@YdruhpqH-&9y`5+l8pG^PQ1Sb&i= z($lk+yd?T-Se`-^Dsu+?O#UTbpJ*fBHawaXqYT0jnYr3L_Bk6}p^9NUL4>D{Ao$cy zQ3;it-rK}60=H@!Oe%N&BkW{Kuk}5QmOV?F%jY=KEcgsLnafYv##;^<7B#rk;~sQ| zPrV*||BTb7HfZwHDGSkGv>K<+>w#S+2e|4f@Eauq4XLEy=uzqkF1-HNh)y>?pvdM1EJIbeszIB?v9!gxIu5}yW+`ski7DN$M&NJmE5rx`@-sWSbYad@>fSlXPF*V>! 
z{|nZ{vEa|rRVU@oE8&E_0~Fy%^W#Sjz&yEC=o}(&tExJK=TNToi z_bR|AM)I?tB>4%}6@bHxDnxylUi~UJ9g%x%oYntoTbz&u)CVRMXDJjJQ-2eB&5wY$K2Q?fLv^xa2-2-l#2^ z#u0DE2$@tWfC%-PCEqlz@owFd-#t(Ey*eHmMDpM_k7J#Vl}hWIn)R=hX8%TsKUJ6} zNc@0_3w1v6U=EA)dk?nPd5{8%8MSWt#QBCM!l4p&xIMs`(aSRN9~h4(2KrjNVDk4;Mx`oRh*hyo|oY9k`KF(Z;|8$92#M^0Q| zR_n|IR-!iSi62f2;-g-=f&PUz`Fx}&T6W|W*y9vB7fP$NCCL8ss;ygm`?EvwL=WYV z#MiTg2y$6{VIYC|I)A0lS23^0+vYt(pN%&8@|BUT+?J(bGh_M1V?`$8iHkdVH;qj{ zmzdy0_CYH~-H;pVcLvjW>KqssP1W9IEY;C)68^sxBlLW6mwRUTSom4l`5!KPLI(MS z4gn{#56-A-UX==5$pNP+dahpVkH2W-A~kOmj7MqSnb~A-I9k)xElH}Q&!^g@Bklk} zy4^m^$*kj9@LdbcbJfPV?{Gm*HoH4?ZuXljM0JRE&g<>=k!BInLXQ#UkLcR3gR}a^ zYjh~inCs#bX-o1BQQHeWc$CGqk>z`xL1*uxjoK`K0^u2`tl)63vMn&`Mxfsg1K(C0 zus3?C6+-;~%x&NL$SRckbr+wyha%2xp1I184uPk;Z{lN19B$U@F>d#pEf&ee?5<%# z@2w>SuJXAX_zH1);`}evA-$!$VjHh=$ggq~_>J9_a~b;CTxaYj1n^UfRk&T2H7z5U ziX{ZF`iEm%G65_4-4ZJ8pDmNI)!hHb#SW_$NQyBZZkb(mbNywgn;V2vrKz!=sPTaS zA-YG%_9yNW2mt49FjV)5o#eeUZEWd@R8X{4cssbc@xoUWvP|8pYUI4UNf1kL+XQHVNV&6|GnDQZe*}yjQ7f~d{ zI#jeeRpMuEzNj)t2OX)Sve)EomG_SQav!{;#MYPV7gBELmkQ+GoPGyG@!5e!Q4=Tg*JF{o z-DTJMy7MP90Ul=0lOl5433OXDj8lruMLJ@%!N_r@tFaTOvmizMK)Nmiu$(*JN@>(xTo zsFtdUOCHw_Sjk1np!(ar|Lc-Yg<>lE;F5MbznG`;s}FfrU)z^&je30xmhjAn31Dr8 z+c2B1-Ix%t>^J6@DV<{P=1f~FM`OE%XICC^^}d}oQ;)ad%jK2I`HTalo=oO9|B4&l)CQcM2{sYkf6*!lBC2IS`dTo}X%dqw&#I^|jRR z_gpXF<|U3m7U!EDtcvkNtLhx8iS1_Dh2m zr6l<=S*2w^aFg@QQL`F3yJ~t;P~s~TmL8Uz*sAF(ae~Wft4=PM8MdJO186dUog5e2 z5FpFaerR1ER!OhzV|*P%{scsrko&uPn3$1K#%v1tmA#wBNm%3qE-78oESB}%035C~ z#NYgG-9;Pj?{)-b_$wMdqtaBNJi(hsRT{^R!OowPms<`yp4%-CS_1SdP>HkLHkS`3 zQ<&RPoHbP&u3ra$Q z(hJWr2&>%7H3CF9e)4=e>tFW)q6bHil>mBvhInLD=73;N*xZa(==q7(|C1m?Qbiq2UEVi z8Sgr#!+JYi$EfkaUBNB`$#>(TtEu>fkfCe8Qm;TxC9Usi^Sh;HMa+VeQD@Hm6r#lk zp*0eI-M&Gp9(pny+P4Is*X^jfS4Xy+MeAyzz-}j!s)w z-5qh?!I{bkd#cq5WLkc6BfCylsp6l)(M3NW@2s7LPCYKemDqAg*)Imq8a z*WJ4B&;=gNgXOcu>~rJGOS^t2Rs%x~`w}e$G?PVEV24x5C1Y9ZGKpzDj1fAfR`_E( zU7NOcPx^Vq=#%|zJ1E{#t{n$5bqjdlwgO+h95*?K;Ojkr_fk{&cFI)h2`7;S4Mm 
zy*ajIS&;DIi6+U?YjCsMt)pkN^$xt>%u^YaVDUJ>!>oOUdcNkVB9lt&t7K05uI<69mo zN|V*WLsBjA9tRj6@8*xWU#8p*XzbKaVxj<^VyDsuC9g--%cn4dfq zr0h_m*1OwtE8@`VjvlAwL9jz~=m2-fV8evrS1{^Nl=-`=mCg`;!(e&P4D5{0^ zLSpV@(Tzw{CsmWw(%AWgprcAu_~rz;2n>LyNIuHnSmtY(h`uxB9Axn2xMXE_R-r?sT1#I`e zpIz-|XPfMJT=>-07k4<=4=644k&k!M_VU&Y;**ZXWdf|)TPt)aL#Z~!^HPD8I|C-6 z83$Hxi!|<141O$@`rCUh!ST?B|I6gYJDat<$hqD9U-RSJlRFz6GPtsX*t)A)507Jn z{`{I22&kjR9#bhO#?$cz%W$X{BxSdn&ls9sDHD2o4R<#69i_Kt9VmNNPu<8|hON%6 zFX$0*_LlA4@B*VBNu!(+6ck!Ni)YOD11v^><1@3l?$K{ISubX?&)s);OQiKfW_rzI zy5%=68uWMm+>o=MLa%TO^H$MoPgX|n(rBJ&_l82|8hi7+!kKJnW1tG5zTNghk0I&e z$QBaE(b!e}T%+u4=e79n%)>&iPxMz-m7APhaRMN|uzK9Z5>NGyaGiIcPGFJ%Bv)3tQKnKimVXH(MS`Qg8oH;yeN9dFz>^yM^ zvW_7|pOgKGm)8odQgdkO>w7-N_gu4>!2K>WuE^g0a9&_-pttBo%A?-t*p_I6V(}uD zm8QdMyv*EbY&S9bEI}bO@i8ye3V5nwwMqq_mag)T`{%H!N zQ-^_Ts;Q0@8cbAA#2Z-@&eV8nuQ85$xhEfYopTU0dsxcrR!TOaUN+!$|F-X4YjIqa zNed6&>bc}?m+N!=&y~b%f^`K(HEhBfTM#ab=92zP=1rcit6*%}n=uBC>fh3GW8Wej zB6;#y(k^4Yi$=VJUi?Z~oxD%Rusjw*uVOh7=U#b*G`Yps74X_0uZ3|(hqBLk>XVW` z+$f_>56BZ&VNB@f!)MUeQjQ!BkKwYB7rh{LYmj|zbR(@P@f zK5>8vj#xV%p5(0D1@A+f-l^i{v%MxQx0;)_50+tVp)q1CrupzeBQQ$v^_^(e?Uc{@ z31$a3*;}2o`$nm4#r)ZVe8wM4Oi+7a;<)c4b^(KD*nqsTAxt-2$+N8U5 z=#xxsg4(vCy%Gjv+ylJ@3% zb;Hf8Mi|LsU@onv-X0FSdgCccg;P9EO`eytO1b0M-{7sQwZFRh#Pe%EK7Fb8ovohD zl1Jj6+E<$`Lc=3yU6#qx!{c)bji(b_k5*emyJ=>uvcK%1%+GtQp3iV>(S>|7c=9ww z@3V8$i+|o0+kU`_huZDF7;x}P6Gmg}KWdf!Wpwyl8l3umL0>ZKpdy{i&^AmP2`)r* zB)4g-lZvX&ybebTlLY3k?TWbe%1Ktac(fC|L&SFq`9B9Ggj;DeZ!|em3CEn$=K3#zNBA+SHld zm{}8mt$t8c>&a~FT#1zwNN~2xNxq|hkMX2dG`Z+74X5e;;-QIfB&xS9&f?dP+4yid z1mWa-N*Ls?UM{AybCICjx9)H%WfdMNZ~%;r^W6Q}8a^Q7iqH!zQKkTD7m zltprw;X(@Y`_i9m_pli{ghQbLc%8q+1~!8)2kkSnEMD4`R$nrzNphvM-#_5f5L$hA zWePM;;~`=fyn|(!=>lIdQES^8f38jfN@`=Dsx9y)6eM0%OQ zh-e9e`cklJ?Xg)SGv;aJ>eH{KuYXAGZ+74)6qiHQKC;<%a8*V36*sd+IUYVvQ%2re zp3CacT}`SiBJJjhORO_WC^P2Dc-kQAV_Yk?_>i%2y%t-b5PWN-{!5RW+%nBbrjLN+ z!HTXU>9vSm##+^_dBvByZ@1HfS(ngceyGZ39kM>_=sO_;ecN~Y$VT^d(i!A4oVG}D z7{|QpD%D{V4RS^Ft=X+b7<_p<+38-5+xria4n8`ZvD^!O{M@`<+HtgQ;9LJlH}^HC 
zLE%E}((1!fxf?tt*-66WeQS@F*P|PbO$9GLJ$U8(sZwOoBiRgtzrC|%z}KrtTbm&E zia~YX&5kUFYiW5SmfrHIP)gnR7{Ro`I9yI&g4)G|uWI%Q5x&r!A-T=mU5o2>RL#D> z$5k~lzR!2*btHt=AnjM5_q<)B(s<%jp||BFk+;Osq$qXo5bD3n8qTzV*KgRdSso}; zvPSB&aW=O8@-Dkva>1oU!;fs;_S9}vVn z5it~pk=(j9a+J!soyfeN+gm|RCN2A%{9*!!`p52Eik9fR(+_(@jnag;K1dIaQrpZp z@e^aY8|&=r`6Iy+|51Q`eLvOR(^33KZ#XB!%&%!ss967%L!S?P|Dq+_=lk{6<2E^y zYL8b>#MU447*+?3$<(|^bv3;!nTJirvEjQ%?3@+<^6YEq~KYhaH-EnupkS$gwE zX>()Mm4kIvk`pY|Vs}5m@JdW7@;n`g1(R( zlv|aYW9b>p*Mf!Y&X7ItsObAlyrT9vAwC*~W6fOsS6tlsBY8issIf?m`h~C^83@G&Tqg_3G?tpFe@kNv z{ono_Rc1*(7vFN_UfHESh13IT(q@)<>MRqt~*EV zWp_IA()@>6@>Dpmu))ZkX*kjpb&AW`do-2@PN&}sug(9KHn)Ln2W`*p znDHe(65Y!eliwKag(r%Nmnd+DBw#q@8fSZctc@1vutob;K|Vi%9Dl3$v&(1o;tz*f zg;=dP4t4v+Qea(cJutnAT(2bt0-u@Msems)Ax5Z0T(gB?iu zkl9OJ7T!<~H;eLP+;t77mcxrrTfF)Hdg~fCa#<5KHZR`LUw<}od0$hL{1!bj7 z?AY4(E6Jq$RwQ(!{5FQF*C4~3U*DO1chh5S$3&0BnyEMC&&Is1 zNnwnU?$HxTg-bh;GEBK;A8}gW#Mg75{)y;(<(Tnz!=%vqZ|_)v9EXl!$E#Cy#&G*6 zdZTZe32z8+VjD}dmApBsvaaZ+7`W2=l}9}<@9$V#Byau=RqoMnaoi_v&B%svZ6BNc z+n+~%UHvt>f2OsI>C|LF8Glr4sbJjmhztqU6tVT(G@0-DpM6BQv{889Z@tv&AvU`# z+AdiUMq7S|HW~L_e8YlHBD>1778QC>D={M>qByTdZ=u;EPth^N% z4Ua(%v@4dch=C31J(_S)vGT2_ps?59RlU~tet(~@jNuAfBi^PATz|Y*;$ilav2gFk zBI*9K8E2Jj3AY6mEGcW+e~qJ5SuSTIVH|am$2aNxGNVH>SHEq!DS6jJ8;modsF=dW z@%v%ShFPzUry0dRowhNY$}sEP-Fzx^7?8Eu3-8dAJxz5@GNO?>go=>J`MbH{& zVZBzPnd=&Ku^mWRaMuVF?OyGSbvcpN|2Q-_ijfZ`oZRDF5OpIhKbD=iG4|~2t~#Yl z%M1qs#iw8Aa?=~knQk(Cp{S5K^b7*WusZ^2GWVDz($W>VA!WOB|5t*3sRr6;7rzNq zTS9`6W$9DrCQnQ;!#7T<6i43*3A+i!NRJQ1yWSYjuj+5ftkvlXD?ZonN{X z(Yu$5#QbV6(81*tfZjVo6Cb!A*#_=91@MHk-bRQkvCtztFc%wFqR&2j@`@SR#_Iv; z*Wr^C(sM=F=aH$XEo_%o?lmTwNaW%`N}BhB*6=9Bsr5a`!i@rRfhm!bb4;hoB070$ z7<|c{#vs)4C5bD-j=R%w>3S^PQydMKrtLCu5}^{rEoB zG)wti65Xx<7@Q(>V$23szFt%);oD_mu$svN*zY~~)NE{ZOPf!iHebYfLa_fKbUBvB z5Bw2A+g71D?6ByaD4fK>rV!Vi_apaiZDEyI_49Vd$F?lRM-7aYIHSF$-hKc85zTT9 zoRVmjUA3x(6p*8GC8+H)P#pT)JXv9@g`yL4PEyGktdL7@V+xjQV&$?3rwUg5s`?I9 z5AiqrDvfMxv)MVn(a#%GT78ChU)pqO<*ODX$yjPSe$2y(85}B;eymc?sf2ZFc@?Ck 
z9!^E2eY@ev_1V{nP^OdzUm+BKj8=3*%ZCVx6OZUxe4xy!AaZ*!7c2@Tub(;-CP?fO zd%kW_`O%{1lI)57z|y8|HSCEBv-O>Ow;lfGUA(j2$mH>Tr7XMi9f$%o>%?087{-cK zjLf6o;$CG1nj@ZpRi)6SegApCFO29IvNtzQeK`A3?dQ^f;wK_b#nKM|!F4zOrh^f#W-W^U|_c)*je} zY8Nkw7FrS2cg~nmNdLj@FDACdval>%Mf+1CahX2QFa)d{6{}&vlJYsMe_Y%ZqlaZt zj<`Uu69xl$GzdSVPv+=hW3QUYwKo|v)ie)Vk1%mw=C65~ro&&bTG za77%e-C!_4Pv)Tc*Olb0t~eHBxU8aPt$D?|z3}GRhA-Gisb8HvpWsw(-S)zI3+MLr zlnQ_)&yb5Q!2Q(S=va zFAqRz*At%`$|8R!(Qa^otTgBWz{n{D+!s69wzp&@JyuYFk4KO*OWz?C=foyh9eiaq z4%l8E(z-pE3O}*c-1mEp!3ta-){1ecGH4!yZ&K`Vx{w_Lx1-P|m|QWfrFGu8IFr>k zHx%ymOb}9pR_22A!GNk{d_4=nm~ zfeq_`Z&Aeu|1BrOR2Tt19@2>KREpD(Qp*HzFTWXp? z#^CuJm&Jg%o)@5hl|NZ>DCu$l!Rh6Ns8hmA7Y#iBo45}Q?5}^@QMdn=7$<<%RE~bG zHi&j#UjsLr-Ns8+?e0h#;&KIF6qq{%KoqZv_AODDxB3NEJn}%rpGRUy2|4Y01*MToJ_O3^mDCQp3jg2FRK{gR$9LD!4 znK->>!nNOX0jv?-WzT+RfW9G^%KH9u%&O~z8IXK;fD3MSzxA?0v71;e=v;4#xDX`F zbriBMO1dc=GIA)Q(tLhzS`XO-W{T-jX0i~N`of#9AJW*jCte~xU_(WI$6X>F!L1v2 zR3qo~>4{^EO3it086H6&HXX=Dm_H}w3MGg}T+g>aIf*rjF~2=~*ym{fHt)^G>Y&w{ zsU)O{ELnc1wwNt+ecIXVpy;9$B{cFJ1DF~sNtVj`GY6xk2gY8CP1@jU@>koh%NB7# zxlP482_Z>-`OFRu*H<~n3)nGoY@SKmKSH}{K{EadMFML zgB)a+esU-6c2xREQ@A7U_nsYEgKI-@3{nuD#(}k3Xa+&tBJsem+)7R-oQN42#Sq2i z$)ZhS1%8ltJxefdPg5}rOT6yW&NP-O*O7bd{@*acN$Y*mX`XW1JxaAF;TL7nU0U|- z`(|NOo~c=~A_h$BTm&4O&M`@iy$h>9!Nj*kd!-ZNIfqt$=M_s|#7Q+_rohnR$nK#Se@Xzm^^AsUvs3@hWyJOvf+4(cG= zE|%n;E1cM#%-!XhMiYD{O$#MT2;XS|)K&Rqz8~$#mFK=0$uqn10iw4MbbboRILJ{N z2fVoA5OZEz10Um$`xY7cfE_#R1iNNdb?*5C20ZeDFUZ3mk?+Ei2rj9R)naYvOg#ELe zKQz@_SW!quPWp-jQP?|Y!(&E8++Tm0SPsh)^vf&?w`q5io+u418;tHendOJbnh6>W z4~{yg5O82sYS9dVNhZm&8d+zH`NK( z%~z~|!e0NHQWMJDR;N_c*`}u_b7irEj|q$7gMV+Eu32yI+$MO1U|bGlL_^9|TnxjwoSH_+$_Q;BB| zVn1+$(@n06tFeJ*aRA1ISCnVJFy7nut$OE0y!nrYAIDN!N&hb4m)(#1E!j zb$#3&Pu3}FMw~qWGDR{2y6cNK$v!JpCtJ)j}s?0mR}h61OW=qy-!V8rb9*ngN#N? 
zg*!lQj)5;BgN%18vk=YB}*`5~EO>8G1- zPFH)Z>Y7R1=fa^cv0;^*P6fS(=_D{)Md4H*o?Q}zjx?=ZJXnXnQ6Kxh7L^a;=w8vW z@NIs+H&N~fvjS_-1Qgm+-i2gS3X3ph=1Y_KvsDaRLm8jIt@>O17Vo|=Qy)YtYX(>W z_U4!*HXme0C^A&eF;EYI(hq*neFb;qMB|KpkCWG7TO49i3=BChzx}X5O?%OR# zKH4hemt#Msk_xyoxbQD4aU4URnR;?Wi1$#wi~G>!dfxL8cwK8P6|&|DR@;+JRhU zu}Xwr_VbaKBr(2@!z`EKHhWn}9`zXr*sa~$>|`UXxZJ()O5kOWjZi@Y@GZQ4VovmTRY_r-k6Qf6vm6c42R(Ec<2?4G-znxYwWszUoZhPUVfEeC zVv&RCHUCS;^FM`T84OhcK1o5SrB)#Fx7y{JP!`%V{(ipiS#858bOp%Q!t_IxfxN%* z>Oyc3il5=WQ%^lR52ITmzX@y535ISZRwnkzAZ2`RywX~6jI<=D?Hq1H=~rrmLiZhX zjJxlT?)dCR?tqGuChAIU%*4p96Qurp7cO4+5wAr~8iO%9{b4;hgDV;t&oubFxMNVfCr$3-pM=qOML`9$cz=BFvkjhoY&oRV$ zcdXecwdrRE9)GU79f`ZIo{YxqJl7DqX*0YE(#u>Z=yTJw306F>IZJb;)9S7J-~Ea* zPYtEE8;Cjb)1>|>bQkBOV9gaX#DpmzFM)6K3j(lDtlqF1Nw|C-R2VuVQ`@jNBSpoh zKEAoeJ1hM#;N{B)DyMjgm508PsHdPY#s!(;{IQOkR}66-{nzpuPowJ(VdW=tif1UM z66%)JDZkw!~l`#vWx z-RD&4t|~@hBK@&Ws&p2gXXtU8)s}>#L)4lRo5X9*18q060h;@KN(z<%!py8B`v}n= zllY>ccDmj44qwPr!STrR9WY7N)za4dpC(QQ&Vg#m_g_kB+!-WI? zlp^T9S)s8m;fx`T(YWQ95M{t&T5rM0ZJ<@s{sM2Wf%_fux#xmSy+wLU=Z4;^&~An9 zgO=c%^zo-fVmhGnrXH&WH-v&>QE(2jYe2h=6^@WRQ} zuN?A$P8yuz22PYkwwxBPuwguX8@M^|)e&JzOclP27p?0=-Amjp99>c~nco}|9K|%o z`r)*wHA<)&q7?Naobp++^im4sCG<9t2ea@FG6{`wD3q&pudna-e`)ZGhBz55MwIyC zMVqKjc{M-JN#B=O)lHKjhY-u@X?o*#5vSRr)#~s)t!rEouz>*qQECv5wl`I>DqXG8 zFiXKVSerXBp6f00PJpZxKlWXlKut*1$2+}_W0fv?-d|G;NFP8_g%q6zC89A%{PQzA_E3L1$(>xS}xZI8i)*#LTS1q7#bSxQCkBk!;IMi)ey zkAjbW*w@~uaxFG01HcjYfUKtArTsE=m z*!F}0w8B)(v%OMtCntGBPicVdIeEli1JEq#cg=$Bxy>GlfncJn|U75!uXVF`f*$@S_nZ)Po@H=F{1>h zF^#NxqSPiu%uN~gYaZ6ZLeQ~(xa#=i<(>jk2uCkR?i5u)ujkeG&2G^Vg)xp8%kkTy zRK$oAy=$*2$ZH1Jj#n@5thKp8BsBXY6U{ZR%GFz;jYO)Zp%)|04zjqjSo|)G;Z)tZ zM9#Vk{g-$|)tLX<>GD3%5dSp;p}Gd49`MSf>fL~~u^1Z*%8Ip<(GNX*(hoEV@}4IN zvIz)4;+fSXGZp88q8MBwNQhK@i%;W#EEUXN=I~zrW3J0dm~jsCn^&H^npJ(;>F(dhfp=0YE4rI+;Zv=X> z>HMnU#-3+-16%MDpDGA@ERICAIPNcH!h&I?*U;^$;crIPHo^oisY>0iB>mgZkG&tR zN=s+k)8^E9s*naFT9sE6@LtrOQ~lh$<})P88&S?*7!SDg6+yJ%Zl8K)I#$tC@x;Zm z<&J`;)j_q+lUQiwhpU^4FZ)SHv~ZILghvHmo*T;w2{bTk#XmnoM2wu4@ZKq4k%J}m 
z%$qR5G}yAE#td)X`d#OJC=HJoJ=$NL3;y*8FBB9Qlir7DA&^hylt|y4)73X4Zan>( zZH4YP&hwuOjr*AYF3b#R3^7&;2%i{w8QGDs*mw6jan6|g6;PLDZ7?Y{Z}89bW`EGB zPICIv(qNi8qz<#dEhI4=_kVHq-GNm8@88D3K{yB5R_6Ts&;8dx#Ewdu#Ou2lW z75;9SggQD5_UdpJaJ|Smc%3?yWSq8sqkY7FAIfEt ziFpx-Y8vH2(Y*#zES1Y0LfG4p6Py&@PG)uQ6$0RwmO`BmI^~7X?GNJk_8ksbgT3YMz@i6I;X*`amA!ZnN1mB^*bAUDcisRBZlnS5+iW_|lZ5 ztZfw^6%)09ox4IUH~}KYL*yrE?Cb-#p5){UI-22%|9K zf4~gG*CJDM?FS;WcR{MoV}QVZ18FDtPX$^76n3FXhpS+NsLy=;Qrk{UYD@a-sH&Ahe@8ix03)i72Cxz(3K~_OJbjxfa@bYZ5GV4YV)< zCRU&^Z524=GpmHHtM!1pK`Vd-yd{b1s{ZfLe+0cnc#llKN42y~J2@+!c?6~{#7Kyd zGyz-VHMf4fln&~3u17s(Jck5CWPbSqkE$^6iUgkdmUAO-?wI<-($c5pPp|S6O$Pr{ zqP*Gn%Djd5L~fsM*@f^DKWgAJG-;y)y`m{@KAr1oBFF~=D$*RNK4gEEYFWtnL65ai?8jnZbg2h-lEBylOU%%2RS{F zsF5oBH%WMs{Pnohq>mq|S8LJSuAQlN-j@B7Ij7}&#FUPw=2=iE{X-X>%AX)#d;_%z z#Vli8NPhJWbtMqK6QfC@h|K}L{R@}BFWXNO^g9hLx4eaqy+=lCS!Qmuv=L9KA%0XA zBM6S&BT=xhdm$=8Qj&phbwWRfj9`AxLY>!$jMsn9_$!h6a&cSE6y7Y#8!_^=AmQvu znEec_{9wP}2}fpfe_|bm+9KyU8ywIxAN<&ae9jf)ra#r}S6&~fFO#(8Y{L8fmFGJ3 z!ydaMoc<;frFQ9k4rFHJR%ooGq@;jBi&J7O>O<nNn(p7NWCv*GM3kw`(VeZQ$$#3H>Kb%F+U)8lT9xc-z)e^&vy zAtDM?eMi9N10!A*hW!4)Zg$364?3Tu(01xQ_2uoutOfruWi}cfC3sV*0;*MqYiI1UV-D$%{3~Go69JSPe+fpLVaVDE1}tR(&3c&4y-kS)KhbFr2<-txlR-VnRO=S-639y3YnL$>41K>sQn6{eL}Qbllg%^kxsm&h zx2FpAV=gB$3`MudoVl-j{1u2xn1jlC$m>a$i*s+LjJ-(N+zd|E%2h+b%F^QT)m0vG zsUx{sqleayn~Jp?NO|@n+kxkUQ?r{6uNCSKRU0^ut;=x)P*NLg?c@)k@2nGIqw(5x zneTs1?ml)_G#iPGI#^MJ4(w7vkyK#!Y;A7oaHdGYRDv*nDMo#imYDCu5KiVBGyJVt zWz5HWjrF|bKn6cHz1e(*{(Sno6T+XN@E!i9{%Gut)Q4)gwp>KZDFg-0pu|kXvlHJKbX?>=#G{n<1D>|gWtTM{ax6FAq(06)C6nP9;s8Vop z&zyJKb@D1dzHnRx5Blp?aQlDKop6rFk&rVTFW243z*jKM$xrb+YudD!Aoz!aKhEiL#m9)a z)EySVZ)kqUL?d%E0zrHhNi6M{%2erPmpC+egM#ijtPXKo>u%P!#h+E28WDzt=*EiA z?&CLT$O?)M1|tp~f=>OK zK7hDCON=v^&H$dj2`{Y9IioDT;IeGWX&2S7OIo2t%_rte)+38YXqLb-#7gM3ZPcBl zjyE;zA+YDk%3nwp=;zyFX-UbI9k4OhY3 z8s~{`)GlD6M;XfSLB`_D$iUDCr+E4@-a!Bbf+GTen|Rvjs)Yt?-Cw|{=5N#R|Nppg z1}wAg;E4SNXVl(SiqY4nlBqYfmbgtkO|QB#!v{^1P}EU#VoRHYg85UkX;qp3HV4wr 
z+-YCpVZdh0{M*Z1_1y#QTr#n|7>77H%V$YQ6jRZ9(ymH3J#0c`B z59oa>Ft3X~aj$GX#zJ)~;?ove8z>n~XcrsnvH) z(g@`0{n>_mv$I?3A42MtPF6n2GUc}Jvv^AM_kVD+vdZ_2J8SW9^p>;Pn&*e?u@gTf z>WZcyzWy)*9XFS_6o~#af2wRAx(q%s*SU2R7cv#vxF9kKHX@to5A3i{se zRT}~ssWq z$Nlw6mH>pM#94`7t@(5P3}Nn%!1xHw>Ymhm9(jjBh#_8%_kHjI%s+@4y(u9^!1dvJ z<)Es?v_2jD_H-6AYY?pedsf#cP)%Lm_%?r1neHbxzv*jm;R&_d8t*7(BWs_5*X1-E z((wv8U)^fkXv4xICuPFX&tZb;$CzOIUqiOm0)*}qfrcfSI;k&>RrBhu5A{hthCtou z^HMA?mO#0Vth3pkAdS2V59{(-KRUoz8xb^~uSfw$59*ZMUaL}hvZO>ka z!=g{>y|c*_C1#DLrQT}jZrJZ1NbOaw_h+-5!@-kRkF0=o9m> zDB`)zdO<*O6E^0>a?U4c)6u59H^pU!)FkCeuu||TosKbBM%cY!AMA3k!=YeEBh2iw zI9Ga4<)72h=yHYq3j%`5Zsfq5<}xGf2d^eXm!#Lvkp#bd^EfMBRJt| z#a-%der=unSyN6f>_B4+)uzeDt~FTg4x$xw)b2uy7aHHuCwnT282rRA1|13%!A^0C zR9JV+1oG#%)0ZUkULuX~j0?0VreJh~0-DZqb;lf$@pB8l4sF-JiesRUbyIL7lt zYSLf~dE3&qTODznjkUCw*;YBBy^UJWyxU}JWsmxf?Zh<5T zr>Aidg^UeAZu}x+SaN^J_qS&muJqRWVhFIIC!kzRe?g*~!J5t^A3RN#O0r|u3HaJ*C%#*IBSK?ddFqcN=&Ej_c;ha=WhjLrOr(%kfdv45^Pbl_a^A-aNF>|&j_;!W2xCKlt09m0w{5& zd-&AHaC{N4ycX3H%VJ7rUkXKhq~`b}r^FO@1aCtl8pp))2=T#(rxeK9*L8DVjX59C zk;@jDLX1pp-@7ztOkdDGE;X@uHm7p3smr9mT_)QKi?4CdVWB^n*~GrAa5L|Bsv&=g zfx#r054<~aoKE+ENf$U^Kz@*YL|#HpJ4`@jKo49NVSN$5tBhUX(eKpEIWFdJ+lv=VAs#&6;5SJ?in(`nM)rXL%}2h}nJD+D4F?gSh7Mn!RGf|A z+TDVQBY-k7$9i>+Qr+6Db6JY|U-1MIP@H~XTp2GO=X#OiPg+yFj{hl}D2G&n7B#Ry z=-}mX-XiNG!z^BJO4yDAaM)tRSVLY5GN1O%TXIobXP`-8WK+dPYRXZu%LWwxm}n-xNbRh8_HRx41!iC_eYVSH z|CzqA$gIv^Rp4`P3Ly(6535KIolmm4s2Aq5!5_Ch70}W8Vnr!*lvN_jW0}RoDrOJ= zhP6WXt^DO1&L2zZ%*kteqi1qN!?s;1kBr`xKe+HV9r72S(GE*jX}Cp0n{*h*;p&CX zk6{*l@WpCv3)Z}@x17p8(Itqde<;}Hdiub}r9tNz>k_L824Tq{B*i(iZCx4u&funj z729{-BseY<7l%pizuU&j5*&k9dbb!U7TyeyQDL33=)Fhr4)2zHesQmpaC@jZn%ek@ zXU9+)Z|X{Yj02R07wDMj4sOn#mW${cP|XoC;+O1ZG%OZAc$q8$7rY*^wu=1u=Zz;* zG8)f8>*Rre_bb~5Ht2C>qF@maVF{y#eIa`SK7Ty@1hleoUIXn)W*h>F$^n<#6(L62jyD#G#Yvj`d7Z}c z?e+>@!l)!(u8*am!>46nMh(4#Vfr*#-DGY?0=AVpf1-jerj`x9`cADHkVf55!T)KW zh*OfV6aiJT=Y#Zt3&VnKPE-1y-g$h#N|(+_P&bCSv@fjFx%c7k^!ro9G-)i|sk?!e 
zV^oCLUdS(J8=|So05r%g4PpMXsh|(>e4Z$XIGZwGA<`0@J^cJKFJ5ccr8s>0-vgId@wRVlZp=?cAo* zC#Ncw0f6RkB5f;}ORQrQ=xPU_Sh2Ksy`bFvOvSH=ELZ)xU=Z^z6g>R4j+52|V=1#U z=|jWFwe}iPfp$kZMslagkL#^kXIuZAcN}RpvkP7=tosE((Vf zye|2gC(w9RPRBSk`$_A&b49?>6MvneQ9gZ@Xn6Y=oa4aQEO2sqB^f)+k0Zhk8JD5@ zy7>2pKfgsCA)LZG@IW|dx>O97a z5XE{HV9Co=py__QfONsO6^faZEj+w3!y|{X!nVIq+4qhMOr|=%&?jKjub^D8kw1~t z^J&Eo^{vLvLJM=j(VWew@7jLsWuYMxW`pcKskmPk;7J#K5{Vgr9<;QOJ@0lc$$<+$ zEJBR_0*sJoS8c~T`fVXQ1|Pkics*!5bdV$Z>il2?3$Ma+Lw4bONf=QhYYQNH zG=4S)DoFV6m_U4^0MK&IJ7c!|_Q0o1Ch|)YeWD6#BVtwXodsJvx0(7k@iVC?Rc*bFB}8F3m=oKk z6$t(p?O$I3V3fY-V5JuWt;YwW79|pIj`urGY6KJ3X*ow~@JIUW93MsOl;*@yV;89Y zGbOjRf>Z#~)bfvz>)@fYK*$$5c*-qn?oWBxftJ=#N6C9y--Sv>{KWdoz4Z1&12l6< z`)=LYS1TJi`m(Dq>5<>K1Fe|(i%@GWpLZL4`{Du%z~nbrTPwxbz=9koC{Ci0R)m#O zCJZ zSN?$shHYU=Ofy#E=M4|D)88r{Fwcg+MOGo@(SzGRUHaMK2(b+*f#6@;zWcVApB=^M z@pE;v0u*=i&Fr_AE~ryRR6SQDit2ZpBgr287&35}nIfN?Avrps4lS3c7K`h-X@`IR z2HGqNZ=heJf!-@e3cN%xvI+g%AJ)6Y``#LC@3>W5e2;4r>Nwtu4P7fl9ExUIKj2tb z8)3zBtk~?vJU9ViSMk~0Z z@^eb+FX5Y#JgkCxVg6+nLZ@}bj~OS6c_x#6nu4H}mwkR?UXl5_$5$_8VB=0ME@U38 zDt3Pbl^WJ-G0=9q;t`ziIth#mzjF)jmvDa-1TnE4=x{8xqjl~KninC z|FP97x=I3_3qvYBtu~gmmIFSdR%CXty*w!_S=u{Ll>$R&u-fy=m**r9J z>JQOvc#G%j@U#BWD(l1WFO=$dj90p#%KH@|C3q1eB=Gr9W%K5a(?pN}6qNI3> z_{hSD^HrwO%x>b@QHHKzJk}3OpMX@p25>g%GW(5Tye6yGPL-OMmu-JLmW}UEMw4A?Sy}=IoU?VRh-o5 zJRI|z!m$&WxvzdJUz{{PCbMvK6$zTq_ws2-LuW5UY{_mMcMKhRdXZ68OSmyh=hvFP z@fgvZFz26WrA5f-Cy4W6IQ$zJ*#s!DCkWS@_Ff6DsC=eODkt1Lh1^WSDw6a17rz+R zlgKr?wW7q!;s>psJTF>sxHSj@5XI-O7qf((&kPmGPxh(=;NLshPC8zfL=JQAw6CS^ z4^uS>s%bMzNF5$~&T_hiH_12jsNzOXDfF}N9EMLj+peZ(o|k!Y>GK%TmkyL*OZ z8%kLx4wfH7O5H><43tS<57KMfTw=*RzYvOcNeM?4L`Gge439oQ1mC2bsCP?wDA0Oq z9zIm1@u3mk_}B8&2e30*B+K_Aqo+HxW2(ofA?U%S-1k)(zgJv4(cC-7dlS^EL4h=a z%#Y2;Gog+ZZ&DSnWN3vKF9S4JB747)l#^j|4-pTbSQS>E#86+a+$)LvuJ6ViRRZmG`vpKTbGDq+CLLql;DuNma%SDA^7rKs7iBh(P<-{ z_N2b->$PZCIa&BaqX`ZnR536E!W^l#S`aH@_VK_T1Lrx>^Psn*wuhJ>MnqG%z%haP zC!39$-8^a;! 
z+!Ur(|0X$Xp>tM+ZXKi7B9J}~G?R<5nvRccJurq2{@w%l z@auosRbC@Vg~V7|b-vY6j)B2NX02ul@82*>UjXI z+MhB2m7VtNtx$G(VMq+{xEDZeJd}tY#zrsc)Sq6^V9vL!^YIf|$D4fZd!dW++2CC# z0cooUpr{|E=#=N^a}+)3-3{dpgi_RH_ZJ`qPm?RjHxF`i!2fzQTh}GC20QMVVYLr+1uq)OGyR7U;BG9Z&Rq-h|+hxIJ$Zc;qjiI>2hdPFqDg^x`hcxMe1Oz;v#X zKf*=K=j)s)mV-R5)-qT)vGU|km=~B$w@2OngFa_Yp3>9-C)BEq=s#8q)wC3Mu!q_) zoVN4c6${r+w5SBh=Qn5m(Qxbdc(=@X5kMNdLJP@;PYSBQ6un zS3P_ysGHl_>42lg<5Y-H?G`P!$e|4qq`_v`pA+0K0jMsN7dVb0R4Rv*!e)l~pJ6)f zywIQD;{x$Q*p%S+825!e?2GIhqYsO;EUh|!NBu@XPGYy%#j8wbgwg>QuxVjv#A9Tq z&K)MF^7BTN(uALML$zn5bz9n^&$Mh1sS^%0TpNoh+JNdTl>3_$04tvnBtYpkh^Ip5$!_}4xT%b-Rs1&~A8Mt~6R5mD9}O?I)q7>CG_ z>JFsYTc60D6yU3f<4Rk$9$lv|QL!$p&A(u?{{6~(yYm^=G|Z%?z1~@b@T#TI9~%W6 zI@{sXrcAUsDhB1bfXp{X-WWAN?>A~<%MsGg(1y|8Dj&12cROb2S}aabhO@6}^Z4DZ zrOITJO+<<)W212IxMB-cxkwlNR8xz~F{IIm_t z)-E5g1s2XZBQ0Y)pH8h*|5n(w32A=(R_JwvyfzZc%e3!A;*(>k2H1n=b1XGizP;Zw zxu$V2DiFWj+l0K|7_IEaSwGK(iuY0>is64Y0|QW#=oH6aa%6r1=ZV*8f850 zf73{0j_!f&1BX27lq&44$3UgYob&vY`w+ozaacMaX{Yh__mxRm6DwG=873X_P7LQl z;}*mhMP>#wZX*EIpuI)yqFubH@QqfW3)@Wh^^UjvWm(_giE)`+$hc)+qpGAQ&1r|h ze2z&GZtNTGELvbN?^27I-Kz7EV=}+?xYFew@E#!*0arP}M_Y;sH~$5>09Up1wcxMk z6Kv4~jUu5N)}>19Ejj;jH2}dl?J3i4gk`$H1Q>X9Wv%aJ%x7%C%=jVUaf=Pwk%n17 zJ6LrelLhU0ac9F>k47Yd;_?-gnB&*4hPhA%fqgk&a@(5_9yL{0_DB@@Wz8*X5XKNf zdxe#Uu78UhSOD%Z386259XEV;D|Pt6`4d=!7jXi|y40*Mq3U$k+UX0=*dY)HHz6oR zmp;AQ5ynuE=!SK8lBJ^Sx>`P zsOFnX&)rxWr*X<;XMH1ZzF*U~NAuIa0qk!``0B)Tq#sWbHtlOe=c` zyEc0pg%~x;$r|kZd!$wK&cQ1&pv)0ij%zH_mO1R)68O3YYv-qe#`Urh7ymYrCMB}Y z&(K8)H0*yu{`}rj1y#6Twf|ydI~G638e#4V)q*mw8eFp$}pBzM)%}THKtO-a;%R%f5t5a zFzPtpcCZKpOK3<$HvD7l3Hv>a=|`WrMqqeaX9vuGWoI6%_g=wMiJ|Ms^!Wbk`%v7_%M7_pwKFE(Q4&7m&gOk?f zKyM?YvbTA7Pm^d?Ea#r;t)Bwsr{6nzSflI}FM~`0Rd6!AW5aq+ff76U1ks&tYJU-m z0z90C?Rf*tg_pJ8rvH4Fx{EH8gJV6&`efWm4QwN{C#f1_T=NJRA<%&wL+g%x0Q*e;g+B)m4g~#j`?wzLA|g7fIL)&pYumd zgOYeQgqVB@X+;4}Y}@@rd%c@0_jb%CDN>$vsER55R3vKp?BjqhepYe*XgnRdtuF$r zt4l+Fge=bg6e^08ynYR;fQOCOhHaXw-NOt4x8)-0?vNY%F@%87bo1a>w{+Z;(&jkC 
z(F3|oJF(%rNbVULfCMSwx^Q7`?`zDv%~mT2bsBxW2E4^#G{AFGK{0uJk8|Yvdo%*)b>z_XLjP z0S$68n%2Ji#K!zl6Yza^bXvhnRWIpsyb51=T0*Gp#uB zP5J#j;etOw6ZO3)^4??;S>`JwqTooSctv~A5R2N2p=b@xVO0_7b>!iLNL+;+Pk&kC@1DIFf z&G+3F#qL6fL%x zn~(>g6meI9-}00}xia>0zyqwz+UUtkao1hJt|7oy!~EM`gm&(>{smXKliFLEOz7M6x4%*+86X?QFIV}z z0DIx<^5BSCJ zE7d;wBBVr`@0=>Xfy$uXU2mbOCP^w+RLeb~@@LQA9^okOQ%<@F>L&Lb07(r%!u;`B z_m8w&z~GuDh9AS%vTF6&>cH>=R5t0(e$xw6w_<5&i@bo$IWtL#xhnkr1`*Cs3tJB6 zJeN=UBOm`CGEF#68>btHl!fn!;EoUiSYJ>4$>aafXbAI7fM5~d^Hx1BK~I(KccYn# z0)=itiNm2v9D;_SgINhn#bqqO*EoTTU~h!_ z&yUjy-Y(^{1j{AV{s3q%IuCZ}p55ESbftpz5J9cvNBF6J>1t zAmiuq7*<-8G{FgN`pmm|p~UB~W%gli(dXrIl5%?e#~3+_PD)4FeCi2pCF0_@1OYYw z&XdxKZj7X9`_=cC8}HL8mM9<3apv*T_PBdWoRH_=`duR%^V5(oLhAh`3o@Z)I%siP zFCVD2GjmLcWdym_aJ}QgS>ZGsxONgFeF|>~in%cT`@_r2+$&w^b1`~=)dP-iN$p}@ zsG2+8fzN)O_yy!C1gU07tRa8ZhI?i6pAUaSxGR*O_An;={GO)DwPRz1#DIeMnW(oi zw*CH$f5&%{+GXHaRGk65q?t^Oo@RFxB7KJPE{b;M_^? z9TqrZ`)Ob-FhP%d6&7~ySCmDPus(YufSi7$=qUmk6i4Ac^J(i4rP5f%ryk*`>3v>& zpef5C{d=QI12~?!qEz*!pft^x{Dy=^A{hCnFd_oQ_epSvx}{7dZv73gi(1p1|FtX_Y~vpzw{PYyMKlQ7`FoG|GjM! 
zYQ#StMgtQeGVKN{$?veJ&63E*Msrg{NME4^es@x5yc6U;j|^KZrF} z%a4Gw5Yd|zXO*9oa^ozj>Msk2>Kp`$IIH~d&14;CgnuuAoHYo=1@$kP!{|;dhb$fL zAK0TM+F#LYd0k<9&kQJr6*U60{CLwKVB@)wnTkvv;G!AC{yoN+#9U};Lq;yXZ^?$6 zdV1PRTY*^<+w3zPi9Xk-MT}JO=+9?bt1>)?wQ_W;*$t<01ULIaEe<&y9|Tdx{A0u@Mo^Yu_LR%r#}(xA7`*3i zVvXtyVpYXIny+&DX}1IkX!!Nxck_wBM9#m*w**52r5!vc3|j`FPL{zPls3EaaEWtMeb4HD!^5XEWJdJ z8@K?glH7cp!qA_q{rIr-dp}k&DsQ_D4v8O=#pb-1!so2{A~ewbZ`^v1owlY;jI*+_ zz4dtj&4&s6wcfV?oIIY=3_ovIF%mfK++{*U0`>lqZq3fl+doPb!=;R_zIz}$#0-N| zbjaOr8&uO6oGtob450qSY*w&sg~@w`TaO-{G;V=L8#;h#GsBdsRwY zcv-1+wM0D%7G$WlH{o502V0b?9t+>XFOWA_41|Z?ji%+gXY+tv?KSj>8pU9e2#q)* z?&<^|LhDE+yxL_;(G@Oz@@VCwRmbY_yaji&LD=wsS7F@+yJ&E-QuIsz0=lG9@DgQw zD&RNQkHvB)MLwdU%pZ5YMJI+kqYc(uU-96d1PG=)`%lF%tOy<6J5*3^TYz(jdgwhV zV`2rsnZYS800+^qmv>9BLNMQ@m-?v{?3DXR#c?i$I&cGQR54N^9j~++tRjN?cds~F zngvn@3Oq_)jH%GI!4OG_ORW}6Wjc%cWi$oQMah^m;^>h*QqUno-j+ZKFi9LhN6Qc+ z*XqIS=}>$BAtky@_>`bP|9WPc6y;E8@bZ-S)}jdKCv~R4;I1!%bBiw0K7A@ z@8>$Rs*nHG$&e09!jXmf`=6yKKQsN$fb|p2j<;0`dYu&3I!uhbrH)*(gk&Dh9vo4{ zPeLCCv=XrTV&$Y#()z}t7*yz-fSU~;_r$fNK37zOF%1LXS*R!IUz^RQoZmQt! zxGuGTuStAhJ`j-O3U5Fm&c|auS*+NOMW(zuX&7Jajv`RnA#&Z+FrFLRLu~b0H%amK zpbi3&2x{cs$rFvM)Q@U_tID3ZZqG{fQ|f^`jn$`ln+jVUu9I-e`iFjBNV~JKyV4wq zu&+U&BrE4I$o#P8JxN%!3Z3h-x!h$3pjz-Kx8-6fd~`s@7q^Q6Gp0QQ-fETNeT)9y z3(QwtVSx37u!%ON>yOopGX>=lRA>6YpC#P#y>U4R#oGu;iA_XhfWq%)GC&5srlZYeQjMC1vSz@a=NCz~kS`i_~nMH$Za) zIw0Xh z=-?Eeuc*9V_bv%;Azic{W`C0=EqvehS$Xg$>KS0R!SHJVa>t6`z`w=9EtNV{b%&sA zIiP&|IaSGMrFywC97Sac6d$~D)=2LxDl&6mQd%w>eK1-ax)DNTdpJT%e-1!jYOK#c z`vY#ZXvlML{qwiEg?VDAaDsBNrs?i1`ocdClimj}0Xyj&_Tt;R=wyH~w)x}+`B9hW z`ia1KE(OYoH9XLA5PoE#fiu2mR4y7z;idy&m+VNja~_~d6t9`nAB zVA*1cq~Z3! 
zHS+_e0hr4(vX>+U-xod)y;0KtKH;f>LFg?0Fa&as`bH*A>)Z~Q)O}4meifi@GB<7A z^amUf{4fiCu0C;ILi}Amw&>IFbb_a%RW!?9gGg&h-S@7m!zMUGDnh*;_8+4~=GPYF z9Ma8?O7L3#Edr#wP(>Y^RSPCiVNB=KxWs!Dat2Zh{_%c%I7E8o!;`$PpI(^%J>%hs zAKr8RZC$bB-bQCMwh?kw4DV?^#4v*SKf)#`Dc5h3_|!mZdud?53o67AybeHQXhF%= z2>sDeQ#f_CH*?QHUpJC%?@xH`4R=yucNOUrx_O*5kVwTkIc*@Pd*84NKb}alQUgsg z^BBt`9~{lt8K~9d+llg|X}e>&3}RFKbS71l$SSIW`r?PSJ5UylpCG@#gQ3d(D5@i% zr(Z3}Y{E*X)z|uf2aHgDu#aj6#;-I;)*?}R6`dy7{O!)@VQB_nqG%ppU1n#UC)_Gy zQ|DLtNbP_v$es07T3kl$f+(kub;C5rotd5lWjb-=n1nrw>~Dv}#JtbvoC}$wCwYrE zJX3gVfolDZ0C7G?Zn_t3g7PurG!PxNq{3hRboCwsod!~;o;|I1MZxol=($EwvuvGUQuPo^!C+c-$G_HI~ z#2HAvWZB|u%lF3TX)a%ZykFsWX*e-{I&1wLYQ@rq@?9KjRrBYifzRdM!yYI#h`g8Yk=+jkm8iqRYFHyqqxdmF$!>~|B*;MJ6Fn@M$q^JV($ zz=lC=j$&FJi>(sE2xu)z+&q}c;8tRU5wuN0BQU)Z_SsI=w=IY zgB{~o29Vwa8su(b(V}$LcxLPDv)ch>n)}@dbL{t&|H_VVy`Z0SRGEFB_UCJ(?kU}$-!b7VrtG1sX zoF&X@l^imKpWG{b^skLJ90)_Emqw|UJ6a^%q6cpA+t#_;p4#wX?q%&e{c%Ww1j>*i zozNfD#_X6#A_KUe>D6FdIM@1umDxMqJ+eFxet4b+o9S5!Snynp z2AA51#M3f(Rdjp71wGrN!znijHtbM)<-tSBdpSrh@|H4o-aQAXoHoc!L zbBO9#fKeK+yj_jnJjAV0mAc4Z+cWZ$&H2|+j2t2skP;nNSvj&abf+qbHNEFE@cmQ) zc@18!?VCydPOWHQXZGe-E6|py{%h7ijJuL8o9)b+LqZ>TDTe|~+|WnR7E@yWBr7D;A)MU^Za#oj1c1(c^-TgSB% z<@@31q#U*YCZc1YM>_{<{NXD5{ZITqtwSG+GexbbL?;L;&H}}@N5WvBZSJ~_K1R4j z4zQ+`adR$lw4&iJgEx23L}OWv2pW75jX03#6aA>Iy8#;MW@UOYzzd9$h%D<@NUpZ$ zukCf|RukoFs@fWOMYAl}y24Un;qBx&>9{2)@W0n&KZ-y=(4(#SBzA zcB-AtV$L-td4vJ!U4_2u!6D$l3m$-d>nFRLibyk% zVHcX0?*FA#e%n*r%sG*V_2H+znV$k&5+mx|FoY?D4&xZKDW0&KxbF};!gQD4-ufPI z`hGc%anP)%TOfvm#E(+}J~HEL$2SXb*w6dgGqP+31Y2@*lH@ zHqC#5Hv+FXhplSMDI4Swr&0tfmPdDUtNzT3cq~7LX}^|@RzY5Hl9#7d9EMWi7cni+ zoowb7@84-|>Bw7uUT;{*Sq!Uk^4fF$^YDQ>Z~YKPXri(=RH~0L&UVarQgb|ZTx<~G_@MU1D(H1?d$4a>D;Aeiu$ch5f@>ne3MTdj%+kL9kBIEV0w#><({EwZH}D{B}&FK zS^p{F#CxixJKsycU^+AP-HQgW*i7cmsLwMHznw0^$;0WMhzZ;0jPtVCOYW;@DYxY| z#ocQ}de_{JIw!7EF!#F>UO$fvUx#`BRxgRcaeX9(h2dtNUsq28Hof+!S4c+oUh7HS ziW^F4>?)G-3q3pa^i|iw4%GAE@Ae#)5I5R7M&P}hX7%)%ach=Oo;_aCh@=pg8t>@h 
z=pN=xBXS!K(WAyR94cBCzZ=lmF9>PHeI_@NIo9z+G{3Xo(BX_jm)9liYSAB|6^^;4 zWxB}Qq^ux@J!qioLgsx7W?Zsv_lh(J*a+hEWeLZ`%+!-WA&<@VBk#{zf*A`HCL|l11&5 zZ@r6We^zEGG;LfnT44BjM=~iw=*X99#_G!8@CK>`y^8IYefa$wE}s56LlxnC67zK0 z#d}Q{&vr#d1oHk0TOO}nSai9nIB!OVl_F;ky4=8iCZUlqktO4s5wTD+Hzj8HfM;N7 zxK(WHPQ$qCVGj$$e%_6|>@B&{2Q_OKmw!bBUmtiKWb_GH6xv(nD;ap9(ZN7RwACrhv2>Sq}4ha0q#buoPY9rZy z4hU%m3Pg;={ver!-qysx8jwgH!CY2VYdgFsV|q&*`4#q=(=8!&L^FaqrjqsQ6n~cK zbu}8CE`Ia!7!!f}@LX#)Y)~%wd;(mj9pA3_yTQCS5arFPSpt(-U;gk>3>RKLEBb~T zJb`fMU8IVBHoq@y!yLxgF#PI^MhkLFN4BvmiY`HK*i>)$v*>uzQvQquSqODZ-x~P$ zs;9VZiW1c4?beJfJDff9?*m7ZLY=`g-$tsQ&j2m@zW;8T;NW zma=56EMbhXleJKi>`Nk{2*)zAWKT-TUJ)rRZXu#*}T%IiQ$k*IVj^pUPv@y?cwlPItcq ztP0&M(6r$*Q;zfkWFyxgP3u>>J6?JvqdLnK;Ge+&-2!A7`in1<>-HEE-ElAmOb&}- zYy!Q&03e5}pgpmJ{jY&yn1h8G52r&XTbUzaa`Oiu7KZ}c$sR3uq5D8Ms9RVg2B{84Vb#fFO*4> zwgIi@&19XEhkCLEbyeidl2shRn9;F>me_ww$y!Af1Fmrt;8Vd2ovLn|M6Nh=QxMkT zFUe9;v`!)fb0Bd*e(3D!1J1hp%yqzv^OA1UXo{Tm$4jn!-5vB!QISqGzG`tnh=E(G z@6A_#(;wT6m5lqpb0yXtoDdcaD$DhHl9QN%|H1|P)EZ~pVdT$FJ<4u=6Sy$r6j{wgv)6Mq5`Re zg;!O=U}BSc%`%^_U-b5vF&3mIw5AJ1$lnNXwbK=et3b$13Q5xXyT^kK(>7*aTzMmC zS|BU;H8=Eh0YE4muSNehe|w>mcy$5#wqdO5LN|CZ$$~WVWDux-Dia8B=ZphPnNY|N zhrGAYr%JoyHnTU4szPvT4iAI__7&+H$3Q<|=?P<8cKfksjt235hxkfC;VCnx{f)!I z8WBokOCj9`_b3ovrsImY0_UfQ%sKh4RTp5G|LfP*N#7RxsYhNNV@8P;Ve|WC(0_f; zl53^i)p9Sbfi|VO_jxV~CtQIGtk`-VL^L5@xL_b|-#mR_WlK!K-jLn2^;z!r z6Dg&Zj8zw%;Z%aD7&G<+fDN01SYNP_MpRc4Zta)6Y(MjOyVhP0MigCbw=LKQdN2>8 zqWqVJwR_-gGG&I!c6)3$SY=D%!H=KUeRwPOg!yYn{K~s>b9cMXUtX<0*!=suJzfQv zFCwHHhnnv=c7Pux6D5lYeu_S7Ch{`S8M=CS<9EyadIFPcC%ru`^dcjl=jEvSe^%`k zTgCE7`PHh)pXTH;_UxieN@D==p`Y_o#q1z2g9H5I9^Q%>z!3IT_2IK9}UY7g)xCLBmggLdc)2%f%=y3o8S+iE5rge#go6;+oRoy6+Oa zf2C0|SokeJg$gXd3*syURgzN6<12<|2BG3ck=;7M1<@dxH?XZ_N$jEMqp z*&L76R-{7>&W6TTc=G9hP}lpT^Xxy zvGNl5Dw40b3-($vl5J-AyB7><1|a>T!PuQ$Fq&ugQe=GY&ws1yqX&HLE~LlaGw23? 
z&GLNSIkqa~V=Obv`y2Pcb%j>Rr*O)Yv;fdo4kLt%-B96wWP-4^GvAm01$TPO0AYx= z!j|r3NV8wkJWeLv1;4s>uvS~RH(yu=M(mf!XJa6ym%}a?V8x(XqUIr?DOX0N2Y6HD zzTE$`<5@Fjv|Gt}rvpnp!eQ{i2$GW{A>LvhpC^l0_8d{?7T z!G-TD_;QuUR#f3B-82V3L^|u!mgUJZOwzy;Trz1jUlnOE2js$JJO z>W*N&UNqFae#z<5dj@GT?ABy5_SX#~PPONXGt3TfLv(R&p5m)-y^ zY+t!YvX9x>r_qnUZE&KYM!dnGwKw;c-gtv;u4&YVv7PguG%|?`?eqm+cb5012ac5Q zW2GCx5R%CD`|&iSCdI|W_!DoRtCgDcqzGz%rDmRko6J|{E&hvH zL!W_BnhA3P&N)X3te476n{&47wrgJl(<|VkOuyqHyBRZjEk6NVEie}$P(d%2zU04u zxB8c9u{|1Bou!O+d{uF{X>()r@PlFBI>*DtYnP9nvhhF`!T6E$kHgCc`KQ|r7HCTH zuwqLGJtWfnw_^xuobnmM6zSizUCBN0YSA}u^OJhTpJ?fXeJl^_b*KpPwvy|no4!m; zDLs&)NK>0*R^39B_}Br9neJ+4yP{C;cj z;rx4Dp2Pm47nN@A4py$r+xqGiYZd-d=pj@SC`6gbGwNATQ?DNpo4!%Kr2lZo2Kw1< zim`d00pHvAtVf>Ot9wfubDwcg+QWxOoDyxC+4sx55-Z=5BzDuF?$%cWg`0G>41-2$ z?apnDN+Mxo>}uK$)s4MdG^aPggO*6DbDdYhh=-c`0jyDxaHxucR&82 zbnoQM>(cdU=(%RXn7J4@)?jv`{H+Kjw`b=TvVd~M!3RFSMxy+Gx;uf&n&Z>cEl99y zO2+S}Nd=?AGVxM3@g?-`qqAoXqSvZ;Bfno|@lA*KS97|MU~8K}3=scF`SZoy|94A zIj#c}PXX8e@5T!55Zj-5Gq;(5YW-utwzuwCp4AC*S1D)jOYbr!dp7J4t=Cg_x=p+$ zt!RvPpbVEcvLTjUo&JDHgWzV>UQz7g*}#;PDaM?cmrNhRae zipuf@{U1P9;lY|_`^e(Xs*6XRNS(B0S84SP1)iyf31o>%*g)cpSWsPD`1f^UUi3w;hx^mWfiU-^0Ob=XATiJEzxj#pc_W)&r$ei&O& zOrJ1#;dZ7hLh-M9Tb?7CO765j!hxzrc(b-vJTZtumU($qI0(tC%zPzI{c4*B&Wv2n zR5iIR#LPzu`1)hj7YI;1nz|iY_UDSgb~`fitxLf@Tox|>Y>>tS@y4A^g^k}z4bopX z!2w8j7jq5gQ|=mmUgp}|Y8A8e;HPWSi;FKq#4*yeV{cSnT);JP!k?Y|e8I!J8L|u} z=KBZ`*jTXJV6;m{K1J?5H`bKVQk%KI`aRGJ>@r4ztD%%=BtEi*LZDjYl1GyW{9ZVy z@Rj{x7(f!cL2OoU&<~C}!=jOhyO_w7lI7eJdH3*)1cnrUVhyUkG8`e<7OK&fX5Sjc zV{mX9HEEk%`Eiq!B-G`GM*`6#!mOP0fQml_TyqAnr+Am$@QLRFPtF2Zv9GxIx12f- zRIaoT92jJWj9-3Veg-<@99cz>Rj_L4J1HdMoZ{fjg4<4?Oaqrrc^U%&wxB&@0S1Qs zT#CO}cacCO@DZX$5g_2yxj*KBUSWE!(!u>tFDTzaV)C}x($EV1*UGGYIok}IG|m%O zj+4QDcfkr&l5nt`hdMpwz<%LY#elpX6EK<(Fb>f zso}TzKF8gz=u$y>zr)<#^Ll~K$sN>~NzV$(Lz|^|8nz%FmeoGI{qnciAL4ypElvJx z0SXWKTT-s+IL?ZbSAluZ10D!7K-7s(?aI_!uL#uH1%8mgAxdqXzXsm7gT)3c7V~wm zeoV|afdAh5A;0pY(%UUO#@OW81pG+74`SWgrVRrgm|u#zkhi@vZv95b?d^jK!IpJP 
z5x94GT?$EU)z}U57x%>m1J~`E_o1#g89#m-j78o26ZeHb5r=0cQ?Y-Om=xJmt~>zl z7&DD=!8mm{VOfKKVWwU67;60Rw)w8d$zWK-(zBfLcq0X9m{g_u>Oj5KgUAMt4bRAd zT~VCaVW9CuHyAE!lkt4ao;*M%yq8*|!D0DgTWY>tn?-!Bw)RJtwj$nv%P6%!8zDnG{-%QyS0u^5AEoM&OtO&^9HV-a&T8GQ+mJ zc6b9^bDw;`2gmH7G0`jFn^6PBJoU)lEMfW~f4iKVSu8)PrPra_$u@rn)wB3jr-aIW zXwIF#BRsRM>Cm!7XjrJrb%na?o`pw0ZN(1XI=ofo3hDM1MDH3f^pEdhN=waXF)F0c zZaw&MX=O7XoV-Fz!!NgHH$!N}=$Aqvb*3N&paCxcZJthifMVkuga+20J^<#$iTb@2 zAMd<9*%xI6O2dUDL<8{taq9=Y-zcFiD2BVei6L%=7HOTnzR{ z9TO}_IhTI6x;Hco2VvNfm#oS6kn!F9?PqSpFBKk|wERq+grLr>L`b#x~LRw1->BB69egOrTgtBT;`%;o0@Np3MRR;hI;F-JXk z$1B8Jt&Cl^@vJyS5Orcm^k(Nd==Veeo=IJbPA2@QcCq6Bh@mq07r` z*=^RIhq~w2>S9yGgL=l~O#8Mu%s4LlOli;-SdMHa-zl%xu)ensN3G%&W`p(W9p)MC zUo{N=oBf_unfug$5$Q|t`=korsvky~(+AL4DI9;5wW49V?5cJCp!oS(3B!Ctyb*O|{Gy_&tvGjgu;)4BYxW$3Wo=Bg-k0x^KfbJk zrlk~v+W}986_<=VGq-ggTZ@$7gHvDK5Aw!UCF7BnnW=IuVJ02dDJ9j7YxkMiVS>CUpzCRD$jg1bT#<&B)XNnnA<1B*Qi>eDZt%M-mepI5Rbzl>j64@@{PFb69hHr)?^cWd6OP#)^b- zrN`2H&Ne^YnMjMZ9r!^v(kVo6ByPP1gPsCQ_ViY?InqF@*l!JgY6yQCp!4)Q!w~+o zRHEZ%Mr!yTLVOUuSFayuOQSHQ<9m}<1>duOCBq`Qj_<444pXU&HhBqEWu=lCF`6_>b z2tUY4f3Go(9)Idcy}bNbEqwvFb{7C@8c-DKxXJp>3HZ>SQ1+@eyuV6s9ffD3bsEuZ zUL(LcJQu5zQ^OSr_n}k3dU(-NK;&2PfKfE+YrZOie<&f=~s@#R*>LlMJNe?tG6I`phQHD+t%EF3Y0xf6I1W#xu{Ox8I^Wm1dU#Z$nvk|6+x& z2_I?bfHT-fr;3zyo?#+)!|Nd{sS}=dlZ4!o#qb!x0+)z)_+7=p6Exv18nqqBkfwps zJR3G&i$^hkN=;H7<*~3CQlK2kgHjU4%Q0J{mzNSaL;mOTeoVW0qnC%5=s}GR?*SU` zVe)KV2_0AzF`d5K=~&17e5=JXIl{`|u@EJA^aHg;Eb0inEM^QySj#zXP5HE_fx;E` zC!ZQ$y^~8W&u)xR*!UJ(A>GP>-2ZWN?!_?_pabIy;WldyLNEo>@2&S|^`)Bs(6$Kh z&7kv_0J3Td4w+tS%Tt(}s)@KJLAa;xhbO#^jdXHQVi)}vPH=~dW_ZKjN38JuqeRE! 
zKeGqVgyTs=zHyoTAUv?sj=m#@xkHRyFlo^&H#=txc1zn}SkMJ%2tI{eTM6`dHk&>g zl}ZOY4EPza&K_)oz34iZ07~|>ErHE`^|nO@F3$6LzPicm_`UUyRji{5CqIu}HRYGO zW7GBk{=xq`csy4j$*fr3Npu_uJg-GRPMJm&O$U8yzS zZ@{>E0#u-xJ8zD0_A2}+TbaPGgZ0`|s6wXwUhJZ`5^!81DpZ>Qbk=j+uX2kEdr=d1 zFrWzSl+a=)<9RWV=dm?z-cePMAIqlWf{^&0#B<eHI-j&3Qo7Dn~`gRA6jcu8VoJBP_V!<#a1a zQ6>c7^|m)#-W6=8x8B6X_ERkGtI7$z*je}7UVLWw&}>T_tl6|ao3{~yJ{6gEEw$Xt zelT0B2KC?;4Sh8K9}`{UiyQY%=WWnhdo;`w*Fr3@--Z;qeY;@$##5Tsc8SvPX`n30 z5?m+!&Z`{Gs-my)$zUF>Wy&Aqi|Zu14RP3t=ZaX4p8bP2hT79vLQ~h{iQNA=sh-9>vny7_n?{VNTeCtAKK`qGsM6*zx&Q!T5E-1>8>6# zosN6uSoDR;EOh?oBk}%?J0eb{qwK>5LwHpC=Hw|_?7M6ePek$jl{jz(oQk)db5s;7 zp%X~!xW~x3+NZxy^ubMxzE{k?mhpSfpyjwFqjx>#gft=bylT?eT5trT^~Nor+fs8y#>7N-w1=hJ zdUUlIUJ_w7iH;L!Jofg$WWp+l{PmJapBOR;-RZf>+t++h5k7U)+`|N_FtelSA8Mj z9=-$tTV<3dq?#upzzvKgPG5T)F69*LHi+2To}r(T>uSb(c_pJ9PxPL?U`=_6J>q@%xDR-nXn>&rBnOh>i`;WrxIr)k-VK&gj$yk^} zZ6sLPmLM7Q@YKTeRcU~K6V$@L>z|arQ%#Ux_8JiPZ*{U_xE-ga3OdO&1h8Quie=jM*Difzl8~HDJIz5D zAkx;}ULGUV zQLWzh?f89d(wt3ay zNou8S*_pvP-0YUDF-^^mR~@OMv(-Ib%N99-vS-s5td_Yv%_gMeW8jWtw)5rPePsd#A#*j$CT<~J&N zkGmrvy9jDJg+M|gZX^zzfJz7yV@Rx#{TJ^{zGwb?eeWksY23F!0qLbHU=Bl;(`GA_ zMfNnk@I3}xo#6gmseAPk`c^NN@Npysu-#3!`F8g4z#B&~lJ~o`%^w;H-@N}>JR<7q zAPS}sb`vK*4w-3b={#u@ox`pfSQhCW0ucrgnr{x+bQ-prPITQ$^4&{am@@eZDj|6_ zcWD_S?LIC3xNh5vfvJ^gzs(WE?VR&%UTe+ISv5KM7Zb0X+DNV~(X$k6meX08A7$>| zN_itb9YQe%On>!)FLfG<7})|@>%-dBM*wxi?>@RlgW=tPuVfUd%PnwY-=~PZoWaCt z_BZ*OEVv<(pA*U`#kLQqm%HM3($+Wog$$ZuM9(?ucZQAa;6=k=XQa|C-6q)1-6iO# zuzyVL#jYqp@HKhyv88rm)qDSZdi1WNQ*7Ac8%gz)7?{0#?7;51^eFd!Pg2`X6F z)wTm&osYEnoGi-99)HO#OjGYv&wLjz zd!F_=wo28%*C|N-KriwMzMV5tDZNMA$G#4BawDNZaUOIT8Eu#6Kfc(hT+sH^K4`;x-)^EsLDWRXcfREJ{ zkVoh&Hp#KhI3nW#sOFv)tb&T{^3Jr`Kkt>q|MGJp!aEwGj`A#Q#fk_@Zn|C;MAI^C z#d5Kc@o0H@`C9fjvu6-m?#KcQrS7VCViuyZ7Ard(JXOc@LVjsHFPT+bl; zwt?>o5RWqm;9GdR1ULr4aOv{5%#zI=1S7p*lp*#i>bMC7EIx6`+u3LZ2A^jmM$ z|B#e9$XzWPc}w}o^N_DJ{4pLi2ByPRxSWYfx4L`mZOcQ4n!YXQlISd;iBlntWHtni zYKK6wxi7YO2iS#|L#9xKCosRn$3DSL)m2Q)hP7F+mQF2il;Gp&Xp%cZ*r_f*_LYtc 
zF3}@s&??T+?ny99SH~OU!p?7_w_0L>n7#wz|Cc;C+(oT43RsT4zI^Pynb+5=|DQt$ z(sA;tzKIXvLmwIVc8YUPbqRO;uPhq2mo^F6*uDXWwGYB^KLTJ0Vw`up_ESZdQYyH< ztn`opgGo3l(PmpM@PY@*=D#WcW2@>$z{}k|)8e1AW=lRI>U`VQ4Je`SJK3LrsoJgN zBEyXQplfXv3k!G0Aa#C?F9BBhUZ^wWAwjUuR)`TEaxu`oh>i~$yl9^sJBo|NDII<| zF04mGIY9E?rscRVj?O3jIs8xltxxbaF={@8V^*uE%=U`}3DN*%VL;(vgD>fJJ}ufB zAz}6tnaJqA`HSq&HQ)~W9f?r)-tE^ZF382-MJbe)9x`Gui8jyxF!&x;r%tc$Ez~{e zmjFvBkEJpu;oPle5lQR)^l8H$&)QoQ6 z@DQwB;};7m-hbL{uz%tya$eyX(0Mv8xyQVTy8N1xjC#!U`Q&vS)Gky4v861b?pN=x zBd3gwk|w;W;95-JTKpT)tP3^{Ip;dhuN@Cchm(6RnXU|j9uElUWkJKw6SY~G@uaoh zQ}>2NI8a9!dnm-okq=%_i|Uu<;rZ}9s^4#9)SQFhnD3w{p4MLFL-&WOes&9fqCF1M zG<^hQ`iIWy!XG8D8mi0w-+#e05EntEE_E4LAZgkQt;oV6D5GzUyDPwB1`C{n&=imc zDDwvG!E4riCzV~Ah$rTHzpmTe(@P?|4P_O->jMIPb^C{@`nkC=k7agR+&;#$M+k_q z;UmgB5B|O z`)hDgE+o3i3QRTRtaZQVu0*$6mu2DZ1@3;Udl(Agpv`J1nRN++o;b62Efu2ivbZO2 zP2>#iVG1JA^e3AVieN*1%^f44g>e9q^n)}r)vod;e-AIIx^&OExF=?qn zSBGwE%a8HXO)oz|M|CZ${fuYXDN$y75Mt6%jiiV#5%W(4jmTTT@bw(fKAu5|P!2jU z4BwaX(}bS_u6ZEumWBStIbivlgO09a$*BDM{SbaFKNAGLRNvF3rMiZU;QLe5^cF6N zW}#uey0jT+Qr)`om93y%JJq4NdjFb<`_J_d_ca<$M524ip< z_)Ys@STRglGIDtb$|b^ezR!`A@J`NTDq0?ntylP36G#O+CCCmpiU3?zIJbkQG}0J= zIPSA8B(13K(LGi90oQ5H1wgYbr;wtJ_o4Fp6$kIu5Pdj?HhU7!LuSP2Lj`5>-6A>Z zH3u5aQJ=T-A()2@AlY01uWq`oclD?A`yQ^QSTaOn-+;{I`DFZLLS!wViNL)iKwBwI z!T(&F#Y1RK)CEoGznA#7w&^FVs2z{(+DBYlIO!YAK7oWA>=2oA|S9^vRx*z^5b=#HabtRcDwK7WXu zh8LaILW;+eN?E6BgQ?-Et+nDux|3ZDhPbPGbi8~tGi`7qMqF9X#XJrwh2MPrF)U<{ z4~0psv%>ep&z^5|(~cyKIhAvyIxfM6DHG2Nt($D;ZxRSwPB?+Ec<~ZPw+xV%CwIF~ z-u-SIjXzZ~i)*#)s_^~h_LG}1h)2jbxOkpGk6~YaCE-<0Zmy?P`{u+q_^9>`ai|%B z81#@<2t_`Ph8>TOok)(G82*%ZV&Qd!n*3PHm7kJ?L7Vn_eAg+=FSmNymz>CtT; z_>IJYC|eyfTw!YnP7gOV^abS|RD9S*6$rO+%#R2xk8Zv$_2@iemkdR(Jfiv;fe~_jfpS-x; zUe+MKiAz+ibKsIya;y$vSzLAbP2}4Md946qD^jtLdxZrKvuqe*d}LXSihvsc8d(jzoo3;7}@oQ`^~paH^B!f8Gh?`~7bYA;Yy~NEj3d6hi8Efs~5- zZl)KL<7?oSh5x(dM+F`=>U%J2t{t3z;o0!zIE^Sdox?q@wMVTf(8)P}k-57{VbK*DgDK@2h3BO4gDU;Tq&eot$;4wYBLMAc= z_Ji^Yke(AcI}T|ZToQTYn`p{7bsCzJc)@?KC&(cwRycx>xonrIa=-Nf60CXV_a+3| 
zhq84bOY9nDxlCX_N&5Fox&KKcvcSAwBskxB^GUzZ}_T~12`RjVN3bt)``OCAI=coa%P~UTYT?v?}et2Hoa{cf|w~#h8;Fx z7dYnwtstbX7Di1gpe`l27>!TqBkamV437!Z7;!xFe%2bahNU49UHLhIJOjd>gH|P& zVrxbTZoe#*V#Bel0D3=;lA!UX;~bGk?8%cnSTcddDjwWS6rrQXiO+@%LS79>GY=X3 zB&;H24XV^AG;e^Jx13pE;L(Fjwk5o>?p1BF(p;t7d@b4B<;)j1L!jo4ytLL=0>7&q z*zwFzK(C#_{&8!}R=udERPwjFEZkMMlTZb02Wh>1;SYFK!q9VP;qjS}&ZrlE9q61! zdz2)@Gd=%9)?R~(-Rg$pEveW*cn$6Dhx%Q;iKx2%1dTpxgXMjtcWxcoVw7L(vb0mr zKEJBE(BSt{mO2cCh?tOjdgQD zPG8XXI{Iex3q{k)Sd{pa(``Eh%$r$gyI}m$D*)sChj2z7g&j@V!{UOzEy*}M!rV=v zl5=B~;`Omn{E*WWbQ{1KIbdVE=07D(+dh6w(|hmbT~>OV8{Y(ebk3omb*CX$ z+MJqiT-w-0HxuvAPLh@kByCQea62fgq9 z0&*rgXgHcxFvSLQ2a?UPr(cSJ_|CqNehAO(L&m3w#Kuz72+k@O!oIvaL?9;10zg+d zqd^<%P3%!G>zt-Aeq5XfkpYHhXsSP>!PRGp&&LS+-UKOzuneWV;P>b`ITFFV^;p@D z5s3a?z&rjwM`w^_iIPOMHNPR8XbL)=nT7Y=em?##yJ4=<>~AFD0&b0fddi_nRDsPG!)OugBu>aTWM~<7k;S zG+=P2@W9`}9+T&@23t&L7&n*S>N+Y&2;TBvEK=Vq>T^UG)Bmn?$QZu0#Y^#l(|~*8 z*AYlws;ci)OkU2-&224&-`w8%UkLrn-z7Sd$kaP%svftVA0q%K__Cf%pyW=k0sziG zuHH<+{P;JDejhu1U-SMITl?bqkN<{#q+K7)zE@hX2O5#wF?F(&R}+V76F} zWx{+6hkYwXUwhz*%V3883s?tE=4j^F<6%zJInQT#R;c75cl)RsFCU5jNYbY!CIFO^$tor>eaK2>coFH$LB~l#6;O zO=C~WmE>a%BFDbJ-k$>mmF~-&8r2*|xg~#SS0kZANXCHKlu+(nq|%`>NBDy9%S)yV z_jm;_Vf$BwY8xYCuGlOwROYG%u861Q+}jpZ0?;|Ifb)8``AWnC>qG}SI(4`|;BLdQ zSPQ&AewE|@WEMmN2$XryU8yLbxC%O4Lp9NhFk~W|1gGW0m=+{Qp3dEbE{*4u-`XGK z=fqV&-a??O@r=RFuPBjkNC5OYR!wbryAv><+_gj9lp(n;QWM?-jSijy$?J6ZY$l^Yjod~kXO(3H(a(3T2Jpd-Qa%jD*AwZclIG4+n9{IqL_ zxaj%q!$r;GrCj2>ETKgrgYrL~*H10eiMz0%Pekiep^Zfl3m#3E4BlzUik4yb*}k5G zo+tx#*?x8M8H{3F`;*h#m4nlo9Qra^ZvssDYu+nx^JY+$N~$0Vd*g8Qq*h2Et5!G- z>IY$TkRNm&L%9Q6{DFUlc9yO_p*<`HkJ86oqrtx9tW(%5j62Bv<1c(h`)Dytyx&d9 zGq;~dEtDi-3;!hVD1*25AMaNbB?N4P{%RBw+$%u1d#q{!%cbuL#2huS;R6*4lm`kh zQq!=zH%v0gk=c~xF9}6 z8$I9wfK1o}E!Ic78o84cR`Ie!9d(ZX&tc|hRGz4Q=V z;BwGnO!+euV=Iu;JpnfV`5^4O)T(_|*9wea@X?M4upnQ%xR{00WR3%46`Xko@f5S| zmD~mRW>TdT-qb9&v?b@^8LF zmN5&?GWLJZJTjDi8?m)WYt_U@}aYWG8^4 zSgyZ-6{A*u%a*$0Z!LUO?D|DT%)h0VCqO{*aiktD^HcwSrB{`?TP&Tl)e#yovoH_G 
zb4{mD3aAXK@XZVGyZktmR=1^xOZ&u}hh0X){_@GXci3x=e@}cW} zAd?{Dh!fTxX{mUzk& z@k*jn7(J$`|B_m71wNo?h?aC_fq2;zT60$+)wuIKiG?3MOET5>AEr`WZP~DNT~R&L z?MAsode4EDK+!-Y$A?$LR+oa&?Z@A9|q5T#!gM(H@ZQ<@2)U^R4*~t zG(Sl#uF;KJyG=F`K!3tSNCt)s66Ll;A1(l)KnttIb6gMVI`Li2vXaD zq{EYjZd~7)P60fg%i;z;g^C5-No4eSTs+$fmnH)QQM&|mqv8-k6f5dzQg8gNRVz;uKIiVY@cy3fnSfxbblR=5_Ri^pqc^l$8e zEEgr4?u_<~?mChFUI~uU(pZ$?#O+^5I!>u8F9e)s_F3#Al%L;2Bx(%{uy6=5<{4x~ zD{{30sw4~6WGa4&^>a8Fj+8?`4Et;vmLwu7;j^oh|NMf@29y={Xe|kmIS0J>R11cd zo=>gOTYYfEow!0X?dBMu%wv_05p~vH13#gN)IN)WM^eaQL5TGi9S}^e7SWAt5IYTQ z_!WS0%psqdi39&XHy(<`Nk#$zl9gbvdYJ8FLHV;c?j@@kR?H@9kns32Kp{`_Gu6oZ z!(El0Nfo}7$n>t~;jcOI@7MBxI?`&$LH_{8*m!(ocw5VYNoi}&Fw+=^R7it%x7aM)9_5@%Z>uzJD*hd_(+!&qZg!_iT@ z=Z)Zxc*BXbB6eQsU|vMJL~}xc2*FeWW4dxl)=KQv6!@jqr~x^;#rc;ct&D5*PT*xA z+8X})-G5i^_{ix8azK3ga+~dS((Qfta^TZE0fimQkH0A9n<}uKjNBa$Ih(rw8R!Do z9Q$No6)3U7MWE8CG$OZ_f^$s>qrl2mcp*qy*CsoaW{v=$RHGqe4B&w3D;$!^q1DAGR~%|YYf zJZxM@)uRpIiOchfZeZ^QP1IN(bUYMxeqB5YXK)JNUMRm$-rWq#m%)@`(@yeA+H1(< zzl!dwGVzQFLW6M@Oy)C%YD7@=V}D74TD`&Yj}$gFG?1_1V<%pqg=eyW(W|hT_i@yY zgbG{6Rlo^d50%YA{;`jpqcPzG5=E&OBqU2hpdV57;N>AuRM;P!w3#+?x}dO(?JL ze&3d!q*~`&#-t=QmIc)k$^gmTUPFz;NLxs0vboS!aYQ+hc(LM2REyqv_&RvCK$GiH zMavQ5uH}gYAdTq8SN8%{@dAfM(gY9RExRgy-MN-QF?jAsbz6Q}6Bl#Ky?7irXXzFj zw6h>E2=J-nl>X+gOqg2-WDonwrJ(yCWDTB^KHrLMZzQ?^EOvJYHBj5k1JZ&S2+Xk& zcD-2;0G$5^Jgmp7b6B1O2+&Rjw>l9V4W6zJbfN|(Yi1p{)8|1Aqy^X>zY63N6b9EM ze9N3qH$W0=;I%-QYJNEm3qtuFhIu|CTL8IoJ#A4|Q~MnRvEvLH|PspM4&H zmf6^}Pr$_yA?`V4KoyAuxF9#k8eGr#w2XcjVgHMyTCo=hOxwrqhgGRPn0hAs&SI-Pwh$DNp_xc z6%y6W8y$AKE_tm9-j+jGf1B);X{j`%R-+Av<*L}q8n|3$tK4j^R?$X4xIccDWS>Qi z#r7Gp9}vwFX-EiN8h8Glq?%S#=D>mD?%{6G$NTcA?C2$~e=#l|kb3QVj{Fx9O2LuD zd;!AF*%`2Zr}cCcb(c-wUY$oCx(f#bJXS}7b0>hUaNwM)+}~lC3gESc`)VT~BqhTc zdaDx4%}Z>jOTKoD5Rr>=(gHh-)7-oVFBxjGb{mq+{Q98_jc5%!|PU?eM_Qjq6H#_|(-&DK(sLwh_jb+6(0dJhIm zC2i+^O{JPrQ;A)DB@YIrFz19cl2i;;2Yl0Z z3o6TD3{aNmS5rK5CJCR4n|ijj+<-Y3bD-?= z|4ckibb!$<_D_B{8M6 zlxcMvh}%L~vx7jQZ54j1sT?T8)fi4G;n>wDYUOc&|4dA}=k_^nPZmhH6j7F=U6Ymw 
zL1k;4Z*o?6>@ZK$0HSFjUe?)=R}P0}F1l=U Schema Docs

GenesisFromJSON is the config file for network_custom

Type: string

L1: root hash of the genesis block


Type: integer

L1: block number of the genesis block


Type: array of object

L2: List of states contracts used to populate merkle tree at initial state

Each item of this array must be:


L1: configuration of the network
Type: integer

Chain ID of the L1 network


Type: array of integer

Address of the L1 contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 Matic token Contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 GlobalExitRootManager contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the data availability committee contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


\ No newline at end of file + Schema Docs

GenesisFromJSON is the config file for network_custom

Type: string

L1: root hash of the genesis block


Type: integer

L1: block number of the genesis block


Type: array of object

L2: List of states contracts used to populate merkle tree at initial state

Each item of this array must be:


L1: configuration of the network
Type: integer

Chain ID of the L1 network


Type: array of integer

Address of the L1 contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 Matic token Contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 GlobalExitRootManager contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the data availability committee contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


\ No newline at end of file diff --git a/docs/config-file/custom_network-config-doc.md b/docs/config-file/custom_network-config-doc.md index f2dd77af33..3e0ecdfe6c 100644 --- a/docs/config-file/custom_network-config-doc.md +++ b/docs/config-file/custom_network-config-doc.md @@ -70,14 +70,11 @@ **Type:** : `object` -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ---------------------------------------- | ------- | ------ | ---------- | ---------- | ----------------- | -| - [.*](#genesis_items_storage_pattern1 ) | Yes | string | No | - | - | - -##### 3.1.5.1. Pattern `genesis.genesis items.storage..*` -> All properties whose name matches the regular expression -```.*``` ([Test](https://regex101.com/?regex=.%2A)) -must respect the following conditions +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| -------------------------------------------------- | ------- | ------ | ---------- | ---------- | ----------------- | +| - [](#genesis_items_storage_additionalProperties ) | No | string | No | - | - | + +##### 3.1.5.1. `genesis.genesis items.storage.additionalProperties` **Type:** : `string` diff --git a/docs/config-file/custom_network-config-schema.json b/docs/config-file/custom_network-config-schema.json index 8bff4da3a8..3b1aa6d667 100644 --- a/docs/config-file/custom_network-config-schema.json +++ b/docs/config-file/custom_network-config-schema.json @@ -26,10 +26,8 @@ "type": "string" }, "storage": { - "patternProperties": { - ".*": { - "type": "string" - } + "additionalProperties": { + "type": "string" }, "type": "object" }, diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index 40fb3d4e6c..cdcf1fcce9 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -10,17 +10,29 @@
"300ms"
 

Default: "15s"Type: string

PollMinAllowedGasPriceInterval is the interval to poll the suggested min gas price for a tx


Examples:

"1m"
 
"300ms"
-

Default: 64Type: integer

AccountQueue represents the maximum number of non-executable transaction slots permitted per account


Default: 1024Type: integer

GlobalQueue represents the maximum number of non-executable transaction slots for all accounts


Default: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"Type: string

FreeGasAddress is the default free gas address


Configuration for RPC service. THis one offers a extended Ethereum JSON-RPC API interface to interact with the node
Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the HTTP requests


Default: 8545Type: integer

Port defines the port to serve the endpoints via HTTP


Default: "1m0s"Type: string

ReadTimeout is the HTTP server read timeout
check net/http.server.ReadTimeout and net/http.server.ReadHeaderTimeout


Examples:

"1m"
+

Default: 64Type: integer

AccountQueue represents the maximum number of non-executable transaction slots permitted per account


Default: 1024Type: integer

GlobalQueue represents the maximum number of non-executable transaction slots for all accounts


Default: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"Type: string

FreeGasAddress is the default free gas address


EffectiveGasPrice is the config for the effective gas price calculation
Default: falseType: boolean

Enabled is a flag to enable/disable the effective gas price


Default: 0.25Type: number

L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price


Default: 16Type: integer

ByteGasCost is the gas cost per byte that is not 0


Default: 4Type: integer

ZeroByteGasCost is the gas cost per byte that is 0


Default: 1Type: number

NetProfit is the profit margin to apply to the calculated breakEvenGasPrice


Default: 1.1Type: number

BreakEvenFactor is the factor to apply to the calculated breakevenGasPrice when comparing it with the gasPriceSigned of a tx


Default: 10Type: integer

FinalDeviationPct is the max allowed deviation percentage BreakEvenGasPrice on re-calculation


Default: 0.5Type: number

L2GasPriceSuggesterFactor is the factor to apply to L1 gas price to get the suggested L2 gas price used in the
calculations when the effective gas price is disabled (testing/metrics purposes)


Configuration for RPC service. THis one offers a extended Ethereum JSON-RPC API interface to interact with the node
Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the HTTP requests


Default: 8545Type: integer

Port defines the port to serve the endpoints via HTTP


Default: "1m0s"Type: string

ReadTimeout is the HTTP server read timeout
check net/http.server.ReadTimeout and net/http.server.ReadHeaderTimeout


Examples:

"1m"
 
"300ms"
 

Default: "1m0s"Type: string

WriteTimeout is the HTTP server write timeout
check net/http.server.WriteTimeout


Examples:

"1m"
 
"300ms"
-

Default: 500Type: number

MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second


Default: ""Type: string

SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node


Default: 0Type: integer

MaxCumulativeGasUsed is the max gas allowed per batch


WebSockets configuration
Default: trueType: boolean

Enabled defines if the WebSocket requests are enabled or disabled


Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the WS requests


Default: 8546Type: integer

Port defines the port to serve the endpoints via WS


Default: trueType: boolean

EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.


Default: trueType: boolean

TraceBatchUseHTTPS enables, in the debugtraceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP)
to do the parallel requests to RPC.debug
traceTransaction endpoint


Default: falseType: boolean

EnablePendingTransactionFilter enables pending transaction filter that can support query L2 pending transaction


Nacos configuration
Default: ""Type: string

URLs nacos server urls for discovery service of rest api, url is separated by ","


Default: ""Type: string

NamespaceId nacos namepace id for discovery service of rest api


Default: ""Type: string

ApplicationName rest application name in nacos


Default: ""Type: string

ExternalListenAddr Set the rest-server external ip and port, when it is launched by Docker


NacosWs configuration
Default: ""Type: string

URLs nacos server urls for discovery service of rest api, url is separated by ","


Default: ""Type: string

NamespaceId nacos namepace id for discovery service of rest api


Default: ""Type: string

ApplicationName rest application name in nacos


Default: ""Type: string

ExternalListenAddr Set the rest-server external ip and port, when it is launched by Docker


Configuration of service `Syncrhonizer`. For this service is also really important the value of `IsTrustedSequencer` because depending of this values is going to ask to a trusted node for trusted transactions or not
Default: "1s"Type: string

SyncInterval is the delay interval between reading new rollup information


Examples:

"1m"
+

Default: 500Type: number

MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second


Default: ""Type: string

SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node


Default: 0Type: integer

MaxCumulativeGasUsed is the max gas allowed per batch


WebSockets configuration
Default: trueType: boolean

Enabled defines if the WebSocket requests are enabled or disabled


Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the WS requests


Default: 8546Type: integer

Port defines the port to serve the endpoints via WS


Default: 104857600Type: integer

ReadLimit defines the maximum size of a message read from the client (in bytes)


Default: trueType: boolean

EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.


Default: falseType: boolean

BatchRequestsEnabled defines if the Batch requests are enabled or disabled


Default: 20Type: integer

BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request


Type: array of integer

L2Coinbase defines which address is going to receive the fees

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Default: 10000Type: integer

MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit


Default: 10000Type: integer

MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit


Default: 60000Type: integer

MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit


Default: trueType: boolean

EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server.


Default: falseType: boolean

EnablePendingTransactionFilter enables pending transaction filter that can support query L2 pending transaction


Nacos configuration
Default: ""Type: string

URLs nacos server urls for discovery service of rest api, url is separated by ","


Default: ""Type: string

NamespaceId nacos namespace id for discovery service of rest api


Default: ""Type: string

ApplicationName rest application name in nacos


Default: ""Type: string

ExternalListenAddr Set the rest-server external ip and port, when it is launched by Docker


NacosWs configuration
Default: ""Type: string

URLs nacos server urls for discovery service of rest api, url is separated by ","


Default: ""Type: string

NamespaceId nacos namespace id for discovery service of rest api


Default: ""Type: string

ApplicationName rest application name in nacos


Default: ""Type: string

ExternalListenAddr Set the rest-server external ip and port, when it is launched by Docker


Configuration of the `Synchronizer` service. For this service the value of `IsTrustedSequencer` is also really important, because depending on this value it is going to ask a trusted node for trusted transactions or not
Default: "1s"Type: string

SyncInterval is the delay interval between reading new rollup information


Examples:

"1m"
 
"300ms"
-

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Configuration of the sequencer service
Default: "1s"Type: string

WaitPeriodPoolIsEmpty is the time the sequencer waits until
trying to add new txs to the state


Examples:

"1m"
+

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Default: "sequential"Type: enum (of string)

L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute

Must be one of:

  • "sequential"
  • "parallel"

L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
Default: 10Type: integer

MaxClients Number of clients used to synchronize with L1


Default: 25Type: integer

MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= NumberOfEthereumClientsToSync;
the suggested value is twice NumberOfParallelOfEthereumClients


Default: "5s"Type: string

RequestLastBlockPeriod is the time to wait before requesting the
last block from L1 to know if we need to retrieve more data.
This value only applies when the system is synchronized


Examples:

"1m"
+
"300ms"
+

Consumer Configuration for the consumer of rollup information from L1
Default: "5s"Type: string

AceptableInacctivityTime is the expected maximum time that the consumer
could wait until new data is produced. If the time is greater it emits a log to warn about
that. The idea is to keep the consumer working as much as possible, so if the producer is not
fast enough then you could increase the number of parallel clients to sync with L1


Examples:

"1m"
+
"300ms"
+

Default: 10Type: integer

ApplyAfterNumRollupReceived is the number of iterations to
start checking the time waiting for new rollup info data


Default: "5s"Type: string

RequestLastBlockTimeout Timeout for request LastBlock On L1


Examples:

"1m"
+
"300ms"
+

Default: 3Type: integer

RequestLastBlockMaxRetries Max number of retries to request LastBlock On L1


Default: "5m0s"Type: string

StatisticsPeriod how often to show a log with statistics (0 is disabled)


Examples:

"1m"
+
"300ms"
+

Default: "5m0s"Type: string

TimeOutMainLoop is the timeout for the main loop of the L1 synchronizer when it is not updated


Examples:

"1m"
+
"300ms"
+

Default: "5s"Type: string

RollupInfoRetriesSpacing is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1


Examples:

"1m"
+
"300ms"
+

Default: falseType: boolean

FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized


Configuration of the sequencer service
Default: "1s"Type: string

WaitPeriodPoolIsEmpty is the time the sequencer waits until
trying to add new txs to the state


Examples:

"1m"
 
"300ms"
 

Default: 100Type: integer

BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool


Default: "12h0m0s"Type: string

FrequencyToCheckTxsForDelete is frequency with which txs will be checked for deleting


Examples:

"1m"
 
"300ms"
-

Default: 300Type: integer

MaxTxsPerBatch is the maximum amount of transactions in the batch


Default: 120000Type: integer

MaxBatchBytesSize is the maximum batch size in bytes
(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1)


Default: 30000000Type: integer

MaxCumulativeGasUsed is max gas amount used by batch


Default: 2145Type: integer

MaxKeccakHashes is max keccak hashes used by batch


Default: 252357Type: integer

MaxPoseidonHashes is max poseidon hashes batch can handle


Default: 135191Type: integer

MaxPoseidonPaddings is max poseidon paddings batch can handle


Default: 236585Type: integer

MaxMemAligns is max mem aligns batch can handle


Default: 236585Type: integer

MaxArithmetics is max arithmetics batch can handle


Default: 473170Type: integer

MaxBinaries is max binaries batch can handle


Default: 7570538Type: integer

MaxSteps is max steps batch can handle


Default: "10m0s"Type: string

TxLifetimeCheckTimeout is the time the sequencer waits to check txs lifetime


Examples:

"1m"
+

Default: "10m0s"Type: string

TxLifetimeCheckTimeout is the time the sequencer waits to check txs lifetime


Examples:

"1m"
 
"300ms"
 

Default: "3h0m0s"Type: string

MaxTxLifetime is the time a tx can be in the sequencer/worker memory


Examples:

"1m"
 
"300ms"
@@ -42,11 +54,11 @@
 
"300ms"
 

Default: "5s"Type: string

Examples:

"1m"
 
"300ms"
-

EffectiveGasPrice is the config for the gas price
Default: 10Type: integer

MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation


Default: 0.25Type: number

L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price


Default: 16Type: integer

ByteGasCost is the gas cost per byte


Default: 1Type: number

MarginFactor is the margin factor percentage to be added to the L2 min gas price


Default: falseType: boolean

Enabled is a flag to enable/disable the effective gas price


Default: 0Type: integer

DefaultMinGasPriceAllowed is the default min gas price to suggest
This value is assigned from [Pool].DefaultMinGasPriceAllowed


Configuration of the sequence sender service
Default: "5s"Type: string

WaitPeriodSendSequence is the time the sequencer waits until
trying to send a sequence to L1


Examples:

"1m"
+

StreamServerCfg is the config for the stream server
Default: 0Type: integer

Port to listen on


Default: ""Type: string

Filename of the binary data file


Default: falseType: boolean

Enabled is a flag to enable/disable the data streamer


Log is the log configuration
Default: ""Type: enum (of string)

Must be one of:

  • "production"
  • "development"

Default: ""Type: enum (of string)

Must be one of:

  • "debug"
  • "info"
  • "warn"
  • "error"
  • "dpanic"
  • "panic"
  • "fatal"

Type: array of string

Each item of this array must be:


Configuration of the sequence sender service
Default: "5s"Type: string

WaitPeriodSendSequence is the time the sequencer waits until
trying to send a sequence to L1


Examples:

"1m"
 
"300ms"
 

Default: "5s"Type: string

LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent


Examples:

"1m"
 
"300ms"
-

Default: 0Type: integer

MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
non-trivial consequences: larger transactions than 128KB are significantly harder and
more expensive to propagate; larger transactions also take more resources
to validate whether they fit into the pool or not.


Default: 10Type: integer

MaxBatchesForL1 is the maximum amount of batches to be sequenced in a single L1 tx


Type: array of integer

SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Default: "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"Type: array of integer

L2Coinbase defines which address is going to receive the fees

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


PrivateKey defines all the key store files that are going to be read in order to provide the private keys to sign the L1 txs
Default: "/pk/sequencer.keystore"Type: string

Path is the file path for the key store file


Default: "testonly"Type: string

Password is the password to decrypt the key store file


Default: 0Type: integer

Batch number where there is a forkid change (fork upgrade)


Default: trueType: boolean

UseValidium is a flag to enable/disable the use of validium


Configuration of the aggregator service
Default: "0.0.0.0"Type: string

Host for the grpc server


Default: 50081Type: integer

Port for the grpc server


Default: "5s"Type: string

RetryTime is the time the aggregator main loop sleeps if there are no proofs to aggregate
or batches to generate proofs. It is also used in the isSynced loop


Examples:

"1m"
+

Default: 0Type: integer

MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
non-trivial consequences: larger transactions than 128KB are significantly harder and
more expensive to propagate; larger transactions also take more resources
to validate whether they fit into the pool or not.


Default: 10Type: integer

MaxBatchesForL1 is the maximum amount of batches to be sequenced in a single L1 tx


Type: array of integer

SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Default: "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"Type: array of integer

L2Coinbase defines which address is going to receive the fees

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


PrivateKey defines all the key store files that are going to be read in order to provide the private keys to sign the L1 txs
Default: "/pk/sequencer.keystore"Type: string

Path is the file path for the key store file


Default: "testonly"Type: string

Password is the password to decrypt the key store file


Default: 0Type: integer

Batch number where there is a forkid change (fork upgrade)


Default: trueType: boolean

UseValidium is a flag to enable/disable the use of validium


Default: 80000Type: integer

GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.

ex:
gas estimation: 1000
gas offset: 100
final gas: 1100


Configuration of the aggregator service
Default: "0.0.0.0"Type: string

Host for the grpc server


Default: 50081Type: integer

Port for the grpc server


Default: "5s"Type: string

RetryTime is the time the aggregator main loop sleeps if there are no proofs to aggregate
or batches to generate proofs. It is also used in the isSynced loop


Examples:

"1m"
 
"300ms"
 

Default: "1m30s"Type: string

VerifyProofInterval is the interval of time to verify/send a proof in L1


Examples:

"1m"
 
"300ms"
@@ -56,7 +68,7 @@
 
"300ms"
 

Default: 0Type: integer

ChainID is the L2 ChainID provided by the Network Config


Default: 0Type: integer

ForkID is the L2 ForkID provided by the Network Config


Default: ""Type: string

SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs


Default: "2m0s"Type: string

CleanupLockedProofsInterval is the interval of time to clean up locked proofs.


Examples:

"1m"
 
"300ms"
-

Default: "10m"Type: string

GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared.


Configuration of the genesis of the network. This is used to know the initial state of the network

L1: Configuration related to L1
Default: 0Type: integer

Chain ID of the L1 network


Type: array of integer

Address of the L1 contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 Matic token Contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 GlobalExitRootManager contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the data availability committee contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

DEPRECATED L2: address of the PolygonZkEVMGlobalExitRootL2 proxy smart contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

L2: address of the PolygonZkEVMBridge proxy smart contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


L1: Genesis of the rollup, first block number and root
Default: 0Type: integer

GenesisBlockNum is the block number where the polygonZKEVM smc was deployed on L1


Type: array of integer

Root hash of the genesis block

Must contain a minimum of 32 items

Must contain a maximum of 32 items

Each item of this array must be:


Type: array of object

Contracts to be deployed to L2

Each item of this array must be:


Configuration of the gas price suggester service
Default: "follower"Type: string

Default: 2000000000Type: integer

DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimum gas price by the follower gas pricer.


Default: 0Type: integer

MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0.


Default: 0Type: integer

Default: 0Type: integer

Default: "10s"Type: string

Examples:

"1m"
+

Default: "10m"Type: string

GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared.


Default: 0Type: integer

GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.

ex:
gas estimation: 1000
gas offset: 100
final gas: 1100


Configuration of the genesis of the network. This is used to know the initial state of the network

L1: Configuration related to L1
Default: 0Type: integer

Chain ID of the L1 network


Type: array of integer

Address of the L1 contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 Matic token Contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 GlobalExitRootManager contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the data availability committee contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

DEPRECATED L2: address of the PolygonZkEVMGlobalExitRootL2 proxy smart contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

L2: address of the PolygonZkEVMBridge proxy smart contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


L1: Genesis of the rollup, first block number and root
Default: 0Type: integer

GenesisBlockNum is the block number where the polygonZKEVM smc was deployed on L1


Type: array of integer

Root hash of the genesis block

Must contain a minimum of 32 items

Must contain a maximum of 32 items

Each item of this array must be:


Type: array of object

Contracts to be deployed to L2

Each item of this array must be:


Configuration of the gas price suggester service
Default: "follower"Type: string

Default: 2000000000Type: integer

DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimum gas price by the follower gas pricer.


Default: 0Type: integer

MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0.


Default: 0Type: integer

Default: 0Type: integer

Default: "10s"Type: string

Examples:

"1m"
 
"300ms"
 

Default: "1h0m0s"Type: string

Examples:

"1m"
 
"300ms"
@@ -64,4 +76,6 @@
 
"300ms"
 

Default: ""Type: string

Default: ""Type: string

Default: ""Type: string

Default: ""Type: string

Default: ""Type: string

Default: ""Type: string

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: number

DefaultL1CoinPrice is the L1 token's coin price


Default: 0Type: number

DefaultL2CoinPrice is the native token's coin price


Default: 0Type: number

Default: falseType: boolean

EnableFollowerAdjustByL2L1Price dynamically adjusts the factor through the L1 and L2 coin prices in the follower strategy


Default: 0.15Type: number

Configuration of the executor service
Default: "x1-prover:50071"Type: string

Default: 3Type: integer

MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion


Default: "1s"Type: string

WaitOnResourceExhaustion is the time to wait before retrying a transaction because of resource exhaustion


Examples:

"1m"
 
"300ms"
-

Default: 100000000Type: integer

Configuration of the merkle tree client service. Not used in the node, only for testing
Default: "x1-prover:50061"Type: string

URI is the server URI.


Configuration of the state database connection
Default: "state_db"Type: string

Database name


Default: "state_user"Type: string

Database User name


Default: "state_password"Type: string

Database Password of the user


Default: "x1-state-db"Type: string

Host address of database


Default: "5432"Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 200Type: integer

MaxConns is the maximum number of connections in the pool.


Configuration of the metrics service; basically it is where the metrics are going to be published
Default: "0.0.0.0"Type: string

Host is the address to bind the metrics server


Default: 9091Type: integer

Port is the port to bind the metrics server


Default: falseType: boolean

Enabled is the flag to enable/disable the metrics server


Default: ""Type: string

ProfilingHost is the address to bind the profiling server


Default: 0Type: integer

ProfilingPort is the port to bind the profiling server


Default: falseType: boolean

ProfilingEnabled is the flag to enable/disable the profiling server


Configuration of the event database connection

DB is the database configuration
Default: ""Type: string

Database name


Default: ""Type: string

Database User name


Default: ""Type: string

Database Password of the user


Default: ""Type: string

Host address of database


Default: ""Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 0Type: integer

MaxConns is the maximum number of connections in the pool.


Configuration of the hash database connection
Default: "prover_db"Type: string

Database name


Default: "prover_user"Type: string

Database User name


Default: "prover_pass"Type: string

Database Password of the user


Default: "x1-state-db"Type: string

Host address of database


Default: "5432"Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 200Type: integer

MaxConns is the maximum number of connections in the pool.


\ No newline at end of file +
Default: 100000000Type: integer

Configuration of the merkle tree client service. Not used in the node, only for testing
Default: "x1-prover:50061"Type: string

URI is the server URI.


Configuration of the metrics service; basically it is where the metrics are going to be published
Default: "0.0.0.0"Type: string

Host is the address to bind the metrics server


Default: 9091Type: integer

Port is the port to bind the metrics server


Default: falseType: boolean

Enabled is the flag to enable/disable the metrics server


Default: ""Type: string

ProfilingHost is the address to bind the profiling server


Default: 0Type: integer

ProfilingPort is the port to bind the profiling server


Default: falseType: boolean

ProfilingEnabled is the flag to enable/disable the profiling server


Configuration of the event database connection

DB is the database configuration
Default: ""Type: string

Database name


Default: ""Type: string

Database User name


Default: ""Type: string

Database Password of the user


Default: ""Type: string

Host address of database


Default: ""Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 0Type: integer

MaxConns is the maximum number of connections in the pool.


Configuration of the hash database connection
Default: "prover_db"Type: string

Database name


Default: "prover_user"Type: string

Database User name


Default: "prover_pass"Type: string

Database Password of the user


Default: "x1-state-db"Type: string

Host address of database


Default: "5432"Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 200Type: integer

MaxConns is the maximum number of connections in the pool.


State service configuration
Default: 0Type: integer

MaxCumulativeGasUsed is the max gas allowed per batch


Default: 0Type: integer

ChainID is the L2 ChainID provided by the Network Config


Type: array of object

ForkIdIntervals is the list of fork id intervals

Each item of this array must be:


Default: 0Type: integer

MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion


Default: "0s"Type: string

WaitOnResourceExhaustion is the time to wait before retrying a transaction because of resource exhaustion


Examples:

"1m"
+
"300ms"
+

Default: 0Type: integer

Batch number from which there is a forkid change (fork upgrade)


Default: 0Type: integer

New fork id to be used for batches greater than ForkUpgradeBatchNumber (fork upgrade)


DB is the database configuration
Default: "state_db"Type: string

Database name


Default: "state_user"Type: string

Database User name


Default: "state_password"Type: string

Database Password of the user


Default: "x1-state-db"Type: string

Host address of database


Default: "5432"Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 200Type: integer

MaxConns is the maximum number of connections in the pool.


Configuration for the batch constraints
Default: 300Type: integer

Default: 120000Type: integer

Default: 30000000Type: integer

Default: 2145Type: integer

Default: 252357Type: integer

Default: 135191Type: integer

Default: 236585Type: integer

Default: 236585Type: integer

Default: 473170Type: integer

Default: 7570538Type: integer

Default: 0Type: integer

MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit


Default: 0Type: integer

MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit


Default: 0Type: integer

MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit


\ No newline at end of file diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md index fd60c75839..02190a2ee9 100644 --- a/docs/config-file/node-config-doc.md +++ b/docs/config-file/node-config-doc.md @@ -23,10 +23,10 @@ | - [L2GasPriceSuggester](#L2GasPriceSuggester ) | No | object | No | - | Configuration of the gas price suggester service | | - [Executor](#Executor ) | No | object | No | - | Configuration of the executor service | | - [MTClient](#MTClient ) | No | object | No | - | Configuration of the merkle tree client service. Not use in the node, only for testing | -| - [StateDB](#StateDB ) | No | object | No | - | Configuration of the state database connection | | - [Metrics](#Metrics ) | No | object | No | - | Configuration of the metrics service, basically is where is going to publish the metrics | | - [EventLog](#EventLog ) | No | object | No | - | Configuration of the event database connection | | - [HashDB](#HashDB ) | No | object | No | - | Configuration of the hash database connection | +| - [State](#State ) | No | object | No | - | State service configuration | ## 1. `IsTrustedSequencer` @@ -426,6 +426,7 @@ MaxGasPriceLimit=0 | - [AccountQueue](#Pool_AccountQueue ) | No | integer | No | - | AccountQueue represents the maximum number of non-executable transaction slots permitted per account | | - [GlobalQueue](#Pool_GlobalQueue ) | No | integer | No | - | GlobalQueue represents the maximum number of non-executable transaction slots for all accounts | | - [FreeGasAddress](#Pool_FreeGasAddress ) | No | string | No | - | FreeGasAddress is the default free gas address | +| - [EffectiveGasPrice](#Pool_EffectiveGasPrice ) | No | object | No | - | EffectiveGasPrice is the config for the effective gas price calculation | ### 7.1. `Pool.FreeClaimGasLimit` @@ -743,26 +744,161 @@ GlobalQueue=1024 FreeGasAddress="0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" ``` +### 7.13. 
`[Pool.EffectiveGasPrice]` + +**Type:** : `object` +**Description:** EffectiveGasPrice is the config for the effective gas price calculation + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [Enabled](#Pool_EffectiveGasPrice_Enabled ) | No | boolean | No | - | Enabled is a flag to enable/disable the effective gas price | +| - [L1GasPriceFactor](#Pool_EffectiveGasPrice_L1GasPriceFactor ) | No | number | No | - | L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price | +| - [ByteGasCost](#Pool_EffectiveGasPrice_ByteGasCost ) | No | integer | No | - | ByteGasCost is the gas cost per byte that is not 0 | +| - [ZeroByteGasCost](#Pool_EffectiveGasPrice_ZeroByteGasCost ) | No | integer | No | - | ZeroByteGasCost is the gas cost per byte that is 0 | +| - [NetProfit](#Pool_EffectiveGasPrice_NetProfit ) | No | number | No | - | NetProfit is the profit margin to apply to the calculated breakEvenGasPrice | +| - [BreakEvenFactor](#Pool_EffectiveGasPrice_BreakEvenFactor ) | No | number | No | - | BreakEvenFactor is the factor to apply to the calculated breakevenGasPrice when comparing it with the gasPriceSigned of a tx | +| - [FinalDeviationPct](#Pool_EffectiveGasPrice_FinalDeviationPct ) | No | integer | No | - | FinalDeviationPct is the max allowed deviation percentage BreakEvenGasPrice on re-calculation | +| - [L2GasPriceSuggesterFactor](#Pool_EffectiveGasPrice_L2GasPriceSuggesterFactor ) | No | number | No | - | L2GasPriceSuggesterFactor is the factor to apply to L1 gas price to get the suggested L2 gas price used in the
calculations when the effective gas price is disabled (testing/metrics purposes) | + +#### 7.13.1. `Pool.EffectiveGasPrice.Enabled` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** Enabled is a flag to enable/disable the effective gas price + +**Example setting the default value** (false): +``` +[Pool.EffectiveGasPrice] +Enabled=false +``` + +#### 7.13.2. `Pool.EffectiveGasPrice.L1GasPriceFactor` + +**Type:** : `number` + +**Default:** `0.25` + +**Description:** L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price + +**Example setting the default value** (0.25): +``` +[Pool.EffectiveGasPrice] +L1GasPriceFactor=0.25 +``` + +#### 7.13.3. `Pool.EffectiveGasPrice.ByteGasCost` + +**Type:** : `integer` + +**Default:** `16` + +**Description:** ByteGasCost is the gas cost per byte that is not 0 + +**Example setting the default value** (16): +``` +[Pool.EffectiveGasPrice] +ByteGasCost=16 +``` + +#### 7.13.4. `Pool.EffectiveGasPrice.ZeroByteGasCost` + +**Type:** : `integer` + +**Default:** `4` + +**Description:** ZeroByteGasCost is the gas cost per byte that is 0 + +**Example setting the default value** (4): +``` +[Pool.EffectiveGasPrice] +ZeroByteGasCost=4 +``` + +#### 7.13.5. `Pool.EffectiveGasPrice.NetProfit` + +**Type:** : `number` + +**Default:** `1` + +**Description:** NetProfit is the profit margin to apply to the calculated breakEvenGasPrice + +**Example setting the default value** (1): +``` +[Pool.EffectiveGasPrice] +NetProfit=1 +``` + +#### 7.13.6. `Pool.EffectiveGasPrice.BreakEvenFactor` + +**Type:** : `number` + +**Default:** `1.1` + +**Description:** BreakEvenFactor is the factor to apply to the calculated breakevenGasPrice when comparing it with the gasPriceSigned of a tx + +**Example setting the default value** (1.1): +``` +[Pool.EffectiveGasPrice] +BreakEvenFactor=1.1 +``` + +#### 7.13.7. 
`Pool.EffectiveGasPrice.FinalDeviationPct` + +**Type:** : `integer` + +**Default:** `10` + +**Description:** FinalDeviationPct is the max allowed deviation percentage BreakEvenGasPrice on re-calculation + +**Example setting the default value** (10): +``` +[Pool.EffectiveGasPrice] +FinalDeviationPct=10 +``` + +#### 7.13.8. `Pool.EffectiveGasPrice.L2GasPriceSuggesterFactor` + +**Type:** : `number` + +**Default:** `0.5` + +**Description:** L2GasPriceSuggesterFactor is the factor to apply to L1 gas price to get the suggested L2 gas price used in the +calculations when the effective gas price is disabled (testing/metrics purposes) + +**Example setting the default value** (0.5): +``` +[Pool.EffectiveGasPrice] +L2GasPriceSuggesterFactor=0.5 +``` + ## 8. `[RPC]` **Type:** : `object` **Description:** Configuration for RPC service. THis one offers a extended Ethereum JSON-RPC API interface to interact with the node -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ---------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| - [Host](#RPC_Host ) | No | string | No | - | Host defines the network adapter that will be used to serve the HTTP requests | -| - [Port](#RPC_Port ) | No | integer | No | - | Port defines the port to serve the endpoints via HTTP | -| - [ReadTimeout](#RPC_ReadTimeout ) | No | string | No | - | Duration | -| - [WriteTimeout](#RPC_WriteTimeout ) | No | string | No | - | Duration | -| - [MaxRequestsPerIPAndSecond](#RPC_MaxRequestsPerIPAndSecond ) | No | number | No | - | MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second | -| - [SequencerNodeURI](#RPC_SequencerNodeURI ) | No | string | No | - | SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node | -| - [MaxCumulativeGasUsed](#RPC_MaxCumulativeGasUsed ) | No | integer | No | - | MaxCumulativeGasUsed is the max gas allowed per batch | -| - [WebSockets](#RPC_WebSockets ) | No | object | No | - | WebSockets configuration | -| - [EnableL2SuggestedGasPricePolling](#RPC_EnableL2SuggestedGasPricePolling ) | No | boolean | No | - | EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price. | -| - [TraceBatchUseHTTPS](#RPC_TraceBatchUseHTTPS ) | No | boolean | No | - | TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP)
to do the parallel requests to RPC.debug_traceTransaction endpoint | -| - [EnablePendingTransactionFilter](#RPC_EnablePendingTransactionFilter ) | No | boolean | No | - | EnablePendingTransactionFilter enables pending transaction filter that can support query L2 pending transaction | -| - [Nacos](#RPC_Nacos ) | No | object | No | - | Nacos configuration | -| - [NacosWs](#RPC_NacosWs ) | No | object | No | - | NacosWs configuration | +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [Host](#RPC_Host ) | No | string | No | - | Host defines the network adapter that will be used to serve the HTTP requests | +| - [Port](#RPC_Port ) | No | integer | No | - | Port defines the port to serve the endpoints via HTTP | +| - [ReadTimeout](#RPC_ReadTimeout ) | No | string | No | - | Duration | +| - [WriteTimeout](#RPC_WriteTimeout ) | No | string | No | - | Duration | +| - [MaxRequestsPerIPAndSecond](#RPC_MaxRequestsPerIPAndSecond ) | No | number | No | - | MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second | +| - [SequencerNodeURI](#RPC_SequencerNodeURI ) | No | string | No | - | SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node | +| - [MaxCumulativeGasUsed](#RPC_MaxCumulativeGasUsed ) | No | integer | No | - | MaxCumulativeGasUsed is the max gas allowed per batch | +| - [WebSockets](#RPC_WebSockets ) | No | object | No | - | WebSockets configuration | +| - [EnableL2SuggestedGasPricePolling](#RPC_EnableL2SuggestedGasPricePolling ) | No | boolean | No | - | EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price. | +| - [BatchRequestsEnabled](#RPC_BatchRequestsEnabled ) | No | boolean | No | - | BatchRequestsEnabled defines if the Batch requests are enabled or disabled | +| - [BatchRequestsLimit](#RPC_BatchRequestsLimit ) | No | integer | No | - | BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request | +| - [L2Coinbase](#RPC_L2Coinbase ) | No | array of integer | No | - | L2Coinbase defines which address is going to receive the fees | +| - [MaxLogsCount](#RPC_MaxLogsCount ) | No | integer | No | - | MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit | +| - [MaxLogsBlockRange](#RPC_MaxLogsBlockRange ) | No | integer | No | - | MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit | +| - [MaxNativeBlockHashBlockRange](#RPC_MaxNativeBlockHashBlockRange ) | No | integer | No | - | MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit | +| - [EnableHttpLog](#RPC_EnableHttpLog ) | No | boolean | No | - | EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server. | +| - [EnablePendingTransactionFilter](#RPC_EnablePendingTransactionFilter ) | No | boolean | No | - | EnablePendingTransactionFilter enables pending transaction filter that can support query L2 pending transaction | +| - [Nacos](#RPC_Nacos ) | No | object | No | - | Nacos configuration | +| - [NacosWs](#RPC_NacosWs ) | No | object | No | - | NacosWs configuration | ### 8.1. `RPC.Host` @@ -895,11 +1031,12 @@ MaxCumulativeGasUsed=0 **Type:** : `object` **Description:** WebSockets configuration -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ------------------------------------- | ------- | ------- | ---------- | ---------- | --------------------------------------------------------------------------- | -| - [Enabled](#RPC_WebSockets_Enabled ) | No | boolean | No | - | Enabled defines if the WebSocket requests are enabled or disabled | -| - [Host](#RPC_WebSockets_Host ) | No | string | No | - | Host defines the network adapter that will be used to serve the WS requests | -| - [Port](#RPC_WebSockets_Port ) | No | integer | No | - | Port defines the port to serve the endpoints via WS | +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ----------------------------------------- | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------------------- | +| - [Enabled](#RPC_WebSockets_Enabled ) | No | boolean | No | - | Enabled defines if the WebSocket requests are enabled or disabled | +| - [Host](#RPC_WebSockets_Host ) | No | string | No | - | Host defines the network adapter that will be used to serve the WS requests | +| - [Port](#RPC_WebSockets_Port ) | No | integer | No | - | Port defines the port to serve the endpoints via WS | +| - [ReadLimit](#RPC_WebSockets_ReadLimit ) | No | integer | No | - | ReadLimit defines the maximum size of a message read from the client (in bytes) | #### 8.8.1. 
`RPC.WebSockets.Enabled` @@ -943,6 +1080,20 @@ Host="0.0.0.0" Port=8546 ``` +#### 8.8.4. `RPC.WebSockets.ReadLimit` + +**Type:** : `integer` + +**Default:** `104857600` + +**Description:** ReadLimit defines the maximum size of a message read from the client (in bytes) + +**Example setting the default value** (104857600): +``` +[RPC.WebSockets] +ReadLimit=104857600 +``` + ### 8.9. `RPC.EnableL2SuggestedGasPricePolling` **Type:** : `boolean` @@ -957,22 +1108,100 @@ Port=8546 EnableL2SuggestedGasPricePolling=true ``` -### 8.10. `RPC.TraceBatchUseHTTPS` +### 8.10. `RPC.BatchRequestsEnabled` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** BatchRequestsEnabled defines if the Batch requests are enabled or disabled + +**Example setting the default value** (false): +``` +[RPC] +BatchRequestsEnabled=false +``` + +### 8.11. `RPC.BatchRequestsLimit` + +**Type:** : `integer` + +**Default:** `20` + +**Description:** BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request + +**Example setting the default value** (20): +``` +[RPC] +BatchRequestsLimit=20 +``` + +### 8.12. `RPC.L2Coinbase` + +**Type:** : `array of integer` +**Description:** L2Coinbase defines which address is going to receive the fees + +### 8.13. `RPC.MaxLogsCount` + +**Type:** : `integer` + +**Default:** `10000` + +**Description:** MaxLogsCount is a configuration to set the max number of logs that can be returned +in a single call to the state, if zero it means no limit + +**Example setting the default value** (10000): +``` +[RPC] +MaxLogsCount=10000 +``` + +### 8.14. `RPC.MaxLogsBlockRange` + +**Type:** : `integer` + +**Default:** `10000` + +**Description:** MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs +logs in a single call to the state, if zero it means no limit + +**Example setting the default value** (10000): +``` +[RPC] +MaxLogsBlockRange=10000 +``` + +### 8.15. 
`RPC.MaxNativeBlockHashBlockRange` + +**Type:** : `integer` + +**Default:** `60000` + +**Description:** MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying +native block hashes in a single call to the state, if zero it means no limit + +**Example setting the default value** (60000): +``` +[RPC] +MaxNativeBlockHashBlockRange=60000 +``` + +### 8.16. `RPC.EnableHttpLog` **Type:** : `boolean` **Default:** `true` -**Description:** TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP) -to do the parallel requests to RPC.debug_traceTransaction endpoint +**Description:** EnableHttpLog allows the user to enable or disable the logs related to the HTTP +requests to be captured by the server. **Example setting the default value** (true): ``` [RPC] -TraceBatchUseHTTPS=true +EnableHttpLog=true ``` -### 8.11. `RPC.EnablePendingTransactionFilter` +### 8.17. `RPC.EnablePendingTransactionFilter` **Type:** : `boolean` @@ -986,7 +1215,7 @@ TraceBatchUseHTTPS=true EnablePendingTransactionFilter=false ``` -### 8.12. `[RPC.Nacos]` +### 8.18. `[RPC.Nacos]` **Type:** : `object` **Description:** Nacos configuration @@ -998,7 +1227,7 @@ EnablePendingTransactionFilter=false | - [ApplicationName](#RPC_Nacos_ApplicationName ) | No | string | No | - | ApplicationName rest application name in nacos | | - [ExternalListenAddr](#RPC_Nacos_ExternalListenAddr ) | No | string | No | - | ExternalListenAddr Set the rest-server external ip and port, when it is launched by Docker | -#### 8.12.1. `RPC.Nacos.URLs` +#### 8.18.1. `RPC.Nacos.URLs` **Type:** : `string` @@ -1012,7 +1241,7 @@ EnablePendingTransactionFilter=false URLs="" ``` -#### 8.12.2. `RPC.Nacos.NamespaceId` +#### 8.18.2. `RPC.Nacos.NamespaceId` **Type:** : `string` @@ -1026,7 +1255,7 @@ URLs="" NamespaceId="" ``` -#### 8.12.3. `RPC.Nacos.ApplicationName` +#### 8.18.3. 
`RPC.Nacos.ApplicationName` **Type:** : `string` @@ -1040,7 +1269,7 @@ NamespaceId="" ApplicationName="" ``` -#### 8.12.4. `RPC.Nacos.ExternalListenAddr` +#### 8.18.4. `RPC.Nacos.ExternalListenAddr` **Type:** : `string` @@ -1054,7 +1283,7 @@ ApplicationName="" ExternalListenAddr="" ``` -### 8.13. `[RPC.NacosWs]` +### 8.19. `[RPC.NacosWs]` **Type:** : `object` **Description:** NacosWs configuration @@ -1066,7 +1295,7 @@ ExternalListenAddr="" | - [ApplicationName](#RPC_NacosWs_ApplicationName ) | No | string | No | - | ApplicationName rest application name in nacos | | - [ExternalListenAddr](#RPC_NacosWs_ExternalListenAddr ) | No | string | No | - | ExternalListenAddr Set the rest-server external ip and port, when it is launched by Docker | -#### 8.13.1. `RPC.NacosWs.URLs` +#### 8.19.1. `RPC.NacosWs.URLs` **Type:** : `string` @@ -1080,7 +1309,7 @@ ExternalListenAddr="" URLs="" ``` -#### 8.13.2. `RPC.NacosWs.NamespaceId` +#### 8.19.2. `RPC.NacosWs.NamespaceId` **Type:** : `string` @@ -1094,7 +1323,7 @@ URLs="" NamespaceId="" ``` -#### 8.13.3. `RPC.NacosWs.ApplicationName` +#### 8.19.3. `RPC.NacosWs.ApplicationName` **Type:** : `string` @@ -1108,7 +1337,7 @@ NamespaceId="" ApplicationName="" ``` -#### 8.13.4. `RPC.NacosWs.ExternalListenAddr` +#### 8.19.4. `RPC.NacosWs.ExternalListenAddr` **Type:** : `string` @@ -1128,11 +1357,13 @@ ExternalListenAddr="" **Description:** Configuration of service `Syncrhonizer`. 
For this service is also really important the value of `IsTrustedSequencer` because depending of this values is going to ask to a trusted node for trusted transactions or not -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ----------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------------ | -| - [SyncInterval](#Synchronizer_SyncInterval ) | No | string | No | - | Duration | -| - [SyncChunkSize](#Synchronizer_SyncChunkSize ) | No | integer | No | - | SyncChunkSize is the number of blocks to sync on each chunk | -| - [TrustedSequencerURL](#Synchronizer_TrustedSequencerURL ) | No | string | No | - | TrustedSequencerURL is the rpc url to connect and sync the trusted state | +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ----------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [SyncInterval](#Synchronizer_SyncInterval ) | No | string | No | - | Duration | +| - [SyncChunkSize](#Synchronizer_SyncChunkSize ) | No | integer | No | - | SyncChunkSize is the number of blocks to sync on each chunk | +| - [TrustedSequencerURL](#Synchronizer_TrustedSequencerURL ) | No | string | No | - | TrustedSequencerURL is the rpc url to connect and sync the trusted state | +| - [L1SynchronizationMode](#Synchronizer_L1SynchronizationMode ) | No | enum (of string) | No | - | L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute | +| - [L1ParallelSynchronization](#Synchronizer_L1ParallelSynchronization ) | No | object | No | - | L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel') | ### 9.1. `Synchronizer.SyncInterval` @@ -1188,82 +1419,84 @@ SyncChunkSize=100 TrustedSequencerURL="" ``` -## 10. `[Sequencer]` +### 9.4. `Synchronizer.L1SynchronizationMode` -**Type:** : `object` -**Description:** Configuration of the sequencer service +**Type:** : `enum (of string)` -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ---------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -| - [WaitPeriodPoolIsEmpty](#Sequencer_WaitPeriodPoolIsEmpty ) | No | string | No | - | Duration | -| - [BlocksAmountForTxsToBeDeleted](#Sequencer_BlocksAmountForTxsToBeDeleted ) | No | integer | No | - | BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool | -| - [FrequencyToCheckTxsForDelete](#Sequencer_FrequencyToCheckTxsForDelete ) | No | string | No | - | Duration | -| - [MaxTxsPerBatch](#Sequencer_MaxTxsPerBatch ) | No | integer | No | - | MaxTxsPerBatch is the maximum amount of transactions in the batch | -| - [MaxBatchBytesSize](#Sequencer_MaxBatchBytesSize ) | No | integer | No | - | MaxBatchBytesSize is the maximum batch size in bytes
(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1) | -| - [MaxCumulativeGasUsed](#Sequencer_MaxCumulativeGasUsed ) | No | integer | No | - | MaxCumulativeGasUsed is max gas amount used by batch | -| - [MaxKeccakHashes](#Sequencer_MaxKeccakHashes ) | No | integer | No | - | MaxKeccakHashes is max keccak hashes used by batch | -| - [MaxPoseidonHashes](#Sequencer_MaxPoseidonHashes ) | No | integer | No | - | MaxPoseidonHashes is max poseidon hashes batch can handle | -| - [MaxPoseidonPaddings](#Sequencer_MaxPoseidonPaddings ) | No | integer | No | - | MaxPoseidonPaddings is max poseidon paddings batch can handle | -| - [MaxMemAligns](#Sequencer_MaxMemAligns ) | No | integer | No | - | MaxMemAligns is max mem aligns batch can handle | -| - [MaxArithmetics](#Sequencer_MaxArithmetics ) | No | integer | No | - | MaxArithmetics is max arithmetics batch can handle | -| - [MaxBinaries](#Sequencer_MaxBinaries ) | No | integer | No | - | MaxBinaries is max binaries batch can handle | -| - [MaxSteps](#Sequencer_MaxSteps ) | No | integer | No | - | MaxSteps is max steps batch can handle | -| - [TxLifetimeCheckTimeout](#Sequencer_TxLifetimeCheckTimeout ) | No | string | No | - | Duration | -| - [MaxTxLifetime](#Sequencer_MaxTxLifetime ) | No | string | No | - | Duration | -| - [Finalizer](#Sequencer_Finalizer ) | No | object | No | - | Finalizer's specific config properties | -| - [DBManager](#Sequencer_DBManager ) | No | object | No | - | DBManager's specific config properties | -| - [EffectiveGasPrice](#Sequencer_EffectiveGasPrice ) | No | object | No | - | EffectiveGasPrice is the config for the gas price | +**Default:** `"sequential"` -### 10.1. `Sequencer.WaitPeriodPoolIsEmpty` +**Description:** L1SynchronizationMode define how to synchronize with L1: +- parallel: Request data to L1 in parallel, and process sequentially. 
The advantage is that executor is not blocked waiting for L1 data +- sequential: Request data to L1 and execute -**Title:** Duration +**Example setting the default value** ("sequential"): +``` +[Synchronizer] +L1SynchronizationMode="sequential" +``` -**Type:** : `string` +Must be one of: +* "sequential" +* "parallel" -**Default:** `"1s"` +### 9.5. `[Synchronizer.L1ParallelSynchronization]` -**Description:** WaitPeriodPoolIsEmpty is the time the sequencer waits until -trying to add new txs to the state +**Type:** : `object` +**Description:** L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel') + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [MaxClients](#Synchronizer_L1ParallelSynchronization_MaxClients ) | No | integer | No | - | MaxClients Number of clients used to synchronize with L1 | +| - [MaxPendingNoProcessedBlocks](#Synchronizer_L1ParallelSynchronization_MaxPendingNoProcessedBlocks ) | No | integer | No | - | MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients | +| - [RequestLastBlockPeriod](#Synchronizer_L1ParallelSynchronization_RequestLastBlockPeriod ) | No | string | No | - | Duration | +| - [PerformanceWarning](#Synchronizer_L1ParallelSynchronization_PerformanceWarning ) | No | object | No | - | Consumer Configuration for the consumer of rollup information from L1 | +| - [RequestLastBlockTimeout](#Synchronizer_L1ParallelSynchronization_RequestLastBlockTimeout ) | No | string | No | - | Duration | +| - [RequestLastBlockMaxRetries](#Synchronizer_L1ParallelSynchronization_RequestLastBlockMaxRetries ) | No | integer | No | - | RequestLastBlockMaxRetries Max number of retries to request LastBlock On L1 | +| - [StatisticsPeriod](#Synchronizer_L1ParallelSynchronization_StatisticsPeriod ) | No | string | No | - | Duration | +| - [TimeOutMainLoop](#Synchronizer_L1ParallelSynchronization_TimeOutMainLoop ) | No | string | No | - | Duration | +| - [RollupInfoRetriesSpacing](#Synchronizer_L1ParallelSynchronization_RollupInfoRetriesSpacing ) | No | string | No | - | Duration | +| - [FallbackToSequentialModeOnSynchronized](#Synchronizer_L1ParallelSynchronization_FallbackToSequentialModeOnSynchronized ) | No | boolean | No | - | FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized | + +#### 9.5.1. `Synchronizer.L1ParallelSynchronization.MaxClients` -**Examples:** +**Type:** : `integer` -```json -"1m" -``` +**Default:** `10` -```json -"300ms" -``` +**Description:** MaxClients Number of clients used to synchronize with L1 -**Example setting the default value** ("1s"): +**Example setting the default value** (10): ``` -[Sequencer] -WaitPeriodPoolIsEmpty="1s" +[Synchronizer.L1ParallelSynchronization] +MaxClients=10 ``` -### 10.2. `Sequencer.BlocksAmountForTxsToBeDeleted` +#### 9.5.2. 
`Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks` **Type:** : `integer` -**Default:** `100` +**Default:** `25` -**Description:** BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool +**Description:** MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync +sugested twice of NumberOfParallelOfEthereumClients -**Example setting the default value** (100): +**Example setting the default value** (25): ``` -[Sequencer] -BlocksAmountForTxsToBeDeleted=100 +[Synchronizer.L1ParallelSynchronization] +MaxPendingNoProcessedBlocks=25 ``` -### 10.3. `Sequencer.FrequencyToCheckTxsForDelete` +#### 9.5.3. `Synchronizer.L1ParallelSynchronization.RequestLastBlockPeriod` **Title:** Duration **Type:** : `string` -**Default:** `"12h0m0s"` +**Default:** `"5s"` -**Description:** FrequencyToCheckTxsForDelete is frequency with which txs will be checked for deleting +**Description:** RequestLastBlockPeriod is the time to wait to request the +last block to L1 to known if we need to retrieve more data. +This value only apply when the system is synchronized **Examples:** @@ -1275,154 +1508,282 @@ BlocksAmountForTxsToBeDeleted=100 "300ms" ``` -**Example setting the default value** ("12h0m0s"): +**Example setting the default value** ("5s"): ``` -[Sequencer] -FrequencyToCheckTxsForDelete="12h0m0s" +[Synchronizer.L1ParallelSynchronization] +RequestLastBlockPeriod="5s" ``` -### 10.4. `Sequencer.MaxTxsPerBatch` +#### 9.5.4. 
`[Synchronizer.L1ParallelSynchronization.PerformanceWarning]` -**Type:** : `integer` +**Type:** : `object` +**Description:** Consumer Configuration for the consumer of rollup information from L1 -**Default:** `300` +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------------------------------------------------ | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------ | +| - [AceptableInacctivityTime](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_AceptableInacctivityTime ) | No | string | No | - | Duration | +| - [ApplyAfterNumRollupReceived](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_ApplyAfterNumRollupReceived ) | No | integer | No | - | ApplyAfterNumRollupReceived is the number of iterations to
start checking the time waiting for new rollup info data | -**Description:** MaxTxsPerBatch is the maximum amount of transactions in the batch +##### 9.5.4.1. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime` -**Example setting the default value** (300): -``` -[Sequencer] -MaxTxsPerBatch=300 -``` +**Title:** Duration -### 10.5. `Sequencer.MaxBatchBytesSize` +**Type:** : `string` -**Type:** : `integer` +**Default:** `"5s"` -**Default:** `120000` +**Description:** AceptableInacctivityTime is the expected maximum time that the consumer +could wait until new data is produced. If the time is greater it emmit a log to warn about +that. The idea is keep working the consumer as much as possible, so if the producer is not +fast enought then you could increse the number of parallel clients to sync with L1 -**Description:** MaxBatchBytesSize is the maximum batch size in bytes -(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1) +**Examples:** -**Example setting the default value** (120000): -``` -[Sequencer] -MaxBatchBytesSize=120000 +```json +"1m" ``` -### 10.6. `Sequencer.MaxCumulativeGasUsed` - -**Type:** : `integer` - -**Default:** `30000000` - -**Description:** MaxCumulativeGasUsed is max gas amount used by batch +```json +"300ms" +``` -**Example setting the default value** (30000000): +**Example setting the default value** ("5s"): ``` -[Sequencer] -MaxCumulativeGasUsed=30000000 +[Synchronizer.L1ParallelSynchronization.PerformanceWarning] +AceptableInacctivityTime="5s" ``` -### 10.7. `Sequencer.MaxKeccakHashes` +##### 9.5.4.2. 
`Synchronizer.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived` **Type:** : `integer` -**Default:** `2145` +**Default:** `10` -**Description:** MaxKeccakHashes is max keccak hashes used by batch +**Description:** ApplyAfterNumRollupReceived is the number of iterations to +start checking the time waiting for new rollup info data -**Example setting the default value** (2145): +**Example setting the default value** (10): ``` -[Sequencer] -MaxKeccakHashes=2145 +[Synchronizer.L1ParallelSynchronization.PerformanceWarning] +ApplyAfterNumRollupReceived=10 ``` -### 10.8. `Sequencer.MaxPoseidonHashes` - -**Type:** : `integer` +#### 9.5.5. `Synchronizer.L1ParallelSynchronization.RequestLastBlockTimeout` -**Default:** `252357` +**Title:** Duration -**Description:** MaxPoseidonHashes is max poseidon hashes batch can handle +**Type:** : `string` -**Example setting the default value** (252357): -``` -[Sequencer] -MaxPoseidonHashes=252357 -``` +**Default:** `"5s"` -### 10.9. `Sequencer.MaxPoseidonPaddings` +**Description:** RequestLastBlockTimeout Timeout for request LastBlock On L1 -**Type:** : `integer` +**Examples:** -**Default:** `135191` +```json +"1m" +``` -**Description:** MaxPoseidonPaddings is max poseidon paddings batch can handle +```json +"300ms" +``` -**Example setting the default value** (135191): +**Example setting the default value** ("5s"): ``` -[Sequencer] -MaxPoseidonPaddings=135191 +[Synchronizer.L1ParallelSynchronization] +RequestLastBlockTimeout="5s" ``` -### 10.10. `Sequencer.MaxMemAligns` +#### 9.5.6. 
`Synchronizer.L1ParallelSynchronization.RequestLastBlockMaxRetries` **Type:** : `integer` -**Default:** `236585` +**Default:** `3` -**Description:** MaxMemAligns is max mem aligns batch can handle +**Description:** RequestLastBlockMaxRetries Max number of retries to request LastBlock On L1 -**Example setting the default value** (236585): +**Example setting the default value** (3): ``` -[Sequencer] -MaxMemAligns=236585 +[Synchronizer.L1ParallelSynchronization] +RequestLastBlockMaxRetries=3 ``` -### 10.11. `Sequencer.MaxArithmetics` +#### 9.5.7. `Synchronizer.L1ParallelSynchronization.StatisticsPeriod` -**Type:** : `integer` +**Title:** Duration -**Default:** `236585` +**Type:** : `string` -**Description:** MaxArithmetics is max arithmetics batch can handle +**Default:** `"5m0s"` -**Example setting the default value** (236585): +**Description:** StatisticsPeriod how ofter show a log with statistics (0 is disabled) + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5m0s"): +``` +[Synchronizer.L1ParallelSynchronization] +StatisticsPeriod="5m0s" +``` + +#### 9.5.8. `Synchronizer.L1ParallelSynchronization.TimeOutMainLoop` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5m0s"` + +**Description:** TimeOutMainLoop is the timeout for the main loop of the L1 synchronizer when is not updated + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5m0s"): +``` +[Synchronizer.L1ParallelSynchronization] +TimeOutMainLoop="5m0s" +``` + +#### 9.5.9. 
`Synchronizer.L1ParallelSynchronization.RollupInfoRetriesSpacing` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Description:** RollupInfoRetriesSpacing is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1 + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[Synchronizer.L1ParallelSynchronization] +RollupInfoRetriesSpacing="5s" +``` + +#### 9.5.10. `Synchronizer.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized + +**Example setting the default value** (false): +``` +[Synchronizer.L1ParallelSynchronization] +FallbackToSequentialModeOnSynchronized=false +``` + +## 10. `[Sequencer]` + +**Type:** : `object` +**Description:** Configuration of the sequencer service + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | -------------------------------------------------------------------------------------------- | +| - [WaitPeriodPoolIsEmpty](#Sequencer_WaitPeriodPoolIsEmpty ) | No | string | No | - | Duration | +| - [BlocksAmountForTxsToBeDeleted](#Sequencer_BlocksAmountForTxsToBeDeleted ) | No | integer | No | - | BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool | +| - [FrequencyToCheckTxsForDelete](#Sequencer_FrequencyToCheckTxsForDelete ) | No | string | No | - | Duration | +| - [TxLifetimeCheckTimeout](#Sequencer_TxLifetimeCheckTimeout ) | No | string | No | - | Duration | +| - [MaxTxLifetime](#Sequencer_MaxTxLifetime ) | No | string | No | - | Duration | +| - [Finalizer](#Sequencer_Finalizer ) | No | object | No | - | Finalizer's 
specific config properties | +| - [DBManager](#Sequencer_DBManager ) | No | object | No | - | DBManager's specific config properties | +| - [StreamServer](#Sequencer_StreamServer ) | No | object | No | - | StreamServerCfg is the config for the stream server | + +### 10.1. `Sequencer.WaitPeriodPoolIsEmpty` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1s"` + +**Description:** WaitPeriodPoolIsEmpty is the time the sequencer waits until +trying to add new txs to the state + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1s"): ``` [Sequencer] -MaxArithmetics=236585 +WaitPeriodPoolIsEmpty="1s" ``` -### 10.12. `Sequencer.MaxBinaries` +### 10.2. `Sequencer.BlocksAmountForTxsToBeDeleted` **Type:** : `integer` -**Default:** `473170` +**Default:** `100` -**Description:** MaxBinaries is max binaries batch can handle +**Description:** BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool -**Example setting the default value** (473170): +**Example setting the default value** (100): ``` [Sequencer] -MaxBinaries=473170 +BlocksAmountForTxsToBeDeleted=100 ``` -### 10.13. `Sequencer.MaxSteps` +### 10.3. `Sequencer.FrequencyToCheckTxsForDelete` -**Type:** : `integer` +**Title:** Duration -**Default:** `7570538` +**Type:** : `string` -**Description:** MaxSteps is max steps batch can handle +**Default:** `"12h0m0s"` -**Example setting the default value** (7570538): +**Description:** FrequencyToCheckTxsForDelete is frequency with which txs will be checked for deleting + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("12h0m0s"): ``` [Sequencer] -MaxSteps=7570538 +FrequencyToCheckTxsForDelete="12h0m0s" ``` -### 10.14. `Sequencer.TxLifetimeCheckTimeout` +### 10.4. `Sequencer.TxLifetimeCheckTimeout` **Title:** Duration @@ -1448,7 +1809,7 @@ MaxSteps=7570538 TxLifetimeCheckTimeout="10m0s" ``` -### 10.15. 
`Sequencer.MaxTxLifetime` +### 10.5. `Sequencer.MaxTxLifetime` **Title:** Duration @@ -1474,7 +1835,7 @@ TxLifetimeCheckTimeout="10m0s" MaxTxLifetime="3h0m0s" ``` -### 10.16. `[Sequencer.Finalizer]` +### 10.6. `[Sequencer.Finalizer]` **Type:** : `object` **Description:** Finalizer's specific config properties @@ -1494,7 +1855,7 @@ MaxTxLifetime="3h0m0s" | - [StopSequencerOnBatchNum](#Sequencer_Finalizer_StopSequencerOnBatchNum ) | No | integer | No | - | StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number | | - [SequentialReprocessFullBatch](#Sequencer_Finalizer_SequentialReprocessFullBatch ) | No | boolean | No | - | SequentialReprocessFullBatch indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel) | -#### 10.16.1. `Sequencer.Finalizer.GERDeadlineTimeout` +#### 10.6.1. `Sequencer.Finalizer.GERDeadlineTimeout` **Title:** Duration @@ -1520,7 +1881,7 @@ MaxTxLifetime="3h0m0s" GERDeadlineTimeout="5s" ``` -#### 10.16.2. `Sequencer.Finalizer.ForcedBatchDeadlineTimeout` +#### 10.6.2. `Sequencer.Finalizer.ForcedBatchDeadlineTimeout` **Title:** Duration @@ -1546,7 +1907,7 @@ GERDeadlineTimeout="5s" ForcedBatchDeadlineTimeout="1m0s" ``` -#### 10.16.3. `Sequencer.Finalizer.SleepDuration` +#### 10.6.3. `Sequencer.Finalizer.SleepDuration` **Title:** Duration @@ -1572,7 +1933,7 @@ ForcedBatchDeadlineTimeout="1m0s" SleepDuration="100ms" ``` -#### 10.16.4. `Sequencer.Finalizer.ResourcePercentageToCloseBatch` +#### 10.6.4. `Sequencer.Finalizer.ResourcePercentageToCloseBatch` **Type:** : `integer` @@ -1586,7 +1947,7 @@ SleepDuration="100ms" ResourcePercentageToCloseBatch=10 ``` -#### 10.16.5. `Sequencer.Finalizer.GERFinalityNumberOfBlocks` +#### 10.6.5. `Sequencer.Finalizer.GERFinalityNumberOfBlocks` **Type:** : `integer` @@ -1600,7 +1961,7 @@ ResourcePercentageToCloseBatch=10 GERFinalityNumberOfBlocks=64 ``` -#### 10.16.6. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingL1Timeout` +#### 10.6.6. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingL1Timeout` **Title:** Duration @@ -1626,7 +1987,7 @@ GERFinalityNumberOfBlocks=64 ClosingSignalsManagerWaitForCheckingL1Timeout="10s" ``` -#### 10.16.7. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingGER` +#### 10.6.7. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingGER` **Title:** Duration @@ -1652,7 +2013,7 @@ ClosingSignalsManagerWaitForCheckingL1Timeout="10s" ClosingSignalsManagerWaitForCheckingGER="10s" ``` -#### 10.16.8. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingForcedBatches` +#### 10.6.8. 
`Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingForcedBatches` **Title:** Duration @@ -1678,7 +2039,7 @@ ClosingSignalsManagerWaitForCheckingGER="10s" ClosingSignalsManagerWaitForCheckingForcedBatches="10s" ``` -#### 10.16.9. `Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks` +#### 10.6.9. `Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks` **Type:** : `integer` @@ -1692,7 +2053,7 @@ ClosingSignalsManagerWaitForCheckingForcedBatches="10s" ForcedBatchesFinalityNumberOfBlocks=64 ``` -#### 10.16.10. `Sequencer.Finalizer.TimestampResolution` +#### 10.6.10. `Sequencer.Finalizer.TimestampResolution` **Title:** Duration @@ -1718,7 +2079,7 @@ ForcedBatchesFinalityNumberOfBlocks=64 TimestampResolution="10s" ``` -#### 10.16.11. `Sequencer.Finalizer.StopSequencerOnBatchNum` +#### 10.6.11. `Sequencer.Finalizer.StopSequencerOnBatchNum` **Type:** : `integer` @@ -1732,7 +2093,7 @@ TimestampResolution="10s" StopSequencerOnBatchNum=0 ``` -#### 10.16.12. `Sequencer.Finalizer.SequentialReprocessFullBatch` +#### 10.6.12. `Sequencer.Finalizer.SequentialReprocessFullBatch` **Type:** : `boolean` @@ -1747,7 +2108,7 @@ sequential way (instead than in parallel) SequentialReprocessFullBatch=false ``` -### 10.17. `[Sequencer.DBManager]` +### 10.7. `[Sequencer.DBManager]` **Type:** : `object` **Description:** DBManager's specific config properties @@ -1757,7 +2118,7 @@ SequentialReprocessFullBatch=false | - [PoolRetrievalInterval](#Sequencer_DBManager_PoolRetrievalInterval ) | No | string | No | - | Duration | | - [L2ReorgRetrievalInterval](#Sequencer_DBManager_L2ReorgRetrievalInterval ) | No | string | No | - | Duration | -#### 10.17.1. `Sequencer.DBManager.PoolRetrievalInterval` +#### 10.7.1. `Sequencer.DBManager.PoolRetrievalInterval` **Title:** Duration @@ -1781,7 +2142,7 @@ SequentialReprocessFullBatch=false PoolRetrievalInterval="500ms" ``` -#### 10.17.2. `Sequencer.DBManager.L2ReorgRetrievalInterval` +#### 10.7.2. 
`Sequencer.DBManager.L2ReorgRetrievalInterval` **Title:** Duration @@ -1805,121 +2166,129 @@ PoolRetrievalInterval="500ms" L2ReorgRetrievalInterval="5s" ``` -### 10.18. `[Sequencer.EffectiveGasPrice]` +### 10.8. `[Sequencer.StreamServer]` **Type:** : `object` -**Description:** EffectiveGasPrice is the config for the gas price +**Description:** StreamServerCfg is the config for the stream server -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ------------------------------------------------------------------------------------------------------------------ | ------- | ------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------------------- | -| - [MaxBreakEvenGasPriceDeviationPercentage](#Sequencer_EffectiveGasPrice_MaxBreakEvenGasPriceDeviationPercentage ) | No | integer | No | - | MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation | -| - [L1GasPriceFactor](#Sequencer_EffectiveGasPrice_L1GasPriceFactor ) | No | number | No | - | L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price | -| - [ByteGasCost](#Sequencer_EffectiveGasPrice_ByteGasCost ) | No | integer | No | - | ByteGasCost is the gas cost per byte | -| - [MarginFactor](#Sequencer_EffectiveGasPrice_MarginFactor ) | No | number | No | - | MarginFactor is the margin factor percentage to be added to the L2 min gas price | -| - [Enabled](#Sequencer_EffectiveGasPrice_Enabled ) | No | boolean | No | - | Enabled is a flag to enable/disable the effective gas price | -| - [DefaultMinGasPriceAllowed](#Sequencer_EffectiveGasPrice_DefaultMinGasPriceAllowed ) | No | integer | No | - | DefaultMinGasPriceAllowed is the default min gas price to suggest
This value is assigned from [Pool].DefaultMinGasPriceAllowed | +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ----------------------------------------------- | ------- | ------- | ---------- | ---------- | ----------------------------------------------------- | +| - [Port](#Sequencer_StreamServer_Port ) | No | integer | No | - | Port to listen on | +| - [Filename](#Sequencer_StreamServer_Filename ) | No | string | No | - | Filename of the binary data file | +| - [Enabled](#Sequencer_StreamServer_Enabled ) | No | boolean | No | - | Enabled is a flag to enable/disable the data streamer | +| - [Log](#Sequencer_StreamServer_Log ) | No | object | No | - | Log is the log configuration | -#### 10.18.1. `Sequencer.EffectiveGasPrice.MaxBreakEvenGasPriceDeviationPercentage` +#### 10.8.1. `Sequencer.StreamServer.Port` **Type:** : `integer` -**Default:** `10` +**Default:** `0` -**Description:** MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation +**Description:** Port to listen on -**Example setting the default value** (10): +**Example setting the default value** (0): ``` -[Sequencer.EffectiveGasPrice] -MaxBreakEvenGasPriceDeviationPercentage=10 +[Sequencer.StreamServer] +Port=0 ``` -#### 10.18.2. `Sequencer.EffectiveGasPrice.L1GasPriceFactor` +#### 10.8.2. `Sequencer.StreamServer.Filename` -**Type:** : `number` +**Type:** : `string` -**Default:** `0.25` +**Default:** `""` -**Description:** L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price +**Description:** Filename of the binary data file -**Example setting the default value** (0.25): +**Example setting the default value** (""): ``` -[Sequencer.EffectiveGasPrice] -L1GasPriceFactor=0.25 +[Sequencer.StreamServer] +Filename="" ``` -#### 10.18.3. `Sequencer.EffectiveGasPrice.ByteGasCost` +#### 10.8.3. 
`Sequencer.StreamServer.Enabled` -**Type:** : `integer` +**Type:** : `boolean` -**Default:** `16` +**Default:** `false` -**Description:** ByteGasCost is the gas cost per byte +**Description:** Enabled is a flag to enable/disable the data streamer -**Example setting the default value** (16): +**Example setting the default value** (false): ``` -[Sequencer.EffectiveGasPrice] -ByteGasCost=16 +[Sequencer.StreamServer] +Enabled=false ``` -#### 10.18.4. `Sequencer.EffectiveGasPrice.MarginFactor` +#### 10.8.4. `[Sequencer.StreamServer.Log]` -**Type:** : `number` +**Type:** : `object` +**Description:** Log is the log configuration -**Default:** `1` +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ----------------- | +| - [Environment](#Sequencer_StreamServer_Log_Environment ) | No | enum (of string) | No | - | - | +| - [Level](#Sequencer_StreamServer_Log_Level ) | No | enum (of string) | No | - | - | +| - [Outputs](#Sequencer_StreamServer_Log_Outputs ) | No | array of string | No | - | - | -**Description:** MarginFactor is the margin factor percentage to be added to the L2 min gas price +##### 10.8.4.1. `Sequencer.StreamServer.Log.Environment` -**Example setting the default value** (1): +**Type:** : `enum (of string)` + +**Default:** `""` + +**Example setting the default value** (""): ``` -[Sequencer.EffectiveGasPrice] -MarginFactor=1 +[Sequencer.StreamServer.Log] +Environment="" ``` -#### 10.18.5. `Sequencer.EffectiveGasPrice.Enabled` +Must be one of: +* "production" +* "development" -**Type:** : `boolean` +##### 10.8.4.2. 
`Sequencer.StreamServer.Log.Level` -**Default:** `false` +**Type:** : `enum (of string)` -**Description:** Enabled is a flag to enable/disable the effective gas price +**Default:** `""` -**Example setting the default value** (false): +**Example setting the default value** (""): ``` -[Sequencer.EffectiveGasPrice] -Enabled=false +[Sequencer.StreamServer.Log] +Level="" ``` -#### 10.18.6. `Sequencer.EffectiveGasPrice.DefaultMinGasPriceAllowed` - -**Type:** : `integer` - -**Default:** `0` +Must be one of: +* "debug" +* "info" +* "warn" +* "error" +* "dpanic" +* "panic" +* "fatal" -**Description:** DefaultMinGasPriceAllowed is the default min gas price to suggest -This value is assigned from [Pool].DefaultMinGasPriceAllowed +##### 10.8.4.3. `Sequencer.StreamServer.Log.Outputs` -**Example setting the default value** (0): -``` -[Sequencer.EffectiveGasPrice] -DefaultMinGasPriceAllowed=0 -``` +**Type:** : `array of string` ## 11. `[SequenceSender]` **Type:** : `object` **Description:** Configuration of the sequence sender service -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ------------------------------------------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| - [WaitPeriodSendSequence](#SequenceSender_WaitPeriodSendSequence ) | No | string | No | - | Duration | -| - [LastBatchVirtualizationTimeMaxWaitPeriod](#SequenceSender_LastBatchVirtualizationTimeMaxWaitPeriod ) | No | string | No | - | Duration | -| - [MaxTxSizeForL1](#SequenceSender_MaxTxSizeForL1 ) | No | integer | No | - | MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
non-trivial consequences: larger transactions than 128KB are significantly harder and
more expensive to propagate; larger transactions also take more resources
to validate whether they fit into the pool or not. | -| - [MaxBatchesForL1](#SequenceSender_MaxBatchesForL1 ) | No | integer | No | - | MaxBatchesForL1 is the maximum amount of batches to be sequenced in a single L1 tx | -| - [SenderAddress](#SequenceSender_SenderAddress ) | No | array of integer | No | - | SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs | -| - [L2Coinbase](#SequenceSender_L2Coinbase ) | No | array of integer | No | - | L2Coinbase defines which addess is going to receive the fees | -| - [PrivateKey](#SequenceSender_PrivateKey ) | No | object | No | - | PrivateKey defines all the key store files that are going
to be read in order to provide the private keys to sign the L1 txs | -| - [ForkUpgradeBatchNumber](#SequenceSender_ForkUpgradeBatchNumber ) | No | integer | No | - | Batch number where there is a forkid change (fork upgrade) | -| - [UseValidium](#SequenceSender_UseValidium ) | No | boolean | No | - | UseValidium is a flag to enable/disable the use of validium | +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [WaitPeriodSendSequence](#SequenceSender_WaitPeriodSendSequence ) | No | string | No | - | Duration | +| - [LastBatchVirtualizationTimeMaxWaitPeriod](#SequenceSender_LastBatchVirtualizationTimeMaxWaitPeriod ) | No | string | No | - | Duration | +| - [MaxTxSizeForL1](#SequenceSender_MaxTxSizeForL1 ) | No | integer | No | - | // MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
// non-trivial consequences: larger transactions than 128KB are significantly harder and
// more expensive to propagate; larger transactions also take more resources
// to validate whether they fit into the pool or not. | +| - [MaxBatchesForL1](#SequenceSender_MaxBatchesForL1 ) | No | integer | No | - | MaxBatchesForL1 is the maximum amount of batches to be sequenced in a single L1 tx | +| - [SenderAddress](#SequenceSender_SenderAddress ) | No | array of integer | No | - | SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs | +| - [L2Coinbase](#SequenceSender_L2Coinbase ) | No | array of integer | No | - | L2Coinbase defines which address is going to receive the fees | +| - [PrivateKey](#SequenceSender_PrivateKey ) | No | object | No | - | PrivateKey defines all the key store files that are going
to be read in order to provide the private keys to sign the L1 txs | +| - [ForkUpgradeBatchNumber](#SequenceSender_ForkUpgradeBatchNumber ) | No | integer | No | - | Batch number where there is a forkid change (fork upgrade) | +| - [UseValidium](#SequenceSender_UseValidium ) | No | boolean | No | - | UseValidium is a flag to enable/disable the use of validium | +| - [GasOffset](#SequenceSender_GasOffset ) | No | integer | No | - | GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.

ex:
gas estimation: 1000
gas offset: 100
final gas: 1100 | ### 11.1. `SequenceSender.WaitPeriodSendSequence` @@ -1980,10 +2349,10 @@ LastBatchVirtualizationTimeMaxWaitPeriod="5s" **Default:** `0` -**Description:** MaxTxSizeForL1 is the maximum size a single transaction can have. This field has -non-trivial consequences: larger transactions than 128KB are significantly harder and -more expensive to propagate; larger transactions also take more resources -to validate whether they fit into the pool or not. +**Description:** // MaxTxSizeForL1 is the maximum size a single transaction can have. This field has +// non-trivial consequences: larger transactions than 128KB are significantly harder and +// more expensive to propagate; larger transactions also take more resources +// to validate whether they fit into the pool or not. **Example setting the default value** (0): ``` @@ -2017,7 +2386,7 @@ to sign the L1 txs **Default:** `"0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"` -**Description:** L2Coinbase defines which addess is going to receive the fees +**Description:** L2Coinbase defines which address is going to receive the fees **Example setting the default value** ("0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"): ``` @@ -2092,26 +2461,50 @@ ForkUpgradeBatchNumber=0 UseValidium=true ``` +### 11.10. `SequenceSender.GasOffset` + +**Type:** : `integer` + +**Default:** `80000` + +**Description:** GasOffset is the amount of gas to be added to the gas estimation in order +to provide an amount that is higher than the estimated one. This is used +to avoid the TX getting reverted in case something has changed in the network +state after the estimation which can cause the TX to require more gas to be +executed. + +ex: +gas estimation: 1000 +gas offset: 100 +final gas: 1100 + +**Example setting the default value** (80000): +``` +[SequenceSender] +GasOffset=80000 +``` + ## 12. 
`[Aggregator]` **Type:** : `object` **Description:** Configuration of the aggregator service -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| --------------------------------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| - [Host](#Aggregator_Host ) | No | string | No | - | Host for the grpc server | -| - [Port](#Aggregator_Port ) | No | integer | No | - | Port for the grpc server | -| - [RetryTime](#Aggregator_RetryTime ) | No | string | No | - | Duration | -| - [VerifyProofInterval](#Aggregator_VerifyProofInterval ) | No | string | No | - | Duration | -| - [ProofStatePollingInterval](#Aggregator_ProofStatePollingInterval ) | No | string | No | - | Duration | -| - [TxProfitabilityCheckerType](#Aggregator_TxProfitabilityCheckerType ) | No | string | No | - | TxProfitabilityCheckerType type for checking is it profitable for aggregator to validate batch
possible values: base/acceptall | -| - [TxProfitabilityMinReward](#Aggregator_TxProfitabilityMinReward ) | No | object | No | - | TxProfitabilityMinReward min reward for base tx profitability checker when aggregator will validate batch
this parameter is used for the base tx profitability checker | -| - [IntervalAfterWhichBatchConsolidateAnyway](#Aggregator_IntervalAfterWhichBatchConsolidateAnyway ) | No | string | No | - | Duration | -| - [ChainID](#Aggregator_ChainID ) | No | integer | No | - | ChainID is the L2 ChainID provided by the Network Config | -| - [ForkId](#Aggregator_ForkId ) | No | integer | No | - | ForkID is the L2 ForkID provided by the Network Config | -| - [SenderAddress](#Aggregator_SenderAddress ) | No | string | No | - | SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs | -| - [CleanupLockedProofsInterval](#Aggregator_CleanupLockedProofsInterval ) | No | string | No | - | Duration | -| - [GeneratingProofCleanupThreshold](#Aggregator_GeneratingProofCleanupThreshold ) | No | string | No | - | GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared. | +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [Host](#Aggregator_Host ) | No | string | No | - | Host for the grpc server | +| - [Port](#Aggregator_Port ) | No | integer | No | - | Port for the grpc server | +| - [RetryTime](#Aggregator_RetryTime ) | No | string | No | - | Duration | +| - [VerifyProofInterval](#Aggregator_VerifyProofInterval ) | No | string | No | - | Duration | +| - [ProofStatePollingInterval](#Aggregator_ProofStatePollingInterval ) | No | string | No | - | Duration | +| - [TxProfitabilityCheckerType](#Aggregator_TxProfitabilityCheckerType ) | No | string | No | - | TxProfitabilityCheckerType type for checking is it profitable for aggregator to validate batch
possible values: base/acceptall | +| - [TxProfitabilityMinReward](#Aggregator_TxProfitabilityMinReward ) | No | object | No | - | TxProfitabilityMinReward min reward for base tx profitability checker when aggregator will validate batch
this parameter is used for the base tx profitability checker | +| - [IntervalAfterWhichBatchConsolidateAnyway](#Aggregator_IntervalAfterWhichBatchConsolidateAnyway ) | No | string | No | - | Duration | +| - [ChainID](#Aggregator_ChainID ) | No | integer | No | - | ChainID is the L2 ChainID provided by the Network Config | +| - [ForkId](#Aggregator_ForkId ) | No | integer | No | - | ForkID is the L2 ForkID provided by the Network Config | +| - [SenderAddress](#Aggregator_SenderAddress ) | No | string | No | - | SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs | +| - [CleanupLockedProofsInterval](#Aggregator_CleanupLockedProofsInterval ) | No | string | No | - | Duration | +| - [GeneratingProofCleanupThreshold](#Aggregator_GeneratingProofCleanupThreshold ) | No | string | No | - | GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared. | +| - [GasOffset](#Aggregator_GasOffset ) | No | integer | No | - | GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.

ex:
gas estimation: 1000
gas offset: 100
final gas: 1100 | ### 12.1. `Aggregator.Host` @@ -2352,6 +2745,29 @@ allowed to be cleared. GeneratingProofCleanupThreshold="10m" ``` +### 12.14. `Aggregator.GasOffset` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** GasOffset is the amount of gas to be added to the gas estimation in order +to provide an amount that is higher than the estimated one. This is used +to avoid the TX getting reverted in case something has changed in the network +state after the estimation which can cause the TX to require more gas to be +executed. + +ex: +gas estimation: 1000 +gas offset: 100 +final gas: 1100 + +**Example setting the default value** (0): +``` +[Aggregator] +GasOffset=0 +``` + ## 13. `[NetworkConfig]` **Type:** : `object` @@ -2947,180 +3363,67 @@ MaxGRPCMessageSize=100000000 URI="x1-prover:50061" ``` -## 17. `[StateDB]` +## 17. `[Metrics]` **Type:** : `object` -**Description:** Configuration of the state database connection +**Description:** Configuration of the metrics service, basically is where is going to publish the metrics -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ---------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------- | -| - [Name](#StateDB_Name ) | No | string | No | - | Database name | -| - [User](#StateDB_User ) | No | string | No | - | Database User name | -| - [Password](#StateDB_Password ) | No | string | No | - | Database Password of the user | -| - [Host](#StateDB_Host ) | No | string | No | - | Host address of database | -| - [Port](#StateDB_Port ) | No | string | No | - | Port Number of database | -| - [EnableLog](#StateDB_EnableLog ) | No | boolean | No | - | EnableLog | -| - [MaxConns](#StateDB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. 
| +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------ | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------- | +| - [Host](#Metrics_Host ) | No | string | No | - | Host is the address to bind the metrics server | +| - [Port](#Metrics_Port ) | No | integer | No | - | Port is the port to bind the metrics server | +| - [Enabled](#Metrics_Enabled ) | No | boolean | No | - | Enabled is the flag to enable/disable the metrics server | +| - [ProfilingHost](#Metrics_ProfilingHost ) | No | string | No | - | ProfilingHost is the address to bind the profiling server | +| - [ProfilingPort](#Metrics_ProfilingPort ) | No | integer | No | - | ProfilingPort is the port to bind the profiling server | +| - [ProfilingEnabled](#Metrics_ProfilingEnabled ) | No | boolean | No | - | ProfilingEnabled is the flag to enable/disable the profiling server | -### 17.1. `StateDB.Name` +### 17.1. `Metrics.Host` **Type:** : `string` -**Default:** `"state_db"` +**Default:** `"0.0.0.0"` -**Description:** Database name +**Description:** Host is the address to bind the metrics server -**Example setting the default value** ("state_db"): +**Example setting the default value** ("0.0.0.0"): ``` -[StateDB] -Name="state_db" +[Metrics] +Host="0.0.0.0" ``` -### 17.2. `StateDB.User` +### 17.2. `Metrics.Port` -**Type:** : `string` +**Type:** : `integer` -**Default:** `"state_user"` +**Default:** `9091` -**Description:** Database User name +**Description:** Port is the port to bind the metrics server -**Example setting the default value** ("state_user"): +**Example setting the default value** (9091): ``` -[StateDB] -User="state_user" +[Metrics] +Port=9091 ``` -### 17.3. `StateDB.Password` +### 17.3. 
`Metrics.Enabled` -**Type:** : `string` +**Type:** : `boolean` -**Default:** `"state_password"` +**Default:** `false` -**Description:** Database Password of the user +**Description:** Enabled is the flag to enable/disable the metrics server -**Example setting the default value** ("state_password"): +**Example setting the default value** (false): ``` -[StateDB] -Password="state_password" +[Metrics] +Enabled=false ``` -### 17.4. `StateDB.Host` +### 17.4. `Metrics.ProfilingHost` **Type:** : `string` -**Default:** `"x1-state-db"` - -**Description:** Host address of database - -**Example setting the default value** ("x1-state-db"): -``` -[StateDB] -Host="x1-state-db" -``` - -### 17.5. `StateDB.Port` - -**Type:** : `string` - -**Default:** `"5432"` - -**Description:** Port Number of database - -**Example setting the default value** ("5432"): -``` -[StateDB] -Port="5432" -``` - -### 17.6. `StateDB.EnableLog` - -**Type:** : `boolean` - -**Default:** `false` - -**Description:** EnableLog - -**Example setting the default value** (false): -``` -[StateDB] -EnableLog=false -``` - -### 17.7. `StateDB.MaxConns` - -**Type:** : `integer` - -**Default:** `200` - -**Description:** MaxConns is the maximum number of connections in the pool. - -**Example setting the default value** (200): -``` -[StateDB] -MaxConns=200 -``` - -## 18. 
`[Metrics]` - -**Type:** : `object` -**Description:** Configuration of the metrics service, basically is where is going to publish the metrics - -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ------------------------------------------------ | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------- | -| - [Host](#Metrics_Host ) | No | string | No | - | Host is the address to bind the metrics server | -| - [Port](#Metrics_Port ) | No | integer | No | - | Port is the port to bind the metrics server | -| - [Enabled](#Metrics_Enabled ) | No | boolean | No | - | Enabled is the flag to enable/disable the metrics server | -| - [ProfilingHost](#Metrics_ProfilingHost ) | No | string | No | - | ProfilingHost is the address to bind the profiling server | -| - [ProfilingPort](#Metrics_ProfilingPort ) | No | integer | No | - | ProfilingPort is the port to bind the profiling server | -| - [ProfilingEnabled](#Metrics_ProfilingEnabled ) | No | boolean | No | - | ProfilingEnabled is the flag to enable/disable the profiling server | - -### 18.1. `Metrics.Host` - -**Type:** : `string` - -**Default:** `"0.0.0.0"` - -**Description:** Host is the address to bind the metrics server - -**Example setting the default value** ("0.0.0.0"): -``` -[Metrics] -Host="0.0.0.0" -``` - -### 18.2. `Metrics.Port` - -**Type:** : `integer` - -**Default:** `9091` - -**Description:** Port is the port to bind the metrics server - -**Example setting the default value** (9091): -``` -[Metrics] -Port=9091 -``` - -### 18.3. `Metrics.Enabled` - -**Type:** : `boolean` - -**Default:** `false` - -**Description:** Enabled is the flag to enable/disable the metrics server - -**Example setting the default value** (false): -``` -[Metrics] -Enabled=false -``` - -### 18.4. 
`Metrics.ProfilingHost` - -**Type:** : `string` - -**Default:** `""` +**Default:** `""` **Description:** ProfilingHost is the address to bind the profiling server @@ -3130,7 +3433,7 @@ Enabled=false ProfilingHost="" ``` -### 18.5. `Metrics.ProfilingPort` +### 17.5. `Metrics.ProfilingPort` **Type:** : `integer` @@ -3144,7 +3447,7 @@ ProfilingHost="" ProfilingPort=0 ``` -### 18.6. `Metrics.ProfilingEnabled` +### 17.6. `Metrics.ProfilingEnabled` **Type:** : `boolean` @@ -3158,7 +3461,7 @@ ProfilingPort=0 ProfilingEnabled=false ``` -## 19. `[EventLog]` +## 18. `[EventLog]` **Type:** : `object` **Description:** Configuration of the event database connection @@ -3167,7 +3470,7 @@ ProfilingEnabled=false | --------------------- | ------- | ------ | ---------- | ---------- | -------------------------------- | | - [DB](#EventLog_DB ) | No | object | No | - | DB is the database configuration | -### 19.1. `[EventLog.DB]` +### 18.1. `[EventLog.DB]` **Type:** : `object` **Description:** DB is the database configuration @@ -3182,7 +3485,7 @@ ProfilingEnabled=false | - [EnableLog](#EventLog_DB_EnableLog ) | No | boolean | No | - | EnableLog | | - [MaxConns](#EventLog_DB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. | -#### 19.1.1. `EventLog.DB.Name` +#### 18.1.1. `EventLog.DB.Name` **Type:** : `string` @@ -3196,7 +3499,7 @@ ProfilingEnabled=false Name="" ``` -#### 19.1.2. `EventLog.DB.User` +#### 18.1.2. `EventLog.DB.User` **Type:** : `string` @@ -3210,7 +3513,7 @@ Name="" User="" ``` -#### 19.1.3. `EventLog.DB.Password` +#### 18.1.3. `EventLog.DB.Password` **Type:** : `string` @@ -3224,7 +3527,7 @@ User="" Password="" ``` -#### 19.1.4. `EventLog.DB.Host` +#### 18.1.4. `EventLog.DB.Host` **Type:** : `string` @@ -3238,7 +3541,7 @@ Password="" Host="" ``` -#### 19.1.5. `EventLog.DB.Port` +#### 18.1.5. `EventLog.DB.Port` **Type:** : `string` @@ -3252,7 +3555,7 @@ Host="" Port="" ``` -#### 19.1.6. `EventLog.DB.EnableLog` +#### 18.1.6. 
`EventLog.DB.EnableLog` **Type:** : `boolean` @@ -3266,7 +3569,7 @@ Port="" EnableLog=false ``` -#### 19.1.7. `EventLog.DB.MaxConns` +#### 18.1.7. `EventLog.DB.MaxConns` **Type:** : `integer` @@ -3280,7 +3583,7 @@ EnableLog=false MaxConns=0 ``` -## 20. `[HashDB]` +## 19. `[HashDB]` **Type:** : `object` **Description:** Configuration of the hash database connection @@ -3295,7 +3598,7 @@ MaxConns=0 | - [EnableLog](#HashDB_EnableLog ) | No | boolean | No | - | EnableLog | | - [MaxConns](#HashDB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. | -### 20.1. `HashDB.Name` +### 19.1. `HashDB.Name` **Type:** : `string` @@ -3309,7 +3612,7 @@ MaxConns=0 Name="prover_db" ``` -### 20.2. `HashDB.User` +### 19.2. `HashDB.User` **Type:** : `string` @@ -3323,7 +3626,7 @@ Name="prover_db" User="prover_user" ``` -### 20.3. `HashDB.Password` +### 19.3. `HashDB.Password` **Type:** : `string` @@ -3337,7 +3640,7 @@ User="prover_user" Password="prover_pass" ``` -### 20.4. `HashDB.Host` +### 19.4. `HashDB.Host` **Type:** : `string` @@ -3351,7 +3654,7 @@ Password="prover_pass" Host="x1-state-db" ``` -### 20.5. `HashDB.Port` +### 19.5. `HashDB.Port` **Type:** : `string` @@ -3365,7 +3668,7 @@ Host="x1-state-db" Port="5432" ``` -### 20.6. `HashDB.EnableLog` +### 19.6. `HashDB.EnableLog` **Type:** : `boolean` @@ -3379,7 +3682,7 @@ Port="5432" EnableLog=false ``` -### 20.7. `HashDB.MaxConns` +### 19.7. `HashDB.MaxConns` **Type:** : `integer` @@ -3393,5 +3696,475 @@ EnableLog=false MaxConns=200 ``` +## 20. 
`[State]` + +**Type:** : `object` +**Description:** State service configuration + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------------------------- | ------- | --------------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [MaxCumulativeGasUsed](#State_MaxCumulativeGasUsed ) | No | integer | No | - | MaxCumulativeGasUsed is the max gas allowed per batch | +| - [ChainID](#State_ChainID ) | No | integer | No | - | ChainID is the L2 ChainID provided by the Network Config | +| - [ForkIDIntervals](#State_ForkIDIntervals ) | No | array of object | No | - | ForkIdIntervals is the list of fork id intervals | +| - [MaxResourceExhaustedAttempts](#State_MaxResourceExhaustedAttempts ) | No | integer | No | - | MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion | +| - [WaitOnResourceExhaustion](#State_WaitOnResourceExhaustion ) | No | string | No | - | Duration | +| - [ForkUpgradeBatchNumber](#State_ForkUpgradeBatchNumber ) | No | integer | No | - | Batch number from which there is a forkid change (fork upgrade) | +| - [ForkUpgradeNewForkId](#State_ForkUpgradeNewForkId ) | No | integer | No | - | New fork id to be used for batches greaters than ForkUpgradeBatchNumber (fork upgrade) | +| - [DB](#State_DB ) | No | object | No | - | DB is the database configuration | +| - [Batch](#State_Batch ) | No | object | No | - | Configuration for the batch constraints | +| - [MaxLogsCount](#State_MaxLogsCount ) | No | integer | No | - | MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit | +| - [MaxLogsBlockRange](#State_MaxLogsBlockRange ) | No | integer | No | - | MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit | +| - [MaxNativeBlockHashBlockRange](#State_MaxNativeBlockHashBlockRange ) | No | integer | No | - | MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit | + +### 20.1. `State.MaxCumulativeGasUsed` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxCumulativeGasUsed is the max gas allowed per batch + +**Example setting the default value** (0): +``` +[State] +MaxCumulativeGasUsed=0 +``` + +### 20.2. `State.ChainID` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** ChainID is the L2 ChainID provided by the Network Config + +**Example setting the default value** (0): +``` +[State] +ChainID=0 +``` + +### 20.3. `State.ForkIDIntervals` + +**Type:** : `array of object` +**Description:** ForkIdIntervals is the list of fork id intervals + +| | Array restrictions | +| -------------------- | ------------------ | +| **Min items** | N/A | +| **Max items** | N/A | +| **Items unicity** | False | +| **Additional items** | False | +| **Tuple validation** | See below | + +| Each item of this array must be | Description | +| ----------------------------------------------------- | ------------------------------------ | +| [ForkIDIntervals items](#State_ForkIDIntervals_items) | ForkIDInterval is a fork id interval | + +#### 20.3.1. [State.ForkIDIntervals.ForkIDIntervals items] + +**Type:** : `object` +**Description:** ForkIDInterval is a fork id interval + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------ | ------- | ------- | ---------- | ---------- | ----------------- | +| - [FromBatchNumber](#State_ForkIDIntervals_items_FromBatchNumber ) | No | integer | No | - | - | +| - [ToBatchNumber](#State_ForkIDIntervals_items_ToBatchNumber ) | No | integer | No | - | - | +| - [ForkId](#State_ForkIDIntervals_items_ForkId ) | No | integer | No | - | - | +| - [Version](#State_ForkIDIntervals_items_Version ) | No | string | No | - | - | +| - [BlockNumber](#State_ForkIDIntervals_items_BlockNumber ) | No | integer | No | - | - | + +##### 20.3.1.1. 
`State.ForkIDIntervals.ForkIDIntervals items.FromBatchNumber` + +**Type:** : `integer` + +##### 20.3.1.2. `State.ForkIDIntervals.ForkIDIntervals items.ToBatchNumber` + +**Type:** : `integer` + +##### 20.3.1.3. `State.ForkIDIntervals.ForkIDIntervals items.ForkId` + +**Type:** : `integer` + +##### 20.3.1.4. `State.ForkIDIntervals.ForkIDIntervals items.Version` + +**Type:** : `string` + +##### 20.3.1.5. `State.ForkIDIntervals.ForkIDIntervals items.BlockNumber` + +**Type:** : `integer` + +### 20.4. `State.MaxResourceExhaustedAttempts` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion + +**Example setting the default value** (0): +``` +[State] +MaxResourceExhaustedAttempts=0 +``` + +### 20.5. `State.WaitOnResourceExhaustion` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"0s"` + +**Description:** WaitOnResourceExhaustion is the time to wait before retrying a transaction because of resource exhaustion + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("0s"): +``` +[State] +WaitOnResourceExhaustion="0s" +``` + +### 20.6. `State.ForkUpgradeBatchNumber` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** Batch number from which there is a forkid change (fork upgrade) + +**Example setting the default value** (0): +``` +[State] +ForkUpgradeBatchNumber=0 +``` + +### 20.7. `State.ForkUpgradeNewForkId` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** New fork id to be used for batches greaters than ForkUpgradeBatchNumber (fork upgrade) + +**Example setting the default value** (0): +``` +[State] +ForkUpgradeNewForkId=0 +``` + +### 20.8. 
`[State.DB]` + +**Type:** : `object` +**Description:** DB is the database configuration + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ----------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------- | +| - [Name](#State_DB_Name ) | No | string | No | - | Database name | +| - [User](#State_DB_User ) | No | string | No | - | Database User name | +| - [Password](#State_DB_Password ) | No | string | No | - | Database Password of the user | +| - [Host](#State_DB_Host ) | No | string | No | - | Host address of database | +| - [Port](#State_DB_Port ) | No | string | No | - | Port Number of database | +| - [EnableLog](#State_DB_EnableLog ) | No | boolean | No | - | EnableLog | +| - [MaxConns](#State_DB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. | + +#### 20.8.1. `State.DB.Name` + +**Type:** : `string` + +**Default:** `"state_db"` + +**Description:** Database name + +**Example setting the default value** ("state_db"): +``` +[State.DB] +Name="state_db" +``` + +#### 20.8.2. `State.DB.User` + +**Type:** : `string` + +**Default:** `"state_user"` + +**Description:** Database User name + +**Example setting the default value** ("state_user"): +``` +[State.DB] +User="state_user" +``` + +#### 20.8.3. `State.DB.Password` + +**Type:** : `string` + +**Default:** `"state_password"` + +**Description:** Database Password of the user + +**Example setting the default value** ("state_password"): +``` +[State.DB] +Password="state_password" +``` + +#### 20.8.4. `State.DB.Host` + +**Type:** : `string` + +**Default:** `"x1-state-db"` + +**Description:** Host address of database + +**Example setting the default value** ("x1-state-db"): +``` +[State.DB] +Host="x1-state-db" +``` + +#### 20.8.5. 
`State.DB.Port` + +**Type:** : `string` + +**Default:** `"5432"` + +**Description:** Port Number of database + +**Example setting the default value** ("5432"): +``` +[State.DB] +Port="5432" +``` + +#### 20.8.6. `State.DB.EnableLog` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** EnableLog + +**Example setting the default value** (false): +``` +[State.DB] +EnableLog=false +``` + +#### 20.8.7. `State.DB.MaxConns` + +**Type:** : `integer` + +**Default:** `200` + +**Description:** MaxConns is the maximum number of connections in the pool. + +**Example setting the default value** (200): +``` +[State.DB] +MaxConns=200 +``` + +### 20.9. `[State.Batch]` + +**Type:** : `object` +**Description:** Configuration for the batch constraints + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------ | ------- | ------ | ---------- | ---------- | ----------------- | +| - [Constraints](#State_Batch_Constraints ) | No | object | No | - | - | + +#### 20.9.1. 
`[State.Batch.Constraints]` + +**Type:** : `object` + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------ | ------- | ------- | ---------- | ---------- | ----------------- | +| - [MaxTxsPerBatch](#State_Batch_Constraints_MaxTxsPerBatch ) | No | integer | No | - | - | +| - [MaxBatchBytesSize](#State_Batch_Constraints_MaxBatchBytesSize ) | No | integer | No | - | - | +| - [MaxCumulativeGasUsed](#State_Batch_Constraints_MaxCumulativeGasUsed ) | No | integer | No | - | - | +| - [MaxKeccakHashes](#State_Batch_Constraints_MaxKeccakHashes ) | No | integer | No | - | - | +| - [MaxPoseidonHashes](#State_Batch_Constraints_MaxPoseidonHashes ) | No | integer | No | - | - | +| - [MaxPoseidonPaddings](#State_Batch_Constraints_MaxPoseidonPaddings ) | No | integer | No | - | - | +| - [MaxMemAligns](#State_Batch_Constraints_MaxMemAligns ) | No | integer | No | - | - | +| - [MaxArithmetics](#State_Batch_Constraints_MaxArithmetics ) | No | integer | No | - | - | +| - [MaxBinaries](#State_Batch_Constraints_MaxBinaries ) | No | integer | No | - | - | +| - [MaxSteps](#State_Batch_Constraints_MaxSteps ) | No | integer | No | - | - | + +##### 20.9.1.1. `State.Batch.Constraints.MaxTxsPerBatch` + +**Type:** : `integer` + +**Default:** `300` + +**Example setting the default value** (300): +``` +[State.Batch.Constraints] +MaxTxsPerBatch=300 +``` + +##### 20.9.1.2. `State.Batch.Constraints.MaxBatchBytesSize` + +**Type:** : `integer` + +**Default:** `120000` + +**Example setting the default value** (120000): +``` +[State.Batch.Constraints] +MaxBatchBytesSize=120000 +``` + +##### 20.9.1.3. `State.Batch.Constraints.MaxCumulativeGasUsed` + +**Type:** : `integer` + +**Default:** `30000000` + +**Example setting the default value** (30000000): +``` +[State.Batch.Constraints] +MaxCumulativeGasUsed=30000000 +``` + +##### 20.9.1.4. 
`State.Batch.Constraints.MaxKeccakHashes` + +**Type:** : `integer` + +**Default:** `2145` + +**Example setting the default value** (2145): +``` +[State.Batch.Constraints] +MaxKeccakHashes=2145 +``` + +##### 20.9.1.5. `State.Batch.Constraints.MaxPoseidonHashes` + +**Type:** : `integer` + +**Default:** `252357` + +**Example setting the default value** (252357): +``` +[State.Batch.Constraints] +MaxPoseidonHashes=252357 +``` + +##### 20.9.1.6. `State.Batch.Constraints.MaxPoseidonPaddings` + +**Type:** : `integer` + +**Default:** `135191` + +**Example setting the default value** (135191): +``` +[State.Batch.Constraints] +MaxPoseidonPaddings=135191 +``` + +##### 20.9.1.7. `State.Batch.Constraints.MaxMemAligns` + +**Type:** : `integer` + +**Default:** `236585` + +**Example setting the default value** (236585): +``` +[State.Batch.Constraints] +MaxMemAligns=236585 +``` + +##### 20.9.1.8. `State.Batch.Constraints.MaxArithmetics` + +**Type:** : `integer` + +**Default:** `236585` + +**Example setting the default value** (236585): +``` +[State.Batch.Constraints] +MaxArithmetics=236585 +``` + +##### 20.9.1.9. `State.Batch.Constraints.MaxBinaries` + +**Type:** : `integer` + +**Default:** `473170` + +**Example setting the default value** (473170): +``` +[State.Batch.Constraints] +MaxBinaries=473170 +``` + +##### 20.9.1.10. `State.Batch.Constraints.MaxSteps` + +**Type:** : `integer` + +**Default:** `7570538` + +**Example setting the default value** (7570538): +``` +[State.Batch.Constraints] +MaxSteps=7570538 +``` + +### 20.10. `State.MaxLogsCount` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxLogsCount is a configuration to set the max number of logs that can be returned +in a single call to the state, if zero it means no limit + +**Example setting the default value** (0): +``` +[State] +MaxLogsCount=0 +``` + +### 20.11. 
`State.MaxLogsBlockRange` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs +logs in a single call to the state, if zero it means no limit + +**Example setting the default value** (0): +``` +[State] +MaxLogsBlockRange=0 +``` + +### 20.12. `State.MaxNativeBlockHashBlockRange` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying +native block hashes in a single call to the state, if zero it means no limit + +**Example setting the default value** (0): +``` +[State] +MaxNativeBlockHashBlockRange=0 +``` + ---------------------------------------------------------------------------------------------------------------------------- Generated using [json-schema-for-humans](https://github.com/coveooss/json-schema-for-humans) diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json index c0a45ef64c..b2632e1014 100644 --- a/docs/config-file/node-config-schema.json +++ b/docs/config-file/node-config-schema.json @@ -275,6 +275,53 @@ "type": "string", "description": "FreeGasAddress is the default free gas address", "default": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" + }, + "EffectiveGasPrice": { + "properties": { + "Enabled": { + "type": "boolean", + "description": "Enabled is a flag to enable/disable the effective gas price", + "default": false + }, + "L1GasPriceFactor": { + "type": "number", + "description": "L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price", + "default": 0.25 + }, + "ByteGasCost": { + "type": "integer", + "description": "ByteGasCost is the gas cost per byte that is not 0", + "default": 16 + }, + "ZeroByteGasCost": { + "type": "integer", + "description": "ZeroByteGasCost is the gas cost per byte that is 0", + "default": 4 + }, + "NetProfit": { + "type": 
"number", + "description": "NetProfit is the profit margin to apply to the calculated breakEvenGasPrice", + "default": 1 + }, + "BreakEvenFactor": { + "type": "number", + "description": "BreakEvenFactor is the factor to apply to the calculated breakevenGasPrice when comparing it with the gasPriceSigned of a tx", + "default": 1.1 + }, + "FinalDeviationPct": { + "type": "integer", + "description": "FinalDeviationPct is the max allowed deviation percentage BreakEvenGasPrice on re-calculation", + "default": 10 + }, + "L2GasPriceSuggesterFactor": { + "type": "number", + "description": "L2GasPriceSuggesterFactor is the factor to apply to L1 gas price to get the suggested L2 gas price used in the\ncalculations when the effective gas price is disabled (testing/metrics purposes)", + "default": 0.5 + } + }, + "additionalProperties": false, + "type": "object", + "description": "EffectiveGasPrice is the config for the effective gas price calculation" } }, "additionalProperties": false, @@ -344,6 +391,11 @@ "type": "integer", "description": "Port defines the port to serve the endpoints via WS", "default": 8546 + }, + "ReadLimit": { + "type": "integer", + "description": "ReadLimit defines the maximum size of a message read from the client (in bytes)", + "default": 104857600 } }, "additionalProperties": false, @@ -355,9 +407,43 @@ "description": "EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.", "default": true }, - "TraceBatchUseHTTPS": { + "BatchRequestsEnabled": { + "type": "boolean", + "description": "BatchRequestsEnabled defines if the Batch requests are enabled or disabled", + "default": false + }, + "BatchRequestsLimit": { + "type": "integer", + "description": "BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request", + "default": 20 + }, + "L2Coinbase": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": 
"L2Coinbase defines which address is going to receive the fees" + }, + "MaxLogsCount": { + "type": "integer", + "description": "MaxLogsCount is a configuration to set the max number of logs that can be returned\nin a single call to the state, if zero it means no limit", + "default": 10000 + }, + "MaxLogsBlockRange": { + "type": "integer", + "description": "MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs\nlogs in a single call to the state, if zero it means no limit", + "default": 10000 + }, + "MaxNativeBlockHashBlockRange": { + "type": "integer", + "description": "MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying\nnative block hashes in a single call to the state, if zero it means no limit", + "default": 60000 + }, + "EnableHttpLog": { "type": "boolean", - "description": "TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP)\nto do the parallel requests to RPC.debug_traceTransaction endpoint", + "description": "EnableHttpLog allows the user to enable or disable the logs related to the HTTP\nrequests to be captured by the server.", "default": true }, "EnablePendingTransactionFilter": { @@ -445,6 +531,114 @@ "type": "string", "description": "TrustedSequencerURL is the rpc url to connect and sync the trusted state", "default": "" + }, + "L1SynchronizationMode": { + "type": "string", + "enum": [ + "sequential", + "parallel" + ], + "description": "L1SynchronizationMode define how to synchronize with L1:\n- parallel: Request data to L1 in parallel, and process sequentially. 
The advantage is that executor is not blocked waiting for L1 data\n- sequential: Request data to L1 and execute", + "default": "sequential" + }, + "L1ParallelSynchronization": { + "properties": { + "MaxClients": { + "type": "integer", + "description": "MaxClients Number of clients used to synchronize with L1", + "default": 10 + }, + "MaxPendingNoProcessedBlocks": { + "type": "integer", + "description": "MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be \u003e= to NumberOfEthereumClientsToSync\nsugested twice of NumberOfParallelOfEthereumClients", + "default": 25 + }, + "RequestLastBlockPeriod": { + "type": "string", + "title": "Duration", + "description": "RequestLastBlockPeriod is the time to wait to request the\nlast block to L1 to known if we need to retrieve more data.\nThis value only apply when the system is synchronized", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "PerformanceWarning": { + "properties": { + "AceptableInacctivityTime": { + "type": "string", + "title": "Duration", + "description": "AceptableInacctivityTime is the expected maximum time that the consumer\ncould wait until new data is produced. If the time is greater it emmit a log to warn about\nthat. 
The idea is keep working the consumer as much as possible, so if the producer is not\nfast enought then you could increse the number of parallel clients to sync with L1", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "ApplyAfterNumRollupReceived": { + "type": "integer", + "description": "ApplyAfterNumRollupReceived is the number of iterations to\nstart checking the time waiting for new rollup info data", + "default": 10 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Consumer Configuration for the consumer of rollup information from L1" + }, + "RequestLastBlockTimeout": { + "type": "string", + "title": "Duration", + "description": "RequestLastBlockTimeout Timeout for request LastBlock On L1", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "RequestLastBlockMaxRetries": { + "type": "integer", + "description": "RequestLastBlockMaxRetries Max number of retries to request LastBlock On L1", + "default": 3 + }, + "StatisticsPeriod": { + "type": "string", + "title": "Duration", + "description": "StatisticsPeriod how ofter show a log with statistics (0 is disabled)", + "default": "5m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "TimeOutMainLoop": { + "type": "string", + "title": "Duration", + "description": "TimeOutMainLoop is the timeout for the main loop of the L1 synchronizer when is not updated", + "default": "5m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "RollupInfoRetriesSpacing": { + "type": "string", + "title": "Duration", + "description": "RollupInfoRetriesSpacing is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "FallbackToSequentialModeOnSynchronized": { + "type": "boolean", + "description": "FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized", + "default": false + } + }, + "additionalProperties": false, 
+ "type": "object", + "description": "L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')" } }, "additionalProperties": false, @@ -478,56 +672,6 @@ "300ms" ] }, - "MaxTxsPerBatch": { - "type": "integer", - "description": "MaxTxsPerBatch is the maximum amount of transactions in the batch", - "default": 300 - }, - "MaxBatchBytesSize": { - "type": "integer", - "description": "MaxBatchBytesSize is the maximum batch size in bytes\n(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1)", - "default": 120000 - }, - "MaxCumulativeGasUsed": { - "type": "integer", - "description": "MaxCumulativeGasUsed is max gas amount used by batch", - "default": 30000000 - }, - "MaxKeccakHashes": { - "type": "integer", - "description": "MaxKeccakHashes is max keccak hashes used by batch", - "default": 2145 - }, - "MaxPoseidonHashes": { - "type": "integer", - "description": "MaxPoseidonHashes is max poseidon hashes batch can handle", - "default": 252357 - }, - "MaxPoseidonPaddings": { - "type": "integer", - "description": "MaxPoseidonPaddings is max poseidon paddings batch can handle", - "default": 135191 - }, - "MaxMemAligns": { - "type": "integer", - "description": "MaxMemAligns is max mem aligns batch can handle", - "default": 236585 - }, - "MaxArithmetics": { - "type": "integer", - "description": "MaxArithmetics is max arithmetics batch can handle", - "default": 236585 - }, - "MaxBinaries": { - "type": "integer", - "description": "MaxBinaries is max binaries batch can handle", - "default": 473170 - }, - "MaxSteps": { - "type": "integer", - "description": "MaxSteps is max steps batch can handle", - "default": 7570538 - }, "TxLifetimeCheckTimeout": { "type": "string", "title": "Duration", @@ -675,42 +819,61 @@ "type": "object", "description": "DBManager's specific config properties" }, - "EffectiveGasPrice": { + "StreamServer": { "properties": { - "MaxBreakEvenGasPriceDeviationPercentage": { - "type": 
"integer", - "description": "MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation", - "default": 10 - }, - "L1GasPriceFactor": { - "type": "number", - "description": "L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price", - "default": 0.25 - }, - "ByteGasCost": { + "Port": { "type": "integer", - "description": "ByteGasCost is the gas cost per byte", - "default": 16 + "description": "Port to listen on", + "default": 0 }, - "MarginFactor": { - "type": "number", - "description": "MarginFactor is the margin factor percentage to be added to the L2 min gas price", - "default": 1 + "Filename": { + "type": "string", + "description": "Filename of the binary data file", + "default": "" }, "Enabled": { "type": "boolean", - "description": "Enabled is a flag to enable/disable the effective gas price", + "description": "Enabled is a flag to enable/disable the data streamer", "default": false }, - "DefaultMinGasPriceAllowed": { - "type": "integer", - "description": "DefaultMinGasPriceAllowed is the default min gas price to suggest\nThis value is assigned from [Pool].DefaultMinGasPriceAllowed", - "default": 0 + "Log": { + "properties": { + "Environment": { + "type": "string", + "enum": [ + "production", + "development" + ], + "default": "" + }, + "Level": { + "type": "string", + "enum": [ + "debug", + "info", + "warn", + "error", + "dpanic", + "panic", + "fatal" + ], + "default": "" + }, + "Outputs": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Log is the log configuration" } }, "additionalProperties": false, "type": "object", - "description": "EffectiveGasPrice is the config for the gas price" + "description": "StreamServerCfg is the config for the stream server" } }, "additionalProperties": false, @@ -741,7 +904,7 @@ }, "MaxTxSizeForL1": { "type": "integer", - "description": 
"MaxTxSizeForL1 is the maximum size a single transaction can have. This field has\nnon-trivial consequences: larger transactions than 128KB are significantly harder and\nmore expensive to propagate; larger transactions also take more resources\nto validate whether they fit into the pool or not.", + "description": "// MaxTxSizeForL1 is the maximum size a single transaction can have. This field has\n// non-trivial consequences: larger transactions than 128KB are significantly harder and\n// more expensive to propagate; larger transactions also take more resources\n// to validate whether they fit into the pool or not.", "default": 0 }, "MaxBatchesForL1": { @@ -765,7 +928,7 @@ "type": "array", "maxItems": 20, "minItems": 20, - "description": "L2Coinbase defines which addess is going to receive the fees", + "description": "L2Coinbase defines which address is going to receive the fees", "default": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266" }, "PrivateKey": { @@ -794,6 +957,11 @@ "type": "boolean", "description": "UseValidium is a flag to enable/disable the use of validium", "default": true + }, + "GasOffset": { + "type": "integer", + "description": "GasOffset is the amount of gas to be added to the gas estimation in order\nto provide an amount that is higher than the estimated one. 
This is used\nto avoid the TX getting reverted in case something has changed in the network\nstate after the estimation which can cause the TX to require more gas to be\nexecuted.\n\nex:\ngas estimation: 1000\ngas offset: 100\nfinal gas: 1100", + "default": 80000 } }, "additionalProperties": false, @@ -892,6 +1060,11 @@ "type": "string", "description": "GeneratingProofCleanupThreshold represents the time interval after\nwhich a proof in generating state is considered to be stuck and\nallowed to be cleared.", "default": "10m" + }, + "GasOffset": { + "type": "integer", + "description": "GasOffset is the amount of gas to be added to the gas estimation in order\nto provide an amount that is higher than the estimated one. This is used\nto avoid the TX getting reverted in case something has changed in the network\nstate after the estimation which can cause the TX to require more gas to be\nexecuted.\n\nex:\ngas estimation: 1000\ngas offset: 100\nfinal gas: 1100", + "default": 0 } }, "additionalProperties": false, @@ -1187,48 +1360,6 @@ "type": "object", "description": "Configuration of the merkle tree client service. 
Not use in the node, only for testing" }, - "StateDB": { - "properties": { - "Name": { - "type": "string", - "description": "Database name", - "default": "state_db" - }, - "User": { - "type": "string", - "description": "Database User name", - "default": "state_user" - }, - "Password": { - "type": "string", - "description": "Database Password of the user", - "default": "state_password" - }, - "Host": { - "type": "string", - "description": "Host address of database", - "default": "x1-state-db" - }, - "Port": { - "type": "string", - "description": "Port Number of database", - "default": "5432" - }, - "EnableLog": { - "type": "boolean", - "description": "EnableLog", - "default": false - }, - "MaxConns": { - "type": "integer", - "description": "MaxConns is the maximum number of connections in the pool.", - "default": 200 - } - }, - "additionalProperties": false, - "type": "object", - "description": "Configuration of the state database connection" - }, "Metrics": { "properties": { "Host": { @@ -1356,6 +1487,184 @@ "additionalProperties": false, "type": "object", "description": "Configuration of the hash database connection" + }, + "State": { + "properties": { + "MaxCumulativeGasUsed": { + "type": "integer", + "description": "MaxCumulativeGasUsed is the max gas allowed per batch", + "default": 0 + }, + "ChainID": { + "type": "integer", + "description": "ChainID is the L2 ChainID provided by the Network Config", + "default": 0 + }, + "ForkIDIntervals": { + "items": { + "properties": { + "FromBatchNumber": { + "type": "integer" + }, + "ToBatchNumber": { + "type": "integer" + }, + "ForkId": { + "type": "integer" + }, + "Version": { + "type": "string" + }, + "BlockNumber": { + "type": "integer" + } + }, + "additionalProperties": false, + "type": "object", + "description": "ForkIDInterval is a fork id interval" + }, + "type": "array", + "description": "ForkIdIntervals is the list of fork id intervals" + }, + "MaxResourceExhaustedAttempts": { + "type": "integer", + 
"description": "MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion", + "default": 0 + }, + "WaitOnResourceExhaustion": { + "type": "string", + "title": "Duration", + "description": "WaitOnResourceExhaustion is the time to wait before retrying a transaction because of resource exhaustion", + "default": "0s", + "examples": [ + "1m", + "300ms" + ] + }, + "ForkUpgradeBatchNumber": { + "type": "integer", + "description": "Batch number from which there is a forkid change (fork upgrade)", + "default": 0 + }, + "ForkUpgradeNewForkId": { + "type": "integer", + "description": "New fork id to be used for batches greaters than ForkUpgradeBatchNumber (fork upgrade)", + "default": 0 + }, + "DB": { + "properties": { + "Name": { + "type": "string", + "description": "Database name", + "default": "state_db" + }, + "User": { + "type": "string", + "description": "Database User name", + "default": "state_user" + }, + "Password": { + "type": "string", + "description": "Database Password of the user", + "default": "state_password" + }, + "Host": { + "type": "string", + "description": "Host address of database", + "default": "x1-state-db" + }, + "Port": { + "type": "string", + "description": "Port Number of database", + "default": "5432" + }, + "EnableLog": { + "type": "boolean", + "description": "EnableLog", + "default": false + }, + "MaxConns": { + "type": "integer", + "description": "MaxConns is the maximum number of connections in the pool.", + "default": 200 + } + }, + "additionalProperties": false, + "type": "object", + "description": "DB is the database configuration" + }, + "Batch": { + "properties": { + "Constraints": { + "properties": { + "MaxTxsPerBatch": { + "type": "integer", + "default": 300 + }, + "MaxBatchBytesSize": { + "type": "integer", + "default": 120000 + }, + "MaxCumulativeGasUsed": { + "type": "integer", + "default": 30000000 + }, + "MaxKeccakHashes": { + "type": "integer", + "default": 2145 + }, 
+ "MaxPoseidonHashes": { + "type": "integer", + "default": 252357 + }, + "MaxPoseidonPaddings": { + "type": "integer", + "default": 135191 + }, + "MaxMemAligns": { + "type": "integer", + "default": 236585 + }, + "MaxArithmetics": { + "type": "integer", + "default": 236585 + }, + "MaxBinaries": { + "type": "integer", + "default": 473170 + }, + "MaxSteps": { + "type": "integer", + "default": 7570538 + } + }, + "additionalProperties": false, + "type": "object" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration for the batch constraints" + }, + "MaxLogsCount": { + "type": "integer", + "description": "MaxLogsCount is a configuration to set the max number of logs that can be returned\nin a single call to the state, if zero it means no limit", + "default": 0 + }, + "MaxLogsBlockRange": { + "type": "integer", + "description": "MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs\nlogs in a single call to the state, if zero it means no limit", + "default": 0 + }, + "MaxNativeBlockHashBlockRange": { + "type": "integer", + "description": "MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying\nnative block hashes in a single call to the state, if zero it means no limit", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "description": "State service configuration" } }, "additionalProperties": false, diff --git a/docs/configuration.md b/docs/configuration.md index f448196e5d..519f716f1e 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,13 +1,13 @@ ## Configuration -To configure a node you need 3 files: +To configure a node you need 3 files: - Node configuration - Genesis configuration - Prover configuration ### Node Config -This file is a [TOML](https://en.wikipedia.org/wiki/TOML#) formatted file. 
-You could find some examples here: +This file is a [TOML](https://en.wikipedia.org/wiki/TOML#) formatted file. +You could find some examples here: - `config/environments/local/local.node.config.toml`: running a permisionless node - `config/environments/mainnet/node.config.toml` - `config/environments/public/node.config.toml` @@ -15,19 +15,19 @@ You could find some examples here: For details about the contents you can read specifications [here](config-file/node-config-doc.md) -This file is used for trusted and for permisionless nodes. In the case of permissionless node you only need to setup next sections: +This file is used for trusted and for permisionless nodes. In the case of permissionless node you only need to setup next sections: You could **override values with environment variables**. The variables needs to have next format: `ZKEVM_NODE_`[
`_`]*` For example: -`ZKEVM_NODE_STATEDB_HOST="localhost"` override value of section `[StateDB]` key `Host` +`ZKEVM_NODE_STATE_DB_HOST="localhost"` override value of section `[StateDB]` key `Host` ### Network Genesis Config -This file is a [JSON](https://en.wikipedia.org/wiki/JSON) formatted file. +This file is a [JSON](https://en.wikipedia.org/wiki/JSON) formatted file. This contain all the info information relating to the relation between L1 and L2 network's (e.g. contracts, etc..) also known as genesis file -You could find an example here: +You could find an example here: - `config/environments/local/local.genesis.config.json`: For details about the contents you can read specifications [here](config-file/custom_network-config-doc.md) @@ -37,6 +37,6 @@ For details about the contents you can read specifications [here](config-file/cu Please check [prover repository](https://github.com/okx/x1-prover) for further information -Examples: +Examples: - `config/environments/mainnet/prover.config.json` - `config/environments/testnet/prover.config.json` diff --git a/docs/design/synchronizer/l1_sync_channels_flow_v2.drawio.png b/docs/design/synchronizer/l1_sync_channels_flow_v2.drawio.png new file mode 100644 index 0000000000000000000000000000000000000000..b0f24bf57ff9d6eb40c315a8206bde3e75e96f4b GIT binary patch literal 91154 zcmeEu2RzmL|G#k(*(xezud+AE$~ZzqX7-kGIF7wXW?7jPsZb&r$=-=jp%9s&A}iUW z{_hXxbam_dz4!n9-tYb1`+fYnuJIY~`5MpH^Ywb42=z0H#QToy!@|NMR)WiEU}51* zg8$j@@xT=p*5()B2eylbqAXVaa~g0*^8rd;7vfpldXo&)szzslFM(6&MFW30nt}msFQz)n zDFGH4K5JV?Q!c)phHTMm$m<5zRI}1jGvUocN(?u+_@sgkDiA$64MX5Tj+wOtrvP+d|=GbtgKyaEi7Hot?gVw zArST`Tc__Ynj;(?EzQvzif+*qiA1=6|CklR9z8qsGfqGbzH1%2plNOj)^B@#c0HzJ zYk{)axhl#pvNIOwX}G1WwGF0cK4Gz)I}WCphtV@}u`#tkxbM8bed)(_#H=O~fdK98 z+Pj?_yT3!4amvyjT5QZZqWb|d_5c6ww`n53X~n)wh}`c=x$?+vea}kHHRK|2LcAJ*`OTk!CyNH z3Q35Q=`Ul(^k9iZSz<&G>f@iUC1%-y5L*IGMIk*wv*_2w(93`pUlH`5?!U-_fB^cM z%`Q3M7eZe&Ma!S{x0azX|DHVp&!D&XpO5}0lJL{z2mL}_?^=Bw1k%nD39g}U{gwBD 
zj~}g9=#AhL72284uASh678aW0?wt_+%WVY4kpH3v-!=k$>+iG?-z5F-w-IR9v`ZuG z`7{vr_O4D!j#dbDB*Mbg9MTU+r2fiVA|&!ZqZ^_>7^xqZ-v;4e<_gT}Z@C8mA?;5g z>=DRqpDSP`1pNnIvqCtcc3l_P{s*qRppXbV%UySbxBr1VJ7f6IIvN3tp+XxteqOZU z+ND>5=zZR8-!R_iuj6R=e%wT`6(~!j8q&fNX^GUZbaAyuxv=mH+XIv%YX&q-*c$rn z975C6(Hx*6)@{r+q@}$n%GT}skmP3y|9gh;%Rtl-wjc!l26Y8Rzjq@7m>3Mx)6N2---4Xt zZ^PJ~&i-MSLIU4+$&WFDm@aqr;$P@e07T*dKYs)C|9oZ8Pj3fQzxCt~ao0~}1e!9m zM7tEq3Fr*CV&)0HfdCVPZo;MxPCz*v&0L@#J4TWnPzKP8y8s6yXDX&>;eITrlj?@0Ilb zw%7^%7+m~RoHWrPHv);WL0BUkP3`4@kfE>2es9Ttc}4|+0D`(5dtb0bp*+!ajj1aN z0c2>GEBJ#9GCw+=|4oZ5gnsbfx5&R+wjh!JZK7q{2>pR(Md-(vR~w z<;ZqN$)^yGF0KxLN3<@8aay}T*xv-LgKWmH!Nm^>4(R6Xe1Mg`r3V_e%l>r{{I6qm zQFBXkD7^R~CN~om5(KdLXNU#u1AY_cW8nL5dKHZ5|NCBLmxy67{r};72-#sQyth;Qcp*C~?S)}Mu6zz_wUca9sEKf=*9SRaR+3XfLB46 z2Vwf=2Q^~5;qKp&S`u#oUSv-|&JcA8%hP0XVOk)KbHTjW==_{W=S0gP$>)+;(l`nOFr2CV=> z!_*VlJ4}PB<#BbSBz$Nqk+RUTn=06GjH(#8BJ zfjUiHb1#D=l?lZ^eg<1fxQ@k{p0-*|9{L6{fU-IqT z(HMFMQ;lP9X$93X+C%j^>IfHG=m8L?Aa~A)?3CVEBIVrxuLV`$KyeNd-46JZ3!FFj zC$!WL0O()GFMPj>kNGht9d_laprcJt*t$Dc#rNx~u>bza222szF5vn-Hv)y^-`86G zj5$B!>R<+nRITJuHlR$^)!~%AEwo8HRjFVVpz~3`=3{;kCV)?&?cY@k^L+}_5#b0` zWbF=F(U&j+^xNfJKRXuleaX(wuy$i$Oz{3|1pTiI(6^#5bd>k4fO(eyVaoY-3(&9q z{(oP7FhTMz`B4CB4z?U&1-@CBqQKceUMo;Y&d>WxF6A$}^-y&?x-N?!Q|*qa_CgE6 ze^xGjeNqRr-@C*@2m{Ri+H!%>nV%fgLEqX%ef^?|8-Y<; z3^4x-DCmz~{2TKP`KMjm@Yk@v{}qppQ7W`O<`etzgdApTcW*aFKmS78jgAy|+2bFK z?H^If1VOFeaTtLQR3bwNZ&xOg4;*VoU-*TR{w^1}{jfSnzk~CnP;vG)QT+#%#iDbH zznyzTTdDuC07(!d#TeiLk*$y!z)0{P&m;(8Kre>T#^@TR%YQlfh#`vqql~sN2H{{# z17P<&VH@3ad5pkLaB34SDg%uHRNLw;Ztp+j0bMR!22V@{Aon-?O-fMS^xVX1*U*$ z*LEvG!9L2y)&>0dR)E91{il`d_9v}?V>RfH1OT5p;tIA3dLH_|b7Pz8+eRXPxq#%) z5Dx*2@}tE=@Mp)NF-T;01oGdd6n~lu|1tD8w^4INS%ZUjMmacCzWGOSLWI|AMyf(q>F_ zwOb}I*Z!P{Kma4DXvr1A_!|t$_~Us1!Cxs0bQJA7>fvAJ0sh2_f}cvnkH?r?PzWT1 zZGd0e`M4H75mj=L?x_Fc>;NABD}zW{QWz-d9BQg|bG>;o1S zJ(iN3%sCIE>9oBb2j!_k)o^ZRcgTiycqfu*WNzLv%R0_TpTW+4i~blRV|Y*`BfaoB zIc#h~97cBe%+qm0-UJ~X@mK_zn=eG3K6y5=Jn(46@3X{_qQ0~=Piwc8YXa8KVOiiG 
z!G3~RtX*FLFUat3!B6WeVReR!G_M>iDBgO;&|iU%hP8k z5M7xO_5)>|mJ$PN2IYCN?cp$ZofB9G;$DkDLlbXUBnLy};@9xaIsdP15Anh-UgU^! z55(D%sq?^yf}A|={__d$IKR2-l~zRU7OR@Xrx2g5rQW_{w@*@;yD|8G*~GCE*GN33 z1=}KNUSIlXtw!m1vN_c{L$BDX$m5e4$GN+)%B>1<=?8D0xN$sL@Q?rlHU%#;*qc+r z=SL{}&t)7quHDXhY35DoaUBximY5Zy+K47iB>%RN^0Gvw?^Ew7onMyGfYLlSGV zm2(y!d+sAYy_KJMr+!m*Pxu1Ey_9RH zsaG7zl3xA>E+PfJY_vqfej5&bRi4F}nVIlsSA{G;blhz1GAOc8iinPWB#IRt6?G*? z^G+*I?Iz=15}H`M-h2+KYk0&*S~6sU$8UxmZRP9_qw{0ushSUxr(9nUkAEOkR?r`O zGs|M?N_uQ8{SBXmh|SHax;ab^lov~^jbFhTcJjR{?#AFY7(CJA^QVd`huTP67J8gi6lFZJ$b4owZ(i#?S}TO#LBRL?Ik)X zqqXsmSA=e$7RRc+SMI%JJ9GVRYq(#ezj>giy1M$i>JH%RM={a*c9`2 zI0q*E2CKg4h$+G%ZD$-JP-XZM3oT5oTu(HD?uK+sb=*AlZZ9pREwiS3kXEj?(wj#P z#TFMY+6i5F)glpEJ0`)5kHWV|xA$$oMi6fhyKIq8=83BhgdRnQFQ%PvzZXarQj!(o7U~`0 zC&Qih#w&L-n)bx2w`YhvVyB(&JC7gMEp<#A@O+<^Dt}$lx8ewy?bALlk5ifE1-Unb zHo#6y1d~dVwUeY|pH4iMbMdB^4!s>Ct(kvgDDkmty++=u;4;e#iO=m0Yww<2)A{mc z1JP{-UzXH)UUlBSdhe<`km9dw!0vu^nAQn6KKHtt<|C9gJ_+|1jtJ z3HLK5fgw$l9D;)99yz?pHejTO6|j>MXiIXz82rL4x9*n?#T%3(IieUiZN1_`hJmR| zN#*frYD?xplvI-)D+F?qdW+;sfA|14A*9BM0Ucd+fw&Vd)uf&s@J~7D0W8l%YvS># zLwH9>CFTRxMGTY*Vqa-A5MhOo<~6|!$Ql!Ia1Jzleo*@*J~o!j@~{H`!O8WDuIwN0 zp1qwgLEF>OS8Q!PM0txMh~Ta3On=F-bD8f>#pvO1I&~-acKY0}Z_3!4WF79hbqW8m z{?b%8i-d%M*UIeu0mZ4F``5(mutry3o5$qzQR*7{ua55R?t!~by*gV=a#em1H4+Lx zacI%@X*Z+c9CwJf$tQLz|JWi;Qbah4$O8GUJ@x56Zq^SkZT3reOMO{2w_2pUo;mk* zb?_=2F<4PD)d)}Nl(`XF7iLFh^@_fzB;@@Zko|&<@`jCuRQITon%3GlECLuFcI0B6 zjdg8KR;8S{0p(G@)lro*;h*m7wK$V$j;6fp$?GMvBuge6CQG|2deUVd7H1QdeEU2(lY+%N^JA>0(udUiU(ob2mWvapXtA>~OX zdqSNQ`InSloT=#%eY2pXczEVbL>a~nk;A2kVU@rLYz8aw5v$gKnaL^uya>hbYd z^pO^4kNIpRbDhuT_MNZan;27?lox?IpVqHm)01Dq+QoObE(4#^fLYgYDR@7Zv2M3e zZ~F4g8|&vCDa$g82k#H}-HTavKmP)&HGILATd<6zfntj=SeaR;w~?B$`s{6m3rVs! 
zYBm?z+Of%_&7a||GW8wr;!_t#eNaz&|6yxoI62wKi{rC)rTd)rN4{y#{G!i}g{5lq z5|HKtk2!l|W~#IVDX1VQnYo(5sJDO8i%=lXMpw}4>WKKl;!8E_qoK-pasnZ|1MpZQ z-zBXQ8{%7rpU;O)Y)u9<4)wJ*eyo$n~i8ukzb^E2xf@EBHDJR&u_$^P7xS!|uC?mqs+<7@?e zr0dBlhOQE8)fdlE^n~to4n0qd;*1^`os@a4?JIlZafX_*lS?B}YEs@woj%V}(Jmej zZi3i=uKJLAYHUtBS9lP)0#Y!~Ijr{pU{j=#9RNgwffq7B_fZ zW@m?!wHHX(on@e3b|&2wH{K$E6(vcjZHASlmY*LcA}7Ae*dH%Bs`pZAzXu+<*xsi`anT=mqbRvr z#yT8Bhby$(^L8zThk3}ADKVGrvHkMy5hGFG=lOmwyn=Mu{n;&~-1;^G)^ZgB zOD*|o;_3SxZe~wjx-`^u8o|;#6&iS)vDk*-Y#7q&KWu3%5x$^r9mT^n#alV_ zxJ#kyF?$F`hm6e;w zF#62LExz5i6x?)}0Eg(TJpJ>MkPo6xbkazta9%o$-rPtMErpTjza)2#*YCJ%qXQPv=EhJYX3FUg&GP7|`M+qmf~y0N7?SmVCw6NB50;+6{s4 zr%XaJD-$Z35{H&41a0iyyd+eZbMgGEkc*FG$T#DKju%N5?UBoKdv1NhH_5W`urhI& z40hhQK7Xmx0bH0bjoZNICMx6hswaAJ`NlL+&#X)eUlFout`Jni6@5?+pSW8?bwB9` z!GptJ3HubWcBxYJTCo{h%ZnS>s~b6xSvZce<8g*p-FBD`sqX{c`kvz?L0t5PNkBkg zP*S?@a-T|@WOR8km~uFFuH=`p9E3ELYg+U;fVrx~SI>NNF@kMvAGUi03K*-R@FbNcE*}lkwVutMv(dn*ihlMftFbxD zOm9|l@!D$6jQYJ5yY3so9hcS}KHM)1oiI56DzxePst6_Pg$6=xX|8_!>^@V~ix0|@ zt{+cGAfET!&-9=Ie*| z&G$C&y+!mr?bVBm{%9iB+aSkC(E=i+;??8V93R8#FS&ingf~~ypD9_wKcv!Nd8)SK zwc%0$YZo~KJ}w{EO=3P#JVhd(f>0c7f0r%%=0)67o7BGY=j#aEi$jU2tw?Y>dkYYO z<;2xamxaB}`(GG1D7~1iY|Oz(8DxfEiYS&GR5PFb4^rg#ikWG`uM z9g?NGwFriCZ=CEkS@UWR%O>$G?5gpJXuB^T`o#(gWYz6z21QA*FUw~wv#usLk{U(b z!nqm!%z(ay0gsogqbr5aeEhP=P)mGCF(a+zV`-e5EOAa!k8#4NiDn5!a5*o%qD-E0 zCvCbE_>St59i8vu*%B-&a)FTsT;*a)F1)pd{z&-}T`aG zUN`v*0!p_FTd)ba>ptxT^q1HSN=AgUh~<{HTFzyfKOq$0#l<61cRZIe(M72;W|N+= zo_)d>uKLXQ%}{8gf^2wSetCFGB?;jdlg{o9zcC3Q>iI%TPkV={{2OJJ9HQK|vgJnh z(fQhEnqM{1y7)o@k9ysZmYjCagvcPiLgmp0D%qhvpJJviXD&RJLk20iSCT|H`@#eB zhp!G@VWgetXkKofTfdw*phbG#_2H`+`sd__32@sIkLw(}q-$|$K2E>ud{<%Ykzk2u zD@McF^UBQu+H-TWTye9J?oriqcgb2M~b zoR1q{Ntj(b-%n0KE<~nxO4-XFHIrgHPQ2A1taya<8l}wRhJ~2b38%(DOS(=2bp?mB z_iWSq$R}F}Z|zTV4jBlh#?Bio&^42}{;2WT^I+HgU+VQ`51TG2covG1-NfEloV+b} zh;zOmlZg(WJAs@K$5dXiVK&`~OLqKF883YvYnf>_w+?Ob`G#SK$??w+F^}8| z^s2uVRgX(qvizZQ?2$}77q*=mozwW?0AmTa>#A_Lx?bb@rJ`HcXYYlbbbVZR;4SsX 
z{k;1FzeJ|ewHr)|eC50RFq3NX&;heYguSLCL?Xx9t?H=gqE4&c&_qNoPS>2Do}`ih z98uj@U^h05uN5f(l7M+683sTut_YEC3PeZM83QHSuxl$a+P-NGtz({y9xX68x#0Ic zG;KvB1K1t1XOCrpH!TYX_BKkIpBg*^zE%JU;Lct6^81|I34b+5mXgG2F_G0hs%5 zB-($!(bY?EH8?yLgy(I~(@t0p*Z3l*U-OB%yt@R56PdSTV=RD6>FVmj$#Kxt0kE71 z=QW9>`|JP(B_(TIT%5+ea~vGTHC5*x7#U8z%3-=pEt1sHp}qj(^zD_cWf;+lt8p9@ z*Q-F~Y_7e(QRKVfW(!!aSGhWBz8=T5^X^JMUZoEU3(F-HlzgQERxD5v3l~N7YV7UX zS9P?wt4xZ~cd@r>zkWG>P1qqG*hA$?x0&%F@3}-uE`8Wh=AM-8<*a*v?_rQhC+Z{( z@e-fkJ=08j00Lk&0HW#dpD)eEU&+mHY6S)tU;6O_ykzshQ_Wkib>l=w6#HJuelcyvkE`9xG!Ok|Ym zUc*dGZvZMyC6!;kyrX)d^*HJxpWBt9MM!$ccwqyqT|7or?0`YK$7xiht8n=UOE4bs zzC(BO*ntC?BVQfAqGmBr=G>K|$>zB+Q<~*8+EAkdPXbipwxsdnm&s?oNI!WV_@nGi znNMfiudtB@UIBz`KraC^E9tT;5MRhq!=T%9&RvuMfX07B>0`_^V0V4xw=H=s>U{1%{!Z3Zh`6G_<{S&CdB z)6I{)nrnW!9b!gsG!+6rP4t-cLBpe;$1ju1WI`&(96gypDxRkaNdU9S0i2Yj;F$G+ za+kx{PuMR%K?0qPmBu1#c?ACoINtHmFuEi^G_Gk>N6lX{#QT4mU zIy=m@$F+c$jI`X`fcZ=Y$n9@+P4WUJjcLq%o%^s_j4;RZ8SZ?bi|r$SG@VOvwCWS=bO0cfQFS8zwp>t(iT zx9nMN3gSy=8ua6(Gsx@AG&q?D%bd+Kw9=MXwzX)U9ET(d^;kyH%-)pFR)#Vdm^?1n zYQ77hw$+X?GGW6GlM{EN@C8h;1)w@F1v|2tq$_}hRKW?qZB*;$>p7W~TqcmicP%!H z6>9X<{(|$F##J6?@98|qXQXptJA0~Bj8F}{!G=Ap5{a)35vC0n3Ey}gRAbuF<-lp^ z9rGy>PEHlQuGH<_Q^&81KT1cZMpY|q>(p}vk~p7`07FJ!u=}l7QR6M-fU%E>Igr-0 zV`ulnRa6op4<5g#_ZVnz+1Jo2T(pvehZ{0hYu-779QIv#3aCu{%TQxD(RFM$d>DbE z#x+Gz?KK#ry+04gqD<*P91SQiOrS9~0r>%;a?j7%xqd&&5D0-uC9ziNMoXcF1hiYfn_8?NM092Peb*jdQiRwANEn4zUOq z+6hzc^*+vUolh7RViN*)e|RbVz$7q;>OqHtwUh~hs9o#);VHhrSY;oP8*fj2d8?ol zMcflork{D6l<260ZsH!?QV{puwZ6)bRlRgzzNQEN^?^x1Not>n>WX|G4F*Bh0y9w! zyXZLDa1lGqbn!!H(4a2hvseJuBGYC5y8e;Jb8R9y@!%nDnL~90&ds!`l!nSlDCOSz z`^d-P}N$UR)Qu-MU`CmyVr^MHBl{)ltuJ^2z>^Tqp(Nw=pA<2;4_G3t)aOY{~Q$S$2nN zlc82cz@FXk=LXa;W%$R#0#{>%2f@;h*e@LY^ z3skO)p4Dr2Ch0#)bXH9h1Pc&ecC7{c>@)lGX0ZUptlKS8Iv?6+%1g9wpT>$@>RD}? z=6B{cb5)+7@2$`FmmJp|Knzvs6=aiatt|B#>);BhW&$JCmwwXw+_m#lAd9e;-S@?9 zd0J1)J%PZNPC>Oj8cM&|OW~JM?LB_o0Y_V2+>NptEPrA*c3N!2wt^&L88Amuavk#r z=?>``xeBXiM_3%`7wKi`ygo4K0a_1fo}+#9j?C$G_&(X? 
zHlErl)l9mRUgsj*#{-4sY3biJG&b&;*Z}iNd>zcISO5!m3P<9vfrQxk1CxmAr6h{= zL-uv8mltm3jaci85K+))ugnc2dgAkW%q=W3&*f;yyG_5o38QZf!hS3=>|>?s3>}kP$Z{7j>xC-Rq0p2;X8s%he zt3KDCrzRju}?M_w}Fb+Qin*5M;c zS6Df6XF*CaK{q(Ikfnq!d1nObdCm?j=xY7P!d@%D`d{3n--F zMhU|x^J?Y;L%Vfum?$X*b<;HvQLaMSBm&8VJLb*N$4HsS3GJ5VYQLs?NgZ9}8>hH&D^ zt_--u6d|ugq8>8fu@*crPhu1o7mtD~o;-UL-;!DP%Gsmixuuc8dmew*G4{>ElYQ6U z^=du8_8rLDp0+XX2oaX?oZng931>_0gPME&c$THFM7WkjbjSHp>ZP#>f;rk@Ba-jdv8P^OmUfN26YHhuHaZ~t z0Nc)B9HMu^VNp%tm3jO2Elg@D2Q}GsI<|fZHy;L;qeMCLdmtD3$G7MBIxyP_)J;iT z=ZjvE3b2udq?`1)z2v(Zr$ZlQzAa8{K$mD8!_p^AG!H1W~-rr4T z?K1TZdqXfFt|Bi^+1SvZUtVbviW|%Z@(ddEU85SA1iqVs2PY40no!bu2GGLp48@Z; z=f5Agtw^eMloyCNZvZubf|dzn!o5k^jE4Ti2Bv~>=~DiF6H}QHr~=AyO9f0VK;yL1 z+QLM)v+uZAI4Xgln97E3kxn(!E(z6MW|9pn#$XHeUE@BxsVmWarmyyXX)MTVY{JGlgp0j;rx9mkbu1=#qrv*HWdGF- zm>eDFEnS<{+OJz`#3bt0;iz`uy0?-p?^K%h;T-fEr}N(|?5n81Lo<7nth=W*T!!Kc zi9409UFX+2yx_wwora8zj0w2}AzXPC>{0J#i!O8@6M_eNguRd&aHataQRkBp`D-k7 zZ`JY5AJ!UWj?sksKIF<<24Z|J^W%g15%_u0$kc<2rwWFvy-Sf%ICeB)T69T1y3>V6 zv2hh=#LN0k@L<7In(kCY9lV5C`5%mv*0Zy|gx=*PTL!_(Ia~(a>9eePe7wD+1-(T3 z2}EV`;%OlU)==xn8?H=l{a1eLfM3h3=azn zs0bBDLoXgCUcD16HoM18ydPfcbf7Zp_I>~LNwyNJdekU;Kh-RJ#5@OKHWFqb?h@=) zr@t>8vACzK%S4;t`m?3rIv?|*o<0VXvzg}1kzrU&j;!0zJ`jbYOOH2t(bj==Wq(}- zdpIBGc;4CDp&JOBQ4*)MI(KrMfp8_d{Lb?eD~XR#T3e=_iYCOXndWsthxfEon9|qF z;pEesXlI(QllG8b(mveXbTB0qqULJJ0ZJZncE20R9;U9|A zKeu-Qlvr$oVV(WF$GZwgaK55(=jsb_jjl%*HX1kMtw#c>jU2_{@+bQ3X*2W1R3E=fx-NB*&j?Qk+ z@*=-5UxONY?ebs+FPVgeFn?oHIPHaoURl8Kkj#&NC!(`p2`I1&FTHeQ+xCLsY=i{I zO5SUz$`kR%s+U&8I(<80lf%JRiy~GoAlQc~A1Vt5LqAGGljk;Pb_wN9zx|*BTY&E5 z>jGvWq3-;9Cc*os4@C?fMXwRO?kH$k{IQs%EV{3E1g}4*Uy6>_J|W@{0xnSTa?^@^ z*V6|km(|~jZTBx*cNH!LYJe!-17H@{pLXE=nQOwT>x+}8qM`dxP)P+cD3!dQn4#rw zIbC-h9&`D!)>|6t1}JO&(z+U0UF1j|w$Q0m9%ISGP7vaD&-MEQv!~I_V^EecVDoT{ z<#d92vs#*X;>0F$qBUm>z|-f*Do|K5wF=B)@i+(vXkC3k?4%bh0uUGNFbL$5#y~~V z9q}5kr76y}QoG(Ww16xaE~cqn8S?J_Bp*eYGp0`qVPsdIFPm$6(JKHZ)807g?UuB0 z0XG%YdGNP6PY(wQf-(}$=EA}QUE@!$6w}a2`jm-BFL!*V5xS5dF6lO{JJAzSbc<`) 
z=kv4nvEB#9I?a4_U3ASV_|fLta{ikjl%5g|;`$JCL)2N8R&nEF44s6BdSVWkhF60|4W+L41H=Jm^ZVy%xzSAVaWxX4`^(~AfH}7o`vQ27oUyHql@~}F0VPoqcOG9!eDlaL zP}k6g^x0hfG_`vCK~?<3>W9wTTGV`T(ivnKaMFY%tCN5sGl2HTep4iL;qph^RUDTu%IiDBclKBM{g#5DX@3K18k3F;seEEq-j)u!UOc)( z2{xAXFOHt{vLG>AYzCCZXuC{I5^3uL;?Ry4i_InV`|-5a$NEhOgL!>DAJ!knbAbLL z36|J$c9?euB}Sa!8O9#N4=;N0k`JM#V(fxD%gV|)n*n%#Fm){)xzkgysb0-|EJE>^LF3cgrafOKAr*|?AK=tH zeQ{YrEFJd&J+?ViD`iz#_zK)IVl_FFX(t~U6O$aVpY2#-zI;&NMYs8pdLwQjgc{Bh zCajg)0Ah>0Tx#+VGKo;C69Y`?bLTNF@GkZlpsc+e29<8A5w7?|WyhrLc`@|SNx-+x z+r?rY{6@JFa>TEiR~Duh{8!O^9n66*`HwrtEeZJ z4lk^Kd~o6h!2@P&TxiH9w+mZ9w?j~o%$BeT0ZuTMtU=EM;~P;AaBr9EDboVO;IQs6U#;Rn9z&pQrLT!7!LBw`U@1pM-ubP0~v zOwv)#Y6i???66JQOmj9VdosWd`QJ5ME>Oa`f}p@BB~3mebO8?RLKg6iw;W0 z!0Y%y$IB!;?!cXbw+c=tj=Jl!)U9*Dx(4`NCBUL|bE8yhltzGsDl%^-(W~+(G$FK= ztE{7(Qeid`1cOqr<4a10LcvS&bp?!DAfew??qXdsBlwm~Gle>L?B+wVX(okBv7mhC zDIj#RES|eJCqBC|j~L>F9-IW#XICH! z6)=tyhCx-{j|eVNi=0W~)MKO;wr_DWY6lgQ&^~Bpdft#`EAU=%oa`&U!~cOdPMV&a zHxnKfMd&sgTnW44PZm(AQRB1rVf`)Lv%_R1K+VIgtXO;l>Hw{qYrq7GNoH}gOv&MO zE-)gtu0r@jP$>qLY*v(c@x;%g?SX};s`>k6PjGBGlK zm@<1kYgIjRwXF^KimivCc}Y@GEyA513qzmOIzTR1f+D4$RMfWkfH7_IuDb(%Zry!j zU32EO&Q^|)9+fd`0RX5Ic%X{$v@thpn*t6uB&NvBcHR>(cAuhCF}+^#SR41)Y(E=f4=kQ%cg7%~n^_`b8QionamX#BD(5Ev8== zrk69!$>K=Lg1u5a64V4|lOAnM2OR5ku7WK`(l%>%NeSFcnR!>5l|BFIP4-e{WZ{E# z!z)FyKyR2wG=Z%6-V@V3w2y8X`!<=0vCwJd!s`lb^AN`d_=`h>O&^G#E}T#SjhI?g z)un-v4=5#jRzf2`Zp+U@QJ`W1oLkUWVBq$eF$M-Og5n{R0H0u1pB>Jd*BC#&Us0r;QG{1`v{Mb!GihN3PD!PK_!LXIJ~~ds-0D4`p};-Mv5D49f(R$&|QG7NEM7d-DymK=91( zI(1IR@?)0|yN^NSmDKV$u#MEC{B}E*>L&b!6`RfLf}2(I4TK9&#ocvC`AW(rZpTQ* z197-#ep@jn=?a@#D`49ldIuqfFi!EPvwr5-`pC!#=Zb!bg>4YLnFe&GvKAU zEFKgDx?qCOhh%{C^7?Ih)Vt3BI%%by*prF^P~Zn9`|QJ^TGWf7SH)biTM4um0XQMd zo5rWUh+LgxDLPc$H$I)NxQFX&GcD&fU|>W!)w@Iv`}?0|H+nIAcTYwa_?wJLJ##4UtGbh|< zY2>|x-=rm9*(qxG2(1H^;4ETdsgjfF_0K9-$$&=M3qJw&I?K4$54pMOT<$y;Dkm@h z6p#s-zzAOuS}(Ed%>)(2t?}sz3Cyk^dlEsctawc8UZ&h;vaufrl;;AuIAs-;OPt0v z1_aK}2D3ns7;a(P`6U-y}UJWe6UTW(rqrNXzfmDa^2fScq1 
zl{g82KW;5FZ%)Ya0yqE^S6fa*)ws_Y9R_$4n37Wj3pf1Mm9}U(`1sBeEC80R<%*hQ z4s4|*j;VW?zUu@?w?P%g5Z$sxog;g3AHVf;8YtnnPU{&K8oJNOOg{3!Sx-6gbndg~Fgk;i&jN&9KfV1jjFY@>+U5Cmv5AsiBPM1F ze0DGqZkt)}*>c6TSP0GTJHSrQQ@t3!I5%uuUL&2^STX^4|0|$=x8=*$Mxt_5(nvfG zHmNg7)LH;GF2DTxRUtcOE-uxc`{(cEeSnjVm+dvug%><`2Z=yp68bI?`_~zG_(}2P za_gNXJf|0)c@Xdz=*bc$CcoCamI#`J1%&+>F%k=u-ZUx{6E5%N?(V|f0Z_ToV zzC#w(h&pBSJAxj2HkU%hho`*PXWcVf%Jq0!JR zmwDT#+1-yPu6zCTmxdCboH9;jto;CNj6AXKK6!cspnlC^NjU*?#z1Mv|G@7%1XSo# zOcve|hM!iydmwkH8LPyE92d;Oi~b@1#}9VtQ1B9>ZAI%Fh;^&dL}xA#x19SaRe!MMJPkm3FACqK zP1BnwWsY^-eq*ilss9DMCP-_hCE`wOfH&lPl6hR%05K3K4ZQ!+4=3xIrx2={#-g7} zGcO*9yQM|jI!F ze@DZfXUX``$$<*DnR{QpTjhR=$=a$#*AxUzC$Ftt?tX@_j(XoJy50`CtqkH|viKjdj1yl!70s0L{RNeC7I zvzgc+&j3I+IXO8LI&i3GNJ|Tyqd&I{TrtJLgDmgWW)fb$8Hzhiy#6L{ITcr##lMhJ zBSYotrA zK9ESEG294BxcgdrAvE!!{!%X0^67yr0gdd@7HR3n;_Cnqpy)0TQ8qn*HVGh;Oh5$r z8LDYV!&C?koyi0M?PGjO3i}mSRWrNw!@DDu_>^E5x%lTwoyhxnJ^VsTNi@1&Z=Rc( zPlUa1|7b0zOKS`v!nXU3MNsq{@Cj-Ts(?8n;;52LHMUaCDb!(LD2y251O!LoFAhD*)2(7J1?)Vj0DHKjTle!hvuv5O4M6It|K)5@J<|_!T`-#oQPKZT)uHF_YL3W z!_?hs_(U|K>cDNV0iMFt>T-16VaBX`15BQ>QF`|$5o>@AXwAt>-K@WG2=WEw z`Vp3kdvU^NyB-)zah!jYCv`5&6%>II9MZ_V?KZDkJ6E$|@rpfkxVW;!ckwZFJ_rDu z1fB}Xrs*3j^&C1`@VS(vb#dz`g?PLq2TY2I$sgyEpRcaEu5Um0s!42;Dqk@i4?4>*zr9kZLre+5EZPHygOd8w)e zTaNP|$)6^c7Ifin&l2v@);O!mq!JRmrZ=&OBcT#tR;wgrWn(+ebXryxXM<`- zt?$pN=fMtbewiE{721NUCb%SItT+FS$4v_}Mo z)=nzLrO&*x#@h}U-EjO&mE2FT4AterNbXPboGtM_oiEYjo?4A^%Vi!^iK8gl>seA& zRbSMJv&Zo^~eOC5#0CBOkEsLEie6i}YZ(KKS?;DD0W$1^rc zq?g_|CYH#dsPc?uVZ6U^I(YZ(7H8F%-QfiXj`ek1Aa-OmiW@son|y4hM$kr8&z^~# zHwYf~k#3;P4P?ooG#IE!(ZC}*lwp9I3~Gk}2tH7j0YUuP!O95`4qPCO1cyXhL5eq1 zBkT0k`(gjDT%;!#V`-%P4Cw0sHwmgXP~@B+1l+4Yu+??^(|rNvPon*@g^6R3=R&H|8|oORF1J+Op^a6yppL}~g&Zb1xJKR8rtyDUmMf9fvCmWBjk+-Vc9a5|UU7kE$&-gIJJ>AAs8?c>zm7p^cXJ8~F}DUC^+DiJHy;WN^D zlM+!2Pd_?|uj1RqkPS%V1{g=1>;EQ z7Vo5cole80TVOa9il5%+@aB;ia~F9z6vkX|!jf`Zz0>^U$!SnaXRG9XO6|RS2eJBK zN>=f(j0k0f-Ud5%V97DlRs7Y+MF0fu?eUryn(MqB2C-WU@17kG 
z-b?BR&ot-7G9__8LX=M~Hx24A&b_I4IoQ;9)P2KslcXX#s}zYGKk?x{|3b|WlZkHJ zP@bMZIk2)4N8k5)t4`FqKRK`gOswOz_}*SoE+M^<1f`lsaay~i%SzVeZUMN~FY zYi_2tLs#IzexRBw;_Q@l4zN{P6t#YgwfzhCj4UWNLUnfu?tvh+Ce;0x8aGK1#|`P(RaQcJaBrx0-BY6kM*bU;B7kij&cE znpM?fkJesGjTBHyLWBw1H5QY7O>-Gvg-T8i%Y4lhva`4x(8#z|*nCv8&=C;S%z&NO zban+LJ>0;6Ujc5Ye&%7JXf|*h$5akwtEO%SV`XoW;gK-B;hd56T58viI8iLydd|PY zDl5kEtHSA5o@W*)?^=8|NSDTW4#jSvM;`#7D;CJ#ZYp)6zW>!BP$jxP_#+t2GF#~;orj2S=FyEYBD=@;BGMsu8C z0BJ1mH+uWlN!I;{b!BxZoOCnIU7}8fk>p(F7`YnojJJRtUt4|JwAL3KjpzHT9}MVo_f2^$6n3ADW{D$1@Exi9o)uNM?w&q7Tso-TKr zMQ~^>NnX1sQM9DYF*suRl$J8~UVO2%#x3?e&NQZ<{o0Kl;ZZ;qwh=g=^cXuk?EpAi&*T9<9)Al zzIa`H5Y}FMtvT;G?lFG1M6@>cp~Y#K5F`)op?>dt``+}L?5uFZ!P0Q=8ms;+Y;f>9 z8XxlrP%8@Co!Z8cqh;eMC*WQz231(ZEYkq?*I8We^JmpFyw_$~q z81QE{*KUU&JwOwWUNl#8TUHf=%zL( zD!sA#j6;r#{s3gP;;1QLuuUa+u}oU7v{jI*=LGc|G7GXF11}*n+y>b2^TwFdYVowE z+CesZs`((#YEXy_Z#!aT1Z)O_zrHlRGj-FmSGk-M-nU##%YYi*E@cC>-l@PF~Bt3kneS9iL>3()tN>jn}Dda0zGs zImm4e+YoU99KxR;0wA7X>ri%%P}3X`!5TKTKVFo%h=Sm1HTEOkfIaA3;UVqQ)a6|{ z*_=9kb!kKIOE!IA8vPlby>`&@=m$6GNwR&=_+q%}`U1K**pqY7XSm18#$L?at?Kekq9+CDMY9gMd_GcgY=@V328?4D@UvH?A5+So*hcDP3^XCl9@?Mz+$n_|}(%wmH zK^jw8b$|Lz&grs0NdvKSr|*GVVK}lD`J>O5J?3C*_?yQu9uXU+mt`#tq}3E_1#>!H zH@zC_iyh43Dtm3KC>ot9s^d-GfvTx_)t~*s32gbFM%RVw%)bys)a$n^ym1!5)+4S( z%JL<0-J5!+=W{G=S};McIZVW?naA9EdwHl~^b(P9!X-j??huHsPJe|jUrR=Ep|$(W z{I=}Z{#+$hRxQZ~Jgn+k-wfBAUA7b8&P+@y9O5U3K7ZamyX@rW3(LJCG%)7B3z3tp zgKXgTyvipH#u+2(CZoZ4Qf4^~&#udbU-c-Dp=?5|i2OvZmV&LGrL^iu1Y-zx>|4E(_m$7gQ?Q&nhVhBkD?C;_ZhSf%PP&NHW zWknQ~yi&B1Ae+EZ+l>)KEjz6b{&~1Iefx#+WGC+#-iO-ys9r=v@+|7q0d$Y8ToTo9 z!>{`iaV6*1d5^wg#FqN2tJ-(<{pnOl;LT5v;wIZIpJ@|De3;ywEKLQd4vl#tk$+{R z9khsc*bDMQ&Cz8JXMOa!gF1ZA6}USYc7j`gT@2|g4kBqXQLNR!k33;fnfiZ z5ILdORlac`<1;!3XYqxjmT-s3XcvqR4k9N#OHV{jK!@7lrzPC|`NVVL$2&|J zj*>{1F+8S+V+03xqfJ*{!@~{z26;m*PU|uxWLkycA1Mif{;&ab0t#;x*6hX2cjenB z@*eCRtFQDDNyQ6Qi;V>N%U1=_`r|bifBPs0Id3>yGb}M4gI!(@fOL}938}EbUTNK* z@#zw$lN70hH-UaZniwW_)m{+SSBu1ILq?r|!y?UinYxv2FE4MEpJc)H(P@L};C+G6 
z@MRVwF`E+_cv}?MZ=~tqm&`!!y~kmS7Q~H|;!->o?uw*VaiFxUs7CaaL9noHI#*XF5*RZe z?PV$rUXVi;KSf=%i0sMaH@<9azPAgFge)Al=1_T~Vgi;)`IE5JLASuf#Fh2lYOvni zE22HW$SIPlW?E^(08Uvu-;LH%Ja@xf0#^SPf?pc~EqnNo+u)LkF_Ah_x`d`MBge<3=XaT8wjN`CrIA&sNGhD>9_S->S~OYtYupbWn&Z1;g{!^mO4$SM@oH0uCsfWJ!bY!8G3eWwEX}1v3~s;|ENwvZ=Ji(qMTU#C z(S<(~R0CVV{kyOG3aJe{oWtxVQd_GTb5uUtq zpfoG-L$^vzQB5TQ7u|a!(+o;+R<9`^{nW zQLv9AYB0q=tb~rF(-#Fai;Z*zhs2O;^vU1oud35!*9dE?41&ou=ulm4cw<8gBB)kk zOo~eDS91nO(~tm*EmZY@4UJ>=JyO~cmHIy8;k0B6bfFy+ zW{_l!{6&|S_Xyg<+sw*8Vh2sv)|%Ho{o76Y}y5aEI?Ph9E%YO#~tV^koc+wSRb((5le9>m=(z3OY)2YU7vqehp#Si1oMt zhQ3HAhgE{Ykq3o7+>_j*6mRQYin`n>KJB291XwYNJJIONZc=@w)FGRse@35SF861| zv)VzLVSJCEk{+j0-m6-Q44G$wOn|+llzG?lwBAgTxj9;B*z3U&+gn<|%A3XQYt}r6 z%=1YSNazj*FM5FQU(uV;0_`&2`vzaK!t8k}GQ%Mo+7Lx&_mhI5=q{_Nv!_pqS?*@_ z{qGCh^g~}@H*7%K%v*q4%IXx)8^06D?L(()iQLr0t6;`G$}}boR=7GkW)nd_T3{T# zK2okvV}9Te|NBie+RP!`#E7j{=+@d>kblC8+xO^Sr+#0rsNdIzr^b(VrU=eaY!))W{Jb3kKbaWv3 zUFumyLF$`T<;VNC;o_!iQ9eYSQzC0V?d@$Sa+nzL;(_&KSz8@Kg zzzDW$(Dx!4|1qyQ&a3xlWpJNaj=EQ zOOwupz>Ar)=VdkES-ag<`=m&KN0Kv@@*At2|L9lK)jxN+PspJm`eU;0`mC)+GG>+WOBmK)2a3%FWsoKLynyi!Cia?kj&i4$x( z7-pQvXpJV=bO4&faIn{l3xY?t3HI!(rr~#S>d*e}U!4Y0c(Jm~Au@8D4!@8ijZob)J zdXfCya`YP~AhKDG|4bH^cIx9Nie*hmvJQt?)bX(PDd+Z`7k#JMxO$1!N5&YxJ|kBm zq=k~)^O#K=I`AgBak+rg#o{O4@OD1M4Ue?xcd3>eVck0goM)i!ekqzf_*Nki7Glvy zrmZvDWfd3a=cf|J*OQ(eKBg#kV-p|%rC$pkh3R#6;8i z_$a(rMEQhaM)t_|bT-`B{;m=D7$INt^D78?aHGHT?zVZOJPX{?weR}{8&F*-na0D@ zrmGy80O`(@F03l>wIoDDM9fMFeB#bHMydR0-*_BWtXVEWx%Z3KKH?16M$j+J+AaqG z-D7eReK`N1`PMF--`Q+KsjGHju4Y)f8SqPhRA+Um=X=7PjZdtcX+JRniI2gG`DYQ) z8^<6oq{F7a+c>3k3yd#2rU6JyKnwYU{NduS$gLt-2Rc&96V&VL(@iFb|Lz2fspTUG z+a^mRMB5#!cBL$qyS$yj84zo zBBdqP(C6?oRXrKI$gj0jODU4UF5Ix6Qclk+x3`VCLmn#6qjFh?>o1d!&64=s-J_($ zt|+gKQ>4y1N2kF{_4x;%_w5P|<_O=p@z3*$Za9VmxQ5Sn?LkoW#|#4QF>sOP zpCOo?snGkJa&*|$5(H*B*5B&o=>n(i-V;c?A2RH7hVwh8V<7k7!vs-Xtp zH#{#}UJg}9OsQk}SS<%n)q_O06}q#~UKz}|zsYXC9+5(;EGVK!>DSZ)sgU6^kVNb7 zRx%pU^`y#BTfbNTuMsKjS*HP7icc*rrT%Y}e#Ce?TO8y`qQ)d#-Hr)@?@Y6t7rn{} 
zZmYFYGIf()xoJ4m96cIE`5jjN>vvdv=}hmVk}Nm+ci7nk(XvEO!gZn+=)QDm%Pm{o z?>F|}e>b#8(HuWU2t&unp#_!(=S?zFdK`bZ(5!iWnJC^zO{$+*AbeLf*8^NnYB1os z(uJRuX}FRu>B|fUvc0+cf@d49(0p5kmNHCcrqO0 ze>hvCypM1SPcQ|BbH`(eW1F!zFAE28N)WZyKD!hl-hy|Ens0w#xBm%(0?XFeWef16 z*SMnUkMhS{YU(XB!sFeav<=(1=@B91^VvSXPTOw+dU?u_mXhh+0K7QhL-Xa)#>V_BOfz9ATCADBMq3Z@?_57I?kg^`)e}Coy<4T#TYVA z;}9$+$#^R1_X`fD456@&U&xCNG{iPb+L4EZNTpTw1kC;4M7AfGw|nsy z#rA>&&Oq|FAB1j8PyT%z0d!Bhuh}TpT+cAeWOk(mkhnUJ6xf z|9&tHu$c4%M{pks@3Sq4@9l;9CPY-TghPd0moEwd1w&I=_&iBK1 zO6aBi35$+u9xSW=h`-HV*2b-#OL5ZmNdUv_rmHGVD|{J~wd6-^fdO9g59hg&FX@GE z;eCT5jnbc%uRr;4aECBJP+I`3@JAu7JH7s1%YO+6pLr$^NZbuuvV?!XciLd1radIy z|C2xGX0QEU&SrEZ9aAW{AGM|izX)`=44;5|*S-jD7xQ17Ifq5DAl86Pv`_EYN!z?) zK4Zr(^`ku{Y)&4EaM2tA`Sf4EpW!htL|{EFYT(ax5tg~Hc?bAx0~Q}_2AQeZQ2jLj zE=xp(1fu!OLPp?03qPYKVmuzV&KWMvk!6vu>*3R`SB=5+F`3a`C^1(HnPJrv{d>^@ z;n?5YQNc3_?{9ERk2Sd4uUlh&i3ukf<(-j$x_{^*5@aLi`_vApUmgulp7FR^$`4AN z0u;^hC(nA0{uzZ}LOz#iq{Vbl@qFrhD_vjhK+wZMhEg6Ir&^0Gv#M!udNROPlxhQb zB1u)P33A~rq=cnQ0`>*yD4X&sPQ|m*^;)@SaViX|DI(ho7Po1}MYI=#qIe&Q3kgbi z{=JfB_g21v-72b;QSzX6X6r(R1$*Sm&D3CGRe`Lh&>mYuv7lGa=8TJdzYbo*B-a3{ zL7gS76N8O;G7SOLG|hsVG?CIt>|%!Tbj<5R5oe&IC$4qFQTfS-J@^y1dfE#&DVVj$ zHqP`vuCfbbOcEr%=EXG2uv~$5rd9GQ!G|#EQq3Px4iyfsx_C)aoP5+cJX)V1MVpo5 z5I#k{f~_`uS}p6AWNMp75kRw^dOdPIH7f}h@^x?7ttM_^)H~?k?AhP09jrxDk-2u4 zM0!wi7a_!mM}Tb52NtSfyy7KIn&4rchcFaEzYTD~dI6dNLAWCS@g%Y_Mx23=21Bmv z6)~{VY5t2s2wr%^MAvrs^<8x^@Q_w^=LLg~z-QGx52AJyNgQLI835k9OIz;#gg7y5s7qYDHdD_w_cZz=uKKghJP@m8 zkAX10ENQEG^{{cpJ0$H#9MZau{1QqrkNlL#r{j1(G#=A)B;jC55~uaMAm|BLbxZ!j zJ`4}XK~zWYSX0VrW*nKGUgD+vmt z?&ix6HP`v>M$?tNo#`|eA;`o*|4{LgUbsSssZXC$7>PFV7L9aXL@mLNrdO1nslRT( zlbC+~zSk2)lp3e$lm(zkE0QdXg_Y{rKD=5XTp^`O>El2gI|65*bzAM$8qT9Uuo3cbYMU0Y(X!!KY8>m88x5SmO`cRfIHLU`{ z-zlNjzBTVid%j7E7f#`;)oiPDS6R6$d%KYem+J)uIse4;3Q}~(9e;*l#jA>oHuS~s z!Cmi$65=bFzw{|vaI{8JoZUI@kP16V7A}Db0@7Tiu6ptM~Zq+{=8=u9ZA<5yoW+!Bm>#u z$7fH)t<2x;Up_v=oiTvuMtqka8K+JV%nwQ@6Z^Bzm%$d*`#=H8DmJC;OCR6e^?fXI-T&4TR-!7_8 
zYJ&rOQ7ekD{_;!J>UR??Ez3QT$XckeVU~_pZC+H6=ST}0nl_)SsM8kQq@7@;^(%gZ z9kocLkuCoJmrn7~lKM4Uvh{||+u%Ei2Lf%`veIIKB2|k^WEm|}@KUhyxx<9Rk%{5Z4@0?X= zCpd?v_N_Y$7N|_es1}=fr2*uOwFXdSHN!O7-sOjUw2DBU)oex9SB-GKimKDOzQT4|ZgpW8HcJUf_MS2doxc1uOSH$3+BGQgKVAU-*-3Kw zKu56dlPy0huP1pYRo06XMD|z(+#0`X-Tp@yw=eQt&7V{le6Xket>Ve>q2x1C8%x)a z%l?jIB!4-yzb4IM^Mw#5l9FvYB}3$9fzEsn3nG8@sW()fkG{(qBM#AYFFG)W%-&F| zC*TnjRjv#+pt}h}oFoh(ca^Oe*O+%y-mp{zg4bDE6Gd-TQA5F+?jrOcXyVe#M&BEH{pblwU|~GXP8?c8z+z_nz+mLq@Ly7w`)SQsb01VAiBGxSOOtA#Cg~sI zdN$)$Jlwb}@E5ueR8sX*K%F>P$HnM$ma4{x_UF+40c&9&@A}q7>CzjT^^BNAo$Dz6 z_e^{J;q&)pAK5~@fR~)Ke&Nid)1W(pB*aIO=2=LjtHw%0udc~Gn?C4p{YU!rQ42bi zk>@XPkDWv2gj}G?mF-OQrAUdA}0oCUA4rdVpu^1A5yLg#;&^D4fk5JR|P- z7j_RDGnf5^$^A_QV&z`_mqLOJ7h^I|r2*P5>u-m9{U(`W^|Zn={o{Xj4xHdBkgGxb z+K`5P|KfGY{-@$B1V>CF;@V~d6n$oScN#-A+z96ol2B{(>TNf7aGmoZ_w@b>BHO&I zhbddS#d0dP81>qwaNVRoJ^E~jUh^~P#n@yrs0;eg;+eD*DOTItEMX|wddjNe z{WW!T2hYU^)|?qi!>*SK+k$#7TKi=k6oj5s528FT%IVvFAx|#9ysGtPJl0)f80l__ z<&P(K-{6g0;E<8FkNh#$^F_YmkUwO=fxglgj5x4kD(j%~1yS<;E@ZIF?@c+PKWtt7 z)kSYTt1{no{iuHw`RV`Zf$VR{e!pZ(_=jf;6SKUP%2N(FHi9Th-)#GeLhlMXRAH(_ zk;P+O>$Jh#q#s;uzJvRyfG)CZhqBA>CJ=Hyhkr=X9Idl3hYDVh6m z4gF+p>xo`~iGru6IE^|{)y+SmJxc;^6s@_p;}hps-WC==;Bt?ADy!|^?V9Z1Y#m%p zO3nQ(rUXHEyDH0ZV1Hr;R?M0V`e(X7%uDr}lF&zi-!q#P0X>I3$TPMz$0X! znoadSO)kUF8{3U5Wc4-$r$Rd(hsGG0^lLp9q@K*Kj-ZIEok#W10yr@v1a9p*IqH+zU$ zg|*T1KB$vC=c|MU=e=xex_dl#yx-Z@JIjB0fq0+br?`;t@B!d?SWh?4!QE3n?!;Ht z>=N}nF@eg9bQ4u7H?0r@UV+TZbI1A74X1%U&}1pjC&EPwzG zAd5NPK*r{?@swCo{#9h_IK48z$x*NyRQ%ueHJWriB!j`>x#j+PQc4CDaC*^*m{saT z&$Gat;^4FJVQ#J+AgkuhJ;l?@PapL>YHy`As4D$d&<8L8$z4LUw9+|1n(+ilJ!C4f z5<$MOx65GEO9^40jI=ZqxQu8a5b=$n>`dgHEPm&)DgX>i0ru{nWi?!8gDRV?H_?j? 
zuDf&~Xr~^14FcCiA;<85+JSnxku=;=ByhM}+Ctd*tw-!hJ_+HWEQRm;%4MDya>4Pa#aCXS?Qggo{^B0I0# zpiQIk6c04_?gqz+&OxO4wg5z?tzu)~#$<#cAa0ZpZ?SA^zW>zHCVdLBO;J4{(zQ14 zzNmgz=IqwucmMvC*2;=?Qx?_Gq;_Z3y5Vho%J+EZuX4OjyVr)(`=0!?^m<8O{aaqE zXPErGapg9P?bH{cE_l_v5>BH<>yBSGp#iagpDW#jL%B&Z--r|2O;<>xS%5JY8WFm+ z2kjTXaH(fO_*vfnQZde0G4AZ?`wq^82oHy7n#>PRp= zEh#T0$qX1R-|>iu=mFc4P;(jbn>#)Rxxu*m(>qprV!5T4oh7pTKtuS_FJ#z`bPZ&j z@qE2CbVC~ehhJK5ehT^g!+Ck6#Dj|gIo~j zFbwEdDYpnN% zQJ1@!b(`h9Xsw&N+vYq({@|VK{NTO+sK+$Vd46+5nYa2XryRAOR(>Ry9O0Kg1=3t& z7EcK=TZ?TPL-KA`hVt1(FZO6|N(Kvg3SubJS|xLn@Fb2LkH)c??_ct7 zW*N(Im}D&>(qzyIw;Ql*yja9GKFQgHkmE|+KcRqE}=3n(=(ESoa!1j^X^3csj zkD-M*mquD)JZ{C*T7G#&{lW2mrU2jMF~3T2(4JeGk_;)~k9y-koMc4as1f(}kiWAW z0Yh&nkzVaz;kezW*h2q2s*WA#6a4w0UO;VzfyLLmY8j3qd~+D*g5~VQH%8vvwJHh% zqtt&K6I?bCG|UCT7!r&Aig1N&*DkM-C|+^?E^5Fb(IUr%bXHPG2>0cp7o+HBAGNbC7VdahxtUmX-kEuK#!=7$imYv0;yEx=P=XLFET5lA!XoRR2%ls8NA6 zbXawK&{nYKp1H{HN(IaE-i7e%PuZiNpWG~y3c^X z$)`=vuBaEF!xc;XnOF729vBRig87Pv*fTm2_EmXP3=ZnHD3P@OY}4&E62B^zR}LbY zFWGKEqIxF^wtD=ySNT_QjpUQxNQ{RQ(Hc*$GT>*L^mivN)H<#<)xzt1h;x6V0CxGip`vm?%74Ulb*5&Y$qp1VHf^umS2Yle@9nB)j#i%UI}%q$V4 z(9Jk(3ML7cLQ^6{nw}Md84y;+bF(>B&h3xkR)fhmjcqTc`rPM}`j{v)ocWjk^PjDj zsD3=DviW#gaCbf{vl_|v;agbo#*+o- zohR6Yq!!ch`(=V+xs=zSf8(!{AOeEOA@`v$XM?2BC&uDrTlcoavP>P0UPG43RLWkQ zIzHKQp09OUf<2$GP}S64W68jnW)!h;;5X=Nrz|QD<;S#~#$>g{lSp3tOy95>!H*AHGh;ZbI4AzA3gpI0;W% z9Pd=(7;*$`eH6x+Wu&!rRXU(@_3VIYnn48c5aW|2caS*cvlJss>BZ zMHMqTDmmYu<+bZ^d#q9;^z#WFv-T_6t65w`8O0;fKJy0#zx7?cBs|z+%{>hY0V~rj z#nRh+Skj9fWUWB4sT!Ep>nWIolvUsozE&5ybIRx9XIim;Pn@nd5#y=hvoGF!tYP%*D?ziZx#|UzUzl@jwYasaOKvoOl6#r;53${w z{dh01XU3rOQ>owj98sE*4K*YkhA;e#Ob~?uG6tw3sUp5NX_6V&-2Gk7pM6mNijEL9 zu1V$f!T5y_(~EgAT({lXF}^POzMAtvX)0A$z~F-a=>RCRPNT#Y<#@$Y1S&CyvKSTH zNSpCuS(s*|lWK+0DlGfK_EuY~>gZ|V{7vEB6IpLud8Bs5k>M<79M{1#jtWpHyo%-0 zbD7D~mqdxlLNXc}B3h%kR`|w}3a$4;;S!$ZRLceT+e|yZDg=xpNpNYEOzc6RbIG+D zmI-)j3MjS;KcyEdZ`ri29El2@&(16S1J^2&&Y8aVAKWX*hu&n5B5uGEl{nFKj8@+} z%f*I5A}!j@Xi}GPNaBS@xLx|B@XEu>B9GdZ#a%{2rWH(5dz`_SouzX1Jdmg 
zIeY=+;qgunI%cw$MpULan4N#P_h>j4hta;Gg*o6SW}uR?HDBeE0wxNb4~0Ly8qFrZ zTo@Oy{*Ic{Ls8gE?!hjh=+O+Q3~79`aqqCKSGT{sJr>&L!Js_Hm5=KP>8$A<4JXgw zqUn6H=}G&_j8SnKLTM{_nfKKUGQ<@#7F4Yg_UBuA-E@QfTTswQ=+T!2f`q4~69e&IL&wHjGTQ z!DgCMW0DUl5dEY-oESd}D_`l(k`NEaz3jVQLUkh;mh?!uO%o(8F;Qi$0jOlrXwJ{C z>0uNh**NZTwuD6GPN`BfWD?3_*n|*|`wSjiE58v9CU1?R z9PfMVlX#7DSjycifM#Yh=Pr_ub6#haW}BuWuoce@ z+eo3|s2JZo6UIITX03(3-%Ej2etic9PpYwzJM_MRw6ZroE{W9<=Xefpyy>UvgpRBl_H(De5;^dgwO$)|jI<@>il--><;6{UIb$ODPB__iR<#f!&~6J5 zNyN7PUBpU|K)y)iOpcYGnGp4xWgggUW_6uyPCFA2THmWa;!1(d27irH<0PcAjilY4 zGsb3?903G8JGmR%n)&KfDJRUetj9{4YztyJ-XnR*)?>=mz%X{_*U3*ag3J$@EwtWdC z@2_^r=h_PmX#G%NFJ;-*Xc5e7HM(Fauppt@b9JY5{ZiB&S|Bq?--K0%zF_owDb5I$ z_RG!@&So-fwQ8ZlGJ`5k0dz0o@mR{F8fwBT`ziqfN!DS>{9~j}i zv19UOW&7b{C|?>yYP^hn6K)#+xfZfyR|1tDfed==X-SQh=+0Wy0_F)Nunxv}J0y`I z$g%Htt|}MXuGp-8k*yBu5h918D8M9S$vt6^p}7_?_|ln$98{Qb&O?Yxk?xg(%V#iZ z<&}U#z@2#;69TN#bEIRKBS|on$mWS(AeJ8`c0nEYf<_m6P|7_2o40}4{vDC2|Fv$v zx-gF={o{>s19SR$!kD0^g-dI%io)?+rGiSaCdXHP`j;4A*tiT+{z;Bi{z-zF&qi4J z6Cg;7;i3{D#?x=V^kUS(XH>Ty!4$5#8r_9ycR0fg;wzxh3umXi70#Qbs&&ws_iuaa z7DYkn4XUluG?*;toN+G`#aEiNgI|bf`b!eG*AD6ZLBjlMQIo8kU z<;lxo`lkwl4EA)RR{e2&+P z&qkF>fmsWz?WM;>jzCK9{nGYYtfP~PCWI`_-S6wv2>*D<>{LE z+7b2q$YQku6}*xa7VK3_mJl3hLQH39hcT&=+5i?3UnBGk*Zg9bHg}O(eJ6(zg|H~? zcVgz`P$JnZk6iM%8gmrlnP@r6yiN#S2ki48BK+eZ2mjFR#fKLvqIPrj#+ zp;G!5>NVAa@}onF^4fO5NK1GVt#JJ8B#G>9`}Pv%k(>xKSx@<|&sU10l5p@MK|6*j z?>}Fn5E7N_2->m?AD?Pr5jZJyuF*l1!Nd^L`TXUI&6?Z-iO(&GOEeYp4Ub8Htig1y zL{4=}roz|oWOyOuC0J*^kdxbH{pXj(%ThYgkg*6HXMC)(df|Cg_4JlXEa3})-=t$? 
zE6}^9%7LmhoFH6NuaP8Fr5Pf6vs#usTZsFy{!%&Qbr z)UC>m`GADNbj2w^>?v~(HTyv`R2+YwVdCStjgnps4G9P1Ka{{U2i9;>r-;UZnV%?8IU5?^VgiH>z&?I*=FEzm;aD0K0PlXasSR+M?9AM9jDc zF_ya(SO~sErt@)D;Sx~+BGwC-gFudB03%OR%>K-UGJ&&x4XBp=EO{wQFKLTUqaSbYeD%e8bEJb3X;kke+f3uNC+IX%7`%a4qTE0ms-A?S4vd^?MKJ|3G$MO#Go z%YHVD!eo>!x?qOm@@9ecml$q@v{+>D5Slb47wV5WTHh?F5nz1(MKih13&!F^+n0$U zWq5qJY$*VPMmKa8wal|=0ctAu+J6t&G*jTCD|#`8C{VY-R{oz%MzMC4d_?=RK3eZ{ zAgwX{$$aDzs+hfnGYRUzJ7x;zz#w`GgC9SK`v8na_v}4Db!PG`?^i!o(=>sa==N+i zf;uJ+m=y;AZr`TA{${7>?QFK&#kBbVpsp|h5PclB$dwq_wHd*|t!10-avntb>8N>S z+FJn9X#kkF7y!qNYYO)V3^0l+EJx}1FQ$w?(=S?rDb`6q;aj-NsdR?M)6 zZr+O^m}5?e3n$nvtsj8sw;1gB&3CHp-;2|+nRfMk%u>L4OAclxL_;2N^w)+bT0@#~ zJ&+_1oQ9=|`qY$#X+`gjg5LlhbllvDHz5a@rI8K!2WAxf+J3D3E5ZZ^@wc(&(BA_c zBI%=DT^BRqg2yT*-0G-hF__7#%%=eLW^Fu@sO;qjv{cNEv*lq~ieylwN`QM;Krn9_uJfi}rmXHwv zG%X3#{{uh)q6A`SGh}WeYpohYX5n;AxZ8QK6zE#^LA;c^0i9q%%tYKZc<)|fp8>w-@Pt1j)%6W~>#c;aK897ycJh0C}rzGne1J$c9-IWl!&rLQE|_uv%0BGff9gyaTTESc1+1T4?c+Jrk?#2uO%z}DlHx{hKhVSlj%>B;=u zh+Od&#towdhxKI0?oh`!LASj)5c}%$+{~^~ULC2!9uag{603Y&yA*`3$k|cny!mY( zx+zAF9tPN1x^O-soVDq4xZs=X)Di3qm&IrJMj)EqZy5m5X!byeXi?zD&qptiCh&1w8dq z&y($FOs!mw3c0n5sp3fCRiY$8;2&-tUU5||y@RhhFv_@Tpd-DU#*Ad*AS3}(9V+8vXii{)vmmuh^%ePh>4d`2;e3J% zAtMk0*_{Ynw%|jK;?ZfTw{aZuJM%#$$Ksk)!XN*V9kTn#6FAo*>X-kq7*cevQbA z^UwSbvRsV95f1fzYvrd;NReG|bA!Ul}s28NtC2bfeGG< zXM%L7(M^aAl9N*Iw0K!q&C6+QQ84K9CiY?4?JI9jw20Xt@v5{eYX}XpBEN@x4mnl> zC;hKw7IVWVxS~q|PY1uK2yN`~Fl$wG{vJp;1Khxu_kwAa3T^oT!G)xqpUuG@ka_D> z6^mMYR}q_3qNyL1`j))L1*=-_^nbhnJsU474tmm6zg|BTsg{ZUfRsR;*C}ouK@~>= zFB8azq+9}3KvsKJW>ax3j!4g=ZL1%$MEYS5WRK=NRzG`xZE)~74Z&B617aH$Yvj5E zZ3xN9r|O&PS-ZqtK!v8jjHca=Jm{$WJopS-bp4BCc&+4(r63K7y$jI22bYzE)|8a% zD=Fk0`NrxEJD#Z#YI|=e7K0Bn)ua$2>D-++|tBavXEG z#qk#YbSNrkip#8kCQVtI*sL8bi%$5)0(7KqLB@TVoo&YbW-?zIopyWst#gw=A}Y`# z>9jbn2@Ab#YJ5NZIG!nUo0ghxv9M*(xZP9adAF;IbRiB_CnZ!+M5=%#Tof17aJgGn z(ghM(W1XS|mPLv`QkB24q$wd_+i-OPr7-+| zUPT6g!{+dKg)XFQ_b=H=M@Uh@gJVs9%Y{M?=VhvCqDf0Tib9>hW&`6#HdO!>23M@# 
zYgPA7_K7dcjp1gSu;`5x4IUQJ0w7^;(4*XCbtA~;hyKIDqRr=(k+m(Ab{8`4#`sF2 z)bo#gRMe$cYKPJ&JcDWp1zg3wnEHSsdLT=YNa}tOy?pY`_wSAN+r<#o>!aDV)`Tim zy&|I5h<;RgH1oMr>YNx+Cn*K}F4Z*6j(9}Am6Q-aJ;i^;f^rO&ZN4%o3fFQvG)b#Ln%kg)x*<18e!pIEo;Nz8j>)+m4 zP+1woErX(_o8|}T)SSMu_XKI>!InhN5zAQ+fVIhwkIM_+B2y~81xOr#vTI+%tqKo2yH#kZ5e2fWlOE^ zS)jS4IC9rslB%}rBA8ao%XzodCDCh@)=qOfkobqJ z!05vX@Nchoib{DLVIW<16O(HqMkB(#mSVzsNAC1ivk7#f=RA@jCvmpl#YYN*HRqX8 zuLPyQU^1A-cOQwX@lt*LlDdtIrkVt|9Bb48+1Cw#vp(`Wy37kSbJ;nH;(x?}hBP%rHw^Lttv|DJi7=%HoRq&Wzufe) zZx2#!Px6Ho>TDnxZw*RW&GxLsbfFWzO5xZNp|N)VXB+)XAL=zFL(?-;dA?a#VY-f@ zS|z@NB%{zmLYIuy@a}o)&rIXjmhK%90dP4ls+s{85l2~Q-J>D+d1jidu*p{mn342a7;EXsh~;I8mLsE(?K5OHfpx>q zW3za19b3G$e-wM5l;Cyzl|9XT+`Fnp-=@S$VywJE?)Ek-j5&e>J$U? zZ_f`LK8m+5#rMtMkq`SIwhs$N@70ikK<(u9k2>%Cd+-w)GCVA3 z57*n7bFLx24wEk-iqx~M-o9X5Rr=yb)p}4kCbE(n&9wOYgD#*mhWN%uPOGuki5$3y z2ZOB<^kjX{!+FX9S2k_;XzkROrhEv%l1)Jgbn9XWU<%zGL(1Qe$!J)7ftdlLj~)S| zb6O!`C@6GIWNRSX%+Vk|pqz=_VX4&@m7m5J-q6g!m(B!KW_~1vMG&X8xk6u^2c0+w z2VOKIp$suJTr=*Uenlu}_2czl|G~zx?x2{xEauKML1sNUkxt*FKN6oj!=^h$1_%DT z@UBf&o<2#ainUHlWqy+$3~)=yJ>=eldp50KHvZVtwQa-nA`k*7y#)e@H0iwtkcfbQ7lIV&MS2kesPrZ+ zgf2x&R8&-&NS98Kj*1lN9YKho0-}6td)@c*%zQKNJMS-NoFUoSd+oK(^Ei*vfGIX$ z{iikTc5Q8U8a$bKOCbFGem9*tuGr+PvOH+1LK+Xo-wLtQOEO+0zx?b*3T+!uy_D|+ zI;ow#atKh8uj@izPmf z$}TSh8*skVv1dbLhFWMcAm3NS@C5%Ryam$W^B-XzHsClo0+{sT#ZJb74W#1Vt8rxBb}n?&I=& z0b(c~51D2+V<(C!spu4f&&EU8oQf*vNf8evK^3OKdL>13vdYh~`rH?^8(Xv|e;s>6 zB$tbhyE1jhq4eDL@Kptu4BBj!MPXyZ2@O_q_@AfdPdKCLeQ&*goXVl;Y+3lIT{Lu1 zx-TzI0QOOLC;o}N$9T6*0{;F{M_CjP->`SpdAvlhkHD$pT-Ec=P%7@y&&iqDp*KM? 
zV-W9H!>0@A5s_F-IS}S}@_Nxrk&2yf*`~kPOP5RxgL!N_=4N>}*FYaksgU=u2ipnpY5?PM^(mE~=%YOWx` zCG?TAIJbrtK$nQsFE=Ii1{5xbzOXByaV7&BbiS6=m%THjiEaQe9{Nd6ON~t0H3D2f z`o+&6#8dfmf7F#WF$42n(2dBgVEJei=7nMZEXnHzNB?BeiB6mGfQNExVMCnd;Wlk< z?4nadd_aX`pdAp6iCYIQs*Y#rvn~}C zM%hR8=`b38@S#z*N$NSHubStkCWhHcV(l&?>9RKFQyOSbBc+3V;dXfwrO-~j?|FHA zKZ!p~U6__pVaCtvZq@g`9J*RcW?WTX#vaHt)%d^n%`507^gUiA%ck!|qAl|-&?Mh`Jw$F|08wJJ>FGwo=r%7xEeg{Scmql#ZfyUbGx<2=FcuEc6} z%>nd9pDB0i0;!|p$msaQ*wVaBw%=xKTveb$dx3>$26#bTT~z>fnBfkHzd7Z}{F*C? z)lJ5-Sq6h{LTB@M}}w4J{4HKAddym&R3p%2F;-? z&EOr8Y|h^iXCv>x3g%wYtZ;#qeH4F{j=)c*of!m+(D}-5uq~si{)i*CBlFqlvS17% zjMg+@@^)ek=&Uf~8BZo>nOo+zQx^1wrO*j00rng<#e-lU#+KEQ*Km6dDhJaTs}XgV z#q|{8g!b{}vTkpvoj%dv(sH?%c+>GL97*5y>VG+09#_{ow@JepP#-@T=473i)MWu` zuotrR8n_qZ{OgsLV{%rw;p9m3are5tGADSzmi=?4F4YJi1%A*&O2I9qOc$%GAIi<% znVDuWmz-Tp4SyhTZ>Gs6>~m04(A&qYC*S)NL`~~K3Zl9B#OdLqtmlDAXF)}cQnp(x zUyY>$zk89B(K{7X)GG{8&d5A?mLD62hB|$bYlEOn^Dd3%ZSv6MeH6Cn?me9VC)CC1 zh`_WcN)neY>%DP7G40Dsb%`)0g!THG1pB@2oboTFo1r46h*cguojW=UK2 zFuyb85T>r5y2FN@)HhEGx+?ZGhA+VJ@-9w!Lw4ND25T! 
z&7pK)!QI<2z(CrUS?r!2f4v|H*7ycl)Zfi|=7E}pA3RVPCN_e3LSX3;h|4oqw6+Ky z_iN8KpUC17uI*m`xuf}e$Kj^B(GS&<$ZNhoLJq|M!A2a^jEn;Y?y5CBq)4U{(|v02 zqbE6LDJA_oX+`pEDF})y?87lW4+H6qQDm{ke6J2Xo)oz%U8(#W7Xi2dm=}mCxYIDP zQE1bqkJM3ZR!J9R^mz``l;!s^(Go9-!9>bu>tbXXlcpKoI=(E$Ram@ufS5VYjsv6q zE`j+aExFrFrRNnouG?KQMu*vD1b*(LaN|lx$NwnwRCq~SuyD;quu?&g88r>&x~|&N{`&B3 zvl741-3w#5v!sj={!E&9BUWI63}E9nk@#F^Au@loiWq74&mY98xK-zy7+1h`f~KMlF^30P4|_Bk!@D^jtS&l!Cr;OmDBgO!G{^W*28Y z6R}=hrEPBM>7%0<5++`yi^jSl@}QsGV*}XFVe+dSUOS2#h%Xg!~;i z8FpvNmcOV!zESp*20on3^7{l-dw*vcS1C@k_$ATnXT?Rrymgs0FARI;Utaol-$w51 zdtz#@(rTC(kfEvyax6cAF6?g)2|77&QBMMBMY=|+@^KASsfllP#)#ipO(cf+C=68e zE&sXqOcZpRZvo`nbMwyYgFOLwE}rFbJUjwl!&o4iL^qR&nc?NR=}Y`Dexe<@aT%xr zbJmN)haaA)y9R#DB=q}(WL7RfUzFqz{8f+BBfmM2p!p^)rbI`0j}pH#T1!l+QUGYg z)p5OdAYk2A;sy>}U?lX1y#M|of_%zOV8Cqh3g5*4?-DmURB=J?Z$S{D6xoFkBHMh4 zp-7-g5+5K?hC?|w{d30mIACkOpBqde{O(&9wlcBm4Kf&+B}bcmIY->1rT~O^n?Aet zQ~+Q^7txu7`yf~1cS&>3?(s4XSn9tiFlKQT_o^Px?p_TaMA=5B`Bo%c;t~<)HV{$U z0cqH3=wFp37oe6LYe=*AwGcEs{uCw!3PaA+-XO66F|iU24NJiAdRNBfok0?_&=nnn zZyoJSgk|#T`D&*Guep|~mgG;BR!+?Nk|3&qK+*&X2R38l;AUO@ehF<;5#P1nyKxu>d&;^YO$Pjx^AEN@4&ZYwpf$0}#P6aiI*a~! zZ$RSB_KV26uh5(T%3j=gGynB&&y6x)no8?8#!%`8K*K_=$mq+|bV-52Qxfo5>!gy- zz44k7fRJ!dzn$6jW~F>m6#Ss@_JoxV8JZ9=DS<#kx@MypV7JEiOyhhK-A6yn96Qg= z5yVF_b$$NIUH$atr#+%%QHaQvsM!;cHiHI$p$kxG3j5LpPzN6q3t6S?)T(}X^n7K? 
zQ}Qo^!Ze^U0-J46Mj=RM2mpN}UH*=&_uzAoCGieKHammx;eaWZTr@ayQ*+fTbFg?2 zR7C9MGNOpN2o~_g)cBv?U;*=*ON?1lB;Jt+@TW2Wc3UMZ4Z%H$YNc6_78%?Gh^`AB z=8hW^VBtNEqUODd*GX&7lXi;bM~YX0u9vfM>AslG>ng`m#h{I=kn2QBT|5sLE2Beg zEV3wGpiSiI7}F(grU$`dNpwS3569s=H*ea1)KS;52R2v2NlXa^AHYl5u1T>M26DmY z+|yr_is&np+6A5ZjrjI&3_yK|0EGZu%L>l$n>Bv-=Uea_^yNFX--E!No1Aj)3*kjR zVg{GTrzTcO9pt%K&6(M9GWMSEsI>iAc=jkPhQv-SQD@RQa_+#`(3GMxj$|(AV3S~@ zB$o)YZQWSp6m;|C1<}2YfVdg=aJD&w(HF&1mlE6+K1JHF`M5x5><=D!7ZBdZ0qn&> zB_eKWm(?BTICnTf!*g@KSlLr7s?#TE-FnF5{;v(M{ytF&G=1%!D zys#1Y4@)mHgJoxxi4u7B11u}fP~&p_)D_^w%-F((MytF)twfBYMdE~iRp1)h*Hd0G zfEWIporD@64c6rV%zZMzv`Url(FTNtVPRwz1ut(L1YI5Y{5n=!Z8=3YMR(=tlj3(a zJsO_?<5^^?G4M&t_X%r_PgyO8S;(SpeQtiCHD~rKXd8C@j<*UMhsOyoatbvm5*G)e zOpWZbG`YX~K)Cwe9+(6c_%Gofyvw+#obFAw&U3Amae$+sr~EOBeViStaQrzl!Q z;tPmn2a(N)vvWa0{!v5ODc35O3z+g=%M6X5EdAMftCyK!^oH@dD3|Y#AmGK-vH?= z{3oBzp`WloGn}bgXH>=wfkV0rAkG|$d@bpz*P!=K|R# zLhFJ@Fq3j7ib@yMC|79q)OGKN0o%sdr&iNBYW$Hw6lmLPZ4m^m!~`(AnNd4XxbcP` z7%vVb@ZHNJ#y1OZ*3q|e6$eFsq&|nsU@!F2JYCm3?N_qAQTmqVGuYo;RiFCKxBaoe zKN14~KL3j2`^QE`@hG{ZTlcl{;95&Mtiu;$ljv9^O?~JBgleAaYad4!-U3rHkfXAB z9`*Iv4zRS&=z^&Z#tM$qJ<=QZ;I3g4uXu42*2f(;YBsJ-*>F^vaQ#Ox_fmnZXKu2a9 zm|TKz^(*|zIJJx*3nDP$$bts0fDqcERm}QviIS{YDqG(ud7X~thPykQKeUHI@iRZL zc$ly)8cn&c(47=nJoIkv2%cSQg$?SwyA~~c7dWX4+qlA2sY3-e)p(K0Wn(m(Ga!D3 zD57uj+KoG4L@Z_NY4AwYC>biI9UK_QCxrp`j}G0(%SP#w6_8Y^ zWCIXQbf&g`moMGN?VX6sYpoS78&Tl5W z-bfx6yI?KzLXXbQ$9)nPRY!6UW@6K+L>iTuB7gq9ga&Q zDqqBvw<^kG1Nk%d@iXfrn{?FZ-KH-u+hLjdC;1=N!~pd~i^k6&s6CQ&t)$|z4H+lJ z3k3;^GOeZYEv#J&Bbh1t%^Ce*9!BLAc<8~{rCOh}zHfgAL-mJgDuujo=mfqmjcRTi zIJV~nbyM7BypR?)F~q!sg(}2o61xmA0U5@x^Mc$yUgqzVE)pLYDWV1F?;Yq3oQVAW z@LrIXN#JlRL_kw*;4gl`Gk$-qz&8hY=nMc1NCX6crfQ(y2%@|eL3GGC#WJIetjEOQ z*_}l26__-Vd|l;{fbHr&s+|3fePfR(rD~n+RkY)R;^fO`*W7>$$@{M%ql#+a zY#s*zG3BqjbEps4erIPWr6Jf4&pjoak1+4!-!BH&RZqW*v;9&T#a+YFIiWYS)5s zjk;n6D4sLU;Ni}Ikj(b4KxiaANn*zBNHkie@ zHJdA~Vc*XR>+wH_`=QtgR7caEukv?yz(INejN@7>Z@!&dkwsK~hH9<0t`kYy8DJgz z84Ojvfo+N1!xN;ftODN%MU^?g>(F%Z2aUtrOj;J(`Xbq}1xP||P55&@nwtl#^=E3o 
z58fAt2NnJ9XM9=z+^1KOu?iDr*yL^V#-}LXcaY;O>z+n*vYhXnO`68B`}KX{S=ctP z0dXBJ6a^sIxoFD0yY?d4lk0c*-sMW|HW7k;eQ8=a%jQ}LhR9u7t$;x-14KvKeY*HK zz)rD&sUKXemA8F#`5d_bBb$iVKf=o&t2~wOYt_LcV_CGJMEcHwd*Gne*Led9^+j^D zu4qs!ggd`CRy7im7~{0__I^OqfS@mIjLd_06wsty^!m#}kV+ULrMy;68cDEjtzSNysAxM z8i)4pivgb}FJ@_>gH&5;XzF3h0_O~?;_?@lU;Abj_ofM*u(^a9M{v5vpE9*^eZ3TH z!g_a4)A_uvRTMd^s9AT)(-%JhX(kIioR4={5--G5#_x9CKPPsD^;#RlpkW)`;Pv2Z zM;a^N#m(*=7qH#=DLMIrB11>iIGK272(Z3dbV)+M(MaW|L2yqv)JniVJ?gOD{RdRo zYW(eVxHy#u`mA4D+My5#yR8JC;5_JvbagW}Vj@sdm@!e+M6%3tbnZh!mi(yqnK02? zAP8`QsZaR_7@pN-YeVIl@i5wVz|K$P{)i0Pn(sa&9xh}Q73UX>Z z69p*tR&#GEP%s@`y?`^0C~Wu^Liht}0_VaM^{@jt>(!^QSJZkm??da$Mtg&fGq$vJ)}6C|B;}sp$SSj2If4sPBihHYaWRTh@V!!>`>6YU>36SS^7_Lzl~txq+yN-3 z@@(l7Ra7Z_0(%=c=1ZP9ot@qg(%1eD$iTVf6)q4e0(QsqWEz71UN@rYbYJt-$)Nf* zD8JZ86zA-Ny0~?E_&>qz2;r5B=(k`gKLW?}I2d*bnsg8h_rOAAeBL&%XG@r|rl!&~N7H3;i3>S17!tc@NEvm3uY_(us0&fz}#x z{!3=4aoVVa?)Zp4PGnKj7`r*^C-1qIBfd|FFa8s3CSL;pWsGA-<+fA**6VX6QNq%U zZmXk*Y!Wv<)N#Dfn7(VEI_>csou1vS#C5qjW2P#PU$7Z4${&^;Y=!I(KDu7k{5%`q z+=YH+_FrsECn}Pw6MG*osE$wp-Z-!ge*%n0#m-Z0mOE#~-c=mk_da!vyH&x|Z1gG0 zF$1@-+3U$|_#mk!YWM`8^0L$?{Li^B#(mJa2lT1;1bs`dv`lbw3gOE$@#H5URa7+O zvws4yljYh{jLNu!W}RWgy7DNI5i%sV=qRB2&^Njbk;rxOV!At z@IB|ZATHIc#u=R%$~{HB!ij>RmZa4_!5uo4%IMOCHVs@~w*I)maw8*b&%iu~%S7e- zRS{DcY_VTNOIXrpvLx5O$k7bUkcVOzhMq8OP+aDUQ)Un1YOU0)kWNxVU%KQrbQB^*eQDquQ2r_IN`GXAiZewx&Halj zR81W}WzK-VPzY1TX@6G~Gf;UTU7wkS4_6lYEs^w@*gWnxH{lb5SO(CL~bgDz|eS-agd+U5siMX9n~&)tb$luaN3@2M=KX3m1cV01|vkZTKX|&9;mrwW<6) zyvY8C=OXHX`zGkHwW^5++~7(-ZT{V;wdMfMT&(!HECBQ=if6nJ0$O6WUg3hIE}0Gl zK;)E9S2yGS?is+VE;lyHasheTLDOe2sl5$$0!1s0WsaoKCLsNl3BWZ}1Z|E7yf;$J zZ&f4m9!B57qgs}OPIW$8HG-|ciS z6+aV=c;a zJ4;v1c}5)g&h#sVbulRB4L+Oz`fm?{3vp- zt^VzOr#tE%dF5E>)@l47ZXM{ZHEB@dXg&~Bi+Bh+WQDnJ1Cq^GprR`j#2jB)Grp&D z74*R(AXI98rp>C>wI3i<7eFomjvS_>1@K^D(6lr`y7Tz%OzE@Z1G}CdpSc*Q&3UcJ z19Jf^2)uX)k)r|srUkSzrjSZIlUNH1Rj5TM7{uSaLyYTBJR|)O;;&SliCPKEtXq3_l+T6@BKaSvpEIQ z`?vru0ARC*X{<}owo!qCyh|1^4ui4~@pt^HXXIMlRY{0r0$iQn<4 
zD&=dmjlEFy5AIfd#%(1AEBfM{=P#Sy!EX@6$1^~BECE;%;{#UT)d2QOV8dCn=z z6iixgG6Iz{_HbD!v9Web+VyYvF+V9=~8V0nR9o6#)a=y(U$3 zs5qFnz6D*|H(;1w@CDmmVl)rf+{%f!*r=+JJl{?L?dw4}^J_MfR%_h7KS%2YdOa*A zYSnDMtbE)RtpGqhm8NqV_kU%fG@Q5v#N_VN$_FYyZ2w-wbGP+p@f3&F9F#6zp1eM~0qV&==>e?|9{;~?Nm%hF zy3KhLrA3Nm2jOU1vm5|^0Zc4k0K{TH$pRoe!Ev;5hCC}>^H0NA1&dJJ`SW19SA5$* zq4*f|Zux#`3Qft@xUVVtEHieB$f7^mqu$mYyP$WL1RG2aDLECpDC{oqet9{zwCsP~ z}C zr0=e<%rjk!sEr5bG5ZaRHR!-nu6!*FzMKGq{&p(L?l7F`?{&6vZT)0zxlgGoNE7h8_pShYZ59@Mlp-tSJ4{ksoavS=P##?mfsZ;yq)XR{{@W_An2xncimzLASlqTY{hadA{$)Xecs$h31aG0 z*OLDZH~HzH>cW{cC}TtCKWA&zf3~%;}IQE&(I;HIJ>gHmzRd{1Mki|&>jjygt>sq^{M$;Xz9b$ z_S#3?JQ=`u%9H=^-(DWK1q1MZ>mnx_ao{$cro6_U-QqmeVEuFeBnRSwda8717N~}t z0iT=Y-r9XYOUTYJE`R7CvI)tST7FD>*4+CY#&OX;QVr;3JPstkKRW%r8?g#FlGz#Ky&_vLhq5+SpYwp9H zH@*Op2P7XO2c~QfbCa;gOeW=R7c+zEZ-A{<|F4%1!z@Sveu4?`?v%|L=TA z7CjG?Pga}H|8`~A7c?bCy8z4rLk#t)>7K&P>tLem{JFuu^i$aksmEAZoS)B2@XlLl z>az%6xtKryoT*uDnqa zvjSW-aA1UG1qp+I&btwyJJ-SZ(u4P_=IP0Mn|<*0XfOgw6}boMmv2Lg^LdOWfWGw; zcL2hsTeRG z8~G?&&Hk=8tUwh|$}0mA)+2CWt`$8Qv(r(zihd9mQV-76wV9s){4-pAmn}F(+txAo zHj12DzpVAh0|`U-t^z6Nu!(eN6X*$x?FR;p9E3Kpeg@ zN}gzT7w*hL)e}^)Gy;s<8HlC-^oj#t+h$O#Hjpso)EVy= zOuPX=SysxDC2@;g30%x+)OSGkKmeGW6QGk_Bim z!zSrfXJ<2T8UYHvwU8YA?qqcrcNr-oJ@fEe;*ygWlLpX?%J?toLinF(IjClUqSSc* zs`_3#c>p+?xyW8U{`(>;2(&wx1#@a1t+4FF=D<7=*&J#WbNq7V_twd-?M<%7bW=oc z;Ko%LG>+~io?Szg_4}d9_JJu`KX~0)gfZ&g15jOgiRbn;dz=h7h+t3N#Kq;~XHex4 zzrW`a$+W7Z)fONzQ=R80V~yVj=GwAJz;V${HT^|3l7$_fW5VnP96o!Up5M4vy2o#B7skgNy@vsoR6T^<|r zh!cZAh+fII@kQuAFiyTc32yHAMYQp@k2L`sqzyz&a4-#Fj z0>3o5us~R#P8DJg?!kKthBJO3*KflIOebjcTNb^RzfmuNTm6$OlFR?7H1@_Ypx-3ljjihn~H*ihoxLBrA0`^#vY0WZb< zwOi@hyRv0aBwhSu#v735jO0PnjpqZoC9RmvQq4ZeBI1bzK&NqI4^`1BOtzy#%Z&i; z|DAg853p#uD0UBGFhQr`9s-ar?>H^h>p^-&EJN-?iloE`)t)0(irn{q3|f!gy^7;K zpmG2GpZgE~u?O`C?@m(WSym5G-c4`}f#Hn}^D%7k8e~kAT8qVOfIWl3s304k;Cj9e z@!!AH@&R?Vx@s?<)di+$rCNRWt7KRBO&F5F4nGtt;Ue;|ou$xU70eL2##>rrMXsG*Cb>L9*mitxSeH(D?NkSE$iwb*516;A(83 z@P`199F{^KcU}w`aRJ1X_6PU~&N0bF!-r#ig`d#_k@9nkeEMZ)ZQCmQsQ$OnZG|N) 
zB87g@Ao^M%0L^e$WHbE7b_PU^-?#@zDLxjGULo)zLCtCr^p3HOPv;g>b&U5}nUF?# z8JmFPyzFyUQeaVeJKo6@eQ%hYYPsYxm8JLC2uEIBwDO2)3!D8F5OP*PSvY4c*Ytu@ z{#JLYX>sl`+SVwqCx%f&N$SmRYeE$ka2|5+fQ?Rg3F-qLfukv4E_6NLlii8e3|4!U z?{s7!ih9li(hBb-{Q30i=6qeUXVUNCdds}j$^F>JD?QzGsa1KgCCCqthUVU>Avmrz zjU?C3BSS1}T~i?qAc7kbMoKx*`^LXF?4)Y~Rha+o&ivcANgukBbJ=8)WR(1U%c(J$(LC07VFE1f>=OD*}Eq9ZTamg&6x&``U=?`G6e{}_L5mS$CnouwFX z?xhXK*;h~U+pHab|1uQKD@7x+`w^vq_W|}DVwG+ji9iUg-6cck&YlNc{1-t>-juCHJ?fCcvOoRHwmmsui=|OE`*WR-> za3c233@Mz$XX5p$8>Cr34`{=mw1U}%D}6<#<<20YmyuCwMT!Y6(kteW#~x@PxWY7| zmVCK>jQ?;Dwe}TMvi55n#V*IFZ^ClHb=h&GPE(~Q>i4<2zlmghU^wu(8~fy~v0#^2 zabQ&ya&{AK^5I?!QQW5?7|6lD0qnD&+dPFQ!TYe^SNuaLua}Pa(wA?;3-c4B(%;1QVD_e3e3e9>g(G_e^R9ZwVO3WrjK;|XBT@Vs zFj1N!P+QqxYyDq~gcX{VB>cWWDKDY!PC6uT)FjY%iDp*eV;JQr?la2H@L^BDWpU!zJ+n|e@DE^^p8tZ$6A=1w zkWy=-0H(A+BKS0_0?HMP?_Pcp=SK|JIKg5JL}Jr22Z8om&O45fVa3u(txe69sECaE zg}9GpZfV1E8VEY`;W~pgyP01Jd84mM(cvr`>Asim>Kv0q=}(ZS9mKp4WWB;Yx4;Pk zOWiM08E;Y}dG26&Bfo4<)ec$UtZ~NSQ+kVx6osKA9xhz>x69$y)4fVM#|`oRWQx7? 
z0Rx>R->8pVB>l*mt>5^Zwhu`bw!inlYWaa-?n`u`E=RB={u{m)6YX`wM8!${-U$X} ze8fYCT3ZAZK?ha~(5<{yE{SF32&~#X`7d z??G04E0vF&`(B=pb4UgljDbakF!R+mXoBIwsVKL4ap75UH;9jPGQ2Yf3np&G3d zI`sU~lIt(9Q+(4MpH(<#Mw1j9xMM&pafBl@{$S+{&hdp8G;S3J4ojDY?=1j@ zI1rd${8?V!%g>7OSk4H#XL6p`;Cwfy0L@W6`%+ZQRg~B0giZ@T!v{JK$1(eu*;@9) z<1eom5b<)8^*S}g%Z!|I1UNgZID4_h%f9kUr~CpI3hgh+i`|;Pw3CsgYv`(*wuC*aj}li2Ek6(%jZ$(yCg zRV~&l8THtJnr_n$uv!pP7%VYO2VUs0-Z=k<%kj!DFHNth>IXsVZySsMpl(P9&BJ|~ z0eeY1I5Jk)v=!4`1Wd0kxg!W^3{uW6LK9-Nri+}3u+i(!0o!%P#H#R@LiMZ;S%nHq;9wrkFB}5PDCk$&k zzBW9o;`d2f@RS=H{m$DWFn6@1hjA;7rYuvk+1Rh0Yj&I{MDj#?%YXavQ`|Lwz1n+S zrC2Xmd-ACOGs%eTh(KxcArJE_-CbH5mA6g~rGL9FXoRcF5CvFsZvxoVEZw_^_lc&j zsT?{5lh-&pbSHk02auaC+7UQ(xq!ai_*;Sq&xn!?8$C1G7=;KJ zx7#Yr-BD}AgufWGNIZ*|nQ$C<{Wb&_^oQ{Z|4ocIG@a{Z)JawA-#R|x;SFT9&H9pq z7u41Yjxwccy2XmY)IJrNa9pRnU8d~Xu~Y|Aaq{9wkg5LPAK+z!>&C)v72WQW9JVA8 z-b!avTIzVh9jsp1>qFa}Kl{+qNM*#i?1w5^c@^k+>NWiAOS?PY7mF=~08iO9rR8Lz zxX$=g>cW%@8?9Bcd@&E}j{8!^`zj}r4_7)h|H}oaZ;4T&NI5w_7EY$j{Cl~z-I&_> zpYkxKPJy@(1``E46OC_J zTeT^0mscp@=a!rT_?e}+>=bDf`EG4>nWtT69g((EHjsCI@CD1TM_k7WQ4zIeVWe+N zwU{$pS`xXlbgfN}undXH$qYar6I1y)If$f4^?MB0%nu-5q~lF&vV~9jM_<&A-`mqU zMx1V7tYVc0dID)-?uAZh6&Hj^2P#D*TiQ7{R*(HRm6KX7fn!`~b6bOdv((4noJ)0i zC%4vI^fgLuCAoLnA16aQq7$1qRD1W}3CVfp?pr`BM##_0g%h3k;q+ECRaY0W zg5Td;z+_eWcpA;i{$Qd6k=$rjI-_uGZr5}1YZ=cjN)lmd!nZV!f9MtmIbYsBzRGqO zFdWrG((4$z{^{K(un{`-u-8U95fg`t^j?3*p$_s-;kW+`t2u>qO15O&A`VAJ&hXUg zE_dB&C)KjfQzW?ikyQw*7b!LX7m>d=AAXjsEi*`^*FtGJsKwIY;%MoiEd88=G2AOFVeS&=k=8BROm z2y0-#aae2FkvJH$mPdRFfOloh$b<8Cp95ej0a5{cD{5F-x!=0c9ZMMp82|CHf66lw zuGYqA&zzS9PHH-rTMJ#*^@UoB+No=x97 zXfbO?!h6Px&AkF#2{*j#l7j|}Z@1QJ-cj{m5Sj@sf1v~{tT`Ope%0kxShn=JE2eP? 
z^e@&eHz%Xwe^&2M&;Pu3q}WcDK(YG|vdVr)vEOLYP_}1zH86jeV|UgA)c#X zDI&Go9wZ@%RG-yfT@Q0>@vR>Tty1RCV~g2FvG09;3Yu)KJsN(02Soc3i{`ii^*rj- zx&-;#H)YB;E=7>B4yPWBIC8BjSXaBg=z7ghxGJw(6!#Kw)b*4^xg)gmD^s#p@PnoP zv+pGyelgEDXj$7L7+M$p?4F?8ba_M98MB`<>s6>pm~%u8uD$u{{1a8DT-(L*R5#w7 zVx6tVg44qDo;x{>Ck;90WMdMKDj3Pm<6ZzMfROUUX^ zucI`Pm}?7SNEI&@=9QDWZnIgBdUYKDAa9u#^zO&TG4e}){AecFE-BOuyE8hOzuZNK zPPhaA!CV%3ooAB|Cy4jU86dQh zwqGix5-c(Ehg1tk&LZa+<#dbTRR`p|!t`uN_t{ye*@-0Xjl5X{s=LyP>Wy@a)| zN$wYT@4=n*CZK{I#>j5b)D7>Fyu!n)zn6ccsY)D$f*rx&rnf<-E=IgX&t^4Cg`Sp)VLECYO{NOXMiu zG(oo8NAxmD@Zk=uM16Z$QhmRNbD7lVqAQEwXUVqBkYbf6!@bnWK9B8N24o zgoFK;F(Z+5%`Jmc+zDzAt4A!wUmkR51nR29_?h#mUfRB?8#ZXNhxvUqj zvwB<{Q5vD%XVc+K$M3h5MttZqCb{c`80^dneJ6RrY4Fa*P9v+9YXm z(VMLNM7-?F!Ru?BBNz5X?IJc!#E)71~A~d@Eid@39c)vm?N|6X%Jr6&p&;qr-1wu?Bl~dS%_z_Ze@uoQjN)x}dHe zMeg&iZj)yeFJ%O2L=#jj)wI+asFlw-&xRQvHN5B5Xuv!;GSDHo%c>SvC-lyiq+Ox= zMnanGhW&>i!WYq5V5UgH<_5P6@F7RD^4&sS#|-f65PlSn^B%)YA*LK1`y$(!ti5uo8dj=D_-D1Z!eAE)fqmy%=+18qw0&{_~^Nt5&w?p*+hP zK3^M4@v4QD=CAa$DdwqqaO8a+#o=?r%pctapNM{r#xs9LL;j5V(je#r@Z$FFU8;dp zl6Ji81%@$4PW_5MweEz%EWO4RSQE1zCgS(KZbT<<_YO-Ht133AtRt5D^Q!V37AZ~L z{;Fn?7^z!XWK2 zV0=fDI8#Z%j@_SVlOTGqPBcOkDU7h#`{{7LKmvbvRyrM@tN}iio5CTUJ1|di?o^N| zHcw0cqDBK&#K0I(;KGwbaKTEqUuGpgNlw>vPNSrDKF7$-E7g*J#}e_N&^og2u_xA% z5W{d592+6FGb{E8{*m)(g z2cE?l-~CZZ>E>66=i@~i2WvV6VQOt zsC%(Jp8%s|3A~h`l5YU5*z$;Ms?S`SW;0LHnh3HePC->`q4TpdX}6VXN)f|akve(b zOY%Up1A@Ib%1!VDnQ?V{rLFhFXxKjPr-k~KtX)5#!pFp^NurdRGYn!`VP@c8xQPd z?0zBQ@H6RVEvJ{7Nc(k+m+COomv;Ix7>fc!Vq*d@mHds^%W4*N)H?5OV}l4(cHxP# znlBa+D+P#@W+J1)P#Ai#&`qK*s_qB!Fu_~Y&pZ4r$ z!ZQc318x=_hPg-u(~(k@ss* zYK;-eBg%#oTRTI?K395qJn+Qn`7Ah@0m5lZv8S>8UFT9ibsVFh?zy68sb4{@@k2N{h8d+qS2EK(A7vMXwEX0-+Pw+Lsfz5!$Gm;}0y zWpmJuK1mlxYwC~?RgeOSY^%db`A!T&R3?6GbHCqycQzCVq ze+X(kUzT9Y-!Gn4@vs7k**DJ!+In;|rBvQombgYn2XhPO_reAS?Th!eGdQN!oe4enhJXwWDo|Pmd+pp z+O17^i!tyrx~V|%nO&wV&0h7AUSD!{!c7RJ`W8>Zdm)jGPm6$4R|4dGA|7>PbVi4r zKiGf+Yow(%4)Q3nr|zveCFe4f#Yu8HW^62+--#~N`pmzKb%gQM*e?hs|KyC06*)^5 
zs~h*?b|Pn!-HLFiwA$XmSiHn*hvhvVfpa>)2A9Z`X^m4LT>}PR~#djh1(sbtXBuEob?P`HmIA;;Qomf>gV4n z5ySBkgEv_f#GhgASswHN|3mgciIgvhMT3#^3HSUpRY}ByI->vd0Q2a#TsE`V1KoYu zxeikpTIRZyyMP{f`1GJLe5b<+SaHOV|8`c4-Y5M$qHXii#euqB{GIZtmNKw|Oe&-> zHi4d1OOW9>%lAGB^qWQbo$y3dlfevDm-xp%Ax@$8(21Jx!o`+G{|(QlFTnL(>ZaZ0 zmXbXznCPx6c|1QG#Siy;#}r+mDRrAUgFJe8DNc`y?p%TYC+|==XG;bKJB7abON0#b zsDriaYr-O_{<}C`WWiwaQT}x`@zN0Cw*#J5Q(+Hr|6g@)8ct>V{(T!Wl{u1mC^MO7 z85WiybCFD;kRekUGOH}JB_cB|BC}NHDN~e?LS;yV3}v3V_w%~?{r~s#-|qX}^Wu4~ z&5LW>7H8*qtm8b6{n+>K{`qdWvW{W4D5y?YD;-rPnI*9eh7I2oBv9r6=D&Wat73_C z8{>Du)s;Fa$%P*OS}L+gi$J0So`t1bqlDq}125jLDO_BSc#yO)JkPIf0GMSOJOWpM zZcyR*mHi{B__q}Sq~=K*h9@JWxfq~OB+vV=UPy_kD<|YtP4nXK#K5;pFYpg>hdqPE zA|EFO39txk3l}N67dNLu$=e`SB16TB+`MCIk39dM%SWh$0 zEPt^*?m8-^GBO{2)AmnI)IY!J7YjmRM5M$8is213mCcs4wRXT(eA$*BQX>pQA$sGw zJko=Uh`d?)J~_Uf;G+v+=5-Eo+{{e;Roc0^fAd<2piHlY-CQ8;@AX+8F&oT?nJ)0$7R{kyE9nuEXBNGB`hXR1} zO-0lJ>lcFk-p?to*3w7O5kK^Ij;l|N4K%TsyknqaiwF>I<%C@Swzy?go7p)z@0mYe@1b|O6LkEWfN^#I z6im|V%rtHW{oJ_@0&~woMe*hv*N8t4(4RFhb61)@Pz0*)rE4;PE=AYnSg-r#mY&(q zJ~&t)k~^r@pWevcC+4u;9jj>OO3`-}{G^cm-Au}&oNG!DQIS6|qc9^H(t?}cOIu(q zrWx>0Is3H83X3+2e+?bwKlgojO{~`)&WM~j$UHb&za=M0i1Tk7-NqRf^m@%-9?^6& zf7uk>Y7NtF(ZP^N?IEDI6HlQZOKQzs-XIZwQqFRF7l;nE2D}k#>AdQDCHu$1ow})y z&k9@YNpti)(|hceA^p~9dvmpnQrMiPhX`&;BCPsz9w`pq`(nw~Azdr(-1S~5)4aNY zxN=2n`!`ck21L`pGi2VzHU#Kw{)kVFjyvlgzxf5!ZnGbo6YWV9c^>GbY&8Z78`AV3 z9Rm_kuRa%$)~cwyO$LEqQVsHu(hf{5ZDnx1H|_Iqy{W?Aj6Lr{Cb=&u_+n{OO8g$K zfBsZ5lW|>rc3$}q=X2O1wF8am$xw=An;rc4%JCe5PGPl|IpOc*{*u;64tK(#!IW-6 zj(0Ftn2Mt%ldIx(wUSMxG4yr;yxqF5` zo21lqq+NyXh|~#9-f+f`RxwA^8jrC9clij8VTIw(A`j+(aicOdNqqu-v(oR(99N_zli#6Q=<0&0ysXSF%(bv_~!SuxjWXo zCzh3o=d{eS-&?Pv=6@fpQ$3kzc*bJd(ro%xy*=-;O^C2dOO9x_@}J5jvs~MoC$72} z5sBZ2h3+JN*(W{%cevIp1AmhUE0K?NLBy4Sk)LK}*@AGfBKKp@fn&n zgh!@sThyC2-*ZfknlUcDB{i}QZ5k@3$kU}@hwlD_Mv_9Wn?({|jIzuo+Lj-bD>63y ze5+}@9J?SMG9Ur6hVRA&#+09LXUO!GURN(@+mu0f+NT#ZS_8v6A9*^y`1DtDCCJl# zdc$l!=VLv~8X$USwr)krK$^U7l2zD+*C6SQ=%t_??=6=Kod=J3(eEWb^zXO+%8FL$ 
zL1w@j{?N}^HsnTbzmpiW&oWyv+OjY!xAxv14Uw65-gSASSv;Ai-k!k_&&*4bEGT%Y zykRh?EueslS`}UtEsuekw_~uQ6fdz&<_0`l7m!;Ep6~tSlKpy2d41AX_bvJ9f`IiO z$40Hu#Wp>UZ9ex72JOw2c{J(LV5nh2b4!|k8U602-masU>v7~vtJmzB4fZiCD~LFnlbyrOdW92~ zmF+G?I;H+i|Jm2&mQ6n=`lsV2`yW;8Z%dJA$FTb(x?X#s*~}zu8sE5Y&Hu5e>UD~< zbF>N?8H}N@!JD%p1aOZ1S3S^1rLxcVc(p|j!X%pVGo$79?RiDw~kfFsexK}SnTf^??6u{RmHg(pv546nVZOuez`NLSzm z&+>YG_DPAki!qT}D13?3C&Q&xX*{?RDd6uE*GV0IH1^`k6_>2np|0IqQqe4s6Sx-j zF6W}nEBEpAUG1MHahtI0{FZTBE8cj=cr71AhInZr>b-veHqh)KK1o_8MvH7d9T%Lr zy~)B{c}(gggRd{UNWT;GnFWz(v0kR^;6Vq1QF8`(;_g*|f5f&L9I8iBN`=5i#sxc( z^d`v-=m;%2$s~?K&@~IdFOhT}Lh00@N$E}Rc{fdfH&q|FTska`Rv|4WeW%SVIT5|C zH+er3Hj-PIv7N5)cU8dF9zbnbQzQ@ZqcB4hQ93r&_TW(XAj!wTc32GvNYtc) zPu9vKEsUv2@87U~pkc-)RZ;h%iG^0_XPUhOG^Gf5l1*PUh=ix9eSg#7&9H?+cQI#FoS%l;_ghRi<4<*EwCa!+)|>(5jxx< zsUoI0o|{9t9mnhUl#i8;CA#G#<}&5(z>~2R53?ZtD)!5R`WqVt;ln>u+hup>Cd*iz|b{rmEz`p%oj`j=Gy$Wc*qxHG-pd`8uAXUs`;LjOVvw@{q3G93#{pvvgPU<&O7hl? z=|rx)tt2(en4mEg&#siO>%Ho7!-7WZhCzbyqCs*yrxqm-UDDpTN~Us+Yu1*g6WMXL zlK2P>`xzJ!<+GZdpGn#0yrqw|9`4zin0$Iz8jyf@&w(ZvJ!j5=pGq@D_~i!WDn?Et zPS9kn_mDyA8G5^i`TE# zR~{N9ypaB*(0X;k-93(L{QX3pTQd*bt?0P>y=P+O9!oobzS(T~-MkN+5Fwx$ycmDz zCpk51xsKlb!$iZ+FH!L{lG8+SJjj|Nh)HbfAFnN`ZY=kg#m52Pn$Oj*UqAl zVF`2q&B6B1@raGrk)vCd(;B64XV?cvl;?civGv}_p|V%6TC~qI^b1y6Ok(Ib)jVj! zETHjP5X5wsV^-=HMV`pq&TiZog8p@rXMuvN+aSiiinQ^L0dt||_M~+%h+*jh#K5i> zv3*cHK3J#Oy0CG>KzSDjzz1G&=;X2cWRjTyP0AE?M|+FJu>^gf3M;s`y~2CYT=}Zq z^Hd{mWfOEo^UY0rophwT>*U!NJd54K7_ATBmVz}=zu2uEsqptk*vij;*1@{xk2t+} zMmBmFO3APQtHR?1nWKC1I7Foz))*2sthdRJoJl01{&ocfhFNz`#v23#}G7%Q~+$kO({NY4Kn;5Y|iC>xt)XZQxP5|I6WgC6Tv=58HO00pC1f zu;%`inTZv2ec!`fvY$kn?^l`d>~~(ly5KG|?zs=&h3o{Uw7Fs0;ScukIUWi!L!9@? 
z#Lzy{(L-Vu7VQf2s(j7f5@4f*c7W8|6mt!`fk^bT@2$wy%|5>70R4aI%qNZ9qBan+ z2xu#EyHir(NPqJ~wlJoedzrgZh0oE!fi%AW`Z1HLS19}cqXmErEcq8T9mIn9a;`m% zeFB>j4v0?(H3SICx|iRn*9x0gs;~v`T+cewx&odl=Ag@ez90+si>6r0296m757TsF zdM93Cg&OtR$OfoWmp9aH?Fj}zb#H~PgMEhUqZQ&_Qg*uoLlgeAp)86b{uY%`n5D3h zJ~ntdGR|myo?QiIOa5z&Z^t9NLuP~YngIgpw7m#%?9D!fN74S*3JXaUYK zY9Bw(%2Zz-rTi-kx+ZO>?LN_bGmGB%!>+ISK4yEWdY{QianaA$lWd@+^W1#aKL1eD z58bT}Zla}Nm2QC{LdN(LB0Q(`d{x;LQ1U1tA|b97%tMmL&>{_? z(XffZThRI!I18e6+gjQvEC@p=%_fnyM;Jun2^Lro*yXj0K(mSEVqMPv0H%GgXzXgN z066i0Ov^TeZ^+<4Mf?_9PGs`<2h5K7TMN5$D#u+q{r9$waQ8kuTf%-v?&&N4knjfe zN3^zT$9Qz?wU7u|E%`nm`EHnvih*iVTp)nZ*S89_1^t}))4O0F^KsuJ2(X6s3=8PK zy7~MVA5JHaC>~y=KiQz-b#YhR!_cRDaIxMdZ#{$t`(YV8bjTPq_lSB8F-avH6TV$> zL>&mJ!}9qqcPZ&eujV&ObgfQ3am`E|@X($&N0%#{>3FT-&E zVay*!h9_F_`vZCEvxOP%Nq|wj)vS$%9r$#cNH%6Rgm zSRqIy*It!xsa;>Z3gff1f;1ED0;^5(*+KLE%Et!&~V|_%DafJ703G$O}2vHNe5b3m>FQFj3=h` zV8U0H62V`AUTm&?0%0aFTkfA^!QDv?VUt&}BVvT=H4? z^0EYJ!D7tMu-sv_t=^t?QP2no^sknt<&%cnh>1%)Evpi0C9!0Q!fSYspC&s z34g8#nB#uLVPAF%OeAhDjhRr1Mhy67pc7`OFO0@Y3H!N_VB=+i(UEVM#Y!XF+V9X& zOo&e}v$PQEZDcD+!)E;^z8 z4AFF8D|SV?T}5Iw9VWlrlDd!-{Kfg%LJ;*B;Z*og=@1ppwr|!d}jBEL5-xt148KSU(><=xMuSh=qx#DF|C?eZmLcnn-*6!g!dyl_`&Ie=Tdj7U7^l)3ZTJq~b*>+hj~v0dqHNL9!I+{^Rn)oRC^RIz?S&_{j z7Dax5H@T+JY6UtL3w3NZde1e3NR4`mF#S!tFwq$)-xyxAidKLwN@9!eaQSzU8EdeKHF_L0tU2JzYT+$rJeXg^flA^t;fv(@Q z@(ar0>@%_-17f4`NcZl`Q<15hUlK5PYmLoYIEK_;CXlHE_3UyH_fXutl8)_+;*KN6 zN9fk;2~_Tfw4!ewZ%)bMx3&9^QqWjG(`QvO!@mKcT0JRSqb=3lw)Wjqdsb8{jz zSB!AH;qqU3!qiC9_CmA8Z|HGH))jkFs zXMy{8J<_;%$VhBtLu#_+k<}!hfq2p4r$l!XZWIgizwA`!gj?@>DjDDn-b@B7KEp~@ey?HAL-=0~*+@L%1%B#M z#Gvje!@?CFRwE~&LR1@p(da{`&5Oso2A6ch_+F9s3%5qKL#$d^j%@bBf3; zPGM3z_GQjfP5}Peb3}co5!O0O!L@+J_+)a2ZLp3qT81xs_W6b$az7EK*MKt_pW1-} znXF!G6p2qQvGvYcm&?__CsUV&tvjcsMD(dT%gLZs=aVR0>dym=OO+o*Oo2y{fwgt^ zPrGR;F^1Id0m-d7RpuvcmM|imymTW!UMg50+pW?w+~(A}^Yh&k0l%tR0R@F?+}AL; z?@~>dIe6TgHm_J+h!oWcteV!Pv-w8-Zgd&HF4wO!VKPsc1w-Gj+<2GjrAH}XMXoPX zu&`m)@{}5Ri2u6Uy4hqR!ZT=6Qj 
z6#cBUr#glpX@2fZ4h5-6=2DC&vkuO_L@RamhsBAys|H}uGx+^$4x&77qW6vK9X6-8xN#gLwl8|78< zpwKiYrHMNn7F*S*x~tmO!9bx?Wly$smg?M)7|rkD;zzcnuv9JaG=o%n5R=p~{|Kb0SSlEVa|s5jV% zC{srI#uMFwR;ZBDa8l;2Q=0crR)m#lBw@Tb9ba6)#?P~D(NdODZQ7+0F7d`dr&S=dYs6$ZDv9)Y)k+Yq8o!yC6rE_s+sXth~eRVyMOTF~@?9 z+(SbyrD2yt2!xdK_on4K5-sj)lW$8U@P6tTp6!s}dYpX~>$^CGGygTE^lkis{nRmX zhLc3jaI&k!Ebd{9)@{&;q#QCrgc`z5Mp<5>M>TT}Vavk;FJS^)IO(lPxbsh8<6TNE zO=r^7>3?|8p@wLrUjKSC(Su3nZlRk#Gc8ri(aoX;$NoA_4SDMi>H`tX3>kUCLJsN< z>=&O>VeY=MZhZa_P335lJ4I$Rf+R7=A}_R7uUS23eFy}^(zZWQ@r$e z_XVFjIVen>fFUiEiBt9 zVQXCiN@|a3d{!9m4!2yimM_h!#&Z&`X>-1ZolnhMqXKU)l@heYP1*nxp#BZP7RAF+ zjTM>T5NNzzpcJN_=(*fEN&UWyW`cUPt)YR#f{G@FtPOInMmTijpDv^CJ=aJSQ2kYB zPTy@)o+;w~z~(HLc9NrorNQ`qHC4mA``2V|E_czo+WbkZ1!RIxj6$QZ%H*46I~wPp z)w5puu_buMjsiC`qaj^ z&3@sbHf#g3qollHnikRALjZ5^RtnpI9<9@)cQemNM?5?*JrFNHm@?TL`f6SHKc~or zLwqD7poA)d!@${>4>`Ju^MDfbSv{`$#S7y6I>^;0WoL^3o^i-T&j_dq`MfFT&z*aV zP&6aDSqlcI2N z{z#`)hTelN&-Pa#$id+yVs{4kk1GM&D`g}cPb~D<%?iCimwAR!9^ceLo2;m>%3JZIIQoBKKMCwiIl4()z@m&o;Iw20d zi2te5y{%**S@uCfY_nzvl4`1;c;fx|%)qRz8CVAVu&NtEMyLnM0!7cFf#P6jpx3D+ zJ4TP)X!S|;SVmm4&pQ8xhTK?Y8GUH|E7($-f1^(qv+$!A_4DqSp3N7H&#aq6Eh~Q_ zm`fovj9_#kBqO4Rk%d*PNRDfs4=u`nwHgNvw4wsN;H&!FjtNp#?pUyseG8}ZO-IuG zni@$YNC60}VoN;a7j*$DG7ERaXNueY^-1@^E--RmHh=qO$q)G)u)^fQp{Yicxelop z`cg$xbw%n>i{Jr~G$pKBX7MtXrJ^uFkUQ_IgJh61oq4LM?s0twjmU)vu5n`hTrwVN zbn6C*Br0I8KkEi04tK}H4ZD7Gwgb0a5P&ZqTR3&%hv_gb$w0HvhTENiBLfy@Wu{>_ zBz`F%Ns-q^hPx8<7#~v@8I6l{VaNJnd{752BjshEh>l&q6iEeHKI!_Ze{VroVc~qU z`<~3IvPTRt{et@{(K1KaF}Lj=;aR&zh$11k&pqFV<<`QP78_T?rtk!04&*j7u}?XY z#&cB(nWSCQjt(G%>j7%v#&2PZnw|t45|sl9w*vbQh~%Rk$EXqYG8>%Meio@`jsOO% zMH%CUdj{A{aaSHC(|t^u32vQx;R-?FMec9CpM>uYwz#jNrk8Wk28~os_+<1P$o+5~ zR`-c~9A-eh@Y`l>r||_PNMNJ%UAuM+Mt2ra~aXJw=QS1~MHQE+uu$ za)=~=Yh1_Hh_Gz64di!ksuBeQmn2;)p7xW@n7fmh z!WiNqks>PWj-2<;`JmBp;+QMjOJh$J8dPclQ@zOP>r3XIgY`)by5OX*jNf>p9Va0t z_rz3D6Azz+h$oSuM~#4ffrXG~N}Rx&dSSf&j>+J=X*L3wY1@f~yz{^RpfH9X*TC_<^EPilz3Z7Q+jw|)mIJeDs20UQR??fy$eX7y@HpjH 
z62!6mLI3^ew)JUnjvou%yXXz;!U`^3wj`1KacU|rpy<$xL>_<6GSnM95kFVH zSuO4jp+?4{g2jpv0lf*^9oO)NPR|rulFWVliX$#8EZ0 zRV}JNd(o!@9}|WlAKRcsyfIquzSpi{xPwMd#apqO#L!973rQ<72`^g8c~!(_0~nc{Il5D@?oS7KR0x!zYPMoO_hNB{ z2R_qpNpnBlowC4&JA}`IVO;)?f5(;ntKEl@lK3JN+P0sLj5(Uo<)(SPyca7K|D)G@ zWOu?N-fEHxf06(`E?XS)e2v`NH6{C3RxD;W_dk62?7g%%wtuvupi$p)_+g?U5w9iD za-Nap#6RlTPq^!xk@idk5RZflvE~3SV>R@Q2IR zFf~k&hqApFB;?9ZdY^fJ4n9+YG$H)!1Rl)z(fqIdJD9gTEdAdkED@6>hK7(r8mW6^ zshAVy$w|-T>o>hDnSNeK*CpVYx(Dw?Gdls%lr&t^eH|mS1jGE0{l}*m!SY%)r1u{| zDw={#4YTi}+6fC>0WGmE0%1p(%GHl?#` z%}}4$0sa#XSSJ>fz$y_2d!lk^npVFxTtsu3xiJmOiYiz(3aJTo0%_XZv}&62gp!}N zYgOy&K25b;hoe>Y#!$WQ&QrtL_zY^SfiL{6uSo3!DPa6lHi3QVhsY4%ZFzP+2mm=T z8R}{%UKt@CTNxOU2C^5yw3sBkPSfO8KYtXFY&loE!;%K%*&7fw9I_t{&H_Y2&*GIU zKT?r<_e4TVCsjlkAub`%2muEQx&^Ix7OcbvMU%y^0AN=+!yWJqR(B-%hSd6%_vX3B z0Gl8ENsG-Nh3whDX{wH%_3oF`3b_c&?<^S&@J|f-fNeAjo5Aqj+S=O8LuU6x#6R*s zKHvwCC#nuP4sQAh^_N7%Kmuw$Wi;#6H~M-HcGrm?MW|q#VT%D6cU>bucf24YITy15 zuUA|+R1`maSpjoWoZEwGFiU@Nuy1Gs@7eGKNEDu{P|+_St?Gc+zXb9Kfs2P5kG_HF za4+-Vz2Sr$cBlv~bGwd%d*X13nZcquPcx7^14JiI0JqP&ym3wEkkveoYTgC5=9=G# z&qUP^?8ELzJV*%3Rx{9IV7?XC<~eBK!{9b5v2dK^WI(nJtTkr#iG(9LR||rrf;4U) zQvZO3W$*TNE@-}@#(*+!a9;BMVK^~`sh@i5wDpn|C1+q8F=G!X5oRG_Wq7>~%6Lxz z#W@UyxExM`m!I>1Wlh7E%D`Q(EWVS>(s~132=g9*+s_e9jlfN8bvPNpFNEG_T4r(i zmFrkOK>$b%zL4m#ad>bJV*zDK(2J)B<4@=qJx!E8X} zb;y|S{b#BTNie$6(M%%>FNEOwT$p+{j98kh{iJFvK^{3a05C8mvikb^pF z`L79@l_$dsWesAMe42$RA4b&Mdz-(`0TUzzh>J5ZKEs4xA=-TA#+k(Cbd7@-vkw-| zPG3(kOVg7E(nHX+Ay%c@b{{XCrS3pmTe})|9VRim`}$Y@FyPZs>WN7ej<>Dmma%;kr!`@*|M5Tf0*5qrmDmX=R`2H1 zr>dGlfzSftda3-wRzfN#Hh&&2)_gWD){$$|%*Se(>VaxA>X}b3>Ei5Z@x%_>DMaaa znr?{SKZ3)AFh4&(?*_rNl!Szd^}S^&q-qQ%3D&%%0L#v5T<=#9cl#n4_n5nyJ#8gC z+JkE1EK)<<A5}z$K8&&6=)k6nC)Zp z39nsnR;^)M5KNO)!97)Jy~Z^j-AfYfJ9<9~-`HzwWn2&fuB|PO8^7~m3u2QJpHLy# zLe{<13LBTX+O=16*cAvXpVrO8=6R(}E^x z6A_Jnpz!I8lZ8+(92^;}F8RvAS~CmPA7f9YJHJdpk`RvofYPhOoqQlfmQo@bbi(*y zF3)&y=djj|qOYsE%MPh~e1HU+Bo&()qdnu`B4Tv#bP=)!{Cq|1gZ>yl!Es{5EQD%V 
zkVw*4H&Hx7hN3MKER>Q$*oTb%oWL{s4U^~F2r%oi=n>0>tKp)150jZtYi&%*r00eP zpMmbc`lBuUAimcTk`#DX%oUnT_%6Af;_q6nH zje6jNPr-%VcN1oLFnWJp;}_}C37R;KIO-&b^iA;D-6=Flolr?Mn@2!GGWFwnK)i4s zZu=)kz$wny2U%#Nuu9Dd*|ibQbg>lTmtqG(IookEg1Z^+ob%)DZ_p6zx*ZYRwL ztk={To9rAJQR7&H4{8K?u%mlhB3UUzy}EatY+`SE`cLBFQB`T9R81p)^m51h@b9YrGe#C@HvQ#%s#xBEI-#O$}>{m<@83jJoau*Mcmxz z=F9z{asOx^n}(b$UvB(h&bmRtpIARK^SZ78LEn zu8?Ep%L^08cdyf@M7CUSJ2zw*Zmr_?;e>azPfG&c*5n>3O}hDM5y%(hy|CMz)vgbjfO90Th>zu)4+iUOYv`%d0vNZoV+x zpdm;TZxAtVvWjPsOPX8MVk6 z=0Ec!qF0O^2ZgS|jQSd$l9Gf+miW&PeWZ%5WR}s!bmB}?A%wg#pS)`_h|rtrRU;3ZFX5py3vv}nR z5PV9ZW-EPrLa45;jAs=yUU^lvh1|MSl` z>e_;t`B~=`07gNz3k~@l1f4mtT=`u|!DA}a6$~dq-68*I$n>Pz_1qEU<<07pOKY=JR?TwSjKhq$zh0L_b zFsxXFjg{a^udcp+yw<%QcMbJWE~T>;jgS9M2U@);_T!yMmW#oPZC5J&8$h>k7b{g^ z9qqFos2TI`kOk_6a`k`hfA;@O;r;tvkeyedNri*%H^oC23EPP0&(JbGS0z*aUY&2c zQR&3I&Fy`e^a&%JmU_&{e;z!X%&_C!D896e!SR=qqq)EMhfaAhT!PckKMxu$dpiL{ zabJ((3VrDp1_!2J{{7|?Z3*Z-$?k@}his8Fp>$C|v`<8)hRMJ8iAHYIKJ;kwRmjr? zR*eA38b9H`Zv;;oo`z*OtF8C?M#cNkQ_r)x#yK1ZP5%ySE@pk}%>PJY|NrdqLQS~b zA$24U5$IU0=hFkw-XTa}Ch&JbK!)W+*@0`?yEBgr9H4gLSvKeGhMsHXxQ~phlPqQ?;`! 
z1dG{#S4bNiTJ3I0h>9A(`B4CZO0n_VLVSO#56&fm2+eDpx>MuqQ_=1bE`{SYnq zX3JgU`FDzkrIE>=o0j)oDQ)+G1<~u%WwX=Ni`0$O!xc?i>05+q43OJg|Eu*A0Et&B zoX4M|BS~$*9M8eQp=dzKXFieqCTb4QkD^bt5>+ zr0@Ov6y)>_=lofi$f+`ufhHjdd|RNDoatn=NrtVuXbSO4i1sA){6=zuk9q(zukIA- zA_L-*P2$bAY@o_s`{)ZcR~4HU{8E?FC277zYOBEuG7I@^v1eaViw@5Yn9HmhC;Z_t z`XwA={xcmV!cZ7lR5Ohfl6Xf>rs*r;^LxUtzvWZtARahzI`&okT=VEls$>U9kwV}P zXFQ>Y9J3T$?XT@!lHQ?>q<1c#LZZTn>TyGEX;I?J1Jow zq@>w4zhS=TJtEQ5OMD(7L$05K+8HTsuF9H+4PC1{#_Z9*8@h@bnJnjY@XmLV)1zQT z>K4|lMygJ)UTXiE9#_Kgo_P01kEkeA6EeB8Zj4r5x8S`W2n1UP05KEa^(A9i7~Mr& z_;#^S0xrYIoTtF7a4Vgs#U8;>_5cUDtnO#!ST&)uBnfcA+A7)(J=<6HcSfRuE90hN z(uyjpfBrZ8P$7t?@lfmFWMt-NN6RfhDP1$O3OGqYK+8s`*JCXqE#y|X1{HJ;WKbSN z`{3TkvMO9zY~vb4q_T`yu9xRLD7rilaSdYHAa#437J2huYSzclE`WNiX3H6@B8Q$C z&|sPo$FX1*Qc1GZ4fN`(D2>&08!w|3T+fx z>u_vrtiFNbi;z`)jR>l=BwJ8%(KuNVa_{b_@0_J8MPGsn~k_(dvD51Lm_ zs9p-)1Jc^Imes{mE&`OWo=y+^X!<*Wr!L?}%SMJNd}BFWyx_qoqlI0un;Ve)yVH@v zc0IQ&rg|LkwPkQ4{(po5pYjb{lBrVllL?M_Mkrn9?<3NCPOuSfRJ=Z9zVw@K=xJW3%YPP(HX^=h z+9kYc;O`tD;|Tq>0a9zEm_|Dw zpApL&28GyOSmM)S*bIu;iek9N3H2Qq|2?4`XxT#IX6U7h8PfTsmo6FA&~!!?VsLTT zh~M`2KZY&<-IupMom?Wm^eP4(+sfgp=-*i?tAbZ73Ryl{obKP#1>Y#^6q^ej^-tK@ zlM8fHZ`B(^^pZABE2x(+2``5y$%}jc4)O$VXeSPm3n`}suk-+j`kai+$bOb|O(euJ s#x21?NGsXK(%7RI)Tzy $ZKEVM_NET.zip && unzip -o $ZKEVM_DIR.zip -d $ZKEVM_DIR && rm $ZKEVM_DIR.zip +curl -L https://github.com/okx/x1-node/releases/latest/download/$ZKEVM_NET.zip > $ZKEVM_NET.zip && unzip -o $ZKEVM_NET.zip -d $ZKEVM_DIR && rm $ZKEVM_NET.zip mkdir -p $ZKEVM_CONFIG_DIR && cp $ZKEVM_DIR/$ZKEVM_NET/example.env $ZKEVM_CONFIG_DIR/.env # EDIT THIS env file: @@ -41,13 +41,13 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker 2. Define installation path: `ZKEVM_DIR=./path/to/install` 3. Define a config directory: `ZKEVM_CONFIG_DIR=./path/to/config` 4. It's recommended to source this env vars in your `~/.bashrc`, `~/.zshrc` or whatever you're using -5. 
Download and extract the artifacts: `curl -L https://github.com/okx/x1-node/releases/latest/download/$X1_NET.zip > $X1_NET.zip && unzip -o $X1_NET.zip -d $X1_DIR && rm $X1_NET.zip`. Note you may need to install `unzip` for this command to work. +5. Download and extract the artifacts: `curl -L https://github.com/0xPolygonHermez/zkevm-node/releases/latest/download/$ZKEVM_NET.zip > $ZKEVM_NET.zip && unzip -o $ZKEVM_NET.zip -d $ZKEVM_DIR && rm $ZKEVM_NET.zip`. Note you may need to install `unzip` for this command to work. -> **NOTE:** Take into account this works for the latest release (mainnet), in case you want to deploy a pre-release (testnet) you should get the artifacts directly for that release and not using the "latest" link depicted here. [Here](https://github.com/okx) you can check the node release deployed for each network. +> **NOTE:** Take into account this works for the latest release (mainnet), in case you want to deploy a pre-release (testnet) you should get the artifacts directly for that release and not using the "latest" link depicted here. [Here](https://github.com/0xPolygonHermez) you can check the node release deployed for each network. 6. Copy the file with the env parameters into config directory: `mkdir -p $ZKEVM_CONFIG_DIR && cp $ZKEVM_DIR/$ZKEVM_NET/example.env $ZKEVM_CONFIG_DIR/.env` 7. Edit the env file, with your favourite editor. The example will use nano: `nano $ZKEVM_CONFIG_DIR/.env`. This file contains the configuration that anyone should modify. For advanced configuration: - 1. Copy the config files into the config directory `cp $ZKEVM_DIR/$ZKEVM_NET/config/environments/testnet/* $ZKEVM_CONFIG_DIR/` + 1. Copy the config files into the config directory `cp $ZKEVM_DIR/$ZKEVM_NET/config/environments/$ZKEVM_NET/* $ZKEVM_CONFIG_DIR/` 2. Make sure the modify the `ZKEVM_ADVANCED_CONFIG_DIR` from `$ZKEVM_CONFIG_DIR/.env` with the correct path 3. 
Edit the different configuration files in the $ZKEVM_CONFIG_DIR directory and make the necessary changes 8. Run the node: `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml up -d`. You may need to run this command using `sudo` depending on your Docker setup. @@ -102,4 +102,4 @@ There can be as many instances of it as needed, but in order to not introduce ot - Read replicas of the State DB should be used - Synchronizer should have an exclusive instance of `x1-prover` -- JSON RPCs should scale in correlation with instances of `x1-prover`. The most obvious way to do so is by having a dedicated `x1-prover` for each `x1-rpc`. But depending on the payload of your solution it could be worth to have `1 x1-rpc : many x1-prover` or `many x1-rpc : 1 x1-prover`, ... For reference, the `x1-prover` implements the EVM, and therefore will be heavily used when calling endpoints such as `eth_call`. On the other hand, there are other endpoints that relay on the `x1-state-db` +- JSON RPCs should scale in correlation with instances of `x1-prover`. The most obvious way to do so is by having a dedicated `zkevm-prover` for each `zkevm-rpc`. But depending on the payload of your solution it could be worth to have `1 zkevm-rpc : many zkevm-prover` or `many zkevm-rpc : 1 zkevm-prover`, ... For reference, the `zkevm-prover` implements the EVM, and therefore will be heavily used when calling endpoints such as `eth_call`. On the other hand, there are other endpoints that relay on the `zkevm-state-db` diff --git a/docs/snap_restore.md b/docs/snap_restore.md index be5c35d1e2..af067f87e5 100644 --- a/docs/snap_restore.md +++ b/docs/snap_restore.md @@ -5,7 +5,7 @@ This feature is for fast replication of nodes. 
It creates a backup of database a ## Snapshot -This feature creates a dump of entire database +This feature creates a dump of entire database ### Usage @@ -21,7 +21,7 @@ OPTIONS: --help, -h show help ``` -**Make sure that the config file contains the data required to connect to `HashDB` database**, for example: +**Make sure that the config file contains the data required to connect to `HashDB` database**, for example: ``` [HashDB] User = "prover_user" @@ -33,11 +33,11 @@ EnableLog = false MaxConns = 200 ``` -This generates two files in the current working path: +This generates two files in the current working path: * For stateDB: `_`\`_`\`_`\`.sql.tar.gz` * For hashDB: `_`\`_`\`_`\`.sql.tar.gz` -#### Example of invocation: +#### Example of invocation: ``` # cd /tmp/ && /app/x1-node snap -c /app/config.toml (...) @@ -68,10 +68,10 @@ OPTIONS: --help, -h show help ``` -#### Example of invocation: +#### Example of invocation: ``` /app/x1-node restore -c /app/config.toml --is /tmp/state_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar.gz --ih /tmp/prover_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar -.gz +.gz ``` # How to test diff --git a/etherman/etherman_test.go b/etherman/etherman_test.go index 6bb15dd83a..b07b9a4cd1 100644 --- a/etherman/etherman_test.go +++ b/etherman/etherman_test.go @@ -435,7 +435,7 @@ func TestProof(t *testing.T) { proof := 
"0x20227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a0520227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a05" p, err := convertProof(proof) require.NoError(t, err) - str := "20227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a05" + str := "20227cbcef731b6cbdc0edd5850c63dc7fbc27fb58d12cd4d08298799cf66a05" //nolint:gosec proofReference, err := encoding.DecodeBytes(&str) require.NoError(t, err) var expected [32]byte diff --git a/etherman/mock_etherscan.go b/etherman/mock_etherscan.go index d8e3820968..1cdf7e0a41 100644 --- a/etherman/mock_etherscan.go +++ 
b/etherman/mock_etherscan.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package etherman @@ -40,13 +40,12 @@ func (_m *etherscanMock) SuggestGasPrice(ctx context.Context) (*big.Int, error) return r0, r1 } -type mockConstructorTestingTnewEtherscanMock interface { +// newEtherscanMock creates a new instance of etherscanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newEtherscanMock(t interface { mock.TestingT Cleanup(func()) -} - -// newEtherscanMock creates a new instance of etherscanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func newEtherscanMock(t mockConstructorTestingTnewEtherscanMock) *etherscanMock { +}) *etherscanMock { mock := ðerscanMock{} mock.Mock.Test(t) diff --git a/etherman/mock_ethgasstation.go b/etherman/mock_ethgasstation.go index ee9f1d5cba..6e11f8520b 100644 --- a/etherman/mock_ethgasstation.go +++ b/etherman/mock_ethgasstation.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package etherman @@ -40,13 +40,12 @@ func (_m *ethGasStationMock) SuggestGasPrice(ctx context.Context) (*big.Int, err return r0, r1 } -type mockConstructorTestingTnewEthGasStationMock interface { +// newEthGasStationMock creates a new instance of ethGasStationMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newEthGasStationMock(t interface { mock.TestingT Cleanup(func()) -} - -// newEthGasStationMock creates a new instance of ethGasStationMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newEthGasStationMock(t mockConstructorTestingTnewEthGasStationMock) *ethGasStationMock { +}) *ethGasStationMock { mock := ðGasStationMock{} mock.Mock.Test(t) diff --git a/etherman/smartcontracts/script.sh b/etherman/smartcontracts/script.sh index 3d1199f3e7..e9e2e7e5df 100755 --- a/etherman/smartcontracts/script.sh +++ b/etherman/smartcontracts/script.sh @@ -14,4 +14,4 @@ gen matic gen polygonzkevmglobalexitroot gen mockverifier gen datacommittee -gen polygonzkevmbridgel2 \ No newline at end of file +gen polygonzkevmbridgel2 diff --git a/ethtxmanager/ethtxmanager.go b/ethtxmanager/ethtxmanager.go index 03df29c859..1c48071cf0 100644 --- a/ethtxmanager/ethtxmanager.go +++ b/ethtxmanager/ethtxmanager.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "math/big" + "sync" "time" "github.com/0xPolygonHermez/zkevm-node/log" @@ -33,12 +34,6 @@ var ( // ErrExecutionReverted returned when trying to get the revert message // but the call fails without revealing the revert reason ErrExecutionReverted = errors.New("execution reverted") - - // gasOffsets for aggregator and sequencer - gasOffsets = map[string]uint64{ - "sequencer": 80000, //nolint:gomnd - "aggregator": 0, - } ) // Client for eth tx manager @@ -65,7 +60,7 @@ func New(cfg Config, ethMan ethermanInterface, storage storageInterface, state s } // Add a transaction to be sent and monitored -func (c *Client) Add(ctx context.Context, owner, id string, from common.Address, to *common.Address, value *big.Int, data []byte, dbTx pgx.Tx) error { +func (c *Client) Add(ctx context.Context, owner, id string, from common.Address, to *common.Address, value *big.Int, data []byte, gasOffset uint64, dbTx pgx.Tx) error { // get next nonce nonce, err := c.etherman.CurrentNonce(ctx, from) if err != nil { @@ -83,10 +78,6 @@ func (c *Client) Add(ctx context.Context, owner, id string, from common.Address, } else { return err } - } else { - offset := gasOffsets[owner] - gas += offset - log.Debugf("Applying gasOffset: %d. 
Final Gas: %d, Owner: %s", offset, gas, owner) } // get gas price @@ -101,7 +92,7 @@ func (c *Client) Add(ctx context.Context, owner, id string, from common.Address, mTx := monitoredTx{ owner: owner, id: id, from: from, to: to, nonce: nonce, value: value, data: data, - gas: gas, gasPrice: gasPrice, + gas: gas, gasOffset: gasOffset, gasPrice: gasPrice, status: MonitoredTxStatusCreated, } @@ -238,15 +229,16 @@ func (c *Client) Reorg(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) } log.Infof("updating %v monitored txs to reorged", len(mTxs)) for _, mTx := range mTxs { + mTxLogger := createMonitoredTxLogger(mTx) mTx.blockNumber = nil mTx.status = MonitoredTxStatusReorged err = c.storage.Update(ctx, mTx, dbTx) if err != nil { - log.Errorf("failed to update monitored tx to reorg status: %v", err) + mTxLogger.Errorf("failed to update monitored tx to reorg status: %v", err) return err } - log.Infof("monitored tx %v status updated to reorged", mTx.id) + mTxLogger.Infof("monitored tx status updated to reorged") } log.Infof("reorg from block %v processed successfully", fromBlockNumber) return nil @@ -262,222 +254,251 @@ func (c *Client) monitorTxs(ctx context.Context) error { log.Infof("found %v monitored tx to process", len(mTxs)) + wg := sync.WaitGroup{} + wg.Add(len(mTxs)) for _, mTx := range mTxs { mTx := mTx // force variable shadowing to avoid pointer conflicts - mTxLog := log.WithFields("monitoredTx", mTx.id, "createdAt", mTx.createdAt) - mTxLog.Info("processing") - - // check if any of the txs in the history was mined - mined := false - var receipt *types.Receipt - hasFailedReceipts := false - allHistoryTxMined := true - for txHash := range mTx.history { - mined, receipt, err = c.etherman.CheckTxWasMined(ctx, txHash) - if err != nil { - mTxLog.Errorf("failed to check if tx %v was mined: %v", txHash.String(), err) - continue - } + go func(c *Client, mTx monitoredTx) { + mTxLogger := createMonitoredTxLogger(mTx) + defer func(mTx monitoredTx, mTxLogger 
*log.Logger) { + if err := recover(); err != nil { + mTxLogger.Error("monitoring recovered from this err: %v", err) + } + wg.Done() + }(mTx, mTxLogger) + c.monitorTx(ctx, mTx, mTxLogger) + }(c, mTx) + } + wg.Wait() - // if the tx is not mined yet, check that not all the tx were mined and go to the next - if !mined { - allHistoryTxMined = false - continue - } + return nil +} - // if the tx was mined successfully we can break the loop and proceed - if receipt.Status == types.ReceiptStatusSuccessful { - break - } +// monitorTx does all the monitoring steps to the monitored tx +func (c *Client) monitorTx(ctx context.Context, mTx monitoredTx, logger *log.Logger) { + var err error + logger.Info("processing") + // check if any of the txs in the history was confirmed + var lastReceiptChecked types.Receipt + // monitored tx is confirmed until we find a successful receipt + confirmed := false + // monitored tx doesn't have a failed receipt until we find a failed receipt for any + // tx in the monitored tx history + hasFailedReceipts := false + // all history txs are considered mined until we can't find a receipt for any + // tx in the monitored tx history + allHistoryTxsWereMined := true + for txHash := range mTx.history { + mined, receipt, err := c.etherman.CheckTxWasMined(ctx, txHash) + if err != nil { + logger.Errorf("failed to check if tx %v was mined: %v", txHash.String(), err) + continue + } + + // if the tx is not mined yet, check that not all the tx were mined and go to the next + if !mined { + allHistoryTxsWereMined = false + continue + } + + lastReceiptChecked = *receipt - // if the tx was mined but failed, we continue to consider it was not mined - // and store the failed receipt to be used to check if nonce needs to be reviewed - mined = false - hasFailedReceipts = true + // if the tx was mined successfully we can set it as confirmed and break the loop + if lastReceiptChecked.Status == types.ReceiptStatusSuccessful { + confirmed = true + break } - // we need to 
check if we need to review the nonce carefully, to avoid sending - // duplicated data to the block chain. - // - // if we have failed receipts, this means at least one of the generated txs was mined - // so maybe the current nonce was already consumed, then we need to check if there are - // tx that were not mined yet, if so, we just need to wait, because maybe one of them - // will get mined successfully - // - // in case of all tx were mined and none of them were mined successfully, we need to - // review the nonce - if hasFailedReceipts && allHistoryTxMined { - mTxLog.Infof("nonce needs to be updated") - err := c.ReviewMonitoredTxNonce(ctx, &mTx) + // if the tx was mined but failed, we continue to consider it was not confirmed + // and set that we have found a failed receipt. This info will be used later + // to check if nonce needs to be reviewed + confirmed = false + hasFailedReceipts = true + } + + // we need to check if we need to review the nonce carefully, to avoid sending + // duplicated data to the roll-up and causing an unnecessary trusted state reorg. 
+ // + // if we have failed receipts, this means at least one of the generated txs was mined, + // in this case maybe the current nonce was already consumed(if this is the first iteration + // of this cycle, next iteration might have the nonce already updated by the preivous one), + // then we need to check if there are tx that were not mined yet, if so, we just need to wait + // because maybe one of them will get mined successfully + // + // in case of the monitored tx is not confirmed yet, all tx were mined and none of them were + // mined successfully, we need to review the nonce + if !confirmed && hasFailedReceipts && allHistoryTxsWereMined { + logger.Infof("nonce needs to be updated") + err := c.reviewMonitoredTxNonce(ctx, &mTx, logger) + if err != nil { + logger.Errorf("failed to review monitored tx nonce: %v", err) + return + } + err = c.storage.Update(ctx, mTx, nil) + if err != nil { + logger.Errorf("failed to update monitored tx nonce change: %v", err) + return + } + } + + // if the history size reaches the max history size, this means something is really wrong with + // this Tx and we are not able to identify automatically, so we mark this as failed to let the + // caller know something is not right and needs to be review and to avoid to monitor this + // tx infinitely + // if len(mTx.history) == maxHistorySize { + // mTx.status = MonitoredTxStatusFailed + // mTxLogger.Infof("marked as failed because reached the history size limit: %v", err) + // // update monitored tx changes into storage + // err = c.storage.Update(ctx, mTx, nil) + // if err != nil { + // mTxLogger.Errorf("failed to update monitored tx when max history size limit reached: %v", err) + // continue + // } + // } + + var signedTx *types.Transaction + if !confirmed { + // if is a reorged, move to the next + if mTx.status == MonitoredTxStatusReorged { + return + } + + // review tx and increase gas and gas price if needed + if mTx.status == MonitoredTxStatusSent { + err := 
c.reviewMonitoredTx(ctx, &mTx, logger) if err != nil { - mTxLog.Errorf("failed to review monitored tx nonce: %v", err) - continue + logger.Errorf("failed to review monitored tx: %v", err) + return } err = c.storage.Update(ctx, mTx, nil) if err != nil { - mTxLog.Errorf("failed to update monitored tx nonce change: %v", err) - continue + logger.Errorf("failed to update monitored tx review change: %v", err) + return } } - // if the history size reaches the max history size, this means something is really wrong with - // this Tx and we are not able to identify automatically, so we mark this as failed to let the - // caller know something is not right and needs to be review and to avoid to monitor this - // tx infinitely - // if len(mTx.history) == maxHistorySize { - // mTx.status = MonitoredTxStatusFailed - // mTxLog.Infof("marked as failed because reached the history size limit: %v", err) - // // update monitored tx changes into storage - // err = c.storage.Update(ctx, mTx, nil) - // if err != nil { - // mTxLog.Errorf("failed to update monitored tx when max history size limit reached: %v", err) - // continue - // } - // } - - var signedTx *types.Transaction - if !mined { - // if is a reorged, move to the next - if mTx.status == MonitoredTxStatusReorged { - continue - } + // rebuild transaction + tx := mTx.Tx() + logger.Debugf("unsigned tx %v created", tx.Hash().String()) - // review tx and increase gas and gas price if needed - if mTx.status == MonitoredTxStatusSent { - err := c.ReviewMonitoredTx(ctx, &mTx) - if err != nil { - mTxLog.Errorf("failed to review monitored tx: %v", err) - continue - } - err = c.storage.Update(ctx, mTx, nil) - if err != nil { - mTxLog.Errorf("failed to update monitored tx review change: %v", err) - continue - } + // sign tx + signedTx, err = c.etherman.SignTx(ctx, mTx.from, tx) + if err != nil { + logger.Errorf("failed to sign tx %v: %v", tx.Hash().String(), err) + return + } + logger.Debugf("signed tx %v created", signedTx.Hash().String()) 
+ + // add tx to monitored tx history + err = mTx.AddHistory(signedTx) + if errors.Is(err, ErrAlreadyExists) { + logger.Infof("signed tx already existed in the history") + } else if err != nil { + logger.Errorf("failed to add signed tx %v to monitored tx history: %v", signedTx.Hash().String(), err) + return + } else { + // update monitored tx changes into storage + err = c.storage.Update(ctx, mTx, nil) + if err != nil { + logger.Errorf("failed to update monitored tx: %v", err) + return } + logger.Debugf("signed tx added to the monitored tx history") + } - // rebuild transaction - tx := mTx.Tx() - mTxLog.Debugf("unsigned tx %v created", tx.Hash().String()) - - // sign tx - signedTx, err = c.etherman.SignTx(ctx, mTx.from, tx) + // check if the tx is already in the network, if not, send it + _, _, err = c.etherman.GetTx(ctx, signedTx.Hash()) + // if not found, send it tx to the network + if errors.Is(err, ethereum.NotFound) { + logger.Debugf("signed tx not found in the network") + err := c.etherman.SendTx(ctx, signedTx) if err != nil { - mTxLog.Errorf("failed to sign tx %v created from monitored tx %v: %v", tx.Hash().String(), mTx.id, err) - continue + logger.Errorf("failed to send tx %v to network: %v", signedTx.Hash().String(), err) + return } - mTxLog.Debugf("signed tx %v created", signedTx.Hash().String()) - - // add tx to monitored tx history - err = mTx.AddHistory(signedTx) - if errors.Is(err, ErrAlreadyExists) { - mTxLog.Infof("signed tx already existed in the history") - } else if err != nil { - mTxLog.Errorf("failed to add signed tx to monitored tx %v history: %v", mTx.id, err) - continue - } else { + logger.Infof("signed tx sent to the network: %v", signedTx.Hash().String()) + if mTx.status == MonitoredTxStatusCreated { + // update tx status to sent + mTx.status = MonitoredTxStatusSent + logger.Debugf("status changed to %v", string(mTx.status)) // update monitored tx changes into storage err = c.storage.Update(ctx, mTx, nil) if err != nil { - 
mTxLog.Errorf("failed to update monitored tx: %v", err) - continue - } - mTxLog.Debugf("signed tx added to the monitored tx history") - } - - // check if the tx is already in the network, if not, send it - _, _, err = c.etherman.GetTx(ctx, signedTx.Hash()) - // if not found, send it tx to the network - if errors.Is(err, ethereum.NotFound) { - mTxLog.Debugf("signed tx not found in the network") - err := c.etherman.SendTx(ctx, signedTx) - if err != nil { - mTxLog.Errorf("failed to send tx %v to network: %v", signedTx.Hash().String(), err) - continue + logger.Errorf("failed to update monitored tx changes: %v", err) + return } - mTxLog.Infof("signed tx sent to the network: %v", signedTx.Hash().String()) - if mTx.status == MonitoredTxStatusCreated { - // update tx status to sent - mTx.status = MonitoredTxStatusSent - mTxLog.Debugf("status changed to %v", string(mTx.status)) - // update monitored tx changes into storage - err = c.storage.Update(ctx, mTx, nil) - if err != nil { - mTxLog.Errorf("failed to update monitored tx changes: %v", err) - continue - } - } - } else { - mTxLog.Infof("signed tx already found in the network") } + } else { + logger.Infof("signed tx already found in the network") + } - log.Infof("waiting signedTx to be mined...") + log.Infof("waiting signedTx to be mined...") - // wait tx to get mined - mined, err = c.etherman.WaitTxToBeMined(ctx, signedTx, c.cfg.WaitTxToBeMined.Duration) - if err != nil { - mTxLog.Errorf("failed to wait tx to be mined: %v", err) - continue - } - if !mined { - log.Infof("signedTx not mined yet and timeout has been reached") - continue - } - - // get tx receipt - receipt, err = c.etherman.GetTxReceipt(ctx, signedTx.Hash()) - if err != nil { - mTxLog.Errorf("failed to get tx receipt for tx %v: %v", signedTx.Hash().String(), err) - continue - } + // wait tx to get mined + confirmed, err = c.etherman.WaitTxToBeMined(ctx, signedTx, c.cfg.WaitTxToBeMined.Duration) + if err != nil { + logger.Errorf("failed to wait tx to be 
mined: %v", err) + return + } + if !confirmed { + log.Infof("signedTx not mined yet and timeout has been reached") + return } - mTx.blockNumber = receipt.BlockNumber + // get tx receipt + var txReceipt *types.Receipt + txReceipt, err = c.etherman.GetTxReceipt(ctx, signedTx.Hash()) + if err != nil { + logger.Errorf("failed to get tx receipt for tx %v: %v", signedTx.Hash().String(), err) + return + } + lastReceiptChecked = *txReceipt + } - // if mined, check receipt and mark as Failed or Confirmed - if receipt.Status == types.ReceiptStatusSuccessful { - receiptBlockNum := receipt.BlockNumber.Uint64() + // if mined, check receipt and mark as Failed or Confirmed + if lastReceiptChecked.Status == types.ReceiptStatusSuccessful { + receiptBlockNum := lastReceiptChecked.BlockNumber.Uint64() - // check block synced - block, err := c.state.GetLastBlock(ctx, nil) - if errors.Is(err, state.ErrStateNotSynchronized) { - mTxLog.Debugf("state not synchronized yet, waiting for L1 block %v to be synced", receiptBlockNum) - continue - } else if err != nil { - mTxLog.Errorf("failed to check if L1 block %v is already synced: %v", receiptBlockNum, err) - continue - } else if block.BlockNumber < receiptBlockNum { - mTxLog.Debugf("L1 block %v not synchronized yet, waiting for L1 block to be synced in order to confirm monitored tx", receiptBlockNum) - continue - } else { - mTxLog.Info("confirmed") - mTx.status = MonitoredTxStatusConfirmed - } + // check if state is already synchronized until the block + // where the tx was mined + block, err := c.state.GetLastBlock(ctx, nil) + if errors.Is(err, state.ErrStateNotSynchronized) { + logger.Debugf("state not synchronized yet, waiting for L1 block %v to be synced", receiptBlockNum) + return + } else if err != nil { + logger.Errorf("failed to check if L1 block %v is already synced: %v", receiptBlockNum, err) + return + } else if block.BlockNumber < receiptBlockNum { + logger.Debugf("L1 block %v not synchronized yet, waiting for L1 block to be 
synced in order to confirm monitored tx", receiptBlockNum) + return } else { - // if we should continue to monitor, we move to the next one and this will - // be reviewed in the next monitoring cycle - if c.shouldContinueToMonitorThisTx(ctx, receipt) { - continue - } - mTxLog.Info("failed") - // otherwise we understand this monitored tx has failed - mTx.status = MonitoredTxStatusFailed + mTx.status = MonitoredTxStatusConfirmed + mTx.blockNumber = lastReceiptChecked.BlockNumber + logger.Info("confirmed") } - - // update monitored tx changes into storage - err = c.storage.Update(ctx, mTx, nil) - if err != nil { - mTxLog.Errorf("failed to update monitored tx: %v", err) - continue + } else { + // if we should continue to monitor, we move to the next one and this will + // be reviewed in the next monitoring cycle + if c.shouldContinueToMonitorThisTx(ctx, lastReceiptChecked) { + return } + // otherwise we understand this monitored tx has failed + mTx.status = MonitoredTxStatusFailed + mTx.blockNumber = lastReceiptChecked.BlockNumber + logger.Info("failed") } - return nil + // update monitored tx changes into storage + err = c.storage.Update(ctx, mTx, nil) + if err != nil { + logger.Errorf("failed to update monitored tx: %v", err) + return + } } // shouldContinueToMonitorThisTx checks the the tx receipt and decides if it should // continue or not to monitor the monitored tx related to the tx from this receipt -func (c *Client) shouldContinueToMonitorThisTx(ctx context.Context, receipt *types.Receipt) bool { +func (c *Client) shouldContinueToMonitorThisTx(ctx context.Context, receipt types.Receipt) bool { // if the receipt has a is successful result, stop monitoring if receipt.Status == types.ReceiptStatusSuccessful { return false @@ -501,23 +522,22 @@ func (c *Client) shouldContinueToMonitorThisTx(ctx context.Context, receipt *typ return false } -// ReviewMonitoredTx checks if some field needs to be updated +// reviewMonitoredTx checks if some field needs to be updated // 
accordingly to the current information stored and the current // state of the blockchain -func (c *Client) ReviewMonitoredTx(ctx context.Context, mTx *monitoredTx) error { - mTxLog := log.WithFields("monitoredTx", mTx.id) - mTxLog.Debug("reviewing") +func (c *Client) reviewMonitoredTx(ctx context.Context, mTx *monitoredTx, mTxLogger *log.Logger) error { + mTxLogger.Debug("reviewing") // get gas gas, err := c.etherman.EstimateGas(ctx, mTx.from, mTx.to, mTx.value, mTx.data) if err != nil { err := fmt.Errorf("failed to estimate gas: %w", err) - mTxLog.Errorf(err.Error()) + mTxLogger.Errorf(err.Error()) return err } // check gas if gas > mTx.gas { - mTxLog.Infof("monitored tx gas updated from %v to %v", mTx.gas, gas) + mTxLogger.Infof("monitored tx gas updated from %v to %v", mTx.gas, gas) mTx.gas = gas } @@ -525,36 +545,35 @@ func (c *Client) ReviewMonitoredTx(ctx context.Context, mTx *monitoredTx) error gasPrice, err := c.suggestedGasPrice(ctx) if err != nil { err := fmt.Errorf("failed to get suggested gas price: %w", err) - mTxLog.Errorf(err.Error()) + mTxLogger.Errorf(err.Error()) return err } // check gas price if gasPrice.Cmp(mTx.gasPrice) == 1 { - mTxLog.Infof("monitored tx gas price updated from %v to %v", mTx.gasPrice.String(), gasPrice.String()) + mTxLogger.Infof("monitored tx gas price updated from %v to %v", mTx.gasPrice.String(), gasPrice.String()) mTx.gasPrice = gasPrice } return nil } -// ReviewMonitoredTxNonce checks if the nonce needs to be updated accordingly to +// reviewMonitoredTxNonce checks if the nonce needs to be updated accordingly to // the current nonce of the sender account. // // IMPORTANT: Nonce is reviewed apart from the other fields because it is a very // sensible information and can make duplicated data to be sent to the blockchain, -// causing possible side effects and wasting resources on taxes. 
-func (c *Client) ReviewMonitoredTxNonce(ctx context.Context, mTx *monitoredTx) error { - mTxLog := log.WithFields("monitoredTx", mTx.id) - mTxLog.Debug("reviewing nonce") +// causing possible side effects and wasting resources. +func (c *Client) reviewMonitoredTxNonce(ctx context.Context, mTx *monitoredTx, mTxLogger *log.Logger) error { + mTxLogger.Debug("reviewing nonce") nonce, err := c.etherman.CurrentNonce(ctx, mTx.from) if err != nil { - err := fmt.Errorf("failed to estimate gas: %w", err) - mTxLog.Errorf(err.Error()) + err := fmt.Errorf("failed to load current nonce for acc %v: %w", mTx.from.String(), err) + mTxLogger.Errorf(err.Error()) return err } if nonce > mTx.nonce { - mTxLog.Infof("monitored tx nonce updated from %v to %v", mTx.nonce, nonce) + mTxLogger.Infof("monitored tx nonce updated from %v to %v", mTx.nonce, nonce) mTx.nonce = nonce } @@ -623,18 +642,18 @@ func (c *Client) ProcessPendingMonitoredTxs(ctx context.Context, owner string, r } for _, result := range results { - resultLog := log.WithFields("owner", owner, "id", result.ID) + mTxResultLogger := CreateMonitoredTxResultLogger(owner, result) // if the result is confirmed, we set it as done do stop looking into this monitored tx if result.Status == MonitoredTxStatusConfirmed { err := c.setStatusDone(ctx, owner, result.ID, dbTx) if err != nil { - resultLog.Errorf("failed to set monitored tx as done, err: %v", err) + mTxResultLogger.Errorf("failed to set monitored tx as done, err: %v", err) // if something goes wrong at this point, we skip this result and move to the next. // this result is going to be handled again in the next cycle by the outer loop. 
continue } else { - resultLog.Info("monitored tx confirmed") + mTxResultLogger.Info("monitored tx confirmed") } resultHandler(result, dbTx) continue @@ -654,7 +673,7 @@ func (c *Client) ProcessPendingMonitoredTxs(ctx context.Context, owner string, r // refresh the result info result, err := c.Result(ctx, owner, result.ID, dbTx) if err != nil { - resultLog.Errorf("failed to get monitored tx result, err: %v", err) + mTxResultLogger.Errorf("failed to get monitored tx result, err: %v", err) continue } @@ -663,8 +682,42 @@ func (c *Client) ProcessPendingMonitoredTxs(ctx context.Context, owner string, r break } - resultLog.Infof("waiting for monitored tx to get confirmed, status: %v", result.Status.String()) + mTxResultLogger.Infof("waiting for monitored tx to get confirmed, status: %v", result.Status.String()) } } } } + +// createMonitoredTxLogger creates an instance of logger with all the important +// fields already set for a monitoredTx +func createMonitoredTxLogger(mTx monitoredTx) *log.Logger { + return log.WithFields( + "owner", mTx.owner, + "monitoredTxId", mTx.id, + "createdAt", mTx.createdAt, + "from", mTx.from, + "to", mTx.to, + ) +} + +// CreateLogger creates an instance of logger with all the important +// fields already set for a monitoredTx without requiring an instance of +// monitoredTx, this should be use in for callers before calling the ADD +// method +func CreateLogger(owner, monitoredTxId string, from common.Address, to *common.Address) *log.Logger { + return log.WithFields( + "owner", owner, + "monitoredTxId", monitoredTxId, + "from", from, + "to", to, + ) +} + +// CreateMonitoredTxResultLogger creates an instance of logger with all the important +// fields already set for a MonitoredTxResult +func CreateMonitoredTxResultLogger(owner string, mTxResult MonitoredTxResult) *log.Logger { + return log.WithFields( + "owner", owner, + "monitoredTxId", mTxResult.ID, + ) +} diff --git a/ethtxmanager/ethtxmanager_test.go b/ethtxmanager/ethtxmanager_test.go 
index 0e18763707..204a12981b 100644 --- a/ethtxmanager/ethtxmanager_test.go +++ b/ethtxmanager/ethtxmanager_test.go @@ -57,6 +57,8 @@ func TestTxGetMined(t *testing.T) { Return(estimatedGas, nil). Once() + gasOffset := uint64(1) + suggestedGasPrice := big.NewInt(1) etherman. On("SuggestedGasPrice", ctx). @@ -67,7 +69,7 @@ func TestTxGetMined(t *testing.T) { Nonce: currentNonce, To: to, Value: value, - Gas: estimatedGas, + Gas: estimatedGas + gasOffset, GasPrice: suggestedGasPrice, Data: data, }) @@ -124,7 +126,7 @@ func TestTxGetMined(t *testing.T) { Return(block, nil). Once() - err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, nil) + err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, gasOffset, nil) require.NoError(t, err) go ethTxManagerClient.Start() @@ -173,6 +175,8 @@ func TestTxGetMinedAfterReviewed(t *testing.T) { Return(firstGasEstimation, nil). Once() + gasOffset := uint64(2) + firstGasPriceSuggestion := big.NewInt(1) etherman. On("SuggestedGasPrice", ctx). @@ -184,7 +188,7 @@ func TestTxGetMinedAfterReviewed(t *testing.T) { Nonce: currentNonce, To: to, Value: value, - Gas: firstGasEstimation, + Gas: firstGasEstimation + gasOffset, GasPrice: firstGasPriceSuggestion, Data: data, }) @@ -226,7 +230,7 @@ func TestTxGetMinedAfterReviewed(t *testing.T) { Nonce: currentNonce, To: to, Value: value, - Gas: secondGasEstimation, + Gas: secondGasEstimation + gasOffset, GasPrice: secondGasPriceSuggestion, Data: data, }) @@ -293,7 +297,7 @@ func TestTxGetMinedAfterReviewed(t *testing.T) { Return("", nil). Once() - err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, nil) + err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, gasOffset, nil) require.NoError(t, err) go ethTxManagerClient.Start() @@ -337,6 +341,8 @@ func TestTxGetMinedAfterConfirmedAndReorged(t *testing.T) { Return(estimatedGas, nil). Once() + gasOffset := uint64(1) + suggestedGasPrice := big.NewInt(1) etherman. On("SuggestedGasPrice", ctx). 
@@ -348,7 +354,7 @@ func TestTxGetMinedAfterConfirmedAndReorged(t *testing.T) { Nonce: currentNonce, To: to, Value: value, - Gas: estimatedGas, + Gas: estimatedGas + gasOffset, GasPrice: suggestedGasPrice, Data: data, }) @@ -449,7 +455,7 @@ func TestTxGetMinedAfterConfirmedAndReorged(t *testing.T) { Return("", nil). Once() - err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, nil) + err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, gasOffset, nil) require.NoError(t, err) go ethTxManagerClient.Start() @@ -525,6 +531,8 @@ func TestExecutionReverted(t *testing.T) { Return(firstGasEstimation, nil). Once() + gasOffset := uint64(1) + firstGasPriceSuggestion := big.NewInt(1) etherman. On("SuggestedGasPrice", ctx). @@ -536,7 +544,7 @@ func TestExecutionReverted(t *testing.T) { Nonce: currentNonce, To: to, Value: value, - Gas: firstGasEstimation, + Gas: firstGasEstimation + gasOffset, GasPrice: firstGasPriceSuggestion, Data: data, }) @@ -603,7 +611,7 @@ func TestExecutionReverted(t *testing.T) { Nonce: currentNonce, To: to, Value: value, - Gas: secondGasEstimation, + Gas: secondGasEstimation + gasOffset, GasPrice: secondGasPriceSuggestion, Data: data, }) @@ -669,7 +677,7 @@ func TestExecutionReverted(t *testing.T) { Return("", nil). Once() - err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, nil) + err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, gasOffset, nil) require.NoError(t, err) go ethTxManagerClient.Start() @@ -753,6 +761,8 @@ func TestGasPriceMarginAndLimit(t *testing.T) { Return(estimatedGas, nil). Once() + gasOffset := uint64(1) + suggestedGasPrice := big.NewInt(int64(tc.suggestedGasPrice)) etherman. On("SuggestedGasPrice", ctx). 
@@ -761,7 +771,7 @@ func TestGasPriceMarginAndLimit(t *testing.T) { expectedSuggestedGasPrice := big.NewInt(tc.expectedGasPrice) - err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, nil) + err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, gasOffset, nil) require.NoError(t, err) monitoredTx, err := storage.Get(ctx, owner, id, nil) @@ -770,3 +780,205 @@ func TestGasPriceMarginAndLimit(t *testing.T) { }) } } + +func TestGasOffset(t *testing.T) { + type testCase struct { + name string + estimatedGas uint64 + gasOffset uint64 + expectedGas uint64 + } + + testCases := []testCase{ + { + name: "no gas offset", + estimatedGas: 1, + gasOffset: 0, + expectedGas: 1, + }, + { + name: "gas offset", + estimatedGas: 1, + gasOffset: 1, + expectedGas: 2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dbCfg := dbutils.NewStateConfigFromEnv() + require.NoError(t, dbutils.InitOrResetState(dbCfg)) + + etherman := newEthermanMock(t) + st := newStateMock(t) + storage, err := NewPostgresStorage(dbCfg) + require.NoError(t, err) + + var cfg = Config{ + FrequencyToMonitorTxs: defaultEthTxmanagerConfigForTests.FrequencyToMonitorTxs, + WaitTxToBeMined: defaultEthTxmanagerConfigForTests.WaitTxToBeMined, + } + + ethTxManagerClient := New(cfg, etherman, storage, st) + + owner := "owner" + id := "unique_id" + from := common.HexToAddress("") + var to *common.Address + var value *big.Int + var data []byte = nil + + ctx := context.Background() + + currentNonce := uint64(1) + etherman. + On("CurrentNonce", ctx, from). + Return(currentNonce, nil). + Once() + + etherman. + On("EstimateGas", ctx, from, to, value, data). + Return(tc.estimatedGas, nil). + Once() + + suggestedGasPrice := big.NewInt(int64(10)) + etherman. + On("SuggestedGasPrice", ctx). + Return(suggestedGasPrice, nil). 
+ Once() + + err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, tc.gasOffset, nil) + require.NoError(t, err) + + monitoredTx, err := storage.Get(ctx, owner, id, nil) + require.NoError(t, err) + require.Equal(t, monitoredTx.gas, tc.estimatedGas) + require.Equal(t, monitoredTx.gasOffset, tc.gasOffset) + + tx := monitoredTx.Tx() + require.Equal(t, tx.Gas(), tc.expectedGas) + }) + } +} + +func TestFailedToEstimateTxWithForcedGasGetMined(t *testing.T) { + dbCfg := dbutils.NewStateConfigFromEnv() + require.NoError(t, dbutils.InitOrResetState(dbCfg)) + + etherman := newEthermanMock(t) + st := newStateMock(t) + storage, err := NewPostgresStorage(dbCfg) + require.NoError(t, err) + + // set forced gas + defaultEthTxmanagerConfigForTests.ForcedGas = 300000000 + + ethTxManagerClient := New(defaultEthTxmanagerConfigForTests, etherman, storage, st) + + owner := "owner" + id := "unique_id" + from := common.HexToAddress("") + var to *common.Address + var value *big.Int + var data []byte = nil + + ctx := context.Background() + + currentNonce := uint64(1) + etherman. + On("CurrentNonce", ctx, from). + Return(currentNonce, nil). + Once() + + // forces the estimate gas to fail + etherman. + On("EstimateGas", ctx, from, to, value, data). + Return(uint64(0), fmt.Errorf("failed to estimate gas")). + Once() + + // set estimated gas as the config ForcedGas + estimatedGas := defaultEthTxmanagerConfigForTests.ForcedGas + gasOffset := uint64(1) + + suggestedGasPrice := big.NewInt(1) + etherman. + On("SuggestedGasPrice", ctx). + Return(suggestedGasPrice, nil). + Once() + + signedTx := ethTypes.NewTx(ðTypes.LegacyTx{ + Nonce: currentNonce, + To: to, + Value: value, + Gas: estimatedGas + gasOffset, + GasPrice: suggestedGasPrice, + Data: data, + }) + etherman. + On("SignTx", ctx, from, mock.IsType(ðTypes.Transaction{})). + Return(signedTx, nil). + Once() + + etherman. + On("GetTx", ctx, signedTx.Hash()). + Return(nil, false, ethereum.NotFound). + Once() + etherman. 
+ On("GetTx", ctx, signedTx.Hash()). + Return(signedTx, false, nil). + Once() + + etherman. + On("SendTx", ctx, signedTx). + Return(nil). + Once() + + etherman. + On("WaitTxToBeMined", ctx, signedTx, mock.IsType(time.Second)). + Return(true, nil). + Once() + + blockNumber := big.NewInt(1) + + receipt := ðTypes.Receipt{ + BlockNumber: blockNumber, + Status: ethTypes.ReceiptStatusSuccessful, + } + etherman. + On("GetTxReceipt", ctx, signedTx.Hash()). + Return(receipt, nil). + Once() + etherman. + On("GetTxReceipt", ctx, signedTx.Hash()). + Run(func(args mock.Arguments) { ethTxManagerClient.Stop() }). // stops the management cycle to avoid problems with mocks + Return(receipt, nil). + Once() + + etherman. + On("GetRevertMessage", ctx, signedTx). + Return("", nil). + Once() + + block := &state.Block{ + BlockNumber: blockNumber.Uint64(), + } + st. + On("GetLastBlock", ctx, nil). + Return(block, nil). + Once() + + err = ethTxManagerClient.Add(ctx, owner, id, from, to, value, data, gasOffset, nil) + require.NoError(t, err) + + go ethTxManagerClient.Start() + + time.Sleep(time.Second) + result, err := ethTxManagerClient.Result(ctx, owner, id, nil) + require.NoError(t, err) + require.Equal(t, id, result.ID) + require.Equal(t, MonitoredTxStatusConfirmed, result.Status) + require.Equal(t, 1, len(result.Txs)) + require.Equal(t, signedTx, result.Txs[signedTx.Hash()].Tx) + require.Equal(t, receipt, result.Txs[signedTx.Hash()].Receipt) + require.Equal(t, "", result.Txs[signedTx.Hash()].RevertMessage) +} diff --git a/ethtxmanager/mock_etherman_test.go b/ethtxmanager/mock_etherman_test.go index 55fef0ddbe..4ae3920e2c 100644 --- a/ethtxmanager/mock_etherman_test.go +++ b/ethtxmanager/mock_etherman_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package ethtxmanager @@ -274,13 +274,12 @@ func (_m *ethermanMock) WaitTxToBeMined(ctx context.Context, tx *types.Transacti return r0, r1 } -type mockConstructorTestingTnewEthermanMock interface { +// newEthermanMock creates a new instance of ethermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newEthermanMock(t interface { mock.TestingT Cleanup(func()) -} - -// newEthermanMock creates a new instance of ethermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func newEthermanMock(t mockConstructorTestingTnewEthermanMock) *ethermanMock { +}) *ethermanMock { mock := ðermanMock{} mock.Mock.Test(t) diff --git a/ethtxmanager/mock_state_test.go b/ethtxmanager/mock_state_test.go index befb59638c..e01cf3ef88 100644 --- a/ethtxmanager/mock_state_test.go +++ b/ethtxmanager/mock_state_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package ethtxmanager @@ -43,13 +43,12 @@ func (_m *stateMock) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state.Bloc return r0, r1 } -type mockConstructorTestingTnewStateMock interface { +// newStateMock creates a new instance of stateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newStateMock(t interface { mock.TestingT Cleanup(func()) -} - -// newStateMock creates a new instance of stateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newStateMock(t mockConstructorTestingTnewStateMock) *stateMock { +}) *stateMock { mock := &stateMock{} mock.Mock.Test(t) diff --git a/ethtxmanager/monitoredtx.go b/ethtxmanager/monitoredtx.go index 47a344b967..716030763b 100644 --- a/ethtxmanager/monitoredtx.go +++ b/ethtxmanager/monitoredtx.go @@ -73,6 +73,9 @@ type monitoredTx struct { // tx gas gas uint64 + // tx gas offset + gasOffset uint64 + // tx gas price gasPrice *big.Int @@ -103,7 +106,7 @@ func (mTx monitoredTx) Tx() *types.Transaction { Nonce: mTx.nonce, Value: mTx.value, Data: mTx.data, - Gas: mTx.gas, + Gas: mTx.gas + mTx.gasOffset, GasPrice: mTx.gasPrice, }) diff --git a/ethtxmanager/monitoredtx_test.go b/ethtxmanager/monitoredtx_test.go index bfe1d4df7b..8e973aa8ad 100644 --- a/ethtxmanager/monitoredtx_test.go +++ b/ethtxmanager/monitoredtx_test.go @@ -14,15 +14,17 @@ func TestTx(t *testing.T) { value := big.NewInt(2) data := []byte("data") gas := uint64(3) - gasPrice := big.NewInt(4) + gasOffset := uint64(4) + gasPrice := big.NewInt(5) mTx := monitoredTx{ - to: &to, - nonce: nonce, - value: value, - data: data, - gas: gas, - gasPrice: gasPrice, + to: &to, + nonce: nonce, + value: value, + data: data, + gas: gas, + gasOffset: gasOffset, + gasPrice: gasPrice, } tx := mTx.Tx() @@ -31,6 +33,6 @@ func TestTx(t *testing.T) { assert.Equal(t, nonce, tx.Nonce()) assert.Equal(t, value, tx.Value()) assert.Equal(t, data, tx.Data()) - assert.Equal(t, gas, tx.Gas()) + assert.Equal(t, gas+gasOffset, tx.Gas()) assert.Equal(t, gasPrice, tx.GasPrice()) } diff --git a/ethtxmanager/pgstorage.go b/ethtxmanager/pgstorage.go index b9d611cc41..5ac17bfd89 100644 --- a/ethtxmanager/pgstorage.go +++ b/ethtxmanager/pgstorage.go @@ -36,13 +36,13 @@ func NewPostgresStorage(dbCfg db.Config) (*PostgresStorage, error) { func (s *PostgresStorage) Add(ctx context.Context, mTx monitoredTx, dbTx pgx.Tx) error { conn := s.dbConn(dbTx) cmd := ` - INSERT INTO state.monitored_txs (owner, id, from_addr, to_addr, nonce, value, data, 
gas, gas_price, status, block_num, history, created_at, updated_at) - VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)` + INSERT INTO state.monitored_txs (owner, id, from_addr, to_addr, nonce, value, data, gas, gas_offset, gas_price, status, block_num, history, created_at, updated_at) + VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)` _, err := conn.Exec(ctx, cmd, mTx.owner, mTx.id, mTx.from.String(), mTx.toStringPtr(), mTx.nonce, mTx.valueU64Ptr(), mTx.dataStringPtr(), - mTx.gas, mTx.gasPrice.Uint64(), string(mTx.status), mTx.blockNumberU64Ptr(), + mTx.gas, mTx.gasOffset, mTx.gasPrice.Uint64(), string(mTx.status), mTx.blockNumberU64Ptr(), mTx.historyStringSlice(), time.Now().UTC().Round(time.Microsecond), time.Now().UTC().Round(time.Microsecond)) @@ -61,7 +61,7 @@ func (s *PostgresStorage) Add(ctx context.Context, mTx monitoredTx, dbTx pgx.Tx) func (s *PostgresStorage) Get(ctx context.Context, owner, id string, dbTx pgx.Tx) (monitoredTx, error) { conn := s.dbConn(dbTx) cmd := ` - SELECT owner, id, from_addr, to_addr, nonce, value, data, gas, gas_price, status, block_num, history, created_at, updated_at + SELECT owner, id, from_addr, to_addr, nonce, value, data, gas, gas_offset, gas_price, status, block_num, history, created_at, updated_at FROM state.monitored_txs WHERE owner = $1 AND id = $2` @@ -85,7 +85,7 @@ func (s *PostgresStorage) GetByStatus(ctx context.Context, owner *string, status conn := s.dbConn(dbTx) cmd := ` - SELECT owner, id, from_addr, to_addr, nonce, value, data, gas, gas_price, status, block_num, history, created_at, updated_at + SELECT owner, id, from_addr, to_addr, nonce, value, data, gas, gas_offset, gas_price, status, block_num, history, created_at, updated_at FROM state.monitored_txs WHERE (owner = $1 OR $1 IS NULL)` if hasStatusToFilter { @@ -128,7 +128,7 @@ func (s *PostgresStorage) GetByStatus(ctx context.Context, owner *string, status func (s *PostgresStorage) GetByBlock(ctx 
context.Context, fromBlock, toBlock *uint64, dbTx pgx.Tx) ([]monitoredTx, error) { conn := s.dbConn(dbTx) cmd := ` - SELECT owner, id, from_addr, to_addr, nonce, value, data, gas, gas_price, status, block_num, history, created_at, updated_at + SELECT owner, id, from_addr, to_addr, nonce, value, data, gas, gas_offset, gas_price, status, block_num, history, created_at, updated_at FROM state.monitored_txs WHERE (block_num >= $1 OR $1 IS NULL) AND (block_num <= $2 OR $2 IS NULL) @@ -182,11 +182,12 @@ func (s *PostgresStorage) Update(ctx context.Context, mTx monitoredTx, dbTx pgx. , value = $6 , data = $7 , gas = $8 - , gas_price = $9 - , status = $10 - , block_num = $11 - , history = $12 - , updated_at = $13 + , gas_offset = $9 + , gas_price = $10 + , status = $11 + , block_num = $12 + , history = $13 + , updated_at = $14 WHERE owner = $1 AND id = $2` @@ -199,7 +200,7 @@ func (s *PostgresStorage) Update(ctx context.Context, mTx monitoredTx, dbTx pgx. _, err := conn.Exec(ctx, cmd, mTx.owner, mTx.id, mTx.from.String(), mTx.toStringPtr(), mTx.nonce, mTx.valueU64Ptr(), mTx.dataStringPtr(), - mTx.gas, mTx.gasPrice.Uint64(), string(mTx.status), bn, + mTx.gas, mTx.gasOffset, mTx.gasPrice.Uint64(), string(mTx.status), bn, mTx.historyStringSlice(), time.Now().UTC().Round(time.Microsecond)) if err != nil { @@ -212,7 +213,7 @@ func (s *PostgresStorage) Update(ctx context.Context, mTx monitoredTx, dbTx pgx. 
// scanMtx scans a row and fill the provided instance of monitoredTx with // the row data func (s *PostgresStorage) scanMtx(row pgx.Row, mTx *monitoredTx) error { - // id, from, to, nonce, value, data, gas, gas_price, status, history, created_at, updated_at + // id, from, to, nonce, value, data, gas, gas_offset, gas_price, status, history, created_at, updated_at var from, status string var to, data *string var history []string @@ -220,7 +221,7 @@ func (s *PostgresStorage) scanMtx(row pgx.Row, mTx *monitoredTx) error { var gasPrice uint64 err := row.Scan(&mTx.owner, &mTx.id, &from, &to, &mTx.nonce, &value, - &data, &mTx.gas, &gasPrice, &status, &blockNumber, &history, + &data, &mTx.gas, &mTx.gasOffset, &gasPrice, &status, &blockNumber, &history, &mTx.createdAt, &mTx.updatedAt) if err != nil { return err diff --git a/gasprice/mock_etherman.go b/gasprice/mock_etherman.go index 488c86f58a..ce45468181 100644 --- a/gasprice/mock_etherman.go +++ b/gasprice/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package gasprice @@ -30,13 +30,12 @@ func (_m *ethermanMock) GetL1GasPrice(ctx context.Context) *big.Int { return r0 } -type mockConstructorTestingTnewEthermanMock interface { +// newEthermanMock creates a new instance of ethermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newEthermanMock(t interface { mock.TestingT Cleanup(func()) -} - -// newEthermanMock creates a new instance of ethermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newEthermanMock(t mockConstructorTestingTnewEthermanMock) *ethermanMock { +}) *ethermanMock { mock := ðermanMock{} mock.Mock.Test(t) diff --git a/gasprice/mock_pool.go b/gasprice/mock_pool.go index 901d0b89a1..a91c5cdadd 100644 --- a/gasprice/mock_pool.go +++ b/gasprice/mock_pool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package gasprice @@ -69,13 +69,12 @@ func (_m *poolMock) SetGasPrices(ctx context.Context, l2GasPrice uint64, l1GasPr return r0 } -type mockConstructorTestingTnewPoolMock interface { +// newPoolMock creates a new instance of poolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newPoolMock(t interface { mock.TestingT Cleanup(func()) -} - -// newPoolMock creates a new instance of poolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newPoolMock(t mockConstructorTestingTnewPoolMock) *poolMock { +}) *poolMock { mock := &poolMock{} mock.Mock.Test(t) diff --git a/go.mod b/go.mod index 367e2a2e51..fc32e324b7 100644 --- a/go.mod +++ b/go.mod @@ -4,33 +4,34 @@ go 1.19 require ( github.com/0xPolygon/cdk-data-availability v0.0.0-20230830141533-4064ada790a6 + github.com/0xPolygonHermez/zkevm-data-streamer v0.1.11 github.com/didip/tollbooth/v6 v6.1.2 - github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 - github.com/ethereum/go-ethereum v1.12.1 - github.com/go-git/go-billy/v5 v5.4.1 - github.com/go-git/go-git/v5 v5.8.1 + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 + github.com/ethereum/go-ethereum v1.13.2 + github.com/go-git/go-billy/v5 v5.5.0 + github.com/go-git/go-git/v5 v5.10.0 github.com/gobuffalo/packr/v2 v2.8.3 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.4.0 github.com/habx/pg-commands v0.6.1 github.com/hermeznetwork/tracerr v0.3.2 github.com/iden3/go-iden3-crypto v0.0.15 - github.com/invopop/jsonschema v0.7.0 + github.com/invopop/jsonschema v0.12.0 github.com/jackc/pgconn v1.14.1 github.com/jackc/pgx/v4 v4.18.1 github.com/mitchellh/mapstructure v1.5.0 - github.com/prometheus/client_model v0.4.0 - github.com/prometheus/common v0.44.0 + github.com/prometheus/client_model v0.5.0 + github.com/prometheus/common v0.45.0 github.com/rubenv/sql-migrate v1.5.2 - github.com/spf13/afero v1.9.5 - github.com/spf13/viper v1.16.0 + github.com/spf13/afero v1.10.0 + github.com/spf13/viper v1.17.0 github.com/stretchr/testify v1.8.4 github.com/umbracle/ethgo v0.1.4-0.20230712173909-df37dddf16f0 github.com/urfave/cli/v2 v2.25.7 - go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.11.0 - golang.org/x/net v0.12.0 - golang.org/x/sync v0.3.0 - google.golang.org/grpc v1.57.0 + go.uber.org/zap v1.26.0 + golang.org/x/crypto v0.14.0 + golang.org/x/net v0.17.0 + golang.org/x/sync v0.4.0 + google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 
gopkg.in/yaml.v3 v3.0.1 @@ -40,12 +41,12 @@ require ( dario.cat/mergo v1.0.0 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.8.0 // indirect github.com/acomagu/bufpipe v1.0.4 // indirect github.com/aliyun/alibaba-cloud-sdk-go v1.61.18 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect @@ -54,18 +55,19 @@ require ( github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect + github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.10.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/ethereum/c-kzg-4844 v0.3.0 // indirect + github.com/ethereum/c-kzg-4844 v0.3.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect 
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/getsentry/sentry-go v0.18.0 // indirect @@ -81,15 +83,13 @@ require ( github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/huin/goupnp v1.0.3 // indirect - github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -103,75 +103,80 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.17.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.15.15 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/markbates/errx v1.1.0 // indirect github.com/markbates/oncer v1.0.0 // indirect github.com/markbates/safe v1.0.1 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect 
github.com/miguelmota/go-solidity-sha3 v0.1.1 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/rs/cors v1.8.2 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/procfs v0.11.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/skeema/knownhosts v1.2.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect - github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b // indirect - github.com/tklauser/go-sysconf v0.3.10 // indirect - github.com/tklauser/numcpus v0.4.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/supranational/blst v0.3.11 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus 
v0.6.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 // indirect github.com/valyala/fastjson v1.6.3 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.9.1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + golang.org/x/tools v0.13.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect - gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/warnings.v0 v0.1.2 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) require ( - github.com/gorilla/websocket v1.5.0 + github.com/gorilla/websocket v1.5.1 github.com/holiman/uint256 v1.2.3 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect ) require ( + github.com/fatih/color v1.15.0 + github.com/joho/godotenv v1.5.1 github.com/nacos-group/nacos-sdk-go v1.1.4 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.17.0 github.com/segmentio/kafka-go v0.4.43 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 ) replace github.com/0xPolygon/cdk-data-availability => 
github.com/okx/x1-data-availability v0.0.0-20231113064732-3afebde141fc diff --git a/go.sum b/go.sum index 700f2e9262..f58f1087df 100644 --- a/go.sum +++ b/go.sum @@ -42,6 +42,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/0xPolygonHermez/zkevm-data-streamer v0.1.11 h1:cZCxmpj5zT8Zibva6JbT1YuJJRVR8kkj/23TXaF2HtA= +github.com/0xPolygonHermez/zkevm-data-streamer v0.1.11/go.mod h1:853hkExOKPpMqbyEGqF8cuobSlbVd2OLVEZoRFTGRzo= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -58,8 +60,8 @@ github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= -github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs= -github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod 
h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= @@ -80,8 +82,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -115,15 +117,15 @@ github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnht github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= 
github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= @@ -147,9 +149,12 @@ github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uz github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= @@ -165,13 +170,13 @@ github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnm github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 
h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -182,12 +187,14 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.3.0 h1:3Y3hD6l5i0dEYsBL50C+Om644kve3pNqoAcvE26o9zI= -github.com/ethereum/c-kzg-4844 v0.3.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8= -github.com/ethereum/go-ethereum v1.12.1 h1:1kXDPxhLfyySuQYIfRxVBGYuaHdxNNxevA73vjIwsgk= -github.com/ethereum/go-ethereum v1.12.1/go.mod h1:zKetLweqBR8ZS+1O9iJWI8DvmmD2NzD19apjEWDCsnw= +github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= +github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.2 h1:g9mCpfPWqCA1OL4e6C98PeVttb0HadfBRuKTGvMnOvw= +github.com/ethereum/go-ethereum v1.13.2/go.mod h1:gkQ5Ygi64ZBh9M/4iXY1R8WqoNCx1Ey0CkYn2BD4/fw= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structs 
v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= @@ -212,11 +219,11 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= -github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= -github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8= -github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A= -github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git/v5 v5.10.0 h1:F0x3xXrAWmhwtzoCokU4IMPcBdncG+HAAqi9FcOOjbQ= +github.com/go-git/go-git/v5 v5.10.0/go.mod h1:1FOZ/pQnqw24ghP2n7cunVl0ON55BsjPYvhWHvZGhoo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -278,9 +285,8 
@@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -343,16 +349,16 @@ github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8q github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/habx/pg-commands v0.6.1 h1:+9vo6+N/usIZ5rF6jIJle5Tjvf01B09i0FPfzIvgoIg= github.com/habx/pg-commands v0.6.1/go.mod h1:PkBR8QOJKbIjv4r1NuOFrz+LyjsbiAtmQbuu6+w0SAA= @@ -387,12 +393,9 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= -github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= -github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 h1:i462o439ZjprVSFSZLZxcsoAe592sZB1rci2Z8j4wdk= -github.com/iancoleman/orderedmap 
v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -400,8 +403,8 @@ github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZ github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/invopop/jsonschema v0.7.0 h1:2vgQcBz1n256N+FpX3Jq7Y17AjYt46Ig3zIWyy770So= -github.com/invopop/jsonschema v0.7.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= +github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= +github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= @@ -467,6 +470,9 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.5.1 
h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -493,8 +499,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -524,6 +530,8 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 
h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= @@ -539,6 +547,7 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -546,13 +555,15 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.15 
h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -587,7 +598,6 @@ github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5Vgl github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -605,8 +615,8 @@ github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0 
h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= @@ -614,8 +624,8 @@ github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -628,29 +638,29 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs 
v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -662,6 +672,10 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= 
+github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -689,10 +703,12 @@ github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= @@ -700,15 +716,14 @@ github.com/spf13/cast v1.5.1/go.mod 
h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= +github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -719,7 +734,6 @@ github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -728,20 +742,19 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM= -github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod 
h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= @@ -767,6 +780,8 @@ github.com/vmihailenco/bufpool v0.1.11 h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6cz github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -808,23 +823,21 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= 
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -848,8 +861,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -860,8 +873,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp 
v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -889,8 +902,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -945,8 +958,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod 
h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -972,8 +985,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1040,20 +1053,22 @@ golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1061,8 +1076,8 @@ golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1076,8 +1091,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1144,14 +1159,13 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1233,8 +1247,8 @@ google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod 
h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1256,8 +1270,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1290,8 +1304,6 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 
v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= diff --git a/hex/hex.go b/hex/hex.go index d6a1468f17..4699eb208a 100644 --- a/hex/hex.go +++ b/hex/hex.go @@ -40,6 +40,12 @@ func DecodeString(str string) ([]byte, error) { func DecodeHex(str string) ([]byte, error) { str = strings.TrimPrefix(str, "0x") + // Check if the string has an odd length + if len(str)%2 != 0 { + // Prepend a '0' to make it even-length + str = "0" + str + } + return hex.DecodeString(str) } diff --git a/hex/hex_test.go b/hex/hex_test.go index 12e6e44048..da86da3589 100644 --- a/hex/hex_test.go +++ b/hex/hex_test.go @@ -1,11 +1,13 @@ package hex import ( + "encoding/hex" "math" "math/big" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestEncodeDecodeBig(t *testing.T) { @@ -14,3 +16,37 @@ func TestEncodeDecodeBig(t *testing.T) { d := DecodeBig(e) assert.Equal(t, b.Uint64(), d.Uint64()) } + +// Define a struct for test cases +type TestCase struct { + input string + output []byte + err error +} + +// Unit test function +func TestDecodeHex(t *testing.T) { + testCases := []TestCase{ + {"0", []byte{0}, nil}, + {"00", []byte{0}, nil}, + {"0x0", []byte{0}, nil}, + {"0x00", []byte{0}, nil}, + {"1", []byte{1}, nil}, + {"01", 
[]byte{1}, nil}, + {"", []byte{}, hex.ErrLength}, + {"0x", []byte{}, hex.ErrLength}, + {"zz", []byte{}, hex.InvalidByteError('z')}, + } + + for _, tc := range testCases { + t.Run(tc.input, func(t *testing.T) { + output, err := DecodeHex(tc.input) + if tc.err != nil { + require.Error(t, tc.err, err) + } else { + require.NoError(t, err) + } + require.Equal(t, output, tc.output) + }) + } +} diff --git a/jsonrpc/client/client.go b/jsonrpc/client/client.go index 7ae0057713..10ae2fa29f 100644 --- a/jsonrpc/client/client.go +++ b/jsonrpc/client/client.go @@ -10,6 +10,8 @@ import ( "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" ) +const jsonRPCVersion = "2.0" + // Client defines typed wrappers for the zkEVM RPC API. type Client struct { url string @@ -26,53 +28,112 @@ func NewClient(url string) *Client { // the provided method and parameters, which is compatible with the Ethereum // JSON RPC Server. func JSONRPCCall(url, method string, parameters ...interface{}) (types.Response, error) { - const jsonRPCVersion = "2.0" - params, err := json.Marshal(parameters) if err != nil { return types.Response{}, err } - req := types.Request{ + request := types.Request{ JSONRPC: jsonRPCVersion, ID: float64(1), Method: method, Params: params, } - reqBody, err := json.Marshal(req) + httpRes, err := sendJSONRPC_HTTPRequest(url, request) if err != nil { return types.Response{}, err } - reqBodyReader := bytes.NewReader(reqBody) - httpReq, err := http.NewRequest(http.MethodPost, url, reqBodyReader) + resBody, err := io.ReadAll(httpRes.Body) if err != nil { return types.Response{}, err } + defer httpRes.Body.Close() - httpReq.Header.Add("Content-type", "application/json") + if httpRes.StatusCode != http.StatusOK { + return types.Response{}, fmt.Errorf("%v - %v", httpRes.StatusCode, string(resBody)) + } - httpRes, err := http.DefaultClient.Do(httpReq) + var res types.Response + err = json.Unmarshal(resBody, &res) if err != nil { return types.Response{}, err } + return res, nil +} - if 
httpRes.StatusCode != http.StatusOK { - return types.Response{}, fmt.Errorf("Invalid status code, expected: %v, found: %v", http.StatusOK, httpRes.StatusCode) +// BatchCall used in batch requests to send multiple methods and parameters at once +type BatchCall struct { + Method string + Parameters []interface{} +} + +// JSONRPCBatchCall executes a 2.0 JSON RPC HTTP Post Batch Request to the provided URL with +// the provided method and parameters groups, which is compatible with the Ethereum +// JSON RPC Server. +func JSONRPCBatchCall(url string, calls ...BatchCall) ([]types.Response, error) { + requests := []types.Request{} + + for i, call := range calls { + params, err := json.Marshal(call.Parameters) + if err != nil { + return nil, err + } + + req := types.Request{ + JSONRPC: jsonRPCVersion, + ID: float64(i), + Method: call.Method, + Params: params, + } + + requests = append(requests, req) + } + + httpRes, err := sendJSONRPC_HTTPRequest(url, requests) + if err != nil { + return nil, err } resBody, err := io.ReadAll(httpRes.Body) if err != nil { - return types.Response{}, err + return nil, err } defer httpRes.Body.Close() - var res types.Response + if httpRes.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%v - %v", httpRes.StatusCode, string(resBody)) + } + + var res []types.Response err = json.Unmarshal(resBody, &res) if err != nil { - return types.Response{}, err + errorMessage := string(resBody) + return nil, fmt.Errorf(errorMessage) } return res, nil } + +func sendJSONRPC_HTTPRequest(url string, payload interface{}) (*http.Response, error) { + reqBody, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + reqBodyReader := bytes.NewReader(reqBody) + httpReq, err := http.NewRequest(http.MethodPost, url, reqBodyReader) + if err != nil { + return nil, err + } + + httpReq.Header.Add("Content-type", "application/json") + + httpRes, err := http.DefaultClient.Do(httpReq) + if err != nil { + return nil, err + } + + return httpRes, nil +} 
diff --git a/jsonrpc/config.go b/jsonrpc/config.go index aefe6a738a..f44a2b04f2 100644 --- a/jsonrpc/config.go +++ b/jsonrpc/config.go @@ -1,6 +1,9 @@ package jsonrpc -import "github.com/0xPolygonHermez/zkevm-node/config/types" +import ( + "github.com/0xPolygonHermez/zkevm-node/config/types" + "github.com/ethereum/go-ethereum/common" +) // Config represents the configuration of the json rpc type Config struct { @@ -35,10 +38,30 @@ type Config struct { // EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price. EnableL2SuggestedGasPricePolling bool `mapstructure:"EnableL2SuggestedGasPricePolling"` - // TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP) - // to do the parallel requests to RPC.debug_traceTransaction endpoint - TraceBatchUseHTTPS bool `mapstructure:"TraceBatchUseHTTPS"` + // BatchRequestsEnabled defines if the Batch requests are enabled or disabled + BatchRequestsEnabled bool `mapstructure:"BatchRequestsEnabled"` + // BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request + BatchRequestsLimit uint `mapstructure:"BatchRequestsLimit"` + + // L2Coinbase defines which address is going to receive the fees + L2Coinbase common.Address + + // MaxLogsCount is a configuration to set the max number of logs that can be returned + // in a single call to the state, if zero it means no limit + MaxLogsCount uint64 `mapstructure:"MaxLogsCount"` + + // MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs + // logs in a single call to the state, if zero it means no limit + MaxLogsBlockRange uint64 `mapstructure:"MaxLogsBlockRange"` + + // MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying + // native block hashes in a single call to the state, if zero it means no limit + MaxNativeBlockHashBlockRange uint64 
`mapstructure:"MaxNativeBlockHashBlockRange"` + + // EnableHttpLog allows the user to enable or disable the logs related to the HTTP + // requests to be captured by the server. + EnableHttpLog bool `mapstructure:"EnableHttpLog"` // EnablePendingTransactionFilter enables pending transaction filter that can support query L2 pending transaction EnablePendingTransactionFilter bool `mapstructure:"EnablePendingTransactionFilter"` @@ -59,6 +82,9 @@ type WebSocketsConfig struct { // Port defines the port to serve the endpoints via WS Port int `mapstructure:"Port"` + + // ReadLimit defines the maximum size of a message read from the client (in bytes) + ReadLimit int64 `mapstructure:"ReadLimit"` } // NacosConfig has parameters to config the nacos client diff --git a/jsonrpc/dbtxmanager.go b/jsonrpc/dbtxmanager.go index 0b294469dc..bb073d0369 100644 --- a/jsonrpc/dbtxmanager.go +++ b/jsonrpc/dbtxmanager.go @@ -23,19 +23,19 @@ func (f *DBTxManager) NewDbTxScope(db DBTxer, scopedFn DBTxScopedFn) (interface{ ctx := context.Background() dbTx, err := db.BeginStateTransaction(ctx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to connect to the state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to connect to the state", err, true) } v, rpcErr := scopedFn(ctx, dbTx) if rpcErr != nil { if txErr := dbTx.Rollback(context.Background()); txErr != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to rollback db transaction", txErr) + return RPCErrorResponse(types.DefaultErrorCode, "failed to rollback db transaction", txErr, true) } return v, rpcErr } if txErr := dbTx.Commit(context.Background()); txErr != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to commit db transaction", txErr) + return RPCErrorResponse(types.DefaultErrorCode, "failed to commit db transaction", txErr, true) } return v, rpcErr } diff --git a/jsonrpc/endpoints_debug.go b/jsonrpc/endpoints_debug.go index add5274420..fdc72b539b 100644 --- 
a/jsonrpc/endpoints_debug.go +++ b/jsonrpc/endpoints_debug.go @@ -109,11 +109,11 @@ func (d *DebugEndpoints) TraceBlockByNumber(number types.BlockNumber, cfg *trace if errors.Is(err, state.ErrNotFound) { return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("block #%d not found", blockNumber)) } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by number", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by number", err, true) } traces, rpcErr := d.buildTraceBlock(ctx, block.Transactions(), cfg, dbTx) - if err != nil { + if rpcErr != nil { return nil, rpcErr } @@ -129,11 +129,11 @@ func (d *DebugEndpoints) TraceBlockByHash(hash types.ArgHash, cfg *traceConfig) if errors.Is(err, state.ErrNotFound) { return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("block %s not found", hash.Hash().String())) } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash", err, true) } traces, rpcErr := d.buildTraceBlock(ctx, block.Transactions(), cfg, dbTx) - if err != nil { + if rpcErr != nil { return nil, rpcErr } @@ -172,35 +172,35 @@ func (d *DebugEndpoints) TraceBatchByNumber(httpRequest *http.Request, number ty const bufferSize = 10 return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { - batchNumber, rpcErr := number.GetNumericBatchNumber(ctx, d.state, dbTx) + batchNumber, rpcErr := number.GetNumericBatchNumber(ctx, d.state, d.etherman, dbTx) if rpcErr != nil { return nil, rpcErr } batch, err := d.state.GetBatchByNumber(ctx, batchNumber, dbTx) - if errors.Is(err, state.ErrStateNotSynchronized) { + if errors.Is(err, state.ErrNotFound) { return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("batch #%d not found", batchNumber)) } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, 
"failed to get batch by number", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get batch by number", err, true) } txs, _, err := d.state.GetTransactionsByBatchNumber(ctx, batch.BatchNumber, dbTx) if !errors.Is(err, state.ErrNotFound) && err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch txs from state by number %v to create the traces", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch txs from state by number %v to create the traces", batchNumber), err, true) } receipts := make([]ethTypes.Receipt, 0, len(txs)) for _, tx := range txs { receipt, err := d.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v to get trace", tx.Hash().String()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v to get trace", tx.Hash().String()), err, true) } receipts = append(receipts, *receipt) } requests := make(chan (ethTypes.Receipt), bufferSize) - mu := sync.Mutex{} + mu := &sync.Mutex{} wg := sync.WaitGroup{} wg.Add(len(receipts)) responses := make([]traceResponse, 0, len(receipts)) @@ -247,7 +247,7 @@ func (d *DebugEndpoints) TraceBatchByNumber(httpRequest *http.Request, number ty // wait the traces to be loaded if waitTimeout(&wg, d.cfg.ReadTimeout.Duration) { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: timeout reached", batchNumber), nil) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: timeout reached", batchNumber), nil, true) } close(requests) @@ -268,7 +268,7 @@ func (d *DebugEndpoints) TraceBatchByNumber(httpRequest *http.Request, number ty traces := make([]traceBatchTransactionResponse, 0, len(receipts)) for _, response := range responses { if response.err != nil { - return 
RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: failed to get trace for tx: %v, err: %v", batchNumber, response.txHash.String(), response.err.Error()), nil) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("failed to get traces for batch %v: failed to get trace for tx: %v, err: %v", batchNumber, response.txHash.String(), response.err.Error()), nil, true) } traces = append(traces, traceBatchTransactionResponse{ @@ -285,8 +285,8 @@ func (d *DebugEndpoints) buildTraceBlock(ctx context.Context, txs []*ethTypes.Tr for _, tx := range txs { traceTransaction, err := d.buildTraceTransaction(ctx, tx.Hash(), cfg, dbTx) if err != nil { - errMsg := fmt.Sprintf("failed to get trace for transaction %v", tx.Hash().String()) - return RPCErrorResponse(types.DefaultErrorCode, errMsg, err) + errMsg := fmt.Sprintf("failed to get trace for transaction %v: %v", tx.Hash().String(), err.Error()) + return RPCErrorResponse(types.DefaultErrorCode, errMsg, err, true) } traceBlockTransaction := traceBlockTransactionResponse{ Result: traceTransaction, @@ -305,7 +305,7 @@ func (d *DebugEndpoints) buildTraceTransaction(ctx context.Context, hash common. // check tracer if traceCfg.Tracer != nil && *traceCfg.Tracer != "" && !isBuiltInTracer(*traceCfg.Tracer) && !isJSCustomTracer(*traceCfg.Tracer) { - return RPCErrorResponse(types.DefaultErrorCode, "invalid tracer", nil) + return RPCErrorResponse(types.DefaultErrorCode, "invalid tracer", nil, false) } stateTraceConfig := state.TraceConfig{ @@ -319,10 +319,9 @@ func (d *DebugEndpoints) buildTraceTransaction(ctx context.Context, hash common. 
} result, err := d.state.DebugTransaction(ctx, hash, stateTraceConfig, dbTx) if errors.Is(err, state.ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, "transaction not found", nil) + return RPCErrorResponse(types.DefaultErrorCode, "transaction not found", nil, false) } else if err != nil { - const errorMessage = "failed to get trace" - log.Errorf("%v: %v", errorMessage, err) + errorMessage := fmt.Sprintf("failed to get trace: %v", err.Error()) return nil, types.NewRPCError(types.DefaultErrorCode, errorMessage) } diff --git a/jsonrpc/endpoints_debug_innertx.go b/jsonrpc/endpoints_debug_innertx.go index 87bb7edc61..b17ab8cbae 100644 --- a/jsonrpc/endpoints_debug_innertx.go +++ b/jsonrpc/endpoints_debug_innertx.go @@ -19,7 +19,7 @@ func (d *DebugEndpoints) buildInnerTransaction(ctx context.Context, hash common. // check tracer if traceCfg.Tracer != nil && *traceCfg.Tracer != "" && !isBuiltInTracer(*traceCfg.Tracer) && !isJSCustomTracer(*traceCfg.Tracer) { - return RPCErrorResponse(types.DefaultErrorCode, "invalid tracer", nil) + return RPCErrorResponse(types.DefaultErrorCode, "invalid tracer", nil, true) } stateTraceConfig := state.TraceConfig{ @@ -32,7 +32,7 @@ func (d *DebugEndpoints) buildInnerTransaction(ctx context.Context, hash common. 
} result, err := d.state.DebugTransaction(ctx, hash, stateTraceConfig, dbTx) if errors.Is(err, state.ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, "transaction not found", nil) + return RPCErrorResponse(types.DefaultErrorCode, "transaction not found", nil, true) } else if err != nil { const errorMessage = "failed to get trace" log.Errorf("%v: %v", errorMessage, err) diff --git a/jsonrpc/endpoints_eth.go b/jsonrpc/endpoints_eth.go index 1a2d665e51..a23e8722cd 100644 --- a/jsonrpc/endpoints_eth.go +++ b/jsonrpc/endpoints_eth.go @@ -8,6 +8,8 @@ import ( "math/big" "net/http" "strings" + "sync" + "time" "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/jsonrpc/client" @@ -18,7 +20,6 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state/runtime" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/gorilla/websocket" "github.com/jackc/pgx/v4" ) @@ -27,6 +28,9 @@ const ( // to communicate with the state for eth_EstimateGas and eth_Call when // the From field is not specified because it is optional DefaultSenderAddress = "0x1111111111111111111111111111111111111111" + + // maxTopics is the max number of topics a log can have + maxTopics = 4 ) // EthEndpoints contains implementations for the "eth" RPC endpoints @@ -67,9 +71,9 @@ func (e *EthEndpoints) BlockNumber() (interface{}, types.Error) { func (e *EthEndpoints) Call(arg *types.TxArgs, blockArg *types.BlockNumberOrHash) (interface{}, types.Error) { return e.txMan.NewDbTxScope(e.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { if arg == nil { - return RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 0", nil) + return RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 0", nil, false) } else if blockArg == nil { - return RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 1", nil) + return 
RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 1", nil, false) } block, respErr := e.getBlockByArg(ctx, blockArg, dbTx) if respErr != nil { @@ -90,7 +94,7 @@ func (e *EthEndpoints) Call(arg *types.TxArgs, blockArg *types.BlockNumberOrHash if arg.Gas == nil || uint64(*arg.Gas) <= 0 { header, err := e.state.GetL2BlockHeaderByNumber(ctx, block.NumberU64(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get block header", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get block header", err, true) } gas := types.ArgUint64(header.GasLimit) @@ -100,12 +104,12 @@ func (e *EthEndpoints) Call(arg *types.TxArgs, blockArg *types.BlockNumberOrHash defaultSenderAddress := common.HexToAddress(DefaultSenderAddress) sender, tx, err := arg.ToTransaction(ctx, e.state, e.cfg.MaxCumulativeGasUsed, block.Root(), defaultSenderAddress, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to convert arguments into an unsigned transaction", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to convert arguments into an unsigned transaction", err, false) } result, err := e.state.ProcessUnsignedTransaction(ctx, tx, sender, blockToProcess, true, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to execute the unsigned transaction", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to execute the unsigned transaction", err, true) } if result.Reverted() { @@ -125,6 +129,32 @@ func (e *EthEndpoints) ChainId() (interface{}, types.Error) { //nolint:revive return hex.EncodeUint64(e.chainID), nil } +// Coinbase Returns the client coinbase address. 
+func (e *EthEndpoints) Coinbase() (interface{}, types.Error) { //nolint:revive + if e.cfg.SequencerNodeURI != "" { + return e.getCoinbaseFromSequencerNode() + } + return e.cfg.L2Coinbase.String(), nil +} + +func (e *EthEndpoints) getCoinbaseFromSequencerNode() (interface{}, types.Error) { + res, err := client.JSONRPCCall(e.cfg.SequencerNodeURI, "eth_coinbase") + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get coinbase from sequencer node", err, true) + } + + if res.Error != nil { + return RPCErrorResponse(res.Error.Code, res.Error.Message, nil, false) + } + + var coinbaseAddress common.Address + err = json.Unmarshal(res.Result, &coinbaseAddress) + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to read coinbase from sequencer node", err, true) + } + return coinbaseAddress.String(), nil +} + // EstimateGas generates and returns an estimate of how much gas is necessary to // allow the transaction to complete. // The transaction will not be added to the blockchain. 
@@ -134,7 +164,7 @@ func (e *EthEndpoints) ChainId() (interface{}, types.Error) { //nolint:revive func (e *EthEndpoints) EstimateGas(arg *types.TxArgs, blockArg *types.BlockNumberOrHash) (interface{}, types.Error) { return e.txMan.NewDbTxScope(e.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { if arg == nil { - return RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 0", nil) + return RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 0", nil, false) } block, respErr := e.getBlockByArg(ctx, blockArg, dbTx) @@ -156,7 +186,7 @@ func (e *EthEndpoints) EstimateGas(arg *types.TxArgs, blockArg *types.BlockNumbe defaultSenderAddress := common.HexToAddress(DefaultSenderAddress) sender, tx, err := arg.ToTransaction(ctx, e.state, e.cfg.MaxCumulativeGasUsed, block.Root(), defaultSenderAddress, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to convert arguments into an unsigned transaction", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to convert arguments into an unsigned transaction", err, false) } gasEstimation, returnValue, err := e.state.EstimateGas(tx, sender, blockToProcess, dbTx) @@ -165,10 +195,7 @@ func (e *EthEndpoints) EstimateGas(arg *types.TxArgs, blockArg *types.BlockNumbe copy(data, returnValue) return nil, types.NewRPCErrorWithData(types.RevertedErrorCode, err.Error(), &data) } else if err != nil { - return nil, types.NewRPCErrorWithData(types.DefaultErrorCode, err.Error(), nil) - } - if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, err.Error(), nil) + return RPCErrorResponse(types.DefaultErrorCode, err.Error(), nil, true) } return hex.EncodeUint64(gasEstimation), nil }) @@ -190,17 +217,17 @@ func (e *EthEndpoints) GasPrice() (interface{}, types.Error) { func (e *EthEndpoints) getPriceFromSequencerNode() (interface{}, types.Error) { res, err := client.JSONRPCCall(e.cfg.SequencerNodeURI, "eth_gasPrice") 
if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get gas price from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get gas price from sequencer node", err, true) } if res.Error != nil { - return RPCErrorResponse(res.Error.Code, res.Error.Message, nil) + return RPCErrorResponse(res.Error.Code, res.Error.Message, nil, false) } var gasPrice types.ArgUint64 err = json.Unmarshal(res.Result, &gasPrice) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to read gas price from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to read gas price from sequencer node", err, true) } return gasPrice, nil } @@ -217,7 +244,7 @@ func (e *EthEndpoints) GetBalance(address types.ArgAddress, blockArg *types.Bloc if errors.Is(err, state.ErrNotFound) { return hex.EncodeUint64(0), nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get balance from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get balance from state", err, true) } return hex.EncodeBig(balance), nil @@ -267,7 +294,7 @@ func (e *EthEndpoints) GetBlockByHash(hash types.ArgHash, fullTx bool) (interfac if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash from state", err, true) } txs := block.Transactions() @@ -275,14 +302,14 @@ func (e *EthEndpoints) GetBlockByHash(hash types.ArgHash, fullTx bool) (interfac for _, tx := range txs { receipt, err := e.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), 
err, true) } receipts = append(receipts, *receipt) } rpcBlock, err := types.NewBlock(block, receipts, fullTx, false) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by hash %v", hash.Hash()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by hash %v", hash.Hash()), err, true) } return rpcBlock, nil @@ -295,7 +322,7 @@ func (e *EthEndpoints) GetBlockByNumber(number types.BlockNumber, fullTx bool) ( if number == types.PendingBlockNumber { lastBlock, err := e.state.GetLastL2Block(ctx, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "couldn't load last block from state to compute the pending block", err) + return RPCErrorResponse(types.DefaultErrorCode, "couldn't load last block from state to compute the pending block", err, true) } header := ethTypes.CopyHeader(lastBlock.Header()) header.ParentHash = lastBlock.Hash() @@ -305,7 +332,7 @@ func (e *EthEndpoints) GetBlockByNumber(number types.BlockNumber, fullTx bool) ( block := ethTypes.NewBlockWithHeader(header) rpcBlock, err := types.NewBlock(block, nil, fullTx, false) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err) + return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err, true) } return rpcBlock, nil @@ -320,7 +347,7 @@ func (e *EthEndpoints) GetBlockByNumber(number types.BlockNumber, fullTx bool) ( if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load block from state by number %v", blockNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load block from state by number %v", blockNumber), err, true) } txs := block.Transactions() @@ -328,14 +355,14 @@ func (e *EthEndpoints) GetBlockByNumber(number types.BlockNumber, fullTx 
bool) ( for _, tx := range txs { receipt, err := e.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err, true) } receipts = append(receipts, *receipt) } rpcBlock, err := types.NewBlock(block, receipts, fullTx, false) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by number %v", blockNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by number %v", blockNumber), err, true) } return rpcBlock, nil @@ -355,7 +382,7 @@ func (e *EthEndpoints) GetCode(address types.ArgAddress, blockArg *types.BlockNu if errors.Is(err, state.ErrNotFound) { return "0x", nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get code", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get code", err, true) } return types.ArgBytes(code), nil @@ -372,9 +399,9 @@ func (e *EthEndpoints) GetCompilers() (interface{}, types.Error) { func (e *EthEndpoints) GetFilterChanges(filterID string) (interface{}, types.Error) { filter, err := e.storage.GetFilter(filterID) if errors.Is(err, ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, "filter not found", err) + return RPCErrorResponse(types.DefaultErrorCode, "filter not found", err, false) } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get filter from storage", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get filter from storage", err, true) } switch filter.Type { @@ -382,7 +409,7 @@ func (e *EthEndpoints) GetFilterChanges(filterID string) (interface{}, types.Err { res, err := e.state.GetL2BlockHashesSince(context.Background(), filter.LastPoll, 
nil) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get block hashes", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get block hashes", err, true) } rpcErr := e.updateFilterLastPoll(filter.ID) if rpcErr != nil { @@ -397,7 +424,7 @@ func (e *EthEndpoints) GetFilterChanges(filterID string) (interface{}, types.Err { res, err := e.pool.GetPendingTxHashesSince(context.Background(), filter.LastPoll) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get pending transaction hashes", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get pending transaction hashes", err, true) } rpcErr := e.updateFilterLastPoll(filter.ID) if rpcErr != nil { @@ -439,7 +466,7 @@ func (e *EthEndpoints) GetFilterLogs(filterID string) (interface{}, types.Error) if errors.Is(err, ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get filter from storage", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get filter from storage", err, true) } if filter.Type != FilterTypeLog { @@ -460,24 +487,21 @@ func (e *EthEndpoints) GetLogs(filter LogFilter) (interface{}, types.Error) { } func (e *EthEndpoints) internalGetLogs(ctx context.Context, dbTx pgx.Tx, filter LogFilter) (interface{}, types.Error) { - var err error - var fromBlock uint64 = 0 - if filter.FromBlock != nil { - var rpcErr types.Error - fromBlock, rpcErr = filter.FromBlock.GetNumericBlockNumber(ctx, e.state, e.etherman, dbTx) - if rpcErr != nil { - return nil, rpcErr - } - } - - toBlock, rpcErr := filter.ToBlock.GetNumericBlockNumber(ctx, e.state, e.etherman, dbTx) + fromBlockNumber, toBlockNumber, rpcErr := filter.GetNumericBlockNumbers(ctx, e.cfg, e.state, e.etherman, dbTx) if rpcErr != nil { return nil, rpcErr } - logs, err := e.state.GetLogs(ctx, fromBlock, toBlock, filter.Addresses, filter.Topics, filter.BlockHash, filter.Since, dbTx) - if err != nil { - return 
RPCErrorResponse(types.DefaultErrorCode, "failed to get logs from state", err) + var err error + logs, err := e.state.GetLogs(ctx, fromBlockNumber, toBlockNumber, filter.Addresses, filter.Topics, filter.BlockHash, filter.Since, dbTx) + if errors.Is(err, state.ErrMaxLogsCountLimitExceeded) { + errMsg := fmt.Sprintf(state.ErrMaxLogsCountLimitExceeded.Error(), e.cfg.MaxLogsCount) + return RPCErrorResponse(types.InvalidParamsErrorCode, errMsg, nil, false) + } else if errors.Is(err, state.ErrMaxLogsBlockRangeLimitExceeded) { + errMsg := fmt.Sprintf(state.ErrMaxLogsBlockRangeLimitExceeded.Error(), e.cfg.MaxLogsBlockRange) + return RPCErrorResponse(types.InvalidParamsErrorCode, errMsg, nil, false) + } else if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get logs from state", err, true) } result := make([]types.Log, 0, len(logs)) @@ -493,7 +517,7 @@ func (e *EthEndpoints) GetStorageAt(address types.ArgAddress, storageKeyStr stri storageKey := types.ArgHash{} err := storageKey.UnmarshalText([]byte(storageKeyStr)) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "unable to decode storage key: hex string invalid", nil) + return RPCErrorResponse(types.DefaultErrorCode, "unable to decode storage key: hex string invalid", nil, false) } return e.txMan.NewDbTxScope(e.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { @@ -506,7 +530,7 @@ func (e *EthEndpoints) GetStorageAt(address types.ArgAddress, storageKeyStr stri if errors.Is(err, state.ErrNotFound) { return types.ArgBytesPtr(common.Hash{}.Bytes()), nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get storage value from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get storage value from state", err, true) } return types.ArgBytesPtr(common.BigToHash(value).Bytes()), nil @@ -521,19 +545,19 @@ func (e *EthEndpoints) GetTransactionByBlockHashAndIndex(hash types.ArgHash, ind if errors.Is(err, 
state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction", err, true) } receipt, err := e.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction receipt", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction receipt", err, true) } res, err := types.NewTransaction(*tx, receipt, false) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } return res, nil @@ -554,19 +578,19 @@ func (e *EthEndpoints) GetTransactionByBlockNumberAndIndex(number *types.BlockNu if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction", err, true) } receipt, err := e.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction receipt", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction receipt", err, true) } res, err := types.NewTransaction(*tx, receipt, false) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } return res, nil @@ -579,19 +603,19 @@ func (e *EthEndpoints) GetTransactionByHash(hash types.ArgHash) (interface{}, ty // try to get tx from state tx, err := 
e.state.GetTransactionByHash(ctx, hash.Hash(), dbTx) if err != nil && !errors.Is(err, state.ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction by hash from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction by hash from state", err, true) } if tx != nil { receipt, err := e.state.GetTransactionReceipt(ctx, hash.Hash(), dbTx) if errors.Is(err, state.ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, "transaction receipt not found", err) + return RPCErrorResponse(types.DefaultErrorCode, "transaction receipt not found", err, false) } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction receipt from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction receipt from state", err, true) } res, err := types.NewTransaction(*tx, receipt, false) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } return res, nil @@ -605,13 +629,13 @@ func (e *EthEndpoints) GetTransactionByHash(hash types.ArgHash) (interface{}, ty if errors.Is(err, pool.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction by hash from pool", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction by hash from pool", err, true) } if poolTx.Status == pool.TxStatusPending { tx = &poolTx.Transaction res, err := types.NewTransaction(*tx, nil, false) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } return res, nil } @@ -622,17 +646,17 @@ func (e *EthEndpoints) GetTransactionByHash(hash types.ArgHash) (interface{}, ty 
func (e *EthEndpoints) getTransactionByHashFromSequencerNode(hash common.Hash) (interface{}, types.Error) { res, err := client.JSONRPCCall(e.cfg.SequencerNodeURI, "eth_getTransactionByHash", hash.String()) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx from sequencer node", err, true) } if res.Error != nil { - return RPCErrorResponse(res.Error.Code, res.Error.Message, nil) + return RPCErrorResponse(res.Error.Code, res.Error.Message, nil, false) } var tx *types.Transaction err = json.Unmarshal(res.Result, &tx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to read tx from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to read tx from sequencer node", err, true) } return tx, nil } @@ -659,7 +683,7 @@ func (e *EthEndpoints) GetTransactionCount(address types.ArgAddress, blockArg *t } pendingNonce, err = e.pool.GetNonce(ctx, address.Address()) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to count pending transactions", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to count pending transactions", err, true) } } } @@ -669,7 +693,7 @@ func (e *EthEndpoints) GetTransactionCount(address types.ArgAddress, blockArg *t if errors.Is(err, state.ErrNotFound) { return hex.EncodeUint64(0), nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to count transactions", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to count transactions", err, true) } if pendingNonce > nonce { @@ -683,17 +707,17 @@ func (e *EthEndpoints) GetTransactionCount(address types.ArgAddress, blockArg *t func (e *EthEndpoints) getTransactionCountFromSequencerNode(address common.Address, number *types.BlockNumber) (interface{}, types.Error) { res, err := client.JSONRPCCall(e.cfg.SequencerNodeURI, "eth_getTransactionCount", 
address.String(), number.StringOrHex()) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get nonce from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get nonce from sequencer node", err, true) } if res.Error != nil { - return RPCErrorResponse(res.Error.Code, res.Error.Message, nil) + return RPCErrorResponse(res.Error.Code, res.Error.Message, nil, false) } var nonce types.ArgUint64 err = json.Unmarshal(res.Result, &nonce) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to read nonce from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to read nonce from sequencer node", err, true) } return nonce, nil } @@ -704,7 +728,7 @@ func (e *EthEndpoints) GetBlockTransactionCountByHash(hash types.ArgHash) (inter return e.txMan.NewDbTxScope(e.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { c, err := e.state.GetL2BlockTransactionCountByHash(ctx, hash.Hash(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to count transactions", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to count transactions", err, true) } return types.ArgUint64(c), nil @@ -721,7 +745,7 @@ func (e *EthEndpoints) GetBlockTransactionCountByNumber(number *types.BlockNumbe } c, err := e.pool.CountPendingTransactions(ctx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to count pending transactions", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to count pending transactions", err, true) } return types.ArgUint64(c), nil } @@ -734,7 +758,7 @@ func (e *EthEndpoints) GetBlockTransactionCountByNumber(number *types.BlockNumbe c, err := e.state.GetL2BlockTransactionCountByNumber(ctx, blockNumber, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to count transactions", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to count 
transactions", err, true) } return types.ArgUint64(c), nil @@ -744,17 +768,17 @@ func (e *EthEndpoints) GetBlockTransactionCountByNumber(number *types.BlockNumbe func (e *EthEndpoints) getBlockTransactionCountByNumberFromSequencerNode(number *types.BlockNumber) (interface{}, types.Error) { res, err := client.JSONRPCCall(e.cfg.SequencerNodeURI, "eth_getBlockTransactionCountByNumber", number.StringOrHex()) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx count by block number from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx count by block number from sequencer node", err, true) } if res.Error != nil { - return RPCErrorResponse(res.Error.Code, res.Error.Message, nil) + return RPCErrorResponse(res.Error.Code, res.Error.Message, nil, false) } var count types.ArgUint64 err = json.Unmarshal(res.Result, &count) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to read tx count by block number from sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to read tx count by block number from sequencer node", err, true) } return count, nil } @@ -766,19 +790,19 @@ func (e *EthEndpoints) GetTransactionReceipt(hash types.ArgHash) (interface{}, t if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx from state", err, true) } r, err := e.state.GetTransactionReceipt(ctx, hash.Hash(), dbTx) if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx receipt from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx receipt from state", err, true) } receipt, err := types.NewReceipt(*tx, r) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to build 
the receipt response", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to build the receipt response", err, true) } return receipt, nil @@ -793,10 +817,10 @@ func (e *EthEndpoints) NewBlockFilter() (interface{}, types.Error) { } // internal -func (e *EthEndpoints) newBlockFilter(wsConn *websocket.Conn) (interface{}, types.Error) { +func (e *EthEndpoints) newBlockFilter(wsConn *concurrentWsConn) (interface{}, types.Error) { id, err := e.storage.NewBlockFilter(wsConn) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to create new block filter", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to create new block filter", err, true) } return id, nil @@ -806,16 +830,25 @@ func (e *EthEndpoints) newBlockFilter(wsConn *websocket.Conn) (interface{}, type // to notify when the state changes (logs). To check if the state // has changed, call eth_getFilterChanges. func (e *EthEndpoints) NewFilter(filter LogFilter) (interface{}, types.Error) { - return e.newFilter(nil, filter) + return e.txMan.NewDbTxScope(e.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { + return e.newFilter(ctx, nil, filter, dbTx) + }) } // internal -func (e *EthEndpoints) newFilter(wsConn *websocket.Conn, filter LogFilter) (interface{}, types.Error) { +func (e *EthEndpoints) newFilter(ctx context.Context, wsConn *concurrentWsConn, filter LogFilter, dbTx pgx.Tx) (interface{}, types.Error) { + if filter.ShouldFilterByBlockRange() { + _, _, rpcErr := filter.GetNumericBlockNumbers(ctx, e.cfg, e.state, e.etherman, nil) + if rpcErr != nil { + return nil, rpcErr + } + } + id, err := e.storage.NewLogFilter(wsConn, filter) if errors.Is(err, ErrFilterInvalidPayload) { - return RPCErrorResponse(types.InvalidParamsErrorCode, err.Error(), nil) + return RPCErrorResponse(types.InvalidParamsErrorCode, err.Error(), nil, false) } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to create new log filter", err) + 
return RPCErrorResponse(types.DefaultErrorCode, "failed to create new log filter", err, true) } return id, nil @@ -829,13 +862,13 @@ func (e *EthEndpoints) NewPendingTransactionFilter() (interface{}, types.Error) } // internal -func (e *EthEndpoints) newPendingTransactionFilter(wsConn *websocket.Conn) (interface{}, types.Error) { +func (e *EthEndpoints) newPendingTransactionFilter(wsConn *concurrentWsConn) (interface{}, types.Error) { if !e.cfg.EnablePendingTransactionFilter { return nil, types.NewRPCError(types.DefaultErrorCode, "not supported yet") } id, err := e.storage.NewPendingTransactionFilter(wsConn) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to create new pending transaction filter", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to create new pending transaction filter", err, true) } return id, nil } @@ -865,11 +898,11 @@ func (e *EthEndpoints) SendRawTransaction(httpRequest *http.Request, input strin func (e *EthEndpoints) relayTxToSequencerNode(input string) (interface{}, types.Error) { res, err := client.JSONRPCCall(e.cfg.SequencerNodeURI, "eth_sendRawTransaction", input) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to relay tx to the sequencer node", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to relay tx to the sequencer node", err, true) } if res.Error != nil { - return RPCErrorResponse(res.Error.Code, res.Error.Message, nil) + return RPCErrorResponse(res.Error.Code, res.Error.Message, nil, false) } txHash := res.Result @@ -880,12 +913,13 @@ func (e *EthEndpoints) relayTxToSequencerNode(input string) (interface{}, types. 
func (e *EthEndpoints) tryToAddTxToPool(input, ip string) (interface{}, types.Error) { tx, err := hexToTx(input) if err != nil { - return RPCErrorResponse(types.InvalidParamsErrorCode, "invalid tx input", err) + return RPCErrorResponse(types.InvalidParamsErrorCode, "invalid tx input", err, false) } - log.Infof("adding TX to the pool: %v", tx.Hash().Hex()) if err := e.pool.AddTx(context.Background(), *tx, ip); err != nil { - return RPCErrorResponse(types.DefaultErrorCode, err.Error(), nil) + // it's not needed to log the error here, because we check and log if needed + // for each specific case during the "pool.AddTx" internal steps + return RPCErrorResponse(types.DefaultErrorCode, err.Error(), nil, false) } log.Infof("TX added to the pool: %v", tx.Hash().Hex()) @@ -898,7 +932,7 @@ func (e *EthEndpoints) UninstallFilter(filterID string) (interface{}, types.Erro if errors.Is(err, ErrNotFound) { return false, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to uninstall filter", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to uninstall filter", err, true) } return true, nil @@ -912,12 +946,12 @@ func (e *EthEndpoints) Syncing() (interface{}, types.Error) { if errors.Is(err, state.ErrStateNotSynchronized) { return nil, types.NewRPCErrorWithData(types.DefaultErrorCode, state.ErrStateNotSynchronized.Error(), nil) } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get last block number from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get last block number from state", err, true) } syncInfo, err := e.state.GetSyncingInfo(ctx, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get syncing info from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get syncing info from state", err, true) } if syncInfo.CurrentBlockNumber >= syncInfo.LastBlockNumberSeen { @@ -992,16 +1026,18 @@ func (e *EthEndpoints) 
updateFilterLastPoll(filterID string) types.Error { // The node will return a subscription id. // For each event that matches the subscription a notification with relevant // data is sent together with the subscription id. -func (e *EthEndpoints) Subscribe(wsConn *websocket.Conn, name string, logFilter *LogFilter) (interface{}, types.Error) { +func (e *EthEndpoints) Subscribe(wsConn *concurrentWsConn, name string, logFilter *LogFilter) (interface{}, types.Error) { switch name { case "newHeads": return e.newBlockFilter(wsConn) case "logs": - var lf LogFilter - if logFilter != nil { - lf = *logFilter - } - return e.newFilter(wsConn, lf) + return e.txMan.NewDbTxScope(e.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { + var lf LogFilter + if logFilter != nil { + lf = *logFilter + } + return e.newFilter(ctx, wsConn, lf, dbTx) + }) case "pendingTransactions", "newPendingTransactions": return e.newPendingTransactionFilter(wsConn) case "syncing": @@ -1012,76 +1048,267 @@ func (e *EthEndpoints) Subscribe(wsConn *websocket.Conn, name string, logFilter } // Unsubscribe uninstalls the filter based on the provided filterID -func (e *EthEndpoints) Unsubscribe(wsConn *websocket.Conn, filterID string) (interface{}, types.Error) { +func (e *EthEndpoints) Unsubscribe(wsConn *concurrentWsConn, filterID string) (interface{}, types.Error) { return e.UninstallFilter(filterID) } // uninstallFilterByWSConn uninstalls the filters connected to the // provided web socket connection -func (e *EthEndpoints) uninstallFilterByWSConn(wsConn *websocket.Conn) error { +func (e *EthEndpoints) uninstallFilterByWSConn(wsConn *concurrentWsConn) error { return e.storage.UninstallFilterByWSConn(wsConn) } // onNewL2Block is triggered when the state triggers the event for a new l2 block func (e *EthEndpoints) onNewL2Block(event state.NewL2BlockEvent) { - blockFilters, err := e.storage.GetAllBlockFiltersWithWSConn() + log.Infof("[onNewL2Block] new l2 block event detected for block 
%v", event.Block.NumberU64()) + start := time.Now() + wg := sync.WaitGroup{} + + wg.Add(1) + go e.notifyNewHeads(&wg, event) + + wg.Add(1) + go e.notifyNewLogs(&wg, event) + + wg.Wait() + log.Infof("[onNewL2Block] new l2 block %v took %v to send the messages to all ws connections", event.Block.NumberU64(), time.Since(start)) +} + +func (e *EthEndpoints) notifyNewHeads(wg *sync.WaitGroup, event state.NewL2BlockEvent) { + defer wg.Done() + start := time.Now() + + b, err := types.NewBlock(&event.Block, nil, false, false) + if err != nil { + log.Errorf("failed to build block response to subscription: %v", err) + return + } + data, err := json.Marshal(b) if err != nil { - log.Errorf("failed to get all block filters with web sockets connections: %v", err) + log.Errorf("failed to marshal block response to subscription: %v", err) + return + } + + filters := e.storage.GetAllBlockFiltersWithWSConn() + log.Infof("[notifyNewHeads] took %v to get block filters with ws connections", time.Since(start)) + + const maxWorkers = 32 + parallelize(maxWorkers, filters, func(worker int, filters []*Filter) { + for _, filter := range filters { + f := filter + start := time.Now() + f.EnqueueSubscriptionDataToBeSent(data) + log.Infof("[notifyNewHeads] took %v to enqueue new l2 block messages", time.Since(start)) + } + }) + + log.Infof("[notifyNewHeads] new l2 block event for block %v took %v to send all the messages for block filters", event.Block.NumberU64(), time.Since(start)) +} + +func (e *EthEndpoints) notifyNewLogs(wg *sync.WaitGroup, event state.NewL2BlockEvent) { + defer wg.Done() + start := time.Now() + + filters := e.storage.GetAllLogFiltersWithWSConn() + log.Infof("[notifyNewLogs] took %v to get log filters with ws connections", time.Since(start)) + + const maxWorkers = 32 + parallelize(maxWorkers, filters, func(worker int, filters []*Filter) { + for _, filter := range filters { + f := filter + start := time.Now() + if e.shouldSkipLogFilter(event, filter) { + return + } + 
log.Infof("[notifyNewLogs] took %v to check if should skip log filter", time.Since(start)) + + start = time.Now() + // get new logs for this specific filter + logs := filterLogs(event.Logs, filter) + log.Infof("[notifyNewLogs] took %v to filter logs", time.Since(start)) + + start = time.Now() + for _, l := range logs { + data, err := json.Marshal(l) + if err != nil { + log.Errorf("failed to marshal ethLog response to subscription: %v", err) + } + f.EnqueueSubscriptionDataToBeSent(data) + } + log.Infof("[notifyNewLogs] took %v to enqueue log messages", time.Since(start)) + } + }) + + log.Infof("[notifyNewLogs] new l2 block event for block %v took %v to send all the messages for log filters", event.Block.NumberU64(), time.Since(start)) +} + +// shouldSkipLogFilter checks if the log filter can be skipped while notifying new logs. +// it checks the log filter information against the block in the event to decide if the +// information in the event is required by the filter or can be ignored to save resources. 
+func (e *EthEndpoints) shouldSkipLogFilter(event state.NewL2BlockEvent, filter *Filter) bool { + logFilter := filter.Parameters.(LogFilter) + + if logFilter.BlockHash != nil { + // if the filter block hash is set, we check if the block is the + // one with the expected hash, otherwise we ignore the filter + bh := *logFilter.BlockHash + if bh.String() != event.Block.Hash().String() { + return true + } } else { - for _, filter := range blockFilters { - b, err := types.NewBlock(&event.Block, nil, false, false) - if err != nil { - log.Errorf("failed to build block response to subscription: %v", err) - } else { - e.sendSubscriptionResponse(filter, b) + // if the filter has a fromBlock value set + // and the event block number is smaller than the + // from block, skip this filter + if logFilter.FromBlock != nil { + fromBlock, rpcErr := logFilter.FromBlock.GetNumericBlockNumber(context.Background(), e.state, e.etherman, nil) + if rpcErr != nil { + log.Errorf("failed to get numeric block number for FromBlock field for filter %v: %v", filter.ID, rpcErr) + return true + } + // if the block number is smaller than the fromBlock value + // this means this block is out of the block range for this + // filter, so we skip it + if event.Block.NumberU64() < fromBlock { + return true + } + } + + // if the filter has a toBlock value set + // and the event block number is greater than the + // to block, skip this filter + if logFilter.ToBlock != nil { + toBlock, rpcErr := logFilter.ToBlock.GetNumericBlockNumber(context.Background(), e.state, e.etherman, nil) + if rpcErr != nil { + log.Errorf("failed to get numeric block number for ToBlock field for filter %v: %v", filter.ID, rpcErr) + return true + } + // if the block number is greater than the toBlock value + // this means this block is out of the block range for this + // filter, so we skip it + if event.Block.NumberU64() > toBlock { + return true } } } + return false +} - logFilters, err := e.storage.GetAllLogFiltersWithWSConn() - 
if err != nil { - log.Errorf("failed to get all log filters with web sockets connections: %v", err) - } else { - for _, filter := range logFilters { - changes, err := e.GetFilterChanges(filter.ID) - if err != nil { - log.Errorf("failed to get filters changes for filter %v with web sockets connections: %v", filter.ID, err) +// filterLogs will filter the provided logsToFilter accordingly to the filters provided +func filterLogs(logsToFilter []*ethTypes.Log, filter *Filter) []types.Log { + logFilter := filter.Parameters.(LogFilter) + + logs := make([]types.Log, 0) + for _, l := range logsToFilter { + // check address filter + if len(logFilter.Addresses) > 0 { + // if the log address doesn't match any address in the filter, skip this log + if !contains(logFilter.Addresses, l.Address) { continue } + } + + // check topics + match := true + if len(logFilter.Topics) > 0 { + out: + // check all topics + for i := 0; i < maxTopics; i++ { + // check if the filter contains information + // to filter this topic position + checkTopic := len(logFilter.Topics) > i + if !checkTopic { + // if we shouldn't check this topic, we can assume + // no more topics needs to be checked, because there + // will be no more topic filters, so we can break out + break out + } + + // check if the topic filter allows any topic + acceptAnyTopic := len(logFilter.Topics[i]) == 0 + if acceptAnyTopic { + // since any topic is allowed, we continue to the next topic filters + continue + } - if changes != nil { - ethLogs := changes.([]types.Log) - for _, ethLog := range ethLogs { - e.sendSubscriptionResponse(filter, ethLog) + // check if the log has the required topic set + logHasTopic := len(l.Topics) > i + if !logHasTopic { + // if the log doesn't have the required topic set, skip this log + match = false + break out + } + + // check if the any topic in the filter matches the log topic + if !contains(logFilter.Topics[i], l.Topics[i]) { + match = false + // if the log topic doesn't match any topic in the 
filter, skip this log + break out } } } + if match { + logs = append(logs, types.NewLog(*l)) + } } + return logs } -func (e *EthEndpoints) sendSubscriptionResponse(filter *Filter, data interface{}) { - const errMessage = "Unable to write WS message to filter %v, %s" - result, err := json.Marshal(data) - if err != nil { - log.Errorf(fmt.Sprintf(errMessage, filter.ID, err.Error())) +// contains check if the item can be found in the items +func contains[T comparable](items []T, itemsToFind T) bool { + for _, item := range items { + if item == itemsToFind { + return true + } + } + return false +} + +// parallelize split the items into workers accordingly +// to the max number of workers and the number of items, +// allowing the fn to be executed in concurrently for different +// chunks of items. +func parallelize[T any](maxWorkers int, items []T, fn func(worker int, items []T)) { + if len(items) == 0 { + return } - res := types.SubscriptionResponse{ - JSONRPC: "2.0", - Method: "eth_subscription", - Params: types.SubscriptionResponseParams{ - Subscription: filter.ID, - Result: result, - }, + var workersCount = maxWorkers + if workersCount > len(items) { + workersCount = len(items) } - message, err := json.Marshal(res) - if err != nil { - log.Errorf(fmt.Sprintf(errMessage, filter.ID, err.Error())) + + var jobSize = len(items) / workersCount + var rest = len(items) % workersCount + if rest > 0 { + jobSize++ } - err = filter.WsConn.WriteMessage(websocket.TextMessage, message) - if err != nil { - log.Errorf(fmt.Sprintf(errMessage, filter.ID, err.Error())) + wg := sync.WaitGroup{} + for worker := 0; worker < workersCount; worker++ { + rangeStart := worker * jobSize + rangeEnd := ((worker + 1) * jobSize) + + if rangeStart > len(items) { + continue + } + + if rangeEnd > len(items) { + rangeEnd = len(items) + } + + jobItems := items[rangeStart:rangeEnd] + + wg.Add(1) + go func(worker int, filteredItems []T, fn func(worker int, items []T)) { + defer func() { + wg.Done() + err := 
recover() + if err != nil { + fmt.Println(err) + } + }() + fn(worker, filteredItems) + }(worker, jobItems, fn) } - log.Debugf("WS message sent: %v", string(message)) + wg.Wait() } diff --git a/jsonrpc/endpoints_eth_test.go b/jsonrpc/endpoints_eth_test.go index 0ea391dc5b..ab796c193b 100644 --- a/jsonrpc/endpoints_eth_test.go +++ b/jsonrpc/endpoints_eth_test.go @@ -4,8 +4,10 @@ import ( "context" "encoding/json" "errors" + "fmt" "math/big" "strings" + "sync" "testing" "time" @@ -22,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" - "github.com/gorilla/websocket" "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -569,6 +570,65 @@ func TestChainID(t *testing.T) { assert.Equal(t, s.ChainID(), chainID.Uint64()) } +func TestCoinbase(t *testing.T) { + testCases := []struct { + name string + callSequencer bool + trustedCoinbase *common.Address + permissionlessCoinbase *common.Address + error error + expectedCoinbase common.Address + }{ + {"Coinbase not configured", true, nil, nil, nil, common.Address{}}, + {"Get trusted sequencer coinbase directly", true, state.AddressPtr(common.HexToAddress("0x1")), nil, nil, common.HexToAddress("0x1")}, + {"Get trusted sequencer coinbase via permissionless", false, state.AddressPtr(common.HexToAddress("0x1")), nil, nil, common.HexToAddress("0x1")}, + {"Ignore permissionless config", false, state.AddressPtr(common.HexToAddress("0x2")), state.AddressPtr(common.HexToAddress("0x1")), nil, common.HexToAddress("0x2")}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cfg := getSequencerDefaultConfig() + if tc.trustedCoinbase != nil { + cfg.L2Coinbase = *tc.trustedCoinbase + } + sequencerServer, _, _ := newMockedServerWithCustomConfig(t, cfg) + + var nonSequencerServer *mockedServer + if !tc.callSequencer { + cfg = getNonSequencerDefaultConfig(sequencerServer.ServerURL) + if 
tc.permissionlessCoinbase != nil { + cfg.L2Coinbase = *tc.permissionlessCoinbase + } + nonSequencerServer, _, _ = newMockedServerWithCustomConfig(t, cfg) + } + + var res types.Response + var err error + if tc.callSequencer { + res, err = sequencerServer.JSONRPCCall("eth_coinbase") + } else { + res, err = nonSequencerServer.JSONRPCCall("eth_coinbase") + } + require.NoError(t, err) + + assert.Nil(t, res.Error) + assert.NotNil(t, res.Result) + + var s string + err = json.Unmarshal(res.Result, &s) + require.NoError(t, err) + result := common.HexToAddress(s) + + assert.Equal(t, tc.expectedCoinbase.String(), result.String()) + + sequencerServer.Stop() + if !tc.callSequencer { + nonSequencerServer.Stop() + } + }) + } +} + func TestEstimateGas(t *testing.T) { s, m, _ := newSequencerMockedServer(t) defer s.Stop() @@ -3545,49 +3605,119 @@ func TestNewFilter(t *testing.T) { } hash := common.HexToHash("0x42") - blockNumber := "8" + blockNumber10 := "10" + blockNumber10010 := "10010" + blockNumber10011 := "10011" testCases := []testCase{ { - Name: "New filter created successfully", + Name: "New filter by block range created successfully", Request: types.LogFilterRequest{ - ToBlock: &blockNumber, + FromBlock: &blockNumber10, + ToBlock: &blockNumber10010, }, ExpectedResult: "1", ExpectedError: nil, SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Commit", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + m.Storage. - On("NewLogFilter", mock.IsType(&websocket.Conn{}), mock.IsType(LogFilter{})). + On("NewLogFilter", mock.IsType(&concurrentWsConn{}), mock.IsType(LogFilter{})). Return("1", nil). 
Once() }, }, { - Name: "failed to create new filter", + Name: "New filter by block hash created successfully", Request: types.LogFilterRequest{ BlockHash: &hash, }, - ExpectedResult: "", - ExpectedError: types.NewRPCError(types.DefaultErrorCode, "failed to create new log filter"), + ExpectedResult: "1", + ExpectedError: nil, SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Commit", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + m.Storage. - On("NewLogFilter", mock.IsType(&websocket.Conn{}), mock.IsType(LogFilter{})). - Return("", errors.New("failed to add new filter")). + On("NewLogFilter", mock.IsType(&concurrentWsConn{}), mock.IsType(LogFilter{})). + Return("1", nil). Once() }, }, { - Name: "failed to create new filter because BlockHash and ToBlock are present", + Name: "New filter not created due to from block greater than to block", + Request: types.LogFilterRequest{ + FromBlock: &blockNumber10010, + ToBlock: &blockNumber10, + }, + ExpectedResult: "", + ExpectedError: types.NewRPCError(types.InvalidParamsErrorCode, "invalid block range"), + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + }, + }, + { + Name: "New filter not created due to block range bigger than allowed", + Request: types.LogFilterRequest{ + FromBlock: &blockNumber10, + ToBlock: &blockNumber10011, + }, + ExpectedResult: "", + ExpectedError: types.NewRPCError(types.InvalidParamsErrorCode, "logs are limited to a 10000 block range"), + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). 
+ Once() + }, + }, + { + Name: "failed to create new filter due to error to store", Request: types.LogFilterRequest{ BlockHash: &hash, - ToBlock: &blockNumber, }, ExpectedResult: "", - ExpectedError: types.NewRPCError(types.InvalidParamsErrorCode, "invalid argument 0: cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other"), + ExpectedError: types.NewRPCError(types.DefaultErrorCode, "failed to create new log filter"), SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() m.Storage. - On("NewLogFilter", mock.IsType(&websocket.Conn{}), mock.IsType(LogFilter{})). - Once(). - Return("", ErrFilterInvalidPayload). + On("NewLogFilter", mock.IsType(&concurrentWsConn{}), mock.IsType(LogFilter{})). + Return("", errors.New("failed to add new filter")). Once() }, }, @@ -3637,7 +3767,7 @@ func TestNewBlockFilter(t *testing.T) { ExpectedError: nil, SetupMocks: func(m *mocksWrapper, tc testCase) { m.Storage. - On("NewBlockFilter", mock.IsType(&websocket.Conn{})). + On("NewBlockFilter", mock.IsType(&concurrentWsConn{})). Return("1", nil). Once() }, @@ -3648,7 +3778,7 @@ func TestNewBlockFilter(t *testing.T) { ExpectedError: types.NewRPCError(types.DefaultErrorCode, "failed to create new block filter"), SetupMocks: func(m *mocksWrapper, tc testCase) { m.Storage. - On("NewBlockFilter", mock.IsType(&websocket.Conn{})). + On("NewBlockFilter", mock.IsType(&concurrentWsConn{})). Return("", errors.New("failed to add new block filter")). Once() }, @@ -3697,9 +3827,9 @@ func TestNewPendingTransactionFilter(t *testing.T) { // Name: "New pending transaction filter created successfully", // ExpectedResult: "1", // ExpectedError: nil, - // SetupMocks: func(m *mocks, tc testCase) { + // SetupMocks: func(m *mocksWrapper, tc testCase) { // m.Storage. 
- // On("NewPendingTransactionFilter", mock.IsType(&websocket.Conn{})). + // On("NewPendingTransactionFilter", mock.IsType(&concurrentWsConn{})). // Return("1", nil). // Once() // }, @@ -3708,9 +3838,9 @@ func TestNewPendingTransactionFilter(t *testing.T) { // Name: "failed to create new pending transaction filter", // ExpectedResult: "", // ExpectedError: types.NewRPCError(types.DefaultErrorCode, "failed to create new pending transaction filter"), - // SetupMocks: func(m *mocks, tc testCase) { + // SetupMocks: func(m *mocksWrapper, tc testCase) { // m.Storage. - // On("NewPendingTransactionFilter", mock.IsType(&websocket.Conn{})). + // On("NewPendingTransactionFilter", mock.IsType(&concurrentWsConn{})). // Return("", errors.New("failed to add new pending transaction filter")). // Once() // }, @@ -3952,6 +4082,58 @@ func TestGetLogs(t *testing.T) { Once() }, }, + { + Name: "Get logs fails due to max block range limit exceeded", + Prepare: func(t *testing.T, tc *testCase) { + tc.Filter = ethereum.FilterQuery{ + FromBlock: big.NewInt(1), ToBlock: big.NewInt(10002), + Addresses: []common.Address{common.HexToAddress("0x111")}, + Topics: [][]common.Hash{{common.HexToHash("0x222")}}, + } + tc.ExpectedResult = nil + tc.ExpectedError = types.NewRPCError(types.InvalidParamsErrorCode, "logs are limited to a 10000 block range") + }, + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). 
+ Once() + }, + }, + { + Name: "Get logs fails due to max log count limit exceeded", + Prepare: func(t *testing.T, tc *testCase) { + tc.Filter = ethereum.FilterQuery{ + FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), + Addresses: []common.Address{common.HexToAddress("0x111")}, + Topics: [][]common.Hash{{common.HexToHash("0x222")}}, + } + tc.ExpectedResult = nil + tc.ExpectedError = types.NewRPCError(types.InvalidParamsErrorCode, "query returned more than 10000 results") + }, + SetupMocks: func(m *mocksWrapper, tc testCase) { + var since *time.Time + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + + m.State. + On("GetLogs", context.Background(), tc.Filter.FromBlock.Uint64(), tc.Filter.ToBlock.Uint64(), tc.Filter.Addresses, tc.Filter.Topics, tc.Filter.BlockHash, since, m.DbTx). + Return(nil, state.ErrMaxLogsCountLimitExceeded). + Once() + }, + }, } for _, testCase := range testCases { @@ -4698,3 +4880,305 @@ func TestGetFilterChanges(t *testing.T) { }) } } + +func TestSubscribeNewHeads(t *testing.T) { + s, m, _ := newSequencerMockedServer(t) + defer s.Stop() + + type testCase struct { + Name string + Channel chan *ethTypes.Header + ExpectedError interface{} + SetupMocks func(m *mocksWrapper, tc testCase) + } + + testCases := []testCase{ + { + Name: "Subscribe to new heads Successfully", + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.Storage. + On("NewBlockFilter", mock.IsType(&concurrentWsConn{})). + Return("0x1", nil). + Once() + }, + }, + { + Name: "Subscribe fails to add filter to storage", + ExpectedError: types.NewRPCError(types.DefaultErrorCode, "failed to create new block filter"), + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.Storage. + On("NewBlockFilter", mock.IsType(&concurrentWsConn{})). + Return("", fmt.Errorf("failed to add filter to storage")). 
+ Once() + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + tc := testCase + tc.SetupMocks(m, tc) + + c := s.GetWSClient() + + ctx := context.Background() + newHeadsChannel := make(chan *ethTypes.Header, 100) + sub, err := c.SubscribeNewHead(ctx, newHeadsChannel) + + if sub != nil { + assert.NotNil(t, sub) + } + + if err != nil || tc.ExpectedError != nil { + if expectedErr, ok := tc.ExpectedError.(*types.RPCError); ok { + rpcErr := err.(rpc.Error) + assert.Equal(t, expectedErr.ErrorCode(), rpcErr.ErrorCode()) + assert.Equal(t, expectedErr.Error(), rpcErr.Error()) + } else { + assert.Equal(t, tc.ExpectedError, err) + } + } + }) + } +} + +func TestSubscribeNewLogs(t *testing.T) { + s, m, _ := newSequencerMockedServer(t) + defer s.Stop() + + type testCase struct { + Name string + Filter ethereum.FilterQuery + Channel chan *ethTypes.Log + ExpectedError interface{} + Prepare func(t *testing.T, tc *testCase) + SetupMocks func(m *mocksWrapper, tc testCase) + } + + testCases := []testCase{ + { + Name: "Subscribe to new logs by block hash successfully", + Prepare: func(t *testing.T, tc *testCase) { + tc.Filter = ethereum.FilterQuery{ + BlockHash: &blockHash, + } + }, + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Commit", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + + m.Storage. + On("NewLogFilter", mock.IsType(&concurrentWsConn{}), mock.IsType(LogFilter{})). + Return("0x1", nil). + Once() + }, + }, + { + Name: "Subscribe to new logs fails to add new filter to storage", + ExpectedError: types.NewRPCError(types.DefaultErrorCode, "failed to create new log filter"), + Prepare: func(t *testing.T, tc *testCase) { + tc.Filter = ethereum.FilterQuery{ + BlockHash: &blockHash, + } + }, + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). 
+ Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + + m.Storage. + On("NewLogFilter", mock.IsType(&concurrentWsConn{}), mock.IsType(LogFilter{})). + Return("", fmt.Errorf("failed to add filter to storage")). + Once() + }, + }, + { + Name: "Subscribe to new logs fails due to max block range limit exceeded", + ExpectedError: types.NewRPCError(types.InvalidParamsErrorCode, "logs are limited to a 10000 block range"), + Prepare: func(t *testing.T, tc *testCase) { + tc.Filter = ethereum.FilterQuery{ + FromBlock: big.NewInt(1), ToBlock: big.NewInt(10002), + } + }, + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + tc := testCase + tc.Prepare(t, &tc) + tc.SetupMocks(m, tc) + + c := s.GetWSClient() + + ctx := context.Background() + newLogs := make(chan ethTypes.Log, 100) + sub, err := c.SubscribeFilterLogs(ctx, tc.Filter, newLogs) + + if sub != nil { + assert.NotNil(t, sub) + } + + if err != nil || tc.ExpectedError != nil { + if expectedErr, ok := tc.ExpectedError.(*types.RPCError); ok { + rpcErr := err.(rpc.Error) + assert.Equal(t, expectedErr.ErrorCode(), rpcErr.ErrorCode()) + assert.Equal(t, expectedErr.Error(), rpcErr.Error()) + } else { + assert.Equal(t, tc.ExpectedError, err) + } + } + }) + } +} + +func TestFilterLogs(t *testing.T) { + logs := []*ethTypes.Log{{ + Address: common.HexToAddress("0x1"), + Topics: []common.Hash{ + common.HexToHash("0xA"), + common.HexToHash("0xB"), + }, + }} + + // empty filter + filteredLogs := filterLogs(logs, &Filter{Parameters: LogFilter{}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by the log address + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Addresses: []common.Address{ + 
common.HexToAddress("0x1"), + }}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by the log address and another random address + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Addresses: []common.Address{ + common.HexToAddress("0x1"), + common.HexToAddress("0x2"), + }}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by unknown address + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Addresses: []common.Address{ + common.HexToAddress("0x2"), + }}}) + assert.Equal(t, 0, len(filteredLogs)) + + // filtered by topic0 + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {common.HexToHash("0xA")}, + }}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by topic0 but allows any topic1 + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {common.HexToHash("0xA")}, + {}, + }}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by any topic0 but forces topic1 + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {}, + {common.HexToHash("0xB")}, + }}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by forcing topic0 and topic1 + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {common.HexToHash("0xA")}, + {common.HexToHash("0xB")}, + }}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by forcing topic0 and topic1 to be any of the values + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {common.HexToHash("0xA"), common.HexToHash("0xB")}, + {common.HexToHash("0xA"), common.HexToHash("0xB")}, + }}}) + assert.Equal(t, 1, len(filteredLogs)) + + // filtered by forcing topic0 and topic1 to wrong values + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {common.HexToHash("0xB")}, + {common.HexToHash("0xA")}, + }}}) + assert.Equal(t, 0, len(filteredLogs)) + + // filtered by 
forcing topic0 to wrong value + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {common.HexToHash("0xB")}, + }}}) + assert.Equal(t, 0, len(filteredLogs)) + + // filtered by accepting any topic0 by forcing topic1 to wrong value + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {}, + {common.HexToHash("0xA")}, + }}}) + assert.Equal(t, 0, len(filteredLogs)) + + // filtered by accepting any topic0 and topic1 but forcing topic2 that doesn't exist + filteredLogs = filterLogs(logs, &Filter{Parameters: LogFilter{Topics: [][]common.Hash{ + {}, + {}, + {common.HexToHash("0xA")}, + }}}) + assert.Equal(t, 0, len(filteredLogs)) +} + +func TestContains(t *testing.T) { + items := []int{1, 2, 3} + assert.Equal(t, false, contains(items, 0)) + assert.Equal(t, true, contains(items, 1)) + assert.Equal(t, true, contains(items, 2)) + assert.Equal(t, true, contains(items, 3)) + assert.Equal(t, false, contains(items, 4)) +} + +func TestParalelize(t *testing.T) { + items := []int{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, + } + + results := map[int][]int{} + mu := &sync.Mutex{} + + parallelize(7, items, func(worker int, items []int) { + mu.Lock() + results[worker] = items + mu.Unlock() + }) + + assert.ElementsMatch(t, []int{1, 2, 3}, results[0]) + assert.ElementsMatch(t, []int{4, 5, 6}, results[1]) + assert.ElementsMatch(t, []int{7, 8, 9}, results[2]) + assert.ElementsMatch(t, []int{10, 11, 12}, results[3]) + assert.ElementsMatch(t, []int{13, 14, 15}, results[4]) + assert.ElementsMatch(t, []int{16}, results[5]) +} diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go index d74316b9d1..23625d7a49 100644 --- a/jsonrpc/endpoints_zkevm.go +++ b/jsonrpc/endpoints_zkevm.go @@ -128,7 +128,7 @@ func (z *ZKEVMEndpoints) VerifiedBatchNumber() (interface{}, types.Error) { func (z *ZKEVMEndpoints) GetBatchByNumber(batchNumber types.BatchNumber, fullTx bool) (interface{}, types.Error) { 
return z.txMan.NewDbTxScope(z.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { var err error - batchNumber, rpcErr := batchNumber.GetNumericBatchNumber(ctx, z.state, dbTx) + batchNumber, rpcErr := batchNumber.GetNumericBatchNumber(ctx, z.state, z.etherman, dbTx) if rpcErr != nil { return nil, rpcErr } @@ -137,49 +137,49 @@ func (z *ZKEVMEndpoints) GetBatchByNumber(batchNumber types.BatchNumber, fullTx if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch from state by number %v", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch from state by number %v", batchNumber), err, true) } txs, _, err := z.state.GetTransactionsByBatchNumber(ctx, batchNumber, dbTx) if !errors.Is(err, state.ErrNotFound) && err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch txs from state by number %v", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load batch txs from state by number %v", batchNumber), err, true) } receipts := make([]ethTypes.Receipt, 0, len(txs)) for _, tx := range txs { receipt, err := z.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err, true) } receipts = append(receipts, *receipt) } virtualBatch, err := z.state.GetVirtualBatch(ctx, batchNumber, dbTx) if err != nil && !errors.Is(err, state.ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load virtual batch from state by number %v", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load virtual batch from state by number 
%v", batchNumber), err, true) } verifiedBatch, err := z.state.GetVerifiedBatch(ctx, batchNumber, dbTx) if err != nil && !errors.Is(err, state.ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load virtual batch from state by number %v", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load virtual batch from state by number %v", batchNumber), err, true) } ger, err := z.state.GetExitRootByGlobalExitRoot(ctx, batch.GlobalExitRoot, dbTx) if err != nil && !errors.Is(err, state.ErrNotFound) { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load full GER from state by number %v", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load full GER from state by number %v", batchNumber), err, true) } else if errors.Is(err, state.ErrNotFound) { ger = &state.GlobalExitRoot{} } blocks, err := z.state.GetL2BlocksByBatchNumber(ctx, batchNumber, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load blocks associated to the batch %v", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load blocks associated to the batch %v", batchNumber), err, true) } batch.Transactions = txs rpcBatch, err := types.NewBatch(batch, virtualBatch, verifiedBatch, blocks, receipts, fullTx, true, ger) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build the batch %v response", batchNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build the batch %v response", batchNumber), err, true) } return rpcBatch, nil }) @@ -191,7 +191,7 @@ func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx b if number == types.PendingBlockNumber { lastBlock, err := z.state.GetLastL2Block(ctx, dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "couldn't load last block from state to 
compute the pending block", err) + return RPCErrorResponse(types.DefaultErrorCode, "couldn't load last block from state to compute the pending block", err, true) } header := ethTypes.CopyHeader(lastBlock.Header()) header.ParentHash = lastBlock.Hash() @@ -201,7 +201,7 @@ func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx b block := ethTypes.NewBlockWithHeader(header) rpcBlock, err := types.NewBlock(block, nil, fullTx, true) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err) + return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err, true) } return rpcBlock, nil @@ -216,7 +216,7 @@ func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx b if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load block from state by number %v", blockNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load block from state by number %v", blockNumber), err, true) } txs := block.Transactions() @@ -224,14 +224,14 @@ func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx b for _, tx := range txs { receipt, err := z.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err, true) } receipts = append(receipts, *receipt) } rpcBlock, err := types.NewBlock(block, receipts, fullTx, true) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by number %v", blockNumber), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by 
number %v", blockNumber), err, true) } return rpcBlock, nil @@ -245,7 +245,7 @@ func (z *ZKEVMEndpoints) GetFullBlockByHash(hash types.ArgHash, fullTx bool) (in if errors.Is(err, state.ErrNotFound) { return nil, nil } else if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash from state", err) + return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash from state", err, true) } txs := block.Transactions() @@ -253,16 +253,38 @@ func (z *ZKEVMEndpoints) GetFullBlockByHash(hash types.ArgHash, fullTx bool) (in for _, tx := range txs { receipt, err := z.state.GetTransactionReceipt(ctx, tx.Hash(), dbTx) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't load receipt for tx %v", tx.Hash().String()), err, true) } receipts = append(receipts, *receipt) } rpcBlock, err := types.NewBlock(block, receipts, fullTx, true) if err != nil { - return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by hash %v", hash.Hash()), err) + return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by hash %v", hash.Hash()), err, true) } return rpcBlock, nil }) } + +// GetNativeBlockHashesInRange return the state root for the blocks in range +func (z *ZKEVMEndpoints) GetNativeBlockHashesInRange(filter NativeBlockHashBlockRangeFilter) (interface{}, types.Error) { + return z.txMan.NewDbTxScope(z.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) { + fromBlockNumber, toBlockNumber, rpcErr := filter.GetNumericBlockNumbers(ctx, z.cfg, z.state, z.etherman, dbTx) + if rpcErr != nil { + return nil, rpcErr + } + + nativeBlockHashes, err := z.state.GetNativeBlockHashesInRange(ctx, fromBlockNumber, toBlockNumber, dbTx) + if errors.Is(err, state.ErrNotFound) { + return nil, nil + } 
else if errors.Is(err, state.ErrMaxNativeBlockHashBlockRangeLimitExceeded) { + errMsg := fmt.Sprintf(state.ErrMaxNativeBlockHashBlockRangeLimitExceeded.Error(), z.cfg.MaxNativeBlockHashBlockRange) + return RPCErrorResponse(types.InvalidParamsErrorCode, errMsg, nil, false) + } else if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash from state", err, true) + } + + return nativeBlockHashes, nil + }) +} diff --git a/jsonrpc/endpoints_zkevm.openrpc.json b/jsonrpc/endpoints_zkevm.openrpc.json index 4a7661f89d..f70556a39f 100644 --- a/jsonrpc/endpoints_zkevm.openrpc.json +++ b/jsonrpc/endpoints_zkevm.openrpc.json @@ -325,6 +325,24 @@ "$ref": "#/components/schemas/FullBlockOrNull" } } + }, + { + "name": "zkevm_getNativeBlockHashesInRange", + "summary": "Returns the list of native block hashes.", + "params": [ + { + "name": "filter", + "schema": { + "$ref": "#/components/schemas/NativeBlockHashBlockRangeFilter" + } + } + ], + "result": { + "name": "filter", + "schema": { + "$ref": "#/components/schemas/NativeBlockHashes" + } + } } ], "components": { @@ -1115,6 +1133,26 @@ "type": "string", "description": "Hex representation of a variable length byte array", "pattern": "^0x([a-fA-F0-9]?)+$" + }, + "NativeBlockHashes": { + "title": "native block hashes", + "description": "An array of hashes", + "type": "array", + "items": { + "$ref": "#/components/schemas/Keccak" + } + }, + "NativeBlockHashBlockRangeFilter": { + "title": "NativeBlockHashBlockRangeFilter", + "type": "object", + "properties": { + "fromBlock": { + "$ref": "#/components/schemas/BlockNumber" + }, + "toBlock": { + "$ref": "#/components/schemas/BlockNumber" + } + } } } } diff --git a/jsonrpc/endpoints_zkevm_test.go b/jsonrpc/endpoints_zkevm_test.go index bac0229302..15f8e044f2 100644 --- a/jsonrpc/endpoints_zkevm_test.go +++ b/jsonrpc/endpoints_zkevm_test.go @@ -22,7 +22,7 @@ import ( ) const ( - forkID5 = 5 + forkID6 = 6 ) func TestConsolidatedBlockNumber(t *testing.T) { 
@@ -714,7 +714,7 @@ func TestGetBatchByNumber(t *testing.T) { batchTxs = append(batchTxs, *tx) effectivePercentages = append(effectivePercentages, state.MaxEffectivePercentage) } - batchL2Data, err := state.EncodeTransactions(batchTxs, effectivePercentages, forkID5) + batchL2Data, err := state.EncodeTransactions(batchTxs, effectivePercentages, forkID6) require.NoError(t, err) tc.ExpectedResult.BatchL2Data = batchL2Data batch := &state.Batch{ @@ -833,7 +833,7 @@ func TestGetBatchByNumber(t *testing.T) { batchTxs = append(batchTxs, *tx) effectivePercentages = append(effectivePercentages, state.MaxEffectivePercentage) } - batchL2Data, err := state.EncodeTransactions(batchTxs, effectivePercentages, forkID5) + batchL2Data, err := state.EncodeTransactions(batchTxs, effectivePercentages, forkID6) require.NoError(t, err) batch := &state.Batch{ @@ -925,7 +925,7 @@ func TestGetBatchByNumber(t *testing.T) { Once() m.State. - On("GetLastBatchNumber", context.Background(), m.DbTx). + On("GetLastClosedBatchNumber", context.Background(), m.DbTx). Return(uint64(tc.ExpectedResult.Number), nil). Once() @@ -982,7 +982,7 @@ func TestGetBatchByNumber(t *testing.T) { batchTxs = append(batchTxs, *tx) effectivePercentages = append(effectivePercentages, state.MaxEffectivePercentage) } - batchL2Data, err := state.EncodeTransactions(batchTxs, effectivePercentages, forkID5) + batchL2Data, err := state.EncodeTransactions(batchTxs, effectivePercentages, forkID6) require.NoError(t, err) var fb uint64 = 1 batch := &state.Batch{ @@ -1063,7 +1063,7 @@ func TestGetBatchByNumber(t *testing.T) { Once() m.State. - On("GetLastBatchNumber", context.Background(), m.DbTx). + On("GetLastClosedBatchNumber", context.Background(), m.DbTx). Return(uint64(0), errors.New("failed to get last batch number")). Once() }, @@ -1085,7 +1085,7 @@ func TestGetBatchByNumber(t *testing.T) { Once() m.State. - On("GetLastBatchNumber", context.Background(), m.DbTx). 
+ On("GetLastClosedBatchNumber", context.Background(), m.DbTx). Return(uint64(1), nil). Once() @@ -1325,7 +1325,8 @@ func TestGetL2FullBlockByNumber(t *testing.T) { m.State. On("GetL2BlockByNumber", context.Background(), hex.DecodeUint64(tc.Number), m.DbTx). - Return(nil, state.ErrNotFound) + Return(nil, state.ErrNotFound). + Once() }, }, { @@ -1547,6 +1548,158 @@ func TestGetL2FullBlockByNumber(t *testing.T) { } } +func TestGetNativeBlockHashesInRange(t *testing.T) { + type testCase struct { + Name string + Filter NativeBlockHashBlockRangeFilter + ExpectedResult *[]string + ExpectedError interface{} + SetupMocks func(*mocksWrapper, *testCase) + } + + testCases := []testCase{ + { + Name: "Block not found", + Filter: NativeBlockHashBlockRangeFilter{ + FromBlock: types.BlockNumber(0), + ToBlock: types.BlockNumber(10), + }, + ExpectedResult: ptr([]string{}), + ExpectedError: nil, + SetupMocks: func(m *mocksWrapper, tc *testCase) { + m.DbTx. + On("Commit", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + + fromBlock, _ := tc.Filter.FromBlock.GetNumericBlockNumber(context.Background(), nil, nil, nil) + toBlock, _ := tc.Filter.ToBlock.GetNumericBlockNumber(context.Background(), nil, nil, nil) + + m.State. + On("GetNativeBlockHashesInRange", context.Background(), fromBlock, toBlock, m.DbTx). + Return([]common.Hash{}, nil). + Once() + }, + }, + { + Name: "native block hash range returned successfully", + Filter: NativeBlockHashBlockRangeFilter{ + FromBlock: types.BlockNumber(0), + ToBlock: types.BlockNumber(10), + }, + ExpectedResult: ptr([]string{}), + ExpectedError: nil, + SetupMocks: func(m *mocksWrapper, tc *testCase) { + m.DbTx. + On("Commit", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). 
+ Once() + + fromBlock, _ := tc.Filter.FromBlock.GetNumericBlockNumber(context.Background(), nil, nil, nil) + toBlock, _ := tc.Filter.ToBlock.GetNumericBlockNumber(context.Background(), nil, nil, nil) + hashes := []common.Hash{} + expectedResult := []string{} + for i := fromBlock; i < toBlock; i++ { + sHash := hex.EncodeUint64(i) + hash := common.HexToHash(sHash) + hashes = append(hashes, hash) + expectedResult = append(expectedResult, hash.String()) + } + tc.ExpectedResult = &expectedResult + + m.State. + On("GetNativeBlockHashesInRange", context.Background(), fromBlock, toBlock, m.DbTx). + Return(hashes, nil). + Once() + }, + }, + { + Name: "native block hash range fails due to invalid range", + Filter: NativeBlockHashBlockRangeFilter{ + FromBlock: types.BlockNumber(10), + ToBlock: types.BlockNumber(0), + }, + ExpectedResult: nil, + ExpectedError: types.NewRPCError(types.InvalidParamsErrorCode, "invalid block range"), + SetupMocks: func(m *mocksWrapper, tc *testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). + Once() + }, + }, + { + Name: "native block hash range fails due to range limit", + Filter: NativeBlockHashBlockRangeFilter{ + FromBlock: types.BlockNumber(0), + ToBlock: types.BlockNumber(60001), + }, + ExpectedResult: nil, + ExpectedError: types.NewRPCError(types.InvalidParamsErrorCode, "native block hashes are limited to a 60000 block range"), + SetupMocks: func(m *mocksWrapper, tc *testCase) { + m.DbTx. + On("Rollback", context.Background()). + Return(nil). + Once() + + m.State. + On("BeginStateTransaction", context.Background()). + Return(m.DbTx, nil). 
+ Once() + }, + }, + } + + s, m, _ := newSequencerMockedServer(t) + defer s.Stop() + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + tc := testCase + testCase.SetupMocks(m, &tc) + + res, err := s.JSONRPCCall("zkevm_getNativeBlockHashesInRange", tc.Filter) + require.NoError(t, err) + + if tc.ExpectedResult != nil { + require.NotNil(t, res.Result) + require.Nil(t, res.Error) + + var result []string + err = json.Unmarshal(res.Result, &result) + require.NoError(t, err) + + assert.Equal(t, len(*tc.ExpectedResult), len(result)) + assert.ElementsMatch(t, *tc.ExpectedResult, result) + } + + if tc.ExpectedError != nil { + if expectedErr, ok := tc.ExpectedError.(*types.RPCError); ok { + assert.Equal(t, expectedErr.ErrorCode(), res.Error.Code) + assert.Equal(t, expectedErr.Error(), res.Error.Message) + } else { + assert.Equal(t, tc.ExpectedError, err) + } + } + }) + } +} + func ptrUint64(n uint64) *uint64 { return &n } @@ -1565,6 +1718,10 @@ func ptrHash(h common.Hash) *common.Hash { return &h } +func ptr[T any](v T) *T { + return &v +} + func signTx(tx *ethTypes.Transaction, chainID uint64) *ethTypes.Transaction { privateKey, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(0).SetUint64(chainID)) diff --git a/jsonrpc/handler.go b/jsonrpc/handler.go index 6a1f301940..3edb3180e2 100644 --- a/jsonrpc/handler.go +++ b/jsonrpc/handler.go @@ -6,12 +6,10 @@ import ( "net/http" "reflect" "strings" - "sync" "unicode" "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/gorilla/websocket" ) const ( @@ -36,7 +34,7 @@ func (f *funcData) numParams() int { type handleRequest struct { types.Request - wsConn *websocket.Conn + wsConn *concurrentWsConn HttpRequest *http.Request } @@ -73,23 +71,10 @@ func newJSONRpcHandler() *Handler { return handler } -var connectionCounter = 0 -var connectionCounterMutex sync.Mutex - // Handle is the function that 
knows which and how a function should // be executed when a JSON RPC request is received func (h *Handler) Handle(req handleRequest) types.Response { log := log.WithFields("method", req.Method, "requestId", req.ID) - connectionCounterMutex.Lock() - connectionCounter++ - connectionCounterMutex.Unlock() - defer func() { - connectionCounterMutex.Lock() - connectionCounter-- - connectionCounterMutex.Unlock() - log.Debugf("Current open connections %d", connectionCounter) - }() - log.Debugf("Current open connections %d", connectionCounter) log.Debugf("request params %v", string(req.Params)) service, fd, err := h.getFnHandler(req.Request) @@ -106,7 +91,7 @@ func (h *Handler) Handle(req handleRequest) types.Response { firstFuncParamIsWebSocketConn := false firstFuncParamIsHttpRequest := false if funcHasMoreThanOneInputParams { - firstFuncParamIsWebSocketConn = fd.reqt[1].AssignableTo(reflect.TypeOf(&websocket.Conn{})) + firstFuncParamIsWebSocketConn = fd.reqt[1].AssignableTo(reflect.TypeOf(&concurrentWsConn{})) firstFuncParamIsHttpRequest = fd.reqt[1].AssignableTo(reflect.TypeOf(&http.Request{})) } if requestHasWebSocketConn && firstFuncParamIsWebSocketConn { @@ -156,7 +141,7 @@ func (h *Handler) Handle(req handleRequest) types.Response { } // HandleWs handle websocket requests -func (h *Handler) HandleWs(reqBody []byte, wsConn *websocket.Conn, httpReq *http.Request) ([]byte, error) { +func (h *Handler) HandleWs(reqBody []byte, wsConn *concurrentWsConn, httpReq *http.Request) ([]byte, error) { log.Debugf("WS message received: %v", string(reqBody)) var req types.Request if err := json.Unmarshal(reqBody, &req); err != nil { @@ -173,7 +158,7 @@ func (h *Handler) HandleWs(reqBody []byte, wsConn *websocket.Conn, httpReq *http } // RemoveFilterByWsConn uninstalls the filter attached to this websocket connection -func (h *Handler) RemoveFilterByWsConn(wsConn *websocket.Conn) { +func (h *Handler) RemoveFilterByWsConn(wsConn *concurrentWsConn) { service, ok := h.serviceMap[APIEth] 
if !ok { return diff --git a/jsonrpc/interfaces.go b/jsonrpc/interfaces.go index f1fce40123..acfec7205b 100644 --- a/jsonrpc/interfaces.go +++ b/jsonrpc/interfaces.go @@ -1,18 +1,14 @@ package jsonrpc -import ( - "github.com/gorilla/websocket" -) - // storageInterface json rpc internal storage to persist data type storageInterface interface { - GetAllBlockFiltersWithWSConn() ([]*Filter, error) - GetAllLogFiltersWithWSConn() ([]*Filter, error) + GetAllBlockFiltersWithWSConn() []*Filter + GetAllLogFiltersWithWSConn() []*Filter GetFilter(filterID string) (*Filter, error) - NewBlockFilter(wsConn *websocket.Conn) (string, error) - NewLogFilter(wsConn *websocket.Conn, filter LogFilter) (string, error) - NewPendingTransactionFilter(wsConn *websocket.Conn) (string, error) + NewBlockFilter(wsConn *concurrentWsConn) (string, error) + NewLogFilter(wsConn *concurrentWsConn, filter LogFilter) (string, error) + NewPendingTransactionFilter(wsConn *concurrentWsConn) (string, error) UninstallFilter(filterID string) error - UninstallFilterByWSConn(wsConn *websocket.Conn) error + UninstallFilterByWSConn(wsConn *concurrentWsConn) error UpdateFilterLastPoll(filterID string) error } diff --git a/jsonrpc/metrics/metrics.go b/jsonrpc/metrics/metrics.go index 4ffefca2a1..d4e2e12457 100644 --- a/jsonrpc/metrics/metrics.go +++ b/jsonrpc/metrics/metrics.go @@ -23,6 +23,8 @@ type RequestHandledLabel string const ( // RequestHandledLabelInvalid represents an request of type invalid RequestHandledLabelInvalid RequestHandledLabel = "invalid" + // RequestHandledLabelError represents an request of type error + RequestHandledLabelError RequestHandledLabel = "error" // RequestHandledLabelSingle represents an request of type single RequestHandledLabelSingle RequestHandledLabel = "single" // RequestHandledLabelBatch represents an request of type batch diff --git a/jsonrpc/mock_storage.go b/jsonrpc/mock_storage.go index 105bad5455..e32d1205b9 100644 --- a/jsonrpc/mock_storage.go +++ 
b/jsonrpc/mock_storage.go @@ -1,11 +1,8 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package jsonrpc -import ( - websocket "github.com/gorilla/websocket" - mock "github.com/stretchr/testify/mock" -) +import mock "github.com/stretchr/testify/mock" // storageMock is an autogenerated mock type for the storageInterface type type storageMock struct { @@ -13,14 +10,10 @@ type storageMock struct { } // GetAllBlockFiltersWithWSConn provides a mock function with given fields: -func (_m *storageMock) GetAllBlockFiltersWithWSConn() ([]*Filter, error) { +func (_m *storageMock) GetAllBlockFiltersWithWSConn() []*Filter { ret := _m.Called() var r0 []*Filter - var r1 error - if rf, ok := ret.Get(0).(func() ([]*Filter, error)); ok { - return rf() - } if rf, ok := ret.Get(0).(func() []*Filter); ok { r0 = rf() } else { @@ -29,24 +22,14 @@ func (_m *storageMock) GetAllBlockFiltersWithWSConn() ([]*Filter, error) { } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } // GetAllLogFiltersWithWSConn provides a mock function with given fields: -func (_m *storageMock) GetAllLogFiltersWithWSConn() ([]*Filter, error) { +func (_m *storageMock) GetAllLogFiltersWithWSConn() []*Filter { ret := _m.Called() var r0 []*Filter - var r1 error - if rf, ok := ret.Get(0).(func() ([]*Filter, error)); ok { - return rf() - } if rf, ok := ret.Get(0).(func() []*Filter); ok { r0 = rf() } else { @@ -55,13 +38,7 @@ func (_m *storageMock) GetAllLogFiltersWithWSConn() ([]*Filter, error) { } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } // GetFilter provides a mock function with given fields: filterID @@ -91,21 +68,21 @@ func (_m *storageMock) GetFilter(filterID string) (*Filter, error) { } // NewBlockFilter provides a mock function with given fields: wsConn -func (_m *storageMock) 
NewBlockFilter(wsConn *websocket.Conn) (string, error) { +func (_m *storageMock) NewBlockFilter(wsConn *concurrentWsConn) (string, error) { ret := _m.Called(wsConn) var r0 string var r1 error - if rf, ok := ret.Get(0).(func(*websocket.Conn) (string, error)); ok { + if rf, ok := ret.Get(0).(func(*concurrentWsConn) (string, error)); ok { return rf(wsConn) } - if rf, ok := ret.Get(0).(func(*websocket.Conn) string); ok { + if rf, ok := ret.Get(0).(func(*concurrentWsConn) string); ok { r0 = rf(wsConn) } else { r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(*websocket.Conn) error); ok { + if rf, ok := ret.Get(1).(func(*concurrentWsConn) error); ok { r1 = rf(wsConn) } else { r1 = ret.Error(1) @@ -115,21 +92,21 @@ func (_m *storageMock) NewBlockFilter(wsConn *websocket.Conn) (string, error) { } // NewLogFilter provides a mock function with given fields: wsConn, filter -func (_m *storageMock) NewLogFilter(wsConn *websocket.Conn, filter LogFilter) (string, error) { +func (_m *storageMock) NewLogFilter(wsConn *concurrentWsConn, filter LogFilter) (string, error) { ret := _m.Called(wsConn, filter) var r0 string var r1 error - if rf, ok := ret.Get(0).(func(*websocket.Conn, LogFilter) (string, error)); ok { + if rf, ok := ret.Get(0).(func(*concurrentWsConn, LogFilter) (string, error)); ok { return rf(wsConn, filter) } - if rf, ok := ret.Get(0).(func(*websocket.Conn, LogFilter) string); ok { + if rf, ok := ret.Get(0).(func(*concurrentWsConn, LogFilter) string); ok { r0 = rf(wsConn, filter) } else { r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(*websocket.Conn, LogFilter) error); ok { + if rf, ok := ret.Get(1).(func(*concurrentWsConn, LogFilter) error); ok { r1 = rf(wsConn, filter) } else { r1 = ret.Error(1) @@ -139,21 +116,21 @@ func (_m *storageMock) NewLogFilter(wsConn *websocket.Conn, filter LogFilter) (s } // NewPendingTransactionFilter provides a mock function with given fields: wsConn -func (_m *storageMock) NewPendingTransactionFilter(wsConn 
*websocket.Conn) (string, error) { +func (_m *storageMock) NewPendingTransactionFilter(wsConn *concurrentWsConn) (string, error) { ret := _m.Called(wsConn) var r0 string var r1 error - if rf, ok := ret.Get(0).(func(*websocket.Conn) (string, error)); ok { + if rf, ok := ret.Get(0).(func(*concurrentWsConn) (string, error)); ok { return rf(wsConn) } - if rf, ok := ret.Get(0).(func(*websocket.Conn) string); ok { + if rf, ok := ret.Get(0).(func(*concurrentWsConn) string); ok { r0 = rf(wsConn) } else { r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(*websocket.Conn) error); ok { + if rf, ok := ret.Get(1).(func(*concurrentWsConn) error); ok { r1 = rf(wsConn) } else { r1 = ret.Error(1) @@ -177,11 +154,11 @@ func (_m *storageMock) UninstallFilter(filterID string) error { } // UninstallFilterByWSConn provides a mock function with given fields: wsConn -func (_m *storageMock) UninstallFilterByWSConn(wsConn *websocket.Conn) error { +func (_m *storageMock) UninstallFilterByWSConn(wsConn *concurrentWsConn) error { ret := _m.Called(wsConn) var r0 error - if rf, ok := ret.Get(0).(func(*websocket.Conn) error); ok { + if rf, ok := ret.Get(0).(func(*concurrentWsConn) error); ok { r0 = rf(wsConn) } else { r0 = ret.Error(0) @@ -204,13 +181,12 @@ func (_m *storageMock) UpdateFilterLastPoll(filterID string) error { return r0 } -type mockConstructorTestingTnewStorageMock interface { +// newStorageMock creates a new instance of storageMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newStorageMock(t interface { mock.TestingT Cleanup(func()) -} - -// newStorageMock creates a new instance of storageMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newStorageMock(t mockConstructorTestingTnewStorageMock) *storageMock { +}) *storageMock { mock := &storageMock{} mock.Mock.Test(t) diff --git a/jsonrpc/mocks/mock_dbtx.go b/jsonrpc/mocks/mock_dbtx.go index cfbca16e32..10b1d0da07 100644 --- a/jsonrpc/mocks/mock_dbtx.go +++ b/jsonrpc/mocks/mock_dbtx.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -283,13 +283,12 @@ func (_m *DBTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResult return r0 } -type mockConstructorTestingTNewDBTxMock interface { +// NewDBTxMock creates a new instance of DBTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDBTxMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewDBTxMock creates a new instance of DBTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDBTxMock(t mockConstructorTestingTNewDBTxMock) *DBTxMock { +}) *DBTxMock { mock := &DBTxMock{} mock.Mock.Test(t) diff --git a/jsonrpc/mocks/mock_etherman.go b/jsonrpc/mocks/mock_etherman.go index 96f1d60340..e6a7605fd4 100644 --- a/jsonrpc/mocks/mock_etherman.go +++ b/jsonrpc/mocks/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -61,13 +61,12 @@ func (_m *EthermanMock) GetSafeBlockNumber(ctx context.Context) (uint64, error) return r0, r1 } -type mockConstructorTestingTNewEthermanMock interface { +// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEthermanMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEthermanMock(t mockConstructorTestingTNewEthermanMock) *EthermanMock { +}) *EthermanMock { mock := &EthermanMock{} mock.Mock.Test(t) diff --git a/jsonrpc/mocks/mock_pool.go b/jsonrpc/mocks/mock_pool.go index 99269d0574..7a80e748fe 100644 --- a/jsonrpc/mocks/mock_pool.go +++ b/jsonrpc/mocks/mock_pool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -185,13 +185,12 @@ func (_m *PoolMock) GetTxByHash(ctx context.Context, hash common.Hash) (*pool.Tr return r0, r1 } -type mockConstructorTestingTNewPoolMock interface { +// NewPoolMock creates a new instance of PoolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPoolMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewPoolMock creates a new instance of PoolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPoolMock(t mockConstructorTestingTNewPoolMock) *PoolMock { +}) *PoolMock { mock := &PoolMock{} mock.Mock.Test(t) diff --git a/jsonrpc/mocks/mock_state.go b/jsonrpc/mocks/mock_state.go index 73ed569a26..0741020cc9 100644 --- a/jsonrpc/mocks/mock_state.go +++ b/jsonrpc/mocks/mock_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package mocks @@ -465,6 +465,30 @@ func (_m *StateMock) GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint6 return r0, r1 } +// GetLastClosedBatchNumber provides a mock function with given fields: ctx, dbTx +func (_m *StateMock) GetLastClosedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) { + ret := _m.Called(ctx, dbTx) + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (uint64, error)); ok { + return rf(ctx, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) uint64); ok { + r0 = rf(ctx, dbTx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) error); ok { + r1 = rf(ctx, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLastConsolidatedL2BlockNumber provides a mock function with given fields: ctx, dbTx func (_m *StateMock) GetLastConsolidatedL2BlockNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) { ret := _m.Called(ctx, dbTx) @@ -565,6 +589,54 @@ func (_m *StateMock) GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx) (*st return r0, r1 } +// GetLastVerifiedBatchNumberUntilL1Block provides a mock function with given fields: ctx, l1BlockNumber, dbTx +func (_m *StateMock) GetLastVerifiedBatchNumberUntilL1Block(ctx context.Context, l1BlockNumber uint64, dbTx pgx.Tx) (uint64, error) { + ret := _m.Called(ctx, l1BlockNumber, dbTx) + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (uint64, error)); ok { + return rf(ctx, l1BlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) uint64); ok { + r0 = rf(ctx, l1BlockNumber, dbTx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, l1BlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastVerifiedL2BlockNumberUntilL1Block provides a mock function with given fields: ctx, 
l1FinalizedBlockNumber, dbTx +func (_m *StateMock) GetLastVerifiedL2BlockNumberUntilL1Block(ctx context.Context, l1FinalizedBlockNumber uint64, dbTx pgx.Tx) (uint64, error) { + ret := _m.Called(ctx, l1FinalizedBlockNumber, dbTx) + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (uint64, error)); ok { + return rf(ctx, l1FinalizedBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) uint64); ok { + r0 = rf(ctx, l1FinalizedBlockNumber, dbTx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, l1FinalizedBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLastVirtualBatchNum provides a mock function with given fields: ctx, dbTx func (_m *StateMock) GetLastVirtualBatchNum(ctx context.Context, dbTx pgx.Tx) (uint64, error) { ret := _m.Called(ctx, dbTx) @@ -639,6 +711,32 @@ func (_m *StateMock) GetLogs(ctx context.Context, fromBlock uint64, toBlock uint return r0, r1 } +// GetNativeBlockHashesInRange provides a mock function with given fields: ctx, fromBlockNumber, toBlockNumber, dbTx +func (_m *StateMock) GetNativeBlockHashesInRange(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]common.Hash, error) { + ret := _m.Called(ctx, fromBlockNumber, toBlockNumber, dbTx) + + var r0 []common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]common.Hash, error)); ok { + return rf(ctx, fromBlockNumber, toBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []common.Hash); ok { + r0 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + // GetNonce provides a mock function with given fields: ctx, address, root func (_m *StateMock) GetNonce(ctx context.Context, address common.Address, root common.Hash) (uint64, error) { ret := _m.Called(ctx, address, root) @@ -976,11 +1074,6 @@ func (_m *StateMock) IsL2BlockVirtualized(ctx context.Context, blockNumber uint6 return r0, r1 } -// PrepareWebSocket provides a mock function with given fields: -func (_m *StateMock) PrepareWebSocket() { - _m.Called() -} - // ProcessUnsignedTransaction provides a mock function with given fields: ctx, tx, senderAddress, l2BlockNumber, noZKEVMCounters, dbTx func (_m *StateMock) ProcessUnsignedTransaction(ctx context.Context, tx *coretypes.Transaction, senderAddress common.Address, l2BlockNumber *uint64, noZKEVMCounters bool, dbTx pgx.Tx) (*runtime.ExecutionResult, error) { ret := _m.Called(ctx, tx, senderAddress, l2BlockNumber, noZKEVMCounters, dbTx) @@ -1012,13 +1105,17 @@ func (_m *StateMock) RegisterNewL2BlockEventHandler(h state.NewL2BlockEventHandl _m.Called(h) } -type mockConstructorTestingTNewStateMock interface { - mock.TestingT - Cleanup(func()) +// StartToMonitorNewL2Blocks provides a mock function with given fields: +func (_m *StateMock) StartToMonitorNewL2Blocks() { + _m.Called() } // NewStateMock creates a new instance of StateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStateMock(t mockConstructorTestingTNewStateMock) *StateMock { +// The first argument is typically a *testing.T value. 
+func NewStateMock(t interface { + mock.TestingT + Cleanup(func()) +}) *StateMock { mock := &StateMock{} mock.Mock.Test(t) diff --git a/jsonrpc/nacos/start.go b/jsonrpc/nacos/start.go index 0bfee1d5c0..a319b11b12 100644 --- a/jsonrpc/nacos/start.go +++ b/jsonrpc/nacos/start.go @@ -13,6 +13,13 @@ import ( "github.com/nacos-group/nacos-sdk-go/vo" ) +const ( + defaultPort = 26659 + defaultTimeoutMs = uint64(5000) + defaultListenInterval = uint64(10000) + defaultWeight = float64(10) +) + // StartNacosClient start nacos client and register rest service in nacos func StartNacosClient(urls string, namespace string, name string, externalAddr string) { ip, port, err := ResolveIPAndPort(externalAddr) @@ -26,15 +33,11 @@ func StartNacosClient(urls string, namespace string, name string, externalAddr s log.Error(fmt.Sprintf("failed to resolve nacos server url %s: %s", urls, err.Error())) return } - - const timeoutMs = 5000 - const listenInterval = 10000 - client, err := clients.CreateNamingClient(map[string]interface{}{ "serverConfigs": serverConfigs, "clientConfig": constant.ClientConfig{ - TimeoutMs: timeoutMs, - ListenInterval: listenInterval, + TimeoutMs: defaultTimeoutMs, + ListenInterval: defaultListenInterval, NotLoadCacheAtStart: true, NamespaceId: namespace, LogDir: "/dev/null", @@ -46,12 +49,11 @@ func StartNacosClient(urls string, namespace string, name string, externalAddr s return } - const weight = 10 _, err = client.RegisterInstance(vo.RegisterInstanceParam{ Ip: ip, Port: uint64(port), ServiceName: name, - Weight: weight, + Weight: defaultWeight, ClusterName: "DEFAULT", Enable: true, Healthy: true, @@ -73,8 +75,7 @@ func ResolveIPAndPort(addr string) (string, int, error) { laddr := strings.Split(addr, ":") ip := laddr[0] if ip == "127.0.0.1" { - const port = 26659 - return GetLocalIP(), port, nil + return GetLocalIP(), defaultPort, nil } port, err := strconv.Atoi(laddr[1]) if err != nil { diff --git a/jsonrpc/nacos/utils.go b/jsonrpc/nacos/utils.go index 
10e3d7ceb0..1b9b755e59 100644 --- a/jsonrpc/nacos/utils.go +++ b/jsonrpc/nacos/utils.go @@ -18,12 +18,11 @@ func GetOneInstance(urls string, nameSpace string, param vo.SelectOneHealthInsta return nil, fmt.Errorf("failed to resolve nacos server url %s: %s", urls, err.Error()) } - const timeoutMs = 5000 namingClient, err := clients.CreateNamingClient(map[string]interface{}{ "serverConfigs": serverConfigs, "clientConfig": constant.ClientConfig{ NamespaceId: nameSpace, - TimeoutMs: timeoutMs, + TimeoutMs: defaultTimeoutMs, NotLoadCacheAtStart: true, LogDir: "/dev/null", }, diff --git a/jsonrpc/query.go b/jsonrpc/query.go index 2cc375dd36..57cf626bcc 100644 --- a/jsonrpc/query.go +++ b/jsonrpc/query.go @@ -1,14 +1,19 @@ package jsonrpc import ( + "context" "encoding/json" "fmt" + "sync" "time" "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" "github.com/ethereum/go-ethereum/common" "github.com/gorilla/websocket" + "github.com/jackc/pgx/v4" ) const ( @@ -26,7 +31,70 @@ type Filter struct { Type FilterType Parameters interface{} LastPoll time.Time - WsConn *websocket.Conn + WsConn *concurrentWsConn + + wsQueue *state.Queue[[]byte] + wsQueueSignal *sync.Cond +} + +// EnqueueSubscriptionDataToBeSent enqueues subscription data to be sent +// via web sockets connection +func (f *Filter) EnqueueSubscriptionDataToBeSent(data []byte) { + f.wsQueue.Push(data) + f.wsQueueSignal.Broadcast() +} + +// SendEnqueuedSubscriptionData consumes all the enqueued subscription data +// and sends it via web sockets connection. 
+func (f *Filter) SendEnqueuedSubscriptionData() { + for { + // wait for a signal that a new item was + // added to the queue + log.Debugf("waiting subscription data signal") + f.wsQueueSignal.L.Lock() + f.wsQueueSignal.Wait() + f.wsQueueSignal.L.Unlock() + log.Debugf("subscription data signal received, sending enqueued data") + for { + d, err := f.wsQueue.Pop() + if err == state.ErrQueueEmpty { + break + } else if err != nil { + log.Errorf("failed to pop subscription data from queue to be sent via web sockets to filter %v, %s", f.ID, err.Error()) + break + } + f.sendSubscriptionResponse(d) + } + } +} + +// sendSubscriptionResponse send data as subscription response via +// web sockets connection controlled by a mutex +func (f *Filter) sendSubscriptionResponse(data []byte) { + const errMessage = "Unable to write WS message to filter %v, %s" + + start := time.Now() + res := types.SubscriptionResponse{ + JSONRPC: "2.0", + Method: "eth_subscription", + Params: types.SubscriptionResponseParams{ + Subscription: f.ID, + Result: data, + }, + } + message, err := json.Marshal(res) + if err != nil { + log.Errorf(fmt.Sprintf(errMessage, f.ID, err.Error())) + return + } + + err = f.WsConn.WriteMessage(websocket.TextMessage, message) + if err != nil { + log.Errorf(fmt.Sprintf(errMessage, f.ID, err.Error())) + return + } + log.Debugf("WS message sent: %v", string(message)) + log.Infof("[SendSubscriptionResponse] took %v", time.Since(start)) } // FilterType express the type of the filter, block, logs, pending transactions @@ -88,19 +156,19 @@ func (f *LogFilter) MarshalJSON() ([]byte, error) { obj.BlockHash = f.BlockHash if f.FromBlock != nil && (*f.FromBlock == types.LatestBlockNumber) { - fromblock := "" - obj.FromBlock = &fromblock + fromBlock := "" + obj.FromBlock = &fromBlock } else if f.FromBlock != nil { - fromblock := hex.EncodeUint64(uint64(*f.FromBlock)) - obj.FromBlock = &fromblock + fromBlock := hex.EncodeUint64(uint64(*f.FromBlock)) + obj.FromBlock = &fromBlock } if 
f.ToBlock != nil && (*f.ToBlock == types.LatestBlockNumber) { - toblock := "" - obj.ToBlock = &toblock + toBlock := "" + obj.ToBlock = &toBlock } else if f.ToBlock != nil { - toblock := hex.EncodeUint64(uint64(*f.ToBlock)) - obj.ToBlock = &toblock + toBlock := hex.EncodeUint64(uint64(*f.ToBlock)) + obj.ToBlock = &toBlock } if f.Addresses != nil { @@ -265,3 +333,71 @@ func (f *LogFilter) Match(log *types.Log) bool { return true } + +// GetNumericBlockNumbers load the numeric block numbers from state accordingly +// to the provided from and to block number +func (f *LogFilter) GetNumericBlockNumbers(ctx context.Context, cfg Config, s types.StateInterface, e types.EthermanInterface, dbTx pgx.Tx) (uint64, uint64, types.Error) { + return getNumericBlockNumbers(ctx, s, e, f.FromBlock, f.ToBlock, cfg.MaxLogsBlockRange, state.ErrMaxLogsBlockRangeLimitExceeded, dbTx) +} + +// ShouldFilterByBlockHash if the filter should consider the block hash value +func (f *LogFilter) ShouldFilterByBlockHash() bool { + return f.BlockHash != nil +} + +// ShouldFilterByBlockRange if the filter should consider the block range values +func (f *LogFilter) ShouldFilterByBlockRange() bool { + return f.FromBlock != nil || f.ToBlock != nil +} + +// Validate check if the filter instance is valid +func (f *LogFilter) Validate() error { + if f.ShouldFilterByBlockHash() && f.ShouldFilterByBlockRange() { + return ErrFilterInvalidPayload + } + return nil +} + +// NativeBlockHashBlockRangeFilter is a filter to filter native block hash by block by number +type NativeBlockHashBlockRangeFilter struct { + FromBlock types.BlockNumber `json:"fromBlock"` + ToBlock types.BlockNumber `json:"toBlock"` +} + +// GetNumericBlockNumbers load the numeric block numbers from state accordingly +// to the provided from and to block number +func (f *NativeBlockHashBlockRangeFilter) GetNumericBlockNumbers(ctx context.Context, cfg Config, s types.StateInterface, e types.EthermanInterface, dbTx pgx.Tx) (uint64, uint64, 
types.Error) { + return getNumericBlockNumbers(ctx, s, e, &f.FromBlock, &f.ToBlock, cfg.MaxNativeBlockHashBlockRange, state.ErrMaxNativeBlockHashBlockRangeLimitExceeded, dbTx) +} + +// getNumericBlockNumbers load the numeric block numbers from state accordingly +// to the provided from and to block number +func getNumericBlockNumbers(ctx context.Context, s types.StateInterface, e types.EthermanInterface, fromBlock, toBlock *types.BlockNumber, maxBlockRange uint64, maxBlockRangeErr error, dbTx pgx.Tx) (uint64, uint64, types.Error) { + var fromBlockNumber uint64 = 0 + if fromBlock != nil { + fbn, rpcErr := fromBlock.GetNumericBlockNumber(ctx, s, e, dbTx) + if rpcErr != nil { + return 0, 0, rpcErr + } + fromBlockNumber = fbn + } + + toBlockNumber, rpcErr := toBlock.GetNumericBlockNumber(ctx, s, e, dbTx) + if rpcErr != nil { + return 0, 0, rpcErr + } + + if toBlockNumber < fromBlockNumber { + _, rpcErr := RPCErrorResponse(types.InvalidParamsErrorCode, state.ErrInvalidBlockRange.Error(), nil, false) + return 0, 0, rpcErr + } + + blockRange := toBlockNumber - fromBlockNumber + if maxBlockRange > 0 && blockRange > maxBlockRange { + errMsg := fmt.Sprintf(maxBlockRangeErr.Error(), maxBlockRange) + _, rpcErr := RPCErrorResponse(types.InvalidParamsErrorCode, errMsg, nil, false) + return 0, 0, rpcErr + } + + return fromBlockNumber, toBlockNumber, nil +} diff --git a/jsonrpc/server.go b/jsonrpc/server.go index 1c1505261a..b6188e5a8f 100644 --- a/jsonrpc/server.go +++ b/jsonrpc/server.go @@ -7,9 +7,11 @@ import ( "errors" "fmt" "io" + "mime" "net" "net/http" "sync" + "syscall" "time" "github.com/0xPolygonHermez/zkevm-node/jsonrpc/metrics" @@ -35,8 +37,13 @@ const ( APIWeb3 = "web3" wsBufferSizeLimitInBytes = 1024 + maxRequestContentLength = 1024 * 1024 * 5 + contentType = "application/json" ) +// https://www.jsonrpc.org/historical/json-rpc-over-http.html#http-header +var acceptedContentTypes = []string{contentType, "application/json-rpc", "application/jsonrequest"} + // Server 
is an API backend to handle RPC requests type Server struct { config Config @@ -45,9 +52,20 @@ type Server struct { srv *http.Server wsSrv *http.Server wsUpgrader websocket.Upgrader + + connCounterMutex *sync.Mutex + httpConnCounter int64 + wsConnCounter int64 } -// Service implementation of a service an it's name +// Service defines a struct that will provide public methods to be exposed +// by the RPC server as endpoints, the endpoints will be prefixed with the +// value in the Name property followed by an underscore and the method name +// starting with a lower case char, resulting in a mix of snake case and +// camel case, for example: +// +// A service with name `eth` and with a public method BlockNumber() will allow +// the RPC server to expose this method as `eth_blockNumber`. type Service struct { Name string Service interface{} @@ -62,7 +80,10 @@ func NewServer( storage storageInterface, services []Service, ) *Server { - s.PrepareWebSocket() + if cfg.WebSockets.Enabled { + s.StartToMonitorNewL2Blocks() + } + handler := newJSONRpcHandler() for _, service := range services { @@ -70,9 +91,10 @@ func NewServer( } srv := &Server{ - config: cfg, - handler: handler, - chainID: chainID, + config: cfg, + handler: handler, + chainID: chainID, + connCounterMutex: &sync.Mutex{}, } return srv } @@ -195,18 +217,16 @@ func (s *Server) Stop() error { } func (s *Server) handle(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Type", contentType) w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS") w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization") - if (*req).Method == "OPTIONS" { - // TODO(pg): need to count it in the metrics? + if req.Method == http.MethodOptions { return } - if req.Method == "GET" { - // TODO(pg): need to count it in the metrics? 
+ if req.Method == http.MethodGet { _, err := w.Write([]byte("zkEVM JSON RPC Server")) if err != nil { log.Error(err) @@ -214,24 +234,27 @@ func (s *Server) handle(w http.ResponseWriter, req *http.Request) { return } - if req.Method != "POST" { - err := errors.New("method " + req.Method + " not allowed") - s.handleInvalidRequest(w, err) + if code, err := validateRequest(req); err != nil { + handleInvalidRequest(w, err, code) return } - data, err := io.ReadAll(req.Body) + body := io.LimitReader(req.Body, maxRequestContentLength) + data, err := io.ReadAll(body) if err != nil { - s.handleInvalidRequest(w, err) + handleError(w, err) return } single, err := s.isSingleRequest(data) if err != nil { - s.handleInvalidRequest(w, err) + handleInvalidRequest(w, err, http.StatusBadRequest) return } + s.increaseHttpConnCounter() + defer s.decreaseHttpConnCounter() + start := time.Now() var respLen int if single { @@ -240,24 +263,50 @@ func (s *Server) handle(w http.ResponseWriter, req *http.Request) { respLen = s.handleBatchRequest(req, w, data) } metrics.RequestDuration(start) - combinedLog(req, start, http.StatusOK, respLen) + s.combinedLog(req, start, http.StatusOK, respLen) } -func (s *Server) isSingleRequest(data []byte) (bool, types.Error) { +// validateRequest returns a non-zero response code and error message if the +// request is invalid. 
+func validateRequest(req *http.Request) (int, error) { + if req.Method != http.MethodPost { + err := errors.New("method " + req.Method + " not allowed") + return http.StatusMethodNotAllowed, err + } + + if req.ContentLength > maxRequestContentLength { + err := fmt.Errorf("content length too large (%d>%d)", req.ContentLength, maxRequestContentLength) + return http.StatusRequestEntityTooLarge, err + } + + // Check content-type + if mt, _, err := mime.ParseMediaType(req.Header.Get("content-type")); err == nil { + for _, accepted := range acceptedContentTypes { + if accepted == mt { + return 0, nil + } + } + } + // Invalid content-type + err := fmt.Errorf("invalid content type, only %s is supported", contentType) + return http.StatusUnsupportedMediaType, err +} + +func (s *Server) isSingleRequest(data []byte) (bool, error) { x := bytes.TrimLeft(data, " \t\r\n") if len(x) == 0 { - return false, types.NewRPCError(types.InvalidRequestErrorCode, "Invalid json request") + return false, fmt.Errorf("empty request body") } - return x[0] == '{', nil + return x[0] != '[', nil } func (s *Server) handleSingleRequest(httpRequest *http.Request, w http.ResponseWriter, data []byte) int { defer metrics.RequestHandled(metrics.RequestHandledLabelSingle) request, err := s.parseRequest(data) if err != nil { - handleError(w, err) + handleInvalidRequest(w, err, http.StatusBadRequest) return 0 } req := handleRequest{Request: request, HttpRequest: httpRequest} @@ -278,13 +327,27 @@ func (s *Server) handleSingleRequest(httpRequest *http.Request, w http.ResponseW } func (s *Server) handleBatchRequest(httpRequest *http.Request, w http.ResponseWriter, data []byte) int { + // Checking if batch requests are enabled + if !s.config.BatchRequestsEnabled { + handleInvalidRequest(w, types.ErrBatchRequestsDisabled, http.StatusBadRequest) + return 0 + } + defer metrics.RequestHandled(metrics.RequestHandledLabelBatch) requests, err := s.parseRequests(data) if err != nil { - handleError(w, err) + 
handleInvalidRequest(w, err, http.StatusBadRequest) return 0 } + // Checking if batch requests limit is exceeded + if s.config.BatchRequestsLimit > 0 { + if len(requests) > int(s.config.BatchRequestsLimit) { + handleInvalidRequest(w, types.ErrBatchRequestsLimitExceeded, http.StatusRequestEntityTooLarge) + return 0 + } + } + responses := make([]types.Response, 0, len(requests)) for _, request := range requests { @@ -306,7 +369,7 @@ func (s *Server) parseRequest(data []byte) (types.Request, error) { var req types.Request if err := json.Unmarshal(data, &req); err != nil { - return types.Request{}, types.NewRPCError(types.InvalidRequestErrorCode, "Invalid json request") + return types.Request{}, fmt.Errorf("invalid json object request body") } return req, nil @@ -316,44 +379,53 @@ func (s *Server) parseRequests(data []byte) ([]types.Request, error) { var requests []types.Request if err := json.Unmarshal(data, &requests); err != nil { - return nil, types.NewRPCError(types.InvalidRequestErrorCode, "Invalid json request") + return nil, fmt.Errorf("invalid json array request body") } return requests, nil } -func (s *Server) handleInvalidRequest(w http.ResponseWriter, err error) { - defer metrics.RequestHandled(metrics.RequestHandledLabelInvalid) - handleError(w, err) -} - func (s *Server) handleWs(w http.ResponseWriter, req *http.Request) { // CORS rule - Allow requests from anywhere s.wsUpgrader.CheckOrigin = func(r *http.Request) bool { return true } // Upgrade the connection to a WS one - wsConn, err := s.wsUpgrader.Upgrade(w, req, nil) + innerWsConn, err := s.wsUpgrader.Upgrade(w, req, nil) if err != nil { log.Error(fmt.Sprintf("Unable to upgrade to a WS connection, %s", err.Error())) - return } + wsConn := newConcurrentWsConn(innerWsConn) + + // Set read limit + wsConn.SetReadLimit(s.config.WebSockets.ReadLimit) + // Defer WS closure - defer func(ws *websocket.Conn) { - err = ws.Close() + defer func(wsConn *concurrentWsConn) { + err = wsConn.Close() if err != nil { 
log.Error(fmt.Sprintf("Unable to gracefully close WS connection, %s", err.Error())) } }(wsConn) + s.increaseWsConnCounter() + defer s.decreaseWsConnCounter() + + // recover + defer func() { + if err := recover(); err != nil { + log.Error(err) + } + }() log.Info("Websocket connection established") - var mu sync.Mutex for { msgType, message, err := wsConn.ReadMessage() if err != nil { if websocket.IsCloseError(err, websocket.CloseGoingAway, websocket.CloseNormalClosure, websocket.CloseAbnormalClosure) { log.Info("Closing WS connection gracefully") + } else if errors.Is(err, websocket.ErrReadLimit) { + log.Info("Closing WS connection due to read limit exceeded") } else { log.Error(fmt.Sprintf("Unable to read WS message, %s", err.Error())) log.Info("Closing WS connection with error") @@ -365,45 +437,91 @@ func (s *Server) handleWs(w http.ResponseWriter, req *http.Request) { } if msgType == websocket.TextMessage || msgType == websocket.BinaryMessage { - go func() { - mu.Lock() - defer mu.Unlock() - resp, err := s.handler.HandleWs(message, wsConn, req) - if err != nil { - log.Error(fmt.Sprintf("Unable to handle WS request, %s", err.Error())) - _ = wsConn.WriteMessage(msgType, []byte(fmt.Sprintf("WS Handle error: %s", err.Error()))) - } else { - _ = wsConn.WriteMessage(msgType, resp) - } - }() + resp, err := s.handler.HandleWs(message, wsConn, req) + if err != nil { + log.Error(fmt.Sprintf("Unable to handle WS request, %s", err.Error())) + _ = wsConn.WriteMessage(msgType, []byte(fmt.Sprintf("WS Handle error: %s", err.Error()))) + } else { + _ = wsConn.WriteMessage(msgType, resp) + } } } } +func (s *Server) increaseHttpConnCounter() { + s.connCounterMutex.Lock() + s.httpConnCounter++ + s.logConnCounters() + s.connCounterMutex.Unlock() +} + +func (s *Server) decreaseHttpConnCounter() { + s.connCounterMutex.Lock() + s.httpConnCounter-- + s.logConnCounters() + s.connCounterMutex.Unlock() +} + +func (s *Server) increaseWsConnCounter() { + s.connCounterMutex.Lock() + 
s.wsConnCounter++ + s.logConnCounters() + s.connCounterMutex.Unlock() +} + +func (s *Server) decreaseWsConnCounter() { + s.connCounterMutex.Lock() + s.wsConnCounter-- + s.logConnCounters() + s.connCounterMutex.Unlock() +} + +func (s *Server) logConnCounters() { + totalConnCounter := s.httpConnCounter + s.wsConnCounter + log.Infof("[ HTTP conns: %v | WS conns: %v | Total conns: %v ]", s.httpConnCounter, s.wsConnCounter, totalConnCounter) +} + +func handleInvalidRequest(w http.ResponseWriter, err error, code int) { + defer metrics.RequestHandled(metrics.RequestHandledLabelInvalid) + log.Infof("Invalid Request: %v", err.Error()) + http.Error(w, err.Error(), code) +} + func handleError(w http.ResponseWriter, err error) { - log.Error(err) - _, err = w.Write([]byte(err.Error())) - if err != nil { - log.Error(err) + defer metrics.RequestHandled(metrics.RequestHandledLabelError) + log.Errorf("Error processing request: %v", err) + + if errors.Is(err, syscall.EPIPE) { + // if it is a broken pipe error, return + return } + + // if it is a different error, write it to the response + http.Error(w, err.Error(), http.StatusInternalServerError) } // RPCErrorResponse formats error to be returned through RPC -func RPCErrorResponse(code int, message string, err error) (interface{}, types.Error) { - return RPCErrorResponseWithData(code, message, nil, err) +func RPCErrorResponse(code int, message string, err error, logError bool) (interface{}, types.Error) { + return RPCErrorResponseWithData(code, message, nil, err, logError) } // RPCErrorResponseWithData formats error to be returned through RPC -func RPCErrorResponseWithData(code int, message string, data *[]byte, err error) (interface{}, types.Error) { - if err != nil { - log.Errorf("%v: %v", message, err.Error()) - } else { - log.Error(message) +func RPCErrorResponseWithData(code int, message string, data *[]byte, err error, logError bool) (interface{}, types.Error) { + if logError { + if err != nil { + log.Errorf("%v: %v", message, 
err.Error()) + } else { + log.Error(message) + } } return nil, types.NewRPCErrorWithData(code, message, data) } -func combinedLog(r *http.Request, start time.Time, httpStatus, dataLen int) { +func (s *Server) combinedLog(r *http.Request, start time.Time, httpStatus, dataLen int) { + if !s.config.EnableHttpLog { + return + } + log.Infof("%s - - %s \"%s %s %s\" %d %d \"%s\" \"%s\"", r.RemoteAddr, start.Format("[02/Jan/2006:15:04:05 -0700]"), diff --git a/jsonrpc/server_test.go b/jsonrpc/server_test.go index 2ab25bee84..f4dc76e201 100644 --- a/jsonrpc/server_test.go +++ b/jsonrpc/server_test.go @@ -1,8 +1,14 @@ package jsonrpc import ( + "bytes" + "context" "fmt" + "io" + "math/big" "net/http" + "sync" + "sync/atomic" "testing" "time" @@ -10,7 +16,11 @@ import ( "github.com/0xPolygonHermez/zkevm-node/jsonrpc/mocks" "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -21,9 +31,10 @@ const ( ) type mockedServer struct { - Config Config - Server *Server - ServerURL string + Config Config + Server *Server + ServerURL string + ServerWebSocketsURL string } type mocksWrapper struct { @@ -51,7 +62,7 @@ func newMockedServer(t *testing.T, cfg Config) (*mockedServer, *mocksWrapper, *e var newL2BlockEventHandler state.NewL2BlockEventHandler = func(e state.NewL2BlockEvent) {} st.On("RegisterNewL2BlockEventHandler", mock.IsType(newL2BlockEventHandler)).Once() - st.On("PrepareWebSocket").Once() + st.On("StartToMonitorNewL2Blocks").Once() services := []Service{} if _, ok := apis[APIEth]; ok { @@ -118,10 +129,13 @@ func newMockedServer(t *testing.T, cfg Config) (*mockedServer, *mocksWrapper, *e ethClient, err := ethclient.Dial(serverURL) 
require.NoError(t, err) + serverWebSocketsURL := fmt.Sprintf("ws://%s:%d", cfg.WebSockets.Host, cfg.WebSockets.Port) + msv := &mockedServer{ - Config: cfg, - Server: server, - ServerURL: serverURL, + Config: cfg, + Server: server, + ServerURL: serverURL, + ServerWebSocketsURL: serverWebSocketsURL, } mks := &mocksWrapper{ @@ -135,28 +149,56 @@ func newMockedServer(t *testing.T, cfg Config) (*mockedServer, *mocksWrapper, *e return msv, mks, ethClient } -func getDefaultConfig() Config { +func getSequencerDefaultConfig() Config { cfg := Config{ - Host: "0.0.0.0", - Port: 9123, - MaxRequestsPerIPAndSecond: maxRequestsPerIPAndSecond, - MaxCumulativeGasUsed: 300000, + Host: "0.0.0.0", + Port: 9123, + MaxRequestsPerIPAndSecond: maxRequestsPerIPAndSecond, + MaxCumulativeGasUsed: 300000, + BatchRequestsEnabled: true, + MaxLogsCount: 10000, + MaxLogsBlockRange: 10000, + MaxNativeBlockHashBlockRange: 60000, + WebSockets: WebSocketsConfig{ + Enabled: true, + Host: "0.0.0.0", + Port: 9133, + ReadLimit: 0, + }, } return cfg } +func getNonSequencerDefaultConfig(sequencerNodeURI string) Config { + cfg := getSequencerDefaultConfig() + cfg.Port = 9124 + cfg.SequencerNodeURI = sequencerNodeURI + return cfg +} + func newSequencerMockedServer(t *testing.T) (*mockedServer, *mocksWrapper, *ethclient.Client) { - cfg := getDefaultConfig() + cfg := getSequencerDefaultConfig() + return newMockedServer(t, cfg) +} + +func newMockedServerWithCustomConfig(t *testing.T, cfg Config) (*mockedServer, *mocksWrapper, *ethclient.Client) { return newMockedServer(t, cfg) } func newNonSequencerMockedServer(t *testing.T, sequencerNodeURI string) (*mockedServer, *mocksWrapper, *ethclient.Client) { - cfg := getDefaultConfig() - cfg.Port = 9124 - cfg.SequencerNodeURI = sequencerNodeURI + cfg := getNonSequencerDefaultConfig(sequencerNodeURI) return newMockedServer(t, cfg) } +func (s *mockedServer) GetWSClient() *ethclient.Client { + ethClient, err := ethclient.Dial(s.ServerWebSocketsURL) + if err != nil { + 
panic(err) + } + + return ethClient +} + func (s *mockedServer) Stop() { err := s.Server.Stop() if err != nil { @@ -168,6 +210,431 @@ func (s *mockedServer) JSONRPCCall(method string, parameters ...interface{}) (ty return client.JSONRPCCall(s.ServerURL, method, parameters...) } +func (s *mockedServer) JSONRPCBatchCall(calls ...client.BatchCall) ([]types.Response, error) { + return client.JSONRPCBatchCall(s.ServerURL, calls...) +} + func (s *mockedServer) ChainID() uint64 { return chainID } + +func TestBatchRequests(t *testing.T) { + type testCase struct { + Name string + BatchRequestsEnabled bool + BatchRequestsLimit uint + NumberOfRequests int + ExpectedError error + SetupMocks func(m *mocksWrapper, tc testCase) + } + + block := ethTypes.NewBlock( + ðTypes.Header{Number: big.NewInt(2), UncleHash: ethTypes.EmptyUncleHash, Root: ethTypes.EmptyRootHash}, + []*ethTypes.Transaction{ethTypes.NewTransaction(1, common.Address{}, big.NewInt(1), 1, big.NewInt(1), []byte{})}, + nil, + []*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))}, + &trie.StackTrie{}, + ) + + testCases := []testCase{ + { + Name: "batch requests disabled", + BatchRequestsEnabled: false, + BatchRequestsLimit: 0, + NumberOfRequests: 10, + ExpectedError: fmt.Errorf("400 - " + types.ErrBatchRequestsDisabled.Error() + "\n"), + SetupMocks: func(m *mocksWrapper, tc testCase) {}, + }, + { + Name: "batch requests over the limit", + BatchRequestsEnabled: true, + BatchRequestsLimit: 5, + NumberOfRequests: 6, + ExpectedError: fmt.Errorf("413 - " + types.ErrBatchRequestsLimitExceeded.Error() + "\n"), + SetupMocks: func(m *mocksWrapper, tc testCase) { + }, + }, + { + Name: "batch requests unlimited", + BatchRequestsEnabled: true, + BatchRequestsLimit: 0, + NumberOfRequests: 100, + ExpectedError: nil, + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx.On("Commit", context.Background()).Return(nil).Times(tc.NumberOfRequests) + m.State.On("BeginStateTransaction", 
context.Background()).Return(m.DbTx, nil).Times(tc.NumberOfRequests) + m.State.On("GetLastL2BlockNumber", context.Background(), m.DbTx).Return(block.Number().Uint64(), nil).Times(tc.NumberOfRequests) + m.State.On("GetL2BlockByNumber", context.Background(), block.Number().Uint64(), m.DbTx).Return(block, nil).Times(tc.NumberOfRequests) + m.State.On("GetTransactionReceipt", context.Background(), mock.Anything, m.DbTx).Return(ethTypes.NewReceipt([]byte{}, false, uint64(0)), nil) + }, + }, + { + Name: "batch requests equal the limit", + BatchRequestsEnabled: true, + BatchRequestsLimit: 5, + NumberOfRequests: 5, + ExpectedError: nil, + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx.On("Commit", context.Background()).Return(nil).Times(tc.NumberOfRequests) + m.State.On("BeginStateTransaction", context.Background()).Return(m.DbTx, nil).Times(tc.NumberOfRequests) + m.State.On("GetLastL2BlockNumber", context.Background(), m.DbTx).Return(block.Number().Uint64(), nil).Times(tc.NumberOfRequests) + m.State.On("GetL2BlockByNumber", context.Background(), block.Number().Uint64(), m.DbTx).Return(block, nil).Times(tc.NumberOfRequests) + m.State.On("GetTransactionReceipt", context.Background(), mock.Anything, m.DbTx).Return(ethTypes.NewReceipt([]byte{}, false, uint64(0)), nil) + }, + }, + { + Name: "batch requests under the limit", + BatchRequestsEnabled: true, + BatchRequestsLimit: 5, + NumberOfRequests: 4, + ExpectedError: nil, + SetupMocks: func(m *mocksWrapper, tc testCase) { + m.DbTx.On("Commit", context.Background()).Return(nil).Times(tc.NumberOfRequests) + m.State.On("BeginStateTransaction", context.Background()).Return(m.DbTx, nil).Times(tc.NumberOfRequests) + m.State.On("GetLastL2BlockNumber", context.Background(), m.DbTx).Return(block.Number().Uint64(), nil).Times(tc.NumberOfRequests) + m.State.On("GetL2BlockByNumber", context.Background(), block.Number().Uint64(), m.DbTx).Return(block, nil).Times(tc.NumberOfRequests) + m.State.On("GetTransactionReceipt", 
context.Background(), mock.Anything, m.DbTx).Return(ethTypes.NewReceipt([]byte{}, false, uint64(0)), nil) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + tc := testCase + + cfg := getSequencerDefaultConfig() + cfg.BatchRequestsEnabled = tc.BatchRequestsEnabled + cfg.BatchRequestsLimit = tc.BatchRequestsLimit + s, m, _ := newMockedServerWithCustomConfig(t, cfg) + + tc.SetupMocks(m, tc) + + calls := []client.BatchCall{} + + for i := 0; i < tc.NumberOfRequests; i++ { + calls = append(calls, client.BatchCall{ + Method: "eth_getBlockByNumber", + Parameters: []interface{}{"latest"}, + }) + } + + result, err := s.JSONRPCBatchCall(calls...) + if testCase.ExpectedError == nil { + assert.Equal(t, testCase.NumberOfRequests, len(result)) + } else { + assert.Equal(t, 0, len(result)) + assert.Equal(t, testCase.ExpectedError.Error(), err.Error()) + } + + s.Stop() + }) + } +} + +func TestRequestValidation(t *testing.T) { + type testCase struct { + Name string + Method string + Content []byte + ContentType string + ExpectedStatusCode int + ExpectedResponseHeaders map[string][]string + ExpectedMessage string + } + + testCases := []testCase{ + { + Name: "OPTION request", + Method: http.MethodOptions, + ExpectedStatusCode: http.StatusOK, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"application/json"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "", + }, + { + Name: "GET request", + Method: http.MethodGet, + ExpectedStatusCode: http.StatusOK, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"application/json"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, 
X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "zkEVM JSON RPC Server", + }, + { + Name: "HEAD request", + Method: http.MethodHead, + ExpectedStatusCode: http.StatusMethodNotAllowed, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "", + }, + { + Name: "PUT request", + Method: http.MethodPut, + ExpectedStatusCode: http.StatusMethodNotAllowed, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "method PUT not allowed\n", + }, + { + Name: "PATCH request", + Method: http.MethodPatch, + ExpectedStatusCode: http.StatusMethodNotAllowed, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "method PATCH not allowed\n", + }, + { + Name: "DELETE request", + Method: http.MethodDelete, + ExpectedStatusCode: http.StatusMethodNotAllowed, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "method DELETE not allowed\n", + }, + { + Name: "CONNECT 
request", + Method: http.MethodConnect, + ExpectedStatusCode: http.StatusNotFound, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + }, + ExpectedMessage: "404 page not found\n", + }, + { + Name: "TRACE request", + Method: http.MethodTrace, + ExpectedStatusCode: http.StatusMethodNotAllowed, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "method TRACE not allowed\n", + }, + { + Name: "Request content bigger than limit", + Method: http.MethodPost, + Content: make([]byte, maxRequestContentLength+1), + ExpectedStatusCode: http.StatusRequestEntityTooLarge, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "content length too large (5242881>5242880)\n", + }, + { + Name: "Invalid content type", + Method: http.MethodPost, + ContentType: "text/html", + ExpectedStatusCode: http.StatusUnsupportedMediaType, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "invalid content type, only application/json is supported\n", + }, + { + Name: "Empty request body", + Method: http.MethodPost, + ContentType: contentType, + Content: []byte(""), + ExpectedStatusCode: http.StatusBadRequest, + 
ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "empty request body\n", + }, + { + Name: "Invalid json", + Method: http.MethodPost, + ContentType: contentType, + Content: []byte("this is not a json format string"), + ExpectedStatusCode: http.StatusBadRequest, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "invalid json object request body\n", + }, + { + Name: "Incomplete json object", + Method: http.MethodPost, + ContentType: contentType, + Content: []byte("{ \"field\":"), + ExpectedStatusCode: http.StatusBadRequest, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "invalid json object request body\n", + }, + { + Name: "Incomplete json array", + Method: http.MethodPost, + ContentType: contentType, + Content: []byte("[ { \"field\":"), + ExpectedStatusCode: http.StatusBadRequest, + ExpectedResponseHeaders: map[string][]string{ + "Content-Type": {"text/plain; charset=utf-8"}, + "Access-Control-Allow-Origin": {"*"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Headers": {"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"}, + }, + ExpectedMessage: "invalid 
json array request body\n", + }, + } + + s, _, _ := newSequencerMockedServer(t) + defer s.Stop() + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + tc := testCase + reqBodyReader := bytes.NewReader(tc.Content) + httpReq, err := http.NewRequest(tc.Method, s.ServerURL, reqBodyReader) + require.NoError(t, err) + + httpReq.Header.Add("Content-type", tc.ContentType) + + httpRes, err := http.DefaultClient.Do(httpReq) + require.NoError(t, err) + + resBody, err := io.ReadAll(httpRes.Body) + require.NoError(t, err) + defer httpRes.Body.Close() + + message := string(resBody) + assert.Equal(t, tc.ExpectedStatusCode, httpRes.StatusCode) + assert.Equal(t, tc.ExpectedMessage, message) + + for responseHeaderKey, responseHeaderValue := range tc.ExpectedResponseHeaders { + assert.ElementsMatch(t, httpRes.Header[responseHeaderKey], responseHeaderValue) + } + }) + } +} + +func TestMaxRequestPerIPPerSec(t *testing.T) { + // this is the number of requests the test will execute + // it's important to keep this number with an amount of + // requests that the machine running this test is able + // to execute in a single second + const numberOfRequests = 100 + // the number of workers are the amount of go routines + // the machine is able to run at the same time without + // consuming all the resources and making the go routines + // to affect each other performance, this number may vary + // depending on the machine spec running the test. + // a good number to this generally is a number close to + // the number of cores or threads provided by the CPU. + const workers = 12 + // it's important to keep this limit smaller than the + // number of requests the test is going to perform, so + // the test can have some requests rejected. 
+ const maxRequestsPerIPAndSecond = 20 + + cfg := getSequencerDefaultConfig() + cfg.MaxRequestsPerIPAndSecond = maxRequestsPerIPAndSecond + s, m, _ := newMockedServerWithCustomConfig(t, cfg) + defer s.Stop() + + // since the limitation is made by second, + // the test waits 1 sec before starting because request are made during the + // server creation to check its availability. Waiting this second means + // we have a fresh second without any other request made. + time.Sleep(time.Second) + + // create a wait group to wait for all the requests to return + wg := sync.WaitGroup{} + wg.Add(numberOfRequests) + + // prepare mocks with specific amount of times it can be called + // this makes us sure the code is calling these methods only for + // allowed requests + times := int(cfg.MaxRequestsPerIPAndSecond) + m.DbTx.On("Commit", context.Background()).Return(nil).Times(times) + m.State.On("BeginStateTransaction", context.Background()).Return(m.DbTx, nil).Times(times) + m.State.On("GetLastL2BlockNumber", context.Background(), m.DbTx).Return(uint64(1), nil).Times(times) + + // prepare the workers to process the requests as long as a job is available + requestsLimitedCount := uint64(0) + jobs := make(chan int, numberOfRequests) + // put each worker to work + for i := 0; i < workers; i++ { + // each worker works in a go routine to be able to have many + // workers working concurrently + go func() { + // a worker keeps working indefinitely looking for new jobs + for { + // waits until a job is available + <-jobs + // send the request + _, err := s.JSONRPCCall("eth_blockNumber") + // if the request works well or gets rejected due to max requests per sec, it's ok + // otherwise we stop the test and log the error. + if err != nil { + if err.Error() == "429 - You have reached maximum request limit." 
{ + atomic.AddUint64(&requestsLimitedCount, 1) + } else { + require.NoError(t, err) + } + } + + // registers in the wait group a request was executed and has returned + wg.Done() + } + }() + } + + // add jobs to notify workers accordingly to the number + // of requests the test wants to send to the server + for i := 0; i < numberOfRequests; i++ { + jobs <- i + } + + // wait for all the requests to return + wg.Wait() + + // checks if all the exceeded requests were limited + assert.Equal(t, uint64(numberOfRequests-maxRequestsPerIPAndSecond), requestsLimitedCount) + + // wait the server to process the last requests without breaking the + // connection abruptly + time.Sleep(time.Second) +} diff --git a/jsonrpc/storage.go b/jsonrpc/storage.go index 32de18fc27..c9f0dc1619 100644 --- a/jsonrpc/storage.go +++ b/jsonrpc/storage.go @@ -7,8 +7,8 @@ import ( "time" "github.com/0xPolygonHermez/zkevm-node/hex" + "github.com/0xPolygonHermez/zkevm-node/state" "github.com/google/uuid" - "github.com/gorilla/websocket" ) // ErrNotFound represent a not found error. 
@@ -20,50 +20,92 @@ var ErrFilterInvalidPayload = errors.New("invalid argument 0: cannot specify bot // Storage uses memory to store the data // related to the json rpc server type Storage struct { - filters sync.Map + allFilters map[string]*Filter + allFiltersWithWSConn map[*concurrentWsConn]map[string]*Filter + blockFiltersWithWSConn map[string]*Filter + logFiltersWithWSConn map[string]*Filter + pendingTxFiltersWithWSConn map[string]*Filter + + blockMutex *sync.Mutex + logMutex *sync.Mutex + pendingTxMutex *sync.Mutex } // NewStorage creates and initializes an instance of Storage func NewStorage() *Storage { return &Storage{ - filters: sync.Map{}, + allFilters: make(map[string]*Filter), + allFiltersWithWSConn: make(map[*concurrentWsConn]map[string]*Filter), + blockFiltersWithWSConn: make(map[string]*Filter), + logFiltersWithWSConn: make(map[string]*Filter), + pendingTxFiltersWithWSConn: make(map[string]*Filter), + blockMutex: &sync.Mutex{}, + logMutex: &sync.Mutex{}, + pendingTxMutex: &sync.Mutex{}, } } // NewLogFilter persists a new log filter -func (s *Storage) NewLogFilter(wsConn *websocket.Conn, filter LogFilter) (string, error) { - if filter.BlockHash != nil && (filter.FromBlock != nil || filter.ToBlock != nil) { - return "", ErrFilterInvalidPayload +func (s *Storage) NewLogFilter(wsConn *concurrentWsConn, filter LogFilter) (string, error) { + if err := filter.Validate(); err != nil { + return "", err } return s.createFilter(FilterTypeLog, filter, wsConn) } // NewBlockFilter persists a new block log filter -func (s *Storage) NewBlockFilter(wsConn *websocket.Conn) (string, error) { +func (s *Storage) NewBlockFilter(wsConn *concurrentWsConn) (string, error) { return s.createFilter(FilterTypeBlock, nil, wsConn) } // NewPendingTransactionFilter persists a new pending transaction filter -func (s *Storage) NewPendingTransactionFilter(wsConn *websocket.Conn) (string, error) { +func (s *Storage) NewPendingTransactionFilter(wsConn *concurrentWsConn) (string, error) { 
return s.createFilter(FilterTypePendingTx, nil, wsConn) } // create persists the filter to the memory and provides the filter id -func (s *Storage) createFilter(t FilterType, parameters interface{}, wsConn *websocket.Conn) (string, error) { +func (s *Storage) createFilter(t FilterType, parameters interface{}, wsConn *concurrentWsConn) (string, error) { lastPoll := time.Now().UTC() id, err := s.generateFilterID() if err != nil { return "", fmt.Errorf("failed to generate filter ID: %w", err) } - s.filters.Store(id, &Filter{ - ID: id, - Type: t, - Parameters: parameters, - LastPoll: lastPoll, - WsConn: wsConn, - }) + s.blockMutex.Lock() + s.logMutex.Lock() + s.pendingTxMutex.Lock() + defer s.blockMutex.Unlock() + defer s.logMutex.Unlock() + defer s.pendingTxMutex.Unlock() + + f := &Filter{ + ID: id, + Type: t, + Parameters: parameters, + LastPoll: lastPoll, + WsConn: wsConn, + wsQueue: state.NewQueue[[]byte](), + wsQueueSignal: sync.NewCond(&sync.Mutex{}), + } + + go state.InfiniteSafeRun(f.SendEnqueuedSubscriptionData, fmt.Sprintf("failed to send enqueued subscription data to filter %v", id), time.Second) + + s.allFilters[id] = f + if f.WsConn != nil { + if _, found := s.allFiltersWithWSConn[f.WsConn]; !found { + s.allFiltersWithWSConn[f.WsConn] = make(map[string]*Filter) + } + + s.allFiltersWithWSConn[f.WsConn][id] = f + if t == FilterTypeBlock { + s.blockFiltersWithWSConn[id] = f + } else if t == FilterTypeLog { + s.logFiltersWithWSConn[id] = f + } else if t == FilterTypePendingTx { + s.pendingTxFiltersWithWSConn[id] = f + } + } return id, nil } @@ -84,87 +126,122 @@ func (s *Storage) generateFilterID() (string, error) { // GetAllBlockFiltersWithWSConn returns an array with all filter that have // a web socket connection and are filtering by new blocks -func (s *Storage) GetAllBlockFiltersWithWSConn() ([]*Filter, error) { - filtersWithWSConn := []*Filter{} - s.filters.Range(func(key, value any) bool { - filter := value.(*Filter) - if filter.WsConn == nil || 
filter.Type != FilterTypeBlock { - return true - } +func (s *Storage) GetAllBlockFiltersWithWSConn() []*Filter { + s.blockMutex.Lock() + defer s.blockMutex.Unlock() + filters := []*Filter{} + for _, filter := range s.blockFiltersWithWSConn { f := filter - filtersWithWSConn = append(filtersWithWSConn, f) - return true - }) - - return filtersWithWSConn, nil + filters = append(filters, f) + } + return filters } // GetAllLogFiltersWithWSConn returns an array with all filter that have // a web socket connection and are filtering by new logs -func (s *Storage) GetAllLogFiltersWithWSConn() ([]*Filter, error) { - filtersWithWSConn := []*Filter{} - s.filters.Range(func(key, value any) bool { - filter := value.(*Filter) - if filter.WsConn == nil || filter.Type != FilterTypeLog { - return true - } +func (s *Storage) GetAllLogFiltersWithWSConn() []*Filter { + s.logMutex.Lock() + defer s.logMutex.Unlock() + filters := []*Filter{} + for _, filter := range s.logFiltersWithWSConn { f := filter - filtersWithWSConn = append(filtersWithWSConn, f) - return true - }) - - return filtersWithWSConn, nil + filters = append(filters, f) + } + return filters } // GetFilter gets a filter by its id func (s *Storage) GetFilter(filterID string) (*Filter, error) { - filter, found := s.filters.Load(filterID) + s.blockMutex.Lock() + s.logMutex.Lock() + s.pendingTxMutex.Lock() + defer s.blockMutex.Unlock() + defer s.logMutex.Unlock() + defer s.pendingTxMutex.Unlock() + + filter, found := s.allFilters[filterID] if !found { return nil, ErrNotFound } - return filter.(*Filter), nil + return filter, nil } // UpdateFilterLastPoll updates the last poll to now func (s *Storage) UpdateFilterLastPoll(filterID string) error { - filterValue, found := s.filters.Load(filterID) + s.blockMutex.Lock() + s.logMutex.Lock() + s.pendingTxMutex.Lock() + defer s.blockMutex.Unlock() + defer s.logMutex.Unlock() + defer s.pendingTxMutex.Unlock() + + filter, found := s.allFilters[filterID] if !found { return ErrNotFound } - 
filter := filterValue.(*Filter) filter.LastPoll = time.Now().UTC() - s.filters.Store(filterID, filter) + s.allFilters[filterID] = filter return nil } // UninstallFilter deletes a filter by its id func (s *Storage) UninstallFilter(filterID string) error { - _, found := s.filters.Load(filterID) + s.blockMutex.Lock() + s.logMutex.Lock() + s.pendingTxMutex.Lock() + defer s.blockMutex.Unlock() + defer s.logMutex.Unlock() + defer s.pendingTxMutex.Unlock() + + filter, found := s.allFilters[filterID] if !found { return ErrNotFound } - s.filters.Delete(filterID) + + s.deleteFilter(filter) return nil } // UninstallFilterByWSConn deletes all filters connected to the provided web socket connection -func (s *Storage) UninstallFilterByWSConn(wsConn *websocket.Conn) error { - filterIDsToDelete := []string{} - s.filters.Range(func(key, value any) bool { - id := key.(string) - filter := value.(*Filter) - if filter.WsConn == wsConn { - filterIDsToDelete = append(filterIDsToDelete, id) - } - return true - }) +func (s *Storage) UninstallFilterByWSConn(wsConn *concurrentWsConn) error { + s.blockMutex.Lock() + s.logMutex.Lock() + s.pendingTxMutex.Lock() + defer s.blockMutex.Unlock() + defer s.logMutex.Unlock() + defer s.pendingTxMutex.Unlock() + + filters, found := s.allFiltersWithWSConn[wsConn] + if !found { + return nil + } - for _, filterID := range filterIDsToDelete { - s.filters.Delete(filterID) + for _, filter := range filters { + s.deleteFilter(filter) } return nil } + +// deleteFilter deletes a filter from all the maps +func (s *Storage) deleteFilter(filter *Filter) { + if filter.Type == FilterTypeBlock { + delete(s.blockFiltersWithWSConn, filter.ID) + } else if filter.Type == FilterTypeLog { + delete(s.logFiltersWithWSConn, filter.ID) + } else if filter.Type == FilterTypePendingTx { + delete(s.pendingTxFiltersWithWSConn, filter.ID) + } + + if filter.WsConn != nil { + delete(s.allFiltersWithWSConn[filter.WsConn], filter.ID) + if len(s.allFiltersWithWSConn[filter.WsConn]) == 0 { 
+ delete(s.allFiltersWithWSConn, filter.WsConn) + } + } + + delete(s.allFilters, filter.ID) +} diff --git a/jsonrpc/types/codec.go b/jsonrpc/types/codec.go index ec64d8db16..82fc3d04f2 100644 --- a/jsonrpc/types/codec.go +++ b/jsonrpc/types/codec.go @@ -14,21 +14,27 @@ import ( ) const ( - // PendingBlockNumber represents the pending block number - PendingBlockNumber = BlockNumber(-3) + // EarliestBlockNumber represents the earliest block number, always 0 + EarliestBlockNumber = BlockNumber(-1) // LatestBlockNumber represents the latest block number LatestBlockNumber = BlockNumber(-2) - // EarliestBlockNumber represents the earliest block number - EarliestBlockNumber = BlockNumber(-1) - // SafeBlockNumber represents the last virtualized block number + // PendingBlockNumber represents the pending block number + PendingBlockNumber = BlockNumber(-3) + // SafeBlockNumber represents the last verified block number that is safe on Ethereum SafeBlockNumber = BlockNumber(-4) - // FinalizedBlockNumber represents the last verified block number + // FinalizedBlockNumber represents the last verified block number that is finalized on Ethereum FinalizedBlockNumber = BlockNumber(-5) - // LatestBatchNumber represents the latest batch number - LatestBatchNumber = BatchNumber(-2) - // EarliestBatchNumber represents the earliest batch number + // EarliestBatchNumber represents the earliest batch number, always 0 EarliestBatchNumber = BatchNumber(-1) + // LatestBatchNumber represents the last closed batch number + LatestBatchNumber = BatchNumber(-2) + // PendingBatchNumber represents the last batch in the trusted state + PendingBatchNumber = BatchNumber(-3) + // SafeBatchNumber represents the last batch verified in a block that is safe on Ethereum + SafeBatchNumber = BatchNumber(-4) + // FinalizedBatchNumber represents the last batch verified in a block that has been finalized on Ethereum + FinalizedBatchNumber = BatchNumber(-5) // Earliest contains the string to represent the earliest 
block known. Earliest = "earliest" @@ -170,6 +176,9 @@ func (b *BlockNumber) GetNumericBlockNumber(ctx context.Context, s StateInterfac } switch bValue { + case EarliestBlockNumber: + return 0, nil + case LatestBlockNumber, PendingBlockNumber: lastBlockNumber, err := s.GetLastL2BlockNumber(ctx, dbTx) if err != nil { @@ -178,16 +187,13 @@ func (b *BlockNumber) GetNumericBlockNumber(ctx context.Context, s StateInterfac return lastBlockNumber, nil - case EarliestBlockNumber: - return 0, nil - case SafeBlockNumber: l1SafeBlockNumber, err := e.GetSafeBlockNumber(ctx) if err != nil { return 0, NewRPCError(DefaultErrorCode, "failed to get the safe block number from ethereum") } - lastBlockNumber, err := s.GetSafeL2BlockNumber(ctx, l1SafeBlockNumber, dbTx) + lastBlockNumber, err := s.GetLastVerifiedL2BlockNumberUntilL1Block(ctx, l1SafeBlockNumber, dbTx) if errors.Is(err, state.ErrNotFound) { return 0, nil } else if err != nil { @@ -202,7 +208,7 @@ func (b *BlockNumber) GetNumericBlockNumber(ctx context.Context, s StateInterfac return 0, NewRPCError(DefaultErrorCode, "failed to get the finalized block number from ethereum") } - lastBlockNumber, err := s.GetFinalizedL2BlockNumber(ctx, l1FinalizedBlockNumber, dbTx) + lastBlockNumber, err := s.GetLastVerifiedL2BlockNumberUntilL1Block(ctx, l1FinalizedBlockNumber, dbTx) if errors.Is(err, state.ErrNotFound) { return 0, nil } else if err != nil { @@ -407,23 +413,61 @@ func (b *BatchNumber) UnmarshalJSON(buffer []byte) error { } // GetNumericBatchNumber returns a numeric batch number based on the BatchNumber instance -func (b *BatchNumber) GetNumericBatchNumber(ctx context.Context, s StateInterface, dbTx pgx.Tx) (uint64, Error) { +func (b *BatchNumber) GetNumericBatchNumber(ctx context.Context, s StateInterface, e EthermanInterface, dbTx pgx.Tx) (uint64, Error) { bValue := LatestBatchNumber if b != nil { bValue = *b } switch bValue { + case EarliestBatchNumber: + return 0, nil + case LatestBatchNumber: - lastBatchNumber, err := 
s.GetLastBatchNumber(ctx, dbTx) + batchNumber, err := s.GetLastClosedBatchNumber(ctx, dbTx) if err != nil { return 0, NewRPCError(DefaultErrorCode, "failed to get the last batch number from state") } - return lastBatchNumber, nil + return batchNumber, nil - case EarliestBatchNumber: - return 0, nil + case PendingBatchNumber: + batchNumber, err := s.GetLastBatchNumber(ctx, dbTx) + if err != nil { + return 0, NewRPCError(DefaultErrorCode, "failed to get the pending batch number from state") + } + + return batchNumber, nil + + case SafeBatchNumber: + l1SafeBlockNumber, err := e.GetSafeBlockNumber(ctx) + if err != nil { + return 0, NewRPCError(DefaultErrorCode, "failed to get the safe batch number from ethereum") + } + + batchNumber, err := s.GetLastVerifiedBatchNumberUntilL1Block(ctx, l1SafeBlockNumber, dbTx) + if errors.Is(err, state.ErrNotFound) { + return 0, nil + } else if err != nil { + return 0, NewRPCError(DefaultErrorCode, "failed to get the safe batch number from state") + } + + return batchNumber, nil + + case FinalizedBatchNumber: + l1FinalizedBlockNumber, err := e.GetFinalizedBlockNumber(ctx) + if err != nil { + return 0, NewRPCError(DefaultErrorCode, "failed to get the finalized batch number from ethereum") + } + + batchNumber, err := s.GetLastVerifiedBatchNumberUntilL1Block(ctx, l1FinalizedBlockNumber, dbTx) + if errors.Is(err, state.ErrNotFound) { + return 0, nil + } else if err != nil { + return 0, NewRPCError(DefaultErrorCode, "failed to get the finalized batch number from state") + } + + return batchNumber, nil default: if bValue < 0 { diff --git a/jsonrpc/types/codec_test.go b/jsonrpc/types/codec_test.go index 2a72ca5abe..d233f90cfe 100644 --- a/jsonrpc/types/codec_test.go +++ b/jsonrpc/types/codec_test.go @@ -71,7 +71,7 @@ func TestGetNumericBlockNumber(t *testing.T) { }, { name: "BlockNumber LatestBlockNumber", - bn: bnPtr(LatestBlockNumber), + bn: ptr(LatestBlockNumber), expectedBlockNumber: 50, expectedError: nil, setupMocks: func(s 
*mocks.StateMock, d *mocks.DBTxMock, t *testCase) { @@ -83,7 +83,7 @@ func TestGetNumericBlockNumber(t *testing.T) { }, { name: "BlockNumber PendingBlockNumber", - bn: bnPtr(PendingBlockNumber), + bn: ptr(PendingBlockNumber), expectedBlockNumber: 30, expectedError: nil, setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { @@ -95,57 +95,57 @@ func TestGetNumericBlockNumber(t *testing.T) { }, { name: "BlockNumber EarliestBlockNumber", - bn: bnPtr(EarliestBlockNumber), + bn: ptr(EarliestBlockNumber), expectedBlockNumber: 0, expectedError: nil, setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) {}, }, { name: "BlockNumber SafeBlockNumber", - bn: bnPtr(SafeBlockNumber), + bn: ptr(SafeBlockNumber), expectedBlockNumber: 40, expectedError: nil, setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { - liSafeBlock := uint64(30) + safeBlockNumber := uint64(30) e. On("GetSafeBlockNumber", context.Background()). - Return(liSafeBlock, nil). + Return(safeBlockNumber, nil). Once() s. - On("GetSafeL2BlockNumber", context.Background(), liSafeBlock, d). + On("GetLastVerifiedL2BlockNumberUntilL1Block", context.Background(), safeBlockNumber, d). Return(uint64(40), nil). Once() }, }, { name: "BlockNumber FinalizedBlockNumber", - bn: bnPtr(FinalizedBlockNumber), + bn: ptr(FinalizedBlockNumber), expectedBlockNumber: 60, expectedError: nil, setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { - liFinalizedBlock := uint64(50) + finalizedBlockNumber := uint64(50) e. On("GetFinalizedBlockNumber", context.Background()). - Return(liFinalizedBlock, nil). + Return(finalizedBlockNumber, nil). Once() s. - On("GetFinalizedL2BlockNumber", context.Background(), liFinalizedBlock, d). + On("GetLastVerifiedL2BlockNumberUntilL1Block", context.Background(), finalizedBlockNumber, d). Return(uint64(60), nil). 
Once() }, }, { name: "BlockNumber Positive Number", - bn: bnPtr(BlockNumber(int64(10))), + bn: ptr(BlockNumber(int64(10))), expectedBlockNumber: 10, expectedError: nil, setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) {}, }, { name: "BlockNumber Negative Number <= -6", - bn: bnPtr(BlockNumber(int64(-6))), + bn: ptr(BlockNumber(int64(-6))), expectedBlockNumber: 0, expectedError: NewRPCError(InvalidParamsErrorCode, "invalid block number: -6"), setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) {}, @@ -167,6 +167,129 @@ func TestGetNumericBlockNumber(t *testing.T) { } } +func TestGetNumericBatchNumber(t *testing.T) { + s := mocks.NewStateMock(t) + e := mocks.NewEthermanMock(t) + + type testCase struct { + name string + bn *BatchNumber + expectedBatchNumber uint64 + expectedError Error + setupMocks func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) + } + + testCases := []testCase{ + { + name: "BatchNumber nil", + bn: nil, + expectedBatchNumber: 40, + expectedError: nil, + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { + s. + On("GetLastClosedBatchNumber", context.Background(), d). + Return(uint64(40), nil). + Once() + }, + }, + { + name: "BatchNumber LatestBatchNumber", + bn: ptr(LatestBatchNumber), + expectedBatchNumber: 50, + expectedError: nil, + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { + s. + On("GetLastClosedBatchNumber", context.Background(), d). + Return(uint64(50), nil). + Once() + }, + }, + { + name: "BatchNumber PendingBatchNumber", + bn: ptr(PendingBatchNumber), + expectedBatchNumber: 90, + expectedError: nil, + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { + s. + On("GetLastBatchNumber", context.Background(), d). + Return(uint64(90), nil). 
+ Once() + }, + }, + { + name: "BatchNumber EarliestBatchNumber", + bn: ptr(EarliestBatchNumber), + expectedBatchNumber: 0, + expectedError: nil, + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) {}, + }, + { + name: "BatchNumber SafeBatchNumber", + bn: ptr(SafeBatchNumber), + expectedBatchNumber: 40, + expectedError: nil, + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { + safeBlockNumber := uint64(30) + e. + On("GetSafeBlockNumber", context.Background()). + Return(safeBlockNumber, nil). + Once() + + s. + On("GetLastVerifiedBatchNumberUntilL1Block", context.Background(), safeBlockNumber, d). + Return(uint64(40), nil). + Once() + }, + }, + { + name: "BatchNumber FinalizedBatchNumber", + bn: ptr(FinalizedBatchNumber), + expectedBatchNumber: 60, + expectedError: nil, + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) { + finalizedBlockNumber := uint64(50) + e. + On("GetFinalizedBlockNumber", context.Background()). + Return(finalizedBlockNumber, nil). + Once() + + s. + On("GetLastVerifiedBatchNumberUntilL1Block", context.Background(), finalizedBlockNumber, d). + Return(uint64(60), nil). 
+ Once() + }, + }, + { + name: "BatchNumber Positive Number", + bn: ptr(BatchNumber(int64(10))), + expectedBatchNumber: 10, + expectedError: nil, + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) {}, + }, + { + name: "BatchNumber Negative Number <= -6", + bn: ptr(BatchNumber(int64(-6))), + expectedBatchNumber: 0, + expectedError: NewRPCError(InvalidParamsErrorCode, "invalid batch number: -6"), + setupMocks: func(s *mocks.StateMock, d *mocks.DBTxMock, t *testCase) {}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + tc := testCase + dbTx := mocks.NewDBTxMock(t) + testCase.setupMocks(s, dbTx, &tc) + result, rpcErr := testCase.bn.GetNumericBatchNumber(context.Background(), s, e, dbTx) + assert.Equal(t, testCase.expectedBatchNumber, result) + if rpcErr != nil || testCase.expectedError != nil { + assert.Equal(t, testCase.expectedError.ErrorCode(), rpcErr.ErrorCode()) + assert.Equal(t, testCase.expectedError.Error(), rpcErr.Error()) + } + }) + } +} + func TestResponseMarshal(t *testing.T) { testCases := []struct { Name string @@ -262,11 +385,11 @@ func TestBlockNumberStringOrHex(t *testing.T) { bn *BlockNumber expectedResult string }{ - {bn: bnPtr(BlockNumber(-3)), expectedResult: "pending"}, - {bn: bnPtr(BlockNumber(-2)), expectedResult: "latest"}, - {bn: bnPtr(BlockNumber(-1)), expectedResult: "earliest"}, - {bn: bnPtr(BlockNumber(0)), expectedResult: "0x0"}, - {bn: bnPtr(BlockNumber(100)), expectedResult: "0x64"}, + {bn: ptr(BlockNumber(-3)), expectedResult: "pending"}, + {bn: ptr(BlockNumber(-2)), expectedResult: "latest"}, + {bn: ptr(BlockNumber(-1)), expectedResult: "earliest"}, + {bn: ptr(BlockNumber(0)), expectedResult: "0x0"}, + {bn: ptr(BlockNumber(100)), expectedResult: "0x64"}, } for _, testCase := range testCases { @@ -284,7 +407,7 @@ func TestBlockNumberOrHashMarshaling(t *testing.T) { testCases := []testCase{ // success - {`{"blockNumber":"1"}`, &BlockNumberOrHash{number: 
bnPtr(BlockNumber(uint64(1)))}, nil}, + {`{"blockNumber":"1"}`, &BlockNumberOrHash{number: ptr(BlockNumber(uint64(1)))}, nil}, {`{"blockHash":"0x1"}`, &BlockNumberOrHash{hash: argHashPtr(common.HexToHash("0x1"))}, nil}, {`{"blockHash":"0x1", "requireCanonical":true}`, &BlockNumberOrHash{hash: argHashPtr(common.HexToHash("0x1")), requireCanonical: true}, nil}, // float wrong value @@ -318,8 +441,8 @@ func TestBlockNumberOrHashMarshaling(t *testing.T) { } } -func bnPtr(bn BlockNumber) *BlockNumber { - return &bn +func ptr[T any](t T) *T { + return &t } func argHashPtr(hash common.Hash) *ArgHash { diff --git a/jsonrpc/types/errors.go b/jsonrpc/types/errors.go index 17e095094c..24e865b352 100644 --- a/jsonrpc/types/errors.go +++ b/jsonrpc/types/errors.go @@ -17,6 +17,16 @@ const ( ParserErrorCode = -32700 ) +var ( + // ErrBatchRequestsDisabled returned by the server when a batch request + // is detected and the batch requests are disabled via configuration + ErrBatchRequestsDisabled = fmt.Errorf("batch requests are disabled") + + // ErrBatchRequestsLimitExceeded returned by the server when a batch request + // is detected and the number of requests are greater than the configured limit. + ErrBatchRequestsLimitExceeded = fmt.Errorf("batch requests limit exceeded") +) + // Error interface type Error interface { Error() string diff --git a/jsonrpc/types/interfaces.go b/jsonrpc/types/interfaces.go index e68d4c8c63..4bb328c362 100644 --- a/jsonrpc/types/interfaces.go +++ b/jsonrpc/types/interfaces.go @@ -26,7 +26,7 @@ type PoolInterface interface { // StateInterface gathers the methods required to interact with the state. 
type StateInterface interface { - PrepareWebSocket() + StartToMonitorNewL2Blocks() BeginStateTransaction(ctx context.Context) (pgx.Tx, error) DebugTransaction(ctx context.Context, transactionHash common.Hash, traceConfig state.TraceConfig, dbTx pgx.Tx) (*runtime.ExecutionResult, error) EstimateGas(transaction *types.Transaction, senderAddress common.Address, l2BlockNumber *uint64, dbTx pgx.Tx) (uint64, []byte, error) @@ -53,7 +53,6 @@ type StateInterface interface { GetTransactionReceipt(ctx context.Context, transactionHash common.Hash, dbTx pgx.Tx) (*types.Receipt, error) IsL2BlockConsolidated(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (bool, error) IsL2BlockVirtualized(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (bool, error) - ProcessUnsignedTransaction(ctx context.Context, tx *types.Transaction, senderAddress common.Address, l2BlockNumber *uint64, noZKEVMCounters bool, dbTx pgx.Tx) (*runtime.ExecutionResult, error) RegisterNewL2BlockEventHandler(h state.NewL2BlockEventHandler) GetLastVirtualBatchNum(ctx context.Context, dbTx pgx.Tx) (uint64, error) @@ -65,8 +64,10 @@ type StateInterface interface { GetVerifiedBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.VerifiedBatch, error) GetExitRootByGlobalExitRoot(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*state.GlobalExitRoot, error) GetL2BlocksByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]types.Block, error) - GetSafeL2BlockNumber(ctx context.Context, l1SafeBlockNumber uint64, dbTx pgx.Tx) (uint64, error) - GetFinalizedL2BlockNumber(ctx context.Context, l1FinalizedBlockNumber uint64, dbTx pgx.Tx) (uint64, error) + GetNativeBlockHashesInRange(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]common.Hash, error) + GetLastClosedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) + GetLastVerifiedL2BlockNumberUntilL1Block(ctx context.Context, l1FinalizedBlockNumber uint64, dbTx pgx.Tx) (uint64, error) + 
GetLastVerifiedBatchNumberUntilL1Block(ctx context.Context, l1BlockNumber uint64, dbTx pgx.Tx) (uint64, error) } // EthermanInterface provides integration with L1 diff --git a/jsonrpc/wsconn.go b/jsonrpc/wsconn.go new file mode 100644 index 0000000000..359c711b16 --- /dev/null +++ b/jsonrpc/wsconn.go @@ -0,0 +1,46 @@ +package jsonrpc + +import ( + "sync" + + "github.com/gorilla/websocket" +) + +// concurrentWsConn is a wrapped web socket connection +// that provide methods to deal with concurrency +type concurrentWsConn struct { + wsConn *websocket.Conn + mutex *sync.Mutex +} + +// NewConcurrentWsConn creates a new instance of concurrentWsConn +func newConcurrentWsConn(wsConn *websocket.Conn) *concurrentWsConn { + return &concurrentWsConn{ + wsConn: wsConn, + mutex: &sync.Mutex{}, + } +} + +// ReadMessage reads a message from the inner web socket connection +func (c *concurrentWsConn) ReadMessage() (messageType int, p []byte, err error) { + return c.wsConn.ReadMessage() +} + +// WriteMessage writes a message to the inner web socket connection +func (c *concurrentWsConn) WriteMessage(messageType int, data []byte) error { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.wsConn.WriteMessage(messageType, data) +} + +// Close closes the inner web socket connection +func (c *concurrentWsConn) Close() error { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.wsConn.Close() +} + +// SetReadLimit sets the read limit to the inner web socket connection +func (c *concurrentWsConn) SetReadLimit(limit int64) { + c.wsConn.SetReadLimit(limit) +} diff --git a/log/log.go b/log/log.go index f8d26d08a4..11965b0d9d 100644 --- a/log/log.go +++ b/log/log.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "strings" + "sync/atomic" "github.com/0xPolygonHermez/zkevm-node" "github.com/hermeznetwork/tracerr" @@ -27,11 +28,12 @@ type Logger struct { } // root logger -var log *Logger +var log atomic.Pointer[Logger] func getDefaultLog() *Logger { - if log != nil { - return log + l := log.Load() + if 
l != nil { + return l } // default level: debug zapLogger, _, err := NewLogger(Config{ @@ -42,8 +44,8 @@ func getDefaultLog() *Logger { if err != nil { panic(err) } - log = &Logger{x: zapLogger} - return log + log.Store(&Logger{x: zapLogger}) + return log.Load() } // Init the logger with defined level. outputs defines the outputs where the @@ -56,7 +58,7 @@ func Init(cfg Config) { if err != nil { panic(err) } - log = &Logger{x: zapLogger} + log.Store(&Logger{x: zapLogger}) } // NewLogger creates the logger with defined level. outputs defines the outputs where the @@ -240,14 +242,14 @@ func Warnf(template string, args ...interface{}) { // Fatalf calls log.Fatalf on the root Logger. func Fatalf(template string, args ...interface{}) { args = appendStackTraceMaybeArgs(args) - getDefaultLog().Fatalf(template+" %s", args...) + getDefaultLog().Fatalf(template, args...) } // Errorf calls log.Errorf on the root logger and stores the error message into // the ErrorFile. func Errorf(template string, args ...interface{}) { args = appendStackTraceMaybeArgs(args) - getDefaultLog().Errorf(template+" %s", args...) + getDefaultLog().Errorf(template, args...) 
} // appendStackTraceMaybeKV will append the stacktrace to the KV diff --git a/merkletree/hashdb/hashdb.pb.go b/merkletree/hashdb/hashdb.pb.go index ecece0a6a3..06ba41809f 100644 --- a/merkletree/hashdb/hashdb.pb.go +++ b/merkletree/hashdb/hashdb.pb.go @@ -7,11 +7,12 @@ package hashdb import ( + reflect "reflect" + sync "sync" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" ) const ( diff --git a/merkletree/hashdb/hashdb_grpc.pb.go b/merkletree/hashdb/hashdb_grpc.pb.go index 41d28b5654..663bcf4812 100644 --- a/merkletree/hashdb/hashdb_grpc.pb.go +++ b/merkletree/hashdb/hashdb_grpc.pb.go @@ -8,6 +8,7 @@ package hashdb import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/pool/config.go b/pool/config.go index d5cb55a44b..9a9d17c2bb 100644 --- a/pool/config.go +++ b/pool/config.go @@ -9,7 +9,6 @@ import ( type Config struct { // FreeClaimGasLimit is the max gas allowed use to do a free claim FreeClaimGasLimit uint64 `mapstructure:"FreeClaimGasLimit"` - // IntervalToRefreshBlockedAddresses is the time it takes to sync the // blocked address list from db to memory IntervalToRefreshBlockedAddresses types.Duration `mapstructure:"IntervalToRefreshBlockedAddresses"` @@ -43,4 +42,34 @@ type Config struct { // FreeGasAddress is the default free gas address FreeGasAddress string `mapstructure:"FreeGasAddress"` + // EffectiveGasPrice is the config for the effective gas price calculation + EffectiveGasPrice EffectiveGasPriceCfg `mapstructure:"EffectiveGasPrice"` +} + +// EffectiveGasPriceCfg contains the configuration properties for the effective gas price +type EffectiveGasPriceCfg struct { + // Enabled is a flag to enable/disable the effective gas price + Enabled bool `mapstructure:"Enabled"` + + // L1GasPriceFactor is the 
percentage of the L1 gas price that will be used as the L2 min gas price + L1GasPriceFactor float64 `mapstructure:"L1GasPriceFactor"` + + // ByteGasCost is the gas cost per byte that is not 0 + ByteGasCost uint64 `mapstructure:"ByteGasCost"` + + // ZeroByteGasCost is the gas cost per byte that is 0 + ZeroByteGasCost uint64 `mapstructure:"ZeroByteGasCost"` + + // NetProfit is the profit margin to apply to the calculated breakEvenGasPrice + NetProfit float64 `mapstructure:"NetProfit"` + + // BreakEvenFactor is the factor to apply to the calculated breakevenGasPrice when comparing it with the gasPriceSigned of a tx + BreakEvenFactor float64 `mapstructure:"BreakEvenFactor"` + + // FinalDeviationPct is the max allowed deviation percentage BreakEvenGasPrice on re-calculation + FinalDeviationPct uint64 `mapstructure:"FinalDeviationPct"` + + // L2GasPriceSuggesterFactor is the factor to apply to L1 gas price to get the suggested L2 gas price used in the + // calculations when the effective gas price is disabled (testing/metrics purposes) + L2GasPriceSuggesterFactor float64 `mapstructure:"L2GasPriceSuggesterFactor"` } diff --git a/pool/config_test.go b/pool/config_test.go new file mode 100644 index 0000000000..c37eb483fd --- /dev/null +++ b/pool/config_test.go @@ -0,0 +1,63 @@ +package pool + +import ( + "testing" + + "github.com/0xPolygonHermez/zkevm-node/state" +) + +func TestIsWithinConstraints(t *testing.T) { + cfg := state.BatchConstraintsCfg{ + MaxCumulativeGasUsed: 500, + MaxKeccakHashes: 100, + MaxPoseidonHashes: 200, + MaxPoseidonPaddings: 150, + MaxMemAligns: 1000, + MaxArithmetics: 2000, + MaxBinaries: 3000, + MaxSteps: 4000, + } + + testCases := []struct { + desc string + counters state.ZKCounters + expected bool + }{ + { + desc: "All constraints within limits", + counters: state.ZKCounters{ + CumulativeGasUsed: 300, + UsedKeccakHashes: 50, + UsedPoseidonHashes: 100, + UsedPoseidonPaddings: 75, + UsedMemAligns: 500, + UsedArithmetics: 1000, + UsedBinaries: 2000, 
+ UsedSteps: 2000, + }, + expected: true, + }, + { + desc: "All constraints exceed limits", + counters: state.ZKCounters{ + CumulativeGasUsed: 600, + UsedKeccakHashes: 150, + UsedPoseidonHashes: 300, + UsedPoseidonPaddings: 200, + UsedMemAligns: 2000, + UsedArithmetics: 3000, + UsedBinaries: 4000, + UsedSteps: 5000, + }, + expected: false, + }, + } + + for _, tC := range testCases { + t.Run(tC.desc, func(t *testing.T) { + if got := cfg.IsWithinConstraints(tC.counters); got != tC.expected { + t.Errorf("Expected %v, got %v", tC.expected, got) + } + }) + } +} diff --git a/pool/effectivegasprice.go b/pool/effectivegasprice.go new file mode 100644 index 0000000000..d78b2c5408 --- /dev/null +++ b/pool/effectivegasprice.go @@ -0,0 +1,143 @@ +package pool + +import ( + "bytes" + "errors" + "math/big" + + "github.com/0xPolygonHermez/zkevm-node/state" +) + +var ( + // ErrEffectiveGasPriceEmpty happens when the effectiveGasPrice or gasPrice is nil or zero + ErrEffectiveGasPriceEmpty = errors.New("effectiveGasPrice or gasPrice cannot be nil or zero") + + // ErrEffectiveGasPriceIsZero happens when the calculated EffectiveGasPrice is zero + ErrEffectiveGasPriceIsZero = errors.New("effectiveGasPrice cannot be zero") +) + +// EffectiveGasPrice implements the effective gas prices calculations and checks +type EffectiveGasPrice struct { + cfg EffectiveGasPriceCfg + minGasPriceAllowed uint64 +} + +// NewEffectiveGasPrice creates and initializes an instance of EffectiveGasPrice +func NewEffectiveGasPrice(cfg EffectiveGasPriceCfg, minGasPriceAllowed uint64) *EffectiveGasPrice { + return &EffectiveGasPrice{ + cfg: cfg, + minGasPriceAllowed: minGasPriceAllowed, + } +} + +// IsEnabled return if effectiveGasPrice calculation is enabled +func (e *EffectiveGasPrice) IsEnabled() bool { + return e.cfg.Enabled +} + +// GetFinalDeviation return the value for the config parameter FinalDeviationPct +func (e *EffectiveGasPrice) GetFinalDeviation() uint64 { + return e.cfg.FinalDeviationPct +} + +// 
GetTxAndL2GasPrice return the tx gas price and l2 suggested gas price to use in egp calculations +// If egp is disabled we will use a "simulated" tx and l2 gas price, that is calculated using the L2GasPriceSuggesterFactor config param +func (e *EffectiveGasPrice) GetTxAndL2GasPrice(txGasPrice *big.Int, l1GasPrice uint64, l2GasPrice uint64) (egpTxGasPrice *big.Int, egpL2GasPrice uint64) { + if !e.cfg.Enabled { + // If egp is not enabled we use the L2GasPriceSuggesterFactor to calculate the "simulated" suggested L2 gas price + gp := new(big.Int).SetUint64(uint64(e.cfg.L2GasPriceSuggesterFactor * float64(l1GasPrice))) + return gp, gp.Uint64() + } else { + return txGasPrice, l2GasPrice + } +} + +// CalculateBreakEvenGasPrice calculates the break even gas price for a transaction +func (e *EffectiveGasPrice) CalculateBreakEvenGasPrice(rawTx []byte, txGasPrice *big.Int, txGasUsed uint64, l1GasPrice uint64) (*big.Int, error) { + const ( + // constants used in calculation of BreakEvenGasPrice + signatureBytesLength = 65 + effectivePercentageBytesLength = 1 + constBytesTx = signatureBytesLength + effectivePercentageBytesLength + ) + + if l1GasPrice == 0 { + return nil, ErrZeroL1GasPrice + } + + if txGasUsed == 0 { + // Returns tx.GasPrice as the breakEvenGasPrice + return txGasPrice, nil + } + + // Get L2 Min Gas Price + l2MinGasPrice := uint64(float64(l1GasPrice) * e.cfg.L1GasPriceFactor) + if l2MinGasPrice < e.minGasPriceAllowed { + l2MinGasPrice = e.minGasPriceAllowed + } + + txZeroBytes := uint64(bytes.Count(rawTx, []byte{0})) + txNonZeroBytes := uint64(len(rawTx)) - txZeroBytes + + // Calculate BreakEvenGasPrice + totalTxPrice := (txGasUsed * l2MinGasPrice) + + ((constBytesTx+txNonZeroBytes)*e.cfg.ByteGasCost+txZeroBytes*e.cfg.ZeroByteGasCost)*l1GasPrice + breakEvenGasPrice := new(big.Int).SetUint64(uint64(float64(totalTxPrice/txGasUsed) * e.cfg.NetProfit)) + + return breakEvenGasPrice, nil +} + +// CalculateEffectiveGasPrice calculates the final effective gas price for 
a tx +func (e *EffectiveGasPrice) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int, txGasUsed uint64, l1GasPrice uint64, l2GasPrice uint64) (*big.Int, error) { + breakEvenGasPrice, err := e.CalculateBreakEvenGasPrice(rawTx, txGasPrice, txGasUsed, l1GasPrice) + + if err != nil { + return nil, err + } + + bfL2GasPrice := new(big.Float).SetUint64(l2GasPrice) + bfTxGasPrice := new(big.Float).SetInt(txGasPrice) + + ratioPriority := new(big.Float).SetFloat64(1.0) + + if bfTxGasPrice.Cmp(bfL2GasPrice) == 1 { + //ratioPriority = (txGasPrice / l2GasPrice) + ratioPriority = new(big.Float).Quo(bfTxGasPrice, bfL2GasPrice) + } + + bfEffectiveGasPrice := new(big.Float).Mul(new(big.Float).SetInt(breakEvenGasPrice), ratioPriority) + + effectiveGasPrice := new(big.Int) + bfEffectiveGasPrice.Int(effectiveGasPrice) + + if effectiveGasPrice.Cmp(new(big.Int).SetUint64(0)) == 0 { + return nil, ErrEffectiveGasPriceIsZero + } + + return effectiveGasPrice, nil +} + +// CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage +func (e *EffectiveGasPrice) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { + const bits = 256 + var bitsBigInt = big.NewInt(bits) + + if effectiveGasPrice == nil || gasPrice == nil || + gasPrice.Cmp(big.NewInt(0)) == 0 || effectiveGasPrice.Cmp(big.NewInt(0)) == 0 { + return 0, ErrEffectiveGasPriceEmpty + } + + if gasPrice.Cmp(effectiveGasPrice) <= 0 { + return state.MaxEffectivePercentage, nil + } + + // Simulate Ceil with integer division + b := new(big.Int).Mul(effectiveGasPrice, bitsBigInt) + b = b.Add(b, gasPrice) + b = b.Sub(b, big.NewInt(1)) //nolint:gomnd + b = b.Div(b, gasPrice) + // At this point we have a percentage between 1-256, we need to sub 1 to have it between 0-255 (byte) + b = b.Sub(b, big.NewInt(1)) //nolint:gomnd + + return uint8(b.Uint64()), nil +} diff --git a/pool/effectivegasprice_test.go b/pool/effectivegasprice_test.go new file mode 100644 index 
0000000000..fc09976bb7 --- /dev/null +++ b/pool/effectivegasprice_test.go @@ -0,0 +1,271 @@ +package pool + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + minGasPriceAllowed = 10 +) + +var ( + egpCfg = EffectiveGasPriceCfg{ + Enabled: true, + L1GasPriceFactor: 0.25, + ByteGasCost: 16, + ZeroByteGasCost: 4, + NetProfit: 1, + BreakEvenFactor: 1.1, + FinalDeviationPct: 10, + L2GasPriceSuggesterFactor: 0.5, + } +) + +func TestCalculateEffectiveGasPricePercentage(t *testing.T) { + egp := NewEffectiveGasPrice(egpCfg, minGasPriceAllowed) + + testCases := []struct { + name string + breakEven *big.Int + gasPrice *big.Int + expectedValue uint8 + err error + }{ + + { + name: "Nil breakEven or gasPrice", + gasPrice: big.NewInt(1), + expectedValue: uint8(0), + err: ErrEffectiveGasPriceEmpty, + }, + { + name: "Zero breakEven or gasPrice", + breakEven: big.NewInt(1), + gasPrice: big.NewInt(0), + expectedValue: uint8(0), + err: ErrEffectiveGasPriceEmpty, + }, + { + name: "Both positive, gasPrice less than breakEven", + breakEven: big.NewInt(22000000000), + gasPrice: big.NewInt(11000000000), + expectedValue: uint8(255), + }, + { + name: "Both positive, gasPrice more than breakEven", + breakEven: big.NewInt(19800000000), + gasPrice: big.NewInt(22000000000), + expectedValue: uint8(230), + }, + { + name: "100% (255) effective percentage 1", + gasPrice: big.NewInt(22000000000), + breakEven: big.NewInt(22000000000), + expectedValue: 255, + }, + { + name: "100% (255) effective percentage 2", + gasPrice: big.NewInt(22000000000), + breakEven: big.NewInt(21999999999), + expectedValue: 255, + }, + { + name: "100% (255) effective percentage 3", + gasPrice: big.NewInt(22000000000), + breakEven: big.NewInt(21900000000), + expectedValue: 254, + }, + { + name: "50% (127) effective percentage", + gasPrice: big.NewInt(22000000000), + breakEven: big.NewInt(11000000000), + expectedValue: 127, + }, + { + name: "(40) effective percentage", + gasPrice: 
big.NewInt(1000), + breakEven: big.NewInt(157), + expectedValue: 40, + }, + { + name: "(1) effective percentage", + gasPrice: big.NewInt(1000), + breakEven: big.NewInt(1), + expectedValue: 0, + }, + { + name: "(2) effective percentage", + gasPrice: big.NewInt(1000), + breakEven: big.NewInt(4), + expectedValue: 1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual, err := egp.CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) + assert.Equal(t, tc.err, err) + if actual != 0 { + assert.Equal(t, tc.expectedValue, actual) + } else { + assert.Zero(t, tc.expectedValue) + } + }) + } +} + +func TestCalculateBreakEvenGasPrice(t *testing.T) { + egp := NewEffectiveGasPrice(egpCfg, minGasPriceAllowed) + + testCases := []struct { + name string + rawTx []byte + txGasPrice *big.Int + txGasUsed uint64 + l1GasPrice uint64 + expectedValue *big.Int + err error + }{ + + { + name: "Test empty tx", + rawTx: []byte{}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 100, + expectedValue: new(big.Int).SetUint64(553), + }, + { + name: "Test l1GasPrice=0", + rawTx: []byte{}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 0, + expectedValue: new(big.Int).SetUint64(553), + err: ErrZeroL1GasPrice, + }, + { + name: "Test txGasUsed=0", + rawTx: []byte{}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 0, + l1GasPrice: 100, + expectedValue: new(big.Int).SetUint64(1000), + }, + { + name: "Test tx len=10, zeroByte=0", + rawTx: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 100, + expectedValue: new(big.Int).SetUint64(633), + }, + { + name: "Test tx len=10, zeroByte=10", + rawTx: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 100, + expectedValue: new(big.Int).SetUint64(573), + }, + { + name: "Test tx len=10, zeroByte=5", + rawTx: []byte{1, 0, 2, 0, 3, 
0, 4, 0, 5, 0}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 100, + expectedValue: new(big.Int).SetUint64(603), + }, + { + name: "Test tx len=10, zeroByte=5 minGasPrice", + rawTx: []byte{1, 0, 2, 0, 3, 0, 4, 0, 5, 0}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 10, + expectedValue: new(big.Int).SetUint64(67), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual, err := egp.CalculateBreakEvenGasPrice(tc.rawTx, tc.txGasPrice, tc.txGasUsed, tc.l1GasPrice) + assert.Equal(t, tc.err, err) + if err == nil { + if actual.Cmp(new(big.Int).SetUint64(0)) != 0 { + assert.Equal(t, tc.expectedValue, actual) + } else { + assert.Zero(t, tc.expectedValue) + } + } + }) + } +} + +func TestCalculateEffectiveGasPrice(t *testing.T) { + egp := NewEffectiveGasPrice(egpCfg, minGasPriceAllowed) + + testCases := []struct { + name string + rawTx []byte + txGasPrice *big.Int + txGasUsed uint64 + l1GasPrice uint64 + l2GasPrice uint64 + expectedValue *big.Int + err error + }{ + { + name: "Test tx len=10, zeroByte=0", + rawTx: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 100, + l2GasPrice: 1000, + expectedValue: new(big.Int).SetUint64(633), + }, + { + name: "Test tx len=10, zeroByte=10", + rawTx: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 100, + l2GasPrice: 500, + expectedValue: new(big.Int).SetUint64(573 * 2), + }, + { + name: "Test tx len=10, zeroByte=5", + rawTx: []byte{1, 0, 2, 0, 3, 0, 4, 0, 5, 0}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 100, + l2GasPrice: 250, + expectedValue: new(big.Int).SetUint64(603 * 4), + }, + { + name: "Test tx len=10, zeroByte=5 minGasPrice", + rawTx: []byte{1, 0, 2, 0, 3, 0, 4, 0, 5, 0}, + txGasPrice: new(big.Int).SetUint64(1000), + txGasUsed: 200, + l1GasPrice: 10, + l2GasPrice: 1100, + 
expectedValue: new(big.Int).SetUint64(67), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual, err := egp.CalculateEffectiveGasPrice(tc.rawTx, tc.txGasPrice, tc.txGasUsed, tc.l1GasPrice, tc.l2GasPrice) + assert.Equal(t, tc.err, err) + if err == nil { + if actual.Cmp(new(big.Int).SetUint64(0)) != 0 { + assert.Equal(t, tc.expectedValue, actual) + } else { + assert.Zero(t, tc.expectedValue) + } + } + }) + } +} diff --git a/pool/errors.go b/pool/errors.go index 14bcb8b08e..62963ba8bd 100644 --- a/pool/errors.go +++ b/pool/errors.go @@ -64,4 +64,16 @@ var ( // ErrGasPrice is returned if the transaction has specified lower gas price than the minimum allowed. ErrGasPrice = errors.New("gas price too low") + + // ErrReceivedZeroL1GasPrice is returned if the L1 gas price is 0. + ErrReceivedZeroL1GasPrice = errors.New("received L1 gas price 0") + + // ErrInvalidIP is returned if the IP address is invalid. + ErrInvalidIP = errors.New("invalid IP address") + + // ErrOutOfCounters is returned if the pool is out of counters. + ErrOutOfCounters = errors.New("out of counters") + + // ErrZeroL1GasPrice is returned if the L1 gas price is 0. 
+ ErrZeroL1GasPrice = errors.New("L1 gas price 0") ) diff --git a/pool/interfaces.go b/pool/interfaces.go index 81fa0600d6..4a6116deb9 100644 --- a/pool/interfaces.go +++ b/pool/interfaces.go @@ -25,6 +25,7 @@ type storage interface { IsTxPending(ctx context.Context, hash common.Hash) (bool, error) SetGasPrices(ctx context.Context, l2GasPrice uint64, l1GasPrice uint64) error DeleteGasPricesHistoryOlderThan(ctx context.Context, date time.Time) error + DeleteFailedTransactionsOlderThan(ctx context.Context, date time.Time) error UpdateTxsStatus(ctx context.Context, updateInfo []TxStatusUpdateInfo) error UpdateTxStatus(ctx context.Context, updateInfo TxStatusUpdateInfo) error UpdateTxWIPStatus(ctx context.Context, hash common.Hash, isWIP bool) error diff --git a/pool/pgpoolstorage/pgpoolstorage.go b/pool/pgpoolstorage/pgpoolstorage.go index 51d5aab1ba..9ba5a91ae9 100644 --- a/pool/pgpoolstorage/pgpoolstorage.go +++ b/pool/pgpoolstorage/pgpoolstorage.go @@ -415,6 +415,16 @@ func (p *PostgresPoolStorage) DeleteTransactionsByHashes(ctx context.Context, ha return nil } +// DeleteFailedTransactionsOlderThan deletes all failed transactions older than the given date +func (p *PostgresPoolStorage) DeleteFailedTransactionsOlderThan(ctx context.Context, date time.Time) error { + sql := `DELETE FROM pool.transaction WHERE status = 'failed' and received_at < $1` + + if _, err := p.db.Exec(ctx, sql, date); err != nil { + return err + } + return nil +} + // SetGasPrices sets the latest l2 and l1 gas prices func (p *PostgresPoolStorage) SetGasPrices(ctx context.Context, l2GasPrice, l1GasPrice uint64) error { sql := "INSERT INTO pool.gas_price (price, l1_price, timestamp) VALUES ($1, $2, $3)" diff --git a/pool/pool.go b/pool/pool.go index 0157b2c0ab..2adfdcfd0a 100644 --- a/pool/pool.go +++ b/pool/pool.go @@ -18,11 +18,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -const ( - // BridgeClaimMethodSignature for tracking BridgeClaimMethodSignature method - 
BridgeClaimMethodSignature = "0x2cffd02e" -) - var ( // ErrNotFound indicates an object has not been found for the search criteria used ErrNotFound = errors.New("object not found") @@ -34,9 +29,16 @@ var ( // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced // with a different one without the required price bump. ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") - // FreeClaimAddress is the default free gas address FreeClaimAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" + + // ErrEffectiveGasPriceGasPriceTooLow the tx gas price is lower than breakEvenGasPrice and lower than L2GasPrice + ErrEffectiveGasPriceGasPriceTooLow = errors.New("effective gas price: gas price too low") +) + +const ( + // BridgeClaimMethodSignature for tracking BridgeClaimMethodSignature method + BridgeClaimMethodSignature = "0x2cffd02e" ) // Pool is an implementation of the Pool interface @@ -46,6 +48,7 @@ type Pool struct { state stateInterface chainID uint64 cfg Config + batchConstraintsCfg state.BatchConstraintsCfg blockedAddresses sync.Map minSuggestedGasPrice *big.Int minSuggestedGasPriceMux *sync.RWMutex @@ -54,6 +57,7 @@ type Pool struct { gasPrices GasPrices gasPricesMux *sync.RWMutex l2BridgeAddr common.Address + effectiveGasPrice *EffectiveGasPrice } type preExecutionResponse struct { @@ -72,32 +76,26 @@ type GasPrices struct { } // NewPool creates and initializes an instance of Pool -func NewPool(cfg Config, s storage, st stateInterface, l2BridgeAddr common.Address, chainID uint64, eventLog *event.EventLog) *Pool { +func NewPool(cfg Config, batchConstraintsCfg state.BatchConstraintsCfg, s storage, st stateInterface, l2BridgeAddr common.Address, chainID uint64, eventLog *event.EventLog) *Pool { startTimestamp := time.Now() p := &Pool{ cfg: cfg, + batchConstraintsCfg: batchConstraintsCfg, startTimestamp: startTimestamp, storage: s, state: st, chainID: chainID, blockedAddresses: sync.Map{}, minSuggestedGasPriceMux: 
new(sync.RWMutex), + minSuggestedGasPrice: big.NewInt(int64(cfg.DefaultMinGasPriceAllowed)), eventLog: eventLog, gasPrices: GasPrices{0, 0}, gasPricesMux: new(sync.RWMutex), l2BridgeAddr: l2BridgeAddr, + effectiveGasPrice: NewEffectiveGasPrice(cfg.EffectiveGasPrice, cfg.DefaultMinGasPriceAllowed), } - FreeClaimAddress = cfg.FreeGasAddress - - p.refreshBlockedAddresses() - go func(cfg *Config, p *Pool) { - for { - time.Sleep(cfg.IntervalToRefreshBlockedAddresses.Duration) - p.refreshBlockedAddresses() - } - }(&cfg, p) - + p.refreshGasPrices() go func(cfg *Config, p *Pool) { for { p.refreshGasPrices() @@ -121,6 +119,19 @@ func (p *Pool) refreshGasPrices() { p.gasPricesMux.Unlock() } +// StartRefreshingBlockedAddressesPeriodically will make this instance of the pool +// to check periodically(accordingly to the configuration) for updates regarding +// the blocked address and update the in memory blocked addresses +func (p *Pool) StartRefreshingBlockedAddressesPeriodically() { + p.refreshBlockedAddresses() + go func(p *Pool) { + for { + time.Sleep(p.cfg.IntervalToRefreshBlockedAddresses.Duration) + p.refreshBlockedAddresses() + } + }(p) +} + // refreshBlockedAddresses refreshes the list of blocked addresses for the provided instance of pool func (p *Pool) refreshBlockedAddresses() { blockedAddresses, err := p.storage.GetAllAddressesBlocked(context.Background()) @@ -207,7 +218,7 @@ func (p *Pool) StoreTx(ctx context.Context, tx types.Transaction, ip string, isW log.Errorf("error adding event: %v", err) } // Do not add tx to the pool - return fmt.Errorf("out of counters") + return ErrOutOfCounters } else if preExecutionResponse.isOOG { event := &event.Event{ ReceivedAt: time.Now(), @@ -225,12 +236,69 @@ func (p *Pool) StoreTx(ctx context.Context, tx types.Transaction, ip string, isW } } + gasPrices, err := p.GetGasPrices(ctx) + if err != nil { + return err + } + + err = p.ValidateBreakEvenGasPrice(ctx, tx, preExecutionResponse.txResponse.GasUsed, gasPrices) + if err != 
nil { + return err + } + poolTx := NewTransaction(tx, ip, isWIP, p) poolTx.ZKCounters = preExecutionResponse.usedZkCounters return p.storage.AddTx(ctx, *poolTx) } +// ValidateBreakEvenGasPrice validates the effective gas price +func (p *Pool) ValidateBreakEvenGasPrice(ctx context.Context, tx types.Transaction, preExecutionGasUsed uint64, gasPrices GasPrices) error { + // Get the tx gas price we will use in the egp calculation. If egp is disabled we will use a "simulated" tx gas price + txGasPrice, _ := p.effectiveGasPrice.GetTxAndL2GasPrice(tx.GasPrice(), gasPrices.L1GasPrice, gasPrices.L2GasPrice) + + breakEvenGasPrice, err := p.effectiveGasPrice.CalculateBreakEvenGasPrice(tx.Data(), txGasPrice, preExecutionGasUsed, gasPrices.L1GasPrice) + if err != nil { + if p.cfg.EffectiveGasPrice.Enabled { + log.Errorf("error calculating BreakEvenGasPrice: %v", err) + return err + } else { + log.Warnf("EffectiveGasPrice is disabled, but failed to calculate BreakEvenGasPrice: %s", err) + return nil + } + } + + reject := false + loss := new(big.Int).SetUint64(0) + + tmpFactor := new(big.Float).Mul(new(big.Float).SetInt(breakEvenGasPrice), new(big.Float).SetFloat64(p.cfg.EffectiveGasPrice.BreakEvenFactor)) + breakEvenGasPriceWithFactor := new(big.Int) + tmpFactor.Int(breakEvenGasPriceWithFactor) + + if breakEvenGasPriceWithFactor.Cmp(txGasPrice) == 1 { // breakEvenGasPriceWithMargin > txGasPrice + // check against L2GasPrice now + L2GasPrice := big.NewInt(0).SetUint64(gasPrices.L2GasPrice) + if txGasPrice.Cmp(L2GasPrice) == -1 { // txGasPrice < gasPrices.L2GasPrice + // reject tx + reject = true + } else { + // accept loss + loss = loss.Sub(breakEvenGasPriceWithFactor, txGasPrice) + } + } + + log.Infof("egp-log: txGasPrice(): %v, breakEvenGasPrice: %v, breakEvenGasPriceWithFactor: %v, gasUsed: %v, reject: %t, loss: %v, L1GasPrice: %d, L2GasPrice: %d, Enabled: %t, tx: %s", + txGasPrice, breakEvenGasPrice, breakEvenGasPriceWithFactor, preExecutionGasUsed, reject, loss, 
gasPrices.L1GasPrice, gasPrices.L2GasPrice, p.cfg.EffectiveGasPrice.Enabled, tx.Hash().String()) + + // Reject transaction if EffectiveGasPrice is enabled + if p.cfg.EffectiveGasPrice.Enabled && reject { + log.Infof("reject tx with gasPrice lower than L2GasPrice, tx: %s", tx.Hash().String()) + return ErrEffectiveGasPriceGasPriceTooLow + } + + return nil +} + // preExecuteTx executes a transaction to calculate its zkCounters func (p *Pool) preExecuteTx(ctx context.Context, tx types.Transaction) (preExecutionResponse, error) { response := preExecutionResponse{usedZkCounters: state.ZKCounters{}, isOOC: false, isOOG: false, isReverted: false} @@ -238,15 +306,35 @@ func (p *Pool) preExecuteTx(ctx context.Context, tx types.Transaction) (preExecu // TODO: Add effectivePercentage = 0xFF to the request (factor of 1) when gRPC message is updated processBatchResponse, err := p.state.PreProcessTransaction(ctx, &tx, nil) if err != nil { - return response, err + isOOC := executor.IsROMOutOfCountersError(executor.RomErrorCode(err)) + isOOG := errors.Is(err, runtime.ErrOutOfGas) + if !isOOC && !isOOG { + return response, err + } else { + response.isOOC = isOOC + response.isOOG = isOOG + if processBatchResponse.Responses != nil && len(processBatchResponse.Responses) > 0 { + response.usedZkCounters = processBatchResponse.UsedZkCounters + response.txResponse = processBatchResponse.Responses[0] + } + return response, nil + } } if processBatchResponse.Responses != nil && len(processBatchResponse.Responses) > 0 { errorToCheck := processBatchResponse.Responses[0].RomError - response.isReverted = errors.Is(errorToCheck, runtime.ErrExecutionReverted) response.isExecutorLevelError = processBatchResponse.IsExecutorLevelError - response.isOOC = executor.IsROMOutOfCountersError(executor.RomErrorCode(errorToCheck)) - response.isOOG = errors.Is(errorToCheck, runtime.ErrOutOfGas) + if errorToCheck != nil { + response.isReverted = errors.Is(errorToCheck, runtime.ErrExecutionReverted) + 
response.isOOC = executor.IsROMOutOfCountersError(executor.RomErrorCode(errorToCheck)) + response.isOOG = errors.Is(errorToCheck, runtime.ErrOutOfGas) + } else { + if !p.batchConstraintsCfg.IsWithinConstraints(processBatchResponse.UsedZkCounters) { + response.isOOC = true + log.Errorf("OutOfCounters Error (Node level) for tx: %s", tx.Hash().String()) + } + } + response.usedZkCounters = processBatchResponse.UsedZkCounters response.txResponse = processBatchResponse.Responses[0] } @@ -315,6 +403,11 @@ func (p *Pool) IsTxPending(ctx context.Context, hash common.Hash) (bool, error) } func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { + // Make sure the IP is valid. + if poolTx.IP != "" && !IsValidIP(poolTx.IP) { + return ErrInvalidIP + } + // Make sure the transaction is signed properly. if err := state.CheckSignature(poolTx.Transaction); err != nil { return ErrInvalidSender @@ -331,6 +424,11 @@ func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { return ErrTxTypeNotSupported } + // check Pre EIP155 txs signature + if txChainID == 0 && !state.IsPreEIP155Tx(poolTx.Transaction) { + return ErrInvalidSender + } + // gets tx sender for validations from, err := state.GetSender(poolTx.Transaction) if err != nil { @@ -358,11 +456,13 @@ func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { lastL2Block, err := p.state.GetLastL2Block(ctx, nil) if err != nil { + log.Errorf("failed to load last l2 block while adding tx to the pool", err) return err } currentNonce, err := p.state.GetNonce(ctx, from, lastL2Block.Root()) if err != nil { + log.Errorf("failed to get nonce while adding tx to the pool", err) return err } // Ensure the transaction adheres to nonce ordering @@ -391,6 +491,7 @@ func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { if p.cfg.GlobalQueue > 0 { txCount, err := p.storage.CountTransactionsByStatus(ctx, TxStatusPending) if err != nil { + log.Errorf("failed to count pool txs by 
status pending while adding tx to the pool", err) return err } if txCount >= p.cfg.GlobalQueue { @@ -404,6 +505,7 @@ func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { gasPriceCmp := poolTx.GasPrice().Cmp(p.minSuggestedGasPrice) p.minSuggestedGasPriceMux.RUnlock() if gasPriceCmp == -1 { + log.Debugf("low gas price: minSuggestedGasPrice %v got %v", p.minSuggestedGasPrice, poolTx.GasPrice()) return ErrGasPrice } } @@ -412,6 +514,7 @@ func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { // cost == V + GP * GL balance, err := p.state.GetBalance(ctx, from, lastL2Block.Root()) if err != nil { + log.Errorf("failed to get balance for account %v while adding tx to the pool", from.String(), err) return err } @@ -432,6 +535,7 @@ func (p *Pool) validateTx(ctx context.Context, poolTx Transaction) error { // if the new one has a price bump oldTxs, err := p.storage.GetTxsByFromAndNonce(ctx, from, poolTx.Nonce()) if err != nil { + log.Errorf("failed to txs for the same account and nonce while adding tx to the pool", err) return err } @@ -546,13 +650,13 @@ func (p *Pool) GetDefaultMinGasPriceAllowed() uint64 { return p.cfg.DefaultMinGasPriceAllowed } -// GetL1GasPrice returns the L1 gas price -func (p *Pool) GetL1GasPrice() uint64 { +// GetL1AndL2GasPrice returns the L1 and L2 gas price from memory struct +func (p *Pool) GetL1AndL2GasPrice() (uint64, uint64) { p.gasPricesMux.RLock() gasPrices := p.gasPrices p.gasPricesMux.RUnlock() - return gasPrices.L1GasPrice + return gasPrices.L1GasPrice, gasPrices.L2GasPrice } const ( diff --git a/pool/pool_test.go b/pool/pool_test.go index bba01edcff..f9448fa2a4 100644 --- a/pool/pool_test.go +++ b/pool/pool_test.go @@ -38,7 +38,7 @@ import ( ) const ( - forkID5 = 5 + forkID6 = 6 senderPrivateKey = "0x28b2b0318721be8c8339199172cd7cc8f5e273800a35616ec893083a4b32c02e" senderAddress = "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D" ) @@ -65,11 +65,34 @@ var ( IntervalToRefreshGasPrices: 
cfgTypes.NewDuration(5 * time.Second), AccountQueue: 15, GlobalQueue: 20, + EffectiveGasPrice: pool.EffectiveGasPriceCfg{ + Enabled: true, + L1GasPriceFactor: 0.25, + ByteGasCost: 16, + ZeroByteGasCost: 4, + NetProfit: 1, + BreakEvenFactor: 1.1, + FinalDeviationPct: 10, + L2GasPriceSuggesterFactor: 0.5, + }, } gasPrice = big.NewInt(1000000000) l1GasPrice = big.NewInt(1000000000000) gasLimit = uint64(21000) chainID = big.NewInt(1337) + bc = state.BatchConstraintsCfg{ + MaxTxsPerBatch: 300, + MaxBatchBytesSize: 120000, + MaxCumulativeGasUsed: 30000000, + MaxKeccakHashes: 2145, + MaxPoseidonHashes: 252357, + MaxPoseidonPaddings: 135191, + MaxMemAligns: 236585, + MaxArithmetics: 236585, + MaxBinaries: 473170, + MaxSteps: 7570538, + } + ip = "101.1.50.20" ) func TestMain(m *testing.M) { @@ -82,6 +105,83 @@ func TestMain(m *testing.M) { os.Exit(code) } +type testData struct { + pool *pool.Pool + st *state.State + + stateSqlDB *pgxpool.Pool + poolSqlDB *pgxpool.Pool +} + +func Test_AddTxEGPAceptedBecauseGasPriceIsTheSuggested(t *testing.T) { + ctx := context.Background() + + data := prepareToExecuteTx(t, chainID.Uint64()) + defer data.stateSqlDB.Close() //nolint:gosec,errcheck + defer data.poolSqlDB.Close() //nolint:gosec,errcheck + + b := make([]byte, cfg.MaxTxDataBytesSize-20) + to := common.HexToAddress(senderAddress) + gasPrice := big.NewInt(1000000000) + gasLimitForThisTx := uint64(21000) + uint64(16)*uint64(len(b)) + tx := ethTypes.NewTransaction(0, to, big.NewInt(0), gasLimitForThisTx, gasPrice, b) + + // GetAuth configures and returns an auth object. 
+ auth, err := operations.GetAuth(senderPrivateKey, chainID.Uint64()) + require.NoError(t, err) + signedTx, err := auth.Signer(auth.From, tx) + require.NoError(t, err) + + err = data.pool.AddTx(ctx, *signedTx, ip) + require.NoError(t, err) +} + +func Test_EGPValidateEffectiveGasPrice(t *testing.T) { + tests := []struct { + name string + egpEnabled bool + gasPriceTx *big.Int + preExecutionGasUsed uint64 + gasPrices pool.GasPrices + expectedError error + }{ + { + name: "Reject transaction if below break-even and below current estimated L2 gas price", + egpEnabled: true, + gasPriceTx: big.NewInt(1000000000), + preExecutionGasUsed: uint64(21000) * 2000, + gasPrices: pool.GasPrices{ + L1GasPrice: uint64(1000000000000), + L2GasPrice: uint64(1000000000 + 1), + }, + expectedError: pool.ErrEffectiveGasPriceGasPriceTooLow, + }, + { + name: "Accept transaction if below break-even and below current estimated L2 gas price if EGP is disabled", + egpEnabled: false, + gasPriceTx: big.NewInt(1000000000), + preExecutionGasUsed: uint64(21000) * 2000, + gasPrices: pool.GasPrices{ + L1GasPrice: uint64(1000000000000), + L2GasPrice: uint64(1000000000 + 1), + }, + expectedError: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg.EffectiveGasPrice.Enabled = tt.egpEnabled + data := prepareToExecuteTx(t, chainID.Uint64()) + dataLen := cfg.MaxTxDataBytesSize - 20 + signedTx := createSignedTx(t, dataLen, tt.gasPriceTx, uint64(21000)+uint64(16)*uint64(dataLen)) + + err := data.pool.ValidateBreakEvenGasPrice(context.Background(), *signedTx, tt.preExecutionGasUsed, tt.gasPrices) + require.ErrorIs(t, err, tt.expectedError) + }) + } +} + func Test_AddTx(t *testing.T) { initOrResetDB(t) @@ -119,7 +219,7 @@ func Test_AddTx(t *testing.T) { require.NoError(t, err) const chainID = 2576980377 - p := setupPool(t, cfg, s, st, chainID, ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID, ctx, eventLog) tx := new(ethTypes.Transaction) expectedTxEncoded := 
"0xf86880843b9aca008252089400000000000000000000000000000000000000008080850133333355a03ee24709870c8dbc67884c9c8acb864c1aceaaa7332b9a3db0d7a5d7c68eb8e4a0302980b070f5e3ffca3dc27b07daf69d66ab27d4df648e0b3ed059cf23aa168d" @@ -127,7 +227,7 @@ func Test_AddTx(t *testing.T) { require.NoError(t, err) tx.UnmarshalBinary(b) //nolint:gosec,errcheck - err = p.AddTx(ctx, *tx, "") + err = p.AddTx(ctx, *tx, ip) require.NoError(t, err) rows, err := poolSqlDB.Query(ctx, "SELECT hash, encoded, decoded, status, used_steps FROM pool.transaction") @@ -200,7 +300,7 @@ func Test_AddTx_OversizedData(t *testing.T) { require.NoError(t, err) const chainID = 2576980377 - p := pool.NewPool(cfg, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID, eventLog) + p := pool.NewPool(cfg, bc, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID, eventLog) b := make([]byte, cfg.MaxTxBytesSize+1) to := common.HexToAddress(operations.DefaultSequencerAddress) @@ -212,7 +312,7 @@ func Test_AddTx_OversizedData(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.EqualError(t, err, pool.ErrOversizedData.Error()) } @@ -266,17 +366,17 @@ func Test_AddPreEIP155Tx(t *testing.T) { require.NoError(t, err) const chainID = 2576980377 - p := setupPool(t, cfg, s, st, chainID, ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID, ctx, eventLog) batchL2Data := "0xe580843b9aca00830186a0941275fbb540c8efc58b812ba83b0d0b8b9917ae98808464fbb77c6b39bdc5f8e458aba689f2a1ff8c543a94e4817bda40f3fe34080c4ab26c1e3c2fc2cda93bc32f0a79940501fd505dcf48d94abfde932ebf1417f502cb0d9de81bff" b, err := hex.DecodeHex(batchL2Data) require.NoError(t, err) - txs, _, _, err := state.DecodeTxs(b, forkID5) + txs, _, _, err := state.DecodeTxs(b, forkID6) require.NoError(t, err) tx := txs[0] - err = p.AddTx(ctx, tx, "") + err = p.AddTx(ctx, tx, ip) require.NoError(t, err) rows, 
err := poolSqlDB.Query(ctx, "SELECT hash, encoded, decoded, status FROM pool.transaction") @@ -335,7 +435,7 @@ func Test_GetPendingTxs(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) const txsCount = 10 const limit = 5 @@ -351,7 +451,7 @@ func Test_GetPendingTxs(t *testing.T) { tx := ethTypes.NewTransaction(uint64(i), common.Address{}, big.NewInt(10), gasLimit, gasPrice, []byte{}) signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) } @@ -395,7 +495,7 @@ func Test_GetPendingTxsZeroPassed(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) const txsCount = 10 const limit = 0 @@ -411,7 +511,7 @@ func Test_GetPendingTxsZeroPassed(t *testing.T) { tx := ethTypes.NewTransaction(uint64(i), common.Address{}, big.NewInt(10), gasLimit, gasPrice, []byte{}) signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) } @@ -455,7 +555,7 @@ func Test_GetTopPendingTxByProfitabilityAndZkCounters(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) const txsCount = 10 @@ -470,7 +570,7 @@ func Test_GetTopPendingTxByProfitabilityAndZkCounters(t *testing.T) { tx := ethTypes.NewTransaction(uint64(i), common.Address{}, big.NewInt(10), gasLimit, big.NewInt(gasPrice.Int64()+int64(i)), []byte{}) signedTx, err := auth.Signer(auth.From, tx) 
require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) } @@ -515,7 +615,7 @@ func Test_UpdateTxsStatus(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(senderPrivateKey, "0x")) require.NoError(t, err) @@ -526,13 +626,13 @@ func Test_UpdateTxsStatus(t *testing.T) { tx1 := ethTypes.NewTransaction(uint64(0), common.Address{}, big.NewInt(10), gasLimit, gasPrice, []byte{}) signedTx1, err := auth.Signer(auth.From, tx1) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx1, "") + err = p.AddTx(ctx, *signedTx1, ip) require.NoError(t, err) tx2 := ethTypes.NewTransaction(uint64(1), common.Address{}, big.NewInt(10), gasLimit, gasPrice, []byte{}) signedTx2, err := auth.Signer(auth.From, tx2) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx2, "") + err = p.AddTx(ctx, *signedTx2, ip) require.NoError(t, err) expectedFailedReason := "failed" @@ -557,10 +657,10 @@ func Test_UpdateTxsStatus(t *testing.T) { var count int rows, err := poolSqlDB.Query(ctx, "SELECT status, failed_reason FROM pool.transaction WHERE hash = ANY($1)", []string{signedTx1.Hash().String(), signedTx2.Hash().String()}) - defer rows.Close() // nolint:staticcheck if err != nil { t.Error(err) } + defer rows.Close() // nolint:staticcheck var state, failedReason string for rows.Next() { count++ @@ -606,7 +706,7 @@ func Test_UpdateTxStatus(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(senderPrivateKey, "0x")) require.NoError(t, err) @@ -616,7 +716,7 @@ func 
Test_UpdateTxStatus(t *testing.T) { tx := ethTypes.NewTransaction(uint64(0), common.Address{}, big.NewInt(10), gasLimit, gasPrice, []byte{}) signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - if err := p.AddTx(ctx, *signedTx, ""); err != nil { + if err := p.AddTx(ctx, *signedTx, ip); err != nil { t.Error(err) } expectedFailedReason := "failed" @@ -649,7 +749,7 @@ func Test_SetAndGetGasPrice(t *testing.T) { require.NoError(t, err) eventLog := event.NewEventLog(event.Config{}, eventStorage) - p := pool.NewPool(cfg, s, nil, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID.Uint64(), eventLog) + p := pool.NewPool(cfg, bc, s, nil, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID.Uint64(), eventLog) nBig, err := rand.Int(rand.Reader, big.NewInt(0).SetUint64(math.MaxUint64)) require.NoError(t, err) @@ -674,7 +774,7 @@ func TestDeleteGasPricesHistoryOlderThan(t *testing.T) { require.NoError(t, err) eventLog := event.NewEventLog(event.Config{}, eventStorage) - p := pool.NewPool(cfg, s, nil, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID.Uint64(), eventLog) + p := pool.NewPool(cfg, bc, s, nil, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID.Uint64(), eventLog) ctx := context.Background() @@ -743,7 +843,7 @@ func TestGetPendingTxSince(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) const txsCount = 10 @@ -763,7 +863,7 @@ func TestGetPendingTxSince(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) txsAddedTime = append(txsAddedTime, time.Now()) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) txsAddedHashes = append(txsAddedHashes, signedTx.Hash()) time.Sleep(1 * time.Second) @@ -835,7 +935,7 
@@ func Test_DeleteTransactionsByHashes(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(senderPrivateKey, "0x")) require.NoError(t, err) @@ -846,13 +946,13 @@ func Test_DeleteTransactionsByHashes(t *testing.T) { tx1 := ethTypes.NewTransaction(uint64(0), common.Address{}, big.NewInt(10), gasLimit, gasPrice, []byte{}) signedTx1, err := auth.Signer(auth.From, tx1) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx1, "") + err = p.AddTx(ctx, *signedTx1, ip) require.NoError(t, err) tx2 := ethTypes.NewTransaction(uint64(1), common.Address{}, big.NewInt(10), gasLimit, gasPrice, []byte{}) signedTx2, err := auth.Signer(auth.From, tx2) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx2, "") + err = p.AddTx(ctx, *signedTx2, ip) require.NoError(t, err) err = p.DeleteTransactionsByHashes(ctx, []common.Hash{signedTx1.Hash(), signedTx2.Hash()}) @@ -924,12 +1024,12 @@ func Test_TryAddIncompatibleTxs(t *testing.T) { require.NoError(t, err) chainIdOver64Bits := big.NewInt(0).SetUint64(math.MaxUint64) - chainIdOver64Bits = chainIdOver64Bits.Add(chainIdOver64Bits, big.NewInt(1)) + chainIdOver64Bits = chainIdOver64Bits.Add(chainIdOver64Bits, big.NewInt(2)) authChainIdOver64Bits, err := bind.NewKeyedTransactorWithChainID(privateKey, chainIdOver64Bits) require.NoError(t, err) bigIntOver256Bits, _ := big.NewInt(0).SetString(encoding.MaxUint256StrNumber, encoding.Base10) - bigIntOver256Bits = bigIntOver256Bits.Add(bigIntOver256Bits, big.NewInt(1)) + bigIntOver256Bits = bigIntOver256Bits.Add(bigIntOver256Bits, big.NewInt(2)) testCases := []testCase{ { @@ -985,8 +1085,8 @@ func Test_TryAddIncompatibleTxs(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { incompatibleTx := testCase.createIncompatibleTx() 
- p := setupPool(t, cfg, s, st, incompatibleTx.ChainId().Uint64(), ctx, eventLog) - err = p.AddTx(ctx, incompatibleTx, "") + p := setupPool(t, cfg, bc, s, st, incompatibleTx.ChainId().Uint64(), ctx, eventLog) + err = p.AddTx(ctx, incompatibleTx, ip) assert.Equal(t, testCase.expectedError, err) }) } @@ -994,7 +1094,7 @@ func Test_TryAddIncompatibleTxs(t *testing.T) { func newState(sqlDB *pgxpool.Pool, eventLog *event.EventLog) *state.State { ctx := context.Background() - stateDb := state.NewPostgresStorage(sqlDB) + stateDb := state.NewPostgresStorage(state.Config{}, sqlDB) zkProverURI := testutils.GetEnv("ZKPROVER_URI", "localhost") executorServerConfig := executor.Config{URI: fmt.Sprintf("%s:50071", zkProverURI), MaxGRPCMessageSize: 100000000} @@ -1050,7 +1150,7 @@ func Test_AddTxWithIntrinsicGasTooLow(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(senderPrivateKey, "0x")) require.NoError(t, err) @@ -1069,7 +1169,7 @@ func Test_AddTxWithIntrinsicGasTooLow(t *testing.T) { }) signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.Error(t, err) assert.Equal(t, err.Error(), pool.ErrIntrinsicGas.Error()) @@ -1083,7 +1183,7 @@ func Test_AddTxWithIntrinsicGasTooLow(t *testing.T) { }) signedTx, err = auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.Error(t, err) assert.Equal(t, err.Error(), pool.ErrIntrinsicGas.Error()) @@ -1097,7 +1197,7 @@ func Test_AddTxWithIntrinsicGasTooLow(t *testing.T) { }) signedTx, err = auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) tx = 
ethTypes.NewTx(ðTypes.LegacyTx{ @@ -1110,7 +1210,7 @@ func Test_AddTxWithIntrinsicGasTooLow(t *testing.T) { }) signedTx, err = auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.Error(t, err) assert.Equal(t, err.Error(), pool.ErrIntrinsicGas.Error()) @@ -1124,7 +1224,7 @@ func Test_AddTxWithIntrinsicGasTooLow(t *testing.T) { }) signedTx, err = auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.Error(t, err) assert.Equal(t, err.Error(), pool.ErrIntrinsicGas.Error()) @@ -1138,7 +1238,7 @@ func Test_AddTxWithIntrinsicGasTooLow(t *testing.T) { }) signedTx, err = auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) txs, err := p.GetPendingTxs(ctx, 0) @@ -1176,7 +1276,7 @@ func Test_AddTx_GasPriceErr(t *testing.T) { name: "GasPriceTooLowErr", nonce: 0, to: nil, - gasLimit: gasLimit, + gasLimit: gasLimit, // Is a contract 53000 gasPrice: big.NewInt(0).SetUint64(gasPrice.Uint64() - uint64(1)), data: []byte{}, expectedError: pool.ErrGasPrice, @@ -1230,7 +1330,7 @@ func Test_AddTx_GasPriceErr(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) tx := ethTypes.NewTx(ðTypes.LegacyTx{ Nonce: tc.nonce, To: tc.to, @@ -1248,7 +1348,7 @@ func Test_AddTx_GasPriceErr(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) if tc.expectedError != nil { require.ErrorIs(t, err, tc.expectedError) } else { @@ -1287,9 +1387,9 @@ func Test_AddRevertedTx(t *testing.T) { require.NoError(t, dbTx.Commit(ctx)) s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) - 
require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(senderPrivateKey, "0x")) require.NoError(t, err) @@ -1309,7 +1409,7 @@ func Test_AddRevertedTx(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) txs, err := p.GetPendingTxs(ctx, 0) @@ -1382,7 +1482,8 @@ func Test_BlockedAddress(t *testing.T) { GlobalQueue: 1024, } - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) + p.StartRefreshingBlockedAddressesPeriodically() gasPrices, err := p.GetGasPrices(ctx) require.NoError(t, err) @@ -1398,7 +1499,7 @@ func Test_BlockedAddress(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) // block address @@ -1419,7 +1520,7 @@ func Test_BlockedAddress(t *testing.T) { signedTx, err = auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.Equal(t, pool.ErrBlockedSender, err) // remove block @@ -1430,7 +1531,7 @@ func Test_BlockedAddress(t *testing.T) { time.Sleep(cfg.IntervalToRefreshBlockedAddresses.Duration) // allowed to add tx again - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) } @@ -1504,7 +1605,7 @@ func Test_AddTx_GasOverBatchLimit(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) tx := ethTypes.NewTx(ðTypes.LegacyTx{ Nonce: tc.nonce, To: tc.to, @@ -1522,7 +1623,7 @@ func 
Test_AddTx_GasOverBatchLimit(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) if tc.expectedError != nil { require.ErrorIs(t, err, tc.expectedError) } else { @@ -1578,7 +1679,7 @@ func Test_AddTx_AccountQueueLimit(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(senderPrivateKey, "0x")) require.NoError(t, err) @@ -1598,7 +1699,7 @@ func Test_AddTx_AccountQueueLimit(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) nonce++ } @@ -1613,7 +1714,7 @@ func Test_AddTx_AccountQueueLimit(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.Error(t, err, pool.ErrNonceTooHigh) } @@ -1679,7 +1780,7 @@ func Test_AddTx_GlobalQueueLimit(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) for _, privateKey := range accounts { auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) @@ -1694,7 +1795,7 @@ func Test_AddTx_GlobalQueueLimit(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.NoError(t, err) } @@ -1714,7 +1815,7 @@ func Test_AddTx_GlobalQueueLimit(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, 
ip) require.Error(t, err, pool.ErrTxPoolOverflow) } @@ -1765,7 +1866,7 @@ func Test_AddTx_NonceTooHigh(t *testing.T) { s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) require.NoError(t, err) - p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) + p := setupPool(t, cfg, bc, s, st, chainID.Uint64(), ctx, eventLog) tx := ethTypes.NewTx(ðTypes.LegacyTx{ Nonce: cfg.AccountQueue, @@ -1783,15 +1884,142 @@ func Test_AddTx_NonceTooHigh(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - err = p.AddTx(ctx, *signedTx, "") + err = p.AddTx(ctx, *signedTx, ip) require.Error(t, err, pool.ErrNonceTooHigh) } -func setupPool(t *testing.T, cfg pool.Config, s *pgpoolstorage.PostgresPoolStorage, st *state.State, chainID uint64, ctx context.Context, eventLog *event.EventLog) *pool.Pool { - p := pool.NewPool(cfg, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID, eventLog) +func Test_AddTx_IPValidation(t *testing.T) { + var tests = []struct { + name string + ip string + expected error + }{ + {"Valid IPv4", "127.0.0.1", nil}, + {"Valid IPv6", "2001:db8:0:1:1:1:1:1", nil}, + {"Invalid IP", "300.0.0.1", pool.ErrInvalidIP}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + initOrResetDB(t) + + stateSqlDB, err := db.NewSQLDB(stateDBCfg) + require.NoError(t, err) + defer stateSqlDB.Close() //nolint:gosec,errcheck + + poolSqlDB, err := db.NewSQLDB(poolDBCfg) + require.NoError(t, err) + + defer poolSqlDB.Close() //nolint:gosec,errcheck + + eventStorage, err := nileventstorage.NewNilEventStorage() + if err != nil { + log.Fatal(err) + } + eventLog := event.NewEventLog(event.Config{}, eventStorage) + + st := newState(stateSqlDB, eventLog) + + genesisBlock := state.Block{ + BlockNumber: 0, + BlockHash: state.ZeroHash, + ParentHash: state.ZeroHash, + ReceivedAt: time.Now(), + } + ctx := context.Background() + dbTx, err := st.BeginStateTransaction(ctx) + require.NoError(t, err) + _, err = 
st.SetGenesis(ctx, genesisBlock, genesis, dbTx) + require.NoError(t, err) + require.NoError(t, dbTx.Commit(ctx)) + + s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) + require.NoError(t, err) + + const chainID = 2576980377 + p := setupPool(t, cfg, bc, s, st, chainID, ctx, eventLog) + + tx := new(ethTypes.Transaction) + expectedTxEncoded := "0xf86880843b9aca008252089400000000000000000000000000000000000000008080850133333355a03ee24709870c8dbc67884c9c8acb864c1aceaaa7332b9a3db0d7a5d7c68eb8e4a0302980b070f5e3ffca3dc27b07daf69d66ab27d4df648e0b3ed059cf23aa168d" + b, err := hex.DecodeHex(expectedTxEncoded) + require.NoError(t, err) + tx.UnmarshalBinary(b) //nolint:gosec,errcheck + + err = p.AddTx(context.Background(), *tx, tc.ip) + + if tc.expected != nil { + assert.ErrorIs(t, err, tc.expected) + } else { + assert.NoError(t, err) + } + }) + } +} - err := p.SetGasPrices(ctx, gasPrice.Uint64(), l1GasPrice.Uint64()) +func setupPool(t *testing.T, cfg pool.Config, constraintsCfg state.BatchConstraintsCfg, s *pgpoolstorage.PostgresPoolStorage, st *state.State, chainID uint64, ctx context.Context, eventLog *event.EventLog) *pool.Pool { + err := s.SetGasPrices(ctx, gasPrice.Uint64(), l1GasPrice.Uint64()) require.NoError(t, err) + p := pool.NewPool(cfg, constraintsCfg, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), chainID, eventLog) p.StartPollingMinSuggestedGasPrice(ctx) return p } + +func prepareToExecuteTx(t *testing.T, chainIDToCreate uint64) testData { + initOrResetDB(t) + + stateSqlDB, err := db.NewSQLDB(stateDBCfg) + require.NoError(t, err) + //defer stateSqlDB.Close() //nolint:gosec,errcheck + + poolSqlDB, err := db.NewSQLDB(poolDBCfg) + require.NoError(t, err) + + //defer poolSqlDB.Close() //nolint:gosec,errcheck + + eventStorage, err := nileventstorage.NewNilEventStorage() + if err != nil { + stateSqlDB.Close() //nolint:gosec,errcheck + poolSqlDB.Close() //nolint:gosec,errcheck + log.Fatal(err) + } + eventLog := 
event.NewEventLog(event.Config{}, eventStorage) + + st := newState(stateSqlDB, eventLog) + + genesisBlock := state.Block{ + BlockNumber: 0, + BlockHash: state.ZeroHash, + ParentHash: state.ZeroHash, + ReceivedAt: time.Now(), + } + ctx := context.Background() + dbTx, err := st.BeginStateTransaction(ctx) + require.NoError(t, err) + _, err = st.SetGenesis(ctx, genesisBlock, genesis, dbTx) + require.NoError(t, err) + require.NoError(t, dbTx.Commit(ctx)) + + s, err := pgpoolstorage.NewPostgresPoolStorage(poolDBCfg) + require.NoError(t, err) + + p := setupPool(t, cfg, bc, s, st, chainIDToCreate, ctx, eventLog) + return testData{ + pool: p, + st: st, + stateSqlDB: stateSqlDB, + poolSqlDB: poolSqlDB, + } +} + +func createSignedTx(t *testing.T, dataLen int, gasPrice *big.Int, gasLimit uint64) *ethTypes.Transaction { + b := make([]byte, cfg.MaxTxDataBytesSize-20) + to := common.HexToAddress(senderAddress) + //gasPrice := big.NewInt(1000000000) + //gasLimitForThisTx := uint64(21000) + uint64(16)*uint64(len(b)) + tx := ethTypes.NewTransaction(0, to, big.NewInt(0), gasLimit, gasPrice, b) + auth, err := operations.GetAuth(senderPrivateKey, chainID.Uint64()) + require.NoError(t, err) + signedTx, err := auth.Signer(auth.From, tx) + require.NoError(t, err) + return signedTx +} diff --git a/pool/validation.go b/pool/validation.go new file mode 100644 index 0000000000..daf1460115 --- /dev/null +++ b/pool/validation.go @@ -0,0 +1,8 @@ +package pool + +import "net" + +// IsValidIP returns true if the given string is a valid IP address +func IsValidIP(ip string) bool { + return ip != "" && net.ParseIP(ip) != nil +} diff --git a/pool/validation_test.go b/pool/validation_test.go new file mode 100644 index 0000000000..d03ee86fc1 --- /dev/null +++ b/pool/validation_test.go @@ -0,0 +1,27 @@ +package pool + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_IsValidIP(t *testing.T) { + var tests = []struct { + name string + ip string + expected bool + }{ + {"Valid 
IPv4", "127.0.0.1", true}, + {"Valid IPv6", "2001:db8:0:1:1:1:1:1", true}, + {"Invalid IP", "300.0.0.1", false}, + {"Empty IP", "", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsValidIP(tt.ip) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/proto/src/proto/executor/v1/executor.proto b/proto/src/proto/executor/v1/executor.proto index b6cf03f720..b2c7aacc05 100644 --- a/proto/src/proto/executor/v1/executor.proto +++ b/proto/src/proto/executor/v1/executor.proto @@ -31,6 +31,8 @@ message ProcessBatchRequest { map db = 13; map contracts_bytecode = 14; // For debug/testing purpposes only. Don't fill this on production TraceConfig trace_config = 15; + string context_id = 16; + uint32 get_keys = 17; // if 1, the keys used to read or write storage values will be returned } message ProcessBatchResponse { @@ -52,6 +54,8 @@ message ProcessBatchResponse { uint64 flush_id = 16; uint64 stored_flush_id = 17; string prover_id = 18; + repeated bytes nodes_keys = 19; + repeated bytes program_keys = 20; } /** @@ -167,6 +171,8 @@ message Contract { string value = 3; bytes data = 4; uint64 gas = 5; + // Define type of internal call: CREATE, CREATE2, CALL, CALLCODE, DELEGATECALL, STATICCALL + string type = 6; } message ProcessTransactionResponse { @@ -197,9 +203,13 @@ message ProcessTransactionResponse { // Trace repeated ExecutionTraceStep execution_trace = 13; CallTrace call_trace = 14; - // Efective Gas Price + // Efective Gas Price string effective_gas_price = 15; uint32 effective_percentage = 16; + // Flag to indicate if opcode 'GASPRICE' has been called + uint32 has_gasprice_opcode = 17; + // Flag to indicate if opcode 'BALANCE' has been called + uint32 has_balance_opcode = 18; } message Log { @@ -450,4 +460,28 @@ enum ExecutorError { EXECUTOR_ERROR_SM_MAIN_HASHK_READ_OUT_OF_RANGE = 67; // EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE indicates that the main execution Poseidon check found read out of range 
EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE = 68; -} + // EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT indicates that the input parameter old_state_root is invalid + EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT = 69; + // EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH indicates that the input parameter old_acc_input_hash is invalid + EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH = 70; + // EXECUTOR_ERROR_INVALID_CHAIN_ID indicates that the input parameter chain_id is invalid + EXECUTOR_ERROR_INVALID_CHAIN_ID = 71; + // EXECUTOR_ERROR_INVALID_BATCH_L2_DATA indicates that the input parameter batch_l2_data is invalid + EXECUTOR_ERROR_INVALID_BATCH_L2_DATA = 72; + // EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT indicates that the input parameter global_exit_root is invalid + EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT = 73; + // EXECUTOR_ERROR_INVALID_COINBASE indicates that the input parameter coinbase (i.e. sequencer address) is invalid + EXECUTOR_ERROR_INVALID_COINBASE = 74; + // EXECUTOR_ERROR_INVALID_FROM indicates that the input parameter from is invalid + EXECUTOR_ERROR_INVALID_FROM = 75; + // EXECUTOR_ERROR_INVALID_DB_KEY indicates that the input parameter db key is invalid + EXECUTOR_ERROR_INVALID_DB_KEY = 76; + // EXECUTOR_ERROR_INVALID_DB_VALUE indicates that the input parameter db value is invalid + EXECUTOR_ERROR_INVALID_DB_VALUE = 77; + // EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY indicates that the input parameter contracts_bytecode key is invalid + EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY = 78; + // EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE indicates that the input parameter contracts_bytecode value is invalid + EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE = 79; + // EXECUTOR_ERROR_INVALID_GET_KEY indicates that the input parameter get key is invalid, e.g. 
is true but fork_id<5 + EXECUTOR_ERROR_INVALID_GET_KEY = 80; +} \ No newline at end of file diff --git a/sequencer/addrqueue.go b/sequencer/addrqueue.go index 451c6f8655..6adb8787cf 100644 --- a/sequencer/addrqueue.go +++ b/sequencer/addrqueue.go @@ -194,7 +194,7 @@ func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) ( } } - // We check if we have a new readyTx from the notReadyTxs (at this point, to optmize the code, + // We check if we have a new readyTx from the notReadyTxs (at this point, to optimize the code, // we are not including the oldReadyTx in notReadyTxs, as it can match again if the nonce has not changed) if a.readyTx == nil { nrTx, found := a.notReadyTxs[a.currentNonce] diff --git a/sequencer/closingsignalsmanager_test.go b/sequencer/closingsignalsmanager_test.go index e6eca56cef..a47ca14730 100644 --- a/sequencer/closingsignalsmanager_test.go +++ b/sequencer/closingsignalsmanager_test.go @@ -72,9 +72,9 @@ func setupTest(t *testing.T) { eventLog := event.NewEventLog(event.Config{}, eventStorage) localStateTree := merkletree.NewStateTree(localMtDBServiceClient) - localState = state.NewState(stateCfg, state.NewPostgresStorage(localStateDb), localExecutorClient, localStateTree, eventLog) + localState = state.NewState(stateCfg, state.NewPostgresStorage(state.Config{}, localStateDb), localExecutorClient, localStateTree, eventLog) - batchConstraints := batchConstraints{ + batchConstraints := state.BatchConstraintsCfg{ MaxTxsPerBatch: 300, MaxBatchBytesSize: 120000, MaxCumulativeGasUsed: 30000000, diff --git a/sequencer/config.go b/sequencer/config.go index df1c1ff934..8f43428a3a 100644 --- a/sequencer/config.go +++ b/sequencer/config.go @@ -1,6 +1,7 @@ package sequencer import ( + "github.com/0xPolygonHermez/zkevm-data-streamer/log" "github.com/0xPolygonHermez/zkevm-node/config/types" ) @@ -16,37 +17,6 @@ type Config struct { // FrequencyToCheckTxsForDelete is frequency with which txs will be checked for deleting 
FrequencyToCheckTxsForDelete types.Duration `mapstructure:"FrequencyToCheckTxsForDelete"` - // MaxTxsPerBatch is the maximum amount of transactions in the batch - MaxTxsPerBatch uint64 `mapstructure:"MaxTxsPerBatch"` - - // MaxBatchBytesSize is the maximum batch size in bytes - // (subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1) - MaxBatchBytesSize uint64 `mapstructure:"MaxBatchBytesSize"` - - // MaxCumulativeGasUsed is max gas amount used by batch - MaxCumulativeGasUsed uint64 `mapstructure:"MaxCumulativeGasUsed"` - - // MaxKeccakHashes is max keccak hashes used by batch - MaxKeccakHashes uint32 `mapstructure:"MaxKeccakHashes"` - - // MaxPoseidonHashes is max poseidon hashes batch can handle - MaxPoseidonHashes uint32 `mapstructure:"MaxPoseidonHashes"` - - // MaxPoseidonPaddings is max poseidon paddings batch can handle - MaxPoseidonPaddings uint32 `mapstructure:"MaxPoseidonPaddings"` - - // MaxMemAligns is max mem aligns batch can handle - MaxMemAligns uint32 `mapstructure:"MaxMemAligns"` - - // MaxArithmetics is max arithmetics batch can handle - MaxArithmetics uint32 `mapstructure:"MaxArithmetics"` - - // MaxBinaries is max binaries batch can handle - MaxBinaries uint32 `mapstructure:"MaxBinaries"` - - // MaxSteps is max steps batch can handle - MaxSteps uint32 `mapstructure:"MaxSteps"` - // TxLifetimeCheckTimeout is the time the sequencer waits to check txs lifetime TxLifetimeCheckTimeout types.Duration `mapstructure:"TxLifetimeCheckTimeout"` @@ -59,8 +29,20 @@ type Config struct { // DBManager's specific config properties DBManager DBManagerCfg `mapstructure:"DBManager"` - // EffectiveGasPrice is the config for the gas price - EffectiveGasPrice EffectiveGasPriceCfg `mapstructure:"EffectiveGasPrice"` + // StreamServerCfg is the config for the stream server + StreamServer StreamServerCfg `mapstructure:"StreamServer"` +} + +// StreamServerCfg contains the data streamer's configuration properties +type StreamServerCfg 
struct { + // Port to listen on + Port uint16 `mapstructure:"Port"` + // Filename of the binary data file + Filename string `mapstructure:"Filename"` + // Enabled is a flag to enable/disable the data streamer + Enabled bool `mapstructure:"Enabled"` + // Log is the log configuration + Log log.Config `mapstructure:"Log"` } // FinalizerCfg contains the finalizer's configuration properties @@ -108,25 +90,3 @@ type DBManagerCfg struct { PoolRetrievalInterval types.Duration `mapstructure:"PoolRetrievalInterval"` L2ReorgRetrievalInterval types.Duration `mapstructure:"L2ReorgRetrievalInterval"` } - -// EffectiveGasPriceCfg contains the configuration properties for the effective gas price -type EffectiveGasPriceCfg struct { - // MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation - MaxBreakEvenGasPriceDeviationPercentage uint64 `mapstructure:"MaxBreakEvenGasPriceDeviationPercentage"` - - // L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price - L1GasPriceFactor float64 `mapstructure:"L1GasPriceFactor"` - - // ByteGasCost is the gas cost per byte - ByteGasCost uint64 `mapstructure:"ByteGasCost"` - - // MarginFactor is the margin factor percentage to be added to the L2 min gas price - MarginFactor float64 `mapstructure:"MarginFactor"` - - // Enabled is a flag to enable/disable the effective gas price - Enabled bool `mapstructure:"Enabled"` - - // DefaultMinGasPriceAllowed is the default min gas price to suggest - // This value is assigned from [Pool].DefaultMinGasPriceAllowed - DefaultMinGasPriceAllowed uint64 -} diff --git a/sequencer/dbmanager.go b/sequencer/dbmanager.go index 465e2ce103..86ccca070e 100644 --- a/sequencer/dbmanager.go +++ b/sequencer/dbmanager.go @@ -5,6 +5,7 @@ import ( "math/big" "time" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/pool" 
"github.com/0xPolygonHermez/zkevm-node/state" @@ -13,16 +14,22 @@ import ( "github.com/jackc/pgx/v4" ) +const ( + datastreamChannelMultiplier = 2 +) + // Pool Loader and DB Updater type dbManager struct { - cfg DBManagerCfg - txPool txPool - state stateInterface - worker workerInterface - l2ReorgCh chan L2ReorgEvent - ctx context.Context - batchConstraints batchConstraints - numberOfReorgs uint64 + cfg DBManagerCfg + txPool txPool + state stateInterface + worker workerInterface + l2ReorgCh chan L2ReorgEvent + ctx context.Context + batchConstraints state.BatchConstraintsCfg + numberOfStateInconsistencies uint64 + streamServer *datastreamer.StreamServer + dataToStream chan state.DSL2FullBlock } func (d *dbManager) GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) { @@ -41,13 +48,16 @@ type ClosingBatchParameters struct { EffectivePercentages []uint8 } -func newDBManager(ctx context.Context, config DBManagerCfg, txPool txPool, state stateInterface, worker *Worker, closingSignalCh ClosingSignalCh, batchConstraints batchConstraints) *dbManager { - numberOfReorgs, err := state.CountReorgs(ctx, nil) +func newDBManager(ctx context.Context, config DBManagerCfg, txPool txPool, stateInterface stateInterface, worker *Worker, closingSignalCh ClosingSignalCh, batchConstraints state.BatchConstraintsCfg) *dbManager { + numberOfReorgs, err := stateInterface.CountReorgs(ctx, nil) if err != nil { log.Error("failed to get number of reorgs: %v", err) } - return &dbManager{ctx: ctx, cfg: config, txPool: txPool, state: state, worker: worker, l2ReorgCh: closingSignalCh.L2ReorgCh, batchConstraints: batchConstraints, numberOfReorgs: numberOfReorgs} + return &dbManager{ctx: ctx, cfg: config, txPool: txPool, + state: stateInterface, worker: worker, l2ReorgCh: closingSignalCh.L2ReorgCh, + batchConstraints: batchConstraints, numberOfStateInconsistencies: numberOfReorgs, + dataToStream: make(chan state.DSL2FullBlock, 
batchConstraints.MaxTxsPerBatch*datastreamChannelMultiplier)} } // Start stars the dbManager routines @@ -56,9 +66,12 @@ func (d *dbManager) Start() { go func() { for { time.Sleep(d.cfg.L2ReorgRetrievalInterval.Duration) - d.checkIfReorg() + d.checkStateInconsistency() } }() + if d.streamServer != nil { + go d.sendDataToStreamer() + } } // GetLastBatchNumber get the latest batch number from state @@ -102,15 +115,16 @@ func (d *dbManager) CreateFirstBatch(ctx context.Context, sequencerAddress commo return processingCtx } -// checkIfReorg checks if a reorg has happened -func (d *dbManager) checkIfReorg() { - numberOfReorgs, err := d.state.CountReorgs(d.ctx, nil) +// checkStateInconsistency checks if state inconsistency happened +func (d *dbManager) checkStateInconsistency() { + stateInconsistenciesDetected, err := d.state.CountReorgs(d.ctx, nil) if err != nil { log.Error("failed to get number of reorgs: %v", err) + return } - if numberOfReorgs != d.numberOfReorgs { - log.Warnf("New L2 reorg detected") + if stateInconsistenciesDetected != d.numberOfStateInconsistencies { + log.Warnf("New State Inconsistency detected") d.l2ReorgCh <- L2ReorgEvent{} } } @@ -134,6 +148,87 @@ func (d *dbManager) loadFromPool() { } } +// sendDataToStreamer sends data to the data stream server +func (d *dbManager) sendDataToStreamer() { + var err error + for { + // Read error from previous iteration + if err != nil { + err = d.streamServer.RollbackAtomicOp() + if err != nil { + log.Errorf("failed to rollback atomic op: %v", err) + } + d.streamServer = nil + } + + // Read data from channel + fullL2Block := <-d.dataToStream + + l2Block := fullL2Block + l2Transactions := fullL2Block.Txs + + if d.streamServer != nil { + err = d.streamServer.StartAtomicOp() + if err != nil { + log.Errorf("failed to start atomic op for l2block %v: %v ", l2Block.L2BlockNumber, err) + continue + } + + bookMark := state.DSBookMark{ + Type: state.BookMarkTypeL2Block, + L2BlockNumber: l2Block.L2BlockNumber, + } + + _, 
err = d.streamServer.AddStreamBookmark(bookMark.Encode()) + if err != nil { + log.Errorf("failed to add stream bookmark for l2block %v: %v", l2Block.L2BlockNumber, err) + continue + } + + blockStart := state.DSL2BlockStart{ + BatchNumber: l2Block.BatchNumber, + L2BlockNumber: l2Block.L2BlockNumber, + Timestamp: l2Block.Timestamp, + GlobalExitRoot: l2Block.GlobalExitRoot, + Coinbase: l2Block.Coinbase, + ForkID: l2Block.ForkID, + } + + _, err = d.streamServer.AddStreamEntry(state.EntryTypeL2BlockStart, blockStart.Encode()) + if err != nil { + log.Errorf("failed to add stream entry for l2block %v: %v", l2Block.L2BlockNumber, err) + continue + } + + for _, l2Transaction := range l2Transactions { + _, err = d.streamServer.AddStreamEntry(state.EntryTypeL2Tx, l2Transaction.Encode()) + if err != nil { + log.Errorf("failed to add l2tx stream entry for l2block %v: %v", l2Block.L2BlockNumber, err) + continue + } + } + + blockEnd := state.DSL2BlockEnd{ + L2BlockNumber: l2Block.L2BlockNumber, + BlockHash: l2Block.BlockHash, + StateRoot: l2Block.StateRoot, + } + + _, err = d.streamServer.AddStreamEntry(state.EntryTypeL2BlockEnd, blockEnd.Encode()) + if err != nil { + log.Errorf("failed to add stream entry for l2block %v: %v", l2Block.L2BlockNumber, err) + continue + } + + err = d.streamServer.CommitAtomicOp() + if err != nil { + log.Errorf("failed to commit atomic op for l2block %v: %v ", l2Block.L2BlockNumber, err) + continue + } + } + } +} + func (d *dbManager) addTxToWorker(tx pool.Transaction) error { txTracker, err := d.worker.NewTxTracker(tx, tx.ZKCounters, tx.IP) if err != nil { @@ -167,7 +262,7 @@ func (d *dbManager) DeleteTransactionFromPool(ctx context.Context, txHash common // StoreProcessedTxAndDeleteFromPool stores a tx into the state and changes it status in the pool func (d *dbManager) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx transactionToStore) error { - d.checkIfReorg() + d.checkStateInconsistency() log.Debugf("Storing tx %v", 
tx.response.TxHash) dbTx, err := d.BeginStateTransaction(ctx) @@ -175,7 +270,7 @@ func (d *dbManager) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx tr return err } - err = d.state.StoreTransaction(ctx, tx.batchNumber, tx.response, tx.coinbase, uint64(tx.timestamp.Unix()), dbTx) + l2BlockHeader, err := d.state.StoreTransaction(ctx, tx.batchNumber, tx.response, tx.coinbase, uint64(tx.timestamp.Unix()), tx.egpLog, dbTx) if err != nil { return err } @@ -183,6 +278,10 @@ func (d *dbManager) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx tr // Update batch l2 data batch, err := d.state.GetBatchByNumber(ctx, tx.batchNumber, dbTx) if err != nil { + err2 := dbTx.Rollback(ctx) + if err2 != nil { + log.Errorf("failed to rollback dbTx when getting batch that gave err: %v. Rollback err: %v", err2, err) + } return err } @@ -196,6 +295,10 @@ func (d *dbManager) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx tr if !tx.isForcedBatch { err = d.state.UpdateBatchL2Data(ctx, tx.batchNumber, batch.BatchL2Data, dbTx) if err != nil { + err2 := dbTx.Rollback(ctx) + if err2 != nil { + log.Errorf("failed to rollback dbTx when updating batch l2 data that gave err: %v. 
Rollback err: %v", err2, err) + } return err } } @@ -205,13 +308,50 @@ func (d *dbManager) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx tr return err } - // Change Tx status to selected - err = d.txPool.UpdateTxStatus(ctx, tx.response.TxHash, pool.TxStatusSelected, false, nil) - if err != nil { - return err + if !tx.isForcedBatch { + // Change Tx status to selected + err = d.txPool.UpdateTxStatus(ctx, tx.response.TxHash, pool.TxStatusSelected, false, nil) + if err != nil { + return err + } } log.Infof("StoreProcessedTxAndDeleteFromPool: successfully stored tx: %v for batch: %v", tx.response.TxHash.String(), tx.batchNumber) + + // Send data to streamer + if d.streamServer != nil { + forkID := d.state.GetForkIDByBatchNumber(tx.batchNumber) + + l2Block := state.DSL2Block{ + BatchNumber: tx.batchNumber, + L2BlockNumber: l2BlockHeader.Number.Uint64(), + Timestamp: tx.timestamp.Unix(), + GlobalExitRoot: batch.GlobalExitRoot, + Coinbase: tx.coinbase, + ForkID: uint16(forkID), + BlockHash: l2BlockHeader.Hash(), + StateRoot: l2BlockHeader.Root, + } + + binaryTxData, err := tx.response.Tx.MarshalBinary() + if err != nil { + return err + } + + l2Transaction := state.DSL2Transaction{ + L2BlockNumber: l2Block.L2BlockNumber, + EffectiveGasPricePercentage: uint8(tx.response.EffectivePercentage), + IsValid: 1, + EncodedLength: uint32(len(binaryTxData)), + Encoded: binaryTxData, + } + + d.dataToStream <- state.DSL2FullBlock{ + DSL2Block: l2Block, + Txs: []state.DSL2Transaction{l2Transaction}, + } + } + return nil } @@ -270,7 +410,7 @@ func (d *dbManager) GetWIPBatch(ctx context.Context) (*WipBatch, error) { // Init counters to MAX values var totalBytes uint64 = d.batchConstraints.MaxBatchBytesSize - var batchZkCounters state.ZKCounters = state.ZKCounters{ + var batchZkCounters = state.ZKCounters{ CumulativeGasUsed: d.batchConstraints.MaxCumulativeGasUsed, UsedKeccakHashes: d.batchConstraints.MaxKeccakHashes, UsedPoseidonHashes: d.batchConstraints.MaxPoseidonHashes, @@ 
-568,8 +708,8 @@ func (d *dbManager) GetDefaultMinGasPriceAllowed() uint64 { return d.txPool.GetDefaultMinGasPriceAllowed() } -func (d *dbManager) GetL1GasPrice() uint64 { - return d.txPool.GetL1GasPrice() +func (d *dbManager) GetL1AndL2GasPrice() (uint64, uint64) { + return d.txPool.GetL1AndL2GasPrice() } // GetStoredFlushID returns the stored flush ID and prover ID diff --git a/sequencer/dbmanager_test.go b/sequencer/dbmanager_test.go index 0a8cf82c23..aa13d97adc 100644 --- a/sequencer/dbmanager_test.go +++ b/sequencer/dbmanager_test.go @@ -65,7 +65,7 @@ func setupDBManager() { eventLog := event.NewEventLog(event.Config{}, eventStorage) stateTree = merkletree.NewStateTree(mtDBServiceClient) - testState = state.NewState(stateCfg, state.NewPostgresStorage(stateDb), executorClient, stateTree, eventLog) + testState = state.NewState(stateCfg, state.NewPostgresStorage(state.Config{}, stateDb), executorClient, stateTree, eventLog) // DBManager closingSignalCh := ClosingSignalCh{ @@ -73,7 +73,7 @@ func setupDBManager() { GERCh: make(chan common.Hash), L2ReorgCh: make(chan L2ReorgEvent), } - batchConstraints := batchConstraints{ + batchConstraints := state.BatchConstraintsCfg{ MaxTxsPerBatch: 300, MaxBatchBytesSize: 120000, MaxCumulativeGasUsed: 30000000, diff --git a/sequencer/effective_gas_price.go b/sequencer/effective_gas_price.go deleted file mode 100644 index 001c3cf80a..0000000000 --- a/sequencer/effective_gas_price.go +++ /dev/null @@ -1,138 +0,0 @@ -package sequencer - -import ( - "context" - "fmt" - "math/big" - "time" - - "github.com/0xPolygonHermez/zkevm-node/event" - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state" -) - -// CalculateTxBreakEvenGasPrice calculates the break even gas price for a transaction -func (f *finalizer) CalculateTxBreakEvenGasPrice(tx *TxTracker, gasUsed uint64) (*big.Int, error) { - const ( - // constants used in calculation of BreakEvenGasPrice - signatureBytesLength = 65 - 
effectivePercentageBytesLength = 1 - totalRlpFieldsLength = signatureBytesLength + effectivePercentageBytesLength - ) - - if tx.L1GasPrice == 0 { - log.Warn("CalculateTxBreakEvenGasPrice: L1 gas price 0. Skipping estimation for tx %s", tx.HashStr) - return nil, ErrZeroL1GasPrice - } - - if gasUsed == 0 { - // Returns tx.GasPrice as the breakEvenGasPrice - return tx.GasPrice, nil - } - - // Get L2 Min Gas Price - l2MinGasPrice := uint64(float64(tx.L1GasPrice) * f.effectiveGasPriceCfg.L1GasPriceFactor) - if l2MinGasPrice < f.defaultMinGasPriceAllowed { - l2MinGasPrice = f.defaultMinGasPriceAllowed - } - - // Calculate BreakEvenGasPrice - totalTxPrice := (gasUsed * l2MinGasPrice) + ((totalRlpFieldsLength + tx.BatchResources.Bytes) * f.effectiveGasPriceCfg.ByteGasCost * tx.L1GasPrice) - breakEvenGasPrice := big.NewInt(0).SetUint64(uint64(float64(totalTxPrice/gasUsed) * f.effectiveGasPriceCfg.MarginFactor)) - - return breakEvenGasPrice, nil -} - -// CompareTxBreakEvenGasPrice calculates the newBreakEvenGasPrice with the newGasUsed and compares it with -// the tx.BreakEvenGasPrice. 
It returns ErrEffectiveGasPriceReprocess if the tx needs to be reprocessed with -// the tx.BreakEvenGasPrice updated, otherwise it returns nil -func (f *finalizer) CompareTxBreakEvenGasPrice(ctx context.Context, tx *TxTracker, newGasUsed uint64) error { - // Increase nunber of executions related to gas price - tx.EffectiveGasPriceProcessCount++ - - newBreakEvenGasPrice, err := f.CalculateTxBreakEvenGasPrice(tx, newGasUsed) - if err != nil { - log.Errorf("failed to calculate breakEvenPrice with new gasUsed for tx %s, error: %s", tx.HashStr, err.Error()) - return err - } - - // if newBreakEvenGasPrice >= tx.GasPrice then we do a final reprocess using tx.GasPrice - if newBreakEvenGasPrice.Cmp(tx.GasPrice) >= 0 { - tx.BreakEvenGasPrice = tx.GasPrice - tx.IsEffectiveGasPriceFinalExecution = true - return ErrEffectiveGasPriceReprocess - } else { //newBreakEvenGasPrice < tx.GasPrice - // Compute the abosulte difference between tx.BreakEvenGasPrice - newBreakEvenGasPrice - diff := new(big.Int).Abs(new(big.Int).Sub(tx.BreakEvenGasPrice, newBreakEvenGasPrice)) - // Compute max difference allowed of breakEvenGasPrice - maxDiff := new(big.Int).Div(new(big.Int).Mul(tx.BreakEvenGasPrice, f.maxBreakEvenGasPriceDeviationPercentage), big.NewInt(100)) //nolint:gomnd - - // if diff is greater than the maxDiff allowed - if diff.Cmp(maxDiff) == 1 { - if tx.EffectiveGasPriceProcessCount < 2 { //nolint:gomnd - // it is the first process of the tx we reprocess it with the newBreakEvenGasPrice - tx.BreakEvenGasPrice = newBreakEvenGasPrice - return ErrEffectiveGasPriceReprocess - } else { - // it is the second process attempt. It makes no sense to have a big diff at - // this point, for this reason we do a final reprocess using tx.GasPrice. 
- // Also we generate a critical event as this tx needs to be analized since - tx.BreakEvenGasPrice = tx.GasPrice - tx.IsEffectiveGasPriceFinalExecution = true - ev := &event.Event{ - ReceivedAt: time.Now(), - Source: event.Source_Node, - Component: event.Component_Sequencer, - Level: event.Level_Critical, - EventID: event.EventID_FinalizerBreakEvenGasPriceBigDifference, - Description: fmt.Sprintf("The difference: %s between the breakEvenGasPrice and the newBreakEvenGasPrice is more than %d %%", diff.String(), f.effectiveGasPriceCfg.MaxBreakEvenGasPriceDeviationPercentage), - Json: struct { - transactionHash string - preExecutionBreakEvenGasPrice string - newBreakEvenGasPrice string - diff string - deviation string - }{ - transactionHash: tx.Hash.String(), - preExecutionBreakEvenGasPrice: tx.BreakEvenGasPrice.String(), - newBreakEvenGasPrice: newBreakEvenGasPrice.String(), - diff: diff.String(), - deviation: maxDiff.String(), - }, - } - err = f.eventLog.LogEvent(ctx, ev) - if err != nil { - log.Errorf("failed to log event: %s", err.Error()) - } - return ErrEffectiveGasPriceReprocess - } - } // if the diff < maxDiff it is ok, no reprocess of the tx is needed - } - - return nil -} - -// CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage -func CalculateEffectiveGasPricePercentage(gasPrice *big.Int, breakEven *big.Int) (uint8, error) { - const bits = 256 - var bitsBigInt = big.NewInt(bits) - - if breakEven == nil || gasPrice == nil || - gasPrice.Cmp(big.NewInt(0)) == 0 || breakEven.Cmp(big.NewInt(0)) == 0 { - return 0, ErrBreakEvenGasPriceEmpty - } - - if gasPrice.Cmp(breakEven) <= 0 { - return state.MaxEffectivePercentage, nil - } - - // Simulate Ceil with integer division - b := new(big.Int).Mul(breakEven, bitsBigInt) - b = b.Add(b, gasPrice) - b = b.Sub(b, big.NewInt(1)) //nolint:gomnd - b = b.Div(b, gasPrice) - // At this point we have a percentage between 1-256, we need to sub 1 to have it between 0-255 (byte) - b = b.Sub(b, 
big.NewInt(1)) //nolint:gomnd - - return uint8(b.Uint64()), nil -} diff --git a/sequencer/effective_gas_price_test.go b/sequencer/effective_gas_price_test.go deleted file mode 100644 index 312faac0b8..0000000000 --- a/sequencer/effective_gas_price_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package sequencer - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCalcGasPriceEffectivePercentage(t *testing.T) { - testCases := []struct { - name string - breakEven *big.Int - gasPrice *big.Int - expectedValue uint8 - err error - }{ - { - name: "Nil breakEven or gasPrice", - gasPrice: big.NewInt(1), - expectedValue: uint8(0), - }, - { - name: "Zero breakEven or gasPrice", - breakEven: big.NewInt(1), - gasPrice: big.NewInt(0), - expectedValue: uint8(0), - }, - { - name: "Both positive, gasPrice less than breakEven", - breakEven: big.NewInt(22000000000), - gasPrice: big.NewInt(11000000000), - expectedValue: uint8(255), - }, - { - name: "Both positive, gasPrice more than breakEven", - breakEven: big.NewInt(19800000000), - gasPrice: big.NewInt(22000000000), - expectedValue: uint8(230), - }, - { - name: "100% (255) effective percentage 1", - gasPrice: big.NewInt(22000000000), - breakEven: big.NewInt(22000000000), - expectedValue: 255, - }, - { - name: "100% (255) effective percentage 2", - gasPrice: big.NewInt(22000000000), - breakEven: big.NewInt(21999999999), - expectedValue: 255, - }, - { - name: "100% (255) effective percentage 3", - gasPrice: big.NewInt(22000000000), - breakEven: big.NewInt(21900000000), - expectedValue: 254, - }, - { - name: "50% (127) effective percentage", - gasPrice: big.NewInt(22000000000), - breakEven: big.NewInt(11000000000), - expectedValue: 127, - }, - { - name: "(40) effective percentage", - gasPrice: big.NewInt(1000), - breakEven: big.NewInt(157), - expectedValue: 40, - }, - { - name: "(1) effective percentage", - gasPrice: big.NewInt(1000), - breakEven: big.NewInt(1), - expectedValue: 0, - }, - { - name: "(2) 
effective percentage", - gasPrice: big.NewInt(1000), - breakEven: big.NewInt(4), - expectedValue: 1, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - actual, _ := CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) - assert.Equal(t, tc.err, err) - if actual != 0 { - assert.Equal(t, tc.expectedValue, actual) - } else { - assert.Zero(t, tc.expectedValue) - } - }) - } -} diff --git a/sequencer/errors.go b/sequencer/errors.go index 92f33a42f1..ab231dc348 100644 --- a/sequencer/errors.go +++ b/sequencer/errors.go @@ -5,12 +5,8 @@ import "errors" var ( // ErrExpiredTransaction happens when the transaction is expired ErrExpiredTransaction = errors.New("transaction expired") - // ErrBreakEvenGasPriceEmpty happens when the breakEven or gasPrice is nil or zero - ErrBreakEvenGasPriceEmpty = errors.New("breakEven and gasPrice cannot be nil or zero") // ErrEffectiveGasPriceReprocess happens when the effective gas price requires reexecution ErrEffectiveGasPriceReprocess = errors.New("effective gas price requires reprocessing the transaction") - // ErrZeroL1GasPrice is returned if the L1 gas price is 0. - ErrZeroL1GasPrice = errors.New("L1 gas price 0") // ErrDuplicatedNonce is returned when adding a new tx to the worker and there is an existing tx // with the same nonce and higher gasPrice (in this case we keep the existing tx) ErrDuplicatedNonce = errors.New("duplicated nonce") diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 148bf536cc..ca0d3557bc 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -11,6 +11,7 @@ import ( "sync/atomic" "time" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/0xPolygonHermez/zkevm-node/event" "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/log" @@ -36,19 +37,21 @@ var ( // finalizer represents the finalizer component of the sequencer. 
type finalizer struct { - cfg FinalizerCfg - effectiveGasPriceCfg EffectiveGasPriceCfg - closingSignalCh ClosingSignalCh - isSynced func(ctx context.Context) bool - sequencerAddress common.Address - worker workerInterface - dbManager dbManagerInterface - executor stateInterface - batch *WipBatch - batchConstraints batchConstraints - processRequest state.ProcessRequest - sharedResourcesMux *sync.RWMutex - lastGERHash common.Hash + cfg FinalizerCfg + closingSignalCh ClosingSignalCh + isSynced func(ctx context.Context) bool + sequencerAddress common.Address + worker workerInterface + dbManager dbManagerInterface + executor stateInterface + batch *WipBatch + batchConstraints state.BatchConstraintsCfg + processRequest state.ProcessRequest + sharedResourcesMux *sync.RWMutex + // GER of the current WIP batch + currentGERHash common.Hash + // GER of the batch previous to the current WIP batch + previousGERHash common.Hash reprocessFullBatchError atomic.Bool // closing signals nextGER common.Hash @@ -61,8 +64,7 @@ type finalizer struct { // event log eventLog *event.EventLog // effective gas price calculation - maxBreakEvenGasPriceDeviationPercentage *big.Int - defaultMinGasPriceAllowed uint64 + effectiveGasPrice *pool.EffectiveGasPrice // Processed txs pendingTransactionsToStore chan transactionToStore pendingTransactionsToStoreWG *sync.WaitGroup @@ -71,6 +73,7 @@ type finalizer struct { proverID string lastPendingFlushID uint64 pendingFlushIDCond *sync.Cond + streamServer *datastreamer.StreamServer } type transactionToStore struct { @@ -84,6 +87,7 @@ type transactionToStore struct { oldStateRoot common.Hash isForcedBatch bool flushId uint64 + egpLog *state.EffectiveGasPriceLog } // WipBatch represents a work-in-progress batch. @@ -107,31 +111,31 @@ func (w *WipBatch) isEmpty() bool { // newFinalizer returns a new instance of Finalizer. 
func newFinalizer( cfg FinalizerCfg, - effectiveGasPriceCfg EffectiveGasPriceCfg, - + poolCfg pool.Config, worker workerInterface, dbManager dbManagerInterface, executor stateInterface, sequencerAddr common.Address, isSynced func(ctx context.Context) bool, closingSignalCh ClosingSignalCh, - batchConstraints batchConstraints, + batchConstraints state.BatchConstraintsCfg, eventLog *event.EventLog, + streamServer *datastreamer.StreamServer, ) *finalizer { f := finalizer{ - cfg: cfg, - effectiveGasPriceCfg: effectiveGasPriceCfg, - closingSignalCh: closingSignalCh, - isSynced: isSynced, - sequencerAddress: sequencerAddr, - worker: worker, - dbManager: dbManager, - executor: executor, - batch: new(WipBatch), - batchConstraints: batchConstraints, - processRequest: state.ProcessRequest{}, - sharedResourcesMux: new(sync.RWMutex), - lastGERHash: state.ZeroHash, + cfg: cfg, + closingSignalCh: closingSignalCh, + isSynced: isSynced, + sequencerAddress: sequencerAddr, + worker: worker, + dbManager: dbManager, + executor: executor, + batch: new(WipBatch), + batchConstraints: batchConstraints, + processRequest: state.ProcessRequest{}, + sharedResourcesMux: new(sync.RWMutex), + currentGERHash: state.ZeroHash, + previousGERHash: state.ZeroHash, // closing signals nextGER: common.Hash{}, nextGERDeadline: 0, @@ -141,16 +145,18 @@ func newFinalizer( nextForcedBatchesMux: new(sync.RWMutex), handlingL2Reorg: false, // event log - eventLog: eventLog, - maxBreakEvenGasPriceDeviationPercentage: new(big.Int).SetUint64(effectiveGasPriceCfg.MaxBreakEvenGasPriceDeviationPercentage), - pendingTransactionsToStore: make(chan transactionToStore, batchConstraints.MaxTxsPerBatch*pendingTxsBufferSizeMultiplier), - pendingTransactionsToStoreWG: new(sync.WaitGroup), - storedFlushID: 0, + eventLog: eventLog, + // effective gas price calculation instance + effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice, poolCfg.DefaultMinGasPriceAllowed), + pendingTransactionsToStore: make(chan 
transactionToStore, batchConstraints.MaxTxsPerBatch*pendingTxsBufferSizeMultiplier), + pendingTransactionsToStoreWG: new(sync.WaitGroup), + storedFlushID: 0, // Mutex is unlocked when the condition is broadcasted storedFlushIDCond: sync.NewCond(&sync.Mutex{}), proverID: "", lastPendingFlushID: 0, pendingFlushIDCond: sync.NewCond(&sync.Mutex{}), + streamServer: streamServer, } f.reprocessFullBatchError.Store(false) @@ -160,8 +166,6 @@ func newFinalizer( // Start starts the finalizer. func (f *finalizer) Start(ctx context.Context, batch *WipBatch, processingReq *state.ProcessRequest) { - f.defaultMinGasPriceAllowed = f.dbManager.GetDefaultMinGasPriceAllowed() - var err error if batch != nil { f.batch = batch @@ -325,6 +329,7 @@ func (f *finalizer) addPendingTxToStore(ctx context.Context, txToStore transacti // finalizeBatches runs the endless loop for processing transactions finalizing batches. func (f *finalizer) finalizeBatches(ctx context.Context) { log.Debug("finalizer init loop") + showNotFoundTxLog := true // used to log debug only the first message when there is no txs to process for { start := now() if f.batch.batchNumber == f.cfg.StopSequencerOnBatchNum { @@ -338,15 +343,16 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { if tx != nil { metrics.GetLogStatistics().CumulativeCounting(metrics.TxCounter) log.Debugf("processing tx: %s", tx.Hash.Hex()) + showNotFoundTxLog = true - // reset the count of effective GasPrice process attempts (since the tx may have been tried to be processed before) - tx.EffectiveGasPriceProcessCount = 0 + firstTxProcess := true f.sharedResourcesMux.Lock() for { - _, err := f.processTransaction(ctx, tx) + _, err := f.processTransaction(ctx, tx, firstTxProcess) if err != nil { if err == ErrEffectiveGasPriceReprocess { + firstTxProcess = false log.Info("reprocessing tx because of effective gas price calculation: %s", tx.Hash.Hex()) metrics.GetLogStatistics().CumulativeCounting(metrics.ReprocessingTxCounter) continue @@ 
-363,7 +369,10 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { f.sharedResourcesMux.Unlock() } else { // wait for new txs - // log.Debugf("no transactions to be processed. Sleeping for %v", f.cfg.SleepDuration.Duration) + if showNotFoundTxLog { + log.Debug("no transactions to be processed. Waiting...") + showNotFoundTxLog = false + } if f.cfg.SleepDuration.Duration > 0 { time.Sleep(f.cfg.SleepDuration.Duration) metrics.GetLogStatistics().CumulativeCounting(metrics.GetTxPauseCounter) @@ -507,7 +516,7 @@ func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) { // We need to process the batch to update the state root before closing the batch if f.batch.initialStateRoot == f.batch.stateRoot { log.Info("reprocessing batch because the state root has not changed...") - _, err = f.processTransaction(ctx, nil) + _, err = f.processTransaction(ctx, nil, true) if err != nil { return nil, err } @@ -538,6 +547,33 @@ func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) { } metrics.GetLogStatistics().CumulativeTiming(metrics.FinalizeBatchCloseBatch, time.Since(tsCloseBatch)) + // Check if the batch is empty and sending a GER Update to the stream is needed + if f.streamServer != nil && f.batch.isEmpty() && f.currentGERHash != f.previousGERHash { + updateGer := state.DSUpdateGER{ + BatchNumber: f.batch.batchNumber, + Timestamp: f.batch.timestamp.Unix(), + GlobalExitRoot: f.currentGERHash, + Coinbase: f.sequencerAddress, + ForkID: uint16(f.dbManager.GetForkIDByBatchNumber(f.batch.batchNumber)), + StateRoot: f.batch.stateRoot, + } + + err = f.streamServer.StartAtomicOp() + if err != nil { + log.Errorf("failed to start atomic op for Update GER on batch %v: %v", f.batch.batchNumber, err) + } + + _, err = f.streamServer.AddStreamEntry(state.EntryTypeUpdateGER, updateGer.Encode()) + if err != nil { + log.Errorf("failed to add stream entry for Update GER on batch %v: %v", f.batch.batchNumber, err) + } + + err = 
f.streamServer.CommitAtomicOp() + if err != nil { + log.Errorf("failed to commit atomic op for Update GER on batch %v: %v", f.batch.batchNumber, err) + } + } + // Metadata for the next batch tsOpenBatch := time.Now() stateRoot := f.batch.stateRoot @@ -554,13 +590,14 @@ func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) { // Take into consideration the GER f.nextGERMux.Lock() if f.nextGER != state.ZeroHash { - f.lastGERHash = f.nextGER + f.previousGERHash = f.currentGERHash + f.currentGERHash = f.nextGER } f.nextGER = state.ZeroHash f.nextGERDeadline = 0 f.nextGERMux.Unlock() - batch, err := f.openWIPBatch(ctx, lastBatchNumber+1, f.lastGERHash, stateRoot) + batch, err := f.openWIPBatch(ctx, lastBatchNumber+1, f.currentGERHash, stateRoot) if err == nil { f.processRequest.Timestamp = batch.timestamp f.processRequest.BatchNumber = batch.batchNumber @@ -574,7 +611,7 @@ func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) { } // processTransaction processes a single transaction. 
-func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errWg *sync.WaitGroup, err error) { +func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, firstTxProcess bool) (errWg *sync.WaitGroup, err error) { var txHash string if tx != nil { txHash = tx.Hash.String() @@ -599,46 +636,70 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW f.processRequest.Transactions = tx.RawTx hashStr = tx.HashStr - log.Infof("EffectiveGasPriceProcessCount=%d", tx.EffectiveGasPriceProcessCount) - // If it is the first time we process this tx then we calculate the BreakEvenGasPrice - if tx.EffectiveGasPriceProcessCount == 0 { + txGasPrice := tx.GasPrice + + // If it is the first time we process this tx then we calculate the EffectiveGasPrice + if firstTxProcess { // Get L1 gas price and store in txTracker to make it consistent during the lifespan of the transaction - tx.L1GasPrice = f.dbManager.GetL1GasPrice() - log.Infof("tx.L1GasPrice=%d", tx.L1GasPrice) - // Calculate the new breakEvenPrice - tx.BreakEvenGasPrice, err = f.CalculateTxBreakEvenGasPrice(tx, tx.BatchResources.ZKCounters.CumulativeGasUsed) + tx.L1GasPrice, tx.L2GasPrice = f.dbManager.GetL1AndL2GasPrice() + // Get the tx and l2 gas price we will use in the egp calculation. 
If egp is disabled we will use a "simulated" tx gas price + txGasPrice, txL2GasPrice := f.effectiveGasPrice.GetTxAndL2GasPrice(tx.GasPrice, tx.L1GasPrice, tx.L2GasPrice) + + // Save values for later logging + tx.EGPLog.L1GasPrice = tx.L1GasPrice + tx.EGPLog.L2GasPrice = txL2GasPrice + tx.EGPLog.GasUsedFirst = tx.BatchResources.ZKCounters.CumulativeGasUsed + tx.EGPLog.GasPrice.Set(txGasPrice) + + // Calculate EffectiveGasPrice + egp, err := f.effectiveGasPrice.CalculateEffectiveGasPrice(tx.RawTx, txGasPrice, tx.BatchResources.ZKCounters.CumulativeGasUsed, tx.L1GasPrice, txL2GasPrice) if err != nil { - if f.effectiveGasPriceCfg.Enabled { + if f.effectiveGasPrice.IsEnabled() { return nil, err } else { - log.Warnf("EffectiveGasPrice is disabled, but failed to calculate BreakEvenGasPrice: %s", err) + log.Warnf("EffectiveGasPrice is disabled, but failed to calculate EffectiveGasPrice: %s", err) + tx.EGPLog.Error = fmt.Sprintf("CalculateEffectiveGasPrice#1: %s", err) + } + } else { + tx.EffectiveGasPrice.Set(egp) + + // Save first EffectiveGasPrice for later logging + tx.EGPLog.ValueFirst.Set(tx.EffectiveGasPrice) + + // If EffectiveGasPrice >= txGasPrice, we process the tx with tx.GasPrice + if tx.EffectiveGasPrice.Cmp(txGasPrice) >= 0 { + tx.EffectiveGasPrice.Set(txGasPrice) + + loss := new(big.Int).Sub(tx.EffectiveGasPrice, txGasPrice) + // If loss > 0 the warning message indicating we loss fee for thix tx + if loss.Cmp(new(big.Int).SetUint64(0)) == 1 { + log.Warnf("egp-loss: gasPrice: %d, effectiveGasPrice1: %d, loss: %d, txHash: %s", txGasPrice, tx.EffectiveGasPrice, loss, tx.HashStr) + } + + tx.IsLastExecution = true } } } - effectivePercentage := state.MaxEffectivePercentage - - if tx.BreakEvenGasPrice != nil && tx.BreakEvenGasPrice.Uint64() != 0 { - // If the tx gas price is lower than the break even gas price, we process the tx with the user gas price (100%) - if tx.GasPrice.Cmp(tx.BreakEvenGasPrice) <= 0 { - tx.IsEffectiveGasPriceFinalExecution = true + 
effectivePercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) + if err != nil { + if f.effectiveGasPrice.IsEnabled() { + return nil, err } else { - effectivePercentage, err = CalculateEffectiveGasPricePercentage(tx.GasPrice, tx.BreakEvenGasPrice) - if err != nil { - log.Errorf("failed to calculate effective percentage: %s", err) - return nil, err - } + log.Warnf("EffectiveGasPrice is disabled, but failed to to CalculateEffectiveGasPricePercentage#1: %s", err) + tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPricePercentage#1: %s", tx.EGPLog.Error, err) } + } else { + // Save percentage for later logging + tx.EGPLog.Percentage = effectivePercentage } - log.Infof("calculated breakEvenGasPrice: %d, gasPrice: %d, effectivePercentage: %d for tx: %s", tx.BreakEvenGasPrice, tx.GasPrice, effectivePercentage, tx.HashStr) // If EGP is disabled we use tx GasPrice (MaxEffectivePercentage=255) - if !f.effectiveGasPriceCfg.Enabled { + if !f.effectiveGasPrice.IsEnabled() { effectivePercentage = state.MaxEffectivePercentage } - var effectivePercentageAsDecodedHex []byte - effectivePercentageAsDecodedHex, err = hex.DecodeHex(fmt.Sprintf("%x", effectivePercentage)) + effectivePercentageAsDecodedHex, err := hex.DecodeHex(fmt.Sprintf("%x", effectivePercentage)) if err != nil { return nil, err } @@ -717,31 +778,59 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx return nil, err } - if f.effectiveGasPriceCfg.Enabled && !tx.IsEffectiveGasPriceFinalExecution { - err := f.CompareTxBreakEvenGasPrice(ctx, tx, result.Responses[0].GasUsed) - if err != nil { - return nil, err - } - } else if !f.effectiveGasPriceCfg.Enabled { - reprocessNeeded := false - newBreakEvenGasPrice, err := f.CalculateTxBreakEvenGasPrice(tx, result.Responses[0].GasUsed) + egpEnabled := f.effectiveGasPrice.IsEnabled() + + if !tx.IsLastExecution { + tx.IsLastExecution = true + + // Get the tx gas price we will use in the 
egp calculation. If egp is disabled we will use a "simulated" tx gas price + txGasPrice, txL2GasPrice := f.effectiveGasPrice.GetTxAndL2GasPrice(tx.GasPrice, tx.L1GasPrice, tx.L2GasPrice) + + newEffectiveGasPrice, err := f.effectiveGasPrice.CalculateEffectiveGasPrice(tx.RawTx, txGasPrice, result.Responses[0].GasUsed, tx.L1GasPrice, txL2GasPrice) if err != nil { - log.Warnf("EffectiveGasPrice is disabled, but failed to calculate BreakEvenGasPrice: %s", err) + if egpEnabled { + log.Errorf("failed to calculate EffectiveGasPrice with new gasUsed for tx %s, error: %s", tx.HashStr, err.Error()) + return nil, err + } else { + log.Warnf("EffectiveGasPrice is disabled, but failed to calculate EffectiveGasPrice with new gasUsed for tx %s, error: %s", tx.HashStr, err.Error()) + tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPrice#2: %s", tx.EGPLog.Error, err) + } } else { - // Compute the absolute difference between tx.BreakEvenGasPrice - newBreakEvenGasPrice - diff := new(big.Int).Abs(new(big.Int).Sub(tx.BreakEvenGasPrice, newBreakEvenGasPrice)) - // Compute max difference allowed of breakEvenGasPrice - maxDiff := new(big.Int).Div(new(big.Int).Mul(tx.BreakEvenGasPrice, f.maxBreakEvenGasPriceDeviationPercentage), big.NewInt(100)) //nolint:gomnd - - // if diff is greater than the maxDiff allowed - if diff.Cmp(maxDiff) == 1 { - reprocessNeeded = true + // Save new (second) gas used and second effective gas price calculation for later logging + tx.EGPLog.ValueSecond.Set(newEffectiveGasPrice) + tx.EGPLog.GasUsedSecond = result.Responses[0].GasUsed + + errCompare := f.CompareTxEffectiveGasPrice(ctx, tx, newEffectiveGasPrice, result.Responses[0].HasGaspriceOpcode, result.Responses[0].HasBalanceOpcode) + + // If EffectiveGasPrice is disabled we will calculate the percentage and save it for later logging + if !egpEnabled { + effectivePercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) + if err != nil { + 
log.Warnf("EffectiveGasPrice is disabled, but failed to CalculateEffectiveGasPricePercentage#2: %s", err) + tx.EGPLog.Error = fmt.Sprintf("%s, CalculateEffectiveGasPricePercentage#2: %s", tx.EGPLog.Error, err) + } else { + // Save percentage for later logging + tx.EGPLog.Percentage = effectivePercentage + } + } + + if errCompare != nil && egpEnabled { + return nil, errCompare } - log.Infof("calculated newBreakEvenGasPrice: %d, tx.BreakEvenGasPrice: %d for tx: %s", newBreakEvenGasPrice, tx.BreakEvenGasPrice, tx.HashStr) - log.Infof("Would need reprocess: %t, diff: %d, maxDiff: %d", reprocessNeeded, diff, maxDiff) } } + // Save Enabled, GasPriceOC, BalanceOC and final effective gas price for later logging + tx.EGPLog.Enabled = egpEnabled + tx.EGPLog.GasPriceOC = result.Responses[0].HasGaspriceOpcode + tx.EGPLog.BalanceOC = result.Responses[0].HasBalanceOpcode + tx.EGPLog.ValueFinal.Set(tx.EffectiveGasPrice) + + // Log here the results of EGP calculation + log.Infof("egp-log: final: %d, first: %d, second: %d, percentage: %d, deviation: %d, maxDeviation: %d, gasUsed1: %d, gasUsed2: %d, gasPrice: %d, l1GasPrice: %d, l2GasPrice: %d, reprocess: %t, gasPriceOC: %t, balanceOC: %t, enabled: %t, txSize: %d, txHash: %s, error: %s", + tx.EGPLog.ValueFinal, tx.EGPLog.ValueFirst, tx.EGPLog.ValueSecond, tx.EGPLog.Percentage, tx.EGPLog.FinalDeviation, tx.EGPLog.MaxDeviation, tx.EGPLog.GasUsedFirst, tx.EGPLog.GasUsedSecond, + tx.EGPLog.GasPrice, tx.EGPLog.L1GasPrice, tx.EGPLog.L2GasPrice, tx.EGPLog.Reprocess, tx.EGPLog.GasPriceOC, tx.EGPLog.BalanceOC, egpEnabled, len(tx.RawTx), tx.HashStr, tx.EGPLog.Error) + txToStore := transactionToStore{ hash: tx.Hash, from: tx.From, @@ -753,6 +842,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx oldStateRoot: oldStateRoot, isForcedBatch: false, flushId: result.FlushID, + egpLog: &tx.EGPLog, } f.updateLastPendingFlushID(result.FlushID) @@ -773,8 +863,9 @@ func (f *finalizer) handleForcedTxsProcessResp(ctx 
context.Context, request stat // Handle Transaction Error if txResp.RomError != nil { romErr := executor.RomErrorCode(txResp.RomError) - if executor.IsIntrinsicError(romErr) { - // If we have an intrinsic error, we should continue processing the batch, but skip the transaction + if executor.IsIntrinsicError(romErr) || romErr == executor.RomError_ROM_ERROR_INVALID_RLP { + // If we have an intrinsic error or the RLP is invalid + // we should continue processing the batch, but skip the transaction log.Errorf("handleForcedTxsProcessResp: ROM error: %s", txResp.RomError) continue } @@ -810,6 +901,50 @@ func (f *finalizer) handleForcedTxsProcessResp(ctx context.Context, request stat } } +// CompareTxEffectiveGasPrice compares newEffectiveGasPrice with tx.EffectiveGasPrice. +// It returns ErrEffectiveGasPriceReprocess if the tx needs to be reprocessed with +// the tx.EffectiveGasPrice updated, otherwise it returns nil +func (f *finalizer) CompareTxEffectiveGasPrice(ctx context.Context, tx *TxTracker, newEffectiveGasPrice *big.Int, hasGasPriceOC bool, hasBalanceOC bool) error { + // Get the tx gas price we will use in the egp calculation. 
If egp is disabled we will use a "simulated" tx gas price + txGasPrice, _ := f.effectiveGasPrice.GetTxAndL2GasPrice(tx.GasPrice, tx.L1GasPrice, tx.L2GasPrice) + + // Compute the absolute difference between tx.EffectiveGasPrice - newEffectiveGasPrice + diff := new(big.Int).Abs(new(big.Int).Sub(tx.EffectiveGasPrice, newEffectiveGasPrice)) + // Compute max deviation allowed of newEffectiveGasPrice + maxDeviation := new(big.Int).Div(new(big.Int).Mul(tx.EffectiveGasPrice, new(big.Int).SetUint64(f.effectiveGasPrice.GetFinalDeviation())), big.NewInt(100)) //nolint:gomnd + + // Save FinalDeviation (diff) and MaxDeviation for later logging + tx.EGPLog.FinalDeviation.Set(diff) + tx.EGPLog.MaxDeviation.Set(maxDeviation) + + // if (diff > finalDeviation) + if diff.Cmp(maxDeviation) == 1 { + // if newEfectiveGasPrice < txGasPrice + if newEffectiveGasPrice.Cmp(txGasPrice) == -1 { + if hasGasPriceOC || hasBalanceOC { + tx.EffectiveGasPrice.Set(txGasPrice) + } else { + tx.EffectiveGasPrice.Set(newEffectiveGasPrice) + } + } else { + tx.EffectiveGasPrice.Set(txGasPrice) + + loss := new(big.Int).Sub(newEffectiveGasPrice, txGasPrice) + // If loss > 0 the warning message indicating we loss fee for thix tx + if loss.Cmp(new(big.Int).SetUint64(0)) == 1 { + log.Warnf("egp-loss: gasPrice: %d, EffectiveGasPrice2: %d, loss: %d, txHash: %s", txGasPrice, newEffectiveGasPrice, loss, tx.HashStr) + } + } + + // Save Reprocess for later logging + tx.EGPLog.Reprocess = true + + return ErrEffectiveGasPriceReprocess + } // else (diff <= finalDeviation) it is ok, no reprocess of the tx is needed + + return nil +} + // storeProcessedTx stores the processed transaction in the database. 
func (f *finalizer) storeProcessedTx(ctx context.Context, txToStore transactionToStore) { if txToStore.response != nil { @@ -1044,18 +1179,49 @@ func (f *finalizer) processForcedBatch(ctx context.Context, lastBatchNumberInSta if len(response.Responses) > 0 && !response.IsRomOOCError { for _, txResponse := range response.Responses { - sender, err := state.GetSender(txResponse.Tx) - if err != nil { - log.Warnf("failed trying to add forced tx (%s) to worker. Error getting sender from tx, Err: %v", txResponse.TxHash, err) - continue + if !errors.Is(txResponse.RomError, executor.RomErr(executor.RomError_ROM_ERROR_INVALID_RLP)) { + sender, err := state.GetSender(txResponse.Tx) + if err != nil { + log.Warnf("failed trying to add forced tx (%s) to worker. Error getting sender from tx, Err: %v", txResponse.TxHash, err) + continue + } + f.worker.AddForcedTx(txResponse.TxHash, sender) + } else { + log.Warnf("ROM_ERROR_INVALID_RLP error received from executor for forced batch %d", forcedBatch.ForcedBatchNumber) } - f.worker.AddForcedTx(txResponse.TxHash, sender) } f.handleForcedTxsProcessResp(ctx, request, response, stateRoot) + } else { + if f.streamServer != nil && f.currentGERHash != forcedBatch.GlobalExitRoot { + updateGer := state.DSUpdateGER{ + BatchNumber: request.BatchNumber, + Timestamp: request.Timestamp.Unix(), + GlobalExitRoot: request.GlobalExitRoot, + Coinbase: f.sequencerAddress, + ForkID: uint16(f.dbManager.GetForkIDByBatchNumber(request.BatchNumber)), + StateRoot: response.NewStateRoot, + } + + err = f.streamServer.StartAtomicOp() + if err != nil { + log.Errorf("failed to start atomic op for forced batch %v: %v", forcedBatch.ForcedBatchNumber, err) + } + + _, err = f.streamServer.AddStreamEntry(state.EntryTypeUpdateGER, updateGer.Encode()) + if err != nil { + log.Errorf("failed to add stream entry for forced batch %v: %v", forcedBatch.ForcedBatchNumber, err) + } + + err = f.streamServer.CommitAtomicOp() + if err != nil { + log.Errorf("failed to commit atomic 
op for forced batch %v: %v", forcedBatch.ForcedBatchNumber, err) + } + } } + f.nextGERMux.Lock() - f.lastGERHash = forcedBatch.GlobalExitRoot + f.currentGERHash = forcedBatch.GlobalExitRoot f.nextGERMux.Unlock() stateRoot = response.NewStateRoot lastBatchNumberInState += 1 @@ -1353,7 +1519,7 @@ func (f *finalizer) getConstraintThresholdUint32(input uint32) uint32 { } // getUsedBatchResources returns the used resources in the batch -func getUsedBatchResources(constraints batchConstraints, remainingResources state.BatchResources) state.BatchResources { +func getUsedBatchResources(constraints state.BatchConstraintsCfg, remainingResources state.BatchResources) state.BatchResources { return state.BatchResources{ ZKCounters: state.ZKCounters{ CumulativeGasUsed: constraints.MaxCumulativeGasUsed - remainingResources.ZKCounters.CumulativeGasUsed, diff --git a/sequencer/finalizer_test.go b/sequencer/finalizer_test.go index 2def11057f..533a0cb87c 100644 --- a/sequencer/finalizer_test.go +++ b/sequencer/finalizer_test.go @@ -36,7 +36,7 @@ var ( executorMock = new(StateMock) workerMock = new(WorkerMock) dbTxMock = new(DbTxMock) - bc = batchConstraints{ + bc = state.BatchConstraintsCfg{ MaxTxsPerBatch: 300, MaxBatchBytesSize: 120000, MaxCumulativeGasUsed: 30000000, @@ -53,13 +53,6 @@ var ( GERCh: make(chan common.Hash), L2ReorgCh: make(chan L2ReorgEvent), } - effectiveGasPriceCfg = EffectiveGasPriceCfg{ - MaxBreakEvenGasPriceDeviationPercentage: 10, - L1GasPriceFactor: 0.25, - ByteGasCost: 16, - MarginFactor: 1, - Enabled: false, - } cfg = FinalizerCfg{ GERDeadlineTimeout: cfgTypes.Duration{ Duration: 60, @@ -83,6 +76,19 @@ var ( GERFinalityNumberOfBlocks: 64, SequentialReprocessFullBatch: true, } + poolCfg = pool.Config{ + EffectiveGasPrice: pool.EffectiveGasPriceCfg{ + Enabled: false, + L1GasPriceFactor: 0.25, + ByteGasCost: 16, + ZeroByteGasCost: 4, + NetProfit: 1.0, + BreakEvenFactor: 1.1, + FinalDeviationPct: 10, + L2GasPriceSuggesterFactor: 0.5, + }, + 
DefaultMinGasPriceAllowed: 1000000000, + } chainID = new(big.Int).SetInt64(400) pvtKey = "0x28b2b0318721be8c8339199172cd7cc8f5e273800a35616ec893083a4b32c02e" nonce1 = uint64(1) @@ -108,7 +114,7 @@ var ( decodedBatchL2Data []byte done chan bool gasPrice = big.NewInt(1000000) - breakEvenGasPrice = big.NewInt(1000000) + effectiveGasPrice = big.NewInt(1000000) l1GasPrice = uint64(1000000) ) @@ -124,7 +130,7 @@ func TestNewFinalizer(t *testing.T) { dbManagerMock.On("GetLastSentFlushID", context.Background()).Return(uint64(0), nil) // arrange and act - f = newFinalizer(cfg, effectiveGasPriceCfg, workerMock, dbManagerMock, executorMock, seqAddr, isSynced, closingSignalCh, bc, eventLog) + f = newFinalizer(cfg, poolCfg, workerMock, dbManagerMock, executorMock, seqAddr, isSynced, closingSignalCh, bc, eventLog, nil) // assert assert.NotNil(t, f) @@ -140,12 +146,29 @@ func TestNewFinalizer(t *testing.T) { func TestFinalizer_handleProcessTransactionResponse(t *testing.T) { f = setupFinalizer(true) ctx = context.Background() - txTracker := &TxTracker{Hash: txHash, From: senderAddr, Nonce: 1, GasPrice: gasPrice, BreakEvenGasPrice: breakEvenGasPrice, L1GasPrice: l1GasPrice, BatchResources: state.BatchResources{ - Bytes: 1000, - ZKCounters: state.ZKCounters{ - CumulativeGasUsed: 500, + txTracker := &TxTracker{ + Hash: txHash, + From: senderAddr, + Nonce: 1, + GasPrice: gasPrice, + EffectiveGasPrice: effectiveGasPrice, + L1GasPrice: l1GasPrice, + EGPLog: state.EffectiveGasPriceLog{ + ValueFinal: new(big.Int).SetUint64(0), + ValueFirst: new(big.Int).SetUint64(0), + ValueSecond: new(big.Int).SetUint64(0), + FinalDeviation: new(big.Int).SetUint64(0), + MaxDeviation: new(big.Int).SetUint64(0), + GasPrice: new(big.Int).SetUint64(0), + }, + BatchResources: state.BatchResources{ + Bytes: 1000, + ZKCounters: state.ZKCounters{ + CumulativeGasUsed: 500, + }, }, - }} + RawTx: []byte{0, 0, 1, 2, 3, 4, 5}, + } txResponse := &state.ProcessTransactionResponse{ TxHash: txHash, @@ -323,7 +346,7 @@ 
func TestFinalizer_handleProcessTransactionResponse(t *testing.T) { <-done // wait for the goroutine to finish f.pendingTransactionsToStoreWG.Wait() require.Len(t, storedTxs, 1) - actualTx := storedTxs[0] + actualTx := storedTxs[0] //nolint:gosec assertEqualTransactionToStore(t, tc.expectedStoredTx, actualTx) } else { require.Empty(t, storedTxs) @@ -1444,14 +1467,24 @@ func Test_processTransaction(t *testing.T) { Hash: txHash, From: senderAddr, Nonce: nonce1, - BreakEvenGasPrice: breakEvenGasPrice, - GasPrice: breakEvenGasPrice, + GasPrice: effectiveGasPrice, + EffectiveGasPrice: effectiveGasPrice, + L1GasPrice: l1GasPrice, + EGPLog: state.EffectiveGasPriceLog{ + ValueFinal: new(big.Int).SetUint64(0), + ValueFirst: new(big.Int).SetUint64(0), + ValueSecond: new(big.Int).SetUint64(0), + FinalDeviation: new(big.Int).SetUint64(0), + MaxDeviation: new(big.Int).SetUint64(0), + GasPrice: new(big.Int).SetUint64(0), + }, BatchResources: state.BatchResources{ Bytes: 1000, ZKCounters: state.ZKCounters{ CumulativeGasUsed: 500, }, }, + RawTx: []byte{0, 0, 1, 2, 3, 4, 5}, } successfulTxResponse := &state.ProcessTransactionResponse{ TxHash: txHash, @@ -1543,7 +1576,7 @@ func Test_processTransaction(t *testing.T) { }() } - dbManagerMock.On("GetL1GasPrice").Return(uint64(1000000)).Once() + dbManagerMock.On("GetL1AndL2GasPrice").Return(uint64(1000000), uint64(100000)).Once() executorMock.On("ProcessBatch", tc.ctx, mock.Anything, true).Return(tc.expectedResponse, tc.executorErr).Once() if tc.executorErr == nil { workerMock.On("DeleteTx", tc.tx.Hash, tc.tx.From).Return().Once() @@ -1562,7 +1595,7 @@ func Test_processTransaction(t *testing.T) { workerMock.On("DeleteTx", tc.tx.Hash, tc.tx.From).Return().Once() } - errWg, err := f.processTransaction(tc.ctx, tc.tx) + errWg, err := f.processTransaction(tc.ctx, tc.tx, true) if tc.expectedStoredTx.batchResponse != nil { close(f.pendingTransactionsToStore) // ensure the channel is closed @@ -2502,35 +2535,34 @@ func 
setupFinalizer(withWipBatch bool) *finalizer { } eventLog := event.NewEventLog(event.Config{}, eventStorage) return &finalizer{ - cfg: cfg, - effectiveGasPriceCfg: effectiveGasPriceCfg, - closingSignalCh: closingSignalCh, - isSynced: isSynced, - sequencerAddress: seqAddr, - worker: workerMock, - dbManager: dbManagerMock, - executor: executorMock, - batch: wipBatch, - batchConstraints: bc, - processRequest: state.ProcessRequest{}, - sharedResourcesMux: new(sync.RWMutex), - lastGERHash: common.Hash{}, + cfg: cfg, + closingSignalCh: closingSignalCh, + isSynced: isSynced, + sequencerAddress: seqAddr, + worker: workerMock, + dbManager: dbManagerMock, + executor: executorMock, + batch: wipBatch, + batchConstraints: bc, + processRequest: state.ProcessRequest{}, + sharedResourcesMux: new(sync.RWMutex), + currentGERHash: common.Hash{}, // closing signals - nextGER: common.Hash{}, - nextGERDeadline: 0, - nextGERMux: new(sync.RWMutex), - nextForcedBatches: make([]state.ForcedBatch, 0), - nextForcedBatchDeadline: 0, - nextForcedBatchesMux: new(sync.RWMutex), - handlingL2Reorg: false, - eventLog: eventLog, - maxBreakEvenGasPriceDeviationPercentage: big.NewInt(10), - pendingTransactionsToStore: make(chan transactionToStore, bc.MaxTxsPerBatch*pendingTxsBufferSizeMultiplier), - pendingTransactionsToStoreWG: new(sync.WaitGroup), - storedFlushID: 0, - storedFlushIDCond: sync.NewCond(new(sync.Mutex)), - proverID: "", - lastPendingFlushID: 0, - pendingFlushIDCond: sync.NewCond(new(sync.Mutex)), + nextGER: common.Hash{}, + nextGERDeadline: 0, + nextGERMux: new(sync.RWMutex), + nextForcedBatches: make([]state.ForcedBatch, 0), + nextForcedBatchDeadline: 0, + nextForcedBatchesMux: new(sync.RWMutex), + handlingL2Reorg: false, + effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice, poolCfg.DefaultMinGasPriceAllowed), + eventLog: eventLog, + pendingTransactionsToStore: make(chan transactionToStore, bc.MaxTxsPerBatch*pendingTxsBufferSizeMultiplier), + 
pendingTransactionsToStoreWG: new(sync.WaitGroup), + storedFlushID: 0, + storedFlushIDCond: sync.NewCond(new(sync.Mutex)), + proverID: "", + lastPendingFlushID: 0, + pendingFlushIDCond: sync.NewCond(new(sync.Mutex)), } } diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go index 0583c1637f..b8a6079dbb 100644 --- a/sequencer/interfaces.go +++ b/sequencer/interfaces.go @@ -5,7 +5,7 @@ import ( "math/big" "time" - "github.com/0xPolygonHermez/zkevm-node/ethtxmanager" + ethmanTypes "github.com/0xPolygonHermez/zkevm-node/etherman/types" "github.com/0xPolygonHermez/zkevm-node/pool" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/metrics" @@ -20,6 +20,7 @@ import ( // txPool contains the methods required to interact with the tx pool. type txPool interface { DeleteTransactionsByHashes(ctx context.Context, hashes []common.Hash) error + DeleteFailedTransactionsOlderThan(ctx context.Context, date time.Time) error DeleteTransactionByHash(ctx context.Context, hash common.Hash) error MarkWIPTxsAsPending(ctx context.Context) error GetNonWIPPendingTxs(ctx context.Context) ([]pool.Transaction, error) @@ -28,16 +29,18 @@ type txPool interface { UpdateTxWIPStatus(ctx context.Context, hash common.Hash, isWIP bool) error GetGasPrices(ctx context.Context) (pool.GasPrices, error) GetDefaultMinGasPriceAllowed() uint64 - GetL1GasPrice() uint64 + GetL1AndL2GasPrice() (uint64, uint64) } // etherman contains the methods required to interact with ethereum. 
type etherman interface { + EstimateGasSequenceBatches(sender common.Address, sequences []ethmanTypes.Sequence, l2CoinBase common.Address, committeeSignaturesAndAddrs []byte) (*types.Transaction, error) GetSendSequenceFee(numBatches uint64) (*big.Int, error) TrustedSequencer() (common.Address, error) GetLatestBatchNumber() (uint64, error) GetLastBatchTimestamp() (uint64, error) GetLatestBlockTimestamp(ctx context.Context) (uint64, error) + BuildSequenceBatchesTxData(sender common.Address, sequences []ethmanTypes.Sequence, l2CoinBase common.Address, committeeSignaturesAndAddrs []byte) (to *common.Address, data []byte, err error) GetLatestBlockNumber(ctx context.Context) (uint64, error) } @@ -62,7 +65,7 @@ type stateInterface interface { GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error GetLastNBatches(ctx context.Context, numBatches uint, dbTx pgx.Tx) ([]*state.Batch, error) - StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, dbTx pgx.Tx) error + StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, egpLog *state.EffectiveGasPriceLog, dbTx pgx.Tx) (*types.Header, error) GetLastClosedBatch(ctx context.Context, dbTx pgx.Tx) (*state.Batch, error) GetLastL2Block(ctx context.Context, dbTx pgx.Tx) (*types.Block, error) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state.Block, error) @@ -78,6 +81,10 @@ type stateInterface interface { FlushMerkleTree(ctx context.Context) error GetStoredFlushID(ctx context.Context) (uint64, string, error) GetForkIDByBatchNumber(batchNumber uint64) uint64 + GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*state.DSL2Block, error) + GetDSBatches(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) 
([]*state.DSBatch, error) + GetDSL2Blocks(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*state.DSL2Block, error) + GetDSL2Transactions(ctx context.Context, firstL2Block, lastL2Block uint64, dbTx pgx.Tx) ([]*state.DSL2Transaction, error) } type workerInterface interface { @@ -124,16 +131,9 @@ type dbManagerInterface interface { FlushMerkleTree(ctx context.Context) error GetGasPrices(ctx context.Context) (pool.GasPrices, error) GetDefaultMinGasPriceAllowed() uint64 - GetL1GasPrice() uint64 + GetL1AndL2GasPrice() (uint64, uint64) GetStoredFlushID(ctx context.Context) (uint64, string, error) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx transactionToStore) error GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) GetForkIDByBatchNumber(batchNumber uint64) uint64 } - -type ethTxManager interface { - Add(ctx context.Context, owner, id string, from common.Address, to *common.Address, value *big.Int, data []byte, dbTx pgx.Tx) error - Result(ctx context.Context, owner, id string, dbTx pgx.Tx) (ethtxmanager.MonitoredTxResult, error) - ResultsByStatus(ctx context.Context, owner string, statuses []ethtxmanager.MonitoredTxStatus, dbTx pgx.Tx) ([]ethtxmanager.MonitoredTxResult, error) - ProcessPendingMonitoredTxs(ctx context.Context, owner string, failedResultHandler ethtxmanager.ResultHandler, dbTx pgx.Tx) -} diff --git a/sequencer/mock_db_manager.go b/sequencer/mock_db_manager.go index c2a12fc589..c969f4c90c 100644 --- a/sequencer/mock_db_manager.go +++ b/sequencer/mock_db_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package sequencer @@ -288,18 +288,28 @@ func (_m *DbManagerMock) GetGasPrices(ctx context.Context) (pool.GasPrices, erro return r0, r1 } -// GetL1GasPrice provides a mock function with given fields: -func (_m *DbManagerMock) GetL1GasPrice() uint64 { +// GetL1AndL2GasPrice provides a mock function with given fields: +func (_m *DbManagerMock) GetL1AndL2GasPrice() (uint64, uint64) { ret := _m.Called() var r0 uint64 + var r1 uint64 + if rf, ok := ret.Get(0).(func() (uint64, uint64)); ok { + return rf() + } if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() } else { r0 = ret.Get(0).(uint64) } - return r0 + if rf, ok := ret.Get(1).(func() uint64); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(uint64) + } + + return r0, r1 } // GetLastBatch provides a mock function with given fields: ctx @@ -719,13 +729,12 @@ func (_m *DbManagerMock) UpdateTxStatus(ctx context.Context, hash common.Hash, n return r0 } -type mockConstructorTestingTNewDbManagerMock interface { +// NewDbManagerMock creates a new instance of DbManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDbManagerMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewDbManagerMock creates a new instance of DbManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDbManagerMock(t mockConstructorTestingTNewDbManagerMock) *DbManagerMock { +}) *DbManagerMock { mock := &DbManagerMock{} mock.Mock.Test(t) diff --git a/sequencer/mock_dbtx.go b/sequencer/mock_dbtx.go index 196f2b1850..d43613fb71 100644 --- a/sequencer/mock_dbtx.go +++ b/sequencer/mock_dbtx.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package sequencer @@ -283,13 +283,12 @@ func (_m *DbTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResult return r0 } -type mockConstructorTestingTNewDbTxMock interface { +// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDbTxMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDbTxMock(t mockConstructorTestingTNewDbTxMock) *DbTxMock { +}) *DbTxMock { mock := &DbTxMock{} mock.Mock.Test(t) diff --git a/sequencer/mock_etherman.go b/sequencer/mock_etherman.go index 74b8a3d117..d7290db452 100644 --- a/sequencer/mock_etherman.go +++ b/sequencer/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package sequencer @@ -6,10 +6,12 @@ import ( context "context" big "math/big" + "github.com/0xPolygonHermez/zkevm-node/etherman/types" + coretypes "github.com/ethereum/go-ethereum/core/types" + common "github.com/ethereum/go-ethereum/common" mock "github.com/stretchr/testify/mock" - ) // EthermanMock is an autogenerated mock type for the etherman type @@ -17,6 +19,67 @@ type EthermanMock struct { mock.Mock } +// BuildSequenceBatchesTxData provides a mock function with given fields: sender, sequences, l2CoinBase +func (_m *EthermanMock) BuildSequenceBatchesTxData(sender common.Address, sequences []types.Sequence, l2CoinBase common.Address, committeeSignaturesAndAddrs []byte) (*common.Address, []byte, error) { + ret := _m.Called(sender, sequences, l2CoinBase) + + var r0 *common.Address + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func(common.Address, []types.Sequence, common.Address) (*common.Address, []byte, error)); ok { + return rf(sender, sequences, l2CoinBase) + } + if rf, ok := ret.Get(0).(func(common.Address, []types.Sequence, common.Address) *common.Address); ok { + r0 = rf(sender, sequences, l2CoinBase) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*common.Address) + } + } + + if rf, ok := ret.Get(1).(func(common.Address, []types.Sequence, common.Address) []byte); ok { + r1 = rf(sender, sequences, l2CoinBase) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(common.Address, []types.Sequence, common.Address) error); ok { + r2 = rf(sender, sequences, l2CoinBase) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// EstimateGasSequenceBatches provides a mock function with given fields: sender, sequences, l2CoinBase +func (_m *EthermanMock) EstimateGasSequenceBatches(sender common.Address, sequences []types.Sequence, l2CoinBase common.Address, committeeSignaturesAndAddrs []byte) (*coretypes.Transaction, error) { + ret := _m.Called(sender, sequences, l2CoinBase) + + var r0 
*coretypes.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(common.Address, []types.Sequence, common.Address) (*coretypes.Transaction, error)); ok { + return rf(sender, sequences, l2CoinBase) + } + if rf, ok := ret.Get(0).(func(common.Address, []types.Sequence, common.Address) *coretypes.Transaction); ok { + r0 = rf(sender, sequences, l2CoinBase) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(common.Address, []types.Sequence, common.Address) error); ok { + r1 = rf(sender, sequences, l2CoinBase) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLastBatchTimestamp provides a mock function with given fields: func (_m *EthermanMock) GetLastBatchTimestamp() (uint64, error) { ret := _m.Called() @@ -165,13 +228,12 @@ func (_m *EthermanMock) TrustedSequencer() (common.Address, error) { return r0, r1 } -type mockConstructorTestingTNewEthermanMock interface { +// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthermanMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEthermanMock(t mockConstructorTestingTNewEthermanMock) *EthermanMock { +}) *EthermanMock { mock := &EthermanMock{} mock.Mock.Test(t) diff --git a/sequencer/mock_pool.go b/sequencer/mock_pool.go index 40723aca09..cb5cb82f7a 100644 --- a/sequencer/mock_pool.go +++ b/sequencer/mock_pool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package sequencer @@ -12,6 +12,8 @@ import ( pool "github.com/0xPolygonHermez/zkevm-node/pool" state "github.com/0xPolygonHermez/zkevm-node/state" + + time "time" ) // PoolMock is an autogenerated mock type for the txPool type @@ -19,6 +21,20 @@ type PoolMock struct { mock.Mock } +// DeleteFailedTransactionsOlderThan provides a mock function with given fields: ctx, date +func (_m *PoolMock) DeleteFailedTransactionsOlderThan(ctx context.Context, date time.Time) error { + ret := _m.Called(ctx, date) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time) error); ok { + r0 = rf(ctx, date) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // DeleteTransactionByHash provides a mock function with given fields: ctx, hash func (_m *PoolMock) DeleteTransactionByHash(ctx context.Context, hash common.Hash) error { ret := _m.Called(ctx, hash) @@ -85,18 +101,28 @@ func (_m *PoolMock) GetGasPrices(ctx context.Context) (pool.GasPrices, error) { return r0, r1 } -// GetL1GasPrice provides a mock function with given fields: -func (_m *PoolMock) GetL1GasPrice() uint64 { +// GetL1AndL2GasPrice provides a mock function with given fields: +func (_m *PoolMock) GetL1AndL2GasPrice() (uint64, uint64) { ret := _m.Called() var r0 uint64 + var r1 uint64 + if rf, ok := ret.Get(0).(func() (uint64, uint64)); ok { + return rf() + } if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() } else { r0 = ret.Get(0).(uint64) } - return r0 + if rf, ok := ret.Get(1).(func() uint64); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(uint64) + } + + return r0, r1 } // GetNonWIPPendingTxs provides a mock function with given fields: ctx @@ -193,13 +219,12 @@ func (_m *PoolMock) UpdateTxWIPStatus(ctx context.Context, hash common.Hash, isW return r0 } -type mockConstructorTestingTNewPoolMock interface { +// NewPoolMock creates a new instance of PoolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewPoolMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewPoolMock creates a new instance of PoolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPoolMock(t mockConstructorTestingTNewPoolMock) *PoolMock { +}) *PoolMock { mock := &PoolMock{} mock.Mock.Test(t) diff --git a/sequencer/mock_state.go b/sequencer/mock_state.go index 4590f7fb87..56138fda1a 100644 --- a/sequencer/mock_state.go +++ b/sequencer/mock_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package sequencer @@ -210,6 +210,110 @@ func (_m *StateMock) GetBatchByNumber(ctx context.Context, batchNumber uint64, d return r0, r1 } +// GetDSBatches provides a mock function with given fields: ctx, firstBatchNumber, lastBatchNumber, dbTx +func (_m *StateMock) GetDSBatches(ctx context.Context, firstBatchNumber uint64, lastBatchNumber uint64, dbTx pgx.Tx) ([]*state.DSBatch, error) { + ret := _m.Called(ctx, firstBatchNumber, lastBatchNumber, dbTx) + + var r0 []*state.DSBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]*state.DSBatch, error)); ok { + return rf(ctx, firstBatchNumber, lastBatchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []*state.DSBatch); ok { + r0 = rf(ctx, firstBatchNumber, lastBatchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*state.DSBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, firstBatchNumber, lastBatchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetDSGenesisBlock provides a mock function with given fields: ctx, dbTx +func (_m *StateMock) GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*state.DSL2Block, error) { + ret := _m.Called(ctx, dbTx) + + var 
r0 *state.DSL2Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (*state.DSL2Block, error)); ok { + return rf(ctx, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) *state.DSL2Block); ok { + r0 = rf(ctx, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.DSL2Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) error); ok { + r1 = rf(ctx, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetDSL2Blocks provides a mock function with given fields: ctx, firstBatchNumber, lastBatchNumber, dbTx +func (_m *StateMock) GetDSL2Blocks(ctx context.Context, firstBatchNumber uint64, lastBatchNumber uint64, dbTx pgx.Tx) ([]*state.DSL2Block, error) { + ret := _m.Called(ctx, firstBatchNumber, lastBatchNumber, dbTx) + + var r0 []*state.DSL2Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]*state.DSL2Block, error)); ok { + return rf(ctx, firstBatchNumber, lastBatchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []*state.DSL2Block); ok { + r0 = rf(ctx, firstBatchNumber, lastBatchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*state.DSL2Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, firstBatchNumber, lastBatchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetDSL2Transactions provides a mock function with given fields: ctx, firstL2Block, lastL2Block, dbTx +func (_m *StateMock) GetDSL2Transactions(ctx context.Context, firstL2Block uint64, lastL2Block uint64, dbTx pgx.Tx) ([]*state.DSL2Transaction, error) { + ret := _m.Called(ctx, firstL2Block, lastL2Block, dbTx) + + var r0 []*state.DSL2Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]*state.DSL2Transaction, error)); ok { + return rf(ctx, firstL2Block, lastL2Block, dbTx) + } + 
if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []*state.DSL2Transaction); ok { + r0 = rf(ctx, firstL2Block, lastL2Block, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*state.DSL2Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, firstL2Block, lastL2Block, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetForcedBatch provides a mock function with given fields: ctx, forcedBatchNumber, dbTx func (_m *StateMock) GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) { ret := _m.Called(ctx, forcedBatchNumber, dbTx) @@ -848,18 +952,30 @@ func (_m *StateMock) ProcessSequencerBatch(ctx context.Context, batchNumber uint return r0, r1 } -// StoreTransaction provides a mock function with given fields: ctx, batchNumber, processedTx, coinbase, timestamp, dbTx -func (_m *StateMock) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, dbTx pgx.Tx) error { - ret := _m.Called(ctx, batchNumber, processedTx, coinbase, timestamp, dbTx) +// StoreTransaction provides a mock function with given fields: ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx +func (_m *StateMock) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, egpLog *state.EffectiveGasPriceLog, dbTx pgx.Tx) (*types.Header, error) { + ret := _m.Called(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.ProcessTransactionResponse, common.Address, uint64, pgx.Tx) error); ok { - r0 = rf(ctx, batchNumber, processedTx, coinbase, timestamp, dbTx) + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, 
*state.ProcessTransactionResponse, common.Address, uint64, *state.EffectiveGasPriceLog, pgx.Tx) (*types.Header, error)); ok { + return rf(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.ProcessTransactionResponse, common.Address, uint64, *state.EffectiveGasPriceLog, pgx.Tx) *types.Header); ok { + r0 = rf(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, uint64, *state.ProcessTransactionResponse, common.Address, uint64, *state.EffectiveGasPriceLog, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // UpdateBatchL2Data provides a mock function with given fields: ctx, batchNumber, batchL2Data, dbTx @@ -876,13 +992,12 @@ func (_m *StateMock) UpdateBatchL2Data(ctx context.Context, batchNumber uint64, return r0 } -type mockConstructorTestingTNewStateMock interface { +// NewStateMock creates a new instance of StateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewStateMock creates a new instance of StateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStateMock(t mockConstructorTestingTNewStateMock) *StateMock { +}) *StateMock { mock := &StateMock{} mock.Mock.Test(t) diff --git a/sequencer/mock_worker.go b/sequencer/mock_worker.go index 9532148429..d61a789387 100644 --- a/sequencer/mock_worker.go +++ b/sequencer/mock_worker.go @@ -1,12 +1,13 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package sequencer import ( context "context" - "github.com/0xPolygonHermez/zkevm-node/pool" big "math/big" + "github.com/0xPolygonHermez/zkevm-node/pool" + common "github.com/ethereum/go-ethereum/common" mock "github.com/stretchr/testify/mock" @@ -156,13 +157,12 @@ func (_m *WorkerMock) UpdateTxZKCounters(txHash common.Hash, from common.Address _m.Called(txHash, from, ZKCounters) } -type mockConstructorTestingTNewWorkerMock interface { +// NewWorkerMock creates a new instance of WorkerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWorkerMock(t interface { mock.TestingT Cleanup(func()) -} - -// NewWorkerMock creates a new instance of WorkerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWorkerMock(t mockConstructorTestingTNewWorkerMock) *WorkerMock { +}) *WorkerMock { mock := &WorkerMock{} mock.Mock.Test(t) diff --git a/sequencer/sequencer.go b/sequencer/sequencer.go index 4e9014b594..a60a7bf8b4 100644 --- a/sequencer/sequencer.go +++ b/sequencer/sequencer.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/0xPolygonHermez/zkevm-node/event" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/pool" @@ -17,31 +18,18 @@ import ( // Sequencer represents a sequencer type Sequencer struct { - cfg Config + cfg Config + batchCfg state.BatchConfig + poolCfg pool.Config - pool txPool - state stateInterface - eventLog *event.EventLog - ethTxManager ethTxManager - etherman etherman + pool txPool + state stateInterface + eventLog *event.EventLog + etherman etherman address common.Address } -// batchConstraints represents the constraints for a batch -type batchConstraints struct { - MaxTxsPerBatch uint64 - MaxBatchBytesSize uint64 - MaxCumulativeGasUsed uint64 - MaxKeccakHashes uint32 
- MaxPoseidonHashes uint32 - MaxPoseidonPaddings uint32 - MaxMemAligns uint32 - MaxArithmetics uint32 - MaxBinaries uint32 - MaxSteps uint32 -} - // L2ReorgEvent is the event that is triggered when a reorg happens in the L2 type L2ReorgEvent struct { TxHashes []common.Hash @@ -55,21 +43,24 @@ type ClosingSignalCh struct { } // New init sequencer -func New(cfg Config, txPool txPool, state stateInterface, etherman etherman, manager ethTxManager, eventLog *event.EventLog) (*Sequencer, error) { +func New(cfg Config, batchCfg state.BatchConfig, poolCfg pool.Config, txPool txPool, state stateInterface, etherman etherman, eventLog *event.EventLog) (*Sequencer, error) { addr, err := etherman.TrustedSequencer() if err != nil { return nil, fmt.Errorf("failed to get trusted sequencer address, err: %v", err) } - return &Sequencer{ - cfg: cfg, - pool: txPool, - state: state, - etherman: etherman, - ethTxManager: manager, - address: addr, - eventLog: eventLog, - }, nil + sequencer := &Sequencer{ + cfg: cfg, + batchCfg: batchCfg, + poolCfg: poolCfg, + pool: txPool, + state: state, + etherman: etherman, + address: addr, + eventLog: eventLog, + } + + return sequencer, nil } // Start starts the sequencer @@ -86,29 +77,38 @@ func (s *Sequencer) Start(ctx context.Context) { L2ReorgCh: make(chan L2ReorgEvent), } - batchConstraints := batchConstraints{ - MaxTxsPerBatch: s.cfg.MaxTxsPerBatch, - MaxBatchBytesSize: s.cfg.MaxBatchBytesSize, - MaxCumulativeGasUsed: s.cfg.MaxCumulativeGasUsed, - MaxKeccakHashes: s.cfg.MaxKeccakHashes, - MaxPoseidonHashes: s.cfg.MaxPoseidonHashes, - MaxPoseidonPaddings: s.cfg.MaxPoseidonPaddings, - MaxMemAligns: s.cfg.MaxMemAligns, - MaxArithmetics: s.cfg.MaxArithmetics, - MaxBinaries: s.cfg.MaxBinaries, - MaxSteps: s.cfg.MaxSteps, - } - err := s.pool.MarkWIPTxsAsPending(ctx) if err != nil { log.Fatalf("failed to mark WIP txs as pending, err: %v", err) } - worker := NewWorker(s.state) - dbManager := newDBManager(ctx, s.cfg.DBManager, s.pool, s.state, worker, 
closingSignalCh, batchConstraints) + worker := NewWorker(s.state, s.batchCfg.Constraints) + dbManager := newDBManager(ctx, s.cfg.DBManager, s.pool, s.state, worker, closingSignalCh, s.batchCfg.Constraints) + + // Start stream server if enabled + if s.cfg.StreamServer.Enabled { + streamServer, err := datastreamer.NewServer(s.cfg.StreamServer.Port, state.StreamTypeSequencer, s.cfg.StreamServer.Filename, &s.cfg.StreamServer.Log) + if err != nil { + log.Fatalf("failed to create stream server, err: %v", err) + } + + dbManager.streamServer = streamServer + err = dbManager.streamServer.Start() + if err != nil { + log.Fatalf("failed to start stream server, err: %v", err) + } + + s.updateDataStreamerFile(ctx, streamServer) + } + go dbManager.Start() - finalizer := newFinalizer(s.cfg.Finalizer, s.cfg.EffectiveGasPrice, worker, dbManager, s.state, s.address, s.isSynced, closingSignalCh, batchConstraints, s.eventLog) + var streamServer *datastreamer.StreamServer = nil + if s.cfg.StreamServer.Enabled { + streamServer = dbManager.streamServer + } + + finalizer := newFinalizer(s.cfg.Finalizer, s.poolCfg, worker, dbManager, s.state, s.address, s.isSynced, closingSignalCh, s.batchCfg.Constraints, s.eventLog, streamServer) currBatch, processingReq := s.bootstrap(ctx, dbManager, finalizer) go finalizer.Start(ctx, currBatch, processingReq) @@ -139,6 +139,14 @@ func (s *Sequencer) Start(ctx context.Context) { <-ctx.Done() } +func (s *Sequencer) updateDataStreamerFile(ctx context.Context, streamServer *datastreamer.StreamServer) { + err := state.GenerateDataStreamerFile(ctx, streamServer, s.state) + if err != nil { + log.Fatalf("failed to generate data streamer file, err: %v", err) + } + log.Info("Data streamer file updated") +} + func (s *Sequencer) bootstrap(ctx context.Context, dbManager *dbManager, finalizer *finalizer) (*WipBatch, *state.ProcessRequest) { var ( currBatch *WipBatch @@ -204,13 +212,22 @@ func (s *Sequencer) purgeOldPoolTxs(ctx context.Context) { log.Errorf("failed to 
get txs hashes to delete, err: %v", err) continue } - log.Infof("will try to delete %d redundant txs", len(txHashes)) + log.Infof("trying to delete %d selected txs", len(txHashes)) err = s.pool.DeleteTransactionsByHashes(ctx, txHashes) if err != nil { - log.Errorf("failed to delete txs from the pool, err: %v", err) + log.Errorf("failed to delete selected txs from the pool, err: %v", err) continue } log.Infof("deleted %d selected txs from the pool", len(txHashes)) + + log.Infof("trying to delete failed txs from the pool") + // Delete failed txs older than a certain date (14 seconds per L1 block) + err = s.pool.DeleteFailedTransactionsOlderThan(ctx, time.Now().Add(-time.Duration(s.cfg.BlocksAmountForTxsToBeDeleted*14)*time.Second)) //nolint:gomnd + if err != nil { + log.Errorf("failed to delete failed txs from the pool, err: %v", err) + continue + } + log.Infof("failed txs deleted from the pool") } } @@ -250,7 +267,7 @@ func (s *Sequencer) isSynced(ctx context.Context) bool { return true } -func getMaxRemainingResources(constraints batchConstraints) state.BatchResources { +func getMaxRemainingResources(constraints state.BatchConstraintsCfg) state.BatchResources { return state.BatchResources{ ZKCounters: state.ZKCounters{ CumulativeGasUsed: constraints.MaxCumulativeGasUsed, diff --git a/sequencer/txtracker.go b/sequencer/txtracker.go index 2bcc5b8f55..4f8a70b569 100644 --- a/sequencer/txtracker.go +++ b/sequencer/txtracker.go @@ -11,25 +11,24 @@ import ( // TxTracker is a struct that contains all the tx data needed to be managed by the worker type TxTracker struct { - Hash common.Hash - HashStr string - From common.Address - FromStr string - Nonce uint64 - Gas uint64 // To check if it fits into a batch - GasPrice *big.Int - Cost *big.Int // Cost = Amount + Benefit - Benefit *big.Int // GasLimit * GasPrice - BatchResources state.BatchResources // To check if it fits into a batch - RawTx []byte - ReceivedAt time.Time // To check if it has been in the txSortedList for 
too long - IP string // IP of the tx sender - FailedReason *string // FailedReason is the reason why the tx failed, if it failed - BreakEvenGasPrice *big.Int - GasPriceEffectivePercentage uint8 - EffectiveGasPriceProcessCount uint8 - IsEffectiveGasPriceFinalExecution bool - L1GasPrice uint64 + Hash common.Hash + HashStr string + From common.Address + FromStr string + Nonce uint64 + Gas uint64 // To check if it fits into a batch + GasPrice *big.Int + Cost *big.Int // Cost = Amount + Benefit + BatchResources state.BatchResources // To check if it fits into a batch + RawTx []byte + ReceivedAt time.Time // To check if it has been in the txSortedList for too long + IP string // IP of the tx sender + FailedReason *string // FailedReason is the reason why the tx failed, if it failed + EffectiveGasPrice *big.Int + IsLastExecution bool + EGPLog state.EffectiveGasPriceLog + L1GasPrice uint64 + L2GasPrice uint64 } // newTxTracker creates and inti a TxTracker @@ -44,6 +43,7 @@ func newTxTracker(ptx pool.Transaction, counters state.ZKCounters, ip string) (* if err != nil { return nil, err } + txTracker := &TxTracker{ Hash: tx.Hash(), HashStr: tx.Hash().String(), @@ -53,17 +53,22 @@ func newTxTracker(ptx pool.Transaction, counters state.ZKCounters, ip string) (* Gas: tx.Gas(), GasPrice: tx.GasPrice(), Cost: tx.Cost(), - Benefit: new(big.Int).Mul(new(big.Int).SetUint64(tx.Gas()), tx.GasPrice()), BatchResources: state.BatchResources{ Bytes: tx.Size(), ZKCounters: counters, }, - RawTx: rawTx, - ReceivedAt: time.Now(), - IP: ip, - BreakEvenGasPrice: new(big.Int).SetUint64(0), - EffectiveGasPriceProcessCount: 0, - IsEffectiveGasPriceFinalExecution: false, + RawTx: rawTx, + ReceivedAt: time.Now(), + IP: ip, + EffectiveGasPrice: new(big.Int).SetUint64(0), + EGPLog: state.EffectiveGasPriceLog{ + ValueFinal: new(big.Int).SetUint64(0), + ValueFirst: new(big.Int).SetUint64(0), + ValueSecond: new(big.Int).SetUint64(0), + FinalDeviation: new(big.Int).SetUint64(0), + MaxDeviation: 
new(big.Int).SetUint64(0), + GasPrice: new(big.Int).SetUint64(0), + }, } return txTracker, nil diff --git a/sequencer/worker.go b/sequencer/worker.go index 99ef3992c4..d98e5e58d3 100644 --- a/sequencer/worker.go +++ b/sequencer/worker.go @@ -16,18 +16,20 @@ import ( // Worker represents the worker component of the sequencer type Worker struct { - pool map[string]*addrQueue - txSortedList *txSortedList - workerMutex sync.Mutex - state stateInterface + pool map[string]*addrQueue + txSortedList *txSortedList + workerMutex sync.Mutex + state stateInterface + batchConstraints state.BatchConstraintsCfg } // NewWorker creates an init a worker -func NewWorker(state stateInterface) *Worker { +func NewWorker(state stateInterface, constraints state.BatchConstraintsCfg) *Worker { w := Worker{ - pool: make(map[string]*addrQueue), - txSortedList: newTxSortedList(), - state: state, + pool: make(map[string]*addrQueue), + txSortedList: newTxSortedList(), + state: state, + batchConstraints: constraints, } return &w @@ -42,8 +44,20 @@ func (w *Worker) NewTxTracker(tx pool.Transaction, counters state.ZKCounters, ip func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *TxTracker, dropReason error) { w.workerMutex.Lock() - addr, found := w.pool[tx.FromStr] + // Make sure the IP is valid. + if tx.IP != "" && !pool.IsValidIP(tx.IP) { + w.workerMutex.Unlock() + return nil, pool.ErrInvalidIP + } + // Make sure the transaction's batch resources are within the constraints. 
+ if !w.batchConstraints.IsWithinConstraints(tx.BatchResources.ZKCounters) { + log.Errorf("OutOfCounters Error (Node level) for tx: %s", tx.Hash.String()) + w.workerMutex.Unlock() + return nil, pool.ErrOutOfCounters + } + + addr, found := w.pool[tx.FromStr] if !found { // Unlock the worker to let execute other worker functions while creating the new AddrQueue w.workerMutex.Unlock() diff --git a/sequencer/worker_test.go b/sequencer/worker_test.go index 98382898e5..837f59aeaf 100644 --- a/sequencer/worker_test.go +++ b/sequencer/worker_test.go @@ -5,11 +5,31 @@ import ( "math/big" "testing" + "github.com/0xPolygonHermez/zkevm-node/pool" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" ) +const ( + validIP = "10.23.100.1" +) + +var ( + // Init ZKEVM resourceCostMax values + rcMax = state.BatchConstraintsCfg{ + MaxCumulativeGasUsed: 10, + MaxArithmetics: 10, + MaxBinaries: 10, + MaxKeccakHashes: 10, + MaxMemAligns: 10, + MaxPoseidonHashes: 10, + MaxPoseidonPaddings: 10, + MaxSteps: 10, + MaxBatchBytesSize: 10, + } +) + type workerAddTxTestCase struct { name string from common.Address @@ -20,6 +40,8 @@ type workerAddTxTestCase struct { usedBytes uint64 gasPrice *big.Int expectedTxSortedList []common.Hash + ip string + expectedErr error } type workerAddrQueueInfo struct { @@ -42,10 +64,17 @@ func processWorkerAddTxTestCases(ctx context.Context, t *testing.T, worker *Work tx.BatchResources.Bytes = testCase.usedBytes tx.GasPrice = testCase.gasPrice tx.updateZKCounters(testCase.counters) + if testCase.ip == "" { + // A random valid IP Address + tx.IP = validIP + } else { + tx.IP = testCase.ip + } t.Logf("%s=%d", testCase.name, tx.GasPrice) _, err := worker.AddTxTracker(ctx, &tx) - if err != nil { + if err != nil && testCase.expectedErr != nil { + assert.ErrorIs(t, err, testCase.expectedErr) return } @@ -66,9 +95,9 @@ func TestWorkerAddTx(t *testing.T) { var nilErr error stateMock := NewStateMock(t) - 
worker := initWorker(stateMock) + worker := initWorker(stateMock, rcMax) - ctx := context.Background() + ctx = context.Background() stateMock.On("GetLastStateRoot", ctx, nil).Return(common.Hash{0}, nilErr) @@ -121,6 +150,31 @@ func TestWorkerAddTx(t *testing.T) { {3}, {2}, {1}, }, }, + { + name: "Invalid IP address", from: common.Address{5}, txHash: common.Hash{5}, nonce: 1, + counters: state.ZKCounters{CumulativeGasUsed: 1, UsedKeccakHashes: 1, UsedPoseidonHashes: 1, UsedPoseidonPaddings: 1, UsedMemAligns: 1, UsedArithmetics: 1, UsedBinaries: 1, UsedSteps: 1}, + usedBytes: 1, + ip: "invalid IP", + expectedErr: pool.ErrInvalidIP, + }, + { + name: "Out Of Counters Err", + from: common.Address{5}, txHash: common.Hash{5}, nonce: 1, + cost: new(big.Int).SetInt64(5), + // Here, we intentionally set the counters such that they violate the constraints + counters: state.ZKCounters{ + CumulativeGasUsed: worker.batchConstraints.MaxCumulativeGasUsed + 1, + UsedKeccakHashes: worker.batchConstraints.MaxKeccakHashes + 1, + UsedPoseidonHashes: worker.batchConstraints.MaxPoseidonHashes + 1, + UsedPoseidonPaddings: worker.batchConstraints.MaxPoseidonPaddings + 1, + UsedMemAligns: worker.batchConstraints.MaxMemAligns + 1, + UsedArithmetics: worker.batchConstraints.MaxArithmetics + 1, + UsedBinaries: worker.batchConstraints.MaxBinaries + 1, + UsedSteps: worker.batchConstraints.MaxSteps + 1, + }, + usedBytes: 1, + expectedErr: pool.ErrOutOfCounters, + }, { name: "Adding from:0x04, tx:0x04/gp:100", from: common.Address{4}, txHash: common.Hash{4}, nonce: 1, gasPrice: new(big.Int).SetInt64(100), cost: new(big.Int).SetInt64(5), @@ -144,7 +198,7 @@ func TestWorkerGetBestTx(t *testing.T) { } stateMock := NewStateMock(t) - worker := initWorker(stateMock) + worker := initWorker(stateMock, rcMax) ctx := context.Background() @@ -232,7 +286,7 @@ func TestWorkerGetBestTx(t *testing.T) { } } -func initWorker(stateMock *StateMock) *Worker { - worker := NewWorker(stateMock) +func 
initWorker(stateMock *StateMock, rcMax state.BatchConstraintsCfg) *Worker { + worker := NewWorker(stateMock, rcMax) return worker } diff --git a/sequencesender/config.go b/sequencesender/config.go index 57107a33dc..9059b61d61 100644 --- a/sequencesender/config.go +++ b/sequencesender/config.go @@ -12,10 +12,10 @@ type Config struct { WaitPeriodSendSequence types.Duration `mapstructure:"WaitPeriodSendSequence"` // LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent LastBatchVirtualizationTimeMaxWaitPeriod types.Duration `mapstructure:"LastBatchVirtualizationTimeMaxWaitPeriod"` - // MaxTxSizeForL1 is the maximum size a single transaction can have. This field has - // non-trivial consequences: larger transactions than 128KB are significantly harder and - // more expensive to propagate; larger transactions also take more resources - // to validate whether they fit into the pool or not. + // // MaxTxSizeForL1 is the maximum size a single transaction can have. This field has + // // non-trivial consequences: larger transactions than 128KB are significantly harder and + // // more expensive to propagate; larger transactions also take more resources + // // to validate whether they fit into the pool or not. 
MaxTxSizeForL1 uint64 `mapstructure:"MaxTxSizeForL1"` // MaxBatchesForL1 is the maximum amount of batches to be sequenced in a single L1 tx @@ -24,7 +24,7 @@ type Config struct { // SenderAddress defines which private key the eth tx manager needs to use // to sign the L1 txs SenderAddress common.Address - // L2Coinbase defines which addess is going to receive the fees + // L2Coinbase defines which address is going to receive the fees L2Coinbase common.Address `mapstructure:"L2Coinbase"` // PrivateKey defines all the key store files that are going // to be read in order to provide the private keys to sign the L1 txs @@ -34,4 +34,16 @@ type Config struct { // UseValidium is a flag to enable/disable the use of validium UseValidium bool `mapstructure:"UseValidium"` + + // GasOffset is the amount of gas to be added to the gas estimation in order + // to provide an amount that is higher than the estimated one. This is used + // to avoid the TX getting reverted in case something has changed in the network + // state after the estimation which can cause the TX to require more gas to be + // executed. 
+ // + // ex: + // gas estimation: 1000 + // gas offset: 100 + // final gas: 1100 + GasOffset uint64 `mapstructure:"GasOffset"` } diff --git a/sequencesender/interfaces.go b/sequencesender/interfaces.go index 1ba0247005..653d7e0c6d 100644 --- a/sequencesender/interfaces.go +++ b/sequencesender/interfaces.go @@ -37,6 +37,6 @@ type stateInterface interface { } type ethTxManager interface { - Add(ctx context.Context, owner, id string, from common.Address, to *common.Address, value *big.Int, data []byte, dbTx pgx.Tx) error + Add(ctx context.Context, owner, id string, from common.Address, to *common.Address, value *big.Int, data []byte, gasOffset uint64, dbTx pgx.Tx) error ProcessPendingMonitoredTxs(ctx context.Context, owner string, failedResultHandler ethtxmanager.ResultHandler, dbTx pgx.Tx) } diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 902cd53f81..770d1ced6b 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -66,8 +66,8 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context, ticker *time.Tic s.ethTxManager.ProcessPendingMonitoredTxs(ctx, ethTxManagerOwner, func(result ethtxmanager.MonitoredTxResult, dbTx pgx.Tx) { if result.Status == ethtxmanager.MonitoredTxStatusFailed { retry = true - resultLog := log.WithFields("owner", ethTxManagerOwner, "id", result.ID) - resultLog.Error("failed to send sequence, TODO: review this fatal and define what to do in this case") + mTxResultLogger := ethtxmanager.CreateMonitoredTxResultLogger(ethTxManagerOwner, result) + mTxResultLogger.Error("failed to send sequence, TODO: review this fatal and define what to do in this case") } }, nil) @@ -121,7 +121,6 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context, ticker *time.Tic if !s.isValidium() { signaturesAndAddrs = nil } - to, data, err := s.etherman.BuildSequenceBatchesTxData(s.cfg.SenderAddress, sequences, s.cfg.L2Coinbase, signaturesAndAddrs) if err != nil { log.Error("error 
estimating new sequenceBatches to add to eth tx manager: ", err) @@ -131,9 +130,10 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context, ticker *time.Tic firstSequence := sequences[0] lastSequence := sequences[len(sequences)-1] monitoredTxID := fmt.Sprintf(monitoredIDFormat, firstSequence.BatchNumber, lastSequence.BatchNumber) - err = s.ethTxManager.Add(ctx, ethTxManagerOwner, monitoredTxID, s.cfg.SenderAddress, to, nil, data, nil) + err = s.ethTxManager.Add(ctx, ethTxManagerOwner, monitoredTxID, s.cfg.SenderAddress, to, nil, data, s.cfg.GasOffset, nil) if err != nil { - log.Error("error to add sequences tx to eth tx manager: ", err) + mTxLogger := ethtxmanager.CreateLogger(ethTxManagerOwner, monitoredTxID, s.cfg.SenderAddress, to) + mTxLogger.Errorf("error to add sequences tx to eth tx manager: ", err) waitTick(ctx, ticker) return } @@ -150,7 +150,7 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) ([]types.Sequen currentBatchNumToSequence := lastVirtualBatchNum + 1 sequences := []types.Sequence{} - + // var estimatedGas uint64 var tx *ethTypes.Transaction // Add sequences until too big for a single L1 tx or last batch is reached @@ -192,7 +192,6 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) ([]types.Sequen } sequences = append(sequences, seq) - if s.isValidium() { if len(sequences) == int(s.cfg.MaxBatchesForL1) { log.Infof( @@ -254,6 +253,12 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) ([]types.Sequen return nil, nil } +func isDataForEthTxTooBig(err error) bool { + return errors.Is(err, ethman.ErrGasRequiredExceedsAllowance) || + errors.Is(err, ErrOversizedData) || + errors.Is(err, ethman.ErrContentLengthTooLarge) +} + // handleEstimateGasSendSequenceErr handles an error on the estimate gas. It will return: // nil, error: impossible to handle gracefully // sequence, nil: handled gracefully. 
Potentially manipulating the sequences @@ -320,12 +325,6 @@ func (s *SequenceSender) handleEstimateGasSendSequenceErr( return sequences, nil } -func isDataForEthTxTooBig(err error) bool { - return errors.Is(err, ethman.ErrGasRequiredExceedsAllowance) || - errors.Is(err, ErrOversizedData) || - errors.Is(err, ethman.ErrContentLengthTooLarge) -} - func (s *SequenceSender) isValidium() bool { if !s.cfg.UseValidium { return false diff --git a/sonar-project.properties b/sonar-project.properties index 91ca29df92..ee4e905cfb 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -1 +1,10 @@ sonar.projectKey=x1-node + +sonar.sources=. +sonar.exclusions=**/*_test.go +sonar.exclusions=**/mock_*.go + +sonar.tests=. +sonar.test.inclusions=**/*_test.go +sonar.go.coverage.reportPaths=coverage.out +sonar.go.tests.reportPaths=report.json diff --git a/state/batch.go b/state/batch.go index ee457bcef6..67cdf2db13 100644 --- a/state/batch.go +++ b/state/batch.go @@ -12,6 +12,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" "github.com/jackc/pgx/v4" ) @@ -149,17 +150,7 @@ func (s *State) ProcessSequencerBatch(ctx context.Context, batchNumber uint64, b return nil, err } - txs := []types.Transaction{} - forkID := s.GetForkIDByBatchNumber(batchNumber) - - if processBatchResponse.Responses != nil && len(processBatchResponse.Responses) > 0 { - txs, _, _, err = DecodeTxs(batchL2Data, forkID) - if err != nil && !errors.Is(err, ErrInvalidData) { - return nil, err - } - } - - result, err := s.convertToProcessBatchResponse(txs, processBatchResponse) + result, err := s.convertToProcessBatchResponse(processBatchResponse) if err != nil { return nil, err } @@ -192,20 +183,15 @@ func (s *State) ProcessBatch(ctx context.Context, request ProcessRequest, update UpdateMerkleTree: updateMT, ChainId: s.cfg.ChainID, ForkId: forkID, + ContextId: 
uuid.NewString(), } res, err := s.sendBatchRequestToExecutor(ctx, processBatchRequest, request.Caller) if err != nil { return nil, err } - txs, _, effP, err := DecodeTxs(request.Transactions, forkID) - if err != nil && !errors.Is(err, ErrInvalidData) { - return nil, err - } - log.Infof("ProcessBatch: %d txs, %#v effP", len(txs), effP) - var result *ProcessBatchResponse - result, err = s.convertToProcessBatchResponse(txs, res) + result, err = s.convertToProcessBatchResponse(res) if err != nil { return nil, err } @@ -249,6 +235,7 @@ func (s *State) ExecuteBatch(ctx context.Context, batch Batch, updateMerkleTree UpdateMerkleTree: updateMT, ChainId: s.cfg.ChainID, ForkId: forkId, + ContextId: uuid.NewString(), } // Send Batch to the Executor @@ -263,6 +250,7 @@ func (s *State) ExecuteBatch(ctx context.Context, batch Batch, updateMerkleTree log.Debugf("ExecuteBatch[processBatchRequest.UpdateMerkleTree]: %v", processBatchRequest.UpdateMerkleTree) log.Debugf("ExecuteBatch[processBatchRequest.ChainId]: %v", processBatchRequest.ChainId) log.Debugf("ExecuteBatch[processBatchRequest.ForkId]: %v", processBatchRequest.ForkId) + log.Debugf("ExecuteBatch[processBatchRequest.ContextId]: %v", processBatchRequest.ContextId) processBatchResponse, err := s.executorClient.ProcessBatch(ctx, processBatchRequest) if err != nil { @@ -330,6 +318,7 @@ func (s *State) processBatch(ctx context.Context, batchNumber uint64, batchL2Dat UpdateMerkleTree: cTrue, ChainId: s.cfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } return s.sendBatchRequestToExecutor(ctx, processBatchRequest, caller) @@ -352,6 +341,7 @@ func (s *State) sendBatchRequestToExecutor(ctx context.Context, processBatchRequ log.Debugf("processBatch[processBatchRequest.UpdateMerkleTree]: %v", processBatchRequest.UpdateMerkleTree) log.Debugf("processBatch[processBatchRequest.ChainId]: %v", processBatchRequest.ChainId) log.Debugf("processBatch[processBatchRequest.ForkId]: %v", processBatchRequest.ForkId) + 
log.Debugf("processBatch[processBatchRequest.ContextId]: %v", processBatchRequest.ContextId) } now := time.Now() res, err := s.executorClient.ProcessBatch(ctx, processBatchRequest) @@ -411,9 +401,15 @@ func (s *State) CloseBatch(ctx context.Context, receipt ProcessingReceipt, dbTx // the flushID (incremental value returned by executor), // the ProverID (executor running ID) the result of closing the batch. func (s *State) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx ProcessingContext, encodedTxs []byte, dbTx pgx.Tx, caller metrics.CallerLabel) (common.Hash, uint64, string, error) { + BatchL2Data := processingCtx.BatchL2Data + if BatchL2Data == nil { + log.Warnf("Batch %v: ProcessAndStoreClosedBatch: processingCtx.BatchL2Data is nil, assuming is empty", processingCtx.BatchNumber) + var BatchL2DataEmpty []byte + BatchL2Data = &BatchL2DataEmpty + } // Decode transactions forkID := s.GetForkIDByBatchNumber(processingCtx.BatchNumber) - decodedTransactions, _, _, err := DecodeTxs(encodedTxs, forkID) + decodedTransactions, _, _, err := DecodeTxs(*BatchL2Data, forkID) if err != nil && !errors.Is(err, ErrInvalidData) { log.Debugf("error decoding transactions: %v", err) return common.Hash{}, noFlushID, noProverID, err @@ -423,10 +419,12 @@ func (s *State) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx Pr if dbTx == nil { return common.Hash{}, noFlushID, noProverID, ErrDBTxNil } + // Avoid writing twice to the DB the BatchL2Data that is going to be written also in the call closeBatch + processingCtx.BatchL2Data = nil if err := s.OpenBatch(ctx, processingCtx, dbTx); err != nil { return common.Hash{}, noFlushID, noProverID, err } - processed, err := s.processBatch(ctx, processingCtx.BatchNumber, encodedTxs, caller, dbTx) + processed, err := s.processBatch(ctx, processingCtx.BatchNumber, *BatchL2Data, caller, dbTx) if err != nil { return common.Hash{}, noFlushID, noProverID, err } @@ -448,23 +446,21 @@ func (s *State) 
ProcessAndStoreClosedBatch(ctx context.Context, processingCtx Pr // Remove unprocessed tx if i == len(processed.Responses)-1 { processed.Responses = processed.Responses[:i] - decodedTransactions = decodedTransactions[:i] } else { processed.Responses = append(processed.Responses[:i], processed.Responses[i+1:]...) - decodedTransactions = append(decodedTransactions[:i], decodedTransactions[i+1:]...) } i-- } } - processedBatch, err := s.convertToProcessBatchResponse(decodedTransactions, processed) + processedBatch, err := s.convertToProcessBatchResponse(processed) if err != nil { return common.Hash{}, noFlushID, noProverID, err } if len(processedBatch.Responses) > 0 { // Store processed txs into the batch - err = s.StoreTransactions(ctx, processingCtx.BatchNumber, processedBatch.Responses, dbTx) + err = s.StoreTransactions(ctx, processingCtx.BatchNumber, processedBatch.Responses, nil, dbTx) if err != nil { return common.Hash{}, noFlushID, noProverID, err } @@ -476,7 +472,7 @@ func (s *State) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx Pr StateRoot: processedBatch.NewStateRoot, LocalExitRoot: processedBatch.NewLocalExitRoot, AccInputHash: processedBatch.NewAccInputHash, - BatchL2Data: encodedTxs, + BatchL2Data: *BatchL2Data, }, dbTx) } diff --git a/state/config.go b/state/config.go index 62cae08c93..34bf46d9fd 100644 --- a/state/config.go +++ b/state/config.go @@ -1,6 +1,9 @@ package state -import "github.com/0xPolygonHermez/zkevm-node/config/types" +import ( + "github.com/0xPolygonHermez/zkevm-node/config/types" + "github.com/0xPolygonHermez/zkevm-node/db" +) // Config is state config type Config struct { @@ -24,4 +27,53 @@ type Config struct { // New fork id to be used for batches greaters than ForkUpgradeBatchNumber (fork upgrade) ForkUpgradeNewForkId uint64 + + // DB is the database configuration + DB db.Config `mapstructure:"DB"` + + // Configuration for the batch constraints + Batch BatchConfig `mapstructure:"Batch"` + + // MaxLogsCount is a 
configuration to set the max number of logs that can be returned + // in a single call to the state, if zero it means no limit + MaxLogsCount uint64 + + // MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs + // logs in a single call to the state, if zero it means no limit + MaxLogsBlockRange uint64 + + // MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying + // native block hashes in a single call to the state, if zero it means no limit + MaxNativeBlockHashBlockRange uint64 +} + +// BatchConfig represents the configuration of the batch constraints +type BatchConfig struct { + Constraints BatchConstraintsCfg `mapstructure:"Constraints"` +} + +// BatchConstraintsCfg represents the configuration of the batch constraints +type BatchConstraintsCfg struct { + MaxTxsPerBatch uint64 `mapstructure:"MaxTxsPerBatch"` + MaxBatchBytesSize uint64 `mapstructure:"MaxBatchBytesSize"` + MaxCumulativeGasUsed uint64 `mapstructure:"MaxCumulativeGasUsed"` + MaxKeccakHashes uint32 `mapstructure:"MaxKeccakHashes"` + MaxPoseidonHashes uint32 `mapstructure:"MaxPoseidonHashes"` + MaxPoseidonPaddings uint32 `mapstructure:"MaxPoseidonPaddings"` + MaxMemAligns uint32 `mapstructure:"MaxMemAligns"` + MaxArithmetics uint32 `mapstructure:"MaxArithmetics"` + MaxBinaries uint32 `mapstructure:"MaxBinaries"` + MaxSteps uint32 `mapstructure:"MaxSteps"` +} + +// IsWithinConstraints checks if the counters are within the batch constraints +func (c BatchConstraintsCfg) IsWithinConstraints(counters ZKCounters) bool { + return counters.CumulativeGasUsed <= c.MaxCumulativeGasUsed && + counters.UsedKeccakHashes <= c.MaxKeccakHashes && + counters.UsedPoseidonHashes <= c.MaxPoseidonHashes && + counters.UsedPoseidonPaddings <= c.MaxPoseidonPaddings && + counters.UsedMemAligns <= c.MaxMemAligns && + counters.UsedArithmetics <= c.MaxArithmetics && + counters.UsedBinaries <= c.MaxBinaries && + counters.UsedSteps <= c.MaxSteps } 
diff --git a/state/converters.go b/state/converters.go index 9881d78567..4d416fb834 100644 --- a/state/converters.go +++ b/state/converters.go @@ -32,12 +32,12 @@ func ConvertToCounters(resp *executor.ProcessBatchResponse) ZKCounters { } // TestConvertToProcessBatchResponse for test purposes -func (s *State) TestConvertToProcessBatchResponse(txs []types.Transaction, response *executor.ProcessBatchResponse) (*ProcessBatchResponse, error) { - return s.convertToProcessBatchResponse(txs, response) +func (s *State) TestConvertToProcessBatchResponse(response *executor.ProcessBatchResponse) (*ProcessBatchResponse, error) { + return s.convertToProcessBatchResponse(response) } -func (s *State) convertToProcessBatchResponse(txs []types.Transaction, response *executor.ProcessBatchResponse) (*ProcessBatchResponse, error) { - responses, err := s.convertToProcessTransactionResponse(txs, response.Responses) +func (s *State) convertToProcessBatchResponse(response *executor.ProcessBatchResponse) (*ProcessBatchResponse, error) { + responses, err := s.convertToProcessTransactionResponse(response.Responses) if err != nil { return nil, err } @@ -86,7 +86,7 @@ func (s *State) convertToProcessBatchResponse(txs []types.Transaction, response // IsStateRootChanged returns true if the transaction changes the state root func IsStateRootChanged(err executor.RomError) bool { - return !executor.IsIntrinsicError(err) && !executor.IsROMOutOfCountersError(err) + return !executor.IsIntrinsicError(err) && !executor.IsROMOutOfCountersError(err) && err != executor.RomError_ROM_ERROR_INVALID_RLP } func convertToReadWriteAddresses(addresses map[string]*executor.InfoReadWrite) (map[common.Address]*InfoReadWrite, error) { @@ -123,9 +123,9 @@ func convertToReadWriteAddresses(addresses map[string]*executor.InfoReadWrite) ( return results, nil } -func (s *State) convertToProcessTransactionResponse(txs []types.Transaction, responses []*executor.ProcessTransactionResponse) ([]*ProcessTransactionResponse, error) 
{ +func (s *State) convertToProcessTransactionResponse(responses []*executor.ProcessTransactionResponse) ([]*ProcessTransactionResponse, error) { results := make([]*ProcessTransactionResponse, 0, len(responses)) - for i, response := range responses { + for _, response := range responses { trace, err := convertToStructLogArray(response.ExecutionTrace) if err != nil { return nil, err @@ -151,39 +151,53 @@ func (s *State) convertToProcessTransactionResponse(txs []types.Transaction, res result.CallTrace = *callTrace result.EffectiveGasPrice = response.EffectiveGasPrice result.EffectivePercentage = response.EffectivePercentage - result.Tx = txs[i] + result.HasGaspriceOpcode = (response.HasGaspriceOpcode == 1) + result.HasBalanceOpcode = (response.HasBalanceOpcode == 1) - _, err = DecodeTx(common.Bytes2Hex(response.GetRlpTx())) - if err != nil { - timestamp := time.Now() - log.Errorf("error decoding rlp returned by executor %v at %v", err, timestamp) - - event := &event.Event{ - ReceivedAt: timestamp, - Source: event.Source_Node, - Level: event.Level_Error, - EventID: event.EventID_ExecutorRLPError, - Json: string(response.GetRlpTx()), - } + tx := new(types.Transaction) - err = s.eventLog.LogEvent(context.Background(), event) + if response.Error != executor.RomError_ROM_ERROR_INVALID_RLP && len(response.GetRlpTx()) > 0 { + tx, err = DecodeTx(common.Bytes2Hex(response.GetRlpTx())) if err != nil { - log.Errorf("error storing payload: %v", err) + timestamp := time.Now() + log.Errorf("error decoding rlp returned by executor %v at %v", err, timestamp) + + event := &event.Event{ + ReceivedAt: timestamp, + Source: event.Source_Node, + Level: event.Level_Error, + EventID: event.EventID_ExecutorRLPError, + Json: string(response.GetRlpTx()), + } + + eventErr := s.eventLog.LogEvent(context.Background(), event) + if eventErr != nil { + log.Errorf("error storing payload: %v", err) + } + + return nil, err + } + } else { + log.Warnf("ROM_ERROR_INVALID_RLP returned by the executor") + } 
+ + if tx != nil { + result.Tx = *tx + log.Debugf("ProcessTransactionResponse[TxHash]: %v", result.TxHash) + if response.Error == executor.RomError_ROM_ERROR_NO_ERROR { + log.Debugf("ProcessTransactionResponse[Nonce]: %v", result.Tx.Nonce()) } + log.Debugf("ProcessTransactionResponse[StateRoot]: %v", result.StateRoot.String()) + log.Debugf("ProcessTransactionResponse[Error]: %v", result.RomError) + log.Debugf("ProcessTransactionResponse[GasUsed]: %v", result.GasUsed) + log.Debugf("ProcessTransactionResponse[GasLeft]: %v", result.GasLeft) + log.Debugf("ProcessTransactionResponse[GasRefunded]: %v", result.GasRefunded) + log.Debugf("ProcessTransactionResponse[ChangesStateRoot]: %v", result.ChangesStateRoot) + log.Debugf("ProcessTransactionResponse[EffectiveGasPrice]: %v", result.EffectiveGasPrice) + log.Debugf("ProcessTransactionResponse[EffectivePercentage]: %v", result.EffectivePercentage) } results = append(results, result) - - log.Debugf("ProcessTransactionResponse[TxHash]: %v", result.TxHash) - log.Debugf("ProcessTransactionResponse[Nonce]: %v", result.Tx.Nonce()) - log.Debugf("ProcessTransactionResponse[StateRoot]: %v", result.StateRoot.String()) - log.Debugf("ProcessTransactionResponse[Error]: %v", result.RomError) - log.Debugf("ProcessTransactionResponse[GasUsed]: %v", result.GasUsed) - log.Debugf("ProcessTransactionResponse[GasLeft]: %v", result.GasLeft) - log.Debugf("ProcessTransactionResponse[GasRefunded]: %v", result.GasRefunded) - log.Debugf("ProcessTransactionResponse[ChangesStateRoot]: %v", result.ChangesStateRoot) - log.Debugf("ProcessTransactionResponse[EffectiveGasPrice]: %v", result.EffectiveGasPrice) - log.Debugf("ProcessTransactionResponse[EffectivePercentage]: %v", result.EffectivePercentage) } return results, nil diff --git a/state/datastream.go b/state/datastream.go new file mode 100644 index 0000000000..cf0f2d84db --- /dev/null +++ b/state/datastream.go @@ -0,0 +1,489 @@ +package state + +import ( + "context" + "encoding/binary" + + 
"github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/ethereum/go-ethereum/common" + "github.com/jackc/pgx/v4" +) + +const ( + // StreamTypeSequencer represents a Sequencer stream + StreamTypeSequencer datastreamer.StreamType = 1 + // EntryTypeBookMark represents a bookmark entry + EntryTypeBookMark datastreamer.EntryType = datastreamer.EtBookmark + // EntryTypeL2BlockStart represents a L2 block start + EntryTypeL2BlockStart datastreamer.EntryType = 1 + // EntryTypeL2Tx represents a L2 transaction + EntryTypeL2Tx datastreamer.EntryType = 2 + // EntryTypeL2BlockEnd represents a L2 block end + EntryTypeL2BlockEnd datastreamer.EntryType = 3 + // EntryTypeUpdateGER represents a GER update + EntryTypeUpdateGER datastreamer.EntryType = 4 + // BookMarkTypeL2Block represents a L2 block bookmark + BookMarkTypeL2Block byte = 0 +) + +// DSBatch represents a data stream batch +type DSBatch struct { + Batch + ForkID uint16 +} + +// DSFullBatch represents a data stream batch ant its L2 blocks +type DSFullBatch struct { + DSBatch + L2Blocks []DSL2FullBlock +} + +// DSL2FullBlock represents a data stream L2 full block and its transactions +type DSL2FullBlock struct { + DSL2Block + Txs []DSL2Transaction +} + +// DSL2Block is a full l2 block +type DSL2Block struct { + BatchNumber uint64 // 8 bytes + L2BlockNumber uint64 // 8 bytes + Timestamp int64 // 8 bytes + GlobalExitRoot common.Hash // 32 bytes + Coinbase common.Address // 20 bytes + ForkID uint16 // 2 bytes + BlockHash common.Hash // 32 bytes + StateRoot common.Hash // 32 bytes +} + +// DSL2BlockStart represents a data stream L2 block start +type DSL2BlockStart struct { + BatchNumber uint64 // 8 bytes + L2BlockNumber uint64 // 8 bytes + Timestamp int64 // 8 bytes + GlobalExitRoot common.Hash // 32 bytes + Coinbase common.Address // 20 bytes + ForkID uint16 // 2 bytes +} + +// Encode returns the encoded DSL2BlockStart as a byte slice +func (b DSL2BlockStart) 
Encode() []byte { + bytes := make([]byte, 0) + bytes = binary.LittleEndian.AppendUint64(bytes, b.BatchNumber) + bytes = binary.LittleEndian.AppendUint64(bytes, b.L2BlockNumber) + bytes = binary.LittleEndian.AppendUint64(bytes, uint64(b.Timestamp)) + bytes = append(bytes, b.GlobalExitRoot.Bytes()...) + bytes = append(bytes, b.Coinbase.Bytes()...) + bytes = binary.LittleEndian.AppendUint16(bytes, b.ForkID) + return bytes +} + +// Decode decodes the DSL2BlockStart from a byte slice +func (b DSL2BlockStart) Decode(data []byte) DSL2BlockStart { + b.BatchNumber = binary.LittleEndian.Uint64(data[0:8]) + b.L2BlockNumber = binary.LittleEndian.Uint64(data[8:16]) + b.Timestamp = int64(binary.LittleEndian.Uint64(data[16:24])) + b.GlobalExitRoot = common.BytesToHash(data[24:56]) + b.Coinbase = common.BytesToAddress(data[56:76]) + b.ForkID = binary.LittleEndian.Uint16(data[76:78]) + return b +} + +// DSL2Transaction represents a data stream L2 transaction +type DSL2Transaction struct { + L2BlockNumber uint64 // Not included in the encoded data + EffectiveGasPricePercentage uint8 // 1 byte + IsValid uint8 // 1 byte + EncodedLength uint32 // 4 bytes + Encoded []byte +} + +// Encode returns the encoded DSL2Transaction as a byte slice +func (l DSL2Transaction) Encode() []byte { + bytes := make([]byte, 0) + bytes = append(bytes, byte(l.EffectiveGasPricePercentage)) + bytes = append(bytes, byte(l.IsValid)) + bytes = binary.LittleEndian.AppendUint32(bytes, l.EncodedLength) + bytes = append(bytes, l.Encoded...) 
+ return bytes +} + +// Decode decodes the DSL2Transaction from a byte slice +func (l DSL2Transaction) Decode(data []byte) DSL2Transaction { + l.EffectiveGasPricePercentage = uint8(data[0]) + l.IsValid = uint8(data[1]) + l.EncodedLength = binary.LittleEndian.Uint32(data[2:6]) + l.Encoded = data[6:] + return l +} + +// DSL2BlockEnd represents a L2 block end +type DSL2BlockEnd struct { + L2BlockNumber uint64 // 8 bytes + BlockHash common.Hash // 32 bytes + StateRoot common.Hash // 32 bytes +} + +// Encode returns the encoded DSL2BlockEnd as a byte slice +func (b DSL2BlockEnd) Encode() []byte { + bytes := make([]byte, 0) + bytes = binary.LittleEndian.AppendUint64(bytes, b.L2BlockNumber) + bytes = append(bytes, b.BlockHash[:]...) + bytes = append(bytes, b.StateRoot[:]...) + return bytes +} + +// Decode decodes the DSL2BlockEnd from a byte slice +func (b DSL2BlockEnd) Decode(data []byte) DSL2BlockEnd { + b.L2BlockNumber = binary.LittleEndian.Uint64(data[0:8]) + b.BlockHash = common.BytesToHash(data[8:40]) + b.StateRoot = common.BytesToHash(data[40:72]) + return b +} + +// DSBookMark represents a data stream bookmark +type DSBookMark struct { + Type byte + L2BlockNumber uint64 +} + +// Encode returns the encoded DSBookMark as a byte slice +func (b DSBookMark) Encode() []byte { + bytes := make([]byte, 0) + bytes = append(bytes, b.Type) + bytes = binary.LittleEndian.AppendUint64(bytes, b.L2BlockNumber) + return bytes +} + +// Decode decodes the DSBookMark from a byte slice +func (b DSBookMark) Decode(data []byte) DSBookMark { + b.Type = byte(data[0]) + b.L2BlockNumber = binary.LittleEndian.Uint64(data[1:9]) + return b +} + +// DSUpdateGER represents a data stream GER update +type DSUpdateGER struct { + BatchNumber uint64 // 8 bytes + Timestamp int64 // 8 bytes + GlobalExitRoot common.Hash // 32 bytes + Coinbase common.Address // 20 bytes + ForkID uint16 // 2 bytes + StateRoot common.Hash // 32 bytes +} + +// Encode returns the encoded DSUpdateGER as a byte slice +func (g 
DSUpdateGER) Encode() []byte { + bytes := make([]byte, 0) + bytes = binary.LittleEndian.AppendUint64(bytes, g.BatchNumber) + bytes = binary.LittleEndian.AppendUint64(bytes, uint64(g.Timestamp)) + bytes = append(bytes, g.GlobalExitRoot[:]...) + bytes = append(bytes, g.Coinbase[:]...) + bytes = binary.LittleEndian.AppendUint16(bytes, g.ForkID) + bytes = append(bytes, g.StateRoot[:]...) + return bytes +} + +// Decode decodes the DSUpdateGER from a byte slice +func (g DSUpdateGER) Decode(data []byte) DSUpdateGER { + g.BatchNumber = binary.LittleEndian.Uint64(data[0:8]) + g.Timestamp = int64(binary.LittleEndian.Uint64(data[8:16])) + g.GlobalExitRoot = common.BytesToHash(data[16:48]) + g.Coinbase = common.BytesToAddress(data[48:68]) + g.ForkID = binary.LittleEndian.Uint16(data[68:70]) + g.StateRoot = common.BytesToHash(data[70:102]) + return g +} + +// DSState gathers the methods required to interact with the data stream state. +type DSState interface { + GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*DSL2Block, error) + GetDSBatches(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*DSBatch, error) + GetDSL2Blocks(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*DSL2Block, error) + GetDSL2Transactions(ctx context.Context, firstL2Block, lastL2Block uint64, dbTx pgx.Tx) ([]*DSL2Transaction, error) +} + +// GenerateDataStreamerFile generates or resumes a data stream file +func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.StreamServer, stateDB DSState) error { + header := streamServer.GetHeader() + + var currentBatchNumber uint64 = 0 + var currentL2Block uint64 = 0 + + if header.TotalEntries == 0 { + // Get Genesis block + genesisL2Block, err := stateDB.GetDSGenesisBlock(ctx, nil) + if err != nil { + return err + } + + err = streamServer.StartAtomicOp() + if err != nil { + return err + } + + bookMark := DSBookMark{ + Type: BookMarkTypeL2Block, + L2BlockNumber: 
genesisL2Block.L2BlockNumber, + } + + _, err = streamServer.AddStreamBookmark(bookMark.Encode()) + if err != nil { + return err + } + + genesisBlock := DSL2BlockStart{ + BatchNumber: genesisL2Block.BatchNumber, + L2BlockNumber: genesisL2Block.L2BlockNumber, + Timestamp: genesisL2Block.Timestamp, + GlobalExitRoot: genesisL2Block.GlobalExitRoot, + Coinbase: genesisL2Block.Coinbase, + ForkID: genesisL2Block.ForkID, + } + + log.Infof("Genesis block: %+v", genesisBlock) + + _, err = streamServer.AddStreamEntry(1, genesisBlock.Encode()) + if err != nil { + return err + } + + genesisBlockEnd := DSL2BlockEnd{ + L2BlockNumber: genesisL2Block.L2BlockNumber, + BlockHash: genesisL2Block.BlockHash, + StateRoot: genesisL2Block.StateRoot, + } + + _, err = streamServer.AddStreamEntry(EntryTypeL2BlockEnd, genesisBlockEnd.Encode()) + if err != nil { + return err + } + + err = streamServer.CommitAtomicOp() + if err != nil { + return err + } + } else { + latestEntry, err := streamServer.GetEntry(header.TotalEntries - 1) + if err != nil { + return err + } + + log.Infof("Latest entry: %+v", latestEntry) + + switch latestEntry.Type { + case EntryTypeUpdateGER: + log.Info("Latest entry type is UpdateGER") + currentBatchNumber = binary.LittleEndian.Uint64(latestEntry.Data[0:8]) + case EntryTypeL2BlockEnd: + log.Info("Latest entry type is L2BlockEnd") + currentL2Block = binary.LittleEndian.Uint64(latestEntry.Data[0:8]) + + bookMark := DSBookMark{ + Type: BookMarkTypeL2Block, + L2BlockNumber: currentL2Block, + } + + firstEntry, err := streamServer.GetFirstEventAfterBookmark(bookMark.Encode()) + if err != nil { + return err + } + currentBatchNumber = binary.LittleEndian.Uint64(firstEntry.Data[0:8]) + } + } + + log.Infof("Current Batch number: %d", currentBatchNumber) + log.Infof("Current L2 block number: %d", currentL2Block) + + var entry uint64 = header.TotalEntries + var currentGER = common.Hash{} + + if entry > 0 { + entry-- + } + + // Start on the current batch number + 1 + 
currentBatchNumber++ + + var err error + + const limit = 10000 + + for err == nil { + log.Debugf("Current entry number: %d", entry) + log.Debugf("Current batch number: %d", currentBatchNumber) + // Get Next Batch + batches, err := stateDB.GetDSBatches(ctx, currentBatchNumber, currentBatchNumber+limit, nil) + if err != nil { + if err == ErrStateNotSynchronized { + break + } + log.Errorf("Error getting batch %d: %s", currentBatchNumber, err.Error()) + return err + } + + // Finished? + if len(batches) == 0 { + break + } + + l2Blocks, err := stateDB.GetDSL2Blocks(ctx, batches[0].BatchNumber, batches[len(batches)-1].BatchNumber, nil) + if err != nil { + log.Errorf("Error getting L2 blocks for batches starting at %d: %s", batches[0].BatchNumber, err.Error()) + return err + } + + l2Txs := make([]*DSL2Transaction, 0) + if len(l2Blocks) > 0 { + l2Txs, err = stateDB.GetDSL2Transactions(ctx, l2Blocks[0].L2BlockNumber, l2Blocks[len(l2Blocks)-1].L2BlockNumber, nil) + if err != nil { + log.Errorf("Error getting L2 transactions for blocks starting at %d: %s", l2Blocks[0].L2BlockNumber, err.Error()) + return err + } + } + + // Gererate full batches + fullBatches := computeFullBatches(batches, l2Blocks, l2Txs) + log.Debugf("Full batches: %+v", fullBatches) + + currentBatchNumber += limit + + for _, batch := range fullBatches { + if len(batch.L2Blocks) == 0 { + // Empty batch + // Check if there is a GER update + if batch.GlobalExitRoot != currentGER && batch.GlobalExitRoot != (common.Hash{}) { + updateGer := DSUpdateGER{ + BatchNumber: batch.BatchNumber, + Timestamp: batch.Timestamp.Unix(), + GlobalExitRoot: batch.GlobalExitRoot, + Coinbase: batch.Coinbase, + ForkID: batch.ForkID, + StateRoot: batch.StateRoot, + } + + err = streamServer.StartAtomicOp() + if err != nil { + return err + } + + entry, err = streamServer.AddStreamEntry(EntryTypeUpdateGER, updateGer.Encode()) + if err != nil { + return err + } + + err = streamServer.CommitAtomicOp() + if err != nil { + return err + } + + 
currentGER = batch.GlobalExitRoot + } + continue + } + + err = streamServer.StartAtomicOp() + if err != nil { + return err + } + + for _, l2block := range batch.L2Blocks { + blockStart := DSL2BlockStart{ + BatchNumber: l2block.BatchNumber, + L2BlockNumber: l2block.L2BlockNumber, + Timestamp: l2block.Timestamp, + GlobalExitRoot: l2block.GlobalExitRoot, + Coinbase: l2block.Coinbase, + ForkID: l2block.ForkID, + } + + bookMark := DSBookMark{ + Type: BookMarkTypeL2Block, + L2BlockNumber: blockStart.L2BlockNumber, + } + + _, err = streamServer.AddStreamBookmark(bookMark.Encode()) + if err != nil { + return err + } + + _, err = streamServer.AddStreamEntry(EntryTypeL2BlockStart, blockStart.Encode()) + if err != nil { + return err + } + + for _, tx := range l2block.Txs { + entry, err = streamServer.AddStreamEntry(EntryTypeL2Tx, tx.Encode()) + if err != nil { + return err + } + } + + blockEnd := DSL2BlockEnd{ + L2BlockNumber: l2block.L2BlockNumber, + BlockHash: l2block.BlockHash, + StateRoot: l2block.StateRoot, + } + + _, err = streamServer.AddStreamEntry(EntryTypeL2BlockEnd, blockEnd.Encode()) + if err != nil { + return err + } + currentGER = l2block.GlobalExitRoot + } + // Commit at the end of each batch group + err = streamServer.CommitAtomicOp() + if err != nil { + return err + } + } + } + + return err +} + +// computeFullBatches computes the full batches +func computeFullBatches(batches []*DSBatch, l2Blocks []*DSL2Block, l2Txs []*DSL2Transaction) []*DSFullBatch { + currentL2Block := 0 + currentL2Tx := 0 + + fullBatches := make([]*DSFullBatch, 0) + + for _, batch := range batches { + fullBatch := &DSFullBatch{ + DSBatch: *batch, + } + + for i := currentL2Block; i < len(l2Blocks); i++ { + l2Block := l2Blocks[i] + if l2Block.BatchNumber == batch.BatchNumber { + fullBlock := DSL2FullBlock{ + DSL2Block: *l2Block, + } + + for j := currentL2Tx; j < len(l2Txs); j++ { + l2Tx := l2Txs[j] + if l2Tx.L2BlockNumber == l2Block.L2BlockNumber { + fullBlock.Txs = append(fullBlock.Txs, 
*l2Tx) + currentL2Tx++ + } + if l2Tx.L2BlockNumber > l2Block.L2BlockNumber { + break + } + } + + fullBatch.L2Blocks = append(fullBatch.L2Blocks, fullBlock) + currentL2Block++ + } + + if l2Block.BatchNumber > batch.BatchNumber { + break + } + } + + fullBatches = append(fullBatches, fullBatch) + } + + return fullBatches +} diff --git a/state/errors.go b/state/errors.go index 606ad910f7..730cbba59b 100644 --- a/state/errors.go +++ b/state/errors.go @@ -57,6 +57,18 @@ var ( ErrInvalidData = errors.New("invalid data") // ErrBatchResourceBytesUnderflow happens when the batch runs out of Bytes ErrBatchResourceBytesUnderflow = NewBatchRemainingResourcesUnderflowError(nil, "Bytes") + // ErrInvalidBlockRange returned when the selected block range is invalid, generally + // because the toBlock is bigger than the fromBlock + ErrInvalidBlockRange = errors.New("invalid block range") + // ErrMaxLogsCountLimitExceeded returned when the number of logs is bigger than the + // configured limit + ErrMaxLogsCountLimitExceeded = errors.New("query returned more than %v results") + // ErrMaxLogsBlockRangeLimitExceeded returned when the range between block number range + // to filter logs is bigger than the configured limit + ErrMaxLogsBlockRangeLimitExceeded = errors.New("logs are limited to a %v block range") + // ErrMaxNativeBlockHashBlockRangeLimitExceeded returned when the range between block number range + // to filter native block hashes is bigger than the configured limit + ErrMaxNativeBlockHashBlockRangeLimitExceeded = errors.New("native block hashes are limited to a %v block range") zkCounterErrPrefix = "ZKCounter: " ) diff --git a/state/genesis.go b/state/genesis.go index ab05bbed17..b30c5a908b 100644 --- a/state/genesis.go +++ b/state/genesis.go @@ -177,5 +177,10 @@ func (s *State) SetGenesis(ctx context.Context, block Block, genesis Genesis, db l2Block := types.NewBlock(header, []*types.Transaction{}, []*types.Header{}, receipts, &trie.StackTrie{}) l2Block.ReceivedAt = 
block.ReceivedAt - return newRoot, s.AddL2Block(ctx, batch.BatchNumber, l2Block, receipts, MaxEffectivePercentage, dbTx) + storeTxsEGPData := []StoreTxEGPData{} + for range l2Block.Transactions() { + storeTxsEGPData = append(storeTxsEGPData, StoreTxEGPData{EGPLog: nil, EffectivePercentage: MaxEffectivePercentage}) + } + + return newRoot, s.AddL2Block(ctx, batch.BatchNumber, l2Block, receipts, storeTxsEGPData, dbTx) } diff --git a/state/helper.go b/state/helper.go index b009047555..93855ef961 100644 --- a/state/helper.go +++ b/state/helper.go @@ -59,7 +59,6 @@ func prepareRPLTxData(tx types.Transaction) ([]byte, error) { sign := 1 - (v.Uint64() & 1) nonce, gasPrice, gas, to, value, data, chainID := tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.ChainId() - log.Debug(nonce, " ", gasPrice, " ", gas, " ", to, " ", value, " ", len(data), " ", chainID, " ") rlpFieldsToEncode := []interface{}{ nonce, @@ -70,7 +69,7 @@ func prepareRPLTxData(tx types.Transaction) ([]byte, error) { data, } - if tx.ChainId().Uint64() > 0 { + if !IsPreEIP155Tx(tx) { rlpFieldsToEncode = append(rlpFieldsToEncode, chainID) rlpFieldsToEncode = append(rlpFieldsToEncode, uint(0)) rlpFieldsToEncode = append(rlpFieldsToEncode, uint(0)) @@ -338,6 +337,13 @@ func toPostgresInterval(duration string) (string, error) { return fmt.Sprintf("%s %s", duration[:len(duration)-1], pgUnit), nil } +// IsPreEIP155Tx checks if the tx is a tx that has a chainID as zero and +// V field is either 27 or 28 +func IsPreEIP155Tx(tx types.Transaction) bool { + v, _, _ := tx.RawSignatureValues() + return tx.ChainId().Uint64() == 0 && (v.Uint64() == 27 || v.Uint64() == 28) +} + // CheckLogOrder checks the order of the logs. 
The order should be incremental func CheckLogOrder(logs []*types.Log) bool { logsAux := make([]*types.Log, len(logs)) diff --git a/state/infinite.go b/state/infinite.go new file mode 100644 index 0000000000..0f170fe53b --- /dev/null +++ b/state/infinite.go @@ -0,0 +1,27 @@ +package state + +import ( + "time" + + "github.com/0xPolygonHermez/zkevm-node/log" +) + +// InfiniteSafeRun executes a function and in case it fails, +// runs the function again infinitely +func InfiniteSafeRun(fn func(), errorMessage string, restartInterval time.Duration) { + for { + SafeRun(fn, errorMessage) + time.Sleep(restartInterval) + } +} + +// SafeRun executes a function with a deferred recover +// to avoid to panic. +func SafeRun(fn func(), errorMessage string) { + defer func() { + if r := recover(); r != nil { + log.Errorf(errorMessage, r) + } + }() + fn() +} diff --git a/state/l2block.go b/state/l2block.go index a4d4824ab3..ed2205c2a6 100644 --- a/state/l2block.go +++ b/state/l2block.go @@ -3,7 +3,6 @@ package state import ( "context" "errors" - "math/big" "sync" "time" @@ -11,6 +10,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) +const newL2BlocksCheckInterval = 200 * time.Millisecond + // NewL2BlockEventHandler represent a func that will be called by the // state when a NewL2BlockEvent is triggered type NewL2BlockEventHandler func(e NewL2BlockEvent) @@ -19,19 +20,17 @@ type NewL2BlockEventHandler func(e NewL2BlockEvent) // when a new l2 block is detected with data related to this new l2 block. 
type NewL2BlockEvent struct { Block types.Block + Logs []*types.Log } -// PrepareWebSocket allows the RPC to prepare ws -func (s *State) PrepareWebSocket() { - lastL2Block, err := s.GetLastL2Block(context.Background(), nil) - if errors.Is(err, ErrStateNotSynchronized) { - lastL2Block = types.NewBlockWithHeader(&types.Header{Number: big.NewInt(0)}) - } else if err != nil { - log.Fatalf("failed to load the last l2 block: %v", err) - } - s.lastL2BlockSeen = *lastL2Block - go s.monitorNewL2Blocks() - go s.handleEvents() +// StartToMonitorNewL2Blocks starts 2 go routines that will +// monitor new blocks and execute handlers registered to be executed +// when a new l2 block is detected. This is used by the RPC WebSocket +// filter subscription but can be used by any other component that +// needs to react to a new L2 block added to the state. +func (s *State) StartToMonitorNewL2Blocks() { + go InfiniteSafeRun(s.monitorNewL2Blocks, "fail to monitor new l2 blocks: %v:", time.Second) + go InfiniteSafeRun(s.handleEvents, "fail to handle events: %v", time.Second) } // RegisterNewL2BlockEventHandler add the provided handler to the list of handlers @@ -41,33 +40,18 @@ func (s *State) RegisterNewL2BlockEventHandler(h NewL2BlockEventHandler) { s.newL2BlockEventHandlers = append(s.newL2BlockEventHandlers, h) } -func (s *State) handleEvents() { - for newL2BlockEvent := range s.newL2BlockEvents { - if len(s.newL2BlockEventHandlers) == 0 { - continue - } - - wg := sync.WaitGroup{} - for _, handler := range s.newL2BlockEventHandlers { - wg.Add(1) - go func(h NewL2BlockEventHandler) { - defer func() { - wg.Done() - if r := recover(); r != nil { - log.Errorf("failed and recovered in NewL2BlockEventHandler: %v", r) - } - }() - h(newL2BlockEvent) - }(handler) - } - wg.Wait() - } -} - func (s *State) monitorNewL2Blocks() { waitNextCycle := func() { - time.Sleep(1 * time.Second) + time.Sleep(newL2BlocksCheckInterval) + } + + lastL2BlockNumber, err := 
s.GetLastL2BlockNumber(context.Background(), nil) + if errors.Is(err, ErrStateNotSynchronized) { + lastL2BlockNumber = 0 + } else if err != nil { + log.Fatalf("failed to load the last l2 block: %v", err) } + lastL2BlockNumberSeen := lastL2BlockNumber for { if len(s.newL2BlockEventHandlers) == 0 { @@ -75,7 +59,7 @@ func (s *State) monitorNewL2Blocks() { continue } - lastL2Block, err := s.GetLastL2Block(context.Background(), nil) + lastL2BlockNumber, err := s.GetLastL2BlockNumber(context.Background(), nil) if errors.Is(err, ErrStateNotSynchronized) { waitNextCycle() continue @@ -86,26 +70,66 @@ func (s *State) monitorNewL2Blocks() { } // not updates until now - if lastL2Block == nil || s.lastL2BlockSeen.NumberU64() >= lastL2Block.NumberU64() { + if lastL2BlockNumber == 0 || lastL2BlockNumberSeen >= lastL2BlockNumber { waitNextCycle() continue } - for bn := s.lastL2BlockSeen.NumberU64() + uint64(1); bn <= lastL2Block.NumberU64(); bn++ { + fromBlockNumber := lastL2BlockNumberSeen + uint64(1) + toBlockNumber := lastL2BlockNumber + log.Infof("[monitorNewL2Blocks] new l2 block detected from block %v to %v", fromBlockNumber, toBlockNumber) + + for bn := fromBlockNumber; bn <= toBlockNumber; bn++ { block, err := s.GetL2BlockByNumber(context.Background(), bn, nil) if err != nil { - log.Errorf("failed to l2 block while monitoring new blocks: %v", err) + log.Errorf("failed to get l2 block while monitoring new blocks: %v", err) + break + } + logs, err := s.GetLogsByBlockNumber(context.Background(), bn, nil) + if err != nil { + log.Errorf("failed to get l2 block while monitoring new blocks: %v", err) break } + log.Debugf("[monitorNewL2Blocks] sending NewL2BlockEvent for block %v", block.NumberU64()) + start := time.Now() s.newL2BlockEvents <- NewL2BlockEvent{ Block: *block, + Logs: logs, } - log.Infof("new l2 blocks detected, Number %v, Hash %v", block.NumberU64(), block.Hash().String()) - s.lastL2BlockSeen = *block + lastL2BlockNumberSeen = block.NumberU64() + 
log.Infof("[monitorNewL2Blocks] NewL2BlockEvent for block %v took %v to be sent", block.NumberU64(), time.Since(start)) + log.Infof("new l2 block detected: number %v, hash %v", block.NumberU64(), block.Hash().String()) } // interval to check for new l2 blocks waitNextCycle() } } + +func (s *State) handleEvents() { + for newL2BlockEvent := range s.newL2BlockEvents { + log.Infof("[handleEvents] new l2 block event detected for block: %v", newL2BlockEvent.Block.NumberU64()) + if len(s.newL2BlockEventHandlers) == 0 { + continue + } + + wg := sync.WaitGroup{} + for _, handler := range s.newL2BlockEventHandlers { + wg.Add(1) + go func(h NewL2BlockEventHandler, e NewL2BlockEvent) { + defer func() { + wg.Done() + if r := recover(); r != nil { + log.Errorf("failed and recovered in NewL2BlockEventHandler: %v", r) + } + }() + log.Infof("[handleEvents] triggering new l2 block event handler for block: %v", e.Block.NumberU64()) + start := time.Now() + h(e) + log.Infof("[handleEvents] new l2 block event handler for block %v took %v to be executed", e.Block.NumberU64(), time.Since(start)) + }(handler, newL2BlockEvent) + } + wg.Wait() + } +} diff --git a/state/pgstatestorage.go b/state/pgstatestorage.go index 50e181326b..c1919070fa 100644 --- a/state/pgstatestorage.go +++ b/state/pgstatestorage.go @@ -9,6 +9,7 @@ import ( "time" "github.com/0xPolygonHermez/zkevm-node/hex" + "github.com/0xPolygonHermez/zkevm-node/log" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" @@ -25,12 +26,14 @@ const ( // PostgresStorage implements the Storage interface type PostgresStorage struct { + cfg Config *pgxpool.Pool } // NewPostgresStorage creates a new StateDB -func NewPostgresStorage(db *pgxpool.Pool) *PostgresStorage { +func NewPostgresStorage(cfg Config, db *pgxpool.Pool) *PostgresStorage { return &PostgresStorage{ + cfg, db, } } @@ -55,7 +58,7 @@ func (p *PostgresStorage) Reset(ctx context.Context, blockNumber uint64, dbTx pg } // 
ResetForkID resets the state to reprocess the newer batches with the correct forkID -func (p *PostgresStorage) ResetForkID(ctx context.Context, batchNumber, forkID uint64, version string, dbTx pgx.Tx) error { +func (p *PostgresStorage) ResetForkID(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { e := p.getExecQuerier(dbTx) const resetVirtualStateSQL = "delete from state.block where block_num >=(select min(block_num) from state.virtual_batch where batch_num >= $1)" if _, err := e.Exec(ctx, resetVirtualStateSQL, batchNumber); err != nil { @@ -65,14 +68,6 @@ func (p *PostgresStorage) ResetForkID(ctx context.Context, batchNumber, forkID u if err != nil { return err } - reorg := TrustedReorg{ - BatchNumber: batchNumber, - Reason: fmt.Sprintf("New ForkID: %d. Version: %s", forkID, version), - } - err = p.AddTrustedReorg(ctx, &reorg, dbTx) - if err != nil { - return err - } // Delete proofs for higher batches const deleteProofsSQL = "delete from state.proof where batch_num >= $1 or (batch_num <= $1 and batch_num_final >= $1)" @@ -548,8 +543,7 @@ func (p *PostgresStorage) GetLatestVirtualBatchTimestamp(ctx context.Context, db func (p *PostgresStorage) SetLastBatchInfoSeenOnEthereum(ctx context.Context, lastBatchNumberSeen, lastBatchNumberVerified uint64, dbTx pgx.Tx) error { const query = ` UPDATE state.sync_info - SET last_batch_num_seen = $1 - , last_batch_num_consolidated = $2` + SET last_batch_num_seen = $1, last_batch_num_consolidated = $2` e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, query, lastBatchNumberSeen, lastBatchNumberVerified) @@ -577,7 +571,7 @@ func (p *PostgresStorage) GetBatchByNumber(ctx context.Context, batchNumber uint batch, err := scanBatch(row) if errors.Is(err, pgx.ErrNoRows) { - return nil, ErrStateNotSynchronized + return nil, ErrNotFound } else if err != nil { return nil, err } @@ -1481,11 +1475,39 @@ func scanLogs(rows pgx.Rows) ([]*types.Log, error) { return logs, nil } +// GetTransactionEGPLogByHash gets the EGP log 
accordingly to the provided transaction hash +func (p *PostgresStorage) GetTransactionEGPLogByHash(ctx context.Context, transactionHash common.Hash, dbTx pgx.Tx) (*EffectiveGasPriceLog, error) { + var ( + egpLogData []byte + egpLog EffectiveGasPriceLog + ) + const getTransactionByHashSQL = "SELECT egp_log FROM state.transaction WHERE hash = $1" + + q := p.getExecQuerier(dbTx) + err := q.QueryRow(ctx, getTransactionByHashSQL, transactionHash.String()).Scan(&egpLogData) + + if errors.Is(err, pgx.ErrNoRows) { + return nil, ErrNotFound + } else if err != nil { + return nil, err + } + + err = json.Unmarshal(egpLogData, &egpLog) + if err != nil { + return nil, err + } + + return &egpLog, nil +} + // AddL2Block adds a new L2 block to the State Store -func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *types.Block, receipts []*types.Receipt, effectivePercentage uint8, dbTx pgx.Tx) error { +func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *types.Block, receipts []*types.Receipt, txsEGPData []StoreTxEGPData, dbTx pgx.Tx) error { + log.Infof("[AddL2Block] adding l2 block: %v", l2Block.NumberU64()) + start := time.Now() + e := p.getExecQuerier(dbTx) - const addTransactionSQL = "INSERT INTO state.transaction (hash, encoded, decoded, l2_block_num, effective_percentage) VALUES($1, $2, $3, $4, $5)" + const addTransactionSQL = "INSERT INTO state.transaction (hash, encoded, decoded, l2_block_num, effective_percentage, egp_log) VALUES($1, $2, $3, $4, $5, $6)" const addL2BlockSQL = ` INSERT INTO state.l2block (block_num, block_hash, header, uncles, parent_hash, state_root, received_at, batch_num, created_at) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9)` @@ -1515,7 +1537,16 @@ func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2 return err } - for _, tx := range l2Block.Transactions() { + for idx, tx := range l2Block.Transactions() { + egpLog := "" + if txsEGPData != nil { + egpLogBytes, 
err := json.Marshal(txsEGPData[idx].EGPLog) + if err != nil { + return err + } + egpLog = string(egpLogBytes) + } + binary, err := tx.MarshalBinary() if err != nil { return err @@ -1527,7 +1558,7 @@ func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2 return err } decoded := string(binary) - _, err = e.Exec(ctx, addTransactionSQL, tx.Hash().String(), encoded, decoded, l2Block.Number().Uint64(), effectivePercentage) + _, err = e.Exec(ctx, addTransactionSQL, tx.Hash().String(), encoded, decoded, l2Block.Number().Uint64(), txsEGPData[idx].EffectivePercentage, egpLog) if err != nil { return err } @@ -1546,7 +1577,7 @@ func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2 } } } - + log.Infof("[AddL2Block] l2 block %v took %v to be added", l2Block.NumberU64(), time.Since(start)) return nil } @@ -1594,20 +1625,20 @@ func (p *PostgresStorage) GetLastConsolidatedL2BlockNumber(ctx context.Context, return lastConsolidatedBlockNumber, nil } -// GetSafeL2BlockNumber gets the last l2 block virtualized that was mined -// on or after the safe block on L1 -func (p *PostgresStorage) GetSafeL2BlockNumber(ctx context.Context, l1SafeBlockNumber uint64, dbTx pgx.Tx) (uint64, error) { - var l2SafeBlockNumber uint64 +// GetLastVerifiedL2BlockNumberUntilL1Block gets the last block number that was verified in +// or before the provided l1 block number. This is used to identify if a l2 block is safe or finalized. 
+func (p *PostgresStorage) GetLastVerifiedL2BlockNumberUntilL1Block(ctx context.Context, l1FinalizedBlockNumber uint64, dbTx pgx.Tx) (uint64, error) { + var blockNumber uint64 const query = ` SELECT b.block_num FROM state.l2block b - INNER JOIN state.virtual_batch vb - ON vb.batch_num = b.batch_num + INNER JOIN state.verified_batch vb + ON vb.batch_num = b.batch_num WHERE vb.block_num <= $1 ORDER BY b.block_num DESC LIMIT 1` q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, query, l1SafeBlockNumber).Scan(&l2SafeBlockNumber) + err := q.QueryRow(ctx, query, l1FinalizedBlockNumber).Scan(&blockNumber) if errors.Is(err, pgx.ErrNoRows) { return 0, ErrNotFound @@ -1615,23 +1646,21 @@ func (p *PostgresStorage) GetSafeL2BlockNumber(ctx context.Context, l1SafeBlockN return 0, err } - return l2SafeBlockNumber, nil + return blockNumber, nil } -// GetFinalizedL2BlockNumber gets the last l2 block verified that was mined -// on or after the finalized block on L1 -func (p *PostgresStorage) GetFinalizedL2BlockNumber(ctx context.Context, l1FinalizedBlockNumber uint64, dbTx pgx.Tx) (uint64, error) { - var l2FinalizedBlockNumber uint64 +// GetLastVerifiedBatchNumberUntilL1Block gets the last batch number that was verified in +// or before the provided l1 block number. This is used to identify if a batch is safe or finalized. 
+func (p *PostgresStorage) GetLastVerifiedBatchNumberUntilL1Block(ctx context.Context, l1BlockNumber uint64, dbTx pgx.Tx) (uint64, error) { + var batchNumber uint64 const query = ` - SELECT b.block_num - FROM state.l2block b - INNER JOIN state.verified_batch vb - ON vb.batch_num = b.batch_num + SELECT vb.batch_num + FROM state.verified_batch vb WHERE vb.block_num <= $1 - ORDER BY b.block_num DESC LIMIT 1` + ORDER BY vb.batch_num DESC LIMIT 1` q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, query, l1FinalizedBlockNumber).Scan(&l2FinalizedBlockNumber) + err := q.QueryRow(ctx, query, l1BlockNumber).Scan(&batchNumber) if errors.Is(err, pgx.ErrNoRows) { return 0, ErrNotFound @@ -1639,7 +1668,7 @@ func (p *PostgresStorage) GetFinalizedL2BlockNumber(ctx context.Context, l1Final return 0, err } - return l2FinalizedBlockNumber, nil + return batchNumber, nil } // GetLastL2BlockNumber gets the last l2 block number @@ -1954,51 +1983,78 @@ func (p *PostgresStorage) IsL2BlockVirtualized(ctx context.Context, blockNumber return isVirtualized, nil } -// GetLogs returns the logs that match the filter -func (p *PostgresStorage) GetLogs(ctx context.Context, fromBlock uint64, toBlock uint64, addresses []common.Address, topics [][]common.Hash, blockHash *common.Hash, since *time.Time, dbTx pgx.Tx) ([]*types.Log, error) { - const getLogsByBlockHashSQL = ` - SELECT t.l2_block_num, b.block_hash, l.tx_hash, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 - FROM state.log l - INNER JOIN state.transaction t ON t.hash = l.tx_hash - INNER JOIN state.l2block b ON b.block_num = t.l2_block_num - WHERE b.block_hash = $1 - AND (l.address = any($2) OR $2 IS NULL) - AND (l.topic0 = any($3) OR $3 IS NULL) - AND (l.topic1 = any($4) OR $4 IS NULL) - AND (l.topic2 = any($5) OR $5 IS NULL) - AND (l.topic3 = any($6) OR $6 IS NULL) - AND (b.created_at >= $7 OR $7 IS NULL) - ORDER BY b.block_num ASC, l.log_index ASC` - const getLogsByBlockNumbersSQL = ` +// GetLogsByBlockNumber get 
all the logs from a specific block ordered by log index +func (p *PostgresStorage) GetLogsByBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) ([]*types.Log, error) { + const query = ` SELECT t.l2_block_num, b.block_hash, l.tx_hash, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 FROM state.log l INNER JOIN state.transaction t ON t.hash = l.tx_hash INNER JOIN state.l2block b ON b.block_num = t.l2_block_num - WHERE b.block_num BETWEEN $1 AND $2 - AND (l.address = any($3) OR $3 IS NULL) - AND (l.topic0 = any($4) OR $4 IS NULL) - AND (l.topic1 = any($5) OR $5 IS NULL) - AND (l.topic2 = any($6) OR $6 IS NULL) - AND (l.topic3 = any($7) OR $7 IS NULL) - AND (b.created_at >= $8 OR $8 IS NULL) - ORDER BY b.block_num ASC, l.log_index ASC` - - var args []interface{} - var query string - if blockHash != nil { - args = []interface{}{blockHash.String()} - query = getLogsByBlockHashSQL - } else { - args = []interface{}{fromBlock, toBlock} - query = getLogsByBlockNumbersSQL + WHERE b.block_num = $1 + ORDER BY l.log_index ASC` + + q := p.getExecQuerier(dbTx) + rows, err := q.Query(ctx, query, blockNumber) + if err != nil { + return nil, err } + return scanLogs(rows) +} + +// GetLogs returns the logs that match the filter +func (p *PostgresStorage) GetLogs(ctx context.Context, fromBlock uint64, toBlock uint64, addresses []common.Address, topics [][]common.Hash, blockHash *common.Hash, since *time.Time, dbTx pgx.Tx) ([]*types.Log, error) { + // query parts + const queryCount = `SELECT count(*) ` + const querySelect = `SELECT t.l2_block_num, b.block_hash, l.tx_hash, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 ` + + const queryBody = `FROM state.log l + INNER JOIN state.transaction t ON t.hash = l.tx_hash + INNER JOIN state.l2block b ON b.block_num = t.l2_block_num + WHERE (l.address = any($1) OR $1 IS NULL) + AND (l.topic0 = any($2) OR $2 IS NULL) + AND (l.topic1 = any($3) OR $3 IS NULL) + AND (l.topic2 = any($4) OR $4 
IS NULL) + AND (l.topic3 = any($5) OR $5 IS NULL) + AND (b.created_at >= $6 OR $6 IS NULL) ` + + const queryFilterByBlockHash = `AND b.block_hash = $7 ` + const queryFilterByBlockNumbers = `AND b.block_num BETWEEN $7 AND $8 ` + + const queryOrder = `ORDER BY b.block_num ASC, l.log_index ASC` + + // count queries + const queryToCountLogsByBlockHash = "" + + queryCount + + queryBody + + queryFilterByBlockHash + const queryToCountLogsByBlockNumbers = "" + + queryCount + + queryBody + + queryFilterByBlockNumbers + + // select queries + const queryToSelectLogsByBlockHash = "" + + querySelect + + queryBody + + queryFilterByBlockHash + + queryOrder + const queryToSelectLogsByBlockNumbers = "" + + querySelect + + queryBody + + queryFilterByBlockNumbers + + queryOrder + + args := []interface{}{} + + // address filter if len(addresses) > 0 { args = append(args, p.addressesToHex(addresses)) } else { args = append(args, nil) } + // topic filters for i := 0; i < maxTopics; i++ { if len(topics) > i && len(topics[i]) > 0 { args = append(args, p.hashesToHex(topics[i])) @@ -2007,11 +2063,45 @@ func (p *PostgresStorage) GetLogs(ctx context.Context, fromBlock uint64, toBlock } } + // since filter args = append(args, since) + // block filter + var queryToCount string + var queryToSelect string + if blockHash != nil { + args = append(args, blockHash.String()) + queryToCount = queryToCountLogsByBlockHash + queryToSelect = queryToSelectLogsByBlockHash + } else { + if toBlock < fromBlock { + return nil, ErrInvalidBlockRange + } + + blockRange := toBlock - fromBlock + if p.cfg.MaxLogsBlockRange > 0 && blockRange > p.cfg.MaxLogsBlockRange { + return nil, ErrMaxLogsBlockRangeLimitExceeded + } + + args = append(args, fromBlock, toBlock) + queryToCount = queryToCountLogsByBlockNumbers + queryToSelect = queryToSelectLogsByBlockNumbers + } + q := p.getExecQuerier(dbTx) - rows, err := q.Query(ctx, query, args...) 
+ if p.cfg.MaxLogsCount > 0 { + var count uint64 + err := q.QueryRow(ctx, queryToCount, args...).Scan(&count) + if err != nil { + return nil, err + } + if count > p.cfg.MaxLogsCount { + return nil, ErrMaxLogsCountLimitExceeded + } + } + + rows, err := q.Query(ctx, queryToSelect, args...) if err != nil { return nil, err } @@ -2138,7 +2228,7 @@ func (p *PostgresStorage) GetExitRootByGlobalExitRoot(ctx context.Context, ger c // AddSequence stores the sequence information to allow the aggregator verify sequences. func (p *PostgresStorage) AddSequence(ctx context.Context, sequence Sequence, dbTx pgx.Tx) error { - const addSequenceSQL = "INSERT INTO state.sequences (from_batch_num, to_batch_num) VALUES($1, $2)" + const addSequenceSQL = "INSERT INTO state.sequences (from_batch_num, to_batch_num) VALUES($1, $2) ON CONFLICT (from_batch_num) DO UPDATE SET to_batch_num = $2" e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, addSequenceSQL, sequence.FromBatchNumber, sequence.ToBatchNumber) @@ -2409,6 +2499,26 @@ func (p *PostgresStorage) GetLastClosedBatch(ctx context.Context, dbTx pgx.Tx) ( return &batch, nil } +// GetLastClosedBatchNumber returns the latest closed batch +func (p *PostgresStorage) GetLastClosedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) { + const getLastClosedBatchSQL = ` + SELECT bt.batch_num + FROM state.batch bt + WHERE global_exit_root IS NOT NULL AND state_root IS NOT NULL + ORDER BY bt.batch_num DESC + LIMIT 1;` + + batchNumber := uint64(0) + e := p.getExecQuerier(dbTx) + err := e.QueryRow(ctx, getLastClosedBatchSQL).Scan(&batchNumber) + if errors.Is(err, pgx.ErrNoRows) { + return 0, ErrStateNotSynchronized + } else if err != nil { + return 0, err + } + return batchNumber, nil +} + // UpdateBatchL2Data updates data tx data in a batch func (p *PostgresStorage) UpdateBatchL2Data(ctx context.Context, batchNumber uint64, batchL2Data []byte, dbTx pgx.Tx) error { const updateL2DataSQL = "UPDATE state.batch SET raw_txs_data = $2 WHERE 
batch_num = $1" @@ -2461,19 +2571,6 @@ func (p *PostgresStorage) CountReorgs(ctx context.Context, dbTx pgx.Tx) (uint64, return count, nil } -// GetForkIDTrustedReorgCount returns the forkID -func (p *PostgresStorage) GetForkIDTrustedReorgCount(ctx context.Context, forkID uint64, version string, dbTx pgx.Tx) (uint64, error) { - const forkIDTrustedReorgSQL = "SELECT COUNT(*) FROM state.trusted_reorg WHERE reason=$1" - - var count uint64 - q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, forkIDTrustedReorgSQL, fmt.Sprintf("New ForkID: %d. Version: %s", forkID, version)).Scan(&count) - if err != nil { - return 0, err - } - return count, nil -} - // GetReorgedTransactions returns the transactions that were reorged func (p *PostgresStorage) GetReorgedTransactions(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]*types.Transaction, error) { const getReorgedTransactionsSql = "SELECT encoded FROM state.transaction t INNER JOIN state.l2block b ON t.l2_block_num = b.block_num WHERE b.batch_num >= $1 ORDER BY l2_block_num ASC" @@ -2539,7 +2636,7 @@ func (p *PostgresStorage) GetBatchByForcedBatchNum(ctx context.Context, forcedBa // AddForkID adds a new forkID to the storage func (p *PostgresStorage) AddForkID(ctx context.Context, forkID ForkIDInterval, dbTx pgx.Tx) error { - const addForkIDSQL = "INSERT INTO state.fork_id (from_batch_num, to_batch_num, fork_id, version, block_num) VALUES ($1, $2, $3, $4, $5)" + const addForkIDSQL = "INSERT INTO state.fork_id (from_batch_num, to_batch_num, fork_id, version, block_num) VALUES ($1, $2, $3, $4, $5) ON CONFLICT (fork_id) DO UPDATE SET block_num = $5 WHERE state.fork_id.fork_id = $3;" e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, addForkIDSQL, forkID.FromBatchNumber, forkID.ToBatchNumber, forkID.ForkId, forkID.Version, forkID.BlockNumber) return err @@ -2585,3 +2682,236 @@ func (p *PostgresStorage) UpdateForkID(ctx context.Context, forkID ForkIDInterva } return nil } + +// GetNativeBlockHashesInRange return the state 
root for the blocks in range +func (p *PostgresStorage) GetNativeBlockHashesInRange(ctx context.Context, fromBlock, toBlock uint64, dbTx pgx.Tx) ([]common.Hash, error) { + const l2TxSQL = ` + SELECT l2b.state_root + FROM state.l2block l2b + WHERE block_num BETWEEN $1 AND $2 + ORDER BY l2b.block_num ASC` + + if toBlock < fromBlock { + return nil, ErrInvalidBlockRange + } + + blockRange := toBlock - fromBlock + if p.cfg.MaxNativeBlockHashBlockRange > 0 && blockRange > p.cfg.MaxNativeBlockHashBlockRange { + return nil, ErrMaxNativeBlockHashBlockRangeLimitExceeded + } + + e := p.getExecQuerier(dbTx) + rows, err := e.Query(ctx, l2TxSQL, fromBlock, toBlock) + if err != nil { + return nil, err + } + defer rows.Close() + + nativeBlockHashes := []common.Hash{} + + for rows.Next() { + var nativeBlockHash string + err := rows.Scan(&nativeBlockHash) + if err != nil { + return nil, err + } + nativeBlockHashes = append(nativeBlockHashes, common.HexToHash(nativeBlockHash)) + } + return nativeBlockHashes, nil +} + +// GetDSGenesisBlock returns the genesis block +func (p *PostgresStorage) GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*DSL2Block, error) { + const genesisL2BlockSQL = `SELECT 0 as batch_num, l2b.block_num, l2b.received_at, '0x0000000000000000000000000000000000000000' as global_exit_root, l2b.header->>'miner' AS coinbase, 0 as fork_id, l2b.block_hash, l2b.state_root + FROM state.l2block l2b + WHERE l2b.block_num = 0` + + e := p.getExecQuerier(dbTx) + + row := e.QueryRow(ctx, genesisL2BlockSQL) + + l2block, err := scanL2Block(row) + if err != nil { + return nil, err + } + + return l2block, nil +} + +// GetDSL2Blocks returns the L2 blocks +func (p *PostgresStorage) GetDSL2Blocks(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*DSL2Block, error) { + const l2BlockSQL = `SELECT l2b.batch_num, l2b.block_num, l2b.received_at, b.global_exit_root, l2b.header->>'miner' AS coinbase, f.fork_id, l2b.block_hash, l2b.state_root + FROM 
state.l2block l2b, state.batch b, state.fork_id f + WHERE l2b.batch_num BETWEEN $1 AND $2 AND l2b.batch_num = b.batch_num AND l2b.batch_num between f.from_batch_num AND f.to_batch_num + ORDER BY l2b.block_num ASC` + e := p.getExecQuerier(dbTx) + rows, err := e.Query(ctx, l2BlockSQL, firstBatchNumber, lastBatchNumber) + if err != nil { + return nil, err + } + defer rows.Close() + + l2blocks := make([]*DSL2Block, 0, len(rows.RawValues())) + + for rows.Next() { + l2block, err := scanL2Block(rows) + if err != nil { + return nil, err + } + l2blocks = append(l2blocks, l2block) + } + + return l2blocks, nil +} + +func scanL2Block(row pgx.Row) (*DSL2Block, error) { + l2Block := DSL2Block{} + var ( + gerStr string + coinbaseStr string + timestamp time.Time + blockHashStr string + stateRootStr string + ) + if err := row.Scan( + &l2Block.BatchNumber, + &l2Block.L2BlockNumber, + ×tamp, + &gerStr, + &coinbaseStr, + &l2Block.ForkID, + &blockHashStr, + &stateRootStr, + ); err != nil { + return &l2Block, err + } + l2Block.GlobalExitRoot = common.HexToHash(gerStr) + l2Block.Coinbase = common.HexToAddress(coinbaseStr) + l2Block.Timestamp = timestamp.Unix() + l2Block.BlockHash = common.HexToHash(blockHashStr) + l2Block.StateRoot = common.HexToHash(stateRootStr) + + return &l2Block, nil +} + +// GetDSL2Transactions returns the L2 transactions +func (p *PostgresStorage) GetDSL2Transactions(ctx context.Context, firstL2Block, lastL2Block uint64, dbTx pgx.Tx) ([]*DSL2Transaction, error) { + const l2TxSQL = `SELECT l2_block_num, t.effective_percentage, t.encoded + FROM state.transaction t + WHERE l2_block_num BETWEEN $1 AND $2 + ORDER BY t.l2_block_num ASC` + + e := p.getExecQuerier(dbTx) + rows, err := e.Query(ctx, l2TxSQL, firstL2Block, lastL2Block) + if err != nil { + return nil, err + } + defer rows.Close() + + l2Txs := make([]*DSL2Transaction, 0, len(rows.RawValues())) + + for rows.Next() { + l2Tx, err := scanDSL2Transaction(rows) + if err != nil { + return nil, err + } + l2Txs = 
append(l2Txs, l2Tx) + } + + return l2Txs, nil +} + +func scanDSL2Transaction(row pgx.Row) (*DSL2Transaction, error) { + l2Transaction := DSL2Transaction{} + encoded := []byte{} + if err := row.Scan( + &l2Transaction.L2BlockNumber, + &l2Transaction.EffectiveGasPricePercentage, + &encoded, + ); err != nil { + return nil, err + } + tx, err := DecodeTx(string(encoded)) + if err != nil { + return nil, err + } + + binaryTxData, err := tx.MarshalBinary() + if err != nil { + return nil, err + } + + l2Transaction.Encoded = binaryTxData + l2Transaction.EncodedLength = uint32(len(l2Transaction.Encoded)) + l2Transaction.IsValid = 1 + return &l2Transaction, nil +} + +// GetDSBatches returns the DS batches +func (p *PostgresStorage) GetDSBatches(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*DSBatch, error) { + const getBatchByNumberSQL = ` + SELECT b.batch_num, b.global_exit_root, b.local_exit_root, b.acc_input_hash, b.state_root, b.timestamp, b.coinbase, b.raw_txs_data, b.forced_batch_num, f.fork_id + FROM state.batch b, state.fork_id f + WHERE b.state_root is not null AND b.batch_num >= $1 AND b.batch_num <= $2 AND batch_num between f.from_batch_num AND f.to_batch_num` + + e := p.getExecQuerier(dbTx) + rows, err := e.Query(ctx, getBatchByNumberSQL, firstBatchNumber, lastBatchNumber) + if err != nil { + return nil, err + } + if err != nil { + return nil, err + } + defer rows.Close() + + batches := make([]*DSBatch, 0, len(rows.RawValues())) + + for rows.Next() { + batch, err := scanDSBatch(rows) + if err != nil { + return nil, err + } + batches = append(batches, &batch) + } + + return batches, nil +} + +func scanDSBatch(row pgx.Row) (DSBatch, error) { + batch := DSBatch{} + var ( + gerStr string + lerStr *string + aihStr *string + stateStr *string + coinbaseStr string + ) + err := row.Scan( + &batch.BatchNumber, + &gerStr, + &lerStr, + &aihStr, + &stateStr, + &batch.Timestamp, + &coinbaseStr, + &batch.BatchL2Data, + &batch.ForcedBatchNum, + 
&batch.ForkID, + ) + if err != nil { + return batch, err + } + batch.GlobalExitRoot = common.HexToHash(gerStr) + if lerStr != nil { + batch.LocalExitRoot = common.HexToHash(*lerStr) + } + if stateStr != nil { + batch.StateRoot = common.HexToHash(*stateStr) + } + if aihStr != nil { + batch.AccInputHash = common.HexToHash(*aihStr) + } + + batch.Coinbase = common.HexToAddress(coinbaseStr) + return batch, nil +} diff --git a/state/pgstatestorage_test.go b/state/pgstatestorage_test.go index d97f503991..89f48b0d47 100644 --- a/state/pgstatestorage_test.go +++ b/state/pgstatestorage_test.go @@ -27,7 +27,11 @@ var ( ) func setup() { - pgStateStorage = state.NewPostgresStorage(stateDb) + cfg := state.Config{ + MaxLogsCount: 10000, + MaxLogsBlockRange: 10000, + } + pgStateStorage = state.NewPostgresStorage(cfg, stateDb) } func TestGetBatchByL2BlockNumber(t *testing.T) { @@ -82,7 +86,12 @@ func TestGetBatchByL2BlockNumber(t *testing.T) { l2Block := types.NewBlock(header, transactions, []*types.Header{}, receipts, &trie.StackTrie{}) receipt.BlockHash = l2Block.Hash() - err = pgStateStorage.AddL2Block(ctx, batchNumber, l2Block, receipts, state.MaxEffectivePercentage, dbTx) + storeTxsEGPData := []state.StoreTxEGPData{} + for range transactions { + storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + } + + err = pgStateStorage.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) require.NoError(t, err) result, err := pgStateStorage.BatchNumberByL2BlockNumber(ctx, l2Block.Number().Uint64(), dbTx) require.NoError(t, err) @@ -141,11 +150,16 @@ func TestAddAndGetSequences(t *testing.T) { sequence3 := state.Sequence{ FromBatchNumber: 7, - ToBatchNumber: 8, + ToBatchNumber: 7, } err = testState.AddSequence(ctx, sequence3, dbTx) require.NoError(t, err) + // Insert it again to test on conflict + sequence3.ToBatchNumber = 8 + err = testState.AddSequence(ctx, sequence3, dbTx) + require.NoError(t, 
err) + sequences, err := testState.GetSequences(ctx, 0, dbTx) require.NoError(t, err) require.Equal(t, 3, len(sequences)) @@ -488,6 +502,9 @@ func TestForkIDs(t *testing.T) { for _, fork := range forks { err = testState.AddForkID(ctx, fork, dbTx) require.NoError(t, err) + // Insert twice to test on conflict do nothing + err = testState.AddForkID(ctx, fork, dbTx) + require.NoError(t, err) } forkIDs, err := testState.GetForkIDs(ctx, dbTx) @@ -510,10 +527,30 @@ func TestForkIDs(t *testing.T) { require.Equal(t, forkID3.ToBatchNumber, forkIDs[len(forkIDs)-1].ToBatchNumber) require.Equal(t, forkID3.ForkId, forkIDs[len(forkIDs)-1].ForkId) + forkID3.BlockNumber = 101 + err = testState.AddForkID(ctx, forkID3, dbTx) + require.NoError(t, err) + forkIDs, err = testState.GetForkIDs(ctx, dbTx) + require.NoError(t, err) + require.Equal(t, 3, len(forkIDs)) + require.Equal(t, forkID3.ToBatchNumber, forkIDs[len(forkIDs)-1].ToBatchNumber) + require.Equal(t, forkID3.ForkId, forkIDs[len(forkIDs)-1].ForkId) + require.Equal(t, forkID3.BlockNumber, forkIDs[len(forkIDs)-1].BlockNumber) + + forkID3.BlockNumber = 2 + err = testState.AddForkID(ctx, forkID3, dbTx) + require.NoError(t, err) + forkIDs, err = testState.GetForkIDs(ctx, dbTx) + require.NoError(t, err) + require.Equal(t, 3, len(forkIDs)) + require.Equal(t, forkID3.ToBatchNumber, forkIDs[len(forkIDs)-1].ToBatchNumber) + require.Equal(t, forkID3.ForkId, forkIDs[len(forkIDs)-1].ForkId) + require.Equal(t, forkID3.BlockNumber, forkIDs[len(forkIDs)-1].BlockNumber) + require.NoError(t, dbTx.Commit(ctx)) } -func TestGetSafeL2BlockNumber(t *testing.T) { +func TestGetLastVerifiedL2BlockNumberUntilL1Block(t *testing.T) { initOrResetDB() ctx := context.Background() dbTx, err := testState.BeginStateTransaction(ctx) @@ -524,50 +561,61 @@ func TestGetSafeL2BlockNumber(t *testing.T) { addr := common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266") hash := 
common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1") for i := 1; i <= 10; i++ { + blockNumber := uint64(i) + // add l1 block - err = testState.AddBlock(ctx, state.NewBlock(uint64(i)), dbTx) + err = testState.AddBlock(ctx, state.NewBlock(blockNumber), dbTx) require.NoError(t, err) + batchNumber := uint64(i * 10) + // add batch - _, err = testState.PostgresStorage.Exec(ctx, "INSERT INTO state.batch (batch_num) VALUES ($1)", i) + _, err = testState.PostgresStorage.Exec(ctx, "INSERT INTO state.batch (batch_num) VALUES ($1)", batchNumber) require.NoError(t, err) // add l2 block - l2Block := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(i + 10))}) - err = testState.AddL2Block(ctx, uint64(i), l2Block, []*types.Receipt{}, uint8(0), dbTx) - require.NoError(t, err) + l2Block := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(0).SetUint64(blockNumber + uint64(10))}) - // virtualize batch - if i <= 6 { - b := state.VirtualBatch{BlockNumber: uint64(i), BatchNumber: uint64(i), Coinbase: addr, SequencerAddr: addr, TxHash: hash} - err = testState.AddVirtualBatch(ctx, &b, dbTx) - require.NoError(t, err) + storeTxsEGPData := []state.StoreTxEGPData{} + for range l2Block.Transactions() { + storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(0)}) } + + err = testState.AddL2Block(ctx, batchNumber, l2Block, []*types.Receipt{}, storeTxsEGPData, dbTx) + require.NoError(t, err) + + virtualBatch := state.VirtualBatch{BlockNumber: blockNumber, BatchNumber: batchNumber, Coinbase: addr, SequencerAddr: addr, TxHash: hash} + err = testState.AddVirtualBatch(ctx, &virtualBatch, dbTx) + require.NoError(t, err) + + verifiedBatch := state.VerifiedBatch{BlockNumber: blockNumber, BatchNumber: batchNumber, TxHash: hash} + err = testState.AddVerifiedBatch(ctx, &verifiedBatch, dbTx) + require.NoError(t, err) } type testCase struct { - name string - l1SafeBlockNumber uint64 - 
expectedL2SafeBlockNumber uint64 + name string + l1BlockNumber uint64 + expectedBatchNumber uint64 } testCases := []testCase{ - {name: "l1 safe block number smaller than block number for the last virtualized batch", l1SafeBlockNumber: 2, expectedL2SafeBlockNumber: 12}, - {name: "l1 safe block number equal to block number for the last virtualized batch", l1SafeBlockNumber: 6, expectedL2SafeBlockNumber: 16}, - {name: "l1 safe block number bigger than number for the last virtualized batch", l1SafeBlockNumber: 8, expectedL2SafeBlockNumber: 16}, + {name: "l1 block number smaller than block number for the last verified batch", l1BlockNumber: 1, expectedBatchNumber: 11}, + {name: "l1 block number equal to block number for the last verified batch", l1BlockNumber: 10, expectedBatchNumber: 20}, + {name: "l1 block number bigger than number for the last verified batch", l1BlockNumber: 20, expectedBatchNumber: 20}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - l2SafeBlockNumber, err := testState.GetSafeL2BlockNumber(ctx, uint64(tc.l1SafeBlockNumber), dbTx) + batchNumber, err := testState.GetLastVerifiedL2BlockNumberUntilL1Block(ctx, uint64(tc.l1BlockNumber), dbTx) require.NoError(t, err) - assert.Equal(t, tc.expectedL2SafeBlockNumber, l2SafeBlockNumber) + assert.Equal(t, tc.expectedBatchNumber, batchNumber) }) } } -func TestGetFinalizedL2BlockNumber(t *testing.T) { +func TestGetLastVerifiedBatchNumberUntilL1Block(t *testing.T) { initOrResetDB() ctx := context.Background() dbTx, err := testState.BeginStateTransaction(ctx) @@ -578,54 +626,346 @@ func TestGetFinalizedL2BlockNumber(t *testing.T) { addr := common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266") hash := common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1") for i := 1; i <= 10; i++ { + blockNumber := uint64(i) + // add l1 block - err = testState.AddBlock(ctx, state.NewBlock(uint64(i)), dbTx) + err = testState.AddBlock(ctx, state.NewBlock(blockNumber), 
dbTx) require.NoError(t, err) + batchNumber := uint64(i * 10) + // add batch - _, err = testState.PostgresStorage.Exec(ctx, "INSERT INTO state.batch (batch_num) VALUES ($1)", i) + _, err = testState.PostgresStorage.Exec(ctx, "INSERT INTO state.batch (batch_num) VALUES ($1)", batchNumber) require.NoError(t, err) - // add l2 block - l2Block := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(i + 10))}) - err = testState.AddL2Block(ctx, uint64(i), l2Block, []*types.Receipt{}, uint8(0), dbTx) + virtualBatch := state.VirtualBatch{BlockNumber: blockNumber, BatchNumber: batchNumber, Coinbase: addr, SequencerAddr: addr, TxHash: hash} + err = testState.AddVirtualBatch(ctx, &virtualBatch, dbTx) require.NoError(t, err) - // virtualize batch - if i <= 6 { - b := state.VirtualBatch{BlockNumber: uint64(i), BatchNumber: uint64(i), Coinbase: addr, SequencerAddr: addr, TxHash: hash} - err = testState.AddVirtualBatch(ctx, &b, dbTx) + verifiedBatch := state.VerifiedBatch{BlockNumber: blockNumber, BatchNumber: batchNumber, TxHash: hash} + err = testState.AddVerifiedBatch(ctx, &verifiedBatch, dbTx) + require.NoError(t, err) + } + + type testCase struct { + name string + l1BlockNumber uint64 + expectedBatchNumber uint64 + } + + testCases := []testCase{ + {name: "l1 block number smaller than block number for the last verified batch", l1BlockNumber: 1, expectedBatchNumber: 10}, + {name: "l1 block number equal to block number for the last verified batch", l1BlockNumber: 10, expectedBatchNumber: 100}, + {name: "l1 block number bigger than number for the last verified batch", l1BlockNumber: 20, expectedBatchNumber: 100}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + batchNumber, err := testState.GetLastVerifiedBatchNumberUntilL1Block(ctx, uint64(tc.l1BlockNumber), dbTx) require.NoError(t, err) + + assert.Equal(t, tc.expectedBatchNumber, batchNumber) + }) + } +} + +func TestSyncInfo(t *testing.T) { + // Init database instance + initOrResetDB() + 
+ ctx := context.Background() + tx, err := testState.BeginStateTransaction(ctx) + require.NoError(t, err) + + // Test update on conflict + err = testState.SetInitSyncBatch(ctx, 1, tx) + require.NoError(t, err) + err = testState.SetInitSyncBatch(ctx, 1, tx) + require.NoError(t, err) + err = testState.SetLastBatchInfoSeenOnEthereum(ctx, 10, 8, tx) + require.NoError(t, err) + err = testState.SetInitSyncBatch(ctx, 1, tx) + require.NoError(t, err) + err = testState.SetLastBatchInfoSeenOnEthereum(ctx, 10, 8, tx) + require.NoError(t, err) + err = testState.SetLastBatchInfoSeenOnEthereum(ctx, 10, 8, tx) + require.NoError(t, err) + + err = tx.Commit(ctx) + require.NoError(t, err) +} + +func TestGetBatchByNumber(t *testing.T) { + initOrResetDB() + + ctx := context.Background() + dbTx, err := testState.BeginStateTransaction(ctx) + require.NoError(t, err) + + _, err = testState.PostgresStorage.Exec(ctx, `INSERT INTO state.batch + (batch_num, global_exit_root, local_exit_root, state_root, timestamp, coinbase, raw_txs_data) + VALUES(1, '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', '0xbf34f9a52a63229e90d1016011655bc12140bba5b771817b88cbf340d08dcbde', '2022-12-19 08:17:45.000', '0x0000000000000000000000000000000000000000', NULL); + `) + require.NoError(t, err) + + batchNum := uint64(1) + b, err := testState.GetBatchByNumber(ctx, batchNum, dbTx) + require.NoError(t, err) + assert.Equal(t, b.BatchNumber, batchNum) + + batchNum = uint64(2) + b, err = testState.GetBatchByNumber(ctx, batchNum, dbTx) + require.Error(t, state.ErrNotFound, err) + assert.Nil(t, b) + + require.NoError(t, dbTx.Commit(ctx)) +} + +func TestGetLogs(t *testing.T) { + initOrResetDB() + + ctx := context.Background() + + cfg := state.Config{ + MaxLogsCount: 8, + MaxLogsBlockRange: 10, + } + pgStateStorage = state.NewPostgresStorage(cfg, stateDb) + testState.PostgresStorage = pgStateStorage + + dbTx, err := 
testState.BeginStateTransaction(ctx) + require.NoError(t, err) + err = testState.AddBlock(ctx, block, dbTx) + assert.NoError(t, err) + + batchNumber := uint64(1) + _, err = testState.PostgresStorage.Exec(ctx, "INSERT INTO state.batch (batch_num) VALUES ($1)", batchNumber) + assert.NoError(t, err) + + time := time.Now() + blockNumber := big.NewInt(1) + + for i := 0; i < 3; i++ { + tx := types.NewTx(&types.LegacyTx{ + Nonce: uint64(i), + To: nil, + Value: new(big.Int), + Gas: 0, + GasPrice: big.NewInt(0), + }) + + logs := []*types.Log{} + for j := 0; j < 4; j++ { + logs = append(logs, &types.Log{TxHash: tx.Hash(), Index: uint(j)}) } - // verify batch - if i <= 3 { - b := state.VerifiedBatch{BlockNumber: uint64(i), BatchNumber: uint64(i), TxHash: hash} - err = testState.AddVerifiedBatch(ctx, &b, dbTx) - require.NoError(t, err) + receipt := &types.Receipt{ + Type: uint8(tx.Type()), + PostState: state.ZeroHash.Bytes(), + CumulativeGasUsed: 0, + EffectiveGasPrice: big.NewInt(0), + BlockNumber: blockNumber, + GasUsed: tx.Gas(), + TxHash: tx.Hash(), + TransactionIndex: 0, + Status: types.ReceiptStatusSuccessful, + Logs: logs, + } + + transactions := []*types.Transaction{tx} + receipts := []*types.Receipt{receipt} + + header := &types.Header{ + Number: big.NewInt(int64(i) + 1), + ParentHash: state.ZeroHash, + Coinbase: state.ZeroAddress, + Root: state.ZeroHash, + GasUsed: 1, + GasLimit: 10, + Time: uint64(time.Unix()), + } + + l2Block := types.NewBlock(header, transactions, []*types.Header{}, receipts, &trie.StackTrie{}) + for _, receipt := range receipts { + receipt.BlockHash = l2Block.Hash() + } + + storeTxsEGPData := []state.StoreTxEGPData{} + for range transactions { + storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) } + + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) + require.NoError(t, err) } type testCase struct { - name string - 
l1FinalizedBlockNumber uint64 - expectedL2FinalizedBlockNumber uint64 + name string + from uint64 + to uint64 + logCount int + expectedError error } testCases := []testCase{ - {name: "l1 finalized block number smaller than block number for the last verified batch", l1FinalizedBlockNumber: 1, expectedL2FinalizedBlockNumber: 11}, - {name: "l1 finalized block number equal to block number for the last verified batch", l1FinalizedBlockNumber: 3, expectedL2FinalizedBlockNumber: 13}, - {name: "l1 finalized block number bigger than number for the last verified batch", l1FinalizedBlockNumber: 5, expectedL2FinalizedBlockNumber: 13}, + { + name: "invalid block range", + from: 2, + to: 1, + logCount: 0, + expectedError: state.ErrInvalidBlockRange, + }, + { + name: "block range bigger than allowed", + from: 1, + to: 12, + logCount: 0, + expectedError: state.ErrMaxLogsBlockRangeLimitExceeded, + }, + { + name: "log count bigger than allowed", + from: 1, + to: 3, + logCount: 0, + expectedError: state.ErrMaxLogsCountLimitExceeded, + }, + { + name: "logs returned successfully", + from: 1, + to: 2, + logCount: 8, + expectedError: nil, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + logs, err := testState.GetLogs(ctx, testCase.from, testCase.to, []common.Address{}, [][]common.Hash{}, nil, nil, dbTx) + + assert.Equal(t, testCase.logCount, len(logs)) + assert.Equal(t, testCase.expectedError, err) + }) + } + require.NoError(t, dbTx.Commit(ctx)) +} + +func TestGetNativeBlockHashesInRange(t *testing.T) { + initOrResetDB() + + ctx := context.Background() + + cfg := state.Config{ + MaxNativeBlockHashBlockRange: 10, } + pgStateStorage = state.NewPostgresStorage(cfg, stateDb) + testState.PostgresStorage = pgStateStorage - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - l2FinalizedBlockNumber, err := testState.GetFinalizedL2BlockNumber(ctx, uint64(tc.l1FinalizedBlockNumber), dbTx) - require.NoError(t, err) + dbTx, err := 
testState.BeginStateTransaction(ctx) + require.NoError(t, err) + err = testState.AddBlock(ctx, block, dbTx) + assert.NoError(t, err) + + batchNumber := uint64(1) + _, err = testState.PostgresStorage.Exec(ctx, "INSERT INTO state.batch (batch_num) VALUES ($1)", batchNumber) + assert.NoError(t, err) + + time := time.Now() + blockNumber := big.NewInt(1) + + nativeBlockHashes := []common.Hash{} - assert.Equal(t, tc.expectedL2FinalizedBlockNumber, l2FinalizedBlockNumber) + for i := 0; i < 10; i++ { + tx := types.NewTx(&types.LegacyTx{ + Nonce: uint64(i), + To: nil, + Value: new(big.Int), + Gas: 0, + GasPrice: big.NewInt(0), + }) + + receipt := &types.Receipt{ + Type: uint8(tx.Type()), + PostState: state.ZeroHash.Bytes(), + CumulativeGasUsed: 0, + EffectiveGasPrice: big.NewInt(0), + BlockNumber: blockNumber, + GasUsed: tx.Gas(), + TxHash: tx.Hash(), + TransactionIndex: 0, + Status: types.ReceiptStatusSuccessful, + } + + transactions := []*types.Transaction{tx} + receipts := []*types.Receipt{receipt} + + header := &types.Header{ + Number: big.NewInt(int64(i) + 1), + ParentHash: state.ZeroHash, + Coinbase: state.ZeroAddress, + Root: common.HexToHash(hex.EncodeBig(big.NewInt(int64(i)))), + GasUsed: 1, + GasLimit: 10, + Time: uint64(time.Unix()), + } + + l2Block := types.NewBlock(header, transactions, []*types.Header{}, receipts, &trie.StackTrie{}) + for _, receipt := range receipts { + receipt.BlockHash = l2Block.Hash() + } + + storeTxsEGPData := []state.StoreTxEGPData{} + for range transactions { + storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + } + + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) + require.NoError(t, err) + + nativeBlockHashes = append(nativeBlockHashes, l2Block.Header().Root) + } + + type testCase struct { + name string + from uint64 + to uint64 + expectedResults []common.Hash + expectedError error + } + + testCases := []testCase{ + { 
+ name: "invalid block range", + from: 2, + to: 1, + expectedResults: nil, + expectedError: state.ErrInvalidBlockRange, + }, + { + name: "block range bigger than allowed", + from: 1, + to: 12, + expectedResults: nil, + expectedError: state.ErrMaxNativeBlockHashBlockRangeLimitExceeded, + }, + { + name: "hashes returned successfully", + from: 4, + to: 7, + expectedResults: nativeBlockHashes[3:7], + expectedError: nil, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + results, err := testState.GetNativeBlockHashesInRange(ctx, testCase.from, testCase.to, dbTx) + + assert.ElementsMatch(t, testCase.expectedResults, results) + assert.Equal(t, testCase.expectedError, err) }) } + + require.NoError(t, dbTx.Commit(ctx)) } func TestGetBatchL2DataByNumber(t *testing.T) { diff --git a/state/queue.go b/state/queue.go new file mode 100644 index 0000000000..7e78254bfd --- /dev/null +++ b/state/queue.go @@ -0,0 +1,67 @@ +package state + +import ( + "fmt" + "sync" +) + +// ErrQueueEmpty is returned when a queue operation +// depends on the queue to not be empty, but it is empty +var ErrQueueEmpty = fmt.Errorf("queue is empty") + +// Queue is a generic queue implementation that implements FIFO +type Queue[T any] struct { + items []T + mutex *sync.Mutex +} + +// NewQueue creates a new instance of queue and initializes it +func NewQueue[T any]() *Queue[T] { + return &Queue[T]{ + items: make([]T, 0), + mutex: &sync.Mutex{}, + } +} + +// Push enqueue an item +func (q *Queue[T]) Push(item T) { + q.mutex.Lock() + defer q.mutex.Unlock() + q.items = append(q.items, item) +} + +// Top returns the top level item without removing it +func (q *Queue[T]) Top() (T, error) { + q.mutex.Lock() + defer q.mutex.Unlock() + var v T + if len(q.items) == 0 { + return v, ErrQueueEmpty + } + return q.items[0], nil +} + +// Pop returns the top level item and unqueues it +func (q *Queue[T]) Pop() (T, error) { + q.mutex.Lock() + defer q.mutex.Unlock() + var v T + if 
len(q.items) == 0 { + return v, ErrQueueEmpty + } + v = q.items[0] + q.items = q.items[1:] + return v, nil +} + +// Len returns the size of the queue +func (q *Queue[T]) Len() int { + q.mutex.Lock() + defer q.mutex.Unlock() + return len(q.items) +} + +// IsEmpty returns false if the queue has itens, otherwise true +func (q *Queue[T]) IsEmpty() bool { + return q.Len() == 0 +} diff --git a/state/queue_test.go b/state/queue_test.go new file mode 100644 index 0000000000..240c1a0fba --- /dev/null +++ b/state/queue_test.go @@ -0,0 +1,52 @@ +package state + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestQueue(t *testing.T) { + q := NewQueue[int]() + + q.Push(10) + q.Push(20) + q.Push(30) + + top, err := q.Top() + require.NoError(t, err) + assert.Equal(t, 10, top) + assert.Equal(t, 3, q.Len()) + assert.Equal(t, false, q.IsEmpty()) + + pop, err := q.Pop() + require.NoError(t, err) + assert.Equal(t, 10, pop) + assert.Equal(t, 2, q.Len()) + assert.Equal(t, false, q.IsEmpty()) + + top, err = q.Top() + require.NoError(t, err) + assert.Equal(t, 20, top) + assert.Equal(t, 2, q.Len()) + assert.Equal(t, false, q.IsEmpty()) + + pop, err = q.Pop() + require.NoError(t, err) + assert.Equal(t, 20, pop) + assert.Equal(t, 1, q.Len()) + assert.Equal(t, false, q.IsEmpty()) + + pop, err = q.Pop() + require.NoError(t, err) + assert.Equal(t, 30, pop) + assert.Equal(t, 0, q.Len()) + assert.Equal(t, true, q.IsEmpty()) + + _, err = q.Top() + require.Error(t, ErrQueueEmpty, err) + + _, err = q.Pop() + require.Error(t, ErrQueueEmpty, err) +} diff --git a/state/runtime/executor/errors.go b/state/runtime/executor/errors.go index cc7bf71af0..f509bf116f 100644 --- a/state/runtime/executor/errors.go +++ b/state/runtime/executor/errors.go @@ -326,6 +326,30 @@ func ExecutorErr(errorCode ExecutorError) error { return runtime.ErrExecutorSMMainHashKReadOutOfRange case ExecutorError_EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE: return 
runtime.ErrExecutorSMMainHashPReadOutOfRange + case ExecutorError_EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT: + return runtime.ErrExecutorErrorInvalidOldStateRoot + case ExecutorError_EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH: + return runtime.ErrExecutorErrorInvalidOldAccInputHash + case ExecutorError_EXECUTOR_ERROR_INVALID_CHAIN_ID: + return runtime.ErrExecutorErrorInvalidChainId + case ExecutorError_EXECUTOR_ERROR_INVALID_BATCH_L2_DATA: + return runtime.ErrExecutorErrorInvalidBatchL2Data + case ExecutorError_EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT: + return runtime.ErrExecutorErrorInvalidGlobalExitRoot + case ExecutorError_EXECUTOR_ERROR_INVALID_COINBASE: + return runtime.ErrExecutorErrorInvalidCoinbase + case ExecutorError_EXECUTOR_ERROR_INVALID_FROM: + return runtime.ErrExecutorErrorInvalidFrom + case ExecutorError_EXECUTOR_ERROR_INVALID_DB_KEY: + return runtime.ErrExecutorErrorInvalidDbKey + case ExecutorError_EXECUTOR_ERROR_INVALID_DB_VALUE: + return runtime.ErrExecutorErrorInvalidDbValue + case ExecutorError_EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY: + return runtime.ErrExecutorErrorInvalidContractsBytecodeKey + case ExecutorError_EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE: + return runtime.ErrExecutorErrorInvalidContractsBytecodeValue + case ExecutorError_EXECUTOR_ERROR_INVALID_GET_KEY: + return runtime.ErrExecutorErrorInvalidGetKey } return ErrExecutorUnknown } @@ -469,6 +493,30 @@ func ExecutorErrorCode(err error) ExecutorError { return ExecutorError_EXECUTOR_ERROR_SM_MAIN_HASHK_READ_OUT_OF_RANGE case runtime.ErrExecutorSMMainHashPReadOutOfRange: return ExecutorError_EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE + case runtime.ErrExecutorErrorInvalidOldStateRoot: + return ExecutorError_EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT + case runtime.ErrExecutorErrorInvalidOldAccInputHash: + return ExecutorError_EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH + case runtime.ErrExecutorErrorInvalidChainId: + return ExecutorError_EXECUTOR_ERROR_INVALID_CHAIN_ID + case 
runtime.ErrExecutorErrorInvalidBatchL2Data: + return ExecutorError_EXECUTOR_ERROR_INVALID_BATCH_L2_DATA + case runtime.ErrExecutorErrorInvalidGlobalExitRoot: + return ExecutorError_EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT + case runtime.ErrExecutorErrorInvalidCoinbase: + return ExecutorError_EXECUTOR_ERROR_INVALID_COINBASE + case runtime.ErrExecutorErrorInvalidFrom: + return ExecutorError_EXECUTOR_ERROR_INVALID_FROM + case runtime.ErrExecutorErrorInvalidDbKey: + return ExecutorError_EXECUTOR_ERROR_INVALID_DB_KEY + case runtime.ErrExecutorErrorInvalidDbValue: + return ExecutorError_EXECUTOR_ERROR_INVALID_DB_VALUE + case runtime.ErrExecutorErrorInvalidContractsBytecodeKey: + return ExecutorError_EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY + case runtime.ErrExecutorErrorInvalidContractsBytecodeValue: + return ExecutorError_EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE + case runtime.ErrExecutorErrorInvalidGetKey: + return ExecutorError_EXECUTOR_ERROR_INVALID_GET_KEY } return math.MaxInt32 } diff --git a/state/runtime/executor/executor.pb.go b/state/runtime/executor/executor.pb.go index 4f660d33ad..5420a372b5 100644 --- a/state/runtime/executor/executor.pb.go +++ b/state/runtime/executor/executor.pb.go @@ -1,17 +1,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.31.0 +// protoc v4.24.4 // source: executor.proto package executor import ( + reflect "reflect" + sync "sync" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" ) const ( @@ -320,6 +321,30 @@ const ( ExecutorError_EXECUTOR_ERROR_SM_MAIN_HASHK_READ_OUT_OF_RANGE ExecutorError = 67 // EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE indicates that the main execution Poseidon check found read out of range ExecutorError_EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE ExecutorError = 68 + // EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT indicates that the input parameter old_state_root is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT ExecutorError = 69 + // EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH indicates that the input parameter old_acc_input_hash is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH ExecutorError = 70 + // EXECUTOR_ERROR_INVALID_CHAIN_ID indicates that the input parameter chain_id is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_CHAIN_ID ExecutorError = 71 + // EXECUTOR_ERROR_INVALID_BATCH_L2_DATA indicates that the input parameter batch_l2_data is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_BATCH_L2_DATA ExecutorError = 72 + // EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT indicates that the input parameter global_exit_root is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT ExecutorError = 73 + // EXECUTOR_ERROR_INVALID_COINBASE indicates that the input parameter coinbase (i.e. 
sequencer address) is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_COINBASE ExecutorError = 74 + // EXECUTOR_ERROR_INVALID_FROM indicates that the input parameter from is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_FROM ExecutorError = 75 + // EXECUTOR_ERROR_INVALID_DB_KEY indicates that the input parameter db key is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_DB_KEY ExecutorError = 76 + // EXECUTOR_ERROR_INVALID_DB_VALUE indicates that the input parameter db value is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_DB_VALUE ExecutorError = 77 + // EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY indicates that the input parameter contracts_bytecode key is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY ExecutorError = 78 + // EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE indicates that the input parameter contracts_bytecode value is invalid + ExecutorError_EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE ExecutorError = 79 + // EXECUTOR_ERROR_INVALID_GET_KEY indicates that the input parameter get key is invalid, e.g. is true but fork_id<5 + ExecutorError_EXECUTOR_ERROR_INVALID_GET_KEY ExecutorError = 80 ) // Enum value maps for ExecutorError. 
@@ -394,6 +419,18 @@ var ( 66: "EXECUTOR_ERROR_SM_MAIN_JMPN_OUT_OF_RANGE", 67: "EXECUTOR_ERROR_SM_MAIN_HASHK_READ_OUT_OF_RANGE", 68: "EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE", + 69: "EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT", + 70: "EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH", + 71: "EXECUTOR_ERROR_INVALID_CHAIN_ID", + 72: "EXECUTOR_ERROR_INVALID_BATCH_L2_DATA", + 73: "EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT", + 74: "EXECUTOR_ERROR_INVALID_COINBASE", + 75: "EXECUTOR_ERROR_INVALID_FROM", + 76: "EXECUTOR_ERROR_INVALID_DB_KEY", + 77: "EXECUTOR_ERROR_INVALID_DB_VALUE", + 78: "EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY", + 79: "EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE", + 80: "EXECUTOR_ERROR_INVALID_GET_KEY", } ExecutorError_value = map[string]int32{ "EXECUTOR_ERROR_UNSPECIFIED": 0, @@ -465,6 +502,18 @@ var ( "EXECUTOR_ERROR_SM_MAIN_JMPN_OUT_OF_RANGE": 66, "EXECUTOR_ERROR_SM_MAIN_HASHK_READ_OUT_OF_RANGE": 67, "EXECUTOR_ERROR_SM_MAIN_HASHP_READ_OUT_OF_RANGE": 68, + "EXECUTOR_ERROR_INVALID_OLD_STATE_ROOT": 69, + "EXECUTOR_ERROR_INVALID_OLD_ACC_INPUT_HASH": 70, + "EXECUTOR_ERROR_INVALID_CHAIN_ID": 71, + "EXECUTOR_ERROR_INVALID_BATCH_L2_DATA": 72, + "EXECUTOR_ERROR_INVALID_GLOBAL_EXIT_ROOT": 73, + "EXECUTOR_ERROR_INVALID_COINBASE": 74, + "EXECUTOR_ERROR_INVALID_FROM": 75, + "EXECUTOR_ERROR_INVALID_DB_KEY": 76, + "EXECUTOR_ERROR_INVALID_DB_VALUE": 77, + "EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_KEY": 78, + "EXECUTOR_ERROR_INVALID_CONTRACTS_BYTECODE_VALUE": 79, + "EXECUTOR_ERROR_INVALID_GET_KEY": 80, } ) @@ -518,6 +567,8 @@ type ProcessBatchRequest struct { Db map[string]string `protobuf:"bytes,13,rep,name=db,proto3" json:"db,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` ContractsBytecode map[string]string `protobuf:"bytes,14,rep,name=contracts_bytecode,json=contractsBytecode,proto3" json:"contracts_bytecode,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // 
For debug/testing purpposes only. Don't fill this on production TraceConfig *TraceConfig `protobuf:"bytes,15,opt,name=trace_config,json=traceConfig,proto3" json:"trace_config,omitempty"` + ContextId string `protobuf:"bytes,16,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + GetKeys uint32 `protobuf:"varint,17,opt,name=get_keys,json=getKeys,proto3" json:"get_keys,omitempty"` // if 1, the keys used to read or write storage values will be returned } func (x *ProcessBatchRequest) Reset() { @@ -657,6 +708,20 @@ func (x *ProcessBatchRequest) GetTraceConfig() *TraceConfig { return nil } +func (x *ProcessBatchRequest) GetContextId() string { + if x != nil { + return x.ContextId + } + return "" +} + +func (x *ProcessBatchRequest) GetGetKeys() uint32 { + if x != nil { + return x.GetKeys + } + return 0 +} + type ProcessBatchResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -680,6 +745,8 @@ type ProcessBatchResponse struct { FlushId uint64 `protobuf:"varint,16,opt,name=flush_id,json=flushId,proto3" json:"flush_id,omitempty"` StoredFlushId uint64 `protobuf:"varint,17,opt,name=stored_flush_id,json=storedFlushId,proto3" json:"stored_flush_id,omitempty"` ProverId string `protobuf:"bytes,18,opt,name=prover_id,json=proverId,proto3" json:"prover_id,omitempty"` + NodesKeys [][]byte `protobuf:"bytes,19,rep,name=nodes_keys,json=nodesKeys,proto3" json:"nodes_keys,omitempty"` + ProgramKeys [][]byte `protobuf:"bytes,20,rep,name=program_keys,json=programKeys,proto3" json:"program_keys,omitempty"` } func (x *ProcessBatchResponse) Reset() { @@ -840,6 +907,20 @@ func (x *ProcessBatchResponse) GetProverId() string { return "" } +func (x *ProcessBatchResponse) GetNodesKeys() [][]byte { + if x != nil { + return x.NodesKeys + } + return nil +} + +func (x *ProcessBatchResponse) GetProgramKeys() [][]byte { + if x != nil { + return x.ProgramKeys + } + return nil +} + // * // @dev GetFlushStatusResponse // @param {last_sent_flush_id} - id of the 
last flush data sent to database @@ -1481,6 +1562,8 @@ type Contract struct { Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` Gas uint64 `protobuf:"varint,5,opt,name=gas,proto3" json:"gas,omitempty"` + // Define type of internal call: CREATE, CREATE2, CALL, CALLCODE, DELEGATECALL, STATICCALL + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` } func (x *Contract) Reset() { @@ -1550,6 +1633,13 @@ func (x *Contract) GetGas() uint64 { return 0 } +func (x *Contract) GetType() string { + if x != nil { + return x.Type + } + return "" +} + type ProcessTransactionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1585,6 +1675,10 @@ type ProcessTransactionResponse struct { // Efective Gas Price EffectiveGasPrice string `protobuf:"bytes,15,opt,name=effective_gas_price,json=effectiveGasPrice,proto3" json:"effective_gas_price,omitempty"` EffectivePercentage uint32 `protobuf:"varint,16,opt,name=effective_percentage,json=effectivePercentage,proto3" json:"effective_percentage,omitempty"` + // Flag to indicate if opcode 'GASPRICE' has been called + HasGaspriceOpcode uint32 `protobuf:"varint,17,opt,name=has_gasprice_opcode,json=hasGaspriceOpcode,proto3" json:"has_gasprice_opcode,omitempty"` + // Flag to indicate if opcode 'BALANCE' has been called + HasBalanceOpcode uint32 `protobuf:"varint,18,opt,name=has_balance_opcode,json=hasBalanceOpcode,proto3" json:"has_balance_opcode,omitempty"` } func (x *ProcessTransactionResponse) Reset() { @@ -1724,6 +1818,20 @@ func (x *ProcessTransactionResponse) GetEffectivePercentage() uint32 { return 0 } +func (x *ProcessTransactionResponse) GetHasGaspriceOpcode() uint32 { + if x != nil { + return x.HasGaspriceOpcode + } + return 0 +} + +func (x *ProcessTransactionResponse) GetHasBalanceOpcode() uint32 { + if x != nil { + return x.HasBalanceOpcode + } + return 0 +} + type Log struct { 
state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1997,7 +2105,7 @@ var file_executor_proto_rawDesc = []byte{ 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x06, 0x0a, 0x13, 0x50, + 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x06, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6f, 0x6c, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x53, @@ -2038,561 +2146,607 @@ var file_executor_proto_rawDesc = []byte{ 0x65, 0x12, 0x3b, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x35, - 0x0a, 0x07, 0x44, 0x62, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x67, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, + 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x49, 0x64, 0x12, 0x19, 0x0a, + 0x08, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x35, 0x0a, 0x07, 0x44, 0x62, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x42, 0x79, 0x74, 0x65, + 0x63, 0x6f, 0x64, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x73, 0x42, 0x79, 0x74, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb4, 0x07, 0x0a, 0x14, - 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, - 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x6e, 0x65, - 0x77, 0x5f, 0x61, 0x63, 0x63, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x6e, 0x65, 0x77, 0x41, 0x63, 0x63, 0x49, 0x6e, - 0x70, 0x75, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, 0x0a, 0x13, 0x6e, 0x65, 0x77, 0x5f, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x6e, 0x65, 0x77, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x78, - 0x69, 
0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x62, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6e, - 0x65, 0x77, 0x42, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x75, 0x6d, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x6e, - 0x74, 0x5f, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6e, 0x74, 0x4b, 0x65, 0x63, 0x63, 0x61, 0x6b, - 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x73, 0x65, 0x69, 0x64, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x11, 0x63, 0x6e, 0x74, 0x50, 0x6f, 0x73, 0x65, 0x69, 0x64, 0x6f, 0x6e, - 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x73, 0x65, 0x69, 0x64, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x63, 0x6e, 0x74, 0x50, 0x6f, 0x73, 0x65, 0x69, 0x64, - 0x6f, 0x6e, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x6e, - 0x74, 0x5f, 0x6d, 0x65, 0x6d, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0c, 0x63, 0x6e, 0x74, 0x4d, 0x65, 0x6d, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6e, 0x74, 0x5f, 0x61, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x65, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x6e, 0x74, 0x41, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x65, 0x74, 0x69, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6e, 0x74, - 0x5f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x69, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0b, 0x63, 0x6e, 0x74, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, - 0x63, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x08, 0x63, 0x6e, 0x74, 0x53, 0x74, 0x65, 
0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x6d, - 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, - 0x12, 0x30, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x1a, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x12, 0x6b, 0x0a, 0x14, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x39, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x72, 0x65, 0x61, - 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, - 0x19, 0x0a, 0x08, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x07, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, 
0x18, 0x11, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x46, 0x6c, 0x75, 0x73, 0x68, - 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x64, 0x1a, - 0x61, 0x0a, 0x17, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xe7, 0x02, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, - 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x46, 0x6c, - 0x75, 0x73, 0x68, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x64, 0x12, - 0x22, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x6c, 0x75, 0x73, - 0x68, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, - 0x6f, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 
0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x46, 0x6c, - 0x75, 0x73, 0x68, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x70, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x70, 0x72, 0x6f, - 0x67, 0x72, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, - 0x1b, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x64, 0x22, 0xba, 0x02, 0x0a, - 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x0c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, - 0x2c, 0x0a, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 
0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, - 0x21, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, - 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x1e, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, - 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x22, 0x3f, 0x0a, 0x0d, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, - 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x7a, 0x0a, 0x09, 0x43, 0x61, - 0x6c, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 
0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x65, 0x70, 0x52, - 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x22, 0xbb, 0x02, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x62, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, - 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, - 0x73, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, - 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, - 0x0a, 0x0e, 0x6f, 0x6c, 0x64, 
0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x6f, 0x6f, 0x74, 0x22, 0xa7, 0x03, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x65, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x12, 0x0e, 0x0a, - 0x02, 0x70, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x70, 0x63, 0x12, 0x10, 0x0a, - 0x03, 0x67, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, - 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x61, - 0x73, 0x5f, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, - 0x67, 0x61, 0x73, 0x52, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x70, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x6f, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x63, 0x6b, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x12, - 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x6d, 0x6f, 0x72, - 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6d, 0x65, - 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, - 0x72, 0x79, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x66, 0x66, 0x73, 
0x65, 0x74, 0x12, 0x1f, 0x0a, - 0x0b, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x31, - 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x15, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x78, - 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x22, 0xd9, 0x04, 0x0a, 0x1a, 0x50, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, - 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x15, 0x0a, 0x06, 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 
0x05, 0x72, 0x6c, 0x70, 0x54, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x19, - 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x65, 0x66, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x07, 0x67, 0x61, 0x73, 0x4c, 0x65, 0x66, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, - 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, - 0x55, 0x73, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x61, 0x73, 0x5f, 0x72, 0x65, 0x66, 0x75, - 0x6e, 0x64, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x67, 0x61, 0x73, 0x52, - 0x65, 0x66, 0x75, 0x6e, 0x64, 0x65, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x04, 0x6c, 0x6f, - 0x67, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, - 0x12, 0x48, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x65, 0x70, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0a, 0x63, 0x61, - 0x6c, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, - 0x61, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, 0x63, - 0x65, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x61, 0x67, 0x65, 0x22, 0xd7, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x18, 0x0a, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 
0x61, 0x73, 0x68, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x19, - 0x0a, 0x08, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, - 0x63, 0x68, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x62, - 0x61, 0x74, 0x63, 0x68, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xef, - 0x03, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x02, 0x70, 0x63, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x6f, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x65, - 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x47, 0x61, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, - 0x73, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, - 0x73, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1f, 0x0a, - 0x0b, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0a, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 
0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x46, 0x0a, 0x07, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x65, 0x70, 0x2e, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x61, 0x73, 0x5f, - 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x67, 0x61, - 0x73, 0x52, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x2a, 0xc6, 0x08, 0x0a, 0x08, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, - 0x15, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x4f, 0x4d, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x5f, 
0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, - 0x12, 0x18, 0x0a, 0x14, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, - 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x47, 0x41, 0x53, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x4f, - 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x5f, 0x4f, 0x56, - 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x4f, 0x4d, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x5f, 0x55, 0x4e, 0x44, 0x45, - 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x52, 0x4f, 0x4d, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, - 0x5a, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x05, 0x12, 0x28, 0x0a, - 0x24, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, - 0x41, 0x43, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, - 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x4f, 0x4d, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x45, 0x56, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x22, 0x0a, 0x1e, 0x52, 0x4f, 0x4d, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, - 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x10, 0x08, 0x12, 0x24, 0x0a, - 0x20, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, - 0x46, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4b, 0x45, 0x43, 0x43, 0x41, - 0x4b, 0x10, 0x09, 0x12, 0x24, 0x0a, 0x20, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, - 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x4f, 
0x4d, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, - 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4d, 0x45, 0x4d, 0x10, 0x0b, 0x12, 0x23, 0x0a, 0x1f, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf6, 0x07, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, + 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x6e, 0x65, 0x77, 0x5f, 0x61, 0x63, 0x63, 0x5f, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0f, 0x6e, 0x65, 0x77, 0x41, 0x63, 0x63, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x48, 0x61, 0x73, + 0x68, 0x12, 0x2d, 0x0a, 0x13, 0x6e, 0x65, 0x77, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, + 0x78, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, + 0x6e, 0x65, 0x77, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x78, 0x69, 0x74, 0x52, 0x6f, 0x6f, 0x74, + 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x75, + 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x4e, 0x75, 0x6d, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x63, 0x63, + 0x61, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0f, 0x63, 0x6e, 0x74, 0x4b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x65, 0x69, 0x64, 0x6f, 0x6e, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x63, + 0x6e, 0x74, 0x50, 0x6f, 0x73, 0x65, 0x69, 0x64, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x63, 
0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x65, 0x69, 0x64, 0x6f, 0x6e, + 0x5f, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x13, 0x63, 0x6e, 0x74, 0x50, 0x6f, 0x73, 0x65, 0x69, 0x64, 0x6f, 0x6e, 0x50, 0x61, 0x64, 0x64, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x6e, 0x74, 0x5f, 0x6d, 0x65, 0x6d, 0x5f, + 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x63, 0x6e, + 0x74, 0x4d, 0x65, 0x6d, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6e, + 0x74, 0x5f, 0x61, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x65, 0x74, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x6e, 0x74, 0x41, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x65, 0x74, + 0x69, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6e, 0x74, 0x5f, 0x62, 0x69, 0x6e, 0x61, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6e, 0x74, 0x42, 0x69, + 0x6e, 0x61, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6e, 0x74, 0x5f, 0x73, 0x74, + 0x65, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6e, 0x74, 0x53, 0x74, + 0x65, 0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x11, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x73, 0x55, + 0x73, 0x65, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 
0x2e, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x6b, 0x0a, 0x14, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x72, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x6c, 0x75, + 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x6c, 0x75, + 0x73, 0x68, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x66, + 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, + 0x65, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x6e, + 0x6f, 0x64, 0x65, 0x73, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x61, 0x0a, 0x17, 0x52, + 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe7, + 0x02, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, + 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x6c, 0x75, + 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x6c, + 0x61, 0x73, 0x74, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x64, 0x12, + 0x33, 0x0a, 0x16, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x6c, + 0x75, 0x73, 0x68, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, + 0x74, 0x6f, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, + 0x6f, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x50, 
0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x23, 0x0a, + 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x1b, 0x0a, 0x09, 0x70, + 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x64, 0x22, 0xba, 0x02, 0x0a, 0x0b, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x21, 0x74, 0x78, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x63, 
0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x54, 0x6f, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x12, 0x41, 0x0a, 0x1e, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x74, 0x6f, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, 0x74, 0x78, 0x48, 0x61, + 0x73, 0x68, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x22, 0x3f, 0x0a, 0x0d, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x61, + 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x7a, 0x0a, 0x09, 0x43, 0x61, 0x6c, 0x6c, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x32, + 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, 0x74, 0x65, + 0x70, 0x73, 0x22, 0xbb, 0x02, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, + 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, + 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, + 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x70, 0x72, + 0x69, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x61, 0x73, 0x50, 0x72, + 0x69, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6f, 0x6c, + 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, + 0x22, 0xa7, 0x03, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x12, 0x0e, 0x0a, 0x02, 0x70, 0x63, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x70, 0x63, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, + 0x61, 0x73, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, + 0x61, 0x73, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x61, 0x73, 0x5f, 0x72, 0x65, + 0x66, 0x75, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x67, 0x61, 0x73, 0x52, + 0x65, 0x66, 0x75, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x02, 0x6f, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x16, 0x0a, 0x06, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x31, 0x0a, 0x08, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 
0x72, + 0x61, 0x63, 0x74, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x2b, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6d, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x08, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xb7, 0x05, 0x0a, 0x1a, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, + 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, + 0x68, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x72, 0x6c, 0x70, 0x54, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x72, 
0x65, 0x74, 0x75, 0x72, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x65, 0x66, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x4c, 0x65, 0x66, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, + 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, + 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x61, 0x73, 0x5f, 0x72, 0x65, 0x66, + 0x75, 0x6e, 0x64, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x67, 0x61, 0x73, + 0x52, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x65, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x04, 0x6c, + 0x6f, 0x67, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, + 0x73, 0x12, 0x48, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x65, 0x70, 0x52, 0x0e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 
0x12, 0x35, 0x0a, 0x0a, 0x63, + 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, + 0x6c, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x67, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, + 0x63, 0x65, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x68, 0x61, 0x73, 0x5f, 0x67, 0x61, 0x73, + 0x70, 0x72, 0x69, 0x63, 0x65, 0x5f, 0x6f, 0x70, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x11, 0x68, 0x61, 0x73, 0x47, 0x61, 0x73, 0x70, 0x72, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x68, 0x61, 0x73, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x70, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x10, 0x68, 0x61, 0x73, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x70, 0x63, + 0x6f, 0x64, 0x65, 0x22, 0xd7, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 
0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x19, 0x0a, + 0x08, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x61, + 0x74, 0x63, 0x68, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xef, 0x03, + 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x02, 0x70, 0x63, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x6f, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x65, 0x6d, + 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x47, 0x61, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, + 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, + 0x43, 0x6f, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0a, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 
0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x46, 0x0a, 0x07, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x65, 0x70, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x61, 0x73, 0x5f, 0x72, + 0x65, 0x66, 0x75, 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x67, 0x61, 0x73, + 0x52, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, + 0xc6, 0x08, 0x0a, 0x08, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 
0x19, 0x0a, 0x15, + 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x4f, 0x4d, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, 0x12, + 0x18, 0x0a, 0x14, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, + 0x5f, 0x4f, 0x46, 0x5f, 0x47, 0x41, 0x53, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x4f, 0x4d, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x5f, 0x4f, 0x56, 0x45, + 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x4f, 0x4d, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x52, + 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, + 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x05, 0x12, 0x28, 0x0a, 0x24, + 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x41, + 0x43, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x49, + 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, + 0x56, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x22, 0x0a, 0x1e, 0x52, 0x4f, 0x4d, 0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, - 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x10, - 0x0c, 0x12, 0x25, 
0x0a, 0x21, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, - 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x50, - 0x41, 0x44, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x0d, 0x12, 0x26, 0x0a, 0x22, 0x52, 0x4f, 0x4d, 0x5f, + 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4b, 0x45, 0x43, 0x43, 0x41, 0x4b, + 0x10, 0x09, 0x12, 0x24, 0x0a, 0x20, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, + 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x50, 0x4f, 0x53, 0x45, 0x49, 0x44, 0x4f, 0x4e, 0x10, 0x0e, - 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4a, 0x55, 0x4d, 0x50, 0x10, 0x0f, 0x12, 0x1c, 0x0a, 0x18, - 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x5f, 0x4f, 0x50, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x4f, - 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, - 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x11, 0x12, 0x28, 0x0a, 0x24, 0x52, 0x4f, 0x4d, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x59, - 0x54, 0x45, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x53, 0x5f, 0x45, 0x46, - 0x10, 0x12, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x10, 0x13, 0x12, 0x28, 0x0a, - 0x24, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 
0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, - 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x48, 0x41, - 0x49, 0x4e, 0x5f, 0x49, 0x44, 0x10, 0x14, 0x12, 0x25, 0x0a, 0x21, 0x52, 0x4f, 0x4d, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, - 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4e, 0x4f, 0x4e, 0x43, 0x45, 0x10, 0x15, 0x12, 0x29, - 0x0a, 0x25, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, - 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x41, - 0x53, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x16, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x4f, 0x4d, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, - 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, - 0x10, 0x17, 0x12, 0x2f, 0x0a, 0x2b, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4d, 0x45, 0x4d, 0x10, 0x0b, 0x12, 0x23, 0x0a, 0x1f, 0x52, + 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, + 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x10, 0x0c, + 0x12, 0x25, 0x0a, 0x21, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, + 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x50, 0x41, + 0x44, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x0d, 0x12, 0x26, 0x0a, 0x22, 0x52, 0x4f, 0x4d, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x43, 0x4f, 0x55, 0x4e, + 0x54, 0x45, 0x52, 0x53, 0x5f, 0x50, 0x4f, 0x53, 0x45, 0x49, 0x44, 0x4f, 0x4e, 0x10, 0x0e, 0x12, + 0x1a, 0x0a, 0x16, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, + 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4a, 0x55, 0x4d, 0x50, 0x10, 0x0f, 0x12, 0x1c, 0x0a, 0x18, 0x52, + 
0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x5f, 0x4f, 0x50, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x4f, 0x4d, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x11, 0x12, 0x28, 0x0a, 0x24, 0x52, 0x4f, 0x4d, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x53, 0x5f, 0x45, 0x46, 0x10, + 0x12, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, + 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x10, 0x13, 0x12, 0x28, 0x0a, 0x24, + 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, + 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x48, 0x41, 0x49, + 0x4e, 0x5f, 0x49, 0x44, 0x10, 0x14, 0x12, 0x25, 0x0a, 0x21, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4e, 0x4f, 0x4e, 0x43, 0x45, 0x10, 0x15, 0x12, 0x29, 0x0a, + 0x25, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, + 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x41, 0x53, + 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x16, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x4f, 0x4d, 0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, + 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x10, + 0x17, 0x12, 0x2f, 0x0a, 0x2b, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, + 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 
0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x47, 0x41, 0x53, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, + 0x10, 0x18, 0x12, 0x2b, 0x0a, 0x27, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x47, 0x41, 0x53, 0x5f, 0x4c, 0x49, 0x4d, 0x49, - 0x54, 0x10, 0x18, 0x12, 0x2b, 0x0a, 0x27, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x49, 0x4e, 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, - 0x49, 0x44, 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x19, - 0x12, 0x27, 0x0a, 0x23, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, - 0x54, 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x54, 0x58, 0x5f, 0x47, 0x41, 0x53, 0x5f, 0x4f, - 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x1a, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x4f, 0x4d, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x44, 0x41, 0x54, - 0x41, 0x5f, 0x54, 0x4f, 0x4f, 0x5f, 0x42, 0x49, 0x47, 0x10, 0x1b, 0x12, 0x21, 0x0a, 0x1d, 0x52, - 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, - 0x52, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, 0x10, 0x1c, 0x12, 0x19, - 0x0a, 0x15, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, - 0x4c, 0x49, 0x44, 0x5f, 0x52, 0x4c, 0x50, 0x10, 0x1d, 0x2a, 0xa2, 0x1a, 0x0a, 0x0d, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x45, - 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x45, - 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 
0x5f, 0x4e, 0x4f, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x58, 0x45, 0x43, - 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x44, 0x42, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, - 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, - 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, - 0x57, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x53, 0x10, 0x03, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, - 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, - 0x41, 0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, - 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x4b, 0x45, 0x43, 0x43, 0x41, 0x4b, 0x10, 0x04, 0x12, 0x33, - 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, - 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, - 0x59, 0x10, 0x05, 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x43, 0x4f, - 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, - 0x4d, 0x45, 0x4d, 0x10, 0x06, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, - 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, - 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, - 0x57, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x10, 0x07, 0x12, 0x34, 0x0a, 0x30, 0x45, 0x58, 0x45, - 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, - 0x41, 0x49, 0x4e, 
0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, - 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x44, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x08, 0x12, - 0x35, 0x0a, 0x31, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, - 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x4f, 0x53, 0x45, - 0x49, 0x44, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x26, 0x0a, 0x22, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, - 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, - 0x52, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, 0x10, 0x0a, 0x12, 0x23, - 0x0a, 0x1f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, - 0x48, 0x10, 0x0b, 0x12, 0x1d, 0x0a, 0x19, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x46, 0x45, 0x41, 0x32, 0x53, 0x43, 0x41, 0x4c, 0x41, 0x52, - 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x4f, 0x53, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x2e, 0x0a, 0x2a, - 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, - 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x55, - 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x54, 0x58, 0x10, 0x0e, 0x12, 0x2e, 0x0a, 0x2a, - 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, - 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4e, - 0x4f, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x10, 0x0f, 0x12, 0x39, 0x0a, 0x35, - 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 
0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, - 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x5f, 0x45, 0x43, 0x52, - 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x44, 0x49, 0x56, 0x49, 0x44, 0x45, 0x5f, 0x42, 0x59, - 0x5f, 0x5a, 0x45, 0x52, 0x4f, 0x10, 0x10, 0x12, 0x2f, 0x0a, 0x2b, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, - 0x4e, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, - 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x11, 0x12, 0x2b, 0x0a, 0x27, 0x45, 0x58, 0x45, 0x43, + 0x44, 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x19, 0x12, + 0x27, 0x0a, 0x23, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x54, + 0x52, 0x49, 0x4e, 0x53, 0x49, 0x43, 0x5f, 0x54, 0x58, 0x5f, 0x47, 0x41, 0x53, 0x5f, 0x4f, 0x56, + 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x1a, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x4f, 0x4d, 0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x54, 0x4f, 0x4f, 0x5f, 0x42, 0x49, 0x47, 0x10, 0x1b, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x4f, + 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, + 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, 0x10, 0x1c, 0x12, 0x19, 0x0a, + 0x15, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, + 0x49, 0x44, 0x5f, 0x52, 0x4c, 0x50, 0x10, 0x1d, 0x2a, 0x92, 0x1e, 0x0a, 0x0d, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x45, 0x58, + 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x58, + 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 
0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x44, 0x42, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x10, 0x02, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, + 0x5f, 0x53, 0x54, 0x45, 0x50, 0x53, 0x10, 0x03, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, - 0x49, 0x4e, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x4e, 0x45, 0x47, 0x41, 0x54, - 0x49, 0x56, 0x45, 0x10, 0x12, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, - 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, - 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, - 0x4b, 0x45, 0x59, 0x10, 0x13, 0x12, 0x20, 0x0a, 0x1c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, - 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, - 0x48, 0x41, 0x53, 0x48, 0x4b, 0x10, 0x14, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, - 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4f, 0x55, 0x54, - 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x15, 0x12, 0x32, 0x0a, 0x2e, 0x45, + 0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x4b, 0x45, 0x43, 0x43, 0x41, 0x4b, 0x10, 0x04, 0x12, 0x33, 0x0a, + 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x53, 0x4d, 0x5f, 0x4d, 0x41, 
0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, + 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, + 0x10, 0x05, 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x4d, + 0x45, 0x4d, 0x10, 0x06, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, + 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x10, 0x07, 0x12, 0x34, 0x0a, 0x30, 0x45, 0x58, 0x45, 0x43, + 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, + 0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x44, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x08, 0x12, 0x35, + 0x0a, 0x31, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, + 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x4f, 0x53, 0x45, 0x49, + 0x44, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x26, 0x0a, 0x22, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, + 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, + 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, 0x10, 0x0a, 0x12, 0x23, 0x0a, + 0x1f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, + 0x10, 0x0b, 0x12, 0x1d, 0x0a, 0x19, 0x45, 0x58, 0x45, 0x43, 0x55, 
0x54, 0x4f, 0x52, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x46, 0x45, 0x41, 0x32, 0x53, 0x43, 0x41, 0x4c, 0x41, 0x52, 0x10, + 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x4f, 0x53, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, - 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x50, 0x4f, 0x53, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x45, 0x47, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x16, 0x12, - 0x40, 0x0a, 0x3c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, - 0x50, 0x4f, 0x53, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x4c, 0x55, 0x53, 0x5f, 0x53, 0x49, - 0x5a, 0x45, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, - 0x17, 0x12, 0x38, 0x0a, 0x34, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, - 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, - 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x18, 0x12, 0x34, 0x0a, 0x30, 0x45, + 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x55, 0x4e, + 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x54, 0x58, 0x10, 0x0e, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, - 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, - 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, - 0x19, 0x12, 0x20, 0x0a, 0x1c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 
0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, - 0x50, 0x10, 0x1a, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, - 0x53, 0x48, 0x50, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x1b, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, - 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x4e, 0x45, 0x47, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x1c, 0x12, 0x40, 0x0a, 0x3c, 0x45, + 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4e, 0x4f, + 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x10, 0x0f, 0x12, 0x39, 0x0a, 0x35, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, - 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x50, 0x4f, 0x53, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x4c, 0x55, 0x53, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4f, - 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x1d, 0x12, 0x38, 0x0a, - 0x34, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x44, 0x49, 0x47, - 0x45, 0x53, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x34, 0x0a, 0x30, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x5f, 0x45, 0x43, 0x52, 0x45, + 0x43, 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x44, 0x49, 0x56, 0x49, 0x44, 0x45, 0x5f, 0x42, 0x59, 0x5f, + 0x5a, 0x45, 0x52, 0x4f, 0x10, 0x10, 0x12, 0x2f, 0x0a, 
0x2b, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, + 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, + 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x11, 0x12, 0x2b, 0x0a, 0x27, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, - 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x4e, 0x4f, - 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x1f, 0x12, 0x37, 0x0a, - 0x33, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x45, 0x4d, 0x41, 0x4c, 0x49, 0x47, 0x4e, - 0x5f, 0x4f, 0x46, 0x46, 0x53, 0x45, 0x54, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, - 0x41, 0x4e, 0x47, 0x45, 0x10, 0x20, 0x12, 0x2a, 0x0a, 0x26, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, + 0x4e, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x4e, 0x45, 0x47, 0x41, 0x54, 0x49, + 0x56, 0x45, 0x10, 0x12, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x53, + 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4b, + 0x45, 0x59, 0x10, 0x13, 0x12, 0x20, 0x0a, 0x1c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, + 0x41, 0x53, 0x48, 0x4b, 0x10, 0x14, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, - 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x50, 0x4c, 0x45, 0x5f, 0x46, 0x52, 0x45, 0x45, 0x49, 0x4e, - 0x10, 0x21, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, - 
0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x53, 0x53, - 0x45, 0x52, 0x54, 0x10, 0x22, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, - 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, - 0x4d, 0x45, 0x4d, 0x4f, 0x52, 0x59, 0x10, 0x23, 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, - 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, - 0x49, 0x4e, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, - 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x24, 0x12, 0x31, 0x0a, 0x2d, 0x45, 0x58, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4f, 0x55, 0x54, 0x5f, + 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x15, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, - 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x57, 0x52, 0x49, - 0x54, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x25, 0x12, 0x2f, 0x0a, - 0x2b, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x56, 0x41, - 0x4c, 0x55, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x26, 0x12, 0x31, - 0x0a, 0x2d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x50, 0x4f, 0x53, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x45, 0x47, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x16, 0x12, 0x40, + 0x0a, 0x3c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x50, - 0x41, 0x44, 0x44, 0x49, 0x4e, 0x47, 0x5f, 
0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, - 0x27, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, - 0x4b, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, - 0x28, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, - 0x4b, 0x4c, 0x45, 0x4e, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x4d, 0x49, 0x53, 0x4d, - 0x41, 0x54, 0x43, 0x48, 0x10, 0x29, 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, - 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, - 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x4c, 0x45, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, - 0x5f, 0x54, 0x57, 0x49, 0x43, 0x45, 0x10, 0x2a, 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, - 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, - 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x4e, - 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x36, 0x0a, 0x32, 0x45, 0x58, + 0x4f, 0x53, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x4c, 0x55, 0x53, 0x5f, 0x53, 0x49, 0x5a, + 0x45, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x17, + 0x12, 0x38, 0x0a, 0x34, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, + 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x18, 0x12, 0x34, 0x0a, 0x30, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 
0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, - 0x5f, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, - 0x10, 0x2c, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, - 0x48, 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x5f, - 0x54, 0x57, 0x49, 0x43, 0x45, 0x10, 0x2d, 0x12, 0x2f, 0x0a, 0x2b, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, - 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4d, 0x49, - 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x2e, 0x12, 0x31, 0x0a, 0x2d, 0x45, 0x58, 0x45, 0x43, - 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, - 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x50, 0x41, 0x44, 0x44, 0x49, 0x4e, 0x47, - 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x2f, 0x12, 0x2e, 0x0a, 0x2a, 0x45, - 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, - 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x30, 0x12, 0x33, 0x0a, 0x2f, 0x45, - 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, - 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x4c, 0x45, 0x4e, 0x5f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x31, - 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x19, + 0x12, 0x20, 0x0a, 0x1c, 
0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, - 0x4c, 0x45, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x57, 0x49, 0x43, 0x45, - 0x10, 0x32, 0x12, 0x36, 0x0a, 0x32, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, + 0x10, 0x1a, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, - 0x48, 0x50, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, - 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x33, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, + 0x48, 0x50, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, + 0x41, 0x4e, 0x47, 0x45, 0x10, 0x1b, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, + 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x4e, 0x45, 0x47, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x1c, 0x12, 0x40, 0x0a, 0x3c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, - 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, - 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x57, 0x49, 0x43, 0x45, 0x10, 0x34, 0x12, - 0x29, 0x0a, 0x25, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x5f, - 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x35, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, + 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x50, 0x4f, 0x53, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x4c, 0x55, 0x53, 0x5f, 0x53, 0x49, 
0x5a, 0x45, 0x5f, 0x4f, 0x55, + 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x1d, 0x12, 0x38, 0x0a, 0x34, + 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, + 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x44, 0x49, 0x47, 0x45, + 0x53, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, + 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x34, 0x0a, 0x30, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, + 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x4e, 0x4f, 0x54, + 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x1f, 0x12, 0x37, 0x0a, 0x33, + 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, + 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x45, 0x4d, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, + 0x4f, 0x46, 0x46, 0x53, 0x45, 0x54, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, + 0x4e, 0x47, 0x45, 0x10, 0x20, 0x12, 0x2a, 0x0a, 0x26, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, + 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, + 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x50, 0x4c, 0x45, 0x5f, 0x46, 0x52, 0x45, 0x45, 0x49, 0x4e, 0x10, + 0x21, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x53, 0x53, 0x45, + 0x52, 0x54, 0x10, 0x22, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, + 0x45, 0x4d, 0x4f, 0x52, 0x59, 0x10, 0x23, 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, + 0x4e, 
0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4d, + 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x24, 0x12, 0x31, 0x0a, 0x2d, 0x45, 0x58, 0x45, + 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, + 0x41, 0x49, 0x4e, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x57, 0x52, 0x49, 0x54, + 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x25, 0x12, 0x2f, 0x0a, 0x2b, + 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, + 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x56, 0x41, 0x4c, + 0x55, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x26, 0x12, 0x31, 0x0a, + 0x2d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x50, 0x41, + 0x44, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x27, + 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, + 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x28, + 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, + 0x4c, 0x45, 0x4e, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, + 0x54, 0x43, 0x48, 0x10, 0x29, 0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, + 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, + 0x48, 0x41, 0x53, 0x48, 0x4b, 0x4c, 0x45, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x5f, + 0x54, 0x57, 0x49, 0x43, 0x45, 0x10, 0x2a, 
0x12, 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, + 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x4e, 0x4f, + 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x36, 0x0a, 0x32, 0x45, 0x58, 0x45, + 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, + 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, + 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, + 0x2c, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, + 0x4b, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x5f, 0x54, + 0x57, 0x49, 0x43, 0x45, 0x10, 0x2d, 0x12, 0x2f, 0x0a, 0x2b, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, + 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4d, 0x49, 0x53, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x2e, 0x12, 0x31, 0x0a, 0x2d, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, + 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x50, 0x41, 0x44, 0x44, 0x49, 0x4e, 0x47, 0x5f, + 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x2f, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, - 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x5f, 0x45, 0x43, 0x52, 0x45, 0x43, - 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x36, 0x12, - 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 
0x52, 0x4f, - 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, - 0x5f, 0x41, 0x44, 0x44, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x37, 0x12, - 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, - 0x5f, 0x53, 0x55, 0x42, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x38, 0x12, - 0x2d, 0x0a, 0x29, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, - 0x5f, 0x4c, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x39, 0x12, 0x2e, + 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, + 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x30, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, + 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, + 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x4c, 0x45, 0x4e, 0x5f, 0x4c, 0x45, + 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x31, 0x12, + 0x30, 0x0a, 0x2c, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x4c, + 0x45, 0x4e, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x57, 0x49, 0x43, 0x45, 0x10, + 0x32, 0x12, 0x36, 0x0a, 0x32, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, + 0x50, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x4d, + 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x33, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, + 0x43, 0x55, 0x54, 0x4f, 
0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, + 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, + 0x43, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x57, 0x49, 0x43, 0x45, 0x10, 0x34, 0x12, 0x29, + 0x0a, 0x25, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x5f, 0x4d, + 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x35, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, + 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, + 0x41, 0x49, 0x4e, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x5f, 0x45, 0x43, 0x52, 0x45, 0x43, 0x4f, + 0x56, 0x45, 0x52, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x36, 0x12, 0x2e, + 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, + 0x41, 0x44, 0x44, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x37, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, - 0x53, 0x4c, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3a, 0x12, 0x2d, + 0x53, 0x55, 0x42, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x38, 0x12, 0x2d, 0x0a, 0x29, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, - 0x45, 0x51, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3b, 0x12, 0x2e, 0x0a, + 0x4c, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x39, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 
0x52, 0x4f, 0x52, 0x5f, - 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3c, 0x12, 0x2d, 0x0a, + 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x53, + 0x4c, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3a, 0x12, 0x2d, 0x0a, 0x29, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x4f, - 0x52, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3d, 0x12, 0x2e, 0x0a, 0x2a, + 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x45, + 0x51, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3b, 0x12, 0x2e, 0x0a, 0x2a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, - 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x58, 0x4f, - 0x52, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3e, 0x12, 0x32, 0x0a, 0x2e, + 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x41, 0x4e, + 0x44, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3c, 0x12, 0x2d, 0x0a, 0x29, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, - 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x45, 0x4d, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, - 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3f, - 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x45, 0x4d, 0x41, 0x4c, - 0x49, 0x47, 0x4e, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x38, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, - 0x54, 0x43, 0x48, 
0x10, 0x40, 0x12, 0x31, 0x0a, 0x2d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, + 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, + 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3d, 0x12, 0x2e, 0x0a, 0x2a, 0x45, + 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, + 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x58, 0x4f, 0x52, + 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3e, 0x12, 0x32, 0x0a, 0x2e, 0x45, + 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, + 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x45, 0x4d, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x57, + 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x3f, 0x12, + 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, 0x45, 0x4d, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x38, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, + 0x43, 0x48, 0x10, 0x40, 0x12, 0x31, 0x0a, 0x2d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x4d, + 0x45, 0x4d, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4d, 0x49, 0x53, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x41, 0x12, 0x2c, 0x0a, 0x28, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, + 0x4e, 0x5f, 0x4a, 0x4d, 0x50, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, + 0x4e, 0x47, 0x45, 0x10, 0x42, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x5f, - 0x4d, 0x45, 0x4d, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 
0x45, 0x41, 0x44, 0x5f, 0x4d, 0x49, - 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x41, 0x12, 0x2c, 0x0a, 0x28, 0x45, 0x58, 0x45, 0x43, - 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, - 0x49, 0x4e, 0x5f, 0x4a, 0x4d, 0x50, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, - 0x41, 0x4e, 0x47, 0x45, 0x10, 0x42, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, - 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, 0x41, 0x49, 0x4e, - 0x5f, 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x5f, - 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x43, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, - 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, - 0x4d, 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, - 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x44, 0x32, 0xb9, - 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x55, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x20, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x23, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 
0x31, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x78, 0x50, 0x6f, 0x6c, 0x79, 0x67, - 0x6f, 0x6e, 0x48, 0x65, 0x72, 0x6d, 0x65, 0x7a, 0x2f, 0x7a, 0x6b, 0x65, 0x76, 0x6d, 0x2d, 0x6e, - 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x48, 0x41, 0x53, 0x48, 0x4b, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, + 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x43, 0x12, 0x32, 0x0a, 0x2e, 0x45, 0x58, 0x45, + 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x4d, 0x5f, 0x4d, + 0x41, 0x49, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, + 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x44, 0x12, 0x29, 0x0a, + 0x25, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x4f, 0x4c, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, + 0x45, 0x5f, 0x52, 0x4f, 0x4f, 0x54, 0x10, 0x45, 0x12, 0x2d, 0x0a, 0x29, 0x45, 0x58, 0x45, 0x43, + 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, + 0x49, 0x44, 0x5f, 0x4f, 0x4c, 0x44, 0x5f, 0x41, 0x43, 0x43, 0x5f, 0x49, 0x4e, 0x50, 0x55, 0x54, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x46, 0x12, 0x23, 0x0a, 0x1f, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, 0x49, 0x44, 0x10, 0x47, 0x12, 0x28, 0x0a, 0x24, + 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 
0x49, 0x44, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x4c, 0x32, 0x5f, + 0x44, 0x41, 0x54, 0x41, 0x10, 0x48, 0x12, 0x2b, 0x0a, 0x27, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, + 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x5f, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x5f, 0x45, 0x58, 0x49, 0x54, 0x5f, 0x52, 0x4f, 0x4f, + 0x54, 0x10, 0x49, 0x12, 0x23, 0x0a, 0x1f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4f, + 0x49, 0x4e, 0x42, 0x41, 0x53, 0x45, 0x10, 0x4a, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x58, 0x45, 0x43, + 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, + 0x49, 0x44, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x10, 0x4b, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x45, + 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x42, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x4c, 0x12, 0x23, 0x0a, 0x1f, + 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x42, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, + 0x4d, 0x12, 0x31, 0x0a, 0x2d, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x54, + 0x52, 0x41, 0x43, 0x54, 0x53, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4b, + 0x45, 0x59, 0x10, 0x4e, 0x12, 0x33, 0x0a, 0x2f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, + 0x4f, 0x4e, 0x54, 0x52, 0x41, 0x43, 0x54, 0x53, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x4f, 0x12, 0x22, 0x0a, 0x1e, 0x45, 0x58, 0x45, + 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 
0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x50, 0x32, 0xb9, 0x01, + 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x55, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x20, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x46, + 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x23, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x78, 0x50, 0x6f, 0x6c, 0x79, 0x67, 0x6f, + 0x6e, 0x48, 0x65, 0x72, 0x6d, 0x65, 0x7a, 0x2f, 0x7a, 0x6b, 0x65, 0x76, 0x6d, 0x2d, 0x6e, 0x6f, + 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/state/runtime/executor/executor_grpc.pb.go b/state/runtime/executor/executor_grpc.pb.go index 29b51509db..2922a4ebd2 100644 --- a/state/runtime/executor/executor_grpc.pb.go +++ b/state/runtime/executor/executor_grpc.pb.go @@ -1,13 +1,14 @@ // Code generated by 
protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.12 +// - protoc v4.24.4 // source: executor.proto package executor import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/state/runtime/instrumentation/js/goja.go b/state/runtime/instrumentation/js/goja.go index b3ed95318d..f13500e8a3 100644 --- a/state/runtime/instrumentation/js/goja.go +++ b/state/runtime/instrumentation/js/goja.go @@ -235,7 +235,12 @@ func (t *jsTracer) CaptureStart(env *fakevm.FakeEVM, from common.Address, to com t.ctx["to"] = t.vm.ToValue(to.Bytes()) t.ctx["input"] = t.vm.ToValue(input) t.ctx["gas"] = t.vm.ToValue(t.gasLimit) - t.ctx["gasPrice"] = t.vm.ToValue(env.TxContext.GasPrice) + gasPriceBig, err := t.toBig(t.vm, env.TxContext.GasPrice.String()) + if err != nil { + t.err = err + return + } + t.ctx["gasPrice"] = gasPriceBig valueBig, err := t.toBig(t.vm, value.String()) if err != nil { t.err = err diff --git a/state/runtime/instrumentation/js/internal/tracers/4byte_tracer_legacy.js b/state/runtime/instrumentation/js/internal/tracers/4byte_tracer_legacy.js index 462b4ad4cb..e4714b8bfb 100644 --- a/state/runtime/instrumentation/js/internal/tracers/4byte_tracer_legacy.js +++ b/state/runtime/instrumentation/js/internal/tracers/4byte_tracer_legacy.js @@ -46,7 +46,7 @@ return false; }, - // store save the given indentifier and datasize. + // store save the given identifier and datasize. 
store: function(id, size){ var key = "" + toHex(id) + "-" + size; this.ids[key] = this.ids[key] + 1 || 1; diff --git a/state/runtime/instrumentation/js/internal/tracers/call_tracer_legacy.js b/state/runtime/instrumentation/js/internal/tracers/call_tracer_legacy.js index 3ca7377738..0545127354 100644 --- a/state/runtime/instrumentation/js/internal/tracers/call_tracer_legacy.js +++ b/state/runtime/instrumentation/js/internal/tracers/call_tracer_legacy.js @@ -220,7 +220,7 @@ return this.finalize(result); }, - // finalize recreates a call object using the final desired field oder for json + // finalize recreates a call object using the final desired field order for json // serialization. This is a nicety feature to pass meaningfully ordered results // to users who don't interpret it, just display it. finalize: function(call) { diff --git a/state/runtime/instrumentation/tracers/native/gen_callframe_json.go b/state/runtime/instrumentation/tracers/native/gen_callframe_json.go index b17cb22dd8..39816aa465 100644 --- a/state/runtime/instrumentation/tracers/native/gen_callframe_json.go +++ b/state/runtime/instrumentation/tracers/native/gen_callframe_json.go @@ -6,9 +6,9 @@ import ( "encoding/json" "math/big" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/fakevm" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/fakevm" ) var _ = (*callFrameMarshaling)(nil) @@ -16,7 +16,7 @@ var _ = (*callFrameMarshaling)(nil) // MarshalJSON marshals as JSON. func (c callFrame) MarshalJSON() ([]byte, error) { type callFrame0 struct { - Type fakevm.OpCode `json:"-"` + Type fakevm.OpCode `json:"-"` From common.Address `json:"from"` Gas hexutil.Uint64 `json:"gas"` GasUsed hexutil.Uint64 `json:"gasUsed"` @@ -50,7 +50,7 @@ func (c callFrame) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. 
func (c *callFrame) UnmarshalJSON(input []byte) error { type callFrame0 struct { - Type *fakevm.OpCode `json:"-"` + Type *fakevm.OpCode `json:"-"` From *common.Address `json:"from"` Gas *hexutil.Uint64 `json:"gas"` GasUsed *hexutil.Uint64 `json:"gasUsed"` diff --git a/state/runtime/runtime.go b/state/runtime/runtime.go index 4ef83e664f..ddb40a3645 100644 --- a/state/runtime/runtime.go +++ b/state/runtime/runtime.go @@ -207,6 +207,30 @@ var ( ErrExecutorSMMainHashKReadOutOfRange = errors.New("main execution Keccak check found read out of range") // ErrExecutorSMMainHashPReadOutOfRange indicates that the main execution Poseidon check found read out of range ErrExecutorSMMainHashPReadOutOfRange = errors.New("main execution Poseidon check found read out of range") + // ErrExecutorErrorInvalidOldStateRoot indicates that the input parameter old_state_root is invalid + ErrExecutorErrorInvalidOldStateRoot = errors.New("old_state_root is invalid") + // ErrExecutorErrorInvalidOldAccInputHash indicates that the input parameter old_acc_input_hash is invalid + ErrExecutorErrorInvalidOldAccInputHash = errors.New("old_acc_input_hash is invalid") + // ErrExecutorErrorInvalidChainId indicates that the input parameter chain_id is invalid + ErrExecutorErrorInvalidChainId = errors.New("chain_id is invalid") + // ErrExecutorErrorInvalidBatchL2Data indicates that the input parameter batch_l2_data is invalid + ErrExecutorErrorInvalidBatchL2Data = errors.New("batch_l2_data is invalid") + // ErrExecutorErrorInvalidGlobalExitRoot indicates that the input parameter global_exit_root is invalid + ErrExecutorErrorInvalidGlobalExitRoot = errors.New("global_exit_root is invalid") + // ErrExecutorErrorInvalidCoinbase indicates that the input parameter coinbase (i.e. sequencer address) is invalid + ErrExecutorErrorInvalidCoinbase = errors.New("coinbase (i.e. 
sequencer address) is invalid") + // ErrExecutorErrorInvalidFrom indicates that the input parameter from is invalid + ErrExecutorErrorInvalidFrom = errors.New("from is invalid") + // ErrExecutorErrorInvalidDbKey indicates that the input parameter db key is invalid + ErrExecutorErrorInvalidDbKey = errors.New("db key is invalid") + // ErrExecutorErrorInvalidDbValue indicates that the input parameter db value is invalid + ErrExecutorErrorInvalidDbValue = errors.New("db value is invalid") + // ErrExecutorErrorInvalidContractsBytecodeKey indicates that the input parameter contracts_bytecode key is invalid + ErrExecutorErrorInvalidContractsBytecodeKey = errors.New("contracts_bytecode key is invalid") + // ErrExecutorErrorInvalidContractsBytecodeValue indicates that the input parameter contracts_bytecode value is invalid + ErrExecutorErrorInvalidContractsBytecodeValue = errors.New("contracts_bytecode value is invalid") + // ErrExecutorErrorInvalidGetKey indicates that the input parameter key value is invalid + ErrExecutorErrorInvalidGetKey = errors.New("key is invalid") // GRPC ERRORS // =========== diff --git a/state/state.go b/state/state.go index 487c19f726..46213f30c0 100644 --- a/state/state.go +++ b/state/state.go @@ -10,11 +10,12 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state/metrics" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" "google.golang.org/protobuf/types/known/emptypb" ) +const newL2BlockEventBufferSize = 500 + var ( // ZeroHash is the hash 0x0000000000000000000000000000000000000000000000000000000000000000 ZeroHash = common.Hash{} @@ -30,7 +31,6 @@ type State struct { tree *merkletree.StateTree eventLog *event.EventLog - lastL2BlockSeen types.Block newL2BlockEvents chan NewL2BlockEvent newL2BlockEventHandlers []NewL2BlockEventHandler } @@ -48,7 +48,7 @@ func NewState(cfg Config, storage *PostgresStorage, 
executorClient executor.Exec executorClient: executorClient, tree: stateTree, eventLog: eventLog, - newL2BlockEvents: make(chan NewL2BlockEvent), + newL2BlockEvents: make(chan NewL2BlockEvent, newL2BlockEventBufferSize), newL2BlockEventHandlers: []NewL2BlockEventHandler{}, } diff --git a/state/state_test.go b/state/state_test.go index d72c82e89d..224df066af 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -38,6 +38,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/google/uuid" "github.com/jackc/pgx/v4/pgxpool" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -58,6 +59,8 @@ var ( stateCfg = state.Config{ MaxCumulativeGasUsed: 800000, ChainID: 1000, + MaxLogsCount: 10000, + MaxLogsBlockRange: 10000, ForkIDIntervals: []state.ForkIDInterval{{ FromBatchNumber: 0, ToBatchNumber: math.MaxUint64, @@ -117,7 +120,7 @@ func TestMain(m *testing.M) { } eventLog := event.NewEventLog(event.Config{}, eventStorage) - testState = state.NewState(stateCfg, state.NewPostgresStorage(stateDb), executorClient, stateTree, eventLog) + testState = state.NewState(stateCfg, state.NewPostgresStorage(stateCfg, stateDb), executorClient, stateTree, eventLog) result := m.Run() @@ -245,7 +248,7 @@ func TestOpenCloseBatch(t *testing.T) { require.NoError(t, err) receipt1.BatchL2Data = data - err = testState.StoreTransactions(ctx, 1, txsBatch1, dbTx) + err = testState.StoreTransactions(ctx, 1, txsBatch1, nil, dbTx) require.NoError(t, err) // Close batch #1 err = testState.CloseBatch(ctx, receipt1, dbTx) @@ -520,6 +523,7 @@ func TestExecuteTransaction(t *testing.T) { UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } log.Debugf("%v", processBatchRequest) @@ -637,7 +641,7 @@ func TestGetTxsHashesByBatchNumber(t *testing.T) { Tx: tx2, }, } - err = testState.StoreTransactions(ctx, 1, txsBatch1, dbTx) + err = 
testState.StoreTransactions(ctx, 1, txsBatch1, nil, dbTx) require.NoError(t, err) txs, err := testState.GetTxsHashesByBatchNumber(ctx, 1, dbTx) @@ -754,6 +758,7 @@ func TestExecutor(t *testing.T) { Db: db, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) @@ -834,6 +839,7 @@ func TestExecutorRevert(t *testing.T) { UpdateMerkleTree: 0, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } fmt.Println("batchL2Data: ", batchL2Data) processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) @@ -883,7 +889,12 @@ func TestExecutorRevert(t *testing.T) { receipt.BlockHash = l2Block.Hash() - err = testState.AddL2Block(ctx, 0, l2Block, receipts, state.MaxEffectivePercentage, dbTx) + storeTxsEGPData := []state.StoreTxEGPData{} + for range transactions { + storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + } + + err = testState.AddL2Block(ctx, 0, l2Block, receipts, storeTxsEGPData, dbTx) require.NoError(t, err) l2Block, err = testState.GetL2BlockByHash(ctx, l2Block.Hash(), dbTx) require.NoError(t, err) @@ -958,6 +969,7 @@ func TestExecutorRevert(t *testing.T) { // Db: genesisDB, // ChainId: stateCfg.ChainID, // ForkId: forkID, +// ContextId: uuid.NewString(), // } // // processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) @@ -1038,6 +1050,7 @@ func TestExecutorTransfer(t *testing.T) { UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } // Read Sender Balance before execution @@ -1071,7 +1084,7 @@ func TestExecutorTransfer(t *testing.T) { require.Equal(t, "21002", data.Balance) // Read Modified Addresses from converted response - converted, err := testState.TestConvertToProcessBatchResponse([]types.Transaction{*signedTx}, processBatchResponse) + converted, err := 
testState.TestConvertToProcessBatchResponse(processBatchResponse) require.NoError(t, err) convertedData := converted.ReadWriteAddresses[receiverAddress] require.Equal(t, uint64(21002), convertedData.Balance.Uint64()) @@ -1185,6 +1198,7 @@ func TestExecutorTxHashAndRLP(t *testing.T) { UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } // Process batch @@ -1294,6 +1308,7 @@ func TestExecutorInvalidNonce(t *testing.T) { UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } // Process batch @@ -1631,7 +1646,7 @@ func TestExecutorUnsignedTransactions(t *testing.T) { assert.Equal(t, "0000000000000000000000000000000000000000000000000000000000000001", hex.EncodeToString(processBatchResponse.Responses[2].ReturnValue)) // Add txs to DB - err = testState.StoreTransactions(context.Background(), 1, processBatchResponse.Responses, dbTx) + err = testState.StoreTransactions(context.Background(), 1, processBatchResponse.Responses, nil, dbTx) require.NoError(t, err) // Close batch err = testState.CloseBatch( @@ -1723,7 +1738,12 @@ func TestAddGetL2Block(t *testing.T) { receipt.BlockHash = l2Block.Hash() - err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, state.MaxEffectivePercentage, dbTx) + storeTxsEGPData := []state.StoreTxEGPData{} + for range transactions { + storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + } + + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) require.NoError(t, err) result, err := testState.GetL2BlockByHash(ctx, l2Block.Hash(), dbTx) require.NoError(t, err) @@ -1819,6 +1839,7 @@ func TestExecutorUniswapOutOfCounters(t *testing.T) { OldLocalExitRoot: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), EthTimestamp: uint64(0), UpdateMerkleTree: 1, + ContextId: uuid.NewString(), } var testCases []TxHashTestCase 
@@ -1893,7 +1914,8 @@ func TestExecutorUniswapOutOfCounters(t *testing.T) { EthTimestamp: uint64(0), UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, - ForkId: forkID, + ForkId: forkID, + ContextId: uuid.NewString(), } // Process batch @@ -1919,6 +1941,7 @@ func TestExecutorUniswapOutOfCounters(t *testing.T) { OldLocalExitRoot: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), EthTimestamp: uint64(0), UpdateMerkleTree: 1, + ContextId: uuid.NewString(), } // Process batch @@ -2031,13 +2054,14 @@ func TestExecutorEstimateGas(t *testing.T) { UpdateMerkleTree: 0, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) require.NoError(t, err) assert.NotEqual(t, "", processBatchResponse.Responses[0].Error) - convertedResponse, err := testState.TestConvertToProcessBatchResponse([]types.Transaction{*signedTx0, *signedTx1}, processBatchResponse) + convertedResponse, err := testState.TestConvertToProcessBatchResponse(processBatchResponse) require.NoError(t, err) log.Debugf("%v", len(convertedResponse.Responses)) @@ -2055,7 +2079,7 @@ func TestExecutorEstimateGas(t *testing.T) { err = testState.OpenBatch(ctx, processingContext, dbTx) require.NoError(t, err) - err = testState.StoreTransactions(ctx, processBatchRequest.OldBatchNum+1, convertedResponse.Responses, dbTx) + err = testState.StoreTransactions(ctx, processBatchRequest.OldBatchNum+1, convertedResponse.Responses, nil, dbTx) require.NoError(t, err) processingReceipt := state.ProcessingReceipt{ @@ -2180,7 +2204,8 @@ func TestExecutorGasRefund(t *testing.T) { EthTimestamp: uint64(time.Now().Unix()), UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, - ForkId: forkID, + ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) @@ -2246,7 +2271,8 @@ func TestExecutorGasRefund(t *testing.T) { EthTimestamp: 
uint64(time.Now().Unix()), UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, - ForkId: forkID, + ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err = executorClient.ProcessBatch(ctx, processBatchRequest) @@ -2385,6 +2411,7 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) @@ -2411,7 +2438,7 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { require.Equal(t, uint64(1000000000), balance.Uint64()) // Preparation to be able to estimate gas - convertedResponse, err := testState.TestConvertToProcessBatchResponse(transactions, processBatchResponse) + convertedResponse, err := testState.TestConvertToProcessBatchResponse(processBatchResponse) require.NoError(t, err) log.Debugf("%v", len(convertedResponse.Responses)) @@ -2429,7 +2456,7 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { err = testState.OpenBatch(ctx, processingContext, dbTx) require.NoError(t, err) - err = testState.StoreTransactions(ctx, processBatchRequest.OldBatchNum+1, convertedResponse.Responses, dbTx) + err = testState.StoreTransactions(ctx, processBatchRequest.OldBatchNum+1, convertedResponse.Responses, nil, dbTx) require.NoError(t, err) processingReceipt := state.ProcessingReceipt{ @@ -2472,6 +2499,7 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { UpdateMerkleTree: 1, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err = executorClient.ProcessBatch(ctx, processBatchRequest) @@ -2529,6 +2557,7 @@ func TestExecuteWithoutUpdatingMT(t *testing.T) { UpdateMerkleTree: 0, ChainId: stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) @@ -2588,6 +2617,7 @@ func TestExecuteWithoutUpdatingMT(t *testing.T) { UpdateMerkleTree: 0, ChainId: 
stateCfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } processBatchResponse, err = executorClient.ProcessBatch(ctx, processBatchRequest) @@ -2680,7 +2710,7 @@ func TestExecutorUnsignedTransactionsWithCorrectL2BlockStateRoot(t *testing.T) { assert.Nil(t, processBatchResponse.Responses[3].RomError) // Add txs to DB - err = testState.StoreTransactions(context.Background(), 1, processBatchResponse.Responses, dbTx) + err = testState.StoreTransactions(context.Background(), 1, processBatchResponse.Responses, nil, dbTx) require.NoError(t, err) // Close batch err = testState.CloseBatch( @@ -2729,3 +2759,50 @@ func TestExecutorUnsignedTransactionsWithCorrectL2BlockStateRoot(t *testing.T) { assert.Nil(t, result.Err) assert.Equal(t, "0000000000000000000000000000000000000000000000000000000000000003", hex.EncodeToString(result.ReturnValue)) } + +func TestBigDataTx(t *testing.T) { + var chainIDSequencer = new(big.Int).SetInt64(400) + var sequencerAddress = common.HexToAddress("0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D") + var sequencerPvtKey = "0x28b2b0318721be8c8339199172cd7cc8f5e273800a35616ec893083a4b32c02e" + var sequencerBalance = 4000000 + + tx := types.NewTx(&types.LegacyTx{ + Nonce: 0, + To: &sequencerAddress, + Value: new(big.Int), + Gas: uint64(sequencerBalance), + GasPrice: new(big.Int).SetUint64(0), + Data: make([]byte, 120000), // large data + }) + + privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(sequencerPvtKey, "0x")) + require.NoError(t, err) + auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainIDSequencer) + require.NoError(t, err) + + signedTx, err := auth.Signer(auth.From, tx) + require.NoError(t, err) + + // Encode transaction + batchL2Data, err := state.EncodeTransaction(*signedTx, state.MaxEffectivePercentage, forkID) + require.NoError(t, err) + + // Create Batch + processBatchRequest := &executor.ProcessBatchRequest{ + OldBatchNum: 0, + Coinbase: sequencerAddress.String(), + BatchL2Data: batchL2Data, + OldStateRoot: 
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + GlobalExitRoot: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + OldAccInputHash: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + EthTimestamp: uint64(time.Now().Unix()), + UpdateMerkleTree: 1, + ChainId: stateCfg.ChainID, + ForkId: forkID, + ContextId: uuid.NewString(), + } + + response, err := executorClient.ProcessBatch(ctx, processBatchRequest) + require.NoError(t, err) + require.Equal(t, executor.ExecutorError_EXECUTOR_ERROR_INVALID_BATCH_L2_DATA, response.Error) +} diff --git a/state/test/datastream_test.go b/state/test/datastream_test.go new file mode 100644 index 0000000000..9c2002b842 --- /dev/null +++ b/state/test/datastream_test.go @@ -0,0 +1,55 @@ +package test + +import ( + "testing" + + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +func TestL2BlockStartEncode(t *testing.T) { + l2BlockStart := state.DSL2BlockStart{ + BatchNumber: 1, // 8 bytes + L2BlockNumber: 2, // 8 bytes + Timestamp: 3, // 8 bytes + GlobalExitRoot: common.HexToHash("0x04"), // 32 bytes + Coinbase: common.HexToAddress("0x05"), // 20 bytes + ForkID: 5, + } + + encoded := l2BlockStart.Encode() + expected := []byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0} + + assert.Equal(t, expected, encoded) +} + +func TestL2TransactionEncode(t *testing.T) { + l2Transaction := state.DSL2Transaction{ + EffectiveGasPricePercentage: 128, // 1 byte + IsValid: 1, // 1 byte + EncodedLength: 5, // 4 bytes + Encoded: []byte{1, 2, 3, 4, 5}, // 5 bytes + } + + encoded := l2Transaction.Encode() + expected := []byte{128, 1, 5, 0, 0, 0, 1, 2, 3, 4, 5} + assert.Equal(t, 
expected, encoded) +} + +func TestL2BlockEndEncode(t *testing.T) { + l2BlockEnd := state.DSL2BlockEnd{ + L2BlockNumber: 1, // 8 bytes + BlockHash: common.HexToHash("0x02"), // 32 bytes + StateRoot: common.HexToHash("0x03"), // 32 bytes + } + + encoded := l2BlockEnd.Encode() + expected := []byte{1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3} + + assert.Equal(t, expected, encoded) +} diff --git a/state/transaction.go b/state/transaction.go index 50d9c0a82d..ecdb0c9460 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/google/uuid" "github.com/holiman/uint256" "github.com/jackc/pgx/v4" "google.golang.org/grpc/codes" @@ -118,7 +119,7 @@ func RlpFieldsToLegacyTx(fields [][]byte, v, r, s []byte) (tx *types.LegacyTx, e // StoreTransactions is used by the sequencer to add processed transactions into // an open batch. If the batch already has txs, the processedTxs must be a super // set of the existing ones, preserving order. 
-func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, processedTxs []*ProcessTransactionResponse, dbTx pgx.Tx) error { +func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, processedTxs []*ProcessTransactionResponse, txsEGPLog []*EffectiveGasPriceLog, dbTx pgx.Tx) error { if dbTx == nil { return ErrDBTxNil } @@ -153,7 +154,7 @@ func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, proce // if the transaction has an intrinsic invalid tx error it means // the transaction has not changed the state, so we don't store it // and just move to the next - if executor.IsIntrinsicError(executor.RomErrorCode(processedTx.RomError)) { + if executor.IsIntrinsicError(executor.RomErrorCode(processedTx.RomError)) || errors.Is(processedTx.RomError, executor.RomErr(executor.RomError_ROM_ERROR_INVALID_RLP)) { continue } @@ -185,8 +186,13 @@ func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, proce receipt.BlockHash = block.Hash() + storeTxsEGPData := []StoreTxEGPData{{EGPLog: nil, EffectivePercentage: uint8(processedTx.EffectivePercentage)}} + if txsEGPLog != nil { + storeTxsEGPData[0].EGPLog = txsEGPLog[i] + } + // Store L2 block and its transaction - if err := s.AddL2Block(ctx, batchNumber, block, receipts, uint8(processedTx.EffectivePercentage), dbTx); err != nil { + if err := s.AddL2Block(ctx, batchNumber, block, receipts, storeTxsEGPData, dbTx); err != nil { return err } } @@ -243,10 +249,19 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has return nil, err } + var txHashToGenerateCallTrace []byte + var txHashToGenerateExecuteTrace []byte + + if traceConfig.IsDefaultTracer() { + txHashToGenerateExecuteTrace = transactionHash.Bytes() + } else { + txHashToGenerateCallTrace = transactionHash.Bytes() + } + // Create Batch traceConfigRequest := &executor.TraceConfig{ - TxHashToGenerateCallTrace: transactionHash.Bytes(), - TxHashToGenerateExecuteTrace: 
transactionHash.Bytes(), + TxHashToGenerateCallTrace: txHashToGenerateCallTrace, + TxHashToGenerateExecuteTrace: txHashToGenerateExecuteTrace, // set the defaults to the maximum information we can have. // this is needed to process custom tracers later DisableStorage: cFalse, @@ -264,11 +279,11 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has if traceConfig.DisableStack { traceConfigRequest.DisableStack = cTrue } - if traceConfig.EnableMemory { - traceConfigRequest.EnableMemory = cTrue + if !traceConfig.EnableMemory { + traceConfigRequest.EnableMemory = cFalse } - if traceConfig.EnableReturnData { - traceConfigRequest.EnableReturnData = cTrue + if !traceConfig.EnableReturnData { + traceConfigRequest.EnableReturnData = cFalse } } @@ -286,6 +301,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has ChainId: s.cfg.ChainID, ForkId: forkId, TraceConfig: traceConfigRequest, + ContextId: uuid.NewString(), } // Send Batch to the Executor @@ -300,6 +316,8 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has return nil, err } + // Transactions are decoded only for logging purposes + // as they are not longer needed in the convertToProcessBatchResponse function txs, _, _, err := DecodeTxs(batchL2Data, forkId) if err != nil && !errors.Is(err, ErrInvalidData) { return nil, err @@ -309,7 +327,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has log.Debugf(tx.Hash().String()) } - convertedResponse, err := s.convertToProcessBatchResponse(txs, processBatchResponse) + convertedResponse, err := s.convertToProcessBatchResponse(processBatchResponse) if err != nil { return nil, err } @@ -342,6 +360,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has StateRoot: response.StateRoot.Bytes(), StructLogs: response.ExecutionTrace, ExecutorTrace: response.CallTrace, + Err: response.RomError, } // if is the default trace, return the 
result @@ -435,7 +454,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has fakeDB := &FakeDB{State: s, stateRoot: batch.StateRoot.Bytes()} evm := fakevm.NewFakeEVM(fakevm.BlockContext{BlockNumber: big.NewInt(1)}, fakevm.TxContext{GasPrice: gasPrice}, fakeDB, params.TestChainConfig, fakevm.Config{Debug: true, Tracer: customTracer}) - traceResult, err := s.buildTrace(evm, result.ExecutorTrace, customTracer) + traceResult, err := s.buildTrace(evm, result, customTracer) if err != nil { log.Errorf("debug transaction: failed parse the trace using the tracer: %v", err) return nil, fmt.Errorf("failed parse the trace using the tracer: %v", err) @@ -447,7 +466,8 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has } // ParseTheTraceUsingTheTracer parses the given trace with the given tracer. -func (s *State) buildTrace(evm *fakevm.FakeEVM, trace instrumentation.ExecutorTrace, tracer tracers.Tracer) (json.RawMessage, error) { +func (s *State) buildTrace(evm *fakevm.FakeEVM, result *runtime.ExecutionResult, tracer tracers.Tracer) (json.RawMessage, error) { + trace := result.ExecutorTrace tracer.CaptureTxStart(trace.Context.Gas) contextGas := trace.Context.Gas - trace.Context.GasUsed if len(trace.Steps) > 0 { @@ -483,7 +503,8 @@ func (s *State) buildTrace(evm *fakevm.FakeEVM, trace instrumentation.ExecutorTr fakevm.NewAccount(step.Contract.Caller), fakevm.NewAccount(step.Contract.Address), step.Contract.Value, step.Gas) - contract.CodeAddr = &step.Contract.Address + aux := step.Contract.Address + contract.CodeAddr = &aux // set Scope scope := &fakevm.ScopeContext{ @@ -582,6 +603,8 @@ func (s *State) buildTrace(evm *fakevm.FakeEVM, trace instrumentation.ExecutorTr var err error if reverted { err = fakevm.ErrExecutionReverted + } else if result.Err != nil { + err = result.Err } tracer.CaptureEnd(trace.Context.Output, trace.Context.GasUsed, err) restGas := trace.Context.Gas - trace.Context.GasUsed @@ -601,7 +624,7 @@ 
func (s *State) getGasUsed(internalTxContextStack *Stack[instrumentation.Interna gasUsed = previousStep.Contract.Gas - step.Gas } else { // otherwise we use the step gas - gasUsed = itCtx.RemainingGas - previousStep.Gas - previousStep.GasCost + gasUsed = itCtx.RemainingGas - previousStep.Gas + previousStep.GasCost } return gasUsed, nil } @@ -721,7 +744,7 @@ func (s *State) PreProcessTransaction(ctx context.Context, tx *types.Transaction response, err := s.internalProcessUnsignedTransaction(ctx, tx, sender, nil, false, dbTx) if err != nil { - return nil, err + return response, err } return response, nil @@ -766,21 +789,6 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type return nil, err } - stateRoot := l2BlockStateRoot - if l2BlockNumber != nil { - l2Block, err := s.GetL2BlockByNumber(ctx, *l2BlockNumber, dbTx) - if err != nil { - return nil, err - } - stateRoot = l2Block.Root() - } - - loadedNonce, err := s.tree.GetNonce(ctx, senderAddress, stateRoot.Bytes()) - if err != nil { - return nil, err - } - nonce := loadedNonce.Uint64() - // Get latest batch from the database to get globalExitRoot and Timestamp lastBatch := lastBatches[0] @@ -790,9 +798,15 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type previousBatch = lastBatches[1] } + stateRoot := l2BlockStateRoot timestamp := uint64(lastBatch.Timestamp.Unix()) - if l2BlockNumber != nil { + l2Block, err := s.GetL2BlockByNumber(ctx, *l2BlockNumber, dbTx) + if err != nil { + return nil, err + } + stateRoot = l2Block.Root() + latestL2BlockNumber, err := s.PostgresStorage.GetLastL2BlockNumber(ctx, dbTx) if err != nil { return nil, err @@ -804,6 +818,11 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type } forkID := s.GetForkIDByBatchNumber(lastBatch.BatchNumber) + loadedNonce, err := s.tree.GetNonce(ctx, senderAddress, stateRoot.Bytes()) + if err != nil { + return nil, err + } + nonce := loadedNonce.Uint64() batchL2Data, 
err := EncodeUnsignedTransaction(*tx, s.cfg.ChainID, &nonce, forkID) if err != nil { @@ -824,6 +843,7 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type UpdateMerkleTree: cFalse, ChainId: s.cfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } if noZKEVMCounters { @@ -840,17 +860,18 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type log.Debugf("internalProcessUnsignedTransaction[processBatchRequest.UpdateMerkleTree]: %v", processBatchRequest.UpdateMerkleTree) log.Debugf("internalProcessUnsignedTransaction[processBatchRequest.ChainId]: %v", processBatchRequest.ChainId) log.Debugf("internalProcessUnsignedTransaction[processBatchRequest.ForkId]: %v", processBatchRequest.ForkId) + log.Debugf("internalProcessUnsignedTransaction[processBatchRequest.ContextId]: %v", processBatchRequest.ContextId) // Send Batch to the Executor processBatchResponse, err := s.executorClient.ProcessBatch(ctx, processBatchRequest) if err != nil { - if status.Code(err) == codes.ResourceExhausted || processBatchResponse.Error == executor.ExecutorError(executor.ExecutorError_EXECUTOR_ERROR_DB_ERROR) { + if status.Code(err) == codes.ResourceExhausted || (processBatchResponse != nil && processBatchResponse.Error == executor.ExecutorError(executor.ExecutorError_EXECUTOR_ERROR_DB_ERROR)) { log.Errorf("error processing unsigned transaction ", err) for attempts < s.cfg.MaxResourceExhaustedAttempts { time.Sleep(s.cfg.WaitOnResourceExhaustion.Duration) log.Errorf("retrying to process unsigned transaction") processBatchResponse, err = s.executorClient.ProcessBatch(ctx, processBatchRequest) - if status.Code(err) == codes.ResourceExhausted || processBatchResponse.Error == executor.ExecutorError(executor.ExecutorError_EXECUTOR_ERROR_DB_ERROR) { + if status.Code(err) == codes.ResourceExhausted || (processBatchResponse != nil && processBatchResponse.Error == executor.ExecutorError(executor.ExecutorError_EXECUTOR_ERROR_DB_ERROR)) { 
log.Errorf("error processing unsigned transaction ", err) attempts++ continue @@ -860,7 +881,7 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type } if err != nil { - if status.Code(err) == codes.ResourceExhausted || processBatchResponse.Error == executor.ExecutorError(executor.ExecutorError_EXECUTOR_ERROR_DB_ERROR) { + if status.Code(err) == codes.ResourceExhausted || (processBatchResponse != nil && processBatchResponse.Error == executor.ExecutorError(executor.ExecutorError_EXECUTOR_ERROR_DB_ERROR)) { log.Error("reporting error as time out") return nil, runtime.ErrGRPCResourceExhaustedAsTimeout } @@ -873,7 +894,10 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type Description: fmt.Sprintf("error processing unsigned transaction %s: %v", tx.Hash(), err), } - err = s.eventLog.LogEvent(context.Background(), event) + err2 := s.eventLog.LogEvent(context.Background(), event) + if err2 != nil { + log.Errorf("error logging event %v", err2) + } log.Errorf("error processing unsigned transaction ", err) return nil, err } @@ -885,7 +909,7 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type return nil, err } - response, err := s.convertToProcessBatchResponse([]types.Transaction{*tx}, processBatchResponse) + response, err := s.convertToProcessBatchResponse(processBatchResponse) if err != nil { return nil, err } @@ -906,20 +930,20 @@ func (s *State) isContractCreation(tx *types.Transaction) bool { } // StoreTransaction is used by the sequencer and trusted state synchronizer to add process a transaction. 
-func (s *State) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *ProcessTransactionResponse, coinbase common.Address, timestamp uint64, dbTx pgx.Tx) error { +func (s *State) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *ProcessTransactionResponse, coinbase common.Address, timestamp uint64, egpLog *EffectiveGasPriceLog, dbTx pgx.Tx) (*types.Header, error) { if dbTx == nil { - return ErrDBTxNil + return nil, ErrDBTxNil } // if the transaction has an intrinsic invalid tx error it means // the transaction has not changed the state, so we don't store it if executor.IsIntrinsicError(executor.RomErrorCode(processedTx.RomError)) { - return nil + return nil, nil } lastL2Block, err := s.GetLastL2Block(ctx, dbTx) if err != nil { - return err + return nil, err } header := &types.Header{ @@ -942,12 +966,14 @@ func (s *State) StoreTransaction(ctx context.Context, batchNumber uint64, proces receipt.BlockHash = block.Hash() + storeTxsEGPData := []StoreTxEGPData{{EGPLog: egpLog, EffectivePercentage: uint8(processedTx.EffectivePercentage)}} + // Store L2 block and its transaction - if err := s.AddL2Block(ctx, batchNumber, block, receipts, uint8(processedTx.EffectivePercentage), dbTx); err != nil { - return err + if err := s.AddL2Block(ctx, batchNumber, block, receipts, storeTxsEGPData, dbTx); err != nil { + return nil, err } - return nil + return block.Header(), nil } // CheckSupersetBatchTransactions verifies that processedTransactions is a @@ -979,6 +1005,21 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common return 0, nil, err } + stateRoot := l2BlockStateRoot + if l2BlockNumber != nil { + l2Block, err := s.GetL2BlockByNumber(ctx, *l2BlockNumber, dbTx) + if err != nil { + return 0, nil, err + } + stateRoot = l2Block.Root() + } + + loadedNonce, err := s.tree.GetNonce(ctx, senderAddress, stateRoot.Bytes()) + if err != nil { + return 0, nil, err + } + nonce := loadedNonce.Uint64() + // Get latest batch 
from the database to get globalExitRoot and Timestamp lastBatch := lastBatches[0] @@ -994,7 +1035,7 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common } if lowEnd == ethTransferGas && transaction.To() != nil { - code, err := s.tree.GetCode(ctx, *transaction.To(), l2BlockStateRoot.Bytes()) + code, err := s.tree.GetCode(ctx, *transaction.To(), stateRoot.Bytes()) if err != nil { log.Warnf("error while getting transaction.to() code %v", err) } else if len(code) == 0 { @@ -1011,7 +1052,7 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common var availableBalance *big.Int if senderAddress != ZeroAddress { - senderBalance, err := s.tree.GetBalance(ctx, senderAddress, l2BlockStateRoot.Bytes()) + senderBalance, err := s.tree.GetBalance(ctx, senderAddress, stateRoot.Bytes()) if err != nil { if errors.Is(err, ErrNotFound) { senderBalance = big.NewInt(0) @@ -1045,9 +1086,9 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common // Run the transaction with the specified gas value. 
// Returns a status indicating if the transaction failed, if it was reverted and the accompanying error - testTransaction := func(gas uint64, shouldOmitErr bool) (failed, reverted bool, gasUsed uint64, returnValue []byte, err error) { + testTransaction := func(gas uint64, nonce uint64, shouldOmitErr bool) (failed, reverted bool, gasUsed uint64, returnValue []byte, err error) { tx := types.NewTx(&types.LegacyTx{ - Nonce: transaction.Nonce(), + Nonce: nonce, To: transaction.To(), Value: transaction.Value(), Gas: gas, @@ -1068,7 +1109,7 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common OldBatchNum: lastBatch.BatchNumber, BatchL2Data: batchL2Data, From: senderAddress.String(), - OldStateRoot: l2BlockStateRoot.Bytes(), + OldStateRoot: stateRoot.Bytes(), GlobalExitRoot: lastBatch.GlobalExitRoot.Bytes(), OldAccInputHash: previousBatch.AccInputHash.Bytes(), EthTimestamp: uint64(lastBatch.Timestamp.Unix()), @@ -1076,6 +1117,7 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common UpdateMerkleTree: cFalse, ChainId: s.cfg.ChainID, ForkId: forkID, + ContextId: uuid.NewString(), } log.Debugf("EstimateGas[processBatchRequest.OldBatchNum]: %v", processBatchRequest.OldBatchNum) @@ -1089,6 +1131,7 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common log.Debugf("EstimateGas[processBatchRequest.UpdateMerkleTree]: %v", processBatchRequest.UpdateMerkleTree) log.Debugf("EstimateGas[processBatchRequest.ChainId]: %v", processBatchRequest.ChainId) log.Debugf("EstimateGas[processBatchRequest.ForkId]: %v", processBatchRequest.ForkId) + log.Debugf("EstimateGas[processBatchRequest.ContextId]: %v", processBatchRequest.ContextId) txExecutionOnExecutorTime := time.Now() processBatchResponse, err := s.executorClient.ProcessBatch(ctx, processBatchRequest) @@ -1097,12 +1140,12 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common log.Errorf("error estimating gas: %v", err) 
return false, false, gasUsed, nil, err } - gasUsed = processBatchResponse.Responses[0].GasUsed if processBatchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(processBatchResponse.Error) s.eventLog.LogExecutorError(ctx, processBatchResponse.Error, processBatchRequest) return false, false, gasUsed, nil, err } + gasUsed = processBatchResponse.Responses[0].GasUsed // Check if an out of gas error happened during EVM execution if processBatchResponse.Responses[0].Error != executor.RomError_ROM_ERROR_NO_ERROR { @@ -1132,7 +1175,7 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common var totalExecutionTime time.Duration // Check if the highEnd is a good value to make the transaction pass - failed, reverted, gasUsed, returnValue, err := testTransaction(highEnd, false) + failed, reverted, gasUsed, returnValue, err := testTransaction(highEnd, nonce, false) log.Debugf("Estimate gas. Trying to execute TX with %v gas", highEnd) if failed { if reverted { @@ -1158,7 +1201,7 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common log.Debugf("Estimate gas. 
Trying to execute TX with %v gas", mid) - failed, reverted, _, _, testErr := testTransaction(mid, true) + failed, reverted, _, _, testErr := testTransaction(mid, nonce, true) executionTime := time.Since(txExecutionStart) totalExecutionTime += executionTime txExecutions = append(txExecutions, executionTime) diff --git a/state/types.go b/state/types.go index 0eb2b83a75..ba478a3b42 100644 --- a/state/types.go +++ b/state/types.go @@ -34,7 +34,6 @@ type ProcessBatchResponse struct { UsedZkCounters ZKCounters Responses []*ProcessTransactionResponse ExecutorError error - IsBatchProcessed bool ReadWriteAddresses map[common.Address]*InfoReadWrite IsRomLevelError bool IsExecutorLevelError bool @@ -79,6 +78,36 @@ type ProcessTransactionResponse struct { EffectiveGasPrice string //EffectivePercentage effective percentage used for the tx EffectivePercentage uint32 + //HasGaspriceOpcode flag to indicate if opcode 'GASPRICE' has been called + HasGaspriceOpcode bool + //HasBalanceOpcode flag to indicate if opcode 'BALANCE' has been called + HasBalanceOpcode bool +} + +// EffectiveGasPriceLog contains all the data needed to calculate the effective gas price for logging purposes +type EffectiveGasPriceLog struct { + Enabled bool + ValueFinal *big.Int + ValueFirst *big.Int + ValueSecond *big.Int + FinalDeviation *big.Int + MaxDeviation *big.Int + GasUsedFirst uint64 + GasUsedSecond uint64 + GasPrice *big.Int + Percentage uint8 + Reprocess bool + GasPriceOC bool + BalanceOC bool + L1GasPrice uint64 + L2GasPrice uint64 + Error string +} + +// StoreTxEGPData contains the data related to the effective gas price that needs to be stored when storing a tx +type StoreTxEGPData struct { + EGPLog *EffectiveGasPriceLog + EffectivePercentage uint8 } // ZKCounters counters for the tx diff --git a/synchronizer/block_range.go b/synchronizer/block_range.go new file mode 100644 index 0000000000..7aa7e9afdc --- /dev/null +++ b/synchronizer/block_range.go @@ -0,0 +1,60 @@ +package synchronizer + 
+import ( + "errors" + "fmt" +) + +const ( + latestBlockNumber uint64 = ^uint64(0) + invalidBlockNumber uint64 = uint64(0) +) + +var ( + errBlockRangeInvalidIsNil = errors.New("block Range Invalid: block range is nil") + errBlockRangeInvalidIsZero = errors.New("block Range Invalid: Invalid: from or to are 0") + errBlockRangeInvalidIsWrong = errors.New("block Range Invalid: fromBlock is greater than toBlock") +) + +type blockRange struct { + fromBlock uint64 + toBlock uint64 +} + +func blockNumberToString(b uint64) string { + if b == latestBlockNumber { + return "earliest" + } + if b == invalidBlockNumber { + return "invalid" + } + return fmt.Sprintf("%d", b) +} + +func (b *blockRange) String() string { + return fmt.Sprintf("[%s, %s]", blockNumberToString(b.fromBlock), blockNumberToString(b.toBlock)) +} + +func (b *blockRange) len() uint64 { + if b.toBlock == latestBlockNumber || b.fromBlock == latestBlockNumber { + return 0 + } + return b.toBlock - b.fromBlock + 1 +} + +func (b *blockRange) isValid() error { + if b == nil { + return errBlockRangeInvalidIsNil + } + if b.fromBlock == invalidBlockNumber || b.toBlock == invalidBlockNumber { + return errBlockRangeInvalidIsZero + } + if b.fromBlock > b.toBlock { + return errBlockRangeInvalidIsWrong + } + return nil +} + +func (b *blockRange) overlaps(br blockRange) bool { + return b.fromBlock <= br.toBlock && br.fromBlock <= b.toBlock +} diff --git a/synchronizer/config.go b/synchronizer/config.go index 5142aaef7b..774c55dd37 100644 --- a/synchronizer/config.go +++ b/synchronizer/config.go @@ -12,4 +12,53 @@ type Config struct { SyncChunkSize uint64 `mapstructure:"SyncChunkSize"` // TrustedSequencerURL is the rpc url to connect and sync the trusted state TrustedSequencerURL string `mapstructure:"TrustedSequencerURL"` + + // L1SynchronizationMode define how to synchronize with L1: + // - parallel: Request data to L1 in parallel, and process sequentially. 
The advantage is that executor is not blocked waiting for L1 data + // - sequential: Request data to L1 and execute + L1SynchronizationMode string `jsonschema:"enum=sequential,enum=parallel"` + // L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel') + L1ParallelSynchronization L1ParallelSynchronizationConfig +} + +// L1ParallelSynchronizationConfig Configuration for parallel mode (if UL1SynchronizationMode equal to 'parallel') +type L1ParallelSynchronizationConfig struct { + // MaxClients Number of clients used to synchronize with L1 + MaxClients uint64 + // MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync + // sugested twice of NumberOfParallelOfEthereumClients + MaxPendingNoProcessedBlocks uint64 + + // RequestLastBlockPeriod is the time to wait to request the + // last block to L1 to known if we need to retrieve more data. + // This value only apply when the system is synchronized + RequestLastBlockPeriod types.Duration + + // Consumer Configuration for the consumer of rollup information from L1 + PerformanceWarning L1PerformanceCheckConfig + + // RequestLastBlockTimeout Timeout for request LastBlock On L1 + RequestLastBlockTimeout types.Duration + // RequestLastBlockMaxRetries Max number of retries to request LastBlock On L1 + RequestLastBlockMaxRetries int + // StatisticsPeriod how ofter show a log with statistics (0 is disabled) + StatisticsPeriod types.Duration + // TimeOutMainLoop is the timeout for the main loop of the L1 synchronizer when is not updated + TimeOutMainLoop types.Duration + // RollupInfoRetriesSpacing is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1 + RollupInfoRetriesSpacing types.Duration + // FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized + FallbackToSequentialModeOnSynchronized 
bool +} + +// L1PerformanceCheckConfig Configuration for the consumer of rollup information from L1 +type L1PerformanceCheckConfig struct { + // AceptableInacctivityTime is the expected maximum time that the consumer + // could wait until new data is produced. If the time is greater it emmit a log to warn about + // that. The idea is keep working the consumer as much as possible, so if the producer is not + // fast enought then you could increse the number of parallel clients to sync with L1 + AceptableInacctivityTime types.Duration + // ApplyAfterNumRollupReceived is the number of iterations to + // start checking the time waiting for new rollup info data + ApplyAfterNumRollupReceived int } diff --git a/synchronizer/control_flush_id.go b/synchronizer/control_flush_id.go new file mode 100644 index 0000000000..2d6b176419 --- /dev/null +++ b/synchronizer/control_flush_id.go @@ -0,0 +1,133 @@ +package synchronizer + +import ( + "context" + "fmt" + "time" + + "github.com/0xPolygonHermez/zkevm-node/event" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/jackc/pgx/v4" +) + +// FlushIDController is an interface to control the flushID and ProverID +type FlushIDController interface { + // UpdateAndCheckProverID check the incomming proverID from executor with the last one, if no match finalize synchronizer + // if there are no previous one it keep this value as the current one + UpdateAndCheckProverID(proverID string) + // BlockUntilLastFlushIDIsWritten blocks until the last flushID is written in DB. 
It keep in a loop asking to executor + // the flushid written, also check ProverID + BlockUntilLastFlushIDIsWritten(dbTx pgx.Tx) error + // SetPendingFlushIDAndCheckProverID set the pending flushID to be written in DB and check proverID + SetPendingFlushIDAndCheckProverID(flushID uint64, proverID string, callDescription string) +} + +// ClientFlushIDControl is a struct to control the flushID and ProverID, implements FlushIDController interface +type ClientFlushIDControl struct { + state stateInterface + ctx context.Context + eventLog *event.EventLog + + // Id of the 'process' of the executor. Each time that it starts this value changes + // This value is obtained from the call state.GetStoredFlushID + // It starts as an empty string and it is filled in the first call + // later the value is checked to be the same (in function checkFlushID) + proverID string + // Previous value returned by state.GetStoredFlushID, is used for decide if write a log or not + previousExecutorFlushID uint64 + latestFlushID uint64 + // If true the lastFlushID is stored in DB and we don't need to check again + latestFlushIDIsFulfilled bool +} + +// NewFlushIDController create a new struct ClientFlushIDControl +func NewFlushIDController(state stateInterface, ctx context.Context, eventLog *event.EventLog) *ClientFlushIDControl { + return &ClientFlushIDControl{ + state: state, + ctx: ctx, + eventLog: eventLog, + proverID: "", + previousExecutorFlushID: 0, + } +} + +// SetPendingFlushIDAndCheckProverID set the pending flushID to be written in DB and check proverID +func (s *ClientFlushIDControl) SetPendingFlushIDAndCheckProverID(flushID uint64, proverID string, callDescription string) { + log.Infof("new executor [%s] pending flushID: %d", callDescription, flushID) + s.latestFlushID = flushID + s.latestFlushIDIsFulfilled = false + s.UpdateAndCheckProverID(proverID) +} + +// UpdateAndCheckProverID check the incomming proverID from executor with the last one, if no match finalize synchronizer +// 
if there are no previous one it keep this value as the current one +func (s *ClientFlushIDControl) UpdateAndCheckProverID(proverID string) { + if s.proverID == "" { + log.Infof("Current proverID is %s", proverID) + s.proverID = proverID + return + } + if s.proverID != proverID { + event := &event.Event{ + ReceivedAt: time.Now(), + Source: event.Source_Node, + Component: event.Component_Synchronizer, + Level: event.Level_Critical, + EventID: event.EventID_SynchronizerRestart, + Description: fmt.Sprintf("proverID changed from %s to %s, restarting Synchonizer ", s.proverID, proverID), + } + + err := s.eventLog.LogEvent(context.Background(), event) + if err != nil { + log.Errorf("error storing event payload: %v", err) + } + + log.Fatal("restarting synchronizer because executor have restarted (old=%s, new=%s)", s.proverID, proverID) + } +} + +// BlockUntilLastFlushIDIsWritten blocks until the last flushID is written in DB. It keep in a loop asking to executor +// the flushid written, also check ProverID +func (s *ClientFlushIDControl) BlockUntilLastFlushIDIsWritten(dbTx pgx.Tx) error { + if s.latestFlushIDIsFulfilled { + log.Debugf("no pending flushID, nothing to do. Last pending fulfilled flushID: %d, last executor flushId received: %d", s.latestFlushID, s.latestFlushID) + return nil + } + storedFlushID, proverID, err := s.state.GetStoredFlushID(s.ctx) + if err != nil { + log.Error("error getting stored flushID. 
Error: ", err) + return err + } + if (s.previousExecutorFlushID != storedFlushID) || (s.proverID != proverID) { + log.Infof("executor vs local: flushid=%d/%d, proverID=%s/%s", storedFlushID, + s.latestFlushID, proverID, s.proverID) + } else { + log.Debugf("executor vs local: flushid=%d/%d, proverID=%s/%s", storedFlushID, + s.latestFlushID, proverID, s.proverID) + } + s.UpdateAndCheckProverID(proverID) + log.Debugf("storedFlushID (executor reported): %d, latestFlushID (pending): %d", storedFlushID, s.latestFlushID) + if storedFlushID < s.latestFlushID { + log.Infof("Synchornizer BLOCKED!: Wating for the flushID to be stored. FlushID to be stored: %d. Latest flushID stored: %d", + s.latestFlushID, + storedFlushID) + iteration := 0 + start := time.Now() + for storedFlushID < s.latestFlushID { + log.Debugf("Waiting for the flushID to be stored. FlushID to be stored: %d. Latest flushID stored: %d iteration:%d elpased:%s", + s.latestFlushID, storedFlushID, iteration, time.Since(start)) + time.Sleep(100 * time.Millisecond) //nolint:gomnd + storedFlushID, _, err = s.state.GetStoredFlushID(s.ctx) + if err != nil { + log.Error("error getting stored flushID. 
Error: ", err) + return err + } + iteration++ + } + log.Infof("Synchornizer resumed, flushID stored: %d", s.latestFlushID) + } + log.Infof("Pending Flushid fullfiled: %d, executor have write %d", s.latestFlushID, storedFlushID) + s.latestFlushIDIsFulfilled = true + s.previousExecutorFlushID = storedFlushID + return nil +} diff --git a/synchronizer/ext_control.go b/synchronizer/ext_control.go new file mode 100644 index 0000000000..3cee92158c --- /dev/null +++ b/synchronizer/ext_control.go @@ -0,0 +1,129 @@ +package synchronizer + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/0xPolygonHermez/zkevm-node/log" +) + +const ( + externalControlFilename = "/tmp/synchronizer_in" + filePermissions = 0644 + sleepTimeToReadFile = 500 * time.Millisecond +) + +// This is a local end-point in filesystem to send commands to a running synchronizer +// this is used for debugging purposes, to provide a way to reproduce some situations that are difficult +// to reproduce in a real test. 
+// It accept next commands: +// l1_producer_stop: stop producer +// l1_orchestrator_reset: reset orchestrator to a given block number +// +// example of usage (first you need to run the service): +// echo "l1_producer_stop" >> /tmp/synchronizer_in +// echo "l1_orchestrator_reset|8577060" >> /tmp/synchronizer_in +type externalControl struct { + producer *l1RollupInfoProducer + orquestrator *l1SyncOrchestration +} + +func newExternalControl(producer *l1RollupInfoProducer, orquestrator *l1SyncOrchestration) *externalControl { + return &externalControl{producer: producer, orquestrator: orquestrator} +} + +func (e *externalControl) start() { + log.Infof("EXT:start: starting external control opening %s", externalControlFilename) + file, err := os.OpenFile(externalControlFilename, os.O_APPEND|os.O_CREATE|os.O_RDONLY, filePermissions) + if err != nil { + log.Warnf("EXT:start:error opening file %s: %v", externalControlFilename, err) + return + } + _, err = file.Seek(0, io.SeekEnd) + if err != nil { + log.Warnf("EXT:start:error seeking file %s: %v", externalControlFilename, err) + } + go e.readFile(file) +} + +// https://medium.com/@arunprabhu.1/tailing-a-file-in-golang-72944204f22b +func (e *externalControl) readFile(file *os.File) { + defer file.Close() + reader := bufio.NewReader(file) + for { + for { + line, err := reader.ReadString('\n') + + if err != nil { + if err == io.EOF { + // without this sleep you would hogg the CPU + time.Sleep(sleepTimeToReadFile) + continue + } + + break + } + log.Infof("EXT:readFile: new command: %s", line) + e.process(line) + } + } +} + +func (e *externalControl) process(line string) { + cmd := strings.Split(line, "|") + if len(cmd) < 1 { + return + } + switch strings.TrimSpace(cmd[0]) { + case "l1_producer_stop": + e.cmdL1ProducerStop(cmd[1:]) + case "l1_orchestrator_reset": + e.cmdL1OrchestratorReset(cmd[1:]) + case "l1_orchestrator_stop": + e.cmdL1OrchestratorAbort(cmd[1:]) + default: + log.Warnf("EXT:process: unknown command: %s", 
cmd[0]) + } +} + +func (e *externalControl) cmdL1OrchestratorReset(args []string) { + log.Infof("EXT:cmdL1OrchestratorReset: %s", args) + if len(args) < 1 { + log.Infof("EXT:cmdL1OrchestratorReset: missing block number") + return + } + blockNumber, err := strconv.ParseUint(strings.TrimSpace(args[0]), 10, 64) + if err != nil { + log.Infof("EXT:cmdL1OrchestratorReset: error parsing block number: %s", err) + return + } + log.Infof("EXT:cmdL1OrchestratorReset: calling orchestrator reset(%d)", blockNumber) + e.orquestrator.reset(blockNumber) + log.Infof("EXT:cmdL1OrchestratorReset: calling orchestrator reset(%d) returned", blockNumber) +} + +func (e *externalControl) cmdL1OrchestratorAbort(args []string) { + log.Infof("EXT:cmdL1OrchestratorAbort: %s", args) + if e.orquestrator == nil { + log.Infof("EXT:cmdL1OrchestratorAbort: orquestrator is nil") + return + } + log.Infof("EXT:cmdL1OrchestratorAbort: calling orquestrator stop") + e.orquestrator.abort() + log.Infof("EXT:cmdL1OrchestratorAbort: calling orquestrator stop returned") +} + +func (e *externalControl) cmdL1ProducerStop(args []string) { + log.Infof("EXT:cmdL1Stop: %s", args) + if e.producer == nil { + log.Infof("EXT:cmdL1Stop: producer is nil") + return + } + log.Infof("EXT:cmdL1Stop: calling producer stop") + e.producer.Stop() + log.Infof("EXT:cmdL1Stop: calling producer stop returned") +} diff --git a/synchronizer/generic_cache.go b/synchronizer/generic_cache.go new file mode 100644 index 0000000000..8696955a76 --- /dev/null +++ b/synchronizer/generic_cache.go @@ -0,0 +1,104 @@ +package synchronizer + +import ( + "time" +) + +type cacheItem[T any] struct { + value T + validTime time.Time +} + +// Cache is a generic cache implementation with TOL (time of live) for each item +type Cache[K comparable, T any] struct { + data map[K]cacheItem[T] // map[K]T is a map with key type K and value type T + timeOfLiveItems time.Duration + timerProvider TimeProvider +} + +// NewCache creates a new cache +func NewCache[K 
comparable, T any](timerProvider TimeProvider, timeOfLiveItems time.Duration) *Cache[K, T] { + return &Cache[K, T]{ + data: make(map[K]cacheItem[T]), + timeOfLiveItems: timeOfLiveItems, + timerProvider: timerProvider} +} + +// Get returns the value of the key and true if the key exists and is not outdated +func (c *Cache[K, T]) Get(key K) (T, bool) { + item, ok := c.data[key] + if !ok { + var zeroValue T + return zeroValue, false + } + // If the item is outdated, return zero value and remove from cache + if item.validTime.Before(c.timerProvider.Now()) { + delete(c.data, key) + var zeroValue T + return zeroValue, false + } + // We extend the life of the item if it is used + item.validTime = c.timerProvider.Now().Add(c.timeOfLiveItems) + c.data[key] = item + return item.value, true +} + +// Set sets the value of the key +func (c *Cache[K, T]) Set(key K, value T) { + c.data[key] = cacheItem[T]{value: value, validTime: c.timerProvider.Now().Add(c.timeOfLiveItems)} +} + +// Delete deletes the key from the cache +func (c *Cache[K, T]) Delete(key K) { + delete(c.data, key) +} + +// Len returns the number of items in the cache +func (c *Cache[K, T]) Len() int { + return len(c.data) +} + +// Keys returns the keys of the cache +func (c *Cache[K, T]) Keys() []K { + keys := make([]K, 0, len(c.data)) + for k := range c.data { + keys = append(keys, k) + } + return keys +} + +// Values returns the values of the cache +func (c *Cache[K, T]) Values() []T { + values := make([]T, 0, len(c.data)) + for _, v := range c.data { + values = append(values, v.value) + } + return values +} + +// Clear clears the cache +func (c *Cache[K, T]) Clear() { + c.data = make(map[K]cacheItem[T]) +} + +// DeleteOutdated deletes the outdated items from the cache +func (c *Cache[K, T]) DeleteOutdated() { + for k, v := range c.data { + if isOutdated(v.validTime, c.timerProvider.Now()) { + delete(c.data, k) + } + } +} + +func isOutdated(validTime time.Time, now time.Time) bool { + return 
validTime.Before(now) +} + +// RenewEntry renews the entry of the key +func (c *Cache[K, T]) RenewEntry(key K, validTime time.Time) { + item, ok := c.data[key] + if ok { + item.validTime = c.timerProvider.Now().Add(c.timeOfLiveItems) + c.data[key] = item + } +} diff --git a/synchronizer/generic_cache_test.go b/synchronizer/generic_cache_test.go new file mode 100644 index 0000000000..b18dc81226 --- /dev/null +++ b/synchronizer/generic_cache_test.go @@ -0,0 +1,176 @@ +package synchronizer + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type mockTimerProvider struct { + now time.Time +} + +func (m *mockTimerProvider) Now() time.Time { + return m.now +} +func TestCacheGet(t *testing.T) { + timerProvider := &mockTimerProvider{} + cache := NewCache[string, string](timerProvider, time.Hour) + + // Add an item to the cache + cache.Set("key1", "value1") + + // Test that the item can be retrieved from the cache + value, ok := cache.Get("key1") + assert.True(t, ok) + assert.Equal(t, "value1", value) + + // Test that an item that doesn't exist in the cache returns false + _, ok = cache.Get("key2") + assert.False(t, ok) + + // Test that an item that has expired is removed from the cache + timerProvider.now = time.Now().Add(2 * time.Hour) + _, ok = cache.Get("key1") + assert.False(t, ok) +} + +func TestCacheSet(t *testing.T) { + timerProvider := &mockTimerProvider{} + cache := NewCache[string, string](timerProvider, time.Hour) + + // Add an item to the cache + cache.Set("key1", "value1") + + // Test that the item can be retrieved from the cache + value, ok := cache.Get("key1") + assert.True(t, ok) + assert.Equal(t, "value1", value) + + // Test that an item that doesn't exist in the cache returns false + _, ok = cache.Get("key2") + assert.False(t, ok) + + // Test that an item that has expired is removed from the cache + timerProvider.now = time.Now().Add(2 * time.Hour) + _, ok = cache.Get("key1") + assert.False(t, ok) + + // Test that an item can 
be updated in the cache + cache.Set("key1", "value2") + value, ok = cache.Get("key1") + assert.True(t, ok) + assert.Equal(t, "value2", value) + + // Test that a new item can be added to the cache + cache.Set("key2", "value3") + value, ok = cache.Get("key2") + assert.True(t, ok) + assert.Equal(t, "value3", value) +} + +func TestCacheDelete(t *testing.T) { + timerProvider := &mockTimerProvider{} + cache := NewCache[string, string](timerProvider, time.Hour) + + // Add an item to the cache + cache.Set("key1", "value1") + + // Delete the item from the cache + cache.Delete("key1") + + // Test that the item has been removed from the cache + _, ok := cache.Get("key1") + assert.False(t, ok) + + // Test that deleting a non-existent item does not cause an error + cache.Delete("key2") +} +func TestCacheClear(t *testing.T) { + timerProvider := &mockTimerProvider{} + cache := NewCache[string, string](timerProvider, time.Hour) + + // Add some items to the cache + cache.Set("key1", "value1") + cache.Set("key2", "value2") + cache.Set("key3", "value3") + + // Clear the cache + cache.Clear() + + // Test that all items have been removed from the cache + _, ok := cache.Get("key1") + assert.False(t, ok) + _, ok = cache.Get("key2") + assert.False(t, ok) + _, ok = cache.Get("key3") + assert.False(t, ok) +} + +func TestCacheDeleteOutdated(t *testing.T) { + timerProvider := &mockTimerProvider{} + cache := NewCache[string, string](timerProvider, time.Hour) + now := time.Now() + timerProvider.now = now + // Add some items to the cache + cache.Set("key1", "value1") + cache.Set("key2", "value2") + timerProvider.now = now.Add(2 * time.Hour) + cache.Set("key3", "value3") + + // Call DeleteOutdated to remove the outdated items + cache.DeleteOutdated() + assert.Equal(t, 1, cache.Len()) + + // Test that key1 and key2 have been removed, but key3 is still present + _, ok := cache.Get("key1") + assert.False(t, ok) + _, ok = cache.Get("key2") + assert.False(t, ok) + _, ok = cache.Get("key3") + 
assert.True(t, ok) +} + +func TestCacheGetDoesntReturnsOutdatedValues(t *testing.T) { + timerProvider := &mockTimerProvider{} + cache := NewCache[string, string](timerProvider, time.Hour) + now := time.Now() + timerProvider.now = now + // Add some items to the cache + cache.Set("key1", "value1") + cache.Set("key2", "value2") + timerProvider.now = now.Add(2 * time.Hour) + cache.Set("key3", "value3") + + // Test that key1 and key2 are outdated, but key3 is still present + _, ok := cache.Get("key1") + assert.False(t, ok) + _, ok = cache.Get("key2") + assert.False(t, ok) + _, ok = cache.Get("key3") + assert.True(t, ok) +} + +func TestCacheGetExtendsTimeOfLiveOfItems(t *testing.T) { + timerProvider := &mockTimerProvider{} + cache := NewCache[string, string](timerProvider, time.Hour) + now := time.Now() + timerProvider.now = now + // Add some items to the cache + cache.Set("key1", "value1") + cache.Set("key2", "value2") + timerProvider.now = now.Add(59 * time.Minute) + _, ok := cache.Get("key1") + assert.True(t, ok) + timerProvider.now = now.Add(61 * time.Minute) + cache.Set("key3", "value3") + + // Test that key1 have been extended, key2 are outdated, and key3 is still present + _, ok = cache.Get("key1") + assert.True(t, ok) + _, ok = cache.Get("key2") + assert.False(t, ok) + _, ok = cache.Get("key3") + assert.True(t, ok) +} diff --git a/synchronizer/interfaces.go b/synchronizer/interfaces.go index cfbb92e01a..6a53e22a8d 100644 --- a/synchronizer/interfaces.go +++ b/synchronizer/interfaces.go @@ -14,8 +14,8 @@ import ( "github.com/jackc/pgx/v4" ) -// ethermanInterface contains the methods required to interact with ethereum. -type ethermanInterface interface { +// EthermanInterface contains the methods required to interact with ethereum. 
+type EthermanInterface interface { HeaderByNumber(ctx context.Context, number *big.Int) (*ethTypes.Header, error) GetRollupInfoByBlockRange(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) EthBlockByNumber(ctx context.Context, blockNumber uint64) (*ethTypes.Block, error) @@ -46,7 +46,7 @@ type stateInterface interface { OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error CloseBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error ProcessBatch(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) - StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, dbTx pgx.Tx) error + StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, egpLog *state.EffectiveGasPriceLog, dbTx pgx.Tx) (*ethTypes.Header, error) GetStateRootByBatchNumber(ctx context.Context, batchNum uint64, dbTx pgx.Tx) (common.Hash, error) ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx) (*state.VerifiedBatch, error) @@ -55,8 +55,8 @@ type stateInterface interface { AddAccumulatedInputHash(ctx context.Context, batchNum uint64, accInputHash common.Hash, dbTx pgx.Tx) error AddTrustedReorg(ctx context.Context, trustedReorg *state.TrustedReorg, dbTx pgx.Tx) error GetReorgedTransactions(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]*ethTypes.Transaction, error) - ResetForkID(ctx context.Context, batchNumber, forkID uint64, version string, dbTx pgx.Tx) error - GetForkIDTrustedReorgCount(ctx context.Context, forkID uint64, version string, dbTx pgx.Tx) (uint64, error) + ResetForkID(ctx context.Context, 
batchNumber uint64, dbTx pgx.Tx) error + GetForkIDs(ctx context.Context, dbTx pgx.Tx) ([]state.ForkIDInterval, error) AddForkIDInterval(ctx context.Context, newForkID state.ForkIDInterval, dbTx pgx.Tx) error SetLastBatchInfoSeenOnEthereum(ctx context.Context, lastBatchNumberSeen, lastBatchNumberVerified uint64, dbTx pgx.Tx) error SetInitSyncBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error diff --git a/synchronizer/l1_common.go b/synchronizer/l1_common.go new file mode 100644 index 0000000000..69dc968989 --- /dev/null +++ b/synchronizer/l1_common.go @@ -0,0 +1,61 @@ +package synchronizer + +import ( + "context" + "time" + + "golang.org/x/exp/constraints" +) + +// TDOO: There is no min/max function in golang?? +func min[T constraints.Ordered](a, b T) T { + if a < b { + return a + } + return b +} + +func max[T constraints.Ordered](a, b T) T { + if a > b { + return a + } + return b +} + +type contextWithCancel struct { + ctx context.Context + cancelCtx context.CancelFunc +} + +func (c *contextWithCancel) Done() <-chan struct{} { + return c.ctx.Done() +} + +func (c *contextWithCancel) isInvalid() bool { + return c.ctx == nil || c.cancelCtx == nil || (c.ctx != nil && c.ctx.Err() != nil) +} + +func (c *contextWithCancel) createWithCancel(ctxParent context.Context) { + c.ctx, c.cancelCtx = context.WithCancel(ctxParent) +} + +func (c *contextWithCancel) createWithTimeout(ctxParent context.Context, timeout time.Duration) { + c.ctx, c.cancelCtx = context.WithTimeout(ctxParent, timeout) +} + +func (c *contextWithCancel) cancel() { + if c.cancelCtx != nil { + c.cancelCtx() + } +} + +func newContextWithTimeout(ctxParent context.Context, timeout time.Duration) contextWithCancel { + ctx := contextWithCancel{} + ctx.createWithTimeout(ctxParent, timeout) + return ctx +} + +func newContextWithNone(ctxParent context.Context) contextWithCancel { + ctx := contextWithCancel{ctx: ctxParent} + return ctx +} diff --git a/synchronizer/l1_data_message.go 
b/synchronizer/l1_data_message.go new file mode 100644 index 0000000000..575f9fb833 --- /dev/null +++ b/synchronizer/l1_data_message.go @@ -0,0 +1,98 @@ +// package synchronizer +// This file contains common struct definitions and functions used by L1 sync. +// l1DataMessage : struct to hold L1 rollup info data package send from producer to consumer +// +// This packages could contain data or control information. +// - data is a real rollup info +// - control send actions to consumer +// +// Constructors: +// - newL1PackageDataControl: create a l1PackageData with only control information +// - newL1PackageData: create a l1PackageData with data and control information +package synchronizer + +import ( + "fmt" + + "github.com/0xPolygonHermez/zkevm-node/log" +) + +// l1SyncMessage : struct to hold L1 rollup info data package +// It could contain data or control information, or both. +// A control package is used to send actions to consumer or to notify that producer is fully synced. +type l1SyncMessage struct { + // dataIsValid : true if data field is valid + dataIsValid bool + // data: is the rollup info data + data rollupInfoByBlockRangeResult + // ctrlIsValid : true if ctrl field is valid + ctrlIsValid bool + // ctrl : control package, it send actions to consumer + ctrl l1ConsumerControl +} + +type l1ConsumerControl struct { + event eventEnum +} + +type eventEnum int8 + +const ( + eventNone eventEnum = 0 + eventStop eventEnum = 1 + eventProducerIsFullySynced eventEnum = 2 +) + +func newL1SyncMessageControl(event eventEnum) *l1SyncMessage { + return &l1SyncMessage{ + dataIsValid: false, + ctrlIsValid: true, + ctrl: l1ConsumerControl{ + event: event, + }, + } +} + +func newL1SyncMessageData(result *rollupInfoByBlockRangeResult) *l1SyncMessage { + if result == nil { + log.Fatal("newL1PackageDataFromResult: result is nil, the idea of this func is create packages with data") + } + return &l1SyncMessage{ + dataIsValid: true, + data: *result, + ctrlIsValid: false, + } +} 
+ +func (a eventEnum) String() string { + switch a { + case eventNone: + return "actionNone" + case eventStop: + return "actionStop" + case eventProducerIsFullySynced: + return "eventIsFullySynced" + default: + return "actionUnknown" + } +} + +func (l *l1ConsumerControl) String() string { + return fmt.Sprintf("action:%s", l.event.String()) +} + +func (l *l1SyncMessage) toStringBrief() string { + res := "" + if l.dataIsValid { + res += fmt.Sprintf("data:%v ", l.data.toStringBrief()) + } else { + res += " NO_DATA " + } + if l.ctrlIsValid { + res += fmt.Sprintf("ctrl:%v ", l.ctrl.String()) + } else { + res += " NO_CTRL " + } + + return res +} diff --git a/synchronizer/l1_filter_send_orderer_results_to_synchronizer.go b/synchronizer/l1_filter_send_orderer_results_to_synchronizer.go new file mode 100644 index 0000000000..d504775896 --- /dev/null +++ b/synchronizer/l1_filter_send_orderer_results_to_synchronizer.go @@ -0,0 +1,127 @@ +// Impelements + +package synchronizer + +import ( + "fmt" + "sync" + + "github.com/0xPolygonHermez/zkevm-node/log" + "golang.org/x/exp/slices" +) + +type filterToSendOrdererResultsToConsumer struct { + mutex sync.Mutex + lastBlockOnSynchronizer uint64 + // pendingResults is a queue of results that are waiting to be sent to the consumer + pendingResults []l1SyncMessage +} + +func newFilterToSendOrdererResultsToConsumer(lastBlockOnSynchronizer uint64) *filterToSendOrdererResultsToConsumer { + return &filterToSendOrdererResultsToConsumer{lastBlockOnSynchronizer: lastBlockOnSynchronizer} +} + +func (s *filterToSendOrdererResultsToConsumer) ToStringBrief() string { + return fmt.Sprintf("lastBlockSenedToSync[%v] len(pending_results)[%d]", + s.lastBlockOnSynchronizer, len(s.pendingResults)) +} + +func (s *filterToSendOrdererResultsToConsumer) numItemBlockedInQueue() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.pendingResults) +} +func (s *filterToSendOrdererResultsToConsumer) Reset(lastBlockOnSynchronizer uint64) { + 
s.mutex.Lock() + defer s.mutex.Unlock() + s.lastBlockOnSynchronizer = lastBlockOnSynchronizer + s.pendingResults = []l1SyncMessage{} +} + +func (s *filterToSendOrdererResultsToConsumer) Filter(data l1SyncMessage) []l1SyncMessage { + s.mutex.Lock() + defer s.mutex.Unlock() + s.checkValidDataUnsafe(&data) + s.addPendingResultUnsafe(&data) + res := []l1SyncMessage{} + res = s.sendResultIfPossibleUnsafe(res) + return res +} + +func (s *filterToSendOrdererResultsToConsumer) checkValidDataUnsafe(result *l1SyncMessage) { + if result.dataIsValid { + if result.data.blockRange.fromBlock < s.lastBlockOnSynchronizer { + log.Warnf("It's not possible to receive a old block [%s] range that have been already send to synchronizer. Ignoring it. status:[%s]", + result.data.blockRange.String(), s.ToStringBrief()) + return + } + + if !s.matchNextBlockUnsafe(&result.data) { + log.Debugf("The range %s is not the next block to be send, adding to pending results status:%s", + result.data.blockRange.String(), s.ToStringBrief()) + } + } +} + +// sendResultIfPossibleUnsafe returns true is have send any result +func (s *filterToSendOrdererResultsToConsumer) sendResultIfPossibleUnsafe(previous []l1SyncMessage) []l1SyncMessage { + resultListPackages := previous + indexToRemove := []int{} + send := false + for i := range s.pendingResults { + result := s.pendingResults[i] + if result.dataIsValid { + if s.matchNextBlockUnsafe(&result.data) { + send = true + resultListPackages = append(resultListPackages, result) + highestBlockNumber := result.data.getHighestBlockNumberInResponse() + + s.setLastBlockOnSynchronizerCorrespondingLatBlockRangeSendUnsafe(highestBlockNumber) + indexToRemove = append(indexToRemove, i) + break + } + } else { + // If it's a ctrl package only the first one could be send because it means that the previous one have been send + if i == 0 { + resultListPackages = append(resultListPackages, result) + indexToRemove = append(indexToRemove, i) + send = true + break + } + } + } + 
s.removeIndexFromPendingResultsUnsafe(indexToRemove) + + if send { + // Try to send more results + resultListPackages = s.sendResultIfPossibleUnsafe(resultListPackages) + } + return resultListPackages +} + +func (s *filterToSendOrdererResultsToConsumer) removeIndexFromPendingResultsUnsafe(indexToRemove []int) { + newPendingResults := []l1SyncMessage{} + for j := range s.pendingResults { + if slices.Contains(indexToRemove, j) { + continue + } + newPendingResults = append(newPendingResults, s.pendingResults[j]) + } + s.pendingResults = newPendingResults +} + +func (s *filterToSendOrdererResultsToConsumer) setLastBlockOnSynchronizerCorrespondingLatBlockRangeSendUnsafe(highestBlockNumber uint64) { + if highestBlockNumber == invalidBlockNumber { + return + } + log.Debug("Moving lastBlockSend from ", s.lastBlockOnSynchronizer, " to ", highestBlockNumber) + s.lastBlockOnSynchronizer = highestBlockNumber +} + +func (s *filterToSendOrdererResultsToConsumer) matchNextBlockUnsafe(results *rollupInfoByBlockRangeResult) bool { + return results.blockRange.fromBlock == s.lastBlockOnSynchronizer+1 +} + +func (s *filterToSendOrdererResultsToConsumer) addPendingResultUnsafe(results *l1SyncMessage) { + s.pendingResults = append(s.pendingResults, *results) +} diff --git a/synchronizer/l1_filter_send_orderer_results_to_synchronizer_test.go b/synchronizer/l1_filter_send_orderer_results_to_synchronizer_test.go new file mode 100644 index 0000000000..86a43db86e --- /dev/null +++ b/synchronizer/l1_filter_send_orderer_results_to_synchronizer_test.go @@ -0,0 +1,332 @@ +package synchronizer + +import ( + "math/big" + "testing" + + "github.com/0xPolygonHermez/zkevm-node/etherman" + types "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +func TestSORMulticaseWithReset(t *testing.T) { + tcs := []struct { + description string + lastBlock uint64 + packages []l1SyncMessage + expected []l1SyncMessage + expectedlastBlockOnSynchronizer uint64 + 
resetOnPackageNumber int + resetToBlock uint64 + }{ + { + description: "inverse_br", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(131, 141), + *newDataPackage(120, 130), + *newDataPackage(101, 119)}, + expected: []l1SyncMessage{ + *newDataPackage(101, 119), + *newDataPackage(120, 130), + }, + expectedlastBlockOnSynchronizer: 130, + resetOnPackageNumber: 1, + resetToBlock: 100, + }, + { + description: "crtl_linked_to_br", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(131, 141), + *newActionPackage(eventNone), + *newDataPackage(120, 130), + *newDataPackage(101, 119)}, + expected: []l1SyncMessage{ + *newActionPackage(eventNone), + *newDataPackage(101, 119), + *newDataPackage(120, 130), + }, + expectedlastBlockOnSynchronizer: 130, + resetOnPackageNumber: 1, + resetToBlock: 100, + }, + } + for _, tc := range tcs { + t.Run(tc.description, func(t *testing.T) { + sut := newFilterToSendOrdererResultsToConsumer(tc.lastBlock) + sendData := []l1SyncMessage{} + for i, p := range tc.packages { + if i == tc.resetOnPackageNumber { + sut.Reset(tc.resetToBlock) + } + dataToSend := sut.Filter(p) + sendData = append(sendData, dataToSend...) 
+ } + + require.Equal(t, tc.expected, sendData) + require.Equal(t, tc.expectedlastBlockOnSynchronizer, sut.lastBlockOnSynchronizer) + }) + } +} + +func TestSORMulticase(t *testing.T) { + tcs := []struct { + description string + lastBlock uint64 + packages []l1SyncMessage + expected []l1SyncMessage + excpectedLastBlockOnSynchronizer uint64 + }{ + { + description: "empty_case", + lastBlock: 100, + packages: []l1SyncMessage{}, + expected: []l1SyncMessage{}, + excpectedLastBlockOnSynchronizer: 100, + }, + { + description: "just_ctrl", + lastBlock: 100, + packages: []l1SyncMessage{*newActionPackage(eventNone)}, + expected: []l1SyncMessage{*newActionPackage(eventNone)}, + excpectedLastBlockOnSynchronizer: 100, + }, + { + description: "just_br", + lastBlock: 100, + packages: []l1SyncMessage{*newDataPackage(101, 119)}, + expected: []l1SyncMessage{*newDataPackage(101, 119)}, + excpectedLastBlockOnSynchronizer: 119, + }, + { + description: "just_br_missing_intermediate_block", + lastBlock: 100, + packages: []l1SyncMessage{*newDataPackage(102, 119)}, + expected: []l1SyncMessage{}, + excpectedLastBlockOnSynchronizer: 100, + }, + { + description: "inverse_br", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(131, 141), + *newDataPackage(120, 130), + *newDataPackage(101, 119)}, + expected: []l1SyncMessage{ + *newDataPackage(101, 119), + *newDataPackage(120, 130), + *newDataPackage(131, 141), + }, + excpectedLastBlockOnSynchronizer: 141, + }, + { + description: "crtl_linked_to_br", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(131, 141), + *newActionPackage(eventNone), + *newDataPackage(120, 130), + *newDataPackage(101, 119)}, + expected: []l1SyncMessage{ + *newDataPackage(101, 119), + *newDataPackage(120, 130), + *newDataPackage(131, 141), + *newActionPackage(eventNone), + }, + excpectedLastBlockOnSynchronizer: 141, + }, + { + description: "crtl_linked_to_last_br", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(111, 120), + 
*newDataPackage(121, 130), + *newDataPackage(131, 140), + *newActionPackage(eventNone), + *newDataPackage(101, 110)}, + expected: []l1SyncMessage{ + *newDataPackage(101, 110), + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, 140), + *newActionPackage(eventNone), + }, + excpectedLastBlockOnSynchronizer: 140, + }, + { + description: "latest with no data doesnt change last block", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(101, 110)}, + expected: []l1SyncMessage{ + *newDataPackage(101, 110), + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + }, + excpectedLastBlockOnSynchronizer: 130, + }, + { + description: "two latest one empty and one with data change to highest block in rollupinfo", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(101, 110), + *newDataPackageWithData(131, latestBlockNumber, 140), + }, + expected: []l1SyncMessage{ + *newDataPackage(101, 110), + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackageWithData(131, latestBlockNumber, 140), + }, + excpectedLastBlockOnSynchronizer: 140, + }, + { + description: "one latest one normal", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + *newDataPackage(131, 140), + *newActionPackage(eventNone), + *newDataPackage(101, 110), + }, + expected: []l1SyncMessage{ + *newDataPackage(101, 110), + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + 
*newDataPackage(131, 140), + *newActionPackage(eventNone), + }, + excpectedLastBlockOnSynchronizer: 140, + }, + { + description: "a rollupinfo with data", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(111, 120), + *newDataPackageWithData(121, 130, 125), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(101, 110), + *newDataPackage(131, 140), + }, + expected: []l1SyncMessage{ + *newDataPackage(101, 110), + *newDataPackage(111, 120), + *newDataPackageWithData(121, 130, 125), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(131, 140), + }, + excpectedLastBlockOnSynchronizer: 140, + }, + { + description: "two latest empty with control in between", + lastBlock: 100, + packages: []l1SyncMessage{ + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(101, 110), + *newDataPackage(131, 140), + }, + expected: []l1SyncMessage{ + *newDataPackage(101, 110), + *newDataPackage(111, 120), + *newDataPackage(121, 130), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(131, latestBlockNumber), + *newActionPackage(eventNone), + *newDataPackage(131, 140), + }, + excpectedLastBlockOnSynchronizer: 140, + }, + } + for _, tc := range tcs { + t.Run(tc.description, func(t *testing.T) { + sut := newFilterToSendOrdererResultsToConsumer(tc.lastBlock) + sendData := []l1SyncMessage{} + for _, p := range tc.packages { + dataToSend := sut.Filter(p) + sendData = append(sendData, dataToSend...) 
+ } + require.Equal(t, len(tc.expected), len(sendData)) + require.Equal(t, tc.expected, sendData) + require.Equal(t, tc.excpectedLastBlockOnSynchronizer, sut.lastBlockOnSynchronizer) + }) + } +} + +func newDataPackage(fromBlock, toBlock uint64) *l1SyncMessage { + res := l1SyncMessage{ + data: rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: fromBlock, + toBlock: toBlock, + }, + lastBlockOfRange: types.NewBlock(&types.Header{Number: big.NewInt(int64(toBlock))}, nil, nil, nil, nil), + }, + dataIsValid: true, + ctrlIsValid: false, + } + if toBlock == latestBlockNumber { + res.data.lastBlockOfRange = nil + } + return &res +} + +func newDataPackageWithData(fromBlock, toBlock uint64, blockWithData uint64) *l1SyncMessage { + res := l1SyncMessage{ + data: rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: fromBlock, + toBlock: toBlock, + }, + blocks: []etherman.Block{{BlockNumber: uint64(blockWithData)}}, + }, + dataIsValid: true, + ctrlIsValid: false, + } + + return &res +} + +func newActionPackage(action eventEnum) *l1SyncMessage { + return &l1SyncMessage{ + dataIsValid: false, + data: rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 0, + toBlock: 0, + }, + }, + + ctrlIsValid: true, + ctrl: l1ConsumerControl{ + event: action, + }, + } +} diff --git a/synchronizer/l1_live_block_ranges.go b/synchronizer/l1_live_block_ranges.go new file mode 100644 index 0000000000..daf09c143a --- /dev/null +++ b/synchronizer/l1_live_block_ranges.go @@ -0,0 +1,99 @@ +package synchronizer + +import ( + "errors" + "fmt" +) + +type liveBlockRangeItem struct { + blockRange blockRange +} + +type liveBlockRanges struct { + ranges []liveBlockRangeItem +} + +func (l *liveBlockRanges) String() string { + res := l.toStringBrief() + "[" + for _, r := range l.ranges { + res += fmt.Sprintf("%s ,", r.blockRange.String()) + } + return res + "]" +} + +func (l *liveBlockRanges) toStringBrief() string { + return fmt.Sprintf("len(ranges): %v", 
len(l.ranges)) +} + +var ( + errBlockRangeInvalidOverlap = errors.New("block Range Invalid: block range overlaps") + errBlockRangeNotFound = errors.New("block Range not found") + errBlockRangeIsEmpty = errors.New("block Range is empty") +) + +func newLiveBlockRanges() liveBlockRanges { + return liveBlockRanges{} +} + +func (l *liveBlockRanges) addBlockRange(br blockRange) error { + if err := br.isValid(); err != nil { + return err + } + if l.overlaps(br) { + return errBlockRangeInvalidOverlap + } + l.ranges = append(l.ranges, liveBlockRangeItem{br}) + return nil +} + +func (l *liveBlockRanges) removeBlockRange(br blockRange) error { + for i, r := range l.ranges { + if r.blockRange == br { + l.ranges = append(l.ranges[:i], l.ranges[i+1:]...) + return nil + } + } + return errBlockRangeNotFound +} + +func (l *liveBlockRanges) getFirstBlockRange() (blockRange, error) { + if l.len() == 0 { + return blockRange{}, errBlockRangeIsEmpty + } + return l.ranges[0].blockRange, nil +} + +func (l *liveBlockRanges) getSuperBlockRange() *blockRange { + fromBlock := invalidBlockNumber + toBlock := invalidBlockNumber + for i, r := range l.ranges { + if i == 0 { + toBlock = r.blockRange.toBlock + fromBlock = r.blockRange.fromBlock + } + if r.blockRange.toBlock > toBlock { + toBlock = r.blockRange.toBlock + } + if r.blockRange.fromBlock < fromBlock { + fromBlock = r.blockRange.fromBlock + } + } + res := blockRange{fromBlock, toBlock} + if res.isValid() == nil { + return &res + } + return nil +} + +func (l *liveBlockRanges) len() int { + return len(l.ranges) +} + +func (l *liveBlockRanges) overlaps(br blockRange) bool { + for _, r := range l.ranges { + if r.blockRange.overlaps(br) { + return true + } + } + return false +} diff --git a/synchronizer/l1_live_block_ranges_test.go b/synchronizer/l1_live_block_ranges_test.go new file mode 100644 index 0000000000..0946851151 --- /dev/null +++ b/synchronizer/l1_live_block_ranges_test.go @@ -0,0 +1,77 @@ +package synchronizer + +import ( + 
"testing" + + "github.com/stretchr/testify/require" +) + +func TestInsertBR(t *testing.T) { + sut := newLiveBlockRanges() + err := sut.addBlockRange(blockRange{fromBlock: 1, toBlock: 10}) + require.NoError(t, err) + require.Equal(t, sut.len(), 1) +} +func TestInsertOverlappedBR(t *testing.T) { + sut := newLiveBlockRanges() + err := sut.addBlockRange(blockRange{fromBlock: 1, toBlock: 10}) + require.NoError(t, err) + err = sut.addBlockRange(blockRange{fromBlock: 5, toBlock: 15}) + require.Error(t, err) + require.Equal(t, sut.len(), 1) +} + +func TestInsertDuplicatedBR(t *testing.T) { + sut := newLiveBlockRanges() + err := sut.addBlockRange(blockRange{fromBlock: 1, toBlock: 10}) + require.NoError(t, err) + err = sut.addBlockRange(blockRange{fromBlock: 1, toBlock: 10}) + require.Error(t, err) + require.Equal(t, sut.len(), 1) +} + +func TestRemoveExistingBR(t *testing.T) { + sut := newLiveBlockRanges() + err := sut.addBlockRange(blockRange{fromBlock: 1, toBlock: 10}) + require.NoError(t, err) + err = sut.addBlockRange(blockRange{fromBlock: 11, toBlock: 20}) + require.NoError(t, err) + err = sut.removeBlockRange(blockRange{fromBlock: 1, toBlock: 10}) + require.NoError(t, err) + require.Equal(t, sut.len(), 1) +} + +func TestInsertWrongBR1(t *testing.T) { + sut := newLiveBlockRanges() + err := sut.addBlockRange(blockRange{}) + require.Error(t, err) + require.Equal(t, sut.len(), 0) +} +func TestInsertWrongBR2(t *testing.T) { + sut := newLiveBlockRanges() + err := sut.addBlockRange(blockRange{fromBlock: 10, toBlock: 5}) + require.Error(t, err) + require.Equal(t, sut.len(), 0) +} + +func TestGetSuperBlockRangeEmpty(t *testing.T) { + sut := newLiveBlockRanges() + res := sut.getSuperBlockRange() + require.Nil(t, res) +} + +func TestGetSuperBlockRangeWithData(t *testing.T) { + sut := newLiveBlockRanges() + err := sut.addBlockRange(blockRange{fromBlock: 1, toBlock: 10}) + require.NoError(t, err) + err = sut.addBlockRange(blockRange{fromBlock: 11, toBlock: 20}) + 
require.NoError(t, err) + err = sut.addBlockRange(blockRange{fromBlock: 21, toBlock: 109}) + require.NoError(t, err) + err = sut.addBlockRange(blockRange{fromBlock: 110, toBlock: 200}) + require.NoError(t, err) + + res := sut.getSuperBlockRange() + require.NotNil(t, res) + require.Equal(t, *res, blockRange{fromBlock: 1, toBlock: 200}) +} diff --git a/synchronizer/l1_rollup_info_consumer.go b/synchronizer/l1_rollup_info_consumer.go new file mode 100644 index 0000000000..e6f6d91ece --- /dev/null +++ b/synchronizer/l1_rollup_info_consumer.go @@ -0,0 +1,284 @@ +package synchronizer + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/common" + types "github.com/ethereum/go-ethereum/core/types" +) + +const ( + minNumIterationsBeforeStartCheckingTimeWaitingForNewRollupInfoData = 5 + minAcceptableTimeWaitingForNewRollupInfoData = 1 * time.Second +) + +var ( + errContextCanceled = errors.New("consumer:context canceled") + errConsumerStopped = errors.New("consumer:stopped by request") + errConsumerStoppedBecauseIsSynchronized = errors.New("consumer:stopped because is synchronized") + errL1Reorg = errors.New("consumer: L1 reorg detected") +) + +type configConsumer struct { + ApplyAfterNumRollupReceived int + AceptableInacctivityTime time.Duration +} + +// synchronizerProcessBlockRangeInterface is the interface with synchronizer +// to execute blocks. 
This interface is used to mock the synchronizer in the tests +type synchronizerProcessBlockRangeInterface interface { + processBlockRange(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error +} + +// l1RollupInfoConsumer is the object that process the rollup info data incomming from channel chIncommingRollupInfo +type l1RollupInfoConsumer struct { + mutex sync.Mutex + synchronizer synchronizerProcessBlockRangeInterface + chIncommingRollupInfo chan l1SyncMessage + ctx context.Context + statistics l1RollupInfoConsumerStatistics + lastEthBlockSynced *state.Block // Have been written in DB + lastEthBlockReceived *state.Block // is a memory cache + highestBlockProcessed uint64 +} + +func newL1RollupInfoConsumer(cfg configConsumer, + synchronizer synchronizerProcessBlockRangeInterface, ch chan l1SyncMessage) *l1RollupInfoConsumer { + if cfg.AceptableInacctivityTime < minAcceptableTimeWaitingForNewRollupInfoData { + log.Warnf("consumer: the AceptableInacctivityTime is too low (%s) minimum recommended %s", cfg.AceptableInacctivityTime, minAcceptableTimeWaitingForNewRollupInfoData) + } + if cfg.ApplyAfterNumRollupReceived < minNumIterationsBeforeStartCheckingTimeWaitingForNewRollupInfoData { + log.Warnf("consumer: the ApplyAfterNumRollupReceived is too low (%d) minimum recommended %d", cfg.ApplyAfterNumRollupReceived, minNumIterationsBeforeStartCheckingTimeWaitingForNewRollupInfoData) + } + + return &l1RollupInfoConsumer{ + synchronizer: synchronizer, + chIncommingRollupInfo: ch, + statistics: l1RollupInfoConsumerStatistics{ + startTime: time.Now(), + cfg: cfg, + }, + highestBlockProcessed: invalidBlockNumber, + } +} + +func (l *l1RollupInfoConsumer) Start(ctx context.Context, lastEthBlockSynced *state.Block) error { + l.ctx = ctx + l.lastEthBlockSynced = lastEthBlockSynced + if l.highestBlockProcessed == invalidBlockNumber && lastEthBlockSynced != nil { + log.Infof("consumer: Starting consumer. 
setting HighestBlockProcessed: %d (lastEthBlockSynced)", lastEthBlockSynced.BlockNumber) + l.highestBlockProcessed = lastEthBlockSynced.BlockNumber + } + log.Infof("consumer: Starting consumer. HighestBlockProcessed: %d", l.highestBlockProcessed) + l.statistics.onStart() + err := l.step() + for ; err == nil; err = l.step() { + } + if err != errConsumerStopped && err != errConsumerStoppedBecauseIsSynchronized { + return err + } + // The errConsumerStopped||errConsumerStoppedBecauseIsSynchronized are not an error, so we return nil meaning that the process finished in a normal way + return nil +} + +func (l *l1RollupInfoConsumer) Reset(startingBlockNumber uint64) { + l.mutex.Lock() + defer l.mutex.Unlock() + l.highestBlockProcessed = startingBlockNumber + l.lastEthBlockSynced = nil + l.statistics.onReset() +} + +func (l *l1RollupInfoConsumer) step() error { + l.statistics.onStartStep() + var err error + select { + case <-l.ctx.Done(): + return errContextCanceled + case rollupInfo := <-l.chIncommingRollupInfo: + if rollupInfo.dataIsValid { + err = l.processIncommingRollupInfoData(rollupInfo.data) + if err != nil { + log.Error("consumer: error processing package.RollupInfoData. Error: ", err) + } + } + if rollupInfo.ctrlIsValid { + err = l.processIncommingRollupControlData(rollupInfo.ctrl) + if err != nil && !errors.Is(err, errConsumerStoppedBecauseIsSynchronized) && !errors.Is(err, errConsumerStopped) { + log.Error("consumer: error processing package.ControlData. Error: ", err) + } + log.Infof("consumer: processed ControlData[%s]. Result: %s", rollupInfo.ctrl.String(), err) + } + } + return err +} +func (l *l1RollupInfoConsumer) processIncommingRollupControlData(control l1ConsumerControl) error { + log.Debugf("consumer: processing controlPackage: %s", control.String()) + l.mutex.Lock() + defer l.mutex.Unlock() + if control.event == eventStop { + log.Infof("consumer: received a stop, so it stops processing. 
ignoring rest of items on channel len=%d", len(l.chIncommingRollupInfo))
		return errConsumerStopped
	}
	if control.event == eventProducerIsFullySynced {
		itemsInChannel := len(l.chIncommingRollupInfo)
		if itemsInChannel == 0 {
			log.Infof("consumer: received a fullSync and nothing pending in channel to process, so stopping consumer")
			return errConsumerStoppedBecauseIsSynchronized
		} else {
			log.Infof("consumer: received a fullSync but still have %d items in channel to process, so not stopping consumer", itemsInChannel)
		}
	}
	return nil
}

// checkPreviousBlocks verifies that the previous block reported by the new
// rollupInfo matches the cached block: same hash and same parent hash for the
// same block number. A mismatch means L1 reorganized (errL1Reorg). A nil
// cachedBlock or a rollupInfo without previousBlockOfRange skips the check.
func checkPreviousBlocks(rollupInfo rollupInfoByBlockRangeResult, cachedBlock *state.Block) error {
	if cachedBlock == nil {
		return nil
	}
	if rollupInfo.previousBlockOfRange == nil {
		return nil
	}
	if cachedBlock.BlockNumber == rollupInfo.previousBlockOfRange.NumberU64() {
		if cachedBlock.BlockHash != rollupInfo.previousBlockOfRange.Hash() {
			log.Errorf("consumer: Previous block %d hash is not the same", cachedBlock.BlockNumber)
			return errL1Reorg
		}
		if cachedBlock.ParentHash != rollupInfo.previousBlockOfRange.ParentHash() {
			log.Errorf("consumer: Previous block %d parentHash is not the same", cachedBlock.BlockNumber)
			return errL1Reorg
		}
		// FIX: the previous message ("...not the same: OK") contradicted itself;
		// reaching this line means the cached block matched.
		log.Infof("consumer: Verified previous block %d: OK", cachedBlock.BlockNumber)
	}
	return nil
}

// processIncommingRollupInfoData validates that rollupInfo is the next
// contiguous range (otherwise it is ignored), checks for an L1 reorg against
// the cached previous block, executes the range through the synchronizer and
// updates statistics. Returns nil when the package is ignored or processed ok.
func (l *l1RollupInfoConsumer) processIncommingRollupInfoData(rollupInfo rollupInfoByBlockRangeResult) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	var err error
	// Only accept the range that starts exactly after the highest block
	// already processed (unless no block has been processed yet).
	if (l.highestBlockProcessed != invalidBlockNumber) && (l.highestBlockProcessed+1 != rollupInfo.blockRange.fromBlock) {
		log.Warnf("consumer: received a rollupInfo with a wrong block range. Ignoring it. Highest block synced: %d. RollupInfo block range: %s",
			l.highestBlockProcessed, rollupInfo.blockRange.String())
		return nil
	}
	l.highestBlockProcessed = rollupInfo.getHighestBlockNumberInResponse()
	statisticsMsg := l.statistics.onStartProcessIncommingRollupInfoData(rollupInfo)
	log.Infof("consumer: processing rollupInfo #%000d: range:%s num_blocks [%d] statistics:%s", l.statistics.numProcessedRollupInfo, rollupInfo.blockRange.String(), len(rollupInfo.blocks), statisticsMsg)
	timeProcessingStart := time.Now()

	if l.lastEthBlockReceived != nil {
		err = checkPreviousBlocks(rollupInfo, l.lastEthBlockReceived)
		if err != nil {
			log.Errorf("consumer: error checking previous blocks: %s", err.Error())
			return err
		}
	}
	l.lastEthBlockReceived = rollupInfo.getHighestBlockReceived()

	lastBlockProcessed, err := l.processUnsafe(rollupInfo)
	if err == nil && lastBlockProcessed != nil {
		l.lastEthBlockSynced = lastBlockProcessed
	}
	l.statistics.onFinishProcessIncommingRollupInfoData(rollupInfo, time.Since(timeProcessingStart), err)
	if err != nil {
		log.Infof("consumer: error processing rollupInfo %s. Error: %s", rollupInfo.blockRange.String(), err.Error())
		return err
	}
	// BUGFIX: numProcessedBlocks was ALSO incremented here, but
	// onFinishProcessIncommingRollupInfoData (called just above) already adds
	// len(rollupInfo.blocks) on success — every block was counted twice,
	// inflating the block_per_second statistic 2x. The statistics object now
	// owns that counter exclusively.
	return nil
}

// GetLastEthBlockSynced returns a copy of the last block synced and true, or
// the zero value and false when nothing has been synced yet.
func (l *l1RollupInfoConsumer) GetLastEthBlockSynced() (state.Block, bool) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	if l.lastEthBlockSynced == nil {
		return state.Block{}, false
	}
	return *l.lastEthBlockSynced, true
}

// StopAfterProcessChannelQueue asks the consumer to stop once everything
// currently queued in the channel has been processed.
func (l *l1RollupInfoConsumer) StopAfterProcessChannelQueue() {
	log.Infof("consumer: Sending stop package: it will stop consumer (current channel len=%d)", len(l.chIncommingRollupInfo))
	l.sendStopPackage()
}

// sendStopPackage enqueues an eventStop control package behind any pending work.
func (l *l1RollupInfoConsumer) sendStopPackage() {
	// Send a stop to the channel to stop the consumer when reach this point
	l.chIncommingRollupInfo <- *newL1SyncMessageControl(eventStop)
}

// processUnsafe executes the rollupInfo through the synchronizer (caller must
// hold l.mutex). For an empty range it stores only the last block of the range
// (when present); otherwise it processes all blocks. Returns the last block
// synced, or nil when nothing was stored.
func (l *l1RollupInfoConsumer) processUnsafe(rollupInfo rollupInfoByBlockRangeResult) (*state.Block, error) {
	blocks := rollupInfo.blocks
	order := rollupInfo.order
	var lastEthBlockSynced *state.Block

	if len(blocks) == 0 {
		lb := rollupInfo.lastBlockOfRange
		if lb == nil {
			log.Info("consumer: Empty block range: ", rollupInfo.blockRange.String())
			return nil, nil
		}
		b := convertL1BlockToEthBlock(lb)
		err := l.synchronizer.processBlockRange([]etherman.Block{b}, order)
		if err != nil {
			log.Error("consumer: Error processing last block of range: ", rollupInfo.blockRange, " err:", err)
			return nil, err
		}
		block := convertL1BlockToStateBlock(lb)
		lastEthBlockSynced = &block
		log.Debug("consumer: Storing empty block. BlockNumber: ", b.BlockNumber, ". 
BlockHash: ", b.BlockHash) + } else { + tmpStateBlock := convertEthmanBlockToStateBlock(&blocks[len(blocks)-1]) + lastEthBlockSynced = &tmpStateBlock + logBlocks(blocks) + err := l.synchronizer.processBlockRange(blocks, order) + if err != nil { + log.Info("consumer: Error processing block range: ", rollupInfo.blockRange, " err:", err) + return nil, err + } + } + return lastEthBlockSynced, nil +} + +func logBlocks(blocks []etherman.Block) { + for i := range blocks { + log.Debug("consumer: Position: [", i, "/", len(blocks), "] . BlockNumber: ", blocks[i].BlockNumber, ". BlockHash: ", blocks[i].BlockHash) + } +} + +func convertL1BlockToEthBlock(fb *types.Block) etherman.Block { + return etherman.Block{ + BlockNumber: fb.NumberU64(), + BlockHash: fb.Hash(), + ParentHash: fb.ParentHash(), + ReceivedAt: time.Unix(int64(fb.Time()), 0), + } +} + +func convertL1BlockToStateBlock(fb *types.Block) state.Block { + return state.Block{ + BlockNumber: fb.NumberU64(), + BlockHash: fb.Hash(), + ParentHash: fb.ParentHash(), + ReceivedAt: time.Unix(int64(fb.Time()), 0), + } +} + +func convertEthmanBlockToStateBlock(fb *etherman.Block) state.Block { + return state.Block{ + BlockNumber: fb.BlockNumber, + BlockHash: fb.BlockHash, + ParentHash: fb.ParentHash, + ReceivedAt: fb.ReceivedAt, + } +} diff --git a/synchronizer/l1_rollup_info_consumer_statistics.go b/synchronizer/l1_rollup_info_consumer_statistics.go new file mode 100644 index 0000000000..f987c2e4a4 --- /dev/null +++ b/synchronizer/l1_rollup_info_consumer_statistics.go @@ -0,0 +1,69 @@ +package synchronizer + +import ( + "fmt" + "time" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/synchronizer/metrics" +) + +type l1RollupInfoConsumerStatistics struct { + numProcessedRollupInfo uint64 + numProcessedRollupInfoForCheckTime uint64 + numProcessedBlocks uint64 + startTime time.Time + timePreviousProcessingDuration time.Duration + startStepTime time.Time + cfg configConsumer +} + +func (l 
*l1RollupInfoConsumerStatistics) onStart() { + l.startTime = time.Now() + l.startStepTime = time.Time{} + l.numProcessedRollupInfoForCheckTime = 0 +} + +func (l *l1RollupInfoConsumerStatistics) onStartStep() { + l.startStepTime = time.Now() +} + +func (l *l1RollupInfoConsumerStatistics) onReset() { + l.numProcessedRollupInfoForCheckTime = 0 + l.startStepTime = time.Time{} +} + +func (l *l1RollupInfoConsumerStatistics) onStartProcessIncommingRollupInfoData(rollupInfo rollupInfoByBlockRangeResult) string { + now := time.Now() + // Time have have been blocked in the select statement + waitingTimeForData := now.Sub(l.startStepTime) + blocksPerSecond := float64(l.numProcessedBlocks) / time.Since(l.startTime).Seconds() + generatedWarning := false + if l.numProcessedRollupInfoForCheckTime > uint64(l.cfg.ApplyAfterNumRollupReceived) && waitingTimeForData > l.cfg.AceptableInacctivityTime { + msg := fmt.Sprintf("wasted waiting for new rollupInfo from L1: %s last_process: %s new range: %s block_per_second: %f", + waitingTimeForData, l.timePreviousProcessingDuration, rollupInfo.blockRange.String(), blocksPerSecond) + log.Warnf("consumer:: Too much wasted time (waiting to receive a new data):%s", msg) + generatedWarning = true + } + l.numProcessedRollupInfo++ + l.numProcessedRollupInfoForCheckTime++ + msg := fmt.Sprintf("wasted_time_waiting_for_data [%s] last_process_time [%s] block_per_second [%f]", + waitingTimeForData.Round(time.Second).String(), + l.timePreviousProcessingDuration, + blocksPerSecond) + if waitingTimeForData > l.cfg.AceptableInacctivityTime { + msg = msg + " WASTED_TIME_EXCEED " + } + if generatedWarning { + msg = msg + " WARNING_WASTED_TIME " + } + return msg +} + +func (l *l1RollupInfoConsumerStatistics) onFinishProcessIncommingRollupInfoData(rollupInfo rollupInfoByBlockRangeResult, executionTime time.Duration, err error) { + l.timePreviousProcessingDuration = executionTime + if err == nil { + l.numProcessedBlocks += uint64(len(rollupInfo.blocks)) + 
metrics.ProcessL1DataTime(executionTime) + } +} diff --git a/synchronizer/l1_rollup_info_consumer_statistics_test.go b/synchronizer/l1_rollup_info_consumer_statistics_test.go new file mode 100644 index 0000000000..82ec6bb8a1 --- /dev/null +++ b/synchronizer/l1_rollup_info_consumer_statistics_test.go @@ -0,0 +1,117 @@ +package synchronizer + +import ( + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/stretchr/testify/assert" +) + +func TestL1RollupInfoConsumerStatistics(t *testing.T) { + cfg := configConsumer{ + ApplyAfterNumRollupReceived: 10, + AceptableInacctivityTime: 5 * time.Second, + } + stats := l1RollupInfoConsumerStatistics{ + cfg: cfg, + } + + stats.onStart() + stats.onStartStep() + + // Test onFinishProcessIncommingRollupInfoData + rollupInfo := rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 1, + toBlock: 10, + }, + blocks: []etherman.Block{}, + } + executionTime := 2 * time.Second + stats.onStartProcessIncommingRollupInfoData(rollupInfo) + stats.onFinishProcessIncommingRollupInfoData(rollupInfo, executionTime, error(nil)) + assert.Equal(t, stats.timePreviousProcessingDuration, executionTime) + assert.Equal(t, stats.numProcessedRollupInfo, uint64(1)) + assert.Equal(t, stats.numProcessedBlocks, uint64(len(rollupInfo.blocks))) + + stats.onStart() + stats.onStartStep() + + msg := stats.onStartProcessIncommingRollupInfoData(rollupInfo) + assert.Contains(t, msg, "wasted_time_waiting_for_data") + assert.Contains(t, msg, "last_process_time") + assert.Contains(t, msg, "block_per_second") + assert.NotContains(t, msg, "WASTED_TIME_EXCEED") + assert.NotContains(t, msg, "WARNING_WASTED_TIME") +} + +func TestL1RollupInfoConsumerStatisticsWithExceedTimeButNoWarningGenerated(t *testing.T) { + cfg := configConsumer{ + ApplyAfterNumRollupReceived: 10, + AceptableInacctivityTime: 0 * time.Second, + } + stats := l1RollupInfoConsumerStatistics{ + cfg: cfg, + } + + stats.onStart() + stats.onStartStep() + + // Test 
onFinishProcessIncommingRollupInfoData + rollupInfo := rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 1, + toBlock: 10, + }, + blocks: []etherman.Block{}, + } + executionTime := 2 * time.Second + err := error(nil) + stats.onStartProcessIncommingRollupInfoData(rollupInfo) + stats.onFinishProcessIncommingRollupInfoData(rollupInfo, executionTime, err) + + stats.onStartStep() + msg := stats.onStartProcessIncommingRollupInfoData(rollupInfo) + assert.Contains(t, msg, "wasted_time_waiting_for_data") + assert.Contains(t, msg, "last_process_time") + assert.Contains(t, msg, "block_per_second") + assert.Contains(t, msg, "WASTED_TIME_EXCEED") + assert.NotContains(t, msg, "WARNING_WASTED_TIME") +} + +func TestL1RollupInfoConsumerStatisticsWithExceedTimeButAndWarningGenerated(t *testing.T) { + cfg := configConsumer{ + ApplyAfterNumRollupReceived: 1, + AceptableInacctivityTime: 0 * time.Second, + } + stats := l1RollupInfoConsumerStatistics{ + cfg: cfg, + } + + stats.onStart() + stats.onStartStep() + + // Test onFinishProcessIncommingRollupInfoData + rollupInfo := rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 1, + toBlock: 10, + }, + blocks: []etherman.Block{}, + } + executionTime := 2 * time.Second + err := error(nil) + stats.onStartProcessIncommingRollupInfoData(rollupInfo) + stats.onFinishProcessIncommingRollupInfoData(rollupInfo, executionTime, err) + stats.onStartProcessIncommingRollupInfoData(rollupInfo) + stats.onFinishProcessIncommingRollupInfoData(rollupInfo, executionTime, err) + + stats.onStartStep() + msg := stats.onStartProcessIncommingRollupInfoData(rollupInfo) + assert.Contains(t, msg, "wasted_time_waiting_for_data") + assert.Contains(t, msg, "last_process_time") + assert.Contains(t, msg, "block_per_second") + assert.Contains(t, msg, "WASTED_TIME_EXCEED") + assert.Contains(t, msg, "WARNING_WASTED_TIME") +} diff --git a/synchronizer/l1_rollup_info_consumer_test.go b/synchronizer/l1_rollup_info_consumer_test.go new file 
mode 100644 index 0000000000..588b2a1803 --- /dev/null +++ b/synchronizer/l1_rollup_info_consumer_test.go @@ -0,0 +1,157 @@ +package synchronizer + +import ( + "context" + "errors" + "math/big" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/ethereum/go-ethereum/common" + types "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type consumerTestData struct { + sut *l1RollupInfoConsumer + syncMock *synchronizerProcessBlockRangeMock + ch chan l1SyncMessage +} + +func TestGivenConsumerWhenReceiveAFullSyncAndChannelIsEmptyThenStopOk(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + data := setupConsumerTest(t) + defer cancel() + data.ch <- *newL1SyncMessageControl(eventProducerIsFullySynced) + err := data.sut.Start(ctxTimeout, nil) + require.NoError(t, err) +} +func TestGivenConsumerWhenReceiveAFullSyncAndChannelIsNotEmptyThenDontStop(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + data := setupConsumerTest(t) + defer cancel() + + data.ch <- *newL1SyncMessageControl(eventProducerIsFullySynced) + data.ch <- *newL1SyncMessageControl(eventNone) + err := data.sut.Start(ctxTimeout, nil) + require.Error(t, err) + require.Equal(t, errContextCanceled, err) +} + +func TestGivenConsumerWhenFailsToProcessRollupThenDontKnownLastEthBlock(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + data := setupConsumerTest(t) + defer cancel() + responseRollupInfoByBlockRange := rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 100, + toBlock: 200, + }, + blocks: []etherman.Block{}, + order: map[common.Hash][]etherman.Order{}, + lastBlockOfRange: types.NewBlock(&types.Header{Number: big.NewInt(123)}, nil, nil, nil, nil), + } + data.syncMock. 
+ On("processBlockRange", mock.Anything, mock.Anything). + Return(errors.New("error")). + Once() + data.ch <- *newL1SyncMessageData(&responseRollupInfoByBlockRange) + data.ch <- *newL1SyncMessageControl(eventProducerIsFullySynced) + err := data.sut.Start(ctxTimeout, nil) + require.Error(t, err) + _, ok := data.sut.GetLastEthBlockSynced() + require.False(t, ok) +} + +func TestGivenConsumerWhenReceiveNoNextBlockThenDoNothing(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + data := setupConsumerTest(t) + defer cancel() + responseRollupInfoByBlockRange := rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 100, + toBlock: 200, + }, + blocks: []etherman.Block{}, + order: map[common.Hash][]etherman.Order{}, + lastBlockOfRange: nil, + } + // Is not going to call processBlockRange + data.ch <- *newL1SyncMessageData(&responseRollupInfoByBlockRange) + data.ch <- *newL1SyncMessageControl(eventProducerIsFullySynced) + data.sut.Reset(1234) + err := data.sut.Start(ctxTimeout, nil) + require.NoError(t, err) + _, ok := data.sut.GetLastEthBlockSynced() + require.False(t, ok) +} + +func TestGivenConsumerWhenNextBlockNumberIsNoSetThenAcceptAnythingAndProcess(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + data := setupConsumerTest(t) + defer cancel() + responseRollupInfoByBlockRange := rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 100, + toBlock: 200, + }, + blocks: []etherman.Block{}, + order: map[common.Hash][]etherman.Order{}, + lastBlockOfRange: types.NewBlock(&types.Header{Number: big.NewInt(123)}, nil, nil, nil, nil), + } + + data.ch <- *newL1SyncMessageData(&responseRollupInfoByBlockRange) + data.ch <- *newL1SyncMessageControl(eventProducerIsFullySynced) + data.syncMock. + On("processBlockRange", mock.Anything, mock.Anything). + Return(nil). 
+ Once() + err := data.sut.Start(ctxTimeout, nil) + require.NoError(t, err) + resultBlock, ok := data.sut.GetLastEthBlockSynced() + require.True(t, ok) + require.Equal(t, uint64(123), resultBlock.BlockNumber) +} + +func TestGivenConsumerWhenNextBlockNumberIsNoSetThenFirstRollupInfoSetIt(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + data := setupConsumerTest(t) + defer cancel() + responseRollupInfoByBlockRange := rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 100, + toBlock: 200, + }, + blocks: []etherman.Block{}, + order: map[common.Hash][]etherman.Order{}, + lastBlockOfRange: types.NewBlock(&types.Header{Number: big.NewInt(123)}, nil, nil, nil, nil), + } + // Fist package set highestBlockProcessed + data.ch <- *newL1SyncMessageData(&responseRollupInfoByBlockRange) + // The repeated package is ignored because is not the next BlockRange + data.ch <- *newL1SyncMessageData(&responseRollupInfoByBlockRange) + data.ch <- *newL1SyncMessageControl(eventProducerIsFullySynced) + data.syncMock. + On("processBlockRange", mock.Anything, mock.Anything). + Return(nil). 
+ Once() + err := data.sut.Start(ctxTimeout, nil) + require.NoError(t, err) + resultBlock, ok := data.sut.GetLastEthBlockSynced() + require.True(t, ok) + require.Equal(t, uint64(123), resultBlock.BlockNumber) +} + +func setupConsumerTest(t *testing.T) consumerTestData { + syncMock := newSynchronizerProcessBlockRangeMock(t) + ch := make(chan l1SyncMessage, 10) + + cfg := configConsumer{ + ApplyAfterNumRollupReceived: minNumIterationsBeforeStartCheckingTimeWaitingForNewRollupInfoData, + AceptableInacctivityTime: minAcceptableTimeWaitingForNewRollupInfoData, + } + sut := newL1RollupInfoConsumer(cfg, syncMock, ch) + return consumerTestData{sut, syncMock, ch} +} diff --git a/synchronizer/l1_rollup_info_producer.go b/synchronizer/l1_rollup_info_producer.go new file mode 100644 index 0000000000..235f58c9aa --- /dev/null +++ b/synchronizer/l1_rollup_info_producer.go @@ -0,0 +1,596 @@ +// package synchronizer +// Implements the logic to retrieve data from L1 and send it to the synchronizer +// - multiples etherman to do it in parallel +// - generate blocks to be retrieved +// - retrieve blocks (parallel) +// - when reach the update state: +// - send a update to channel and keep retrieving last block to ask for new rollup info +// +// +// TODO: +// - Check all log.fatals to remove it or add status before the panic + +package synchronizer + +import ( + "context" + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/0xPolygonHermez/zkevm-node" + "github.com/0xPolygonHermez/zkevm-node/log" +) + +const ( + minTTLOfLastBlock = time.Second + minTimeoutForRequestLastBlockOnL1 = time.Second * 1 + minNumOfAllowedRetriesForRequestLastBlockOnL1 = 1 + minTimeOutMainLoop = time.Minute * 5 + timeForShowUpStatisticsLog = time.Second * 60 + conversionFactorPercentage = 100 + lenCommandsChannels = 5 + maximumBlockDistanceFromLatestToFinalized = 96 // https://www.alchemy.com/overviews/ethereum-commitment-levels +) + +type filter interface { + ToStringBrief() string + 
Filter(data l1SyncMessage) []l1SyncMessage + Reset(lastBlockOnSynchronizer uint64) + numItemBlockedInQueue() int +} + +type syncStatusInterface interface { + // Verify that configuration and lastBlock are right + Verify() error + // Reset synchronization to a new starting point + Reset(lastBlockStoreOnStateDB uint64) + // String returns a string representation of the object + String() string + // GetNextRange returns the next Block to be retrieved + GetNextRange() *blockRange + // GetNextRangeOnlyRetries returns the fist Block pending to retry + GetNextRangeOnlyRetries() *blockRange + // IsNodeFullySynchronizedWithL1 returns true there nothing pending to retrieved and have finished all workers + // so all the rollupinfo are in the channel to be processed by consumer + IsNodeFullySynchronizedWithL1() bool + // HaveRequiredAllBlocksToBeSynchronized returns true if have been requested all rollupinfo + // but maybe there are some pending retries or still working in some BlockRange + HaveRequiredAllBlocksToBeSynchronized() bool + // DoesItHaveAllTheNeedDataToWork returns true if have all the data to start working + DoesItHaveAllTheNeedDataToWork() bool + // GetLastBlockOnL1 returns the last block on L1 or InvalidBlock if not set + GetLastBlockOnL1() uint64 + + // OnStartedNewWorker a new worker has been started + OnStartedNewWorker(br blockRange) + // OnFinishWorker a worker has finished, returns true if the data have to be processed + OnFinishWorker(br blockRange, successful bool, highestBlockNumberInResponse uint64) bool + // OnNewLastBlockOnL1 a new last block on L1 has been received + OnNewLastBlockOnL1(lastBlock uint64) onNewLastBlockResponse + // BlockNumberIsInsideUnsafeArea returns if this block is beyond Finalized (so it could be reorg) + // If blockNumber == invalidBlockNumber then it uses the highestBlockRequested (the last block requested) + BlockNumberIsInsideUnsafeArea(blockNumber uint64) bool +} + +type workersInterface interface { + // initialize object 
+ initialize() error + // finalize object + stop() + // waits until all workers have finish the current task + waitFinishAllWorkers() + asyncRequestRollupInfoByBlockRange(ctx context.Context, request requestRollupInfoByBlockRange) (chan responseRollupInfoByBlockRange, error) + requestLastBlockWithRetries(ctx context.Context, timeout time.Duration, maxPermittedRetries int) responseL1LastBlock + getResponseChannelForRollupInfo() chan responseRollupInfoByBlockRange + String() string + howManyRunningWorkers() int +} + +type producerStatusEnum int32 + +const ( + producerIdle producerStatusEnum = 0 + producerWorking producerStatusEnum = 1 + producerSynchronized producerStatusEnum = 2 + producerNoRunning producerStatusEnum = 3 + // producerReseting: is in a reset process, so is going to reject all rollup info + producerReseting producerStatusEnum = 4 +) + +func (s producerStatusEnum) String() string { + return [...]string{"idle", "working", "synchronized", "no_running", "reseting"}[s] +} + +type configProducer struct { + syncChunkSize uint64 + ttlOfLastBlockOnL1 time.Duration + + timeoutForRequestLastBlockOnL1 time.Duration + numOfAllowedRetriesForRequestLastBlockOnL1 int + + //timeout for main loop if no is synchronized yet, this time is a safeguard because is not needed + timeOutMainLoop time.Duration + //how ofter we show a log with statistics, 0 means disabled + timeForShowUpStatisticsLog time.Duration + minTimeBetweenRetriesForRollupInfo time.Duration +} + +func (cfg *configProducer) String() string { + return fmt.Sprintf("syncChunkSize:%d ttlOfLastBlockOnL1:%s timeoutForRequestLastBlockOnL1:%s numOfAllowedRetriesForRequestLastBlockOnL1:%d timeOutMainLoop:%s timeForShowUpStatisticsLog:%s", + cfg.syncChunkSize, cfg.ttlOfLastBlockOnL1, cfg.timeoutForRequestLastBlockOnL1, cfg.numOfAllowedRetriesForRequestLastBlockOnL1, cfg.timeOutMainLoop, cfg.timeForShowUpStatisticsLog) +} + +func (cfg *configProducer) normalize() { + if cfg.syncChunkSize == 0 { + 
log.Fatalf("producer:config: SyncChunkSize must be greater than 0") + } + if cfg.ttlOfLastBlockOnL1 < minTTLOfLastBlock { + log.Warnf("producer:config: ttlOfLastBlockOnL1 is too low (%s) minimum recomender value %s", cfg.ttlOfLastBlockOnL1, minTTLOfLastBlock) + } + if cfg.timeoutForRequestLastBlockOnL1 < minTimeoutForRequestLastBlockOnL1 { + log.Warnf("producer:config: timeRequestInitialValueOfLastBlock is too low (%s) minimum recomender value%s", cfg.timeoutForRequestLastBlockOnL1, minTimeoutForRequestLastBlockOnL1) + } + if cfg.numOfAllowedRetriesForRequestLastBlockOnL1 < minNumOfAllowedRetriesForRequestLastBlockOnL1 { + log.Warnf("producer:config: retriesForRequestnitialValueOfLastBlock is too low (%d) minimum recomender value %d", cfg.numOfAllowedRetriesForRequestLastBlockOnL1, minNumOfAllowedRetriesForRequestLastBlockOnL1) + } + if cfg.timeOutMainLoop < minTimeOutMainLoop { + log.Warnf("producer:config: timeOutMainLoop is too low (%s) minimum recomender value %s", cfg.timeOutMainLoop, minTimeOutMainLoop) + } + if cfg.minTimeBetweenRetriesForRollupInfo <= 0 { + log.Warnf("producer:config: minTimeBetweenRetriesForRollup is too low (%s)", cfg.minTimeBetweenRetriesForRollupInfo) + } +} + +type producerCmdEnum int32 + +const ( + producerNop producerCmdEnum = 0 + producerStop producerCmdEnum = 1 + producerReset producerCmdEnum = 2 +) + +func (s producerCmdEnum) String() string { + return [...]string{"nop", "stop", "reset"}[s] +} + +type producerCmd struct { + cmd producerCmdEnum + param1 uint64 +} + +type l1RollupInfoProducer struct { + mutex sync.Mutex + ctxParent context.Context + ctxWithCancel contextWithCancel + workers workersInterface + syncStatus syncStatusInterface + outgoingChannel chan l1SyncMessage + timeLastBLockOnL1 time.Time + status producerStatusEnum + // filter is an object that sort l1DataMessage to be send ordered by block number + filterToSendOrdererResultsToConsumer filter + statistics l1RollupInfoProducerStatistics + cfg configProducer + 
channelCmds chan producerCmd +} + +func (l *l1RollupInfoProducer) toStringBrief() string { + l.mutex.Lock() + defer l.mutex.Unlock() + return l.toStringBriefUnsafe() +} + +func (l *l1RollupInfoProducer) toStringBriefUnsafe() string { + return fmt.Sprintf("status:%s syncStatus:[%s] workers:[%s] filter:[%s] cfg:[%s]", l.getStatus(), l.syncStatus.String(), l.workers.String(), l.filterToSendOrdererResultsToConsumer.ToStringBrief(), l.cfg.String()) +} + +// l1DataRetrieverStatistics : create an instance of l1RollupInfoProducer +func newL1DataRetriever(cfg configProducer, ethermans []EthermanInterface, outgoingChannel chan l1SyncMessage) *l1RollupInfoProducer { + if cap(outgoingChannel) < len(ethermans) { + log.Warnf("producer: outgoingChannel must have a capacity (%d) of at least equal to number of ether clients (%d)", cap(outgoingChannel), len(ethermans)) + } + cfg.normalize() + // The timeout for clients are set to infinite because the time to process a rollup segment is not known + // TODO: move this to config file + workersConfig := workersConfig{timeoutRollupInfo: time.Duration(math.MaxInt64)} + + result := l1RollupInfoProducer{ + syncStatus: newSyncStatus(invalidBlockNumber, cfg.syncChunkSize), + workers: newWorkerDecoratorLimitRetriesByTime(newWorkers(ethermans, workersConfig), cfg.minTimeBetweenRetriesForRollupInfo), + filterToSendOrdererResultsToConsumer: newFilterToSendOrdererResultsToConsumer(invalidBlockNumber), + outgoingChannel: outgoingChannel, + statistics: newRollupInfoProducerStatistics(invalidBlockNumber, DefaultTimeProvider{}), + status: producerNoRunning, + cfg: cfg, + channelCmds: make(chan producerCmd, lenCommandsChannels), + } + return &result +} + +// ResetAndStop: reset the object and stop the current process. 
Set first block to be retrieved +// This function could be call from outside of main goroutine +func (l *l1RollupInfoProducer) Reset(startingBlockNumber uint64) { + log.Infof("producer: Reset(%d) queue cmd and discarding all info in channel", startingBlockNumber) + l.setStatusReseting() + l.emptyChannel() + l.channelCmds <- producerCmd{cmd: producerReset, param1: startingBlockNumber} +} + +func (l *l1RollupInfoProducer) resetUnsafe(startingBlockNumber uint64) { + log.Infof("producer: Reset L1 sync process to blockNumber %d st=%s", startingBlockNumber, l.toStringBrief()) + l.setStatusReseting() + log.Debugf("producer: Reset(%d): stop previous run (state=%s)", startingBlockNumber, l.getStatus().String()) + log.Debugf("producer: Reset(%d): syncStatus.reset", startingBlockNumber) + l.syncStatus.Reset(startingBlockNumber) + l.statistics.reset(startingBlockNumber) + log.Debugf("producer: Reset(%d): stopping workers", startingBlockNumber) + l.workers.stop() + // Empty pending rollupinfos + log.Debugf("producer: Reset(%d): emptyChannel", startingBlockNumber) + l.emptyChannel() + log.Debugf("producer: Reset(%d): reset Filter", startingBlockNumber) + l.filterToSendOrdererResultsToConsumer.Reset(startingBlockNumber) + l.setStatus(producerIdle) + log.Infof("producer: Reset(%d): reset done!", startingBlockNumber) +} + +func (l *l1RollupInfoProducer) isProducerRunning() bool { + return l.getStatus() != producerNoRunning +} + +func (l *l1RollupInfoProducer) setStatusReseting() { + l.mutex.Lock() + defer l.mutex.Unlock() + l.setStatus(producerReseting) +} + +func (l *l1RollupInfoProducer) getStatus() producerStatusEnum { + return producerStatusEnum(atomic.LoadInt32((*int32)(&l.status))) +} + +func (l *l1RollupInfoProducer) setStatus(newStatus producerStatusEnum) { + previousStatus := l.getStatus() + atomic.StoreInt32((*int32)(&l.status), int32(newStatus)) + if previousStatus != newStatus { + log.Infof("producer: Status changed from [%s] to [%s]", previousStatus.String(), 
newStatus.String()) + if newStatus == producerSynchronized { + log.Infof("producer: send a message to consumer to indicate that we are synchronized") + l.sendPackages([]l1SyncMessage{*newL1SyncMessageControl(eventProducerIsFullySynced)}) + } + } +} +func (l *l1RollupInfoProducer) Abort() { + l.emptyChannel() + l.ctxWithCancel.cancelCtx() + l.ctxWithCancel.createWithCancel(l.ctxParent) +} + +func (l *l1RollupInfoProducer) Stop() { + log.Infof("producer: Stop() queue cmd") + l.channelCmds <- producerCmd{cmd: producerStop} +} + +func (l *l1RollupInfoProducer) stopUnsafe() { + log.Infof("producer: stop() called st=%s", l.toStringBrief()) + + if l.isProducerRunning() { + log.Infof("producer:Stop:was running -> stopping producer") + l.ctxWithCancel.cancel() + } + + l.setStatus(producerNoRunning) + log.Debugf("producer:Stop: stop workers and wait for finish (%s)", l.workers.String()) + l.workers.stop() +} + +func (l *l1RollupInfoProducer) emptyChannel() { + for len(l.outgoingChannel) > 0 { + <-l.outgoingChannel + } +} + +// verify: test params and status without if not allowModify avoid doing connection or modification of objects +func (l *l1RollupInfoProducer) verify() error { + return l.syncStatus.Verify() +} + +func (l *l1RollupInfoProducer) initialize(ctx context.Context) error { + log.Debug("producer: initialize") + err := l.verify() + if err != nil { + log.Debug("producer: initialize, syncstatus not ready: %s", err.Error()) + } + if l.ctxParent != ctx || l.ctxWithCancel.isInvalid() { + log.Debug("producer: start called and need to create a new context") + l.ctxParent = ctx + l.ctxWithCancel.createWithCancel(l.ctxParent) + } + err = l.workers.initialize() + if err != nil { + return err + } + return nil +} + +// Before calling Start you must set lastBlockOnDB calling ResetAndStop +func (l *l1RollupInfoProducer) Start(ctx context.Context) error { + log.Infof("producer: starting L1 sync from:%s", l.syncStatus.String()) + err := l.initialize(ctx) + if err != nil { + 
log.Infof("producer: can't start because: %s", err.Error()) + return err + } + l.setStatus(producerIdle) + log.Debugf("producer: starting configuration: %s", l.cfg.String()) + var waitDuration = time.Duration(0) + for l.step(&waitDuration) { + } + l.setStatus(producerNoRunning) + l.workers.waitFinishAllWorkers() + return nil +} + +func (l *l1RollupInfoProducer) step(waitDuration *time.Duration) bool { + if atomic.CompareAndSwapInt32((*int32)(&l.status), int32(producerNoRunning), int32(producerIdle)) { // l.getStatus() == producerNoRunning + log.Info("producer: step: status is no running, changing to idle %s", l.getStatus().String()) + } + log.Infof("producer: build_time:%s step: status:%s", zkevm.BuildDate, l.toStringBrief()) + select { + case <-l.ctxWithCancel.Done(): + log.Debugf("producer: context canceled") + return false + case cmd := <-l.channelCmds: + log.Infof("producer: received a command") + res := l.executeCmd(cmd) + if !res { + log.Info("producer: cmd %s stop the process", cmd.cmd.String()) + return false + } + // That timeout is not need, but just in case that stop launching request + case <-time.After(*waitDuration): + log.Debugf("producer: reach timeout of step loop it was of %s", *waitDuration) + case resultRollupInfo := <-l.workers.getResponseChannelForRollupInfo(): + l.onResponseRollupInfo(resultRollupInfo) + } + switch l.getStatus() { + case producerIdle: + // Is ready to start working? + l.renewLastBlockOnL1IfNeeded("idle") + if l.syncStatus.DoesItHaveAllTheNeedDataToWork() { + log.Infof("producer: producerIdle: have all the data to work, moving to working status. 
status:%s", l.syncStatus.String()) + l.setStatus(producerWorking) + // This is for wakeup the step again to launch a new work + l.channelCmds <- producerCmd{cmd: producerNop} + } else { + log.Infof("producer: producerIdle: still dont have all the data to work status:%s", l.syncStatus.String()) + } + case producerWorking: + // launch new Work + _, err := l.launchWork() + if err != nil { + log.Errorf("producer: producerWorking: error launching work: %s", err.Error()) + return false + } + // If I'm have required all blocks to L1? + if l.syncStatus.HaveRequiredAllBlocksToBeSynchronized() { + log.Debugf("producer: producerWorking: haveRequiredAllBlocksToBeSynchronized -> renewLastBlockOnL1IfNeeded") + l.renewLastBlockOnL1IfNeeded("HaveRequiredAllBlocksToBeSynchronized") + } + if l.syncStatus.BlockNumberIsInsideUnsafeArea(invalidBlockNumber) { + log.Debugf("producer: producerWorking: we are inside unsafe area!, renewLastBlockOnL1IfNeeded") + l.renewLastBlockOnL1IfNeeded("unsafe block area") + } + // If after asking for a new lastBlockOnL1 we are still synchronized then we are synchronized + if l.syncStatus.IsNodeFullySynchronizedWithL1() { + l.setStatus(producerSynchronized) + } else { + log.Infof("producer: producerWorking: still not synchronized with the new block range launch workers again") + _, err := l.launchWork() + if err != nil { + log.Errorf("producer: producerSynchronized: error launching work: %s", err.Error()) + return false + } + } + case producerSynchronized: + // renew last block on L1 if needed + log.Debugf("producer: producerSynchronized") + l.renewLastBlockOnL1IfNeeded("producerSynchronized") + + numLaunched, err := l.launchWork() + if err != nil { + log.Errorf("producer: producerSynchronized: error launching work: %s", err.Error()) + return false + } + if numLaunched > 0 { + l.setStatus(producerWorking) + } + case producerReseting: + log.Infof("producer: producerReseting") + } + + if l.cfg.timeForShowUpStatisticsLog != 0 && 
time.Since(l.statistics.lastShowUpTime) > l.cfg.timeForShowUpStatisticsLog { + log.Infof("producer: Statistics:%s", l.statistics.getStatisticsDebugString()) + l.statistics.lastShowUpTime = time.Now() + } + *waitDuration = l.getNextTimeout() + log.Debugf("producer: Next timeout: %s status:%s ", *waitDuration, l.toStringBrief()) + return true +} + +// return if the producer must keep running (false -> stop) +func (l *l1RollupInfoProducer) executeCmd(cmd producerCmd) bool { + switch cmd.cmd { + case producerStop: + log.Infof("producer: received a stop, so it stops processing") + l.stopUnsafe() + return false + case producerReset: + log.Infof("producer: received a reset(%d)", cmd.param1) + l.resetUnsafe(cmd.param1) + return true + } + return true +} + +func (l *l1RollupInfoProducer) ttlOfLastBlockOnL1() time.Duration { + return l.cfg.ttlOfLastBlockOnL1 +} + +func (l *l1RollupInfoProducer) getNextTimeout() time.Duration { + timeOutMainLoop := l.cfg.timeOutMainLoop + status := l.getStatus() + switch status { + case producerIdle: + return timeOutMainLoop + case producerWorking: + return timeOutMainLoop + case producerSynchronized: + nextRenewLastBlock := time.Since(l.timeLastBLockOnL1) + l.ttlOfLastBlockOnL1() + return max(nextRenewLastBlock, time.Second) + case producerNoRunning: + return timeOutMainLoop + case producerReseting: + return timeOutMainLoop + default: + log.Fatalf("producer: Unknown status: %s", status.String()) + } + return timeOutMainLoop +} + +// OnNewLastBlock is called when a new last block on L1 is received +func (l *l1RollupInfoProducer) onNewLastBlock(lastBlock uint64) onNewLastBlockResponse { + resp := l.syncStatus.OnNewLastBlockOnL1(lastBlock) + l.statistics.updateLastBlockNumber(resp.fullRange.toBlock) + l.timeLastBLockOnL1 = time.Now() + if resp.extendedRange != nil { + log.Infof("producer: New last block on L1: %v -> %s", resp.fullRange.toBlock, resp.toString()) + } + return resp +} + +func (l *l1RollupInfoProducer) canISendNewRequestsUnsafe() 
(bool, string) { + queued := l.filterToSendOrdererResultsToConsumer.numItemBlockedInQueue() + inChannel := len(l.outgoingChannel) + maximum := cap(l.outgoingChannel) + msg := fmt.Sprintf("inFilter:%d + inChannel:%d > maximum:%d?", queued, inChannel, maximum) + if queued+inChannel > maximum { + msg = msg + " ==> only allow retries" + return false, msg + } + msg = msg + " ==> allow new req" + return true, msg +} + +// launchWork: launch new workers if possible and returns new channels created +// returns the number of workers launched +func (l *l1RollupInfoProducer) launchWork() (int, error) { + launchedWorker := 0 + allowNewRequests, allowNewRequestMsg := l.canISendNewRequestsUnsafe() + accDebugStr := "[" + allowNewRequestMsg + "] " + for { + var br *blockRange + if allowNewRequests { + br = l.syncStatus.GetNextRange() + } else { + br = l.syncStatus.GetNextRangeOnlyRetries() + } + if br == nil { + // No more work to do + accDebugStr += "[NoNextRange] " + break + } + // The request include previous block only if a latest request, because then it starts from l + request := requestRollupInfoByBlockRange{blockRange: *br, + sleepBefore: noSleepTime, + requestLastBlockIfNoBlocksInAnswer: requestLastBlockModeIfNoBlocksInAnswer, + requestPreviousBlock: false, + } + unsafeAreaMsg := "" + // GetLastBlockOnL1 is the lastest block on L1, if we are not in safe zone of reorgs we ask for previous and last block + // to be able to check that there is no reorgs + if l.syncStatus.BlockNumberIsInsideUnsafeArea(br.fromBlock) { + log.Debugf("L1 unsafe zone: asking for previous and last block") + request.requestLastBlockIfNoBlocksInAnswer = requestLastBlockModeAlways + request.requestPreviousBlock = true + unsafeAreaMsg = "/UNSAFE" + } + blockRangeMsg := br.String() + unsafeAreaMsg + _, err := l.workers.asyncRequestRollupInfoByBlockRange(l.ctxWithCancel.ctx, request) + if err != nil { + if errors.Is(err, errAllWorkersBusy) { + accDebugStr += fmt.Sprintf(" segment %s -> [Error:%s] ", 
blockRangeMsg, err.Error()) + } + break + } else { + accDebugStr += fmt.Sprintf(" segment %s -> [LAUNCHED] ", blockRangeMsg) + } + launchedWorker++ + log.Debugf("producer: launch_worker: Launched worker for segment %s, num_workers_in_this_iteration: %d", blockRangeMsg, launchedWorker) + l.syncStatus.OnStartedNewWorker(*br) + } + log.Infof("producer: launch_worker: num of launched workers: %d result: %s status_comm:%s", launchedWorker, accDebugStr, l.outgoingPackageStatusDebugString()) + + return launchedWorker, nil +} + +func (l *l1RollupInfoProducer) outgoingPackageStatusDebugString() string { + return fmt.Sprintf("outgoint_channel[%d/%d], filter:%s workers:%s", len(l.outgoingChannel), cap(l.outgoingChannel), l.filterToSendOrdererResultsToConsumer.ToStringBrief(), l.workers.String()) +} + +func (l *l1RollupInfoProducer) renewLastBlockOnL1IfNeeded(reason string) { + elapsed := time.Since(l.timeLastBLockOnL1) + ttl := l.ttlOfLastBlockOnL1() + oldBlock := l.syncStatus.GetLastBlockOnL1() + if elapsed > ttl { + log.Infof("producer: Need a new value for Last Block On L1, doing the request reason:%s", reason) + result := l.workers.requestLastBlockWithRetries(l.ctxWithCancel.ctx, l.cfg.timeoutForRequestLastBlockOnL1, l.cfg.numOfAllowedRetriesForRequestLastBlockOnL1) + log.Infof("producer: Need a new value for Last Block On L1, doing the request old_block:%v -> new block:%v", oldBlock, result.result.block) + if result.generic.err != nil { + log.Error(result.generic.err) + return + } + l.onNewLastBlock(result.result.block) + } +} + +func (l *l1RollupInfoProducer) onResponseRollupInfo(result responseRollupInfoByBlockRange) { + log.Infof("producer: Received responseRollupInfoByBlockRange: %s", result.toStringBrief()) + if l.getStatus() == producerReseting { + log.Infof("producer: Ignoring result because is in reseting status: %s", result.toStringBrief()) + return + } + l.statistics.onResponseRollupInfo(result) + isOk := (result.generic.err == nil) + var 
highestBlockNumberInResponse uint64 = invalidBlockNumber + if isOk { + highestBlockNumberInResponse = result.getHighestBlockNumberInResponse() + } + if !l.syncStatus.OnFinishWorker(result.result.blockRange, isOk, highestBlockNumberInResponse) { + log.Infof("producer: Ignoring result because the range is not longer valid: %s", result.toStringBrief()) + return + } + if isOk { + outgoingPackages := l.filterToSendOrdererResultsToConsumer.Filter(*newL1SyncMessageData(result.result)) + log.Infof("producer: filtered Br[%s/%d], outgoing %d filter_status:%s", result.result.blockRange.String(), result.result.getHighestBlockNumberInResponse(), len(outgoingPackages), l.filterToSendOrdererResultsToConsumer.ToStringBrief()) + l.sendPackages(outgoingPackages) + } else { + if errors.Is(result.generic.err, context.Canceled) { + log.Infof("producer: Error while trying to get rollup info by block range: %v", result.generic.err) + } else { + log.Warnf("producer: Error while trying to get rollup info by block range: %v", result.generic.err) + } + } +} + +func (l *l1RollupInfoProducer) sendPackages(outgoingPackages []l1SyncMessage) { + for _, pkg := range outgoingPackages { + log.Infof("producer: Sending results [data] to consumer:%s: status_comm:%s", pkg.toStringBrief(), l.outgoingPackageStatusDebugString()) + l.outgoingChannel <- pkg + } +} + +// https://stackoverflow.com/questions/4220745/how-to-select-for-input-on-a-dynamic-list-of-channels-in-go diff --git a/synchronizer/l1_rollup_info_producer_statistics.go b/synchronizer/l1_rollup_info_producer_statistics.go new file mode 100644 index 0000000000..90375557f8 --- /dev/null +++ b/synchronizer/l1_rollup_info_producer_statistics.go @@ -0,0 +1,90 @@ +package synchronizer + +import ( + "fmt" + "time" + + "github.com/0xPolygonHermez/zkevm-node/synchronizer/metrics" +) + +// This object keep track of the statistics of the process, to be able to estimate the ETA +type l1RollupInfoProducerStatistics struct { + initialBlockNumber uint64 + 
lastBlockNumber uint64 + numRollupInfoOk uint64 + numRollupInfoErrors uint64 + numRetrievedBlocks uint64 + startTime time.Time + lastShowUpTime time.Time + accumulatedTimeProcessingRollup time.Duration + timeProvider TimeProvider +} + +func newRollupInfoProducerStatistics(startingBlockNumber uint64, timeProvider TimeProvider) l1RollupInfoProducerStatistics { + return l1RollupInfoProducerStatistics{ + initialBlockNumber: startingBlockNumber, + startTime: timeProvider.Now(), + timeProvider: timeProvider, + accumulatedTimeProcessingRollup: time.Duration(0), + } +} + +func (l *l1RollupInfoProducerStatistics) reset(startingBlockNumber uint64) { + l.initialBlockNumber = startingBlockNumber + l.startTime = l.timeProvider.Now() + l.numRollupInfoOk = 0 + l.numRollupInfoErrors = 0 + l.numRetrievedBlocks = 0 + l.lastShowUpTime = l.timeProvider.Now() +} + +func (l *l1RollupInfoProducerStatistics) updateLastBlockNumber(lastBlockNumber uint64) { + l.lastBlockNumber = lastBlockNumber +} + +func (l *l1RollupInfoProducerStatistics) onResponseRollupInfo(result responseRollupInfoByBlockRange) { + metrics.ReadL1DataTime(result.generic.duration) + isOk := (result.generic.err == nil) + if isOk { + l.numRollupInfoOk++ + l.numRetrievedBlocks += uint64(result.result.blockRange.len()) + l.accumulatedTimeProcessingRollup += result.generic.duration + } else { + l.numRollupInfoErrors++ + } +} + +func (l *l1RollupInfoProducerStatistics) getStatisticsDebugString() string { + numTotalOfBlocks := l.lastBlockNumber - l.initialBlockNumber + if l.numRetrievedBlocks == 0 { + return "N/A" + } + now := l.timeProvider.Now() + elapsedTime := now.Sub(l.startTime) + eta := l.getEstimatedTimeOfArrival() + percent := l.getPercent() + blocksPerSeconds := l.getBlocksPerSecond(elapsedTime) + return fmt.Sprintf(" EstimatedTimeOfArrival: %s percent:%2.2f blocks_per_seconds:%2.2f pending_block:%v/%v num_errors:%v", + eta, percent, blocksPerSeconds, l.numRetrievedBlocks, numTotalOfBlocks, l.numRollupInfoErrors) +} + 
+func (l *l1RollupInfoProducerStatistics) getEstimatedTimeOfArrival() time.Duration { + numTotalOfBlocks := l.lastBlockNumber - l.initialBlockNumber + if l.numRetrievedBlocks == 0 { + return time.Duration(0) + } + elapsedTime := time.Since(l.startTime) + eta := time.Duration(float64(elapsedTime) / float64(l.numRetrievedBlocks) * float64(numTotalOfBlocks-l.numRetrievedBlocks)) + return eta +} + +func (l *l1RollupInfoProducerStatistics) getPercent() float64 { + numTotalOfBlocks := l.lastBlockNumber - l.initialBlockNumber + percent := float64(l.numRetrievedBlocks) / float64(numTotalOfBlocks) * conversionFactorPercentage + return percent +} + +func (l *l1RollupInfoProducerStatistics) getBlocksPerSecond(elapsedTime time.Duration) float64 { + blocksPerSeconds := float64(l.numRetrievedBlocks) / float64(elapsedTime.Seconds()) + return blocksPerSeconds +} diff --git a/synchronizer/l1_rollup_info_producer_statistics_test.go b/synchronizer/l1_rollup_info_producer_statistics_test.go new file mode 100644 index 0000000000..467ad96967 --- /dev/null +++ b/synchronizer/l1_rollup_info_producer_statistics_test.go @@ -0,0 +1,31 @@ +package synchronizer + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestProducerStatisticsPercent(t *testing.T) { + sut := newRollupInfoProducerStatistics(100, &mockTimerProvider{}) + sut.updateLastBlockNumber(200) + require.Equal(t, float64(0.0), sut.getPercent()) + + sut.onResponseRollupInfo(responseRollupInfoByBlockRange{ + generic: genericResponse{ + err: nil, + duration: 0, + }, + result: &rollupInfoByBlockRangeResult{ + blockRange: blockRange{ + fromBlock: 101, + toBlock: 200, + }, + }, + }) + + require.Equal(t, float64(100.0), sut.getPercent()) + + sut.reset(100) + require.Equal(t, float64(0.0), sut.getPercent()) +} diff --git a/synchronizer/l1_rollup_info_producer_test.go b/synchronizer/l1_rollup_info_producer_test.go new file mode 100644 index 0000000000..5a3e0abbcb --- /dev/null +++ 
b/synchronizer/l1_rollup_info_producer_test.go @@ -0,0 +1,139 @@ +package synchronizer + +import ( + "context" + "math/big" + "testing" + "time" + + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestExploratoryL1Get(t *testing.T) { + t.Skip("Exploratory test") + sut, ethermans, _ := setup(t) + etherman := ethermans[0] + header := new(ethTypes.Header) + header.Number = big.NewInt(150) + etherman. + On("HeaderByNumber", mock.Anything, mock.Anything). + Return(header, nil). + Once() + + err := sut.initialize(context.Background()) + require.NoError(t, err) + _, err = sut.launchWork() + require.NoError(t, err) +} + +func TestGivenNeedSyncWhenStartThenAskForRollupInfo(t *testing.T) { + sut, ethermans, _ := setup(t) + expectedForGettingL1LastBlock(t, ethermans[0], 150) + expectedRollupInfoCalls(t, ethermans[1], 1) + err := sut.initialize(context.Background()) + require.NoError(t, err) + _, err = sut.launchWork() + require.NoError(t, err) + var waitDuration = time.Duration(0) + + sut.step(&waitDuration) + sut.step(&waitDuration) + sut.workers.waitFinishAllWorkers() +} + +func TestGivenNoNeedSyncWhenStartsSendAndEventOfSynchronized(t *testing.T) { + sut, ethermans, ch := setup(t) + etherman := ethermans[0] + // Our last block is 100 in DB and it returns 100 as last block on L1 + // so is synchronized + expectedForGettingL1LastBlock(t, etherman, 100) + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + defer cancel() + err := sut.Start(ctx) + require.NoError(t, err) + // read everything in channel ch + for len(ch) > 0 { + data := <-ch + if data.ctrlIsValid == true && data.ctrl.event == eventProducerIsFullySynced { + return // ok + } + } + require.Fail(t, "should not have send a eventProducerIsFullySynced in channel") +} + +// Given: Need to synchronize +// When: Ask for last block +// Then: Ask for rollupinfo +func 
TestGivenNeedSyncWhenReachLastBlockThenSendAndEventOfSynchronized(t *testing.T) { + sut, ethermans, ch := setup(t) + // Our last block is 100 in DB and it returns 101 as last block on L1 + // so it need to retrieve 1 rollupinfo + expectedForGettingL1LastBlock(t, ethermans[0], 101) + expectedRollupInfoCalls(t, ethermans[1], 1) + + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + defer cancel() + res := sut.Start(ctx) + require.NoError(t, res) + + // read everything in channel ch + for len(ch) > 0 { + data := <-ch + if data.ctrlIsValid == true && data.ctrl.event == eventProducerIsFullySynced { + return // ok + } + } + require.Fail(t, "should not have send a eventProducerIsFullySynced in channel") +} + +func TestGivenNoSetFirstBlockWhenCallStartThenDontReturnError(t *testing.T) { + sut, ethermans, _ := setupNoResetCall(t) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + cancel() + expectedForGettingL1LastBlock(t, ethermans[0], 101) + err := sut.Start(ctx) + require.NoError(t, err) +} + +func setup(t *testing.T) (*l1RollupInfoProducer, []*ethermanMock, chan l1SyncMessage) { + sut, ethermansMock, resultChannel := setupNoResetCall(t) + sut.Reset(100) + return sut, ethermansMock, resultChannel +} + +func setupNoResetCall(t *testing.T) (*l1RollupInfoProducer, []*ethermanMock, chan l1SyncMessage) { + ethermansMock := []*ethermanMock{newEthermanMock(t), newEthermanMock(t)} + ethermans := []EthermanInterface{ethermansMock[0], ethermansMock[1]} + resultChannel := make(chan l1SyncMessage, 100) + cfg := configProducer{ + syncChunkSize: 100, + ttlOfLastBlockOnL1: time.Second, + timeOutMainLoop: time.Second, + } + + sut := newL1DataRetriever(cfg, ethermans, resultChannel) + return sut, ethermansMock, resultChannel +} + +func expectedForGettingL1LastBlock(t *testing.T, etherman *ethermanMock, blockNumber int64) { + header := new(ethTypes.Header) + header.Number = big.NewInt(blockNumber) + etherman. 
+ On("HeaderByNumber", mock.Anything, mock.Anything). + Return(header, nil). + Maybe() +} + +func expectedRollupInfoCalls(t *testing.T, etherman *ethermanMock, calls int) { + etherman. + On("GetRollupInfoByBlockRange", mock.Anything, mock.Anything, mock.Anything). + Return(nil, nil, nil). + Times(calls) + + etherman. + On("EthBlockByNumber", mock.Anything, mock.Anything). + Return(nil, nil). + Maybe() +} diff --git a/synchronizer/l1_sync_orchestration.go b/synchronizer/l1_sync_orchestration.go new file mode 100644 index 0000000000..604b4a42a8 --- /dev/null +++ b/synchronizer/l1_sync_orchestration.go @@ -0,0 +1,189 @@ +package synchronizer + +import ( + "context" + "errors" + "sync" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" +) + +/* +This object is used to coordinate the producer and the consumer process. +*/ +type l1RollupProducerInterface interface { + // Start launch a new process to retrieve data from L1 + Start(ctx context.Context) error + // Stop cancel current process + Stop() + // Abort execution + Abort() + // Reset set a new starting point and cancel current process if any + Reset(startingBlockNumber uint64) +} + +type l1RollupConsumerInterface interface { + Start(ctx context.Context, lastEthBlockSynced *state.Block) error + StopAfterProcessChannelQueue() + GetLastEthBlockSynced() (state.Block, bool) + // Reset set a new starting point + Reset(startingBlockNumber uint64) +} + +type l1SyncOrchestration struct { + mutex sync.Mutex + producer l1RollupProducerInterface + consumer l1RollupConsumerInterface + // Producer is running? + producerRunning bool + consumerRunning bool + // The orchestrator is running? 
+ isRunning bool + wg sync.WaitGroup + chProducer chan error + chConsumer chan error + ctxParent context.Context + ctxWithCancel contextWithCancel +} + +const ( + errMissingLastEthBlockSynced = "orchestration: missing last eth block synced" +) + +func newL1SyncOrchestration(ctx context.Context, producer l1RollupProducerInterface, consumer l1RollupConsumerInterface) *l1SyncOrchestration { + res := l1SyncOrchestration{ + producer: producer, + consumer: consumer, + producerRunning: false, + consumerRunning: false, + chProducer: make(chan error, 1), + chConsumer: make(chan error, 1), + ctxParent: ctx, + } + res.ctxWithCancel.createWithCancel(ctx) + return &res +} + +func (l *l1SyncOrchestration) reset(startingBlockNumber uint64) { + log.Warnf("orchestration: Reset L1 sync process to blockNumber %d", startingBlockNumber) + if l.isRunning { + log.Infof("orchestration: reset(%d) is going to reset producer", startingBlockNumber) + } + l.consumer.Reset(startingBlockNumber) + l.producer.Reset(startingBlockNumber) + // If orchestrator is running then producer is going to be started by orchestrate() select function when detects that producer has finished +} + +func (l *l1SyncOrchestration) start(lastEthBlockSynced *state.Block) (*state.Block, error) { + l.isRunning = true + l.launchProducer(l.ctxWithCancel.ctx, l.chProducer, &l.wg) + l.launchConsumer(l.ctxWithCancel.ctx, lastEthBlockSynced, l.chConsumer, &l.wg) + return l.orchestrate(l.ctxParent, &l.wg, l.chProducer, l.chConsumer) +} + +func (l *l1SyncOrchestration) abort() { + l.producer.Abort() + l.ctxWithCancel.cancel() + l.wg.Wait() + l.ctxWithCancel.createWithCancel(l.ctxParent) +} + +func (l *l1SyncOrchestration) isProducerRunning() bool { + l.mutex.Lock() + defer l.mutex.Unlock() + return l.producerRunning +} + +func (l *l1SyncOrchestration) launchProducer(ctx context.Context, chProducer chan error, wg *sync.WaitGroup) { + l.mutex.Lock() + defer l.mutex.Unlock() + if !l.producerRunning { + if wg != nil { + wg.Add(1) + } 
+ // Start producer: L1DataRetriever from L1 + l.producerRunning = true + + go func() { + if wg != nil { + defer wg.Done() + } + log.Infof("orchestration: starting producer") + err := l.producer.Start(ctx) + if err != nil { + log.Warnf("orchestration: producer error . Error: %s", err) + } + l.mutex.Lock() + l.producerRunning = false + l.mutex.Unlock() + log.Infof("orchestration: producer finished") + chProducer <- err + }() + } +} + +func (l *l1SyncOrchestration) launchConsumer(ctx context.Context, lastEthBlockSynced *state.Block, chConsumer chan error, wg *sync.WaitGroup) { + l.mutex.Lock() + if l.consumerRunning { + l.mutex.Unlock() + return + } + l.consumerRunning = true + l.mutex.Unlock() + + wg.Add(1) + go func() { + defer wg.Done() + log.Infof("orchestration: starting consumer") + err := l.consumer.Start(ctx, lastEthBlockSynced) + l.mutex.Lock() + l.consumerRunning = false + l.mutex.Unlock() + if err != nil { + log.Warnf("orchestration: consumer error. Error: %s", err) + } + log.Infof("orchestration: consumer finished") + chConsumer <- err + }() +} + +func (l *l1SyncOrchestration) orchestrate(ctx context.Context, wg *sync.WaitGroup, chProducer chan error, chConsumer chan error) (*state.Block, error) { + // Wait a cond_var for known if consumer have finish + var err error + done := false + for !done { + select { + case <-ctx.Done(): + log.Warnf("orchestration: context cancelled") + done = true + case err = <-chProducer: + // Producer has finished + log.Infof("orchestration: producer has finished. Error: %s, stopping consumer", err) + l.consumer.StopAfterProcessChannelQueue() + case err = <-chConsumer: + if err != nil && err != errAllWorkersBusy { + log.Warnf("orchestration: consumer have finished with Error: %s", err) + } else { + log.Info("orchestration: consumer has finished. 
No error") + } + done = true + } + } + l.isRunning = false + retBlock, ok := l.consumer.GetLastEthBlockSynced() + + if err == nil { + if ok { + log.Infof("orchestration: finished L1 sync orchestration With LastBlock. Last block synced: %d err:nil", retBlock.BlockNumber) + return &retBlock, nil + } else { + err := errors.New(errMissingLastEthBlockSynced) + log.Warnf("orchestration: finished L1 sync orchestration No LastBlock. Last block synced: %s err:%s", "", err) + return nil, err + } + } else { + log.Warnf("orchestration: finished L1 sync orchestration With Error. Last block synced: %s err:%s", "IGNORED (nil)", err) + return nil, err + } +} diff --git a/synchronizer/l1_sync_orchestration_test.go b/synchronizer/l1_sync_orchestration_test.go new file mode 100644 index 0000000000..28f03802ff --- /dev/null +++ b/synchronizer/l1_sync_orchestration_test.go @@ -0,0 +1,50 @@ +package synchronizer + +import ( + "context" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type mocksOrgertration struct { + producer *l1RollupProducerInterfaceMock + consumer *l1RollupConsumerInterfaceMock +} + +func TestGivenOrquestrationWhenHappyPathThenReturnsBlockAndNoErrorAndProducerIsRunning(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + sut, mocks := setupOrchestrationTest(t, ctxTimeout) + mocks.producer.On("Reset", mock.Anything).Return() + mocks.producer.On("Start", mock.Anything).Return(func(context.Context) error { + time.Sleep(time.Second * 2) + return nil + }) + block := state.Block{} + mocks.consumer.On("Reset", mock.Anything).Return() + mocks.consumer.On("GetLastEthBlockSynced").Return(block, true) + mocks.consumer.On("Start", mock.Anything, mock.Anything).Return(func(context.Context, *state.Block) error { + time.Sleep(time.Millisecond * 100) + return nil + }) + sut.reset(123) + returnedBlock, err := 
sut.start(&block) + require.NoError(t, err) + require.Equal(t, block, *returnedBlock) + require.Equal(t, true, sut.producerRunning) + require.Equal(t, false, sut.consumerRunning) +} + +func setupOrchestrationTest(t *testing.T, ctx context.Context) (*l1SyncOrchestration, mocksOrgertration) { + producer := newL1RollupProducerInterfaceMock(t) + consumer := newL1RollupConsumerInterfaceMock(t) + + return newL1SyncOrchestration(ctx, producer, consumer), mocksOrgertration{ + producer: producer, + consumer: consumer, + } +} diff --git a/synchronizer/l1_syncstatus.go b/synchronizer/l1_syncstatus.go new file mode 100644 index 0000000000..109de2f7a1 --- /dev/null +++ b/synchronizer/l1_syncstatus.go @@ -0,0 +1,332 @@ +package synchronizer + +import ( + "errors" + "fmt" + "sync" + + "github.com/0xPolygonHermez/zkevm-node/log" +) + +const ( + invalidLastBlock = 0 +) + +var ( + errSyncChunkSizeMustBeGreaterThanZero = errors.New("SyncChunkSize must be greater than 0") + errStartingBlockNumberMustBeDefined = errors.New("startingBlockNumber must be defined, call producer ResetAndStop() to set a new starting point") +) + +type syncStatus struct { + mutex sync.Mutex + lastBlockStoreOnStateDB uint64 + highestBlockRequested uint64 + lastBlockOnL1 uint64 + amountOfBlocksInEachRange uint64 + // This ranges are being processed + processingRanges liveBlockRanges + // This ranges need to be retried because the last execution was an error + errorRanges liveBlockRanges +} + +func (s *syncStatus) String() string { + return fmt.Sprintf(" lastBlockStoreOnStateDB: %s, highestBlockRequested:%s, lastBlockOnL1: %s, amountOfBlocksInEachRange: %d, processingRanges: %s, errorRanges: %s", + blockNumberToString(s.lastBlockStoreOnStateDB), + blockNumberToString(s.highestBlockRequested), + blockNumberToString(s.lastBlockOnL1), s.amountOfBlocksInEachRange, s.processingRanges.toStringBrief(), s.errorRanges.toStringBrief()) +} + +func (s *syncStatus) toString() string { + brief := s.String() + brief += 
fmt.Sprintf(" processingRanges:{ %s }", s.processingRanges.String()) + brief += fmt.Sprintf(" errorRanges:{ %s }", s.errorRanges.String()) + return brief +} + +// newSyncStatus create a new syncStatus object +// lastBlockStoreOnStateDB: last block stored on stateDB +// amountOfBlocksInEachRange: amount of blocks to be retrieved in each range +// lastBlockTTLDuration: TTL of the last block on L1 (it could be ttlOfLastBlockInfinity that means that is no renewed) +func newSyncStatus(lastBlockStoreOnStateDB uint64, amountOfBlocksInEachRange uint64) *syncStatus { + return &syncStatus{ + lastBlockStoreOnStateDB: lastBlockStoreOnStateDB, + highestBlockRequested: lastBlockStoreOnStateDB, + amountOfBlocksInEachRange: amountOfBlocksInEachRange, + lastBlockOnL1: invalidLastBlock, + processingRanges: newLiveBlockRanges(), + } +} +func (s *syncStatus) Reset(lastBlockStoreOnStateDB uint64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.lastBlockStoreOnStateDB = lastBlockStoreOnStateDB + s.highestBlockRequested = lastBlockStoreOnStateDB + s.processingRanges = newLiveBlockRanges() + //s.lastBlockOnL1 = invalidLastBlock +} + +func (s *syncStatus) GetLastBlockOnL1() uint64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.lastBlockOnL1 +} + +// All pending blocks have been requested or are currently being requested +func (s *syncStatus) HaveRequiredAllBlocksToBeSynchronized() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.lastBlockOnL1 <= s.highestBlockRequested +} + +// IsNodeFullySynchronizedWithL1 returns true if the node is fully synchronized with L1 +// it means that all blocks until the last block on L1 are requested (maybe not finish yet) and there are no pending errors +func (s *syncStatus) IsNodeFullySynchronizedWithL1() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.lastBlockOnL1 == invalidLastBlock { + log.Warnf("Can't decide if it's fully synced because last block on L1 is no valid: %d", s.lastBlockOnL1) + return false + } + + if 
s.lastBlockOnL1 <= s.highestBlockRequested && s.errorRanges.len() == 0 && s.processingRanges.len() == 0 { + log.Debug("No blocks to ask, we have requested and responsed all blocks from L1!") + return true + } + return false +} + +func (s *syncStatus) GetNextRangeOnlyRetries() *blockRange { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.getNextRangeOnlyRetriesUnsafe() +} + +func (s *syncStatus) getNextRangeOnlyRetriesUnsafe() *blockRange { + // Check if there are any range that need to be retried + blockRangeToRetry, err := s.errorRanges.getFirstBlockRange() + if err == nil { + if blockRangeToRetry.toBlock == latestBlockNumber { + // If is a latestBlockNumber must be discarded + log.Debugf("Discarding error block range: %s because it's a latestBlockNumber", blockRangeToRetry.String()) + err := s.errorRanges.removeBlockRange(blockRangeToRetry) + if err != nil { + log.Errorf("syncstatus: error removing an error br: %s current_status:%s err:%s", blockRangeToRetry.String(), s.String(), err.Error()) + } + return nil + } + return &blockRangeToRetry + } + return nil +} + +func (s *syncStatus) getHighestBlockRequestedUnsafe() uint64 { + res := invalidBlockNumber + for _, r := range s.processingRanges.ranges { + if r.blockRange.toBlock > res { + res = r.blockRange.toBlock + } + } + + for _, r := range s.errorRanges.ranges { + if r.blockRange.toBlock > res { + res = r.blockRange.toBlock + } + } + + return res +} + +// GetNextRange: if there are pending work it returns the next block to ask for +// +// it could be a retry from a previous error or a new range +func (s *syncStatus) GetNextRange() *blockRange { + s.mutex.Lock() + defer s.mutex.Unlock() + // Check if there are any range that need to be retried + blockRangeToRetry := s.getNextRangeOnlyRetriesUnsafe() + if blockRangeToRetry != nil { + return blockRangeToRetry + } + + if s.lastBlockOnL1 == invalidLastBlock { + log.Debug("Last block is no valid: ", s.lastBlockOnL1) + return nil + } + if s.lastBlockOnL1 <= 
s.highestBlockRequested { + log.Debug("No blocks to ask, we have requested all blocks from L1!") + return nil + } + highestBlockInProcess := s.getHighestBlockRequestedUnsafe() + if highestBlockInProcess == latestBlockNumber { + log.Debug("No blocks to ask, we have requested all blocks from L1!") + return nil + } + br := getNextBlockRangeFromUnsafe(max(s.lastBlockStoreOnStateDB, s.getHighestBlockRequestedUnsafe()), s.lastBlockOnL1, s.amountOfBlocksInEachRange) + err := br.isValid() + if err != nil { + log.Error(s.toString()) + log.Fatal(err) + } + return br +} + +func (s *syncStatus) OnStartedNewWorker(br blockRange) { + s.mutex.Lock() + defer s.mutex.Unlock() + // Try to remove from error Blocks + err := s.errorRanges.removeBlockRange(br) + if err == nil { + log.Infof("Retrying ranges: %s ", br.String()) + } + err = s.processingRanges.addBlockRange(br) + if err != nil { + log.Error(s.toString()) + log.Fatal(err) + } + if br.toBlock == latestBlockNumber { + s.highestBlockRequested = s.lastBlockOnL1 + } else if br.toBlock > s.highestBlockRequested { + s.highestBlockRequested = br.toBlock + } +} + +// return true is a valid blockRange +func (s *syncStatus) OnFinishWorker(br blockRange, successful bool, highestBlockNumberInResponse uint64) bool { + s.mutex.Lock() + defer s.mutex.Unlock() + log.Debugf("onFinishWorker(br=%s, successful=%v) initial_status: %s", br.String(), successful, s.String()) + // The work have been done, remove the range from pending list + // also move the s.lastBlockStoreOnStateDB to the end of the range if needed + err := s.processingRanges.removeBlockRange(br) + if err != nil { + log.Infof("Unexpected finished block_range %s, ignoring it: %s", br.String(), err) + return false + } + + if successful { + // If this range is the first in the window, we need to move the s.lastBlockStoreOnStateDB to next range + // example: + // lbs = 99 + // pending = [100, 200], [201, 300], [301, 400] + // if process the [100,200] -> lbs = 200 + if 
highestBlockNumberInResponse != invalidBlockNumber && highestBlockNumberInResponse > s.lastBlockStoreOnStateDB { + newValue := highestBlockNumberInResponse + log.Debugf("Moving s.lastBlockStoreOnStateDB from %d to %d (diff %d)", s.lastBlockStoreOnStateDB, newValue, newValue-s.lastBlockStoreOnStateDB) + s.lastBlockStoreOnStateDB = newValue + } + } else { + log.Infof("Range %s was not successful, adding to errorRanges to be retried", br.String()) + err := s.errorRanges.addBlockRange(br) + if err != nil { + log.Error(s.toString()) + log.Fatal(err) + } + } + log.Debugf("onFinishWorker final_status: %s", s.String()) + return true +} + +func getNextBlockRangeFromUnsafe(lastBlockInState uint64, lastBlockInL1 uint64, amountOfBlocksInEachRange uint64) *blockRange { + fromBlock := lastBlockInState + 1 + toBlock := min(lastBlockInL1, fromBlock+amountOfBlocksInEachRange) + if toBlock == lastBlockInL1 { + toBlock = latestBlockNumber + } + return &blockRange{fromBlock: fromBlock, toBlock: toBlock} +} + +func (s *syncStatus) setLastBlockOnL1(lastBlock uint64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.setLastBlockOnL1Unsafe(lastBlock) +} + +func (s *syncStatus) setLastBlockOnL1Unsafe(lastBlock uint64) { + s.lastBlockOnL1 = lastBlock +} + +type onNewLastBlockResponse struct { + // New fullRange of pending blocks + fullRange blockRange + // New extendedRange of pending blocks due to new last block + extendedRange *blockRange +} + +func (n *onNewLastBlockResponse) toString() string { + res := fmt.Sprintf("fullRange: %s", n.fullRange.String()) + if n.extendedRange != nil { + res += fmt.Sprintf(" extendedRange: %s", n.extendedRange.String()) + } else { + res += " extendedRange: nil" + } + return res +} + +func (s *syncStatus) OnNewLastBlockOnL1(lastBlock uint64) onNewLastBlockResponse { + s.mutex.Lock() + defer s.mutex.Unlock() + log.Debugf("onNewLastBlockOnL1(%v) initial_status: %s", lastBlock, s.String()) + response := onNewLastBlockResponse{ + fullRange: blockRange{fromBlock: 
s.lastBlockStoreOnStateDB, toBlock: lastBlock}, + } + + if s.lastBlockOnL1 == invalidLastBlock { + // No previous last block + response.extendedRange = &blockRange{ + fromBlock: s.lastBlockStoreOnStateDB, + toBlock: lastBlock, + } + s.setLastBlockOnL1Unsafe(lastBlock) + return response + } + oldLastBlock := s.lastBlockOnL1 + if lastBlock > oldLastBlock { + response.extendedRange = &blockRange{ + fromBlock: oldLastBlock + 1, + toBlock: lastBlock, + } + s.setLastBlockOnL1Unsafe(lastBlock) + return response + } + if lastBlock == oldLastBlock { + response.extendedRange = nil + s.setLastBlockOnL1Unsafe(lastBlock) + return response + } + if lastBlock < oldLastBlock { + log.Warnf("new block [%d] is less than old block [%d]!", lastBlock, oldLastBlock) + lastBlock = oldLastBlock + response.fullRange = blockRange{fromBlock: s.lastBlockStoreOnStateDB, toBlock: lastBlock} + return response + } + log.Debugf("onNewLastBlockOnL1(%d) final_status: %s", lastBlock, s.String()) + return response +} + +func (s *syncStatus) DoesItHaveAllTheNeedDataToWork() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.lastBlockOnL1 != invalidLastBlock && s.lastBlockStoreOnStateDB != invalidBlockNumber +} + +func (s *syncStatus) Verify() error { + if s.amountOfBlocksInEachRange == 0 { + return errSyncChunkSizeMustBeGreaterThanZero + } + if s.lastBlockStoreOnStateDB == invalidBlockNumber { + return errStartingBlockNumberMustBeDefined + } + return nil +} + +// It returns if this block is beyond Finalized (so it could be reorg) +// If blockNumber == invalidBlockNumber then it uses the highestBlockRequested (the last block requested) +func (s *syncStatus) BlockNumberIsInsideUnsafeArea(blockNumber uint64) bool { + s.mutex.Lock() + defer s.mutex.Unlock() + if blockNumber == invalidBlockNumber { + blockNumber = s.highestBlockRequested + } + distanceInBlockToLatest := s.lastBlockOnL1 - blockNumber + return distanceInBlockToLatest < maximumBlockDistanceFromLatestToFinalized +} diff --git 
a/synchronizer/l1_syncstatus_test.go b/synchronizer/l1_syncstatus_test.go new file mode 100644 index 0000000000..60cfb13a4c --- /dev/null +++ b/synchronizer/l1_syncstatus_test.go @@ -0,0 +1,281 @@ +package synchronizer + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGivenObjectWithDataWhenResetThenDontForgetLastBlockOnL1AndgetNextRangeReturnsNil(t *testing.T) { + s := newSyncStatus(1617, 10) + s.setLastBlockOnL1(1982) + s.OnStartedNewWorker(blockRange{fromBlock: 1820, toBlock: 1920}) + + s.Reset(1234) + + // lose lastBlockOnL1 so it returns a nil + br := s.GetNextRange() + require.Equal(t, *br, blockRange{fromBlock: 1235, toBlock: 1245}) +} + +func TestGivenObjectWithDataWhenResetAndSetLastBlockOnL1ThenGetNextRangeReturnsNextRange(t *testing.T) { + s := newSyncStatus(1617, 10) + s.setLastBlockOnL1(1982) + s.OnStartedNewWorker(blockRange{fromBlock: 1820, toBlock: 1920}) + + s.Reset(1234) + s.setLastBlockOnL1(1982) + // lose lastBlockOnL1 so it returns a nil + br := s.GetNextRange() + require.Equal(t, *br, blockRange{fromBlock: 1235, toBlock: 1245}) +} + +// Only could be 1 request to latest block +func TestGivenSychronizationWithThereAreARequestToLatestBlockWhenAskForNewBlockRangeItResponseNil(t *testing.T) { + s := newSyncStatus(1617, 10) + s.setLastBlockOnL1(1982) + s.OnStartedNewWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}) + s.setLastBlockOnL1(1983) + // Only could be 1 request to latest block + br := s.GetNextRange() + require.Nil(t, br) + s.OnFinishWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}, true, uint64(1984)) + // We have a new segment to ask for because the last block have moved to 1984 + br = s.GetNextRange() + require.Equal(t, blockRange{fromBlock: 1985, toBlock: latestBlockNumber}, *br) +} + +func TestGivenSychronizationIAliveWhenWeAreInLatestBlockThenResponseNoNewBlockRange(t *testing.T) { + s := newSyncStatus(1819, 10) + s.setLastBlockOnL1(1823) + br := s.GetNextRange() + 
require.Equal(t, blockRange{fromBlock: 1820, toBlock: latestBlockNumber}, *br) + s.OnStartedNewWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}) + s.setLastBlockOnL1(1824) + // Only could be 1 request to latest block + br = s.GetNextRange() + require.Nil(t, br) + s.OnFinishWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}, true, invalidBlockNumber) + // We have a new segment to ask for because the last block have moved to 1984 + br = s.GetNextRange() + require.Equal(t, blockRange{fromBlock: 1820, toBlock: latestBlockNumber}, *br) + s.OnStartedNewWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}) + s.OnFinishWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}, true, 1830) + // We have the latest block 1830, so we don't need to ask for something els until we update the last block on L1 (setLastBlockOnL1) + br = s.GetNextRange() + require.Nil(t, br) +} +func TestGivenThereAreALatestBlockErrorRangeIfMoveLastBlockBeyoundChunkThenDiscardErrorBR(t *testing.T) { + s := newSyncStatus(1819, 10) + s.setLastBlockOnL1(1823) + br := s.GetNextRange() + require.Equal(t, blockRange{fromBlock: 1820, toBlock: latestBlockNumber}, *br) + s.OnStartedNewWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}) + s.setLastBlockOnL1(1824) + // Only could be 1 request to latest block + br = s.GetNextRange() + require.Nil(t, br) + s.OnFinishWorker(blockRange{fromBlock: 1820, toBlock: latestBlockNumber}, false, invalidBlockNumber) + s.setLastBlockOnL1(1850) + // We have a new segment to ask for because the last block have moved to 1984 + br = s.GetNextRange() + require.Equal(t, blockRange{fromBlock: 1820, toBlock: 1830}, *br) +} + +func TestFirstRunWithPendingBlocksToRetrieve(t *testing.T) { + tcs := []struct { + description string + lastStoredBlock uint64 + lastL1Block uint64 + chuncks uint64 + expectedBlockRangeNil bool + expectedBlockRange blockRange + }{ + {"normal", 100, 150, 10, false, blockRange{fromBlock: 101, toBlock: 111}}, + 
{"sync", 150, 150, 50, true, blockRange{}}, + {"less_chunk", 145, 150, 100, false, blockRange{fromBlock: 146, toBlock: latestBlockNumber}}, + {"1wide_range", 149, 150, 100, false, blockRange{fromBlock: 150, toBlock: latestBlockNumber}}, + } + for _, tc := range tcs { + s := newSyncStatus(tc.lastStoredBlock, tc.chuncks) + s.setLastBlockOnL1(tc.lastL1Block) + br := s.GetNextRange() + if tc.expectedBlockRangeNil { + require.Nil(t, br, tc.description) + } else { + require.NotNil(t, br, tc.description) + require.Equal(t, *br, tc.expectedBlockRange, tc.description) + } + } +} + +func TestWhenReceiveAndNoStartedBlockRangeThenIgnore(t *testing.T) { + s := newSyncStatus(1617, 10) + s.setLastBlockOnL1(1982) + res := s.OnFinishWorker(blockRange{fromBlock: 1618, toBlock: 1628}, true, uint64(1628)) + require.False(t, res) + br := s.GetNextRange() + require.Equal(t, blockRange{fromBlock: 1618, toBlock: 1628}, *br) +} + +func TestWhenAllRequestAreSendThenGetNextRangeReturnsNil(t *testing.T) { + s := newSyncStatus(1617, 10) + s.setLastBlockOnL1(1982) + s.OnStartedNewWorker(blockRange{fromBlock: 1820, toBlock: 1920}) + s.OnStartedNewWorker(blockRange{fromBlock: 1921, toBlock: 1982}) + br := s.GetNextRange() + require.Nil(t, br) +} + +func TestSecondRunWithPendingBlocksToRetrieve(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(150) + s.OnStartedNewWorker(blockRange{fromBlock: 101, toBlock: 111}) + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 112, toBlock: 122}) +} + +func TestGenerateNextRangeWithPreviousResult(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(150) + s.OnStartedNewWorker(blockRange{fromBlock: 101, toBlock: 111}) + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 112, toBlock: 122}) + require.Equal(t, s.processingRanges.len(), 1) +} + +func TestGenerateNextRangeWithProcessedResult(t *testing.T) { + s := newSyncStatus(100, 10) + 
s.setLastBlockOnL1(150) + s.OnStartedNewWorker(blockRange{fromBlock: 101, toBlock: 111}) + res := s.OnFinishWorker(blockRange{fromBlock: 101, toBlock: 111}, true, uint64(111)) + require.True(t, res) + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 112, toBlock: 122}) + require.Equal(t, s.processingRanges.len(), 0) +} + +func TestGivenMultiplesWorkersWhenBrInMiddleFinishThenDontChangeLastBlock(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(150) + //previousValue := s.lastBlockStoreOnStateDB + s.OnStartedNewWorker(blockRange{fromBlock: 101, toBlock: 111}) + s.OnStartedNewWorker(blockRange{fromBlock: 112, toBlock: 122}) + s.OnStartedNewWorker(blockRange{fromBlock: 123, toBlock: 133}) + res := s.OnFinishWorker(blockRange{fromBlock: 112, toBlock: 122}, true, uint64(122)) + require.True(t, res) + //require.Equal(t, previousValue, s.lastBlockStoreOnStateDB) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, blockRange{fromBlock: 134, toBlock: 144}, *br) +} + +func TestGivenMultiplesWorkersWhenFirstFinishThenChangeLastBlock(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(150) + s.OnStartedNewWorker(blockRange{fromBlock: 101, toBlock: 111}) + s.OnStartedNewWorker(blockRange{fromBlock: 112, toBlock: 122}) + s.OnStartedNewWorker(blockRange{fromBlock: 123, toBlock: 133}) + res := s.OnFinishWorker(blockRange{fromBlock: 101, toBlock: 111}, true, uint64(111)) + require.True(t, res) + require.Equal(t, uint64(111), s.lastBlockStoreOnStateDB) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 134, toBlock: 144}) +} + +func TestGivenMultiplesWorkersWhenLastFinishThenDontChangeLastBlock(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(150) + //previousValue := s.lastBlockStoreOnStateDB + s.OnStartedNewWorker(blockRange{fromBlock: 101, toBlock: 111}) + s.OnStartedNewWorker(blockRange{fromBlock: 112, toBlock: 122}) + 
s.OnStartedNewWorker(blockRange{fromBlock: 123, toBlock: 133}) + res := s.OnFinishWorker(blockRange{fromBlock: 123, toBlock: 133}, true, uint64(133)) + require.True(t, res) + //require.Equal(t, previousValue, s.lastBlockStoreOnStateDB) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, blockRange{fromBlock: 134, toBlock: 144}, *br) +} + +func TestGivenMultiplesWorkersWhenLastFinishAndFinishAlsoNextOneThenDontChangeLastBlock(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(200) + //previousValue := s.lastBlockStoreOnStateDB + s.OnStartedNewWorker(blockRange{fromBlock: 101, toBlock: 111}) + s.OnStartedNewWorker(blockRange{fromBlock: 112, toBlock: 122}) + s.OnStartedNewWorker(blockRange{fromBlock: 123, toBlock: 133}) + res := s.OnFinishWorker(blockRange{fromBlock: 123, toBlock: 133}, true, uint64(133)) + require.True(t, res) + s.OnStartedNewWorker(blockRange{fromBlock: 134, toBlock: 144}) + //require.Equal(t, previousValue, s.lastBlockStoreOnStateDB) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 145, toBlock: 155}) +} + +func TestGivenMultiplesWorkersWhenNextRangeThenTheRangeIsCappedToLastBlockOnL1(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(105) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 101, toBlock: latestBlockNumber}) +} + +func TestWhenRequestALatestBlockThereIsNoMoreBlocks(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(105) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 101, toBlock: latestBlockNumber}) + + s.OnStartedNewWorker(*br) + br = s.GetNextRange() + require.Nil(t, br) +} + +func TestWhenFinishALatestBlockIfNoNewLastBlockOnL1NothingToDo(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(105) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, blockRange{fromBlock: 101, toBlock: 
latestBlockNumber}, *br) + + s.OnStartedNewWorker(*br) + noBR := s.GetNextRange() + require.Nil(t, noBR) + + s.OnFinishWorker(*br, true, uint64(105)) + br = s.GetNextRange() + require.Nil(t, br) +} + +func TestWhenFinishALatestBlockIfThereAreNewLastBlockOnL1ThenThereIsANewRange(t *testing.T) { + s := newSyncStatus(100, 10) + s.setLastBlockOnL1(105) + + br := s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 101, toBlock: latestBlockNumber}) + + s.OnStartedNewWorker(*br) + noBR := s.GetNextRange() + require.Nil(t, noBR) + + s.setLastBlockOnL1(106) + s.OnFinishWorker(*br, true, invalidBlockNumber) // No block info in the answer + br = s.GetNextRange() + require.NotNil(t, br) + require.Equal(t, *br, blockRange{fromBlock: 101, toBlock: latestBlockNumber}) +} diff --git a/synchronizer/l1_worker_etherman.go b/synchronizer/l1_worker_etherman.go new file mode 100644 index 0000000000..7932327448 --- /dev/null +++ b/synchronizer/l1_worker_etherman.go @@ -0,0 +1,390 @@ +package synchronizer + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/common" + types "github.com/ethereum/go-ethereum/core/types" +) + +type ethermanStatusEnum int8 + +const ( + ethermanIdle ethermanStatusEnum = 0 + ethermanWorking ethermanStatusEnum = 1 + ethermanError ethermanStatusEnum = 2 +) + +func (s ethermanStatusEnum) String() string { + return [...]string{"idle", "working", "error"}[s] +} + +type typeOfRequest int8 + +const ( + typeRequestNone typeOfRequest = 0 + typeRequestRollupInfo typeOfRequest = 1 + typeRequestLastBlock typeOfRequest = 2 + typeRequestEOF typeOfRequest = 3 +) + +func (s typeOfRequest) String() string { + return [...]string{"none", "rollup", "lastBlock", "EOF"}[s] +} + +const ( + errWorkerBusy = "worker is busy" +) + +// genericResponse struct contains 
all common data for any kind of transaction +type genericResponse struct { + err error + duration time.Duration + typeOfRequest typeOfRequest +} + +func (r *genericResponse) String() string { + return fmt.Sprintf("typeOfRequest: [%v] duration: [%v] err: [%v] ", + r.typeOfRequest.String(), r.duration, r.err) +} + +type responseRollupInfoByBlockRange struct { + generic genericResponse + result *rollupInfoByBlockRangeResult +} + +type requestLastBlockMode int32 + +const ( + requestLastBlockModeNone requestLastBlockMode = 0 + requestLastBlockModeIfNoBlocksInAnswer requestLastBlockMode = 1 + requestLastBlockModeAlways requestLastBlockMode = 2 +) + +func (s requestLastBlockMode) String() string { + return [...]string{"none", "ifNoBlocksInAnswer", "always"}[s] +} + +type requestRollupInfoByBlockRange struct { + blockRange blockRange + sleepBefore time.Duration + requestLastBlockIfNoBlocksInAnswer requestLastBlockMode + requestPreviousBlock bool +} + +func (r *requestRollupInfoByBlockRange) String() string { + return fmt.Sprintf("blockRange: %s sleepBefore: %s lastBlock: %s prevBlock:%t", + r.blockRange.String(), r.sleepBefore, r.requestLastBlockIfNoBlocksInAnswer.String(), r.requestPreviousBlock) +} + +func (r *responseRollupInfoByBlockRange) getHighestBlockNumberInResponse() uint64 { + if r.result == nil { + return invalidBlockNumber + } + return r.result.getHighestBlockNumberInResponse() +} + +func (r *responseRollupInfoByBlockRange) toStringBrief() string { + result := fmt.Sprintf(" generic:[%s] ", + r.generic.String()) + if r.result != nil { + result += fmt.Sprintf(" result:[%s]", r.result.toStringBrief()) + } else { + result += " result:[nil]" + } + return result +} + +type rollupInfoByBlockRangeResult struct { + blockRange blockRange + blocks []etherman.Block + order map[common.Hash][]etherman.Order + // If there are no blocks in this range, it gets the last one + // so it could be nil if there are no blocks. 
+ lastBlockOfRange *types.Block + previousBlockOfRange *types.Block +} + +func (r *rollupInfoByBlockRangeResult) toStringBrief() string { + isLastBlockOfRangeSet := r.lastBlockOfRange != nil + ispreviousBlockOfRange := r.previousBlockOfRange != nil + return fmt.Sprintf(" blockRange: %s len_blocks: [%d] len_order:[%d] lastBlockOfRangeSet [%t] previousBlockSet [%t]", + r.blockRange.String(), + len(r.blocks), len(r.order), isLastBlockOfRangeSet, ispreviousBlockOfRange) +} + +func (r *rollupInfoByBlockRangeResult) getRealHighestBlockNumberInResponse() uint64 { + highest := invalidBlockNumber + for _, block := range r.blocks { + if block.BlockNumber > highest { + highest = block.BlockNumber + } + } + if r.lastBlockOfRange != nil && r.lastBlockOfRange.Number().Uint64() > highest { + highest = r.lastBlockOfRange.Number().Uint64() + } + return highest +} + +// getHighestBlockNumberInResponse returns the highest block number in the response if toBlock or the real one if latestBlockNumber +func (r *rollupInfoByBlockRangeResult) getHighestBlockNumberInResponse() uint64 { + if r.blockRange.toBlock != latestBlockNumber { + return r.blockRange.toBlock + } else { + return r.getRealHighestBlockNumberInResponse() + } +} + +func (r *rollupInfoByBlockRangeResult) getHighestBlockReceived() *state.Block { + var highest *state.Block = nil + if r.lastBlockOfRange != nil { + stateBlock := convertL1BlockToStateBlock(r.lastBlockOfRange) + return &stateBlock + } + for _, block := range r.blocks { + if highest == nil || block.BlockNumber > highest.BlockNumber { + blockCopy := block + stateBlock := convertEthmanBlockToStateBlock(&blockCopy) + highest = &stateBlock + } + } + return highest +} + +type responseL1LastBlock struct { + generic genericResponse + result *retrieveL1LastBlockResult +} + +type retrieveL1LastBlockResult struct { + block uint64 +} + +type workerEtherman struct { + mutex sync.Mutex + etherman EthermanInterface + status ethermanStatusEnum + typeOfCurrentRequest typeOfRequest 
+ request requestRollupInfoByBlockRange + startTime time.Time +} + +func (w *workerEtherman) String() string { + w.mutex.Lock() + defer w.mutex.Unlock() + timeSince := time.Since(w.startTime) + if w.isBusyUnsafe() { + return fmt.Sprintf("status:%s br:%s time:%s", w.status.String(), w.request.String(), timeSince.Round(time.Second).String()) + } + return fmt.Sprintf("status:%s", w.status.String()) +} + +func newWorker(etherman EthermanInterface) *workerEtherman { + return &workerEtherman{etherman: etherman, status: ethermanIdle} +} + +// sleep returns false if must stop execution +func (w *workerEtherman) sleep(ctx contextWithCancel, ch chan responseRollupInfoByBlockRange, request requestRollupInfoByBlockRange) bool { + if request.sleepBefore > 0 { + log.Debugf("worker: RollUpInfo(%s) sleeping %s before executing...", request.blockRange.String(), request.sleepBefore) + select { + case <-ctx.ctx.Done(): + log.Debugf("worker: RollUpInfo(%s) cancelled in sleep", request.blockRange.String()) + w.setStatus(ethermanIdle) + ch <- newResponseRollupInfo(context.Canceled, 0, typeRequestRollupInfo, &rollupInfoByBlockRangeResult{blockRange: request.blockRange}) + return false + case <-time.After(request.sleepBefore): + } + } + return true +} + +func mustRequestLastBlock(mode requestLastBlockMode, lenBlocks int, lastBlockRequest uint64) bool { + switch mode { + case requestLastBlockModeNone: + return false + case requestLastBlockModeIfNoBlocksInAnswer: + return lenBlocks == 0 && lastBlockRequest != latestBlockNumber + case requestLastBlockModeAlways: + return lastBlockRequest != latestBlockNumber + default: + return lastBlockRequest != latestBlockNumber + } +} + +// The order of the request are important: +// +// The previous and last block are used to guarantee that the blocks belongs to the same chain. 
+// Check next example: +// Request1: LAST(200) Rollup(100-200) PREVIOUS(99) +// Request2: LAST(300) Rollup(201-300) PREVIOUS(200) +// Request3: LAST(400) Rollup(301-400) PREVIOUS(300) +// +// If there is a reorg in Request2: +// +// Request2: [P1] LAST(300) [P2] Rollup(201-300) [P3] PREVIOUS(200) [P4] +// +// P1: PREVIOUS(200) is not going to match with the same in Request1 LAST(200) +// P2: PREVIOUS(200) is not going to match with the same in Request1 LAST(200) +// P3: PREVIOUS(200) is not going to match with the same in Request1 LAST(200) +// P4: LAST(300) is not going to match with Request3 PREVIOUS(300) +// +// In case of Rollup(100-latest): +// Request1: ----- Rollup(100..)[B120] PREVIOUS(99) +// Request2: ----- Rollup(121..)[B122] PREVIOUS(120) +// Works in the same way + +func (w *workerEtherman) asyncRequestRollupInfoByBlockRange(ctx contextWithCancel, ch chan responseRollupInfoByBlockRange, wg *sync.WaitGroup, request requestRollupInfoByBlockRange) error { + w.mutex.Lock() + defer w.mutex.Unlock() + if w.isBusyUnsafe() { + ctx.cancel() + if wg != nil { + wg.Done() + } + return errors.New(errWorkerBusy) + } + w.status = ethermanWorking + w.typeOfCurrentRequest = typeRequestRollupInfo + w.request = request + w.startTime = time.Now() + launch := func() { + defer ctx.cancel() + if wg != nil { + defer wg.Done() + } + if !w.sleep(ctx, ch, request) { + return + } + + // Uncomment these lines to respond with a nil result to generate fast responses (just for development!) 
+ //w.setStatus(ethermanIdle) + //ch <- newResponseRollupInfo(nil, time.Second, typeRequestRollupInfo, &rollupInfoByBlockRangeResult{blockRange, nil, nil, nil}) + + now := time.Now() + data, err := w.executeRequestRollupInfoByBlockRange(ctx, ch, request) + duration := time.Since(now) + result := newResponseRollupInfo(err, duration, typeRequestRollupInfo, data) + w.setStatus(ethermanIdle) + if err != nil && !errors.Is(err, context.Canceled) { + log.Debugf("worker: RollUpInfo(%s) result err=%s", request.blockRange.String(), err.Error()) + } + ch <- result + } + go launch() + return nil +} + +func (w *workerEtherman) executeRequestRollupInfoByBlockRange(ctx contextWithCancel, ch chan responseRollupInfoByBlockRange, request requestRollupInfoByBlockRange) (*rollupInfoByBlockRangeResult, error) { + resultRollupInfo := rollupInfoByBlockRangeResult{request.blockRange, nil, nil, nil, nil} + if err := w.fillLastBlock(&resultRollupInfo, ctx, request, false); err != nil { + return &resultRollupInfo, err + } + if err := w.fillRollup(&resultRollupInfo, ctx, request); err != nil { + return &resultRollupInfo, err + } + if err := w.fillLastBlock(&resultRollupInfo, ctx, request, true); err != nil { + return &resultRollupInfo, err + } + if err := w.fillPreviousBlock(&resultRollupInfo, ctx, request); err != nil { + return &resultRollupInfo, err + } + return &resultRollupInfo, nil +} + +func (w *workerEtherman) fillPreviousBlock(result *rollupInfoByBlockRangeResult, ctx contextWithCancel, request requestRollupInfoByBlockRange) error { + if request.requestPreviousBlock && request.blockRange.fromBlock > 2 { + log.Debugf("worker: RollUpInfo(%s) request previousBlock calling EthBlockByNumber(%d)", request.blockRange.String(), request.blockRange.fromBlock) + var err error + result.previousBlockOfRange, err = w.etherman.EthBlockByNumber(ctx.ctx, request.blockRange.fromBlock-1) + return err + } + return nil +} + +func (w *workerEtherman) fillRollup(result *rollupInfoByBlockRangeResult, ctx 
contextWithCancel, request requestRollupInfoByBlockRange) error { + var toBlock *uint64 = nil + // If latest we send a nil + if request.blockRange.toBlock != latestBlockNumber { + toBlock = &request.blockRange.toBlock + } + var err error + result.blocks, result.order, err = w.etherman.GetRollupInfoByBlockRange(ctx.ctx, request.blockRange.fromBlock, toBlock) + if err != nil { + return err + } + return nil +} + +func (w *workerEtherman) fillLastBlock(result *rollupInfoByBlockRangeResult, ctx contextWithCancel, request requestRollupInfoByBlockRange, haveExecutedRollupInfo bool) error { + if result.lastBlockOfRange != nil { + return nil + } + lenBlocks := len(result.blocks) + if !haveExecutedRollupInfo { + lenBlocks = -1 + } + if mustRequestLastBlock(request.requestLastBlockIfNoBlocksInAnswer, lenBlocks, request.blockRange.toBlock) { + log.Debugf("worker: RollUpInfo(%s) request lastBlock calling EthBlockByNumber(%d) (before rollup) ", request.blockRange.String(), request.blockRange.toBlock) + lastBlock, err := w.etherman.EthBlockByNumber(ctx.ctx, request.blockRange.toBlock) + if err != nil { + return err + } + result.lastBlockOfRange = lastBlock + } + return nil +} + +func (w *workerEtherman) requestLastBlock(ctx context.Context) responseL1LastBlock { + w.mutex.Lock() + if w.isBusyUnsafe() { + w.mutex.Unlock() + return newResponseL1LastBlock(errors.New(errWorkerBusy), time.Duration(0), typeRequestLastBlock, nil) + } + w.status = ethermanWorking + w.typeOfCurrentRequest = typeRequestLastBlock + w.mutex.Unlock() + now := time.Now() + header, err := w.etherman.HeaderByNumber(ctx, nil) + duration := time.Since(now) + var result responseL1LastBlock + if err == nil { + result = newResponseL1LastBlock(err, duration, typeRequestLastBlock, &retrieveL1LastBlockResult{header.Number.Uint64()}) + } else { + result = newResponseL1LastBlock(err, duration, typeRequestLastBlock, nil) + } + w.setStatus(ethermanIdle) + return result +} + +func (w *workerEtherman) setStatus(status 
ethermanStatusEnum) { + w.mutex.Lock() + defer w.mutex.Unlock() + w.status = status + w.typeOfCurrentRequest = typeRequestNone +} + +func (w *workerEtherman) isIdle() bool { + w.mutex.Lock() + defer w.mutex.Unlock() + return w.status == ethermanIdle +} + +func (w *workerEtherman) isBusyUnsafe() bool { + return w.status != ethermanIdle +} + +func newResponseRollupInfo(err error, duration time.Duration, typeOfRequest typeOfRequest, result *rollupInfoByBlockRangeResult) responseRollupInfoByBlockRange { + return responseRollupInfoByBlockRange{genericResponse{err, duration, typeOfRequest}, result} +} + +func newResponseL1LastBlock(err error, duration time.Duration, typeOfRequest typeOfRequest, result *retrieveL1LastBlockResult) responseL1LastBlock { + return responseL1LastBlock{genericResponse{err, duration, typeOfRequest}, result} +} diff --git a/synchronizer/l1_worker_etherman_test.go b/synchronizer/l1_worker_etherman_test.go new file mode 100644 index 0000000000..d133046a91 --- /dev/null +++ b/synchronizer/l1_worker_etherman_test.go @@ -0,0 +1,273 @@ +package synchronizer + +import ( + context "context" + "errors" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestExploratoryWorker(t *testing.T) { + t.Skip("no real test, just exploratory") + cfg := etherman.Config{ + URL: "http://localhost:8545", + } + + l1Config := etherman.L1Config{ + L1ChainID: 1337, + ZkEVMAddr: common.HexToAddress("0x610178dA211FEF7D417bC0e6FeD39F05609AD788"), + MaticAddr: common.HexToAddress("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + GlobalExitRootManagerAddr: common.HexToAddress("0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6"), + } + + ethermanClient, err := etherman.NewClient(cfg, l1Config) + 
require.NoError(t, err) + worker := newWorker(ethermanClient) + ch := make(chan responseRollupInfoByBlockRange) + blockRange := blockRange{ + fromBlock: 9847396, + toBlock: 9847396, + } + err = worker.asyncRequestRollupInfoByBlockRange(newContextWithNone(context.Background()), ch, nil, newRequestNoSleep(blockRange)) + require.NoError(t, err) + result := <-ch + log.Info(result.toStringBrief()) + for i := range result.result.blocks { + for _, element := range result.result.order[result.result.blocks[i].BlockHash] { + switch element.Name { + case etherman.SequenceBatchesOrder: + for i := range result.result.blocks[i].SequencedBatches { + log.Infof("SequenceBatchesOrder %v %v %v", element.Pos, result.result.blocks[i].SequencedBatches[element.Pos][i].BatchNumber, + result.result.blocks[i].BlockNumber) + } + default: + log.Info("unknown order", element.Name) + } + } + } + require.Equal(t, result.generic.err.Error(), "not found") +} + +func TestIfRollupRequestReturnsErrorDontRequestEthBlockByNumber(t *testing.T) { + sut, mockEtherman, ch := setupWorkerEthermanTest(t) + blockRange := blockRange{ + fromBlock: 100, + toBlock: 20000, + } + ctx := newContextWithTimeout(context.Background(), time.Second) + var wg sync.WaitGroup + wg.Add(1) + expectedCallsForEmptyRollupInfo(mockEtherman, blockRange, errors.New("error"), nil) + err := sut.asyncRequestRollupInfoByBlockRange(ctx, ch, &wg, newRequestNoSleep(blockRange)) + require.NoError(t, err) + wg.Wait() +} + +func TestIfWorkerIsBusyReturnsAnErrorUpdateWaitGroupAndCancelContext(t *testing.T) { + sut, _, ch := setupWorkerEthermanTest(t) + blockRange := blockRange{ + fromBlock: 100, + toBlock: 20000, + } + ctx := newContextWithTimeout(context.Background(), time.Second) + var wg sync.WaitGroup + wg.Add(1) + sut.setStatus(ethermanWorking) + err := sut.asyncRequestRollupInfoByBlockRange(ctx, ch, &wg, newRequestNoSleep(blockRange)) + require.Error(t, err) + wg.Wait() + select { + case <-ctx.Done(): + default: + require.Fail(t, "The 
context should be cancelled") + } +} + +// Given: a request to get the rollup info by block range that is OK +// When: the request is finished +// Then: the context is canceled +func TestGivenOkRequestWhenFinishThenCancelTheContext(t *testing.T) { + sut, mockEtherman, ch := setupWorkerEthermanTest(t) + blockRange := blockRange{ + fromBlock: 100, + toBlock: 20000, + } + ctx := newContextWithTimeout(context.Background(), time.Second) + expectedCallsForEmptyRollupInfo(mockEtherman, blockRange, nil, nil) + err := sut.asyncRequestRollupInfoByBlockRange(ctx, ch, nil, newRequestNoSleep(blockRange)) + require.NoError(t, err) + result := <-ch + require.NoError(t, result.generic.err) + select { + case <-ctx.Done(): + default: + require.Fail(t, "The context should be cancelled") + } +} + +func TestGivenOkRequestWithSleepWhenFinishThenMustExuctedTheSleep(t *testing.T) { + sut, mockEtherman, ch := setupWorkerEthermanTest(t) + blockRange := blockRange{ + fromBlock: 100, + toBlock: 20000, + } + ctx := newContextWithTimeout(context.Background(), time.Second) + expectedCallsForEmptyRollupInfo(mockEtherman, blockRange, nil, nil) + startTime := time.Now() + err := sut.asyncRequestRollupInfoByBlockRange(ctx, ch, nil, newRequestSleep(blockRange, time.Millisecond*500)) + require.NoError(t, err) + result := <-ch + require.NoError(t, result.generic.err) + require.GreaterOrEqual(t, time.Since(startTime).Milliseconds(), int64(500)) +} + +func TestCheckIsIdleFunction(t *testing.T) { + tcs := []struct { + status ethermanStatusEnum + expectedIsIdle bool + }{ + {status: ethermanIdle, expectedIsIdle: true}, + {status: ethermanWorking, expectedIsIdle: false}, + {status: ethermanError, expectedIsIdle: false}, + } + for _, tc := range tcs { + t.Run(tc.status.String(), func(t *testing.T) { + sut, _, _ := setupWorkerEthermanTest(t) + sut.setStatus(tc.status) + require.Equal(t, tc.expectedIsIdle, sut.isIdle()) + }) + } +} + +func TestIfRollupInfoFailGettingLastBlockContainBlockRange(t *testing.T) { + 
sut, mockEtherman, ch := setupWorkerEthermanTest(t) + var wg sync.WaitGroup + wg.Add(1) + ctx := newContextWithTimeout(context.Background(), time.Second) + blockRange := blockRange{fromBlock: 100, toBlock: 20000} + request := newRequestNoSleep(blockRange) + request.requestPreviousBlock = true + request.requestLastBlockIfNoBlocksInAnswer = requestLastBlockModeAlways + + mockEtherman. + On("EthBlockByNumber", mock.Anything, blockRange.toBlock). + Return(ethTypes.NewBlockWithHeader(ðTypes.Header{Number: big.NewInt(int64(blockRange.toBlock))}), fmt.Errorf("error")). + Once() + mockEtherman. + On("GetRollupInfoByBlockRange", mock.Anything, blockRange.fromBlock, mock.Anything). + Return([]etherman.Block{}, map[common.Hash][]etherman.Order{}, nil). + Maybe() + + err := sut.asyncRequestRollupInfoByBlockRange(ctx, ch, &wg, request) + require.NoError(t, err) + result := <-ch + require.Error(t, result.generic.err) + require.True(t, result.result != nil) + require.Equal(t, result.result.blockRange, blockRange) +} + +func TestIfRollupInfoFailGettingRollupContainBlockRange(t *testing.T) { + sut, mockEtherman, ch := setupWorkerEthermanTest(t) + var wg sync.WaitGroup + wg.Add(1) + ctx := newContextWithTimeout(context.Background(), time.Second) + blockRange := blockRange{fromBlock: 100, toBlock: 20000} + request := newRequestNoSleep(blockRange) + request.requestPreviousBlock = true + request.requestLastBlockIfNoBlocksInAnswer = requestLastBlockModeAlways + + mockEtherman. + On("EthBlockByNumber", mock.Anything, blockRange.toBlock). + Return(ethTypes.NewBlockWithHeader(ðTypes.Header{Number: big.NewInt(int64(blockRange.toBlock))}), nil). + Maybe() + mockEtherman. + On("GetRollupInfoByBlockRange", mock.Anything, blockRange.fromBlock, mock.Anything). + Return([]etherman.Block{}, map[common.Hash][]etherman.Order{}, fmt.Errorf("error")). 
+ Once() + + err := sut.asyncRequestRollupInfoByBlockRange(ctx, ch, &wg, request) + require.NoError(t, err) + result := <-ch + require.Error(t, result.generic.err) + require.True(t, result.result != nil) + require.Equal(t, result.result.blockRange, blockRange) +} + +func TestIfRollupInfoFailPreviousBlockContainBlockRange(t *testing.T) { + sut, mockEtherman, ch := setupWorkerEthermanTest(t) + var wg sync.WaitGroup + wg.Add(1) + ctx := newContextWithTimeout(context.Background(), time.Second) + blockRange := blockRange{fromBlock: 100, toBlock: 20000} + request := newRequestNoSleep(blockRange) + request.requestPreviousBlock = true + request.requestLastBlockIfNoBlocksInAnswer = requestLastBlockModeAlways + + mockEtherman. + On("EthBlockByNumber", mock.Anything, blockRange.toBlock). + Return(ethTypes.NewBlockWithHeader(ðTypes.Header{Number: big.NewInt(int64(blockRange.toBlock))}), nil). + Maybe() + mockEtherman. + On("GetRollupInfoByBlockRange", mock.Anything, blockRange.fromBlock, mock.Anything). + Return([]etherman.Block{}, map[common.Hash][]etherman.Order{}, nil). + Maybe() + mockEtherman. + On("EthBlockByNumber", mock.Anything, blockRange.fromBlock-1). + Return(ethTypes.NewBlockWithHeader(ðTypes.Header{Number: big.NewInt(int64(blockRange.fromBlock - 1))}), fmt.Errorf("error")). + Once() + + err := sut.asyncRequestRollupInfoByBlockRange(ctx, ch, &wg, request) + require.NoError(t, err) + result := <-ch + require.Error(t, result.generic.err) + require.True(t, result.result != nil) + require.Equal(t, result.result.blockRange, blockRange) +} + +func expectedCallsForEmptyRollupInfo(mockEtherman *ethermanMock, blockRange blockRange, getRollupError error, ethBlockError error) { + mockEtherman. + On("GetRollupInfoByBlockRange", mock.Anything, blockRange.fromBlock, mock.Anything). + Return([]etherman.Block{}, map[common.Hash][]etherman.Order{}, getRollupError). + Once() + + if getRollupError == nil { + mockEtherman. + On("EthBlockByNumber", mock.Anything, blockRange.toBlock). 
+ Return(ethTypes.NewBlockWithHeader(ðTypes.Header{Number: big.NewInt(int64(blockRange.toBlock))}), ethBlockError). + Once() + } +} + +func setupWorkerEthermanTest(t *testing.T) (*workerEtherman, *ethermanMock, chan responseRollupInfoByBlockRange) { + mockEtherman := newEthermanMock(t) + worker := newWorker(mockEtherman) + ch := make(chan responseRollupInfoByBlockRange, 2) + return worker, mockEtherman, ch +} + +func newRequestNoSleep(blockRange blockRange) requestRollupInfoByBlockRange { + return requestRollupInfoByBlockRange{ + blockRange: blockRange, + sleepBefore: noSleepTime, + requestLastBlockIfNoBlocksInAnswer: requestLastBlockModeIfNoBlocksInAnswer, + requestPreviousBlock: false, + } +} + +func newRequestSleep(blockRange blockRange, sleep time.Duration) requestRollupInfoByBlockRange { + return requestRollupInfoByBlockRange{ + blockRange: blockRange, + sleepBefore: sleep, + requestLastBlockIfNoBlocksInAnswer: requestLastBlockModeIfNoBlocksInAnswer, + } +} diff --git a/synchronizer/l1_workers.go b/synchronizer/l1_workers.go new file mode 100644 index 0000000000..b7109515c9 --- /dev/null +++ b/synchronizer/l1_workers.go @@ -0,0 +1,224 @@ +package synchronizer + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/0xPolygonHermez/zkevm-node/log" +) + +const ( + noSleepTime = time.Duration(0) + minimumNumberOfEthermans = 2 +) + +var ( + errAllWorkersBusy = errors.New("all workers are busy") + errRequiredEtherman = errors.New("required etherman") +) + +// worker: is the expected functions of a worker +type worker interface { + String() string + asyncRequestRollupInfoByBlockRange(ctx contextWithCancel, ch chan responseRollupInfoByBlockRange, wg *sync.WaitGroup, request requestRollupInfoByBlockRange) error + requestLastBlock(ctx context.Context) responseL1LastBlock + isIdle() bool +} + +type workersConfig struct { + timeoutRollupInfo time.Duration +} + +type workerData struct { + worker worker + ctx contextWithCancel +} + +func (w *workerData) 
String() string { + return fmt.Sprintf("worker:%s ctx:%v", w.worker.String(), w.ctx) +} + +type workers struct { + mutex sync.Mutex + // worker for asking lastBlock on L1 (to avoid that all of them are busy) + workerForLastBlock workerData + workers []workerData + // Channel to send to outside the responses from worker | workers --> client + chOutgoingRollupInfo chan responseRollupInfoByBlockRange + + // Channel that receive the responses from worker | worker --> workers + chIncommingRollupInfo chan responseRollupInfoByBlockRange + + waitGroups [typeRequestEOF]sync.WaitGroup + + cfg workersConfig +} + +func (w *workers) String() string { + result := fmt.Sprintf("num_workers:%d ch[%d,%d] ", len(w.workers), len(w.chOutgoingRollupInfo), len(w.chIncommingRollupInfo)) + for i := range w.workers { + if !w.workers[i].worker.isIdle() { + result += fmt.Sprintf(" worker[%d]: %s", i, w.workers[i].worker.String()) + } + } + return result +} + +func newWorkers(ethermans []EthermanInterface, cfg workersConfig) *workers { + result := workers{chIncommingRollupInfo: make(chan responseRollupInfoByBlockRange, len(ethermans)+1), + cfg: cfg} + if (len(ethermans)) < minimumNumberOfEthermans { + log.Fatalf("workers: at least %d ethermans are required, got %d", minimumNumberOfEthermans, len(ethermans)) + } + workers := make([]workerData, len(ethermans)) + for i, etherman := range ethermans { + workers[i].worker = newWorker(etherman) + } + result.workers = workers[1:] + result.workerForLastBlock = workers[0] + result.chOutgoingRollupInfo = make(chan responseRollupInfoByBlockRange, len(ethermans)+1) + return &result +} + +func (w *workers) initialize() error { + if len(w.workers) == 0 { + return errRequiredEtherman + } + return nil +} + +func (w *workers) howManyRunningWorkers() int { + result := 0 + for _, worker := range w.workers { + if !worker.worker.isIdle() { + result++ + } + } + return result +} + +func (w *workers) stop() { + log.Infof("workers: stopping workers %s", w.String()) + 
for i := range w.workers { + wd := &w.workers[i] + if !wd.worker.isIdle() { + log.Debugf("workers: stopping worker[%d] %s", i, wd.String()) + } + wd.ctx.cancel() + } + //w.waitFinishAllWorkers() +} + +func (w *workers) getResponseChannelForRollupInfo() chan responseRollupInfoByBlockRange { + return w.chOutgoingRollupInfo +} + +func (w *workers) asyncRequestRollupInfoByBlockRange(ctx context.Context, request requestRollupInfoByBlockRange) (chan responseRollupInfoByBlockRange, error) { + requestStrForDebug := fmt.Sprintf("GetRollupInfoByBlockRange(%s)", request.String()) + f := func(worker worker, ctx contextWithCancel, wg *sync.WaitGroup) error { + res := worker.asyncRequestRollupInfoByBlockRange(ctx, w.getResponseChannelForRollupInfo(), wg, request) + return res + } + res := w.asyncGenericRequest(ctx, typeRequestRollupInfo, requestStrForDebug, f) + return w.chOutgoingRollupInfo, res +} + +func (w *workers) requestLastBlockWithRetries(ctx context.Context, timeout time.Duration, maxPermittedRetries int) responseL1LastBlock { + for { + log.Debugf("workers: Retrieving last block on L1 (remaining tries=%v, timeout=%v)", maxPermittedRetries, timeout) + result := w.requestLastBlock(ctx, timeout) + if result.generic.err == nil { + return result + } + maxPermittedRetries-- + log.Debugf("workers: fail request pending retries:%d : err:%s ", maxPermittedRetries, result.generic.err) + if maxPermittedRetries == 0 { + log.Error("workers: exhausted retries for last block on L1, returning error: ", result.generic.err) + return result + } + time.Sleep(time.Second) + } +} + +func (w *workers) requestLastBlock(ctx context.Context, timeout time.Duration) responseL1LastBlock { + ctxTimeout := newContextWithTimeout(ctx, timeout) + defer ctxTimeout.cancel() + w.mutex.Lock() + defer w.mutex.Unlock() + //workerIndex, worker := w.getIdleWorkerUnsafe() + worker := &w.workerForLastBlock + if worker == nil { + log.Debugf("workers: call:[%s] failed err:%s", "requestLastBlock", errAllWorkersBusy) 
+ return newResponseL1LastBlock(errAllWorkersBusy, time.Duration(0), typeRequestLastBlock, nil) + } + worker.ctx = ctxTimeout + + log.Debugf("workers: worker : launching requestLatBlock (timeout=%s)", timeout.String()) + result := worker.worker.requestLastBlock(ctxTimeout.ctx) + return result +} + +// asyncGenericRequest launches a generic request to the workers +func (w *workers) asyncGenericRequest(ctx context.Context, requestType typeOfRequest, requestStrForDebug string, + funcRequest func(worker worker, ctx contextWithCancel, wg *sync.WaitGroup) error) error { + w.mutex.Lock() + defer w.mutex.Unlock() + + workerIndex, worker := w.getIdleWorkerUnsafe() + if worker == nil { + log.Debugf("workers: call:[%s] failed err:%s", requestStrForDebug, errAllWorkersBusy) + return errAllWorkersBusy + } + ctxWithCancel := newContextWithTimeout(ctx, w.cfg.timeoutRollupInfo) + w.workers[workerIndex].ctx = ctxWithCancel + w.launchGoroutineForRoutingResponse(ctxWithCancel.ctx, workerIndex) + wg := &w.waitGroups[requestType] + wg.Add(1) + + err := funcRequest(worker, ctxWithCancel, wg) + if err == nil { + log.Debugf("workers: worker[%d] started call:[%s]", workerIndex, requestStrForDebug) + } else { + log.Debugf("workers: worker[%d] started failed! 
call:[%s] failed err:[%s]", workerIndex, requestStrForDebug, err.Error()) + } + return err +} + +func (w *workers) launchGoroutineForRoutingResponse(ctx context.Context, workerIndex int) { + log.Debugf("workers: launching goroutine to route response for worker[%d]", workerIndex) + go func() { + for { + select { + case <-ctx.Done(): + return + case resultRollupInfo := <-w.chIncommingRollupInfo: + w.onResponseRollupInfo(resultRollupInfo) + } + } + }() +} + +func (w *workers) onResponseRollupInfo(v responseRollupInfoByBlockRange) { + msg := fmt.Sprintf("workers: worker finished:[ %s ]", v.toStringBrief()) + log.Infof(msg) + w.chOutgoingRollupInfo <- v +} + +func (w *workers) waitFinishAllWorkers() { + for i := 0; i < len(w.waitGroups); i++ { + log.Debugf("workers: waiting for waitGroup[%d]", i) + w.waitGroups[i].Wait() + } +} + +func (w *workers) getIdleWorkerUnsafe() (int, worker) { + for idx, worker := range w.workers { + if worker.worker.isIdle() { + return idx, worker.worker + } + } + return -1, nil +} diff --git a/synchronizer/l1_workers_decorator_limit_retries_by_time.go b/synchronizer/l1_workers_decorator_limit_retries_by_time.go new file mode 100644 index 0000000000..1d0180394f --- /dev/null +++ b/synchronizer/l1_workers_decorator_limit_retries_by_time.go @@ -0,0 +1,75 @@ +package synchronizer + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/0xPolygonHermez/zkevm-node/log" +) + +const ( + timeOfLiveOfEntries = time.Hour +) + +type controlWorkerFlux struct { + time time.Time + retries int +} + +func (c *controlWorkerFlux) String() string { + return fmt.Sprintf("time:%s retries:%d", c.time, c.retries) +} + +// TODO: Change processingRanges by a cache that take full requests in consideration (no sleep time!) 
+ +type workerDecoratorLimitRetriesByTime struct { + mutex sync.Mutex + workersInterface + processingRanges Cache[blockRange, controlWorkerFlux] + minTimeBetweenCalls time.Duration +} + +func newWorkerDecoratorLimitRetriesByTime(workers workersInterface, minTimeBetweenCalls time.Duration) *workerDecoratorLimitRetriesByTime { + return &workerDecoratorLimitRetriesByTime{ + workersInterface: workers, + minTimeBetweenCalls: minTimeBetweenCalls, + processingRanges: *NewCache[blockRange, controlWorkerFlux](DefaultTimeProvider{}, timeOfLiveOfEntries), + } +} + +func (w *workerDecoratorLimitRetriesByTime) String() string { + return fmt.Sprintf("[FILTERED_LRBT Active/%s]", w.minTimeBetweenCalls) + w.workersInterface.String() +} + +func (w *workerDecoratorLimitRetriesByTime) stop() { + w.mutex.Lock() + defer w.mutex.Unlock() + w.processingRanges.Clear() +} + +func (w *workerDecoratorLimitRetriesByTime) asyncRequestRollupInfoByBlockRange(ctx context.Context, request requestRollupInfoByBlockRange) (chan responseRollupInfoByBlockRange, error) { + w.mutex.Lock() + defer w.mutex.Unlock() + //ctrl, found := w.processingRanges.getTagByBlockRange(request.blockRange) + ctrl, found := w.processingRanges.Get(request.blockRange) + if found { + lastCallElapsedTime := time.Since(ctrl.time) + if lastCallElapsedTime < w.minTimeBetweenCalls { + sleepTime := w.minTimeBetweenCalls - lastCallElapsedTime + log.Infof("workerDecoratorLimitRetriesByTime: br:%s retries:%d last call elapsed time %s < %s, sleeping %s", request.blockRange.String(), ctrl.retries, lastCallElapsedTime, w.minTimeBetweenCalls, sleepTime) + request.sleepBefore = sleepTime - request.sleepBefore + } + } + + res, err := w.workersInterface.asyncRequestRollupInfoByBlockRange(ctx, request) + + if !errors.Is(err, errAllWorkersBusy) { + // update the tag + w.processingRanges.Set(request.blockRange, controlWorkerFlux{time: time.Now(), retries: ctrl.retries + 1}) + } + w.processingRanges.DeleteOutdated() + return res, err +} diff 
--git a/synchronizer/l1_workers_decorator_limit_retries_by_time_test.go b/synchronizer/l1_workers_decorator_limit_retries_by_time_test.go new file mode 100644 index 0000000000..c362823611 --- /dev/null +++ b/synchronizer/l1_workers_decorator_limit_retries_by_time_test.go @@ -0,0 +1,54 @@ +// BEGIN: 9c3d4f5g2hj6 +package synchronizer + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestWorkerDecoratorLimitRetriesByTime_asyncRequestRollupInfoByBlockRange(t *testing.T) { + // Create a new worker decorator with a minimum time between calls of 1 second + workersMock := newWorkersMock(t) + decorator := newWorkerDecoratorLimitRetriesByTime(workersMock, time.Second) + + // Create a block range to use for testing + blockRange := blockRange{1, 10} + + // Test the case where there is no previous call to the block range + ctx := context.Background() + workersMock.On("asyncRequestRollupInfoByBlockRange", ctx, requestRollupInfoByBlockRange{blockRange: blockRange, sleepBefore: noSleepTime, requestLastBlockIfNoBlocksInAnswer: requestLastBlockModeIfNoBlocksInAnswer}).Return(nil, nil).Once() + _, err := decorator.asyncRequestRollupInfoByBlockRange(ctx, newRequestNoSleep(blockRange)) + assert.NoError(t, err) + + // Test the case where there is a previous call to the block range + workersMock.On("asyncRequestRollupInfoByBlockRange", ctx, mock.MatchedBy(func(req requestRollupInfoByBlockRange) bool { return req.sleepBefore > 0 })).Return(nil, nil).Once() + _, err = decorator.asyncRequestRollupInfoByBlockRange(ctx, newRequestNoSleep(blockRange)) + assert.NoError(t, err) +} + +func TestWorkerDecoratorLimitRetriesByTimeIfRealWorkerReturnsAllBusyDoesntCountAsRetry(t *testing.T) { + // Create a new worker decorator with a minimum time between calls of 1 second + workersMock := newWorkersMock(t) + decorator := newWorkerDecoratorLimitRetriesByTime(workersMock, time.Second) + + // Create a block range to use 
for testing + blockRange := blockRange{1, 10} + + // Test the case where there is no previous call to the block range + ctx := context.Background() + workersMock.On("asyncRequestRollupInfoByBlockRange", ctx, requestRollupInfoByBlockRange{blockRange: blockRange, sleepBefore: noSleepTime, requestLastBlockIfNoBlocksInAnswer: requestLastBlockModeIfNoBlocksInAnswer}). + Return(nil, errAllWorkersBusy). + Once() + _, err := decorator.asyncRequestRollupInfoByBlockRange(ctx, newRequestNoSleep(blockRange)) + assert.Error(t, err) + + // Test the case where there is a previous call to the block range + workersMock.On("asyncRequestRollupInfoByBlockRange", ctx, mock.MatchedBy(func(req requestRollupInfoByBlockRange) bool { return req.sleepBefore == 0 })).Return(nil, nil). + Once() + _, err = decorator.asyncRequestRollupInfoByBlockRange(ctx, newRequestNoSleep(blockRange)) + assert.NoError(t, err) +} diff --git a/synchronizer/mock_datacommitteeclient.go b/synchronizer/mock_datacommitteeclient.go index b1e4e73da6..8377c1b8af 100644 --- a/synchronizer/mock_datacommitteeclient.go +++ b/synchronizer/mock_datacommitteeclient.go @@ -82,4 +82,4 @@ func newDataCommitteeClientMock(t mockConstructorTestingTnewDataCommitteeClientM t.Cleanup(func() { mock.AssertExpectations(t) }) return mock -} \ No newline at end of file +} diff --git a/synchronizer/mock_datacommitteeclientfactory.go b/synchronizer/mock_datacommitteeclientfactory.go index bb0213d3fd..167d0250ad 100644 --- a/synchronizer/mock_datacommitteeclientfactory.go +++ b/synchronizer/mock_datacommitteeclientfactory.go @@ -41,4 +41,4 @@ func newDataCommitteeClientFactoryMock(t mockConstructorTestingTnewDataCommittee t.Cleanup(func() { mock.AssertExpectations(t) }) return mock -} \ No newline at end of file +} diff --git a/synchronizer/mock_dbtx.go b/synchronizer/mock_dbtx.go index 730d38c936..6ccb4c9921 100644 --- a/synchronizer/mock_dbtx.go +++ b/synchronizer/mock_dbtx.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. 
DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package synchronizer @@ -283,13 +283,12 @@ func (_m *dbTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResult return r0 } -type mockConstructorTestingTnewDbTxMock interface { +// newDbTxMock creates a new instance of dbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newDbTxMock(t interface { mock.TestingT Cleanup(func()) -} - -// newDbTxMock creates a new instance of dbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func newDbTxMock(t mockConstructorTestingTnewDbTxMock) *dbTxMock { +}) *dbTxMock { mock := &dbTxMock{} mock.Mock.Test(t) diff --git a/synchronizer/mock_etherman.go b/synchronizer/mock_etherman.go index f466cc2bb4..e468f476e3 100644 --- a/synchronizer/mock_etherman.go +++ b/synchronizer/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package synchronizer @@ -15,7 +15,7 @@ import ( types "github.com/ethereum/go-ethereum/core/types" ) -// ethermanMock is an autogenerated mock type for the ethermanInterface type +// ethermanMock is an autogenerated mock type for the EthermanInterface type type ethermanMock struct { mock.Mock } @@ -203,6 +203,20 @@ func (_m *ethermanMock) VerifyGenBlockNumber(ctx context.Context, genBlockNumber return r0, r1 } +// newEthermanMock creates a new instance of ethermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func newEthermanMock(t interface { + mock.TestingT + Cleanup(func()) +}) *ethermanMock { + mock := ðermanMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + // GetCurrentDataCommittee provides a mock function with given fields: func (_m *ethermanMock) GetCurrentDataCommittee() (*etherman.DataCommittee, error) { ret := _m.Called() @@ -228,18 +242,3 @@ func (_m *ethermanMock) GetCurrentDataCommittee() (*etherman.DataCommittee, erro return r0, r1 } - -type mockConstructorTestingTnewEthermanMock interface { - mock.TestingT - Cleanup(func()) -} - -// newEthermanMock creates a new instance of ethermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func newEthermanMock(t mockConstructorTestingTnewEthermanMock) *ethermanMock { - mock := ðermanMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/synchronizer/mock_ethtxmanager.go b/synchronizer/mock_ethtxmanager.go index 31f6dd3ef0..68f8ede4c7 100644 --- a/synchronizer/mock_ethtxmanager.go +++ b/synchronizer/mock_ethtxmanager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package synchronizer @@ -28,13 +28,12 @@ func (_m *ethTxManagerMock) Reorg(ctx context.Context, fromBlockNumber uint64, d return r0 } -type mockConstructorTestingTnewEthTxManagerMock interface { +// newEthTxManagerMock creates a new instance of ethTxManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newEthTxManagerMock(t interface { mock.TestingT Cleanup(func()) -} - -// newEthTxManagerMock creates a new instance of ethTxManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newEthTxManagerMock(t mockConstructorTestingTnewEthTxManagerMock) *ethTxManagerMock { +}) *ethTxManagerMock { mock := ðTxManagerMock{} mock.Mock.Test(t) diff --git a/synchronizer/mock_l1_rollup_consumer_interface.go b/synchronizer/mock_l1_rollup_consumer_interface.go new file mode 100644 index 0000000000..99a4c62cb0 --- /dev/null +++ b/synchronizer/mock_l1_rollup_consumer_interface.go @@ -0,0 +1,77 @@ +// Code generated by mockery v2.32.0. DO NOT EDIT. + +package synchronizer + +import ( + context "context" + + state "github.com/0xPolygonHermez/zkevm-node/state" + mock "github.com/stretchr/testify/mock" +) + +// l1RollupConsumerInterfaceMock is an autogenerated mock type for the l1RollupConsumerInterface type +type l1RollupConsumerInterfaceMock struct { + mock.Mock +} + +// GetLastEthBlockSynced provides a mock function with given fields: +func (_m *l1RollupConsumerInterfaceMock) GetLastEthBlockSynced() (state.Block, bool) { + ret := _m.Called() + + var r0 state.Block + var r1 bool + if rf, ok := ret.Get(0).(func() (state.Block, bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() state.Block); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(state.Block) + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Reset provides a mock function with given fields: startingBlockNumber +func (_m *l1RollupConsumerInterfaceMock) Reset(startingBlockNumber uint64) { + _m.Called(startingBlockNumber) +} + +// Start provides a mock function with given fields: ctx, lastEthBlockSynced +func (_m *l1RollupConsumerInterfaceMock) Start(ctx context.Context, lastEthBlockSynced *state.Block) error { + ret := _m.Called(ctx, lastEthBlockSynced) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Block) error); ok { + r0 = rf(ctx, lastEthBlockSynced) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StopAfterProcessChannelQueue provides a mock function with given fields: 
+func (_m *l1RollupConsumerInterfaceMock) StopAfterProcessChannelQueue() { + _m.Called() +} + +// newL1RollupConsumerInterfaceMock creates a new instance of l1RollupConsumerInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newL1RollupConsumerInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *l1RollupConsumerInterfaceMock { + mock := &l1RollupConsumerInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/mock_l1_rollup_producer_interface.go b/synchronizer/mock_l1_rollup_producer_interface.go new file mode 100644 index 0000000000..ac24de2ebb --- /dev/null +++ b/synchronizer/mock_l1_rollup_producer_interface.go @@ -0,0 +1,57 @@ +// Code generated by mockery v2.32.0. DO NOT EDIT. + +package synchronizer + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// l1RollupProducerInterfaceMock is an autogenerated mock type for the l1RollupProducerInterface type +type l1RollupProducerInterfaceMock struct { + mock.Mock +} + +// Abort provides a mock function with given fields: +func (_m *l1RollupProducerInterfaceMock) Abort() { + _m.Called() +} + +// Reset provides a mock function with given fields: startingBlockNumber +func (_m *l1RollupProducerInterfaceMock) Reset(startingBlockNumber uint64) { + _m.Called(startingBlockNumber) +} + +// Start provides a mock function with given fields: ctx +func (_m *l1RollupProducerInterfaceMock) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Stop provides a mock function with given fields: +func (_m *l1RollupProducerInterfaceMock) Stop() { + _m.Called() +} + +// newL1RollupProducerInterfaceMock creates a new instance of 
l1RollupProducerInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newL1RollupProducerInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *l1RollupProducerInterfaceMock { + mock := &l1RollupProducerInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/mock_l1_worker.go b/synchronizer/mock_l1_worker.go new file mode 100644 index 0000000000..b2ee4ea776 --- /dev/null +++ b/synchronizer/mock_l1_worker.go @@ -0,0 +1,85 @@ +// Code generated by mockery v2.32.0. DO NOT EDIT. + +package synchronizer + +import ( + context "context" + sync "sync" + + mock "github.com/stretchr/testify/mock" +) + +// workerMock is an autogenerated mock type for the worker type +type workerMock struct { + mock.Mock +} + +// String provides a mock function with given fields: +func (_m *workerMock) String() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// asyncRequestRollupInfoByBlockRange provides a mock function with given fields: ctx, ch, wg, request +func (_m *workerMock) asyncRequestRollupInfoByBlockRange(ctx contextWithCancel, ch chan responseRollupInfoByBlockRange, wg *sync.WaitGroup, request requestRollupInfoByBlockRange) error { + ret := _m.Called(ctx, ch, wg, request) + + var r0 error + if rf, ok := ret.Get(0).(func(contextWithCancel, chan responseRollupInfoByBlockRange, *sync.WaitGroup, requestRollupInfoByBlockRange) error); ok { + r0 = rf(ctx, ch, wg, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// isIdle provides a mock function with given fields: +func (_m *workerMock) isIdle() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + 
+ return r0 +} + +// requestLastBlock provides a mock function with given fields: ctx +func (_m *workerMock) requestLastBlock(ctx context.Context) responseL1LastBlock { + ret := _m.Called(ctx) + + var r0 responseL1LastBlock + if rf, ok := ret.Get(0).(func(context.Context) responseL1LastBlock); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(responseL1LastBlock) + } + + return r0 +} + +// newWorkerMock creates a new instance of workerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newWorkerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *workerMock { + mock := &workerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/mock_pool.go b/synchronizer/mock_pool.go index 0db2e31cb0..bda4090b1c 100644 --- a/synchronizer/mock_pool.go +++ b/synchronizer/mock_pool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package synchronizer @@ -43,13 +43,12 @@ func (_m *poolMock) StoreTx(ctx context.Context, tx types.Transaction, ip string return r0 } -type mockConstructorTestingTnewPoolMock interface { +// newPoolMock creates a new instance of poolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newPoolMock(t interface { mock.TestingT Cleanup(func()) -} - -// newPoolMock creates a new instance of poolMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newPoolMock(t mockConstructorTestingTnewPoolMock) *poolMock { +}) *poolMock { mock := &poolMock{} mock.Mock.Test(t) diff --git a/synchronizer/mock_state.go b/synchronizer/mock_state.go index 5d52540531..8dccdf2108 100644 --- a/synchronizer/mock_state.go +++ b/synchronizer/mock_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package synchronizer @@ -257,49 +257,25 @@ func (_m *stateMock) GetForkIDByBatchNumber(batchNumber uint64) uint64 { return r0 } -// GetBatchL2DataByNumber provides a mock function with given fields: ctx, batchNumber, dbTx -func (_m *stateMock) GetBatchL2DataByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]byte, error) { - ret := _m.Called(ctx, batchNumber, dbTx) +// GetForkIDs provides a mock function with given fields: ctx, dbTx +func (_m *stateMock) GetForkIDs(ctx context.Context, dbTx pgx.Tx) ([]state.ForkIDInterval, error) { + ret := _m.Called(ctx, dbTx) - var r0 []byte + var r0 []state.ForkIDInterval var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) ([]byte, error)); ok { - return rf(ctx, batchNumber, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) ([]state.ForkIDInterval, error)); ok { + return rf(ctx, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) []byte); ok { - r0 = rf(ctx, batchNumber, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) []state.ForkIDInterval); ok { + r0 = rf(ctx, dbTx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) + r0 = ret.Get(0).([]state.ForkIDInterval) } } - if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { - r1 = rf(ctx, batchNumber, dbTx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetForkIDTrustedReorgCount provides a mock function with given fields: ctx, forkID, version, dbTx -func (_m *stateMock) GetForkIDTrustedReorgCount(ctx context.Context, forkID uint64, version 
string, dbTx pgx.Tx) (uint64, error) { - ret := _m.Called(ctx, forkID, version, dbTx) - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, string, pgx.Tx) (uint64, error)); ok { - return rf(ctx, forkID, version, dbTx) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, string, pgx.Tx) uint64); ok { - r0 = rf(ctx, forkID, version, dbTx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, string, pgx.Tx) error); ok { - r1 = rf(ctx, forkID, version, dbTx) + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) error); ok { + r1 = rf(ctx, dbTx) } else { r1 = ret.Error(1) } @@ -636,13 +612,13 @@ func (_m *stateMock) Reset(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) return r0 } -// ResetForkID provides a mock function with given fields: ctx, batchNumber, forkID, version, dbTx -func (_m *stateMock) ResetForkID(ctx context.Context, batchNumber uint64, forkID uint64, version string, dbTx pgx.Tx) error { - ret := _m.Called(ctx, batchNumber, forkID, version, dbTx) +// ResetForkID provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *stateMock) ResetForkID(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, string, pgx.Tx) error); ok { - r0 = rf(ctx, batchNumber, forkID, version, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) } else { r0 = ret.Error(0) } @@ -718,18 +694,30 @@ func (_m *stateMock) SetLastBatchInfoSeenOnEthereum(ctx context.Context, lastBat return r0 } -// StoreTransaction provides a mock function with given fields: ctx, batchNumber, processedTx, coinbase, timestamp, dbTx -func (_m *stateMock) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp 
uint64, dbTx pgx.Tx) error { - ret := _m.Called(ctx, batchNumber, processedTx, coinbase, timestamp, dbTx) +// StoreTransaction provides a mock function with given fields: ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx +func (_m *stateMock) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, egpLog *state.EffectiveGasPriceLog, dbTx pgx.Tx) (*types.Header, error) { + ret := _m.Called(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.ProcessTransactionResponse, common.Address, uint64, pgx.Tx) error); ok { - r0 = rf(ctx, batchNumber, processedTx, coinbase, timestamp, dbTx) + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.ProcessTransactionResponse, common.Address, uint64, *state.EffectiveGasPriceLog, pgx.Tx) (*types.Header, error)); ok { + return rf(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.ProcessTransactionResponse, common.Address, uint64, *state.EffectiveGasPriceLog, pgx.Tx) *types.Header); ok { + r0 = rf(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, uint64, *state.ProcessTransactionResponse, common.Address, uint64, *state.EffectiveGasPriceLog, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, processedTx, coinbase, timestamp, egpLog, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // UpdateBatchL2Data provides a mock function with given fields: ctx, batchNumber, batchL2Data, dbTx @@ -746,13 +734,12 @@ func (_m *stateMock) UpdateBatchL2Data(ctx context.Context, batchNumber uint64, return r0 } -type mockConstructorTestingTnewStateMock 
interface { +// newStateMock creates a new instance of stateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newStateMock(t interface { mock.TestingT Cleanup(func()) -} - -// newStateMock creates a new instance of stateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func newStateMock(t mockConstructorTestingTnewStateMock) *stateMock { +}) *stateMock { mock := &stateMock{} mock.Mock.Test(t) @@ -760,3 +747,29 @@ func newStateMock(t mockConstructorTestingTnewStateMock) *stateMock { return mock } + +// GetBatchL2DataByNumber provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *stateMock) GetBatchL2DataByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]byte, error) { + ret := _m.Called(ctx, batchNumber, dbTx) + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) ([]byte, error)); ok { + return rf(ctx, batchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) []byte); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/synchronizer/mock_synchronizer_process_block_range.go b/synchronizer/mock_synchronizer_process_block_range.go new file mode 100644 index 0000000000..5b1a5714b2 --- /dev/null +++ b/synchronizer/mock_synchronizer_process_block_range.go @@ -0,0 +1,43 @@ +// Code generated by mockery v2.32.0. DO NOT EDIT. 
+ +package synchronizer + +import ( + etherman "github.com/0xPolygonHermez/zkevm-node/etherman" + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// synchronizerProcessBlockRangeMock is an autogenerated mock type for the synchronizerProcessBlockRangeInterface type +type synchronizerProcessBlockRangeMock struct { + mock.Mock +} + +// processBlockRange provides a mock function with given fields: blocks, order +func (_m *synchronizerProcessBlockRangeMock) processBlockRange(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error { + ret := _m.Called(blocks, order) + + var r0 error + if rf, ok := ret.Get(0).(func([]etherman.Block, map[common.Hash][]etherman.Order) error); ok { + r0 = rf(blocks, order) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// newSynchronizerProcessBlockRangeMock creates a new instance of synchronizerProcessBlockRangeMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newSynchronizerProcessBlockRangeMock(t interface { + mock.TestingT + Cleanup(func()) +}) *synchronizerProcessBlockRangeMock { + mock := &synchronizerProcessBlockRangeMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/mock_workers.go b/synchronizer/mock_workers.go new file mode 100644 index 0000000000..c1a2369539 --- /dev/null +++ b/synchronizer/mock_workers.go @@ -0,0 +1,137 @@ +// Code generated by mockery v2.32.0. DO NOT EDIT. 
+ +package synchronizer + +import ( + context "context" + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// workersMock is an autogenerated mock type for the workersInterface type +type workersMock struct { + mock.Mock +} + +// String provides a mock function with given fields: +func (_m *workersMock) String() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// asyncRequestRollupInfoByBlockRange provides a mock function with given fields: ctx, request +func (_m *workersMock) asyncRequestRollupInfoByBlockRange(ctx context.Context, request requestRollupInfoByBlockRange) (chan responseRollupInfoByBlockRange, error) { + ret := _m.Called(ctx, request) + + var r0 chan responseRollupInfoByBlockRange + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, requestRollupInfoByBlockRange) (chan responseRollupInfoByBlockRange, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, requestRollupInfoByBlockRange) chan responseRollupInfoByBlockRange); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chan responseRollupInfoByBlockRange) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, requestRollupInfoByBlockRange) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// getResponseChannelForRollupInfo provides a mock function with given fields: +func (_m *workersMock) getResponseChannelForRollupInfo() chan responseRollupInfoByBlockRange { + ret := _m.Called() + + var r0 chan responseRollupInfoByBlockRange + if rf, ok := ret.Get(0).(func() chan responseRollupInfoByBlockRange); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chan responseRollupInfoByBlockRange) + } + } + + return r0 +} + +// howManyRunningWorkers provides a mock function with given fields: +func (_m *workersMock) 
howManyRunningWorkers() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// initialize provides a mock function with given fields: +func (_m *workersMock) initialize() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// requestLastBlockWithRetries provides a mock function with given fields: ctx, timeout, maxPermittedRetries +func (_m *workersMock) requestLastBlockWithRetries(ctx context.Context, timeout time.Duration, maxPermittedRetries int) responseL1LastBlock { + ret := _m.Called(ctx, timeout, maxPermittedRetries) + + var r0 responseL1LastBlock + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, int) responseL1LastBlock); ok { + r0 = rf(ctx, timeout, maxPermittedRetries) + } else { + r0 = ret.Get(0).(responseL1LastBlock) + } + + return r0 +} + +// stop provides a mock function with given fields: +func (_m *workersMock) stop() { + _m.Called() +} + +// waitFinishAllWorkers provides a mock function with given fields: +func (_m *workersMock) waitFinishAllWorkers() { + _m.Called() +} + +// newWorkersMock creates a new instance of workersMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newWorkersMock(t interface { + mock.TestingT + Cleanup(func()) +}) *workersMock { + mock := &workersMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/mock_zkevmclient.go b/synchronizer/mock_zkevmclient.go index cc65d007a2..fa1839c8b8 100644 --- a/synchronizer/mock_zkevmclient.go +++ b/synchronizer/mock_zkevmclient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.22.1. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package synchronizer @@ -66,13 +66,12 @@ func (_m *zkEVMClientMock) BatchNumber(ctx context.Context) (uint64, error) { return r0, r1 } -type mockConstructorTestingTnewZkEVMClientMock interface { +// newZkEVMClientMock creates a new instance of zkEVMClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newZkEVMClientMock(t interface { mock.TestingT Cleanup(func()) -} - -// newZkEVMClientMock creates a new instance of zkEVMClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func newZkEVMClientMock(t mockConstructorTestingTnewZkEVMClientMock) *zkEVMClientMock { +}) *zkEVMClientMock { mock := &zkEVMClientMock{} mock.Mock.Test(t) diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 84300bf767..601bebf3da 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -18,6 +18,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/0xPolygonHermez/zkevm-node/synchronizer/metrics" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" @@ -26,6 +27,10 @@ import ( const ( forkID5 = 5 + // ParallelMode is the value for L1SynchronizationMode to run in parallel mode + ParallelMode = "parallel" + // SequentialMode is the value for L1SynchronizationMode to run in sequential mode + SequentialMode = "sequential" ) // Synchronizer connects L1 and L2 @@ -37,10 +42,11 @@ type Synchronizer interface { // ClientSynchronizer connects L1 and L2 type ClientSynchronizer struct { isTrustedSequencer bool + etherMan EthermanInterface latestFlushID uint64 // If true the lastFlushID is stored in DB and we don't need to check again 
latestFlushIDIsFulfilled bool - etherMan ethermanInterface + etherManForL1 []EthermanInterface state stateInterface pool poolInterface ethTxManager ethTxManager @@ -64,27 +70,30 @@ type ClientSynchronizer struct { committeeMembers []etherman.DataCommitteeMember selectedCommitteeMember int dataCommitteeClientFactory client.ClientFactoryInterface + l1SyncOrchestration *l1SyncOrchestration } // NewSynchronizer creates and initializes an instance of Synchronizer func NewSynchronizer( isTrustedSequencer bool, - ethMan ethermanInterface, + ethMan EthermanInterface, + etherManForL1 []EthermanInterface, st stateInterface, pool poolInterface, ethTxManager ethTxManager, zkEVMClient zkEVMClientInterface, eventLog *event.EventLog, genesis state.Genesis, - cfg Config, - clientFactory client.ClientFactoryInterface) (Synchronizer, error) { + cfg Config, clientFactory client.ClientFactoryInterface, + runInDevelopmentMode bool) (Synchronizer, error) { ctx, cancel := context.WithCancel(context.Background()) metrics.Register() - c := &ClientSynchronizer{ + res := &ClientSynchronizer{ isTrustedSequencer: isTrustedSequencer, state: st, etherMan: ethMan, + etherManForL1: etherManForL1, pool: pool, ctx: ctx, cancelCtx: cancel, @@ -96,13 +105,54 @@ func NewSynchronizer( proverID: "", previousExecutorFlushID: 0, dataCommitteeClientFactory: clientFactory, + l1SyncOrchestration: nil, } - err := c.loadCommittee() - return c, err + switch cfg.L1SynchronizationMode { + case ParallelMode: + log.Info("L1SynchronizationMode is parallel") + var err error + res.l1SyncOrchestration, err = newL1SyncParallel(ctx, cfg, etherManForL1, res, runInDevelopmentMode) + if err != nil { + log.Fatalf("Can't initialize L1SyncParallel. Error: %s", err) + } + case SequentialMode: + log.Info("L1SynchronizationMode is sequential") + default: + log.Fatalf("L1SynchronizationMode is not valid. 
Valid values are: %s, %s", ParallelMode, SequentialMode) + } + err := res.loadCommittee() + return res, err } var waitDuration = time.Duration(0) +func newL1SyncParallel(ctx context.Context, cfg Config, etherManForL1 []EthermanInterface, sync *ClientSynchronizer, runExternalControl bool) (*l1SyncOrchestration, error) { + chIncommingRollupInfo := make(chan l1SyncMessage, cfg.L1ParallelSynchronization.MaxPendingNoProcessedBlocks) + cfgConsumer := configConsumer{ + ApplyAfterNumRollupReceived: cfg.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived, + AceptableInacctivityTime: cfg.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime.Duration, + } + L1DataProcessor := newL1RollupInfoConsumer(cfgConsumer, sync, chIncommingRollupInfo) + + cfgProducer := configProducer{ + syncChunkSize: cfg.SyncChunkSize, + ttlOfLastBlockOnL1: cfg.L1ParallelSynchronization.RequestLastBlockPeriod.Duration, + timeoutForRequestLastBlockOnL1: cfg.L1ParallelSynchronization.RequestLastBlockTimeout.Duration, + numOfAllowedRetriesForRequestLastBlockOnL1: cfg.L1ParallelSynchronization.RequestLastBlockMaxRetries, + timeForShowUpStatisticsLog: cfg.L1ParallelSynchronization.StatisticsPeriod.Duration, + timeOutMainLoop: cfg.L1ParallelSynchronization.TimeOutMainLoop.Duration, + minTimeBetweenRetriesForRollupInfo: cfg.L1ParallelSynchronization.RollupInfoRetriesSpacing.Duration, + } + l1DataRetriever := newL1DataRetriever(cfgProducer, etherManForL1, chIncommingRollupInfo) + l1SyncOrchestration := newL1SyncOrchestration(ctx, l1DataRetriever, L1DataProcessor) + if runExternalControl { + log.Infof("Starting external control") + externalControl := newExternalControl(l1DataRetriever, l1SyncOrchestration) + externalControl.start() + } + return l1SyncOrchestration, nil +} + // Sync function will read the last state synced and will continue from that point. 
// Sync() will read blockchain events to detect rollup updates func (s *ClientSynchronizer) Sync() error { @@ -182,7 +232,7 @@ func (s *ClientSynchronizer) Sync() error { log.Errorf("error rolling back state. RollbackErr: %v", rollbackErr) return rollbackErr } - return fmt.Errorf("Calculated newRoot should be %s instead of %s", s.genesis.Root.String(), root.String()) + return fmt.Errorf("calculated newRoot should be %s instead of %s", s.genesis.Root.String(), root.String()) } log.Debug("Genesis root matches!") } else { @@ -262,13 +312,26 @@ func (s *ClientSynchronizer) Sync() error { metrics.FullTrustedSyncTime(time.Since(startTrusted)) if err != nil { log.Warn("error syncing trusted state. Error: ", err) + s.trustedState.lastTrustedBatches = nil + s.trustedState.lastStateRoot = nil continue } waitDuration = s.cfg.SyncInterval.Duration } //Sync L1Blocks startL1 := time.Now() - lastEthBlockSynced, err = s.syncBlocks(lastEthBlockSynced) + if s.l1SyncOrchestration != nil && (latestSyncedBatch < latestSequencedBatchNumber || !s.cfg.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized) { + log.Infof("Syncing L1 blocks in parallel lastEthBlockSynced=%d", lastEthBlockSynced.BlockNumber) + lastEthBlockSynced, err = s.syncBlocksParallel(lastEthBlockSynced) + } else { + if s.l1SyncOrchestration != nil { + log.Infof("Switching to sequential mode, stopping parallel sync and deleting object") + s.l1SyncOrchestration.abort() + s.l1SyncOrchestration = nil + } + log.Infof("Syncing L1 blocks sequentially lastEthBlockSynced=%d", lastEthBlockSynced.BlockNumber) + lastEthBlockSynced, err = s.syncBlocksSequential(lastEthBlockSynced) + } metrics.FullL1SyncTime(time.Since(startL1)) if err != nil { log.Warn("error syncing blocks: ", err) @@ -276,6 +339,11 @@ func (s *ClientSynchronizer) Sync() error { if err != nil { log.Fatal("error getting lastEthBlockSynced to resume the synchronization... 
Error: ", err) } + if s.l1SyncOrchestration != nil { + // If have failed execution and get starting point from DB, we must reset parallel sync to this point + // producer must start requesting this block + s.l1SyncOrchestration.reset(lastEthBlockSynced.BlockNumber) + } if s.ctx.Err() != nil { continue } @@ -287,7 +355,34 @@ func (s *ClientSynchronizer) Sync() error { } // This function syncs the node from a specific block to the latest -func (s *ClientSynchronizer) syncBlocks(lastEthBlockSynced *state.Block) (*state.Block, error) { +// lastEthBlockSynced -> last block synced in the db +func (s *ClientSynchronizer) syncBlocksParallel(lastEthBlockSynced *state.Block) (*state.Block, error) { + // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok. + block, err := s.checkReorg(lastEthBlockSynced) + if err != nil { + log.Errorf("error checking reorgs. Retrying... Err: %v", err) + return lastEthBlockSynced, fmt.Errorf("error checking reorgs") + } + if block != nil { + log.Infof("reorg detected. Resetting the state from block %v to block %v", lastEthBlockSynced.BlockNumber, block.BlockNumber) + err = s.resetState(block.BlockNumber) + if err != nil { + log.Errorf("error resetting the state to a previous block. Retrying... Err: %v", err) + s.l1SyncOrchestration.reset(lastEthBlockSynced.BlockNumber) + return lastEthBlockSynced, fmt.Errorf("error resetting the state to a previous block") + } + return block, nil + } + if !s.l1SyncOrchestration.isProducerRunning() { + log.Infof("producer is not running. 
Resetting the state to start from block %v (last on DB)", lastEthBlockSynced.BlockNumber) + s.l1SyncOrchestration.producer.Reset(lastEthBlockSynced.BlockNumber) + } + log.Infof("Starting L1 sync orchestrator in parallel block: %d", lastEthBlockSynced.BlockNumber) + return s.l1SyncOrchestration.start(lastEthBlockSynced) +} + +// This function syncs the node from a specific block to the latest +func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Block) (*state.Block, error) { // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok. block, err := s.checkReorg(lastEthBlockSynced) if err != nil { @@ -391,17 +486,17 @@ func (s *ClientSynchronizer) syncTrustedState(latestSyncedBatch uint64) error { return nil } - log.Info("Getting trusted state info") + log.Info("syncTrustedState: Getting trusted state info") start := time.Now() lastTrustedStateBatchNumber, err := s.zkEVMClient.BatchNumber(s.ctx) metrics.GetTrustedBatchNumberTime(time.Since(start)) if err != nil { - log.Warn("error syncing trusted state. Error: ", err) + log.Warn("syncTrustedState: error syncing trusted state. Error: ", err) return err } - log.Debug("lastTrustedStateBatchNumber ", lastTrustedStateBatchNumber) - log.Debug("latestSyncedBatch ", latestSyncedBatch) + log.Debug("syncTrustedState: lastTrustedStateBatchNumber ", lastTrustedStateBatchNumber) + log.Debug("syncTrustedState: latestSyncedBatch ", latestSyncedBatch) if lastTrustedStateBatchNumber < latestSyncedBatch { return nil } @@ -416,41 +511,41 @@ func (s *ClientSynchronizer) syncTrustedState(latestSyncedBatch uint64) error { batchToSync, err := s.zkEVMClient.BatchByNumber(s.ctx, big.NewInt(0).SetUint64(batchNumberToSync)) metrics.GetTrustedBatchInfoTime(time.Since(start)) if err != nil { - log.Warnf("failed to get batch %d from trusted state. Error: %v", batchNumberToSync, err) + log.Warnf("syncTrustedState: failed to get batch %d from trusted state. 
Error: %v", batchNumberToSync, err) return err } dbTx, err := s.state.BeginStateTransaction(s.ctx) if err != nil { - log.Errorf("error creating db transaction to sync trusted batch %d: %v", batchNumberToSync, err) + log.Errorf("syncTrustedState: error creating db transaction to sync trusted batch %d: %v", batchNumberToSync, err) return err } start = time.Now() cbatches, lastStateRoot, err := s.processTrustedBatch(batchToSync, dbTx) metrics.ProcessTrustedBatchTime(time.Since(start)) if err != nil { - log.Errorf("error processing trusted batch %d: %v", batchNumberToSync, err) + log.Errorf("syncTrustedState: error processing trusted batch %d: %v", batchNumberToSync, err) rollbackErr := dbTx.Rollback(s.ctx) if rollbackErr != nil { - log.Errorf("error rolling back db transaction to sync trusted batch %d: %v", batchNumberToSync, rollbackErr) + log.Errorf("syncTrustedState: error rolling back db transaction to sync trusted batch %d: %v", batchNumberToSync, rollbackErr) return rollbackErr } return err } - log.Debug("Checking FlushID to commit trustedState data to db") + log.Debug("syncTrustedState: Checking FlushID to commit trustedState data to db") err = s.checkFlushID(dbTx) if err != nil { - log.Errorf("error checking flushID. Error: %v", err) + log.Errorf("syncTrustedState: error checking flushID. Error: %v", err) rollbackErr := dbTx.Rollback(s.ctx) if rollbackErr != nil { - log.Errorf("error rolling back state. RollbackErr: %s, Error : %v", rollbackErr.Error(), err) + log.Errorf("syncTrustedState: error rolling back state. 
RollbackErr: %s, Error : %v", rollbackErr.Error(), err) return rollbackErr } return err } if err := dbTx.Commit(s.ctx); err != nil { - log.Errorf("error committing db transaction to sync trusted batch %v: %v", batchNumberToSync, err) + log.Errorf("syncTrustedState: error committing db transaction to sync trusted batch %v: %v", batchNumberToSync, err) return err } s.trustedState.lastTrustedBatches = cbatches @@ -458,7 +553,7 @@ func (s *ClientSynchronizer) syncTrustedState(latestSyncedBatch uint64) error { batchNumberToSync++ } - log.Info("Trusted state fully synchronized") + log.Info("syncTrustedState: Trusted state fully synchronized") return nil } @@ -585,7 +680,9 @@ func (s *ClientSynchronizer) resetState(blockNumber uint64) error { log.Error("error committing the resetted state. Error: ", err) return err } - + if s.l1SyncOrchestration != nil { + s.l1SyncOrchestration.reset(blockNumber) + } return nil } @@ -615,6 +712,7 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, } // Compare hashes if (block.Hash() != latestBlock.BlockHash || block.ParentHash() != latestBlock.ParentHash) && latestBlock.BlockNumber > s.genesis.GenesisBlockNum { + log.Infof("checkReorg: Bad block %d hashOk %t parentHashOk %t", latestBlock.BlockNumber, block.Hash() == latestBlock.BlockHash, block.ParentHash() == latestBlock.ParentHash) log.Debug("[checkReorg function] => latestBlockNumber: ", latestBlock.BlockNumber) log.Debug("[checkReorg function] => latestBlockHash: ", latestBlock.BlockHash) log.Debug("[checkReorg function] => latestBlockHashParent: ", latestBlock.ParentHash) @@ -652,7 +750,7 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, } } if latestEthBlockSynced.BlockHash != latestBlock.BlockHash { - log.Info("Reorg detected in block: ", latestEthBlockSynced.BlockNumber) + log.Info("Reorg detected in block: ", latestEthBlockSynced.BlockNumber, " last block OK: ", latestBlock.BlockNumber) return latestBlock, nil } 
return nil, nil @@ -689,7 +787,12 @@ func (s *ClientSynchronizer) checkTrustedState(batch state.Batch, tBatch *state. if reorgReasons.Len() > 0 { reason := reorgReasons.String() - log.Warnf("Missmatch in trusted state detected for Batch Number: %d. Reasons: %s", tBatch.BatchNumber, reason) + + if tBatch.StateRoot == (common.Hash{}) { + log.Warnf("incomplete trusted batch %d detected. Syncing full batch from L1", tBatch.BatchNumber) + } else { + log.Warnf("missmatch in trusted state detected for Batch Number: %d. Reasons: %s", tBatch.BatchNumber, reason) + } if s.isTrustedSequencer { s.halt(s.ctx, fmt.Errorf("TRUSTED REORG DETECTED! Batch: %d", batch.BatchNumber)) } @@ -716,6 +819,21 @@ func (s *ClientSynchronizer) processForkID(forkID etherman.ForkID, blockNumber u BlockNumber: blockNumber, } + // If forkID affects to a batch from the past. State must be reseted. + log.Debugf("ForkID: %d, synchronization must use the new forkID since batch: %d", forkID.ForkID, forkID.BatchNumber+1) + fIds, err := s.state.GetForkIDs(s.ctx, dbTx) + if err != nil { + log.Error("error getting ForkIDTrustedReorg. Error: ", err) + rollbackErr := dbTx.Rollback(s.ctx) + if rollbackErr != nil { + log.Errorf("error rolling back state get forkID trusted state. BlockNumber: %d, rollbackErr: %s, error : %v", blockNumber, rollbackErr.Error(), err) + return rollbackErr + } + return err + } + if len(fIds) != 0 && fIds[len(fIds)-1].ForkId == fID.ForkId { // If the forkID reset was already done + return nil + } //If the forkID.batchnumber is a future batch latestBatchNumber, err := s.state.GetLastBatchNumber(s.ctx, dbTx) if err != nil && !errors.Is(err, state.ErrStateNotSynchronized) { @@ -727,41 +845,26 @@ func (s *ClientSynchronizer) processForkID(forkID etherman.ForkID, blockNumber u } return err } - if latestBatchNumber <= forkID.BatchNumber || s.isTrustedSequencer { //If the forkID will start in a future batch or isTrustedSequencer - log.Infof("Just adding forkID. Skipping reset forkID. 
ForkID: %+v.", fID) - // Add new forkID to the state - err := s.state.AddForkIDInterval(s.ctx, fID, dbTx) - if err != nil { - log.Error("error adding new forkID interval to the state. Error: ", err) - rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil { - log.Errorf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blockNumber, rollbackErr.Error(), err) - return rollbackErr - } - return err - } - return nil - } - - // If forkID affects to a batch from the past. State must be reseted. - log.Debugf("ForkID: %d, Reverting synchronization to batch: %d", forkID.ForkID, forkID.BatchNumber+1) - count, err := s.state.GetForkIDTrustedReorgCount(s.ctx, forkID.ForkID, forkID.Version, dbTx) + // Add new forkID to the state + err = s.state.AddForkIDInterval(s.ctx, fID, dbTx) if err != nil { - log.Error("error getting ForkIDTrustedReorg. Error: ", err) + log.Error("error adding new forkID interval to the state. Error: ", err) rollbackErr := dbTx.Rollback(s.ctx) if rollbackErr != nil { - log.Errorf("error rolling back state get forkID trusted state. BlockNumber: %d, rollbackErr: %s, error : %v", blockNumber, rollbackErr.Error(), err) + log.Errorf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blockNumber, rollbackErr.Error(), err) return rollbackErr } return err } - if count > 0 { // If the forkID reset was already done + if latestBatchNumber <= forkID.BatchNumber || s.isTrustedSequencer { //If the forkID will start in a future batch or isTrustedSequencer + log.Infof("Just adding forkID. Skipping reset forkID. 
ForkID: %+v.", fID) return nil } log.Info("ForkID received in the permissionless node that affects to a batch from the past") //Reset DB only if permissionless node - err = s.state.ResetForkID(s.ctx, forkID.BatchNumber+1, forkID.ForkID, forkID.Version, dbTx) + log.Debugf("ForkID: %d, Reverting synchronization to batch: %d", forkID.ForkID, forkID.BatchNumber+1) + err = s.state.ResetForkID(s.ctx, forkID.BatchNumber+1, dbTx) if err != nil { log.Error("error resetting the state. Error: ", err) rollbackErr := dbTx.Rollback(s.ctx) @@ -772,18 +875,6 @@ func (s *ClientSynchronizer) processForkID(forkID etherman.ForkID, blockNumber u return err } - // Add new forkID to the state - err = s.state.AddForkIDInterval(s.ctx, fID, dbTx) - if err != nil { - log.Error("error adding new forkID interval to the state. Error: ", err) - rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil { - log.Errorf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blockNumber, rollbackErr.Error(), err) - return rollbackErr - } - return err - } - // Commit because it returns an error to force the resync err = dbTx.Commit(s.ctx) if err != nil { @@ -883,6 +974,7 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. Timestamp: batch.Timestamp, GlobalExitRoot: batch.GlobalExitRoot, ForcedBatchNum: batch.ForcedBatchNum, + BatchL2Data: &batch.BatchL2Data, } var newRoot common.Hash @@ -890,9 +982,10 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. // First get trusted batch from db tBatch, err := s.state.GetBatchByNumber(s.ctx, batch.BatchNumber, dbTx) if err != nil { - if errors.Is(err, state.ErrNotFound) || errors.Is(err, state.ErrStateNotSynchronized) { + if errors.Is(err, state.ErrNotFound) { log.Debugf("BatchNumber: %d, not found in trusted state. 
Storing it...", batch.BatchNumber) // If it is not found, store batch + log.Infof("processSequenceBatches: (not found batch) ProcessAndStoreClosedBatch . BatchNumber: %d, BlockNumber: %d", processCtx.BatchNumber, blockNumber) newStateRoot, flushID, proverID, err := s.state.ProcessAndStoreClosedBatch(s.ctx, processCtx, batch.BatchL2Data, dbTx, stateMetrics.SynchronizerCallerLabel) if err != nil { log.Errorf("error storing trustedBatch. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) @@ -961,9 +1054,19 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. return err } + // Clean trustedState sync variables to avoid sync the trusted state from the wrong starting point. + // This wrong starting point would force the trusted sync to clean the virtualization of the batch reaching an inconsistency. + s.trustedState.lastTrustedBatches = nil + s.trustedState.lastStateRoot = nil + // Reset trusted state previousBatchNumber := batch.BatchNumber - 1 - log.Warnf("Missmatch in trusted state detected, discarding batches until batchNum %d", previousBatchNumber) + if tBatch.StateRoot == (common.Hash{}) { + log.Warnf("cleaning state before inserting batch from L1. Clean until batch: %d", previousBatchNumber) + } else { + log.Warnf("missmatch in trusted state detected, discarding batches until batchNum %d", previousBatchNumber) + } + log.Infof("ResetTrustedState: Resetting trusted state. delete batch > %d, ", previousBatchNumber) err = s.state.ResetTrustedState(s.ctx, previousBatchNumber, dbTx) // This method has to reset the forced batches deleting the batchNumber for higher batchNumbers if err != nil { log.Errorf("error resetting trusted state. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) @@ -975,6 +1078,7 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. log.Errorf("error resetting trusted state. 
BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) return err } + log.Infof("processSequenceBatches: (deleted previous) ProcessAndStoreClosedBatch . BatchNumber: %d, BlockNumber: %d", processCtx.BatchNumber, blockNumber) _, flushID, proverID, err := s.state.ProcessAndStoreClosedBatch(s.ctx, processCtx, batch.BatchL2Data, dbTx, stateMetrics.SynchronizerCallerLabel) if err != nil { log.Errorf("error storing trustedBatch. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) @@ -990,6 +1094,7 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. } // Store virtualBatch + log.Infof("processSequenceBatches: Storing virtualBatch. BatchNumber: %d, BlockNumber: %d", virtualBatch.BatchNumber, blockNumber) err = s.state.AddVirtualBatch(s.ctx, &virtualBatch, dbTx) if err != nil { log.Errorf("error storing virtualBatch. BatchNumber: %d, BlockNumber: %d, error: %v", virtualBatch.BatchNumber, blockNumber, err) @@ -1038,7 +1143,13 @@ func (s *ClientSynchronizer) processSequenceForceBatch(sequenceForceBatch []ethe log.Errorf("error getting lastVirtualBatchNumber. BlockNumber: %d, error: %v", block.BlockNumber, err) return err } - // Second, reset trusted state + // Clean trustedState sync variables to avoid sync the trusted state from the wrong starting point. + // This wrong starting point would force the trusted sync to clean the virtualization of the batch reaching an inconsistency. + s.trustedState.lastTrustedBatches = nil + s.trustedState.lastStateRoot = nil + + // Reset trusted state + log.Infof("ResetTrustedState: processSequenceForceBatch: Resetting trusted state. delete batch > (lastVirtualizedBatchNumber)%d, ", lastVirtualizedBatchNumber) err = s.state.ResetTrustedState(s.ctx, lastVirtualizedBatchNumber, dbTx) // This method has to reset the forced batches deleting the batchNumber for higher batchNumbers if err != nil { log.Errorf("error resetting trusted state. 
BatchNumber: %d, BlockNumber: %d, error: %v", lastVirtualizedBatchNumber, block.BlockNumber, err) @@ -1098,8 +1209,10 @@ func (s *ClientSynchronizer) processSequenceForceBatch(sequenceForceBatch []ethe Timestamp: block.ReceivedAt, Coinbase: fbatch.Coinbase, ForcedBatchNum: &forcedBatches[i].ForcedBatchNumber, + BatchL2Data: &forcedBatches[i].RawTxsData, } // Process batch + log.Infof("processSequenceFoceBatches: ProcessAndStoreClosedBatch . BatchNumber: %d, BlockNumber: %d", batch.BatchNumber, block.BlockNumber) _, flushID, proverID, err := s.state.ProcessAndStoreClosedBatch(s.ctx, batch, forcedBatches[i].RawTxsData, dbTx, stateMetrics.SynchronizerCallerLabel) if err != nil { log.Errorf("error processing batch in processSequenceForceBatch. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, block.BlockNumber, err) @@ -1114,6 +1227,7 @@ func (s *ClientSynchronizer) processSequenceForceBatch(sequenceForceBatch []ethe s.pendingFlushID(flushID, proverID) // Store virtualBatch + log.Infof("processSequenceFoceBatches: Storing virtualBatch. BatchNumber: %d, BlockNumber: %d", virtualBatch.BatchNumber, block.BlockNumber) err = s.state.AddVirtualBatch(s.ctx, &virtualBatch, dbTx) if err != nil { log.Errorf("error storing virtualBatch in processSequenceForceBatch. BatchNumber: %d, BlockNumber: %d, error: %v", virtualBatch.BatchNumber, block.BlockNumber, err) @@ -1155,6 +1269,7 @@ func (s *ClientSynchronizer) processForcedBatch(forcedBatch etherman.ForcedBatch RawTxsData: forcedBatch.RawTxsData, ForcedAt: forcedBatch.ForcedAt, } + log.Infof("processForcedBatch: Storing forcedBatch. BatchNumber: %d BlockNumber: %d", forcedBatch.ForcedBatchNumber, forcedBatch.BlockNumber) err := s.state.AddForcedBatch(s.ctx, &forcedB, dbTx) if err != nil { log.Errorf("error storing the forcedBatch in processForcedBatch. 
BlockNumber: %d", forcedBatch.BlockNumber) @@ -1240,6 +1355,7 @@ func (s *ClientSynchronizer) processTrustedVerifyBatches(lastVerifiedBatch ether TxHash: lastVerifiedBatch.TxHash, IsTrusted: true, } + log.Infof("processTrustedVerifyBatches: Storing verifiedB. BlockNumber: %d, BatchNumber: %d", verifiedB.BlockNumber, verifiedB.BatchNumber) err = s.state.AddVerifiedBatch(s.ctx, &verifiedB, dbTx) if err != nil { log.Errorf("error storing the verifiedB in processTrustedVerifyBatches. verifiedBatch: %+v, lastVerifiedBatch: %+v", verifiedB, lastVerifiedBatch) @@ -1256,7 +1372,7 @@ func (s *ClientSynchronizer) processTrustedVerifyBatches(lastVerifiedBatch ether } func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx pgx.Tx) ([]*state.Batch, *common.Hash, error) { - log.Debugf("Processing trusted batch: %v", trustedBatch.Number) + log.Debugf("Processing trusted batch: %d", uint64(trustedBatch.Number)) trustedBatchL2Data := trustedBatch.BatchL2Data batches := s.trustedState.lastTrustedBatches log.Debug("len(batches): ", len(batches)) @@ -1298,15 +1414,17 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx // check if batch needs to be synchronized if batches[0] != nil { if checkIfSynced(batches, trustedBatch) { - log.Debugf("Batch %v already synchronized", trustedBatch.Number) + log.Debugf("Batch %d already synchronized", uint64(trustedBatch.Number)) return batches, s.trustedState.lastStateRoot, nil } - log.Infof("Batch %v needs to be updated", trustedBatch.Number) + log.Infof("Batch %d needs to be updated", uint64(trustedBatch.Number)) // Find txs to be processed and included in the trusted state if *s.trustedState.lastStateRoot == batches[1].StateRoot { + prevBatch := uint64(trustedBatch.Number) - 1 + log.Infof("ResetTrustedState: processTrustedBatch: %d Cleaning state until batch:%d ", trustedBatch.Number, prevBatch) // Delete txs that were stored before restart. 
We need to reprocess all txs because the intermediary stateRoot is only stored in memory - err := s.state.ResetTrustedState(s.ctx, uint64(trustedBatch.Number)-1, dbTx) + err := s.state.ResetTrustedState(s.ctx, prevBatch, dbTx) if err != nil { log.Error("error resetting trusted state. Error: ", err) return nil, nil, err @@ -1333,6 +1451,7 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx if forkID >= forkID5 { syncedEfficiencyPercentages = syncedEfficiencyPercentages[len(storedTxs):] } + log.Infof("Processing %d new txs with forkID: %d", len(txsToBeAdded), forkID) request.Transactions, err = state.EncodeTransactions(txsToBeAdded, syncedEfficiencyPercentages, forkID) if err != nil { @@ -1346,7 +1465,15 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx if isBatchClosed { //Sanity check if s.trustedState.lastStateRoot != nil && trustedBatch.StateRoot != *s.trustedState.lastStateRoot { - s.halt(s.ctx, fmt.Errorf("stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", *s.trustedState.lastStateRoot, trustedBatch.StateRoot)) + log.Errorf("batch %d, different batchL2Datas (trustedBatchL2Data: %s, batches[0].BatchL2Data: %s). 
Decoded txs are len(storedTxs): %d, len(syncedTxs): %d", uint64(trustedBatch.Number), trustedBatchL2Data.Hex(), "0x"+common.Bytes2Hex(batches[0].BatchL2Data), len(storedTxs), len(syncedTxs)) + for _, tx := range storedTxs { + log.Error("stored txHash : ", tx.Hash()) + } + for _, tx := range syncedTxs { + log.Error("synced txHash : ", tx.Hash()) + } + log.Errorf("batch: %d, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", uint64(trustedBatch.Number), *s.trustedState.lastStateRoot, trustedBatch.StateRoot) + return nil, nil, fmt.Errorf("batch: %d, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", uint64(trustedBatch.Number), *s.trustedState.lastStateRoot, trustedBatch.StateRoot) } receipt := state.ProcessingReceipt{ BatchNumber: uint64(trustedBatch.Number), @@ -1355,14 +1482,14 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx BatchL2Data: trustedBatchL2Data, AccInputHash: trustedBatch.AccInputHash, } - log.Debugf("closing batch %v", trustedBatch.Number) + log.Debugf("closing batch %d", uint64(trustedBatch.Number)) if err := s.state.CloseBatch(s.ctx, receipt, dbTx); err != nil { // This is a workaround to avoid closing a batch that was already closed if err.Error() != state.ErrBatchAlreadyClosed.Error() { - log.Errorf("error closing batch %d", trustedBatch.Number) + log.Errorf("error closing batch %d", uint64(trustedBatch.Number)) return nil, nil, err } else { - log.Warnf("CASE 02: the batch [%d] was already closed", trustedBatch.Number) + log.Warnf("CASE 02: the batch [%d] was already closed", uint64(trustedBatch.Number)) log.Info("batches[0].BatchNumber: ", batches[0].BatchNumber) log.Info("batches[0].AccInputHash: ", batches[0].AccInputHash) log.Info("batches[0].StateRoot: ", batches[0].StateRoot) @@ -1398,13 +1525,13 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx // 
Update batchL2Data err := s.state.UpdateBatchL2Data(s.ctx, batches[0].BatchNumber, trustedBatchL2Data, dbTx) if err != nil { - log.Errorf("error opening batch %d", trustedBatch.Number) + log.Errorf("error opening batch %d", uint64(trustedBatch.Number)) return nil, nil, err } batches[0].BatchL2Data = trustedBatchL2Data log.Debug("BatchL2Data updated for batch: ", batches[0].BatchNumber) } else { - log.Infof("Batch %v needs to be synchronized", trustedBatch.Number) + log.Infof("Batch %d needs to be synchronized", uint64(trustedBatch.Number)) err := s.openBatch(trustedBatch, dbTx) if err != nil { log.Error("error openning batch. Error: ", err) @@ -1414,7 +1541,7 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx request.Transactions = trustedBatchL2Data } - log.Debugf("Processing sequencer for batch %v", trustedBatch.Number) + log.Debugf("Processing sequencer for batch %d", uint64(trustedBatch.Number)) processBatchResp, err := s.processAndStoreTxs(trustedBatch, request, dbTx) if err != nil { @@ -1427,7 +1554,10 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx if isBatchClosed { //Sanity check if trustedBatch.StateRoot != processBatchResp.NewStateRoot { - s.halt(s.ctx, fmt.Errorf("stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", processBatchResp.NewStateRoot, trustedBatch.StateRoot)) + log.Error("trustedBatchL2Data: ", trustedBatchL2Data) + log.Error("request.Transactions: ", request.Transactions) + log.Errorf("batch: %d after processing some txs, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", uint64(trustedBatch.Number), processBatchResp.NewStateRoot.String(), trustedBatch.StateRoot.String()) + return nil, nil, fmt.Errorf("batch: %d, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", 
uint64(trustedBatch.Number), processBatchResp.NewStateRoot.String(), trustedBatch.StateRoot.String()) } receipt := state.ProcessingReceipt{ BatchNumber: uint64(trustedBatch.Number), @@ -1437,14 +1567,14 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx AccInputHash: trustedBatch.AccInputHash, } - log.Debugf("closing batch %v", trustedBatch.Number) + log.Debugf("closing batch %d", uint64(trustedBatch.Number)) if err := s.state.CloseBatch(s.ctx, receipt, dbTx); err != nil { // This is a workarround to avoid closing a batch that was already closed if err.Error() != state.ErrBatchAlreadyClosed.Error() { - log.Errorf("error closing batch %d", trustedBatch.Number) + log.Errorf("error closing batch %d", uint64(trustedBatch.Number)) return nil, nil, err } else { - log.Warnf("CASE 01: batch [%d] was already closed", trustedBatch.Number) + log.Warnf("CASE 01: batch [%d] was already closed", uint64(trustedBatch.Number)) } } log.Info("Batch closed right after processing some tx") @@ -1453,10 +1583,11 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx batches[0].AccInputHash = trustedBatch.AccInputHash batches[0].StateRoot = trustedBatch.StateRoot batches[0].LocalExitRoot = trustedBatch.LocalExitRoot + batches[0].BatchL2Data = trustedBatchL2Data } } - log.Infof("Batch %v synchronized", trustedBatch.Number) + log.Infof("Batch %d synchronized", uint64(trustedBatch.Number)) return batches, &processBatchResp.NewStateRoot, nil } @@ -1506,10 +1637,21 @@ func (s *ClientSynchronizer) processAndStoreTxs(trustedBatch *types.Batch, reque s.pendingFlushID(processBatchResp.FlushID, processBatchResp.ProverID) log.Debugf("Storing transactions %d for batch %v", len(processBatchResp.Responses), trustedBatch.Number) + if processBatchResp.IsExecutorLevelError { + log.Warn("executorLevelError detected. Avoid store txs...") + return processBatchResp, nil + } else if processBatchResp.IsRomOOCError { + log.Warn("romOOCError detected. 
Avoid store txs...") + return processBatchResp, nil + } for _, tx := range processBatchResp.Responses { - if err = s.state.StoreTransaction(s.ctx, uint64(trustedBatch.Number), tx, trustedBatch.Coinbase, uint64(trustedBatch.Timestamp), dbTx); err != nil { - log.Errorf("failed to store transactions for batch: %v", trustedBatch.Number) - return nil, err + if state.IsStateRootChanged(executor.RomErrorCode(tx.RomError)) { + log.Infof("TrustedBatch info: %+v", processBatchResp) + log.Infof("Storing trusted tx %+v", tx) + if _, err = s.state.StoreTransaction(s.ctx, uint64(trustedBatch.Number), tx, trustedBatch.Coinbase, uint64(trustedBatch.Timestamp), nil, dbTx); err != nil { + log.Errorf("failed to store transactions for batch: %v. Tx: %s", trustedBatch.Number, tx.TxHash.String()) + return nil, err + } } } return processBatchResp, nil @@ -1581,7 +1723,7 @@ func (s *ClientSynchronizer) getCurrentBatches(batches []*state.Batch, trustedBa if len(batches) == 0 || batches[0] == nil || (batches[0] != nil && uint64(trustedBatch.Number) != batches[0].BatchNumber) { log.Debug("Updating batch[0] value!") batch, err := s.state.GetBatchByNumber(s.ctx, uint64(trustedBatch.Number), dbTx) - if err != nil && err != state.ErrStateNotSynchronized { + if err != nil && err != state.ErrNotFound { log.Warnf("failed to get batch %v from local trusted state. Error: %v", trustedBatch.Number, err) return nil, err } @@ -1589,7 +1731,7 @@ func (s *ClientSynchronizer) getCurrentBatches(batches []*state.Batch, trustedBa if len(batches) == 0 || batches[0] == nil || (batches[0] != nil && uint64(trustedBatch.Number-1) != batches[0].BatchNumber) { log.Debug("Updating batch[1] value!") prevBatch, err = s.state.GetBatchByNumber(s.ctx, uint64(trustedBatch.Number-1), dbTx) - if err != nil && err != state.ErrStateNotSynchronized { + if err != nil && err != state.ErrNotFound { log.Warnf("failed to get prevBatch %v from local trusted state. 
Error: %v", trustedBatch.Number-1, err) return nil, err } diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 8aabeb24b1..6946386981 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -37,66 +37,72 @@ type mocks struct { //EventLog *eventLogMock } -//func Test_Given_StartingSynchronizer_When_CallFirstTimeExecutor_Then_StoreProverID(t *testing.T) { -//} - // Feature #2220 and #2239: Optimize Trusted state synchronization // // this Check partially point 2: Use previous batch stored in memory to avoid getting from database -func Test_Given_PermissionlessNode_When_SyncronizeAgainSameBatch_Then_UseTheOneInMemoryInstaeadOfGettingFromDb(t *testing.T) { +func TestGivenPermissionlessNodeWhenSyncronizeAgainSameBatchThenUseTheOneInMemoryInstaeadOfGettingFromDb(t *testing.T) { genesis, cfg, m := setupGenericTest(t) + ethermanForL1 := []EthermanInterface{m.Etherman} m.Etherman. On("GetCurrentDataCommittee"). Return(ðerman.DataCommittee{}, nil) - sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, nil) + syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, nil, false) require.NoError(t, err) - sync, ok := sync_interface.(*ClientSynchronizer) + sync, ok := syncInterface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") lastBatchNumber := uint64(10) - batch10With1Tx := createBatch(t, lastBatchNumber, 1) batch10With2Tx := createBatch(t, lastBatchNumber, 2) batch10With3Tx := createBatch(t, lastBatchNumber, 3) + previousBatch09 := createBatch(t, lastBatchNumber-1, 1) - expectedCallsForsyncTrustedState(t, m, sync, batch10With1Tx, batch10With2Tx, true) + expectedCallsForsyncTrustedState(t, m, sync, nil, batch10With2Tx, previousBatch09, true, false) + // Is the first time that 
appears this batch, so it need to OpenBatch + expectedCallsForOpenBatch(t, m, sync, lastBatchNumber) err = sync.syncTrustedState(lastBatchNumber) require.NoError(t, err) - expectedCallsForsyncTrustedState(t, m, sync, batch10With2Tx, batch10With3Tx, false) + expectedCallsForsyncTrustedState(t, m, sync, batch10With2Tx, batch10With3Tx, previousBatch09, true, true) + expectedCallsForOpenBatch(t, m, sync, lastBatchNumber) err = sync.syncTrustedState(lastBatchNumber) require.NoError(t, err) - require.Equal(t, *sync.trustedState.lastTrustedBatches[0], rpcBatchTostateBatch(batch10With3Tx)) + require.Equal(t, sync.trustedState.lastTrustedBatches[0], rpcBatchTostateBatch(batch10With3Tx)) } // Feature #2220 and #2239: Optimize Trusted state synchronization // // this Check partially point 2: Store last batch in memory (CurrentTrustedBatch) -func Test_Given_PermissionlessNode_When_SyncronizeFirstTimeABatch_Then_StoreItInALocalVar(t *testing.T) { +func TestGivenPermissionlessNodeWhenSyncronizeFirstTimeABatchThenStoreItInALocalVar(t *testing.T) { genesis, cfg, m := setupGenericTest(t) + ethermanForL1 := []EthermanInterface{m.Etherman} m.Etherman. On("GetCurrentDataCommittee"). 
Return(ðerman.DataCommittee{}, nil) - sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, nil) + syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, nil, false) require.NoError(t, err) - sync, ok := sync_interface.(*ClientSynchronizer) + sync, ok := syncInterface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") lastBatchNumber := uint64(10) batch10With1Tx := createBatch(t, lastBatchNumber, 1) batch10With2Tx := createBatch(t, lastBatchNumber, 2) + previousBatch09 := createBatch(t, lastBatchNumber-1, 1) - expectedCallsForsyncTrustedState(t, m, sync, batch10With1Tx, batch10With2Tx, true) + expectedCallsForsyncTrustedState(t, m, sync, batch10With1Tx, batch10With2Tx, previousBatch09, true, true) + expectedCallsForOpenBatch(t, m, sync, lastBatchNumber) err = sync.syncTrustedState(lastBatchNumber) require.NoError(t, err) - require.Equal(t, *sync.trustedState.lastTrustedBatches[0], rpcBatchTostateBatch(batch10With2Tx)) + require.Equal(t, sync.trustedState.lastTrustedBatches[0], rpcBatchTostateBatch(batch10With2Tx)) } // issue #2220 - +// TODO: this is running against old sequential L1 sync, need to update to parallel L1 sync. +// but it used a feature that is not implemented in new one that is asking beyond the last block on L1 func TestForcedBatch(t *testing.T) { genesis := state.Genesis{ GenesisBlockNum: uint64(123456), } cfg := Config{ - SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, - SyncChunkSize: 10, + SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, + SyncChunkSize: 10, + L1SynchronizationMode: SequentialMode, } m := mocks{ @@ -106,11 +112,11 @@ func TestForcedBatch(t *testing.T) { DbTx: newDbTxMock(t), ZKEVMClient: newZkEVMClientMock(t), } + ethermanForL1 := []EthermanInterface{m.Etherman} m.Etherman. 
On("GetCurrentDataCommittee"). - Return(nil, nil) - - sync, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, m.DataCommitteeClientFactory) + Return(ðerman.DataCommittee{}, nil) + sync, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, nil, false) require.NoError(t, err) // state preparation @@ -172,7 +178,7 @@ func TestForcedBatch(t *testing.T) { var n *big.Int m.Etherman. - On("HeaderByNumber", ctx, n). + On("HeaderByNumber", mock.Anything, n). Return(ethHeader, nil). Once() @@ -223,7 +229,7 @@ func TestForcedBatch(t *testing.T) { toBlock := fromBlock + cfg.SyncChunkSize m.Etherman. - On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + On("GetRollupInfoByBlockRange", mock.Anything, fromBlock, &toBlock). Return(blocks, order, nil). Once() @@ -248,7 +254,6 @@ func TestForcedBatch(t *testing.T) { On("AddBlock", ctx, stateBlock, m.DbTx). Return(nil). Once() - m.State. On("GetBatchL2DataByNumber", ctx, uint64(2), nil). Return(txs, nil). @@ -343,13 +348,16 @@ func TestForcedBatch(t *testing.T) { require.NoError(t, err) } +// TODO: this is running against old sequential L1 sync, need to update to parallel L1 sync. +// but it used a feature that is not implemented in new one that is asking beyond the last block on L1 func TestSequenceForcedBatch(t *testing.T) { genesis := state.Genesis{ GenesisBlockNum: uint64(123456), } cfg := Config{ - SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, - SyncChunkSize: 10, + SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, + SyncChunkSize: 10, + L1SynchronizationMode: SequentialMode, } m := mocks{ @@ -359,11 +367,11 @@ func TestSequenceForcedBatch(t *testing.T) { DbTx: newDbTxMock(t), ZKEVMClient: newZkEVMClientMock(t), } + ethermanForL1 := []EthermanInterface{m.Etherman} m.Etherman. On("GetCurrentDataCommittee"). 
Return(nil, nil) - - sync, err := NewSynchronizer(true, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, m.DataCommitteeClientFactory) + sync, err := NewSynchronizer(true, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, nil, false) require.NoError(t, err) // state preparation @@ -529,6 +537,7 @@ func TestSequenceForcedBatch(t *testing.T) { Timestamp: ethBlock.ReceivedAt, GlobalExitRoot: sequencedForceBatch.GlobalExitRoot, ForcedBatchNum: &f, + BatchL2Data: &sequencedForceBatch.Transactions, } m.State. @@ -581,8 +590,20 @@ func setupGenericTest(t *testing.T) (*state.Genesis, *Config, *mocks) { GenesisBlockNum: uint64(123456), } cfg := Config{ - SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, - SyncChunkSize: 10, + SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, + SyncChunkSize: 10, + L1SynchronizationMode: SequentialMode, + L1ParallelSynchronization: L1ParallelSynchronizationConfig{ + MaxClients: 2, + MaxPendingNoProcessedBlocks: 2, + RequestLastBlockPeriod: cfgTypes.Duration{Duration: 1 * time.Second}, + RequestLastBlockTimeout: cfgTypes.Duration{Duration: 1 * time.Second}, + RequestLastBlockMaxRetries: 1, + StatisticsPeriod: cfgTypes.Duration{Duration: 1 * time.Second}, + TimeOutMainLoop: cfgTypes.Duration{Duration: 1 * time.Second}, + RollupInfoRetriesSpacing: cfgTypes.Duration{Duration: 1 * time.Second}, + FallbackToSequentialModeOnSynchronized: false, + }, } m := mocks{ @@ -659,8 +680,11 @@ func createBatch(t *testing.T, batchNumber uint64, howManyTx int) *types.Batch { return batch } -func rpcBatchTostateBatch(rpcBatch *types.Batch) state.Batch { - return state.Batch{ +func rpcBatchTostateBatch(rpcBatch *types.Batch) *state.Batch { + if rpcBatch == nil { + return nil + } + return &state.Batch{ BatchNumber: uint64(rpcBatch.Number), Coinbase: rpcBatch.Coinbase, StateRoot: rpcBatch.StateRoot, @@ -671,8 +695,16 @@ func rpcBatchTostateBatch(rpcBatch *types.Batch) 
state.Batch { } } +func expectedCallsForOpenBatch(t *testing.T, m *mocks, sync *ClientSynchronizer, batchNumber uint64) { + m.State. + On("OpenBatch", sync.ctx, mock.Anything, m.DbTx). + Return(nil). + Once() +} + func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchronizer, - batchInPermissionLess *types.Batch, batchInTrustedNode *types.Batch, needToRetrieveBatchFromDatabase bool) { + batchInPermissionLess *types.Batch, batchInTrustedNode *types.Batch, previousBatchInPermissionless *types.Batch, + needToRetrieveBatchFromDatabase bool, needUpdateL2Data bool) { batchNumber := uint64(batchInTrustedNode.Number) m.ZKEVMClient. On("BatchNumber", mock.Anything). @@ -690,45 +722,49 @@ func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchr Once() m.State. - On("BeginStateTransaction", sync.ctx). + On("BeginStateTransaction", mock.Anything). Return(m.DbTx, nil). Once() stateBatchInTrustedNode := rpcBatchTostateBatch(batchInTrustedNode) stateBatchInPermissionLess := rpcBatchTostateBatch(batchInPermissionLess) + statePreviousBatchInPermissionless := rpcBatchTostateBatch(previousBatchInPermissionless) + if needToRetrieveBatchFromDatabase { + if statePreviousBatchInPermissionless != nil { + m.State. + On("GetBatchByNumber", mock.Anything, uint64(batchInTrustedNode.Number-1), mock.Anything). + Return(statePreviousBatchInPermissionless, nil). + Once() + } else { + m.State. + On("GetBatchByNumber", mock.Anything, uint64(batchInTrustedNode.Number-1), mock.Anything). + Return(nil, state.ErrNotFound). + Once() + } + if stateBatchInPermissionLess != nil { + m.State. + On("GetBatchByNumber", mock.Anything, uint64(batchInTrustedNode.Number), mock.Anything). + Return(stateBatchInPermissionLess, nil). + Once() + } else { + m.State. + On("GetBatchByNumber", mock.Anything, uint64(batchInTrustedNode.Number), mock.Anything). + Return(nil, state.ErrNotFound). + Once() + } + } + if needUpdateL2Data { m.State. 
- On("GetBatchByNumber", mock.Anything, uint64(batchInPermissionLess.Number-1), mock.Anything). - Return(&stateBatchInPermissionLess, nil). + On("ResetTrustedState", sync.ctx, batchNumber-1, mock.Anything). + Return(nil). Once() + m.State. - On("GetBatchByNumber", mock.Anything, uint64(batchInPermissionLess.Number), mock.Anything). - Return(&stateBatchInPermissionLess, nil). + On("UpdateBatchL2Data", mock.Anything, batchNumber, stateBatchInTrustedNode.BatchL2Data, mock.Anything). + Return(nil). Once() } - - m.State. - On("ResetTrustedState", sync.ctx, batchNumber-1, m.DbTx). - Return(nil). - Once() - - processCtx := state.ProcessingContext{ - BatchNumber: uint64(batchInTrustedNode.Number), - Coinbase: common.HexToAddress(batchInTrustedNode.Coinbase.String()), - Timestamp: time.Unix(int64(batchInTrustedNode.Timestamp), 0), - GlobalExitRoot: batchInTrustedNode.GlobalExitRoot, - BatchL2Data: (*[]byte)(&batchInTrustedNode.BatchL2Data), - } - m.State. - On("OpenBatch", sync.ctx, processCtx, m.DbTx). - Return(nil). - Once() - - m.State. - On("UpdateBatchL2Data", sync.ctx, batchNumber, stateBatchInTrustedNode.BatchL2Data, mock.Anything). - Return(nil). - Once() - tx1 := state.ProcessTransactionResponse{} processedBatch := state.ProcessBatchResponse{ FlushID: 1, @@ -736,22 +772,22 @@ func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchr Responses: []*state.ProcessTransactionResponse{&tx1}, } m.State. - On("ProcessBatch", sync.ctx, mock.Anything, true). + On("ProcessBatch", mock.Anything, mock.Anything, true). Return(&processedBatch, nil). Once() m.State. - On("StoreTransaction", sync.ctx, uint64(stateBatchInTrustedNode.BatchNumber), mock.Anything, stateBatchInTrustedNode.Coinbase, uint64(batchInTrustedNode.Timestamp), m.DbTx). - Return(nil). + On("StoreTransaction", sync.ctx, uint64(stateBatchInTrustedNode.BatchNumber), mock.Anything, stateBatchInTrustedNode.Coinbase, uint64(batchInTrustedNode.Timestamp), mock.Anything, m.DbTx). 
+ Return(ðTypes.Header{}, nil). Once() m.State. - On("GetStoredFlushID", sync.ctx). + On("GetStoredFlushID", mock.Anything). Return(uint64(1), cProverIDExecution, nil). Once() m.DbTx. - On("Commit", sync.ctx). + On("Commit", mock.Anything). Return(nil). Once() } diff --git a/synchronizer/time_provider.go b/synchronizer/time_provider.go new file mode 100644 index 0000000000..fea32d8e53 --- /dev/null +++ b/synchronizer/time_provider.go @@ -0,0 +1,19 @@ +package synchronizer + +import ( + "time" +) + +// TimeProvider is a interface for classes that needs time and we want to be able to unittest it +type TimeProvider interface { + // Now returns current time + Now() time.Time +} + +// DefaultTimeProvider is the default implementation of TimeProvider +type DefaultTimeProvider struct{} + +// Now returns current time +func (d DefaultTimeProvider) Now() time.Time { + return time.Now() +} diff --git a/test/Makefile b/test/Makefile index 52a2ae4881..fe278e1c65 100644 --- a/test/Makefile +++ b/test/Makefile @@ -104,7 +104,7 @@ test-full-non-e2e: stop ## Runs non-e2e tests checking race conditions $(RUNL1NETWORK) sleep 15 docker logs $(DOCKERCOMPOSEZKPROVER) - trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -short -race -p 1 -timeout 60s ../... + trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -short -race -p 1 -covermode=atomic -coverprofile=../coverage.out -timeout 70s ../... .PHONY: test-e2e-group-1 test-e2e-group-1: stop ## Runs group 1 e2e tests checking race conditions @@ -241,7 +241,6 @@ test-e2e-group-11: stop ## Runs group 11 e2e tests checking race conditions docker logs $(DOCKERCOMPOSEZKPROVER) trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -race -v -p 1 -timeout 2000s ../ci/e2e-group11/... 
- .PHONY: test-e2e-e2e-group-dac-1 test-e2e-group-dac-1: stop ## Runs x1-validium-1 e2e tests checking race conditions $(RUNSTATEDB) @@ -268,7 +267,7 @@ benchmark-sequencer-eth-transfers: stop $(RUNJSONRPC) docker ps -a docker logs $(DOCKERCOMPOSEZKPROVER) - @ cd benchmarks/sequencer/eth-transfers ; \ + @ cd benchmarks/sequencer/e2e/eth-transfers ; \ mkdir -p results ; \ touch ./results/out.dat ; \ go test -bench=. -timeout=600m | tee ./results/out.dat ; @@ -288,7 +287,27 @@ benchmark-sequencer-erc20-transfers: stop $(RUNJSONRPC) docker ps -a docker logs $(DOCKERCOMPOSEZKPROVER) - @ cd benchmarks/sequencer/erc20-transfers ; \ + @ cd benchmarks/sequencer/e2e/erc20-transfers ; \ + mkdir -p results ; \ + touch ./results/out.dat ; \ + go test -bench=. -timeout=600m | tee ./results/out.dat ; + + +.PHONY: benchmark-sequencer-uniswap-transfers +benchmark-sequencer-uniswap-transfers: stop + $(RUNL1NETWORK) + $(RUNSTATEDB) + $(RUNPOOLDB) + $(RUNEVENTDB) + sleep 5 + $(RUNZKPROVER) + $(RUNSYNC) + sleep 2 + $(RUNL2GASPRICER) + $(RUNJSONRPC) + docker ps -a + docker logs $(DOCKERCOMPOSEZKPROVER) + @ cd benchmarks/sequencer/e2e/uniswap-transfers ; \ mkdir -p results ; \ touch ./results/out.dat ; \ go test -bench=. 
-timeout=600m | tee ./results/out.dat ; @@ -347,7 +366,7 @@ stop-zkprover: ## Stops zkprover $(STOPZKPROVER) .PHONY: run-l1-explorer -run-l1-explorer: ## Runs L1 blockscan explorer +run-l1-explorer: ## Runs L1 blockscan explorer $(RUNEXPLORERL1DB) $(RUNEXPLORERL1) @@ -401,7 +420,7 @@ run-seqsender: ## runs the sequencer sender .PHONY: stop-seqsender stop-seqsender: ## stops the sequencer sender $(STOPSEQUENCESENDER) - + .PHONY: run-sync run-sync: ## runs the synchronizer $(RUNSYNC) @@ -433,7 +452,7 @@ run-eth-tx-manager: ## Runs the eth tx manager service .PHONY: stop-eth-tx-manager stop-eth-tx-manager: ## Stops the eth tx manager service $(STOPETHTXMANAGER) - + .PHONY: run-agg run-agg: ## Runs the aggregator service $(RUNAGGREGATOR) @@ -451,14 +470,26 @@ stop-grafana: ## Stops the grafana service $(STOPGRAFANA) .PHONY: run-permissionless -run-permissionless: run-node ## Runs the trusted and permissionless node +run-permissionless: run-node run-permissionless-dependencies ## Runs the trusted and permissionless node $(RUNPERMISSIONLESSDB) sleep 3 $(RUNPERMISSIONLESSZKPROVER) $(RUNPERMISSIONLESSNODE) .PHONY: stop-permissionless -stop-permissionless: stop-node## Stops the trusted and permissionless node +stop-permissionless: stop-node stop-permissionless-dependencies ## Stops the permissionless node + $(STOPPERMISSIONLESSNODE) + + +PHONY: run-permissionless-dependencies +run-permissionless-dependencies: ## Runs the permissionless dependencies (db + prover) without the node + $(RUNPERMISSIONLESSDB) + sleep 3 + $(RUNPERMISSIONLESSZKPROVER) + + +PHONY: stop-permissionless-dependencies +stop-permissionless-dependencies: ## Stop the permissionless dependencies (db + prover) without the node $(STOPPERMISSIONLESSNODE) $(STOPPERMISSIONLESSZKPROVER) $(STOPPERMISSIONLESSDB) @@ -496,7 +527,7 @@ run: ## Runs a full node $(RUNAPPROVE) sleep 3 $(RUNSYNC) - sleep 2 + sleep 4 $(RUNDACNODE) $(RUNETHTXMANAGER) $(RUNSEQUENCER) @@ -561,13 +592,18 @@ install-mockery: ## Installs mockery 
with the correct version to generate the mo go install github.com/vektra/mockery/v2@v2.22.1 .PHONY: generate-mocks -generate-mocks: ## Generates mocks for the tests, using mockery tool +generate-mocks: generate-mocks-jsonrpc generate-mocks-sequencer generate-mocks-synchronizer generate-mocks-etherman generate-mocks-aggregator ## Generates mocks for the tests, using mockery tool + +.PHONY: generate-mocks-jsonrpc +generate-mocks-jsonrpc: ## Generates mocks for jsonrpc , using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=storageInterface --dir=../jsonrpc --output=../jsonrpc --outpkg=jsonrpc --inpackage --structname=storageMock --filename=mock_storage.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=PoolInterface --dir=../jsonrpc/types --output=../jsonrpc/mocks --outpkg=mocks --structname=PoolMock --filename=mock_pool.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../jsonrpc/types --output=../jsonrpc/mocks --outpkg=mocks --structname=StateMock --filename=mock_state.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthermanInterface --dir=../jsonrpc/types --output=../jsonrpc/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../jsonrpc/mocks --outpkg=mocks --structname=DBTxMock --filename=mock_dbtx.go +.PHONY: generate-mocks-sequencer +generate-mocks-sequencer: ## Generates mocks for sequencer , using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=workerInterface --dir=../sequencer --output=../sequencer --outpkg=sequencer --inpackage --structname=WorkerMock --filename=mock_worker.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=stateInterface --dir=../sequencer --output=../sequencer --outpkg=sequencer --inpackage 
--structname=StateMock --filename=mock_state.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=txPool --dir=../sequencer --output=../sequencer --outpkg=sequencer --inpackage --structname=PoolMock --filename=mock_pool.go @@ -575,15 +611,26 @@ generate-mocks: ## Generates mocks for the tests, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=dbManagerInterface --dir=../sequencer --output=../sequencer --outpkg=sequencer --inpackage --structname=DbManagerMock --filename=mock_db_manager.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=etherman --dir=../sequencer --output=../sequencer --outpkg=sequencer --inpackage --structname=EthermanMock --filename=mock_etherman.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ethermanInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=ethermanMock --filename=mock_etherman.go +.PHONY: generate-mocks-synchronizer +generate-mocks-synchronizer: ## Generates mocks for synchronizer , using mockery tool + ## mocks for synchronizer + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthermanInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=ethermanMock --filename=mock_etherman.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=stateInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=stateMock --filename=mock_state.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ethTxManager --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=ethTxManagerMock --filename=mock_ethtxmanager.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=poolInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=poolMock --filename=mock_pool.go export 
"GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=zkEVMClientInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=zkEVMClientMock --filename=mock_zkevmclient.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../synchronizer --outpkg=synchronizer --structname=dbTxMock --filename=mock_dbtx.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=l1RollupProducerInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=l1RollupProducerInterfaceMock --filename=mock_l1_rollup_producer_interface.go --inpackage + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=l1RollupConsumerInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=l1RollupConsumerInterfaceMock --filename=mock_l1_rollup_consumer_interface.go --inpackage + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=worker --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=workerMock --filename=mock_l1_worker.go --inpackage + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=synchronizerProcessBlockRangeInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=synchronizerProcessBlockRangeMock --filename=mock_synchronizer_process_block_range.go --inpackage + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=workersInterface --dir=../synchronizer --output=../synchronizer --outpkg=synchronizer --structname=workersMock --filename=mock_workers.go --inpackage + +.PHONY: generate-mocks-etherman +generate-mocks-etherman: ## Generates mocks for etherman , using mockery tool + ## mocks for etherman export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ClientFactoryInterface --srcpkg=github.com/okx/x1-data-availability/client 
--output=../synchronizer --outpkg=synchronizer --structname=dataCommitteeClientFactoryMock --filename=mock_datacommitteeclientfactory.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ClientInterface --srcpkg=github.com/okx/x1-data-availability/client --output=../synchronizer --outpkg=synchronizer --structname=dataCommitteeClientMock --filename=mock_datacommitteeclient.go - + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ClientInterface --srcpkg=github.com/okx/x1-data-availability/client --output=../synchronizer --outpkg=synchronizer --structname=dataCommitteeClientMock --filename=mock_datacommitteeclient.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=GasPricer --srcpkg=github.com/ethereum/go-ethereum --output=../etherman --outpkg=etherman --structname=etherscanMock --filename=mock_etherscan.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=GasPricer --srcpkg=github.com/ethereum/go-ethereum --output=../etherman --outpkg=etherman --structname=ethGasStationMock --filename=mock_ethgasstation.go @@ -593,6 +640,8 @@ generate-mocks: ## Generates mocks for the tests, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=poolInterface --dir=../gasprice --output=../gasprice --outpkg=gasprice --structname=poolMock --filename=mock_pool.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ethermanInterface --dir=../gasprice --output=../gasprice --outpkg=gasprice --structname=ethermanMock --filename=mock_etherman.go +.PHONY: generate-mocks-aggregator +generate-mocks-aggregator: ## Generates mocks for aggregator , using mockery tool ## mocks for the aggregator tests export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=stateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateMock --filename=mock_state.go export "GOROOT=$$(go env GOROOT)" && $$(go 
env GOPATH)/bin/mockery --name=proverInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=ProverMock --filename=mock_prover.go diff --git a/test/benchmarks/sequencer/common/metrics/metrics.go b/test/benchmarks/sequencer/common/metrics/metrics.go index e45d8e631a..8d3d3da42c 100644 --- a/test/benchmarks/sequencer/common/metrics/metrics.go +++ b/test/benchmarks/sequencer/common/metrics/metrics.go @@ -4,14 +4,16 @@ import ( "fmt" "net/http" "os/exec" + "strings" "time" - "github.com/0xPolygonHermez/zkevm-node/log" metricsLib "github.com/0xPolygonHermez/zkevm-node/metrics" "github.com/0xPolygonHermez/zkevm-node/sequencer/metrics" metricsState "github.com/0xPolygonHermez/zkevm-node/state/metrics" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" "github.com/0xPolygonHermez/zkevm-node/test/testutils" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" ) const ( @@ -20,48 +22,85 @@ const ( ) // CalculateAndPrint calculates and prints the results -func CalculateAndPrint(prometheusResp *http.Response, profilingResult string, elapsed time.Duration, sequencerTimeSub, executorTimeSub float64, nTxs int) { - var ( - metricValues Values - err error - ) - if prometheusResp != nil { - metricValues, err = GetValues(prometheusResp) - if err != nil { - log.Fatalf("error getting prometheus metrics: %v", err) - } - } +func CalculateAndPrint( + txsType string, + totalTxs uint64, + client *ethclient.Client, + profilingResult string, + elapsed time.Duration, + sequencerTimeSub, executorTimeSub float64, + allTxs []*types.Transaction, +) { + fmt.Println("##########") + fmt.Println("# Result #") + fmt.Println("##########") + fmt.Printf("Total time (including setup of environment and starting containers): %v\n", elapsed) + totalTime := elapsed.Seconds() - log.Info("##########") - log.Info("# Result #") - log.Info("##########") - log.Infof("Total time (including setup of environment and 
starting containers): %v", elapsed) - - if prometheusResp != nil { - log.Info("######################") - log.Info("# Prometheus Metrics #") - log.Info("######################") - actualTotalTime := metricValues.SequencerTotalProcessingTime - sequencerTimeSub - actualExecutorTime := metricValues.ExecutorTotalProcessingTime - executorTimeSub - PrintPrometheus(actualTotalTime, actualExecutorTime, metricValues) - log.Infof("[Transactions per second]: %v", float64(nTxs)/actualTotalTime) + prometheusResp, err := FetchPrometheus() + if err != nil { + panic(fmt.Sprintf("error getting prometheus metrics: %v", err)) + } + metricValues, err := GetValues(prometheusResp) + if err != nil { + panic(fmt.Sprintf("error getting prometheus metrics: %v\n", err)) } + actualTotalTime := metricValues.SequencerTotalProcessingTime - sequencerTimeSub + actualExecutorTime := metricValues.ExecutorTotalProcessingTime - executorTimeSub + totalTime = actualTotalTime + PrintSummary(txsType, params.NumberOfOperations, totalTxs, totalTime, actualExecutorTime, GetTotalGasUsedFromTxs(client, allTxs)) + if profilingResult != "" { - log.Info("#####################") - log.Info("# Profiling Metrics #") - log.Info("#####################") - log.Infof("%v", profilingResult) + fmt.Println("#####################") + fmt.Println("# Profiling Metrics #") + fmt.Println("#####################") + fmt.Printf("%v", profilingResult) + } +} + +func PrintSummary( + txsType string, + totalTransactionsSent uint64, + totalTxs uint64, + processingTimeSequencer float64, + processingTimeExecutor float64, + totalGas uint64, +) { + var transactionsTypes *string + if txsType == "uniswap" { + transactionsTypes, totalTransactionsSent = getTransactionsBreakdownForUniswap(totalTransactionsSent) + } + randomTxs := totalTxs - totalTransactionsSent + txsType = strings.ToUpper(txsType) + msg := fmt.Sprintf("# %s Benchmarks Summary #", txsType) + delimiter := strings.Repeat("-", len(msg)) + fmt.Println(delimiter) + fmt.Println(msg) 
+ fmt.Println(delimiter) + + if transactionsTypes != nil { + fmt.Printf("Transactions Types: %s\n", *transactionsTypes) } + fmt.Printf("Total Transactions: %d (%d predefined + %d random transactions)\n\n", totalTxs, totalTransactionsSent, randomTxs) + fmt.Println("Processing Times:") + fmt.Printf("- Total Processing Time: %.2f seconds\n", processingTimeSequencer) + fmt.Printf("- Executor Processing Time: %.2f seconds\n", processingTimeExecutor) + fmt.Printf("- Sequencer Processing Time: %.2f seconds\n\n", processingTimeSequencer-processingTimeExecutor) + fmt.Println("Percentage Breakdown:") + fmt.Printf("- Executor Time Percentage from Total: %.2f%%\n\n", (processingTimeExecutor/processingTimeSequencer)*oneHundred) + fmt.Println("Metrics:") + fmt.Printf("- Transactions per Second: %.2f\n", float64(totalTxs)/processingTimeSequencer) + fmt.Printf("[the rest of the metrics are only for predefined transactions - excluding the random transactions]\n") + fmt.Printf("- Gas per Second: %.2f\n", float64(totalGas)/processingTimeSequencer) + fmt.Printf("- Total Gas Used: %d\n", totalGas) + fmt.Printf("- Average Gas Used per Transaction: %d\n\n", totalGas/totalTxs) } -// PrintPrometheus prints the prometheus metrics -func PrintPrometheus(totalTime float64, executorTime float64, metricValues Values) { - log.Infof("[TOTAL Processing Time]: %v s", totalTime) - log.Infof("[EXECUTOR Processing Time]: %v s", executorTime) - log.Infof("[SEQUENCER Processing Time]: %v s", totalTime-executorTime) - log.Infof("[WORKER Processing Time]: %v s", metricValues.WorkerTotalProcessingTime) - log.Infof("[EXECUTOR Time Percentage from TOTAL]: %.2f %%", (executorTime/totalTime)*oneHundred) - log.Infof("[WORKER Time Percentage from TOTAL]: %.2f %%", (metricValues.WorkerTotalProcessingTime/totalTime)*oneHundred) +func getTransactionsBreakdownForUniswap(numberOfOperations uint64) (*string, uint64) { + transactionsBreakdown := fmt.Sprintf("Deployments, Approvals, Adding Liquidity, %d Swap Cycles (A -> 
B -> C)", numberOfOperations) + totalTransactionsSent := (numberOfOperations * 2) + 17 + + return &transactionsBreakdown, totalTransactionsSent } type Values struct { @@ -76,7 +115,7 @@ func GetValues(metricsResponse *http.Response) (Values, error) { if metricsResponse == nil { metricsResponse, err = FetchPrometheus() if err != nil { - log.Fatalf("error getting prometheus metrics: %v", err) + panic(fmt.Sprintf("error getting prometheus metrics: %v", err)) } } @@ -102,18 +141,53 @@ func GetValues(metricsResponse *http.Response) (Values, error) { // FetchPrometheus fetches the prometheus metrics func FetchPrometheus() (*http.Response, error) { - log.Infof("Fetching prometheus metrics ...") + fmt.Printf("Fetching prometheus metrics ...\n") return http.Get(fmt.Sprintf("http://localhost:%d%s", params.PrometheusPort, metricsLib.Endpoint)) } // FetchProfiling fetches the profiling metrics func FetchProfiling() (string, error) { fullUrl := fmt.Sprintf("http://localhost:%d%s", profilingPort, metricsLib.ProfileEndpoint) - log.Infof("Fetching profiling metrics from: %s ...", fullUrl) + fmt.Printf("Fetching profiling metrics from: %s ...", fullUrl) cmd := exec.Command("go", "tool", "pprof", "-show=sequencer", "-top", fullUrl) out, err := cmd.CombinedOutput() if err != nil { - log.Fatalf("Error running pprof: %v\n%s", err, out) + panic(fmt.Sprintf("error fetching profiling metrics: %v", err)) } return string(out), err } + +func PrintUniswapDeployments(deployments time.Duration, count uint64) { + fmt.Println("#######################") + fmt.Println("# Uniswap Deployments #") + fmt.Println("#######################") + fmt.Printf("Total time took for the sequencer to deploy all contracts: %v\n", deployments) + fmt.Printf("Number of txs sent: %d\n", count) +} + +// GetTotalGasUsedFromTxs sums the total gas used from the transactions +func GetTotalGasUsedFromTxs(client *ethclient.Client, txs []*types.Transaction) uint64 { + // calculate the total gas used + var totalGas uint64 + for 
_, tx := range txs { + // Fetch the transaction receipt + receipt, err := client.TransactionReceipt(params.Ctx, tx.Hash()) + if err != nil { + fmt.Println("Unable to fetch transaction receipt", "error", err) + continue + } + + totalGas += receipt.GasUsed + + if receipt.Status != types.ReceiptStatusSuccessful { + reason := "unknown" + if receipt.Status == types.ReceiptStatusFailed { + reason = "reverted" + } + fmt.Println("Transaction failed", "tx", tx.Hash(), "status", receipt.Status, "reason", reason) + continue + } + } + + return totalGas +} diff --git a/test/benchmarks/sequencer/common/params/constants.go b/test/benchmarks/sequencer/common/params/constants.go index 735fd9788f..d30f97b8fe 100644 --- a/test/benchmarks/sequencer/common/params/constants.go +++ b/test/benchmarks/sequencer/common/params/constants.go @@ -11,6 +11,6 @@ const ( MaxCumulativeGasUsed = 80000000000 // PrometheusPort is the port where prometheus is running PrometheusPort = 9092 - // NumberOfTxs is the number of transactions to send - NumberOfTxs = 1000 + // NumberOfOperations is the number of transactions to send + NumberOfOperations = 300 ) diff --git a/test/benchmarks/sequencer/common/setup/setup.go b/test/benchmarks/sequencer/common/setup/setup.go index 0c192b745f..c48842ec32 100644 --- a/test/benchmarks/sequencer/common/setup/setup.go +++ b/test/benchmarks/sequencer/common/setup/setup.go @@ -2,6 +2,8 @@ package setup import ( "context" + "fmt" + "github.com/ethereum/go-ethereum/common" "math/big" "testing" "time" @@ -9,13 +11,12 @@ import ( "github.com/0xPolygonHermez/zkevm-node/config/types" "github.com/0xPolygonHermez/zkevm-node/event" "github.com/0xPolygonHermez/zkevm-node/event/nileventstorage" - "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/pool" "github.com/0xPolygonHermez/zkevm-node/pool/pgpoolstorage" + "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" 
"github.com/0xPolygonHermez/zkevm-node/test/operations" "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" "github.com/stretchr/testify/require" ) @@ -27,6 +28,21 @@ const ( defaultGasPrice = 1000000000 ) +var ( + bc = state.BatchConstraintsCfg{ + MaxTxsPerBatch: 300, + MaxBatchBytesSize: 120000, + MaxCumulativeGasUsed: 30000000, + MaxKeccakHashes: 2145, + MaxPoseidonHashes: 252357, + MaxPoseidonPaddings: 135191, + MaxMemAligns: 236585, + MaxArithmetics: 236585, + MaxBinaries: 473170, + MaxSteps: 7570538, + } +) + // Environment sets up the environment for the benchmark func Environment(ctx context.Context, b *testing.B) (*operations.Manager, *ethclient.Client, *pool.Pool, *bind.TransactOpts) { if testing.Short() { @@ -65,7 +81,7 @@ func Environment(ctx context.Context, b *testing.B) (*operations.Manager, *ethcl require.NoError(b, err) eventLog := event.NewEventLog(event.Config{}, eventStorage) - pl := pool.NewPool(config, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), params.ChainID, eventLog) + pl := pool.NewPool(config, bc, s, st, common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), params.ChainID, eventLog) // Print Info before send senderBalance, err := client.BalanceAt(ctx, auth.From, nil) @@ -74,10 +90,10 @@ func Environment(ctx context.Context, b *testing.B) (*operations.Manager, *ethcl require.NoError(b, err) // Print Initial Stats - log.Infof("Receiver Addr: %v", params.To.String()) - log.Infof("Sender Addr: %v", auth.From.String()) - log.Infof("Sender Balance: %v", senderBalance.String()) - log.Infof("Sender Nonce: %v", senderNonce) + fmt.Printf("Receiver Addr: %v\n", params.To.String()) + fmt.Printf("Sender Addr: %v\n", auth.From.String()) + fmt.Printf("Sender Balance: %v\n", senderBalance.String()) + fmt.Printf("Sender Nonce: %v\n", senderNonce) gasPrice, err := client.SuggestGasPrice(ctx) require.NoError(b, err) @@ -93,7 
+109,6 @@ func Environment(ctx context.Context, b *testing.B) (*operations.Manager, *ethcl panic(err) } auth.GasPrice = gasPrice - auth.Nonce = new(big.Int).SetUint64(senderNonce) return opsman, client, pl, auth } @@ -128,8 +143,8 @@ func Components(opsman *operations.Manager) error { // BootstrapSequencer starts the sequencer and waits for it to be ready func BootstrapSequencer(b *testing.B, opsman *operations.Manager) { - log.Debug("Starting sequencer ....") + fmt.Println("Starting sequencer ....") err := operations.StartComponent("seq") require.NoError(b, err) - log.Debug("Sequencer Started!") + fmt.Println("Sequencer Started!") } diff --git a/test/benchmarks/sequencer/common/transactions/transactions.go b/test/benchmarks/sequencer/common/transactions/transactions.go index 0db33871f9..b927e676a3 100644 --- a/test/benchmarks/sequencer/common/transactions/transactions.go +++ b/test/benchmarks/sequencer/common/transactions/transactions.go @@ -2,76 +2,84 @@ package transactions import ( "context" + "errors" + "fmt" "math/big" "strconv" "time" - "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/pool" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" "github.com/0xPolygonHermez/zkevm-node/test/operations" + + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + "github.com/0xPolygonHermez/zkevm-node/test/scripts/uniswap/pkg" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" ) // SendAndWait sends a number of transactions and waits for them to be marked as pending in the pool func SendAndWait( - ctx context.Context, auth *bind.TransactOpts, client *ethclient.Client, - countByStatusFunc func(ctx context.Context, status ...pool.TxStatus) (uint64, error), - nTxs int, + getTxsByStatus 
func(ctx context.Context, status pool.TxStatus, limit uint64) ([]pool.Transaction, error), + nTxs uint64, erc20SC *ERC20.ERC20, - txSenderFunc func(l2Client *ethclient.Client, gasPrice *big.Int, nonce uint64, auth *bind.TransactOpts, erc20SC *ERC20.ERC20) error, -) error { + uniswapDeployments *pkg.Deployments, + txSenderFunc func(l2Client *ethclient.Client, gasPrice *big.Int, auth *bind.TransactOpts, erc20SC *ERC20.ERC20, uniswapDeployments *pkg.Deployments) ([]*types.Transaction, error), +) ([]*types.Transaction, error) { auth.GasLimit = 2100000 - log.Debugf("Sending %d txs ...", nTxs) - startingNonce := auth.Nonce.Uint64() - maxNonce := uint64(nTxs) + startingNonce - initialPendingCount, err := countByStatusFunc(params.Ctx, pool.TxStatusPending) - if err != nil { - panic(err) + fmt.Printf("Sending %d txs ...\n", nTxs) + if auth.Nonce != nil { + auth.Nonce = nil } - for nonce := startingNonce; nonce < maxNonce; nonce++ { - err = txSenderFunc(client, auth.GasPrice, nonce, auth, erc20SC) + allTxs := make([]*types.Transaction, 0, nTxs) + for i := 0; i < int(nTxs); i++ { + txs, err := txSenderFunc(client, auth.GasPrice, auth, erc20SC, uniswapDeployments) if err != nil { - for err != nil && err.Error() == "nonce intrinsic error" { - log.Warnf("nonce intrinsic error, retrying with nonce %d", nonce) - err = txSenderFunc(client, auth.GasPrice, nonce, auth, erc20SC) - } - if err == nil { - continue - } - return err + return nil, err } + allTxs = append(allTxs, txs...) 
} - log.Debug("All txs were sent!") - log.Debug("Waiting pending transactions To be added in the pool ...") - err = operations.Poll(1*time.Second, params.DefaultDeadline, func() (bool, error) { + fmt.Println("All txs were sent!") + fmt.Println("Waiting pending transactions To be added in the pool ...") + err := operations.Poll(1*time.Second, params.DefaultDeadline, func() (bool, error) { // using a closure here To capture st and currentBatchNumber - count, err := countByStatusFunc(ctx, pool.TxStatusPending) + pendingTxs, err := getTxsByStatus(params.Ctx, pool.TxStatusPending, 0) if err != nil { - return false, err + panic(err) + } + pendingTxsCount := 0 + for _, tx := range pendingTxs { + sender, err := state.GetSender(tx.Transaction) + if err != nil { + panic(err) + } + if sender == auth.From { + pendingTxsCount++ + } } - log.Debugf("amount of pending txs: %d\n", count) - done := count-initialPendingCount <= 0 + fmt.Printf("amount of pending txs: %d\n\n", pendingTxsCount) + done := pendingTxsCount <= 0 return done, nil }) if err != nil { - return err + return nil, err } - log.Debug("All pending txs are added in the pool!") + fmt.Println("All pending txs are added in the pool!") - return nil + return allTxs, nil } // WaitStatusSelected waits for a number of transactions to be marked as selected in the pool func WaitStatusSelected(countByStatusFunc func(ctx context.Context, status ...pool.TxStatus) (uint64, error), initialCount uint64, nTxs uint64) error { - log.Debug("Wait for sequencer to select all txs from the pool") + fmt.Println("Wait for sequencer to select all txs from the pool") pollingInterval := 1 * time.Second prevCount := uint64(0) @@ -94,7 +102,7 @@ func WaitStatusSelected(countByStatusFunc func(ctx context.Context, status ...po } txsPerSecondAsStr = strconv.Itoa(txsPerSecond) } - log.Debugf("amount of selected txs: %d/%d, estimated txs per second: %s, time to finish: %s", selectedCount-initialCount, nTxs, txsPerSecondAsStr, estimatedTimeToFinish) + 
fmt.Printf("amount of selected txs: %d/%d, estimated txs per second: %s, time to finish: %s\n", selectedCount-initialCount, nTxs, txsPerSecondAsStr, estimatedTimeToFinish) prevCount = currCount done := (int64(selectedCount) - int64(initialCount)) >= int64(nTxs) @@ -103,3 +111,7 @@ func WaitStatusSelected(countByStatusFunc func(ctx context.Context, status ...po return err } + +func ShouldRetryError(err error) bool { + return errors.Is(err, state.ErrStateNotSynchronized) || errors.Is(err, state.ErrInsufficientFunds) || errors.Is(err, pool.ErrNonceTooHigh) +} diff --git a/test/benchmarks/sequencer/e2e/erc20-transfers/deployment.go b/test/benchmarks/sequencer/e2e/erc20-transfers/deployment.go new file mode 100644 index 0000000000..452fd42f20 --- /dev/null +++ b/test/benchmarks/sequencer/e2e/erc20-transfers/deployment.go @@ -0,0 +1,43 @@ +package erc20_transfers + +import ( + "context" + "fmt" + "time" + + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + "github.com/0xPolygonHermez/zkevm-node/test/operations" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" +) + +const ( + txTimeout = 60 * time.Second +) + +func DeployERC20Contract(client *ethclient.Client, ctx context.Context, auth *bind.TransactOpts) (*ERC20.ERC20, error) { + var ( + tx *types.Transaction + err error + ) + fmt.Println("Sending TX to deploy ERC20 SC") + _, tx, erc20SC, err := ERC20.DeployERC20(auth, client, "Test Coin", "TCO") + if err != nil { + panic(err) + } + err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) + if err != nil { + panic(err) + } + fmt.Println("Sending TX to do a ERC20 mint") + tx, err = erc20SC.Mint(auth, mintAmountBig) + if err != nil { + panic(err) + } + err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) + if err != nil { + panic(err) + } + return erc20SC, err +} diff --git a/test/benchmarks/sequencer/e2e/erc20-transfers/erc20_test.go 
b/test/benchmarks/sequencer/e2e/erc20-transfers/erc20_test.go new file mode 100644 index 0000000000..3f813b203d --- /dev/null +++ b/test/benchmarks/sequencer/e2e/erc20-transfers/erc20_test.go @@ -0,0 +1,80 @@ +package erc20_transfers + +import ( + "fmt" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/setup" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + "github.com/stretchr/testify/require" +) + +const ( + profilingEnabled = false +) + +var ( + erc20SC *ERC20.ERC20 +) + +func BenchmarkSequencerERC20TransfersPoolProcess(b *testing.B) { + var err error + start := time.Now() + opsman, client, pl, auth := setup.Environment(params.Ctx, b) + setup.BootstrapSequencer(b, opsman) + timeForSetup := time.Since(start) + startDeploySCTime := time.Now() + erc20SC, err = DeployERC20Contract(client, params.Ctx, auth) + require.NoError(b, err) + deploySCElapsed := time.Since(startDeploySCTime) + deployMetricsValues, err := metrics.GetValues(nil) + if err != nil { + return + } + allTxs, err := transactions.SendAndWait( + auth, + client, + pl.GetTxsByStatus, + params.NumberOfOperations, + erc20SC, + nil, + TxSender, + ) + require.NoError(b, err) + + var ( + elapsed time.Duration + ) + + elapsed = time.Since(start) + fmt.Printf("Total elapsed time: %s\n", elapsed) + + var profilingResult string + if profilingEnabled { + profilingResult, err = metrics.FetchProfiling() + require.NoError(b, err) + } + + startMetrics := time.Now() + metrics.CalculateAndPrint( + "erc20", + uint64(len(allTxs)), + client, + profilingResult, + elapsed, + deployMetricsValues.SequencerTotalProcessingTime, + deployMetricsValues.ExecutorTotalProcessingTime, + allTxs, + ) + 
timeForFetchAndPrintMetrics := time.Since(startMetrics) + fmt.Println("########################################") + fmt.Println("# Deploying ERC20 SC and Mint Tx took: #") + fmt.Println("########################################") + fmt.Printf("%s\n", deploySCElapsed) + fmt.Printf("Time for setup: %s\n", timeForSetup) + fmt.Printf("Time for fetching metrics: %s\n", timeForFetchAndPrintMetrics) +} diff --git a/test/benchmarks/sequencer/e2e/erc20-transfers/tx_sender.go b/test/benchmarks/sequencer/e2e/erc20-transfers/tx_sender.go new file mode 100644 index 0000000000..a30bc0e679 --- /dev/null +++ b/test/benchmarks/sequencer/e2e/erc20-transfers/tx_sender.go @@ -0,0 +1,47 @@ +package erc20_transfers + +import ( + "fmt" + "math/big" + "time" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + uniswap "github.com/0xPolygonHermez/zkevm-node/test/scripts/uniswap/pkg" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" +) + +const ( + mintAmount = 1000000000000000000 +) + +var ( + sleepTime = 1 * time.Second + mintAmountBig = big.NewInt(mintAmount) + countTxs = 0 +) + +// TxSender sends ERC20 transfer to the sequencer +func TxSender(l2Client *ethclient.Client, gasPrice *big.Int, auth *bind.TransactOpts, erc20SC *ERC20.ERC20, uniswapDeployments *uniswap.Deployments) ([]*types.Transaction, error) { + fmt.Printf("sending tx num: %d\n", countTxs+1) + var actualTransferAmount *big.Int + if countTxs%2 == 0 { + actualTransferAmount = big.NewInt(0) + } else { + actualTransferAmount = big.NewInt(1) + } + tx, err := erc20SC.Transfer(auth, params.To, actualTransferAmount) + if transactions.ShouldRetryError(err) { + time.Sleep(sleepTime) + tx, err = erc20SC.Transfer(auth, params.To, actualTransferAmount) + } 
+ + if err == nil { + countTxs += 1 + } + + return []*types.Transaction{tx}, err +} diff --git a/test/benchmarks/sequencer/eth-transfers/pool_processing_eth_test.go b/test/benchmarks/sequencer/e2e/eth-transfers/eth_test.go similarity index 58% rename from test/benchmarks/sequencer/eth-transfers/pool_processing_eth_test.go rename to test/benchmarks/sequencer/e2e/eth-transfers/eth_test.go index 215da9d979..9969eafcf6 100644 --- a/test/benchmarks/sequencer/eth-transfers/pool_processing_eth_test.go +++ b/test/benchmarks/sequencer/e2e/eth-transfers/eth_test.go @@ -2,12 +2,11 @@ package eth_transfers import ( "fmt" - "net/http" "testing" "time" - "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/pool" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/setup" @@ -27,22 +26,24 @@ func BenchmarkSequencerEthTransfersPoolProcess(b *testing.B) { require.NoError(b, err) timeForSetup := time.Since(start) setup.BootstrapSequencer(b, opsman) - err = transactions.SendAndWait(params.Ctx, auth, client, pl.CountTransactionsByStatus, params.NumberOfTxs, nil, TxSender) + allTxs, err := transactions.SendAndWait( + auth, + client, + pl.GetTxsByStatus, + params.NumberOfOperations, + nil, + nil, + TxSender, + ) require.NoError(b, err) var ( - elapsed time.Duration - prometheusResponse *http.Response + elapsed time.Duration ) - - b.Run(fmt.Sprintf("sequencer_selecting_%d_txs", params.NumberOfTxs), func(b *testing.B) { - err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, initialCount, params.NumberOfTxs) - require.NoError(b, err) - elapsed = time.Since(start) - log.Infof("Total elapsed time: %s", elapsed) - prometheusResponse, err = metrics.FetchPrometheus() - require.NoError(b, err) - }) + err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, 
initialCount, params.NumberOfOperations) + require.NoError(b, err) + elapsed = time.Since(start) + fmt.Printf("Total elapsed time: %s\n", elapsed) startMetrics := time.Now() var profilingResult string @@ -51,9 +52,18 @@ func BenchmarkSequencerEthTransfersPoolProcess(b *testing.B) { require.NoError(b, err) } - metrics.CalculateAndPrint(prometheusResponse, profilingResult, elapsed, 0, 0, params.NumberOfTxs) + metrics.CalculateAndPrint( + "eth", + uint64(len(allTxs)), + client, + profilingResult, + elapsed, + 0, + 0, + allTxs, + ) fmt.Printf("%s\n", profilingResult) timeForFetchAndPrintMetrics := time.Since(startMetrics) - log.Infof("Time for setup: %s", timeForSetup) - log.Infof("Time for fetching metrics: %s", timeForFetchAndPrintMetrics) + fmt.Printf("Time for setup: %s\n", timeForSetup) + fmt.Printf("Time for fetching metrics: %s\n", timeForFetchAndPrintMetrics) } diff --git a/test/benchmarks/sequencer/e2e/eth-transfers/tx_sender.go b/test/benchmarks/sequencer/e2e/eth-transfers/tx_sender.go new file mode 100644 index 0000000000..8041bb39cb --- /dev/null +++ b/test/benchmarks/sequencer/e2e/eth-transfers/tx_sender.go @@ -0,0 +1,56 @@ +package eth_transfers + +import ( + "fmt" + "math/big" + "time" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + uniswap "github.com/0xPolygonHermez/zkevm-node/test/scripts/uniswap/pkg" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" +) + +var ( + gasLimit = 21000 + ethAmount = big.NewInt(0) + sleepTime = 1 * time.Second + countTxs = 0 +) + +// TxSender sends eth transfer to the sequencer +func TxSender(l2Client *ethclient.Client, gasPrice *big.Int, auth *bind.TransactOpts, erc20SC *ERC20.ERC20, uniswapDeployments *uniswap.Deployments) 
([]*types.Transaction, error) { + fmt.Printf("sending tx num: %d\n", countTxs+1) + senderNonce, err := l2Client.PendingNonceAt(params.Ctx, auth.From) + if err != nil { + panic(err) + } + tx := types.NewTx(&types.LegacyTx{ + GasPrice: gasPrice, + Gas: uint64(gasLimit), + To: ¶ms.To, + Value: ethAmount, + Data: nil, + Nonce: senderNonce, + }) + + signedTx, err := auth.Signer(auth.From, tx) + if err != nil { + return nil, err + } + + err = l2Client.SendTransaction(params.Ctx, signedTx) + for transactions.ShouldRetryError(err) { + time.Sleep(sleepTime) + err = l2Client.SendTransaction(params.Ctx, signedTx) + } + + if err == nil { + countTxs += 1 + } + + return []*types.Transaction{signedTx}, err +} diff --git a/test/benchmarks/sequencer/e2e/uniswap-transfers/tx_sender.go b/test/benchmarks/sequencer/e2e/uniswap-transfers/tx_sender.go new file mode 100644 index 0000000000..ce24b1ab2d --- /dev/null +++ b/test/benchmarks/sequencer/e2e/uniswap-transfers/tx_sender.go @@ -0,0 +1,44 @@ +package uniswap_transfers + +import ( + "fmt" + "math/big" + "strings" + "time" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + uniswap "github.com/0xPolygonHermez/zkevm-node/test/scripts/uniswap/pkg" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" +) + +var ( + gasLimit = 21000 + sleepTime = 1 * time.Second + countTxs = 0 + txTimeout = 60 * time.Second +) + +// TxSender sends eth transfer to the sequencer +func TxSender(l2Client *ethclient.Client, gasPrice *big.Int, auth *bind.TransactOpts, erc20SC *ERC20.ERC20, uniswapDeployments *uniswap.Deployments) ([]*types.Transaction, error) { + msg := fmt.Sprintf("# Swap Cycle Number: %d #", countTxs+1) + delimiter := strings.Repeat("#", len(msg)) + fmt.Println(delimiter) + fmt.Println(msg) + fmt.Println(delimiter) + var err error + + txs := 
uniswap.SwapTokens(l2Client, auth, *uniswapDeployments) + for transactions.ShouldRetryError(err) { + time.Sleep(sleepTime) + txs = uniswap.SwapTokens(l2Client, auth, *uniswapDeployments) + } + + if err == nil { + countTxs += 1 + } + + return txs, err +} diff --git a/test/benchmarks/sequencer/e2e/uniswap-transfers/uniswap_test.go b/test/benchmarks/sequencer/e2e/uniswap-transfers/uniswap_test.go new file mode 100644 index 0000000000..169899eb05 --- /dev/null +++ b/test/benchmarks/sequencer/e2e/uniswap-transfers/uniswap_test.go @@ -0,0 +1,66 @@ +package uniswap_transfers + +import ( + "fmt" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/setup" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" + uniswap "github.com/0xPolygonHermez/zkevm-node/test/scripts/uniswap/pkg" + "github.com/stretchr/testify/require" +) + +const ( + profilingEnabled = false +) + +func BenchmarkSequencerUniswapTransfersPoolProcess(b *testing.B) { + start := time.Now() + //defer func() { require.NoError(b, operations.Teardown()) }() + + opsman, client, pl, auth := setup.Environment(params.Ctx, b) + timeForSetup := time.Since(start) + setup.BootstrapSequencer(b, opsman) + deployments := uniswap.DeployContractsAndAddLiquidity(client, auth) + deploymentTxsCount := uniswap.GetExecutedTransactionsCount() + elapsedTimeForDeployments := time.Since(start) + allTxs, err := transactions.SendAndWait( + auth, + client, + pl.GetTxsByStatus, + params.NumberOfOperations, + nil, + &deployments, + TxSender, + ) + require.NoError(b, err) + + elapsed := time.Since(start) + fmt.Printf("Total elapsed time: %s\n", elapsed) + + startMetrics := time.Now() + var profilingResult string + if profilingEnabled { + profilingResult, err = metrics.FetchProfiling() + 
require.NoError(b, err) + } + + metrics.CalculateAndPrint( + "uniswap", + deploymentTxsCount+uint64(len(allTxs)), + client, + profilingResult, + elapsed, + 0, + 0, + allTxs, + ) + fmt.Printf("%s\n", profilingResult) + timeForFetchAndPrintMetrics := time.Since(startMetrics) + metrics.PrintUniswapDeployments(elapsedTimeForDeployments, deploymentTxsCount) + fmt.Printf("Time for setup: %s\n", timeForSetup) + fmt.Printf("Time for fetching metrics: %s\n", timeForFetchAndPrintMetrics) +} diff --git a/test/benchmarks/sequencer/erc20-transfers/pool_processing_erc20_test.go b/test/benchmarks/sequencer/erc20-transfers/pool_processing_erc20_test.go deleted file mode 100644 index b6e2e871c6..0000000000 --- a/test/benchmarks/sequencer/erc20-transfers/pool_processing_erc20_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package erc20_transfers - -import ( - "context" - "fmt" - "math/big" - "net/http" - "testing" - "time" - - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/pool" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/setup" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" - "github.com/0xPolygonHermez/zkevm-node/test/operations" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/require" -) - -const ( - txTimeout = 60 * time.Second - profilingEnabled = false -) - -var ( - erc20SC *ERC20.ERC20 -) - -func BenchmarkSequencerERC20TransfersPoolProcess(b *testing.B) { - start := time.Now() - opsman, client, pl, auth := setup.Environment(params.Ctx, b) - setup.BootstrapSequencer(b, opsman) - timeForSetup := time.Since(start) - 
startDeploySCTime := time.Now() - err := deployERC20Contract(b, client, params.Ctx, auth) - require.NoError(b, err) - deploySCElapsed := time.Since(startDeploySCTime) - deployMetricsValues, err := metrics.GetValues(nil) - if err != nil { - return - } - initialCount, err := pl.CountTransactionsByStatus(params.Ctx, pool.TxStatusSelected) - require.NoError(b, err) - err = transactions.SendAndWait(params.Ctx, auth, client, pl.CountTransactionsByStatus, params.NumberOfTxs, erc20SC, TxSender) - require.NoError(b, err) - - var ( - elapsed time.Duration - prometheusResponse *http.Response - ) - - b.Run(fmt.Sprintf("sequencer_selecting_%d_txs", params.NumberOfTxs), func(b *testing.B) { - // Wait all txs to be selected by the sequencer - err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, initialCount, params.NumberOfTxs) - require.NoError(b, err) - elapsed = time.Since(start) - log.Infof("Total elapsed time: %s", elapsed) - prometheusResponse, err = metrics.FetchPrometheus() - require.NoError(b, err) - }) - - var profilingResult string - if profilingEnabled { - profilingResult, err = metrics.FetchProfiling() - require.NoError(b, err) - } - - startMetrics := time.Now() - metrics.CalculateAndPrint( - prometheusResponse, - profilingResult, - elapsed, - deployMetricsValues.SequencerTotalProcessingTime, - deployMetricsValues.ExecutorTotalProcessingTime, - params.NumberOfTxs, - ) - timeForFetchAndPrintMetrics := time.Since(startMetrics) - log.Infof("########################################") - log.Infof("# Deploying ERC20 SC and Mint Tx took: #") - log.Infof("########################################") - log.Infof("%s", deploySCElapsed) - log.Infof("Time for setup: %s", timeForSetup) - log.Infof("Time for fetching metrics: %s", timeForFetchAndPrintMetrics) -} - -func deployERC20Contract(b *testing.B, client *ethclient.Client, ctx context.Context, auth *bind.TransactOpts) error { - var ( - tx *types.Transaction - err error - ) - log.Debugf("Sending TX to deploy 
ERC20 SC") - _, tx, erc20SC, err = ERC20.DeployERC20(auth, client, "Test Coin", "TCO") - require.NoError(b, err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - require.NoError(b, err) - log.Debugf("Sending TX to do a ERC20 mint") - auth.Nonce = big.NewInt(1) // for the mint tx - tx, err = erc20SC.Mint(auth, mintAmountBig) - auth.Nonce = big.NewInt(2) - require.NoError(b, err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - require.NoError(b, err) - return err -} diff --git a/test/benchmarks/sequencer/erc20-transfers/tx_sender.go b/test/benchmarks/sequencer/erc20-transfers/tx_sender.go deleted file mode 100644 index d0e3faa626..0000000000 --- a/test/benchmarks/sequencer/erc20-transfers/tx_sender.go +++ /dev/null @@ -1,40 +0,0 @@ -package erc20_transfers - -import ( - "math/big" - - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/ethclient" -) - -const ( - mintAmount = 1000000000000000000 - transferAmount = 0 -) - -var ( - mintAmountBig = big.NewInt(mintAmount) - transferAmountBig = big.NewInt(transferAmount) - countTxs = 0 -) - -// TxSender sends ERC20 transfer to the sequencer -func TxSender(l2Client *ethclient.Client, gasPrice *big.Int, nonce uint64, auth *bind.TransactOpts, erc20SC *ERC20.ERC20) error { - log.Debugf("sending tx num: %d nonce: %d", countTxs, nonce) - auth.Nonce = new(big.Int).SetUint64(nonce) - var actualTransferAmount *big.Int - if nonce%2 == 0 { - actualTransferAmount = big.NewInt(0).Sub(transferAmountBig, auth.Nonce) - } else { - actualTransferAmount = big.NewInt(0).Add(transferAmountBig, auth.Nonce) - } - _, err := erc20SC.Transfer(auth, params.To, actualTransferAmount) - if err == nil { - countTxs += 1 - } - - return err -} diff --git 
a/test/benchmarks/sequencer/eth-transfers/tx_sender.go b/test/benchmarks/sequencer/eth-transfers/tx_sender.go deleted file mode 100644 index 02ac3f6c8b..0000000000 --- a/test/benchmarks/sequencer/eth-transfers/tx_sender.go +++ /dev/null @@ -1,47 +0,0 @@ -package eth_transfers - -import ( - "errors" - "math/big" - "time" - - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" -) - -var ( - gasLimit = 21000 - ethAmount = big.NewInt(0) - sleepTime = 5 * time.Second - countTxs = 0 -) - -// TxSender sends eth transfer to the sequencer -func TxSender(l2Client *ethclient.Client, gasPrice *big.Int, nonce uint64, auth *bind.TransactOpts, erc20SC *ERC20.ERC20) error { - log.Debugf("sending tx num: %d nonce: %d", countTxs, nonce) - auth.Nonce = big.NewInt(int64(nonce)) - tx := types.NewTransaction(nonce, params.To, ethAmount, uint64(gasLimit), gasPrice, nil) - signedTx, err := auth.Signer(auth.From, tx) - if err != nil { - return err - } - - err = l2Client.SendTransaction(params.Ctx, signedTx) - if errors.Is(err, state.ErrStateNotSynchronized) { - for errors.Is(err, state.ErrStateNotSynchronized) { - time.Sleep(sleepTime) - err = l2Client.SendTransaction(params.Ctx, signedTx) - } - } - - if err == nil { - countTxs += 1 - } - - return err -} diff --git a/test/benchmarks/sequencer/scripts/.env.example b/test/benchmarks/sequencer/scripts/.env.example new file mode 100644 index 0000000000..c7289860ea --- /dev/null +++ b/test/benchmarks/sequencer/scripts/.env.example @@ -0,0 +1,17 @@ +# Bash Variables (Section from Deployment Docs) +BASTION_HOST= +POOLDB_DBNAME= +POOLDB_EP= +POOLDB_PASS= +POOLDB_USER= +POOLDB_PORT=5433 + +# IP/DNS for PUBLIC TESTNET: 
sequencer.zkevm-public.aws, INTERNAL TESTNET: sequencer.zkevm-internal.aws, DEV TESTNET: sequencer.zkevm-dev.aws +SEQUENCER_IP= + +# Public URLs (Section from Deployment Docs) +RPC_URL= +CHAIN_ID= + +# Your private key +PRIVATE_KEY= \ No newline at end of file diff --git a/test/benchmarks/sequencer/scripts/README.md b/test/benchmarks/sequencer/scripts/README.md new file mode 100644 index 0000000000..882b0fb84e --- /dev/null +++ b/test/benchmarks/sequencer/scripts/README.md @@ -0,0 +1,51 @@ + +# Benchmark Sequencer Scripts + +This repository contains scripts to benchmark a sequencer. The main script is written in Go and can be used to run a series of commands and perform various operations. + +## Usage + +### 1. Clone the repository: + ``` + git clone git@github.com:0xPolygonHermez/zkevm-node.git + cd zkevm-node/test/benchmarks/sequencer/scripts + ``` + +### 2. Setup Environment Variables: + Copy the `.env.example` file to `.env` and populate it with the appropriate values. + + #### Required environment variables are: + - `BASTION_HOST`: The IP address or domain name of the bastion host. (From `Deployments.doc` under `BASH VARIABLES` section for the specific `Environment`) + - `POOLDB_DBNAME`: Database name for the pool. (From `Deployments.doc` under `BASH VARIABLES` section for the specific `Environment`) + - `POOLDB_EP`: Endpoint for the pool database. (From `Deployments.doc` under `BASH VARIABLES` section for the specific `Environment`) + - `POOLDB_PASS`: Password for the pool database. (From `Deployments.doc` under `BASH VARIABLES` section for the specific `Environment`) + - `POOLDB_USER`: User for the pool database. (From `Deployments.doc` under `BASH VARIABLES` section for the specific `Environment`) + - `SEQUENCER_IP`: The IP address of the sequencer. 
(`sequencer.zkevm-public.aws` for `public testnet`, `sequencer.zkevm-internal.aws` for `internal testnet`, `sequencer.zkevm-dev.aws` for `dev testnet`) + - `RPC_URL`: The URL for the Remote Procedure Call (RPC) server. (From `Deployments.doc` under `Public URLs` section as a bullet point to `RPC` for the specific `Environment`) + - `CHAIN_ID`: The ID of the blockchain network. (From `Deployments.doc` under `Public URLs` section as a bullet point to `RPC` for the specific `Environment`) + - `PRIVATE_KEY`: Your private key. + + #### Optional environment variables: + - `POOLDB_PORT`: Port for the pool database. (Default is `5433`) + + Example: + ``` + cp .env.example .env + nano .env + ``` +### 3. Run the Benchmark Script: + Run the `main.go` script with the following command-line flags: + - `--type`: The type of transactions to test. Accepted values are `eth`, `erc20` or `uniswap`. + - `--num-ops` (optional): The number of operations to run. Default is 200. + - `--help` (optional): Display the help message. + + Example: + ``` + go run main.go --type erc20 --sequencer-ip + ``` + +## Notes + +- Ensure that the `.env` file exists and contains all the required environment variables before running the script. +- The script will perform various operations based on the provided command-line flags and environment variables. +- Ensure that Go is installed on your system to run the script. 
\ No newline at end of file diff --git a/test/benchmarks/sequencer/scripts/common/environment/constants.go b/test/benchmarks/sequencer/scripts/common/environment/constants.go deleted file mode 100644 index 13a002519c..0000000000 --- a/test/benchmarks/sequencer/scripts/common/environment/constants.go +++ /dev/null @@ -1,35 +0,0 @@ -package environment - -import ( - "strconv" - - "github.com/0xPolygonHermez/zkevm-node/test/operations" - "github.com/0xPolygonHermez/zkevm-node/test/testutils" -) - -var ( - // IntBase is the base for the conversion of strings to integers - IntBase = 10 - // PrivateKey is the private key of the sequencer - PrivateKey = testutils.GetEnv("PRIVATE_KEY", operations.DefaultSequencerPrivateKey) - // L2ChainId is the chain id of the L2 network - L2ChainId = testutils.GetEnv("CHAIN_ID", strconv.FormatUint(operations.DefaultL2ChainID, IntBase)) - //Erc20TokenAddress is the address of the ERC20 token - Erc20TokenAddress = testutils.GetEnv("ERC20_TOKEN_ADDRESS", "0x729fc461b26f69cf75a31182788eaf722b08c240") - - l2NetworkRPCURL = testutils.GetEnv("L2_NETWORK_RPC_URL", operations.DefaultL2NetworkURL) - - // StateDB Credentials - stateDbName = testutils.GetEnv("STATE_DB_NAME", "state_db") - stateDbUser = testutils.GetEnv("STATE_DB_USER", "state_user") - stateDbPass = testutils.GetEnv("STATE_DB_PASS", "state_password") - stateDbHost = testutils.GetEnv("STATE_DB_HOST", "localhost") - stateDbPort = testutils.GetEnv("STATE_DB_PORT", "5432") - - // PoolDB Credentials - poolDbName = testutils.GetEnv("POOL_DB_NAME", "pool_db") - poolDbUser = testutils.GetEnv("POOL_DB_USER", "pool_user") - poolDbPass = testutils.GetEnv("POOL_DB_PASS", "pool_password") - poolDbHost = testutils.GetEnv("POOL_DB_HOST", "localhost") - poolDbPort = testutils.GetEnv("POOL_DB_PORT", "5433") -) diff --git a/test/benchmarks/sequencer/scripts/common/results/print.go b/test/benchmarks/sequencer/scripts/common/results/print.go deleted file mode 100644 index abee515213..0000000000 --- 
a/test/benchmarks/sequencer/scripts/common/results/print.go +++ /dev/null @@ -1,19 +0,0 @@ -package results - -import ( - "time" - - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" -) - -// Print prints the results of the benchmark -func Print(elapsed time.Duration) { - // Print results - log.Info("##########") - log.Info("# Result #") - log.Info("##########") - log.Infof("Total time took for the sequencer to select all txs from the pool: %v", elapsed) - log.Infof("Number of txs sent: %d", params.NumberOfTxs) - log.Infof("Txs per second: %f", float64(params.NumberOfTxs)/elapsed.Seconds()) -} diff --git a/test/benchmarks/sequencer/scripts/environment/constants.go b/test/benchmarks/sequencer/scripts/environment/constants.go new file mode 100644 index 0000000000..843622e5d6 --- /dev/null +++ b/test/benchmarks/sequencer/scripts/environment/constants.go @@ -0,0 +1,26 @@ +package environment + +import ( + "strconv" + + "github.com/0xPolygonHermez/zkevm-node/test/operations" + "github.com/0xPolygonHermez/zkevm-node/test/testutils" +) + +var ( + // IntBase is the base for the conversion of strings to integers + IntBase = 10 + // PrivateKey is the private key of the sequencer + PrivateKey = testutils.GetEnv("PRIVATE_KEY", operations.DefaultSequencerPrivateKey) + // L2ChainId is the chain id of the L2 network + L2ChainId = testutils.GetEnv("CHAIN_ID", strconv.FormatUint(operations.DefaultL2ChainID, IntBase)) + // L2NetworkRPCURL is the RPC URL of the L2 network + L2NetworkRPCURL = testutils.GetEnv("RPC_URL", operations.DefaultL2NetworkURL) + + // PoolDB Credentials + poolDbName = testutils.GetEnv("POOLDB_DBNAME", "pool_db") + poolDbUser = testutils.GetEnv("POOLDB_USER", "pool_user") + poolDbPass = testutils.GetEnv("POOLDB_PASS", "pool_password") + poolDbHost = testutils.GetEnv("POOLDB_HOST", "localhost") + poolDbPort = testutils.GetEnv("POOLDB_PORT", "5433") +) diff --git 
a/test/benchmarks/sequencer/scripts/common/environment/init.go b/test/benchmarks/sequencer/scripts/environment/init.go similarity index 63% rename from test/benchmarks/sequencer/scripts/common/environment/init.go rename to test/benchmarks/sequencer/scripts/environment/init.go index 3d13a28663..e20c8456e9 100644 --- a/test/benchmarks/sequencer/scripts/common/environment/init.go +++ b/test/benchmarks/sequencer/scripts/environment/init.go @@ -2,14 +2,13 @@ package environment import ( "context" + "fmt" "math/big" "strconv" "strings" "github.com/0xPolygonHermez/zkevm-node/db" - "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/pool/pgpoolstorage" - "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/crypto" @@ -22,7 +21,7 @@ var ( ) // Init sets up the environment for the benchmark -func Init() (context.Context, *pgpoolstorage.PostgresPoolStorage, *state.PostgresStorage, *ethclient.Client, *bind.TransactOpts) { +func Init() (*pgpoolstorage.PostgresPoolStorage, *ethclient.Client, *bind.TransactOpts) { ctx := context.Background() pl, err := pgpoolstorage.NewPostgresPoolStorage(db.Config{ Name: poolDbName, @@ -37,7 +36,7 @@ func Init() (context.Context, *pgpoolstorage.PostgresPoolStorage, *state.Postgre panic(err) } - l2Client, err := ethclient.Dial(l2NetworkRPCURL) + l2Client, err := ethclient.Dial(L2NetworkRPCURL) if err != nil { panic(err) } @@ -50,7 +49,7 @@ func Init() (context.Context, *pgpoolstorage.PostgresPoolStorage, *state.Postgre if err != nil { panic(err) } - log.Infof("L2ChainId: %d", chainId) + fmt.Printf("L2ChainId: %d\n", chainId) // Auth is the auth of the sender auth, err := bind.NewKeyedTransactorWithChainID(privateKey, new(big.Int).SetUint64(chainId)) if err != nil { @@ -67,31 +66,16 @@ func Init() (context.Context, *pgpoolstorage.PostgresPoolStorage, 
*state.Postgre } // Print Initial Stats - log.Infof("Receiver Addr: %v", params.To.String()) - log.Infof("Sender Addr: %v", auth.From.String()) - log.Infof("Sender Balance: %v", senderBalance.String()) - log.Infof("Sender Nonce: %v", senderNonce) + fmt.Printf("Receiver Addr: %v\n", params.To.String()) + fmt.Printf("Sender Addr: %v\n", auth.From.String()) + fmt.Printf("Sender Balance: %v\n", senderBalance.String()) + fmt.Printf("Sender Nonce: %v\n", senderNonce) gasPrice, err := l2Client.SuggestGasPrice(ctx) if err != nil { panic(err) } auth.GasPrice = gasPrice - stateDbCfg := db.Config{ - User: stateDbUser, - Password: stateDbPass, - Name: stateDbName, - Host: stateDbHost, - Port: stateDbPort, - EnableLog: true, - MaxConns: maxConnections, - } - stateDb, err := db.NewSQLDB(stateDbCfg) - if err != nil { - panic(err) - } - stateStorage := state.NewPostgresStorage(stateDb) - auth.Nonce = new(big.Int).SetUint64(senderNonce) - return ctx, pl, stateStorage, l2Client, auth + return pl, l2Client, auth } diff --git a/test/benchmarks/sequencer/scripts/erc20-transfers/main.go b/test/benchmarks/sequencer/scripts/erc20-transfers/main.go index c05f549649..8039abdfeb 100644 --- a/test/benchmarks/sequencer/scripts/erc20-transfers/main.go +++ b/test/benchmarks/sequencer/scripts/erc20-transfers/main.go @@ -1,41 +1,45 @@ package main import ( - "time" + "flag" + "fmt" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/scripts/environment" "github.com/0xPolygonHermez/zkevm-node/pool" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" - erc20transfers "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/erc20-transfers" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/scripts/common/environment" - 
"github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/scripts/common/results" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" - "github.com/ethereum/go-ethereum/common" + erc20transfers "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/e2e/erc20-transfers" ) func main() { var ( err error ) - ctx, pl, state, l2Client, auth := environment.Init() - initialCount, err := pl.CountTransactionsByStatus(params.Ctx, pool.TxStatusSelected) - if err != nil { - panic(err) + + numOps := flag.Uint64("num-ops", 200, "The number of operations to run. Default is 200.") + flag.Parse() + + if numOps == nil { + panic("numOps is nil") } - start := time.Now() - erc20SC, err := ERC20.NewERC20(common.HexToAddress(environment.Erc20TokenAddress), l2Client) + pl, l2Client, auth := environment.Init() + initialCount, err := pl.CountTransactionsByStatus(params.Ctx, pool.TxStatusSelected) if err != nil { panic(err) } - // Send Txs - err = transactions.SendAndWait( - ctx, + + erc20SC, err := erc20transfers.DeployERC20Contract(l2Client, params.Ctx, auth) + + allTxs, err := transactions.SendAndWait( auth, l2Client, - pl.CountTransactionsByStatus, - params.NumberOfTxs, + pl.GetTxsByStatus, + *numOps, erc20SC, + nil, erc20transfers.TxSender, ) if err != nil { @@ -43,15 +47,11 @@ func main() { } // Wait for Txs to be selected - err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, initialCount, params.NumberOfTxs) + err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, initialCount, *numOps) if err != nil { panic(err) } - lastL2BlockTimestamp, err := state.GetLastL2BlockCreatedAt(params.Ctx, nil) - if err != nil { - panic(err) - } - elapsed := lastL2BlockTimestamp.Sub(start) - results.Print(elapsed) + totalGas := metrics.GetTotalGasUsedFromTxs(l2Client, allTxs) + fmt.Println("Total Gas: ", totalGas) } diff --git a/test/benchmarks/sequencer/scripts/eth-transfers/main.go b/test/benchmarks/sequencer/scripts/eth-transfers/main.go 
index 9705a5a234..9b24d7a52e 100644 --- a/test/benchmarks/sequencer/scripts/eth-transfers/main.go +++ b/test/benchmarks/sequencer/scripts/eth-transfers/main.go @@ -1,34 +1,39 @@ package main import ( - "time" + "flag" + "fmt" "github.com/0xPolygonHermez/zkevm-node/pool" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" - ethtransfers "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/eth-transfers" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/scripts/common/environment" - "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/scripts/common/results" + ethtransfers "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/e2e/eth-transfers" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/scripts/environment" ) func main() { var ( err error ) - ctx, pl, state, l2Client, auth := environment.Init() + numOps := flag.Uint64("num-ops", 200, "The number of operations to run. 
Default is 200.") + flag.Parse() + if numOps == nil { + panic("numOps is nil") + } + + pl, l2Client, auth := environment.Init() initialCount, err := pl.CountTransactionsByStatus(params.Ctx, pool.TxStatusSelected) if err != nil { panic(err) } - start := time.Now() - // Send Txs - err = transactions.SendAndWait( - ctx, + allTxs, err := transactions.SendAndWait( auth, l2Client, - pl.CountTransactionsByStatus, - params.NumberOfTxs, + pl.GetTxsByStatus, + *numOps, + nil, nil, ethtransfers.TxSender, ) @@ -37,15 +42,11 @@ func main() { } // Wait for Txs to be selected - err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, initialCount, params.NumberOfTxs) + err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, initialCount, *numOps) if err != nil { panic(err) } - lastL2BlockTimestamp, err := state.GetLastL2BlockCreatedAt(params.Ctx, nil) - if err != nil { - panic(err) - } - elapsed := lastL2BlockTimestamp.Sub(start) - results.Print(elapsed) + totalGas := metrics.GetTotalGasUsedFromTxs(l2Client, allTxs) + fmt.Println("Total Gas: ", totalGas) } diff --git a/test/benchmarks/sequencer/scripts/main.go b/test/benchmarks/sequencer/scripts/main.go new file mode 100644 index 0000000000..bdefb4abf6 --- /dev/null +++ b/test/benchmarks/sequencer/scripts/main.go @@ -0,0 +1,348 @@ +package main + +import ( + "bufio" + "errors" + "flag" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/joho/godotenv" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" +) + +const ( + maxRetryAttempts = 5 + retryDelay = 1 * time.Second +) + +func main() { + fmt.Println("Starting the program...") + fmt.Println("-----------------------") + + // Command line flags + tType := flag.String("type", "", "The type of transactions to test: erc20, uniswap, or eth.") + numOps := flag.Int("num-ops", 200, "The number of operations to run. 
Default is 200.") + help := flag.Bool("help", false, "Display help message") + flag.Parse() + + if *help { + fmt.Println("Usage: go run main.go --type TRANSACTIONS_TYPE --sequencer-ip SEQUENCER_IP [--num-ops NUMBER_OF_OPERATIONS]") + flag.PrintDefaults() + return + } + + // Check if .env file exists + if _, err := os.Stat(".env"); os.IsNotExist(err) { + panic(fmt.Sprintf("Error: .env file does not exist. Please create it and add all environment variables from the Deployment Docs." + + "\n ** check env.exmaple for an example. **")) + } + + fmt.Println("Loading .env file...") + fmt.Println("--------------------") + // Load .env file + err := godotenv.Load(".env") + if err != nil { + panic(fmt.Sprintf("Error loading .env file: %v", err)) + } + + fmt.Println("Validating TYPE...") + fmt.Println("------------------") + // Validate TYPE + if *tType == "" || (*tType != "erc20" && *tType != "uniswap" && *tType != "eth") { + panic(fmt.Sprintf("Error: Invalid TYPE argument. Accepted values are 'erc20', 'uniswap', or 'eth'.")) + } + + fmt.Println("Checking environment variables...") + fmt.Println("---------------------------------") + // Check environment variables + checkEnvVar("BASTION_HOST") + checkEnvVar("POOLDB_PORT") + checkEnvVar("POOLDB_EP") + checkEnvVar("RPC_URL") + checkEnvVar("CHAIN_ID") + checkEnvVar("PRIVATE_KEY") + checkEnvVar("SEQUENCER_IP") + + // Forward BASTION Ports + fmt.Println("Forwarding BASTION ports...") + fmt.Println("---------------------------") + sshArgs := []string{"-fN", + "-L", os.Getenv("POOLDB_PORT") + ":" + os.Getenv("POOLDB_EP") + ":5432", + "ubuntu@" + os.Getenv("BASTION_HOST")} + _, err = runCmd("ssh", sshArgs...) 
+ if err != nil { + panic(fmt.Sprintf("Failed to forward BASTION ports: %v", err)) + } + defer killSSHProcess(err) + + // Execute wget to get metrics from the BASTION HOST + sequencerIP := os.Getenv("SEQUENCER_IP") + fmt.Println("Fetching start metrics...") + fmt.Println("--------------------------") + + output, err := retryCmd("ssh", "ubuntu@"+os.Getenv("BASTION_HOST"), "wget", "-qO-", "http://"+sequencerIP+":9091/metrics") + if err != nil { + panic(fmt.Sprintf("Failed to collect start metrics from BASTION HOST: %v", err)) + } + retryTimes := 0 + if err != nil { + fmt.Println(fmt.Sprintf("Failed to collect start metrics from BASTION HOST: %v", err)) + fmt.Println("Retrying...") + time.Sleep(1 * time.Second) + output, err = runCmd("ssh", "ubuntu@"+os.Getenv("BASTION_HOST"), "wget", "-qO-", "http://"+sequencerIP+":9091/metrics") + retryTimes++ + if retryTimes == 5 { + panic(fmt.Sprintf("Failed to collect start metrics from BASTION HOST: %v", err)) + } + } + + err = os.WriteFile("start-metrics.txt", []byte(output), 0644) + if err != nil { + panic(fmt.Sprintf("Failed to write start metrics to file: %v", err)) + } + + // Run the Go script depending on the type argument + var goScript string + switch *tType { + case "erc20": + goScript = "erc20-transfers" + case "uniswap": + goScript = "uniswap-transfers" + case "eth": + goScript = "eth-transfers" + } + + // Run transfers script + fmt.Println("Running transfers script...") + fmt.Println("---------------------------") + lastLine, err := runCmdRealTime("go", "run", "./"+goScript+"/main.go", "--num-ops", strconv.Itoa(*numOps)) + if err != nil { + panic(fmt.Sprintf("Failed to run Go script for %s transactions: %v", *tType, err)) + } + + // Extract Total Gas + fmt.Println("Extracting Total Gas...") + fmt.Println("-----------------------") + var totalGas string + if strings.Contains(lastLine, "Total Gas") { + parts := strings.Split(lastLine, " ") + totalGas = parts[len(parts)-1] + } + if totalGas == "" { + 
fmt.Println("Warning: Failed to extract Total Gas from Go script output.") + } + + // Execute wget to get metrics from the BASTION HOST + fmt.Println("Fetching end metrics...") + fmt.Println("------------------------") + output, err = retryCmd("ssh", "ubuntu@"+os.Getenv("BASTION_HOST"), "wget", "-qO-", "http://"+sequencerIP+":9091/metrics") + if err != nil { + panic(fmt.Sprintf("Failed to collect end metrics from BASTION HOST: %v", err)) + } + err = os.WriteFile("end-metrics.txt", []byte(output), 0644) + if err != nil { + panic(fmt.Sprintf("Failed to write end metrics to file: %v", err)) + } + + // Run the Go script that calculates the metrics and prints the results + totalGasInt, err := strconv.ParseUint(totalGas, 10, 64) + if err != nil { + fmt.Printf("Failed to convert totalGas to int: %v\n", err) + } + + // Calc and Print Results + fmt.Println("Calculating and printing results...") + fmt.Printf("------------------------------------\n\n") + calculateAndPrintResults(*tType, totalGasInt, uint64(*numOps)) + + fmt.Println("Done!") +} + +func runCmd(command string, args ...string) (string, error) { + cmd := exec.Command(command, args...) + output, err := cmd.CombinedOutput() + return string(output), err +} + +// runCmdWithRetry executes the specified command with arguments and returns the combined output. +// It includes a retryCmd mechanism controlled by the enableRetry flag. +func runCmdWithRetry(enableRetry bool, command string, args ...string) (string, error) { + var output string + var err error + + if enableRetry { + for attempt := 1; attempt <= maxRetryAttempts; attempt++ { + cmd := exec.Command(command, args...) + cmd.Stderr = os.Stderr + result, runErr := cmd.CombinedOutput() + output = string(result) + err = runErr + + if err == nil { + // Command succeeded, no need to retryCmd. 
+ break + } + + fmt.Printf("Attempt %d: Command failed: %v\n", attempt, err) + + if attempt < maxRetryAttempts { + fmt.Println("Retrying...") + time.Sleep(time.Second) // Add a delay between retries (you can adjust the duration). + } + } + } else { + cmd := exec.Command(command, args...) + cmd.Stderr = os.Stderr + result, runErr := cmd.CombinedOutput() + output = string(result) + err = runErr + } + + return output, err +} + +func runCmdRealTime(command string, args ...string) (string, error) { + cmd := exec.Command(command, args...) + stdoutIn, _ := cmd.StdoutPipe() + stderrIn, _ := cmd.StderrPipe() + + cmd.Start() + + var lastLine string + go func() { + scanner := bufio.NewScanner(stdoutIn) + for scanner.Scan() { + line := scanner.Text() + fmt.Println(line) + lastLine = line + } + }() + + go func() { + scanner := bufio.NewScanner(stderrIn) + for scanner.Scan() { + m := scanner.Text() + _, err := fmt.Fprintln(os.Stderr, m) + if err != nil { + fmt.Println("Error printing stderr: ", err) + return + } + } + }() + + err := cmd.Wait() + if err != nil { + return "", err + } + return lastLine, nil +} + +func checkEnvVar(varName string) { + if os.Getenv(varName) == "" { + panic(fmt.Sprintf("Error: %s is not set. 
Please export all environment variables from the Deployment Docs.", varName)) + } +} + +func killSSHProcess(err error) { + fmt.Println("Killing SSH process...") + _, err = runCmd("pkill", "-f", "ssh -fN -L "+os.Getenv("POOLDB_PORT")) + if err != nil { + panic(fmt.Sprintf("Failed to kill the SSH process: %v", err)) + } +} + +func calculateAndPrintResults(txsType string, totalGas uint64, numberOfOperations uint64) { + totalTransactionsSent := numberOfOperations + + startData := parseFile("start-metrics.txt") + endData := parseFile("end-metrics.txt") + + totalTxs := uint64(endData["sequencer_processing_time"].processingTimeCount - startData["sequencer_processing_time"].processingTimeCount) + + processingTimeSequencer := endData["sequencer_processing_time"].processingTimeSum - startData["sequencer_processing_time"].processingTimeSum + processingTimeExecutor := endData["state_executor_processing_time{caller=\"sequencer\"}"].processingTimeSum - startData["state_executor_processing_time{caller=\"sequencer\"}"].processingTimeSum + + fmt.Println("########################") + fmt.Println("# Results #") + fmt.Printf("########################\n\n") + + metrics.PrintSummary( + txsType, + totalTransactionsSent, + totalTxs, + processingTimeSequencer, + processingTimeExecutor, + totalGas, + ) +} + +type timeData struct { + processingTimeSum float64 + processingTimeCount int +} + +func parseLine(line string) (key string, value float64) { + parts := strings.Split(line, " ") + key = parts[0] + value, _ = strconv.ParseFloat(parts[1], 64) + return +} + +func parseFile(filename string) map[string]timeData { + file, err := os.Open(filename) + if err != nil { + fmt.Println("Error opening file:", err) + return nil + } + defer file.Close() + + result := map[string]timeData{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + key, value := parseLine(line) + if strings.Contains(key, "sum") { + key = strings.Replace(key, "_sum", "", -1) + if data, ok := 
result[key]; ok { + data.processingTimeSum = value + result[key] = data + } else { + result[key] = timeData{processingTimeSum: value} + } + } else if strings.Contains(key, "count") { + key = strings.Replace(key, "_count", "", -1) + if data, ok := result[key]; ok { + data.processingTimeCount = int(value) + result[key] = data + } else { + result[key] = timeData{processingTimeCount: int(value)} + } + } + } + + return result +} + +func retryCmd(command string, args ...string) (string, error) { + + for i := 0; i < maxRetryAttempts; i++ { + result, err := runCmd(command, args...) + if err == nil { + return result, nil // If the function succeeded, return its result. + } + + // If it failed and it's not the last attempt, wait for the specified delay before retrying. + if i < maxRetryAttempts-1 { + time.Sleep(retryDelay) + } + } + + return "", errors.New("maximum retryCmd attempts reached") +} diff --git a/test/benchmarks/sequencer/scripts/uniswap-transfers/main.go b/test/benchmarks/sequencer/scripts/uniswap-transfers/main.go new file mode 100644 index 0000000000..fc269e6514 --- /dev/null +++ b/test/benchmarks/sequencer/scripts/uniswap-transfers/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "flag" + "fmt" + "time" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/scripts/environment" + + "github.com/0xPolygonHermez/zkevm-node/pool" + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/params" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/metrics" + + "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/common/transactions" + uniswaptransfers "github.com/0xPolygonHermez/zkevm-node/test/benchmarks/sequencer/e2e/uniswap-transfers" + uniswap "github.com/0xPolygonHermez/zkevm-node/test/scripts/uniswap/pkg" +) + +func main() { + var ( + err error + ) + numOps := flag.Uint64("num-ops", 200, "The number of operations to run. 
Default is 200.") + flag.Parse() + if numOps == nil { + panic("numOps is nil") + } + pl, l2Client, auth := environment.Init() + initialCount, err := pl.CountTransactionsByStatus(params.Ctx, pool.TxStatusSelected) + if err != nil { + panic(err) + } + start := time.Now() + deployments := uniswap.DeployContractsAndAddLiquidity(l2Client, auth) + deploymentTxsCount := uniswap.GetExecutedTransactionsCount() + elapsedTimeForDeployments := time.Since(start) + + allTxs, err := transactions.SendAndWait( + auth, + l2Client, + pl.GetTxsByStatus, + *numOps, + nil, + &deployments, + uniswaptransfers.TxSender, + ) + if err != nil { + panic(err) + } + + // Wait for Txs to be selected + err = transactions.WaitStatusSelected(pl.CountTransactionsByStatus, initialCount, *numOps) + if err != nil { + panic(err) + } + + metrics.PrintUniswapDeployments(elapsedTimeForDeployments, deploymentTxsCount) + totalGas := metrics.GetTotalGasUsedFromTxs(l2Client, allTxs) + fmt.Println("Total Gas: ", totalGas) +} diff --git a/test/config/debug.node.config.toml b/test/config/debug.node.config.toml index b5a5fd29de..199b4fd6a5 100644 --- a/test/config/debug.node.config.toml +++ b/test/config/debug.node.config.toml @@ -5,14 +5,27 @@ Environment = "development" # "production" or "development" Level = "debug" Outputs = ["stderr"] -[StateDB] -User = "state_user" -Password = "state_password" -Name = "state_db" -Host = "localhost" -Port = "5432" -EnableLog = true -MaxConns = 10 +[State] + [State.DB] + User = "state_user" + Password = "state_password" + Name = "state_db" + Host = "localhost" + Port = "5432" + EnableLog = true + MaxConns = 10 + [State.Batch] + [State.Batch.Constraints] + MaxTxsPerBatch = 300 + MaxBatchBytesSize = 120000 + MaxCumulativeGasUsed = 30000000 + MaxKeccakHashes = 2145 + MaxPoseidonHashes = 252357 + MaxPoseidonPaddings = 135191 + MaxMemAligns = 236585 + MaxArithmetics = 236585 + MaxBinaries = 473170 + MaxSteps = 7570538 [Pool] FreeClaimGasLimit = 1500000 @@ -23,6 +36,17 @@ 
MaxTxDataBytesSize=30000 DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" +AccountQueue = 64 +GlobalQueue = 1024 + [Pool.EffectiveGasPrice] + Enabled = false + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + ZeroByteGasCost = 4 + NetProfit = 1 + BreakEvenFactor = 1.1 + FinalDeviationPct = 10 + L2GasPriceSuggesterFactor = 0.5 [Pool.DB] User = "pool_user" Password = "pool_password" @@ -60,16 +84,6 @@ TrustedSequencerURL = "" WaitPeriodPoolIsEmpty = "1s" BlocksAmountForTxsToBeDeleted = 100 FrequencyToCheckTxsForDelete = "12h" -MaxTxsPerBatch = 300 -MaxBatchBytesSize = 120000 -MaxCumulativeGasUsed = 30000000 -MaxKeccakHashes = 2145 -MaxPoseidonHashes = 252357 -MaxPoseidonPaddings = 135191 -MaxMemAligns = 236585 -MaxArithmetics = 236585 -MaxBinaries = 473170 -MaxSteps = 7570538 TxLifetimeCheckTimeout = "10m" MaxTxLifetime = "3h" [Sequencer.Finalizer] @@ -87,12 +101,10 @@ MaxTxLifetime = "3h" [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" - [Sequencer.EffectiveGasPrice] - MaxBreakEvenGasPriceDeviationPercentage = 10 - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - MarginFactor = 1 - Enabled = false + [Sequencer.StreamServer] + Port = 6900 + Filename = "/datastreamer/datastream.bin" + Enabled = true [SequenceSender] WaitPeriodSendSequence = "15s" @@ -126,10 +138,10 @@ DefaultGasPriceWei = 1000000000 MaxGasPriceWei = 0 [MTClient] -URI = "x1-prover:50061" +URI = "127.0.0.1:50061" [Executor] -URI = "x1-prover:50071" +URI = "127.0.0.1:50071" MaxGRPCMessageSize = 100000000 [Metrics] diff --git a/test/config/test.genesis.config.json b/test/config/test.genesis.config.json index b4eadcadd6..918219fd80 100644 --- a/test/config/test.genesis.config.json +++ b/test/config/test.genesis.config.json @@ -6,7 +6,7 @@ "polygonZkEVMGlobalExitRootAddress": "0xEd236da21Ff62bC7B62608AdB818da49E8549fa7", "dataCommitteeContract": "0x6Ae5b0863dBF3477335c0102DBF432aFf04ceb22" }, - "genesisBlockNumber": 
303, + "genesisBlockNumber": 271, "root": "0x6748c8d646d0d45108a0771c1c96412da84303eaee74f6e101ea5dabdbb757ca", "genesis": [ { diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml index b2a01dce96..f7be19e62e 100644 --- a/test/config/test.node.config.toml +++ b/test/config/test.node.config.toml @@ -5,14 +5,27 @@ Environment = "development" # "production" or "development" Level = "debug" Outputs = ["stderr"] -[StateDB] -User = "state_user" -Password = "state_password" -Name = "state_db" -Host = "x1-state-db" -Port = "5432" -EnableLog = false -MaxConns = 200 +[State] + [State.DB] + User = "state_user" + Password = "state_password" + Name = "state_db" + Host = "x1-state-db" + Port = "5432" + EnableLog = false + MaxConns = 200 + [State.Batch] + [State.Batch.Constraints] + MaxTxsPerBatch = 300 + MaxBatchBytesSize = 120000 + MaxCumulativeGasUsed = 30000000 + MaxKeccakHashes = 2145 + MaxPoseidonHashes = 252357 + MaxPoseidonPaddings = 135191 + MaxMemAligns = 236585 + MaxArithmetics = 236585 + MaxBinaries = 473170 + MaxSteps = 7570538 [Pool] FreeClaimGasLimit = 1500000 @@ -23,6 +36,17 @@ MaxTxDataBytesSize=100000 DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" +AccountQueue = 64 +GlobalQueue = 1024 + [Pool.EffectiveGasPrice] + Enabled = false + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + ZeroByteGasCost = 4 + NetProfit = 1 + BreakEvenFactor = 1.1 + FinalDeviationPct = 10 + L2GasPriceSuggesterFactor = 0.5 [Pool.DB] User = "pool_user" Password = "pool_password" @@ -55,22 +79,26 @@ EnableL2SuggestedGasPricePolling = true SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "http://x1-json-rpc:8123" # If it is empty or not specified, then the value is read from the smc. 
+L1SynchronizationMode = "sequential" # "sequential" or "parallel" + [Synchronizer.L1ParallelSynchronization] + MaxClients = 10 + MaxPendingNoProcessedBlocks = 25 + RequestLastBlockPeriod = "5s" + RequestLastBlockTimeout = "5s" + RequestLastBlockMaxRetries = 3 + StatisticsPeriod = "5m" + TimeoutMainLoop = "5m" + RollupInfoRetriesSpacing= "5s" + FallbackToSequentialModeOnSynchronized = false + [Synchronizer.L1ParallelSynchronization.PerformanceWarning] + AceptableInacctivityTime = "5s" + ApplyAfterNumRollupReceived = 10 [Sequencer] WaitPeriodPoolIsEmpty = "1s" LastBatchVirtualizationTimeMaxWaitPeriod = "10s" BlocksAmountForTxsToBeDeleted = 100 FrequencyToCheckTxsForDelete = "12h" -MaxTxsPerBatch = 300 -MaxBatchBytesSize = 120000 -MaxCumulativeGasUsed = 30000000 -MaxKeccakHashes = 2145 -MaxPoseidonHashes = 252357 -MaxPoseidonPaddings = 135191 -MaxMemAligns = 236585 -MaxArithmetics = 236585 -MaxBinaries = 473170 -MaxSteps = 7570538 TxLifetimeCheckTimeout = "10m" MaxTxLifetime = "3h" [Sequencer.Finalizer] @@ -88,11 +116,9 @@ MaxTxLifetime = "3h" [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" - [Sequencer.EffectiveGasPrice] - MaxBreakEvenGasPriceDeviationPercentage = 10 - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - MarginFactor = 1 + [Sequencer.StreamServer] + Port = 6900 + Filename = "/datastreamer/datastream.bin" Enabled = false [SequenceSender] diff --git a/test/contracts/auto/ConstructorMap.sol b/test/contracts/auto/ConstructorMap.sol new file mode 100644 index 0000000000..87a7c8b66b --- /dev/null +++ b/test/contracts/auto/ConstructorMap.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +contract ConstructorMap { + mapping(uint => uint) public numbers; + + constructor() { + uint i = 0; + for (i = 0; i < 100; i++) { + numbers[i] = i; + } + } +} diff --git a/test/contracts/auto/FFFFFFFF.sol b/test/contracts/auto/FFFFFFFF.sol new file mode 100644 index 0000000000..8a2c1fb660 --- 
/dev/null +++ b/test/contracts/auto/FFFFFFFF.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +contract FFFFFFFF { + constructor() { + assembly { + return(0, 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff) + } + } +} \ No newline at end of file diff --git a/test/contracts/auto/HasOpCode.sol b/test/contracts/auto/HasOpCode.sol new file mode 100644 index 0000000000..9760caca4a --- /dev/null +++ b/test/contracts/auto/HasOpCode.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +contract HasOpCode { + uint256 gasPrice = 0; + uint256 balance = 0; + + function opGasPrice() public { + uint256 tmp; + assembly { + tmp := gasprice() + } + gasPrice = tmp; + } + + function opBalance() public { + address a = msg.sender; + uint256 tmp; + assembly { + tmp := balance(a) + } + balance = tmp; + } +} \ No newline at end of file diff --git a/test/contracts/bin/ConstructorMap/ConstructorMap.go b/test/contracts/bin/ConstructorMap/ConstructorMap.go new file mode 100644 index 0000000000..04f76e21fb --- /dev/null +++ b/test/contracts/bin/ConstructorMap/ConstructorMap.go @@ -0,0 +1,234 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ConstructorMap + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// ConstructorMapMetaData contains all meta data concerning the ConstructorMap contract. +var ConstructorMapMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"numbers\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5060005b60648110156100405760008181526020819052604090208190558061003881610046565b915050610014565b5061006f565b600060001982141561006857634e487b7160e01b600052601160045260246000fd5b5060010190565b60aa8061007d6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063d39fa23314602d575b600080fd5b604a6038366004605c565b60006020819052908152604090205481565b60405190815260200160405180910390f35b600060208284031215606d57600080fd5b503591905056fea26469706673582212207164b7e8cab7019534d840c5be1f93a98671cdbddc7ea08c6a73b67022062ee864736f6c634300080c0033", +} + +// ConstructorMapABI is the input ABI used to generate the binding from. +// Deprecated: Use ConstructorMapMetaData.ABI instead. +var ConstructorMapABI = ConstructorMapMetaData.ABI + +// ConstructorMapBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use ConstructorMapMetaData.Bin instead. +var ConstructorMapBin = ConstructorMapMetaData.Bin + +// DeployConstructorMap deploys a new Ethereum contract, binding an instance of ConstructorMap to it. 
+func DeployConstructorMap(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ConstructorMap, error) { + parsed, err := ConstructorMapMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ConstructorMapBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &ConstructorMap{ConstructorMapCaller: ConstructorMapCaller{contract: contract}, ConstructorMapTransactor: ConstructorMapTransactor{contract: contract}, ConstructorMapFilterer: ConstructorMapFilterer{contract: contract}}, nil +} + +// ConstructorMap is an auto generated Go binding around an Ethereum contract. +type ConstructorMap struct { + ConstructorMapCaller // Read-only binding to the contract + ConstructorMapTransactor // Write-only binding to the contract + ConstructorMapFilterer // Log filterer for contract events +} + +// ConstructorMapCaller is an auto generated read-only Go binding around an Ethereum contract. +type ConstructorMapCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ConstructorMapTransactor is an auto generated write-only Go binding around an Ethereum contract. +type ConstructorMapTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ConstructorMapFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type ConstructorMapFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ConstructorMapSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type ConstructorMapSession struct { + Contract *ConstructorMap // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ConstructorMapCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type ConstructorMapCallerSession struct { + Contract *ConstructorMapCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// ConstructorMapTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type ConstructorMapTransactorSession struct { + Contract *ConstructorMapTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ConstructorMapRaw is an auto generated low-level Go binding around an Ethereum contract. +type ConstructorMapRaw struct { + Contract *ConstructorMap // Generic contract binding to access the raw methods on +} + +// ConstructorMapCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type ConstructorMapCallerRaw struct { + Contract *ConstructorMapCaller // Generic read-only contract binding to access the raw methods on +} + +// ConstructorMapTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type ConstructorMapTransactorRaw struct { + Contract *ConstructorMapTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewConstructorMap creates a new instance of ConstructorMap, bound to a specific deployed contract. 
+func NewConstructorMap(address common.Address, backend bind.ContractBackend) (*ConstructorMap, error) { + contract, err := bindConstructorMap(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ConstructorMap{ConstructorMapCaller: ConstructorMapCaller{contract: contract}, ConstructorMapTransactor: ConstructorMapTransactor{contract: contract}, ConstructorMapFilterer: ConstructorMapFilterer{contract: contract}}, nil +} + +// NewConstructorMapCaller creates a new read-only instance of ConstructorMap, bound to a specific deployed contract. +func NewConstructorMapCaller(address common.Address, caller bind.ContractCaller) (*ConstructorMapCaller, error) { + contract, err := bindConstructorMap(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ConstructorMapCaller{contract: contract}, nil +} + +// NewConstructorMapTransactor creates a new write-only instance of ConstructorMap, bound to a specific deployed contract. +func NewConstructorMapTransactor(address common.Address, transactor bind.ContractTransactor) (*ConstructorMapTransactor, error) { + contract, err := bindConstructorMap(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ConstructorMapTransactor{contract: contract}, nil +} + +// NewConstructorMapFilterer creates a new log filterer instance of ConstructorMap, bound to a specific deployed contract. +func NewConstructorMapFilterer(address common.Address, filterer bind.ContractFilterer) (*ConstructorMapFilterer, error) { + contract, err := bindConstructorMap(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ConstructorMapFilterer{contract: contract}, nil +} + +// bindConstructorMap binds a generic wrapper to an already deployed contract. 
+func bindConstructorMap(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ConstructorMapMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_ConstructorMap *ConstructorMapRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ConstructorMap.Contract.ConstructorMapCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_ConstructorMap *ConstructorMapRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ConstructorMap.Contract.ConstructorMapTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_ConstructorMap *ConstructorMapRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ConstructorMap.Contract.ConstructorMapTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_ConstructorMap *ConstructorMapCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ConstructorMap.Contract.contract.Call(opts, result, method, params...) 
+} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_ConstructorMap *ConstructorMapTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ConstructorMap.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_ConstructorMap *ConstructorMapTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ConstructorMap.Contract.contract.Transact(opts, method, params...) +} + +// Numbers is a free data retrieval call binding the contract method 0xd39fa233. +// +// Solidity: function numbers(uint256 ) view returns(uint256) +func (_ConstructorMap *ConstructorMapCaller) Numbers(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _ConstructorMap.contract.Call(opts, &out, "numbers", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Numbers is a free data retrieval call binding the contract method 0xd39fa233. +// +// Solidity: function numbers(uint256 ) view returns(uint256) +func (_ConstructorMap *ConstructorMapSession) Numbers(arg0 *big.Int) (*big.Int, error) { + return _ConstructorMap.Contract.Numbers(&_ConstructorMap.CallOpts, arg0) +} + +// Numbers is a free data retrieval call binding the contract method 0xd39fa233. 
+// +// Solidity: function numbers(uint256 ) view returns(uint256) +func (_ConstructorMap *ConstructorMapCallerSession) Numbers(arg0 *big.Int) (*big.Int, error) { + return _ConstructorMap.Contract.Numbers(&_ConstructorMap.CallOpts, arg0) +} diff --git a/test/contracts/bin/FFFFFFFF/FFFFFFFF.go b/test/contracts/bin/FFFFFFFF/FFFFFFFF.go new file mode 100644 index 0000000000..f42b566c03 --- /dev/null +++ b/test/contracts/bin/FFFFFFFF/FFFFFFFF.go @@ -0,0 +1,203 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package FFFFFFFF + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// FFFFFFFFMetaData contains all meta data concerning the FFFFFFFF contract. +var FFFFFFFFMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"}]", + Bin: "0x6080604052348015600f57600080fd5b506000196000f3fe", +} + +// FFFFFFFFABI is the input ABI used to generate the binding from. +// Deprecated: Use FFFFFFFFMetaData.ABI instead. +var FFFFFFFFABI = FFFFFFFFMetaData.ABI + +// FFFFFFFFBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use FFFFFFFFMetaData.Bin instead. +var FFFFFFFFBin = FFFFFFFFMetaData.Bin + +// DeployFFFFFFFF deploys a new Ethereum contract, binding an instance of FFFFFFFF to it. 
+func DeployFFFFFFFF(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *FFFFFFFF, error) { + parsed, err := FFFFFFFFMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FFFFFFFFBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FFFFFFFF{FFFFFFFFCaller: FFFFFFFFCaller{contract: contract}, FFFFFFFFTransactor: FFFFFFFFTransactor{contract: contract}, FFFFFFFFFilterer: FFFFFFFFFilterer{contract: contract}}, nil +} + +// FFFFFFFF is an auto generated Go binding around an Ethereum contract. +type FFFFFFFF struct { + FFFFFFFFCaller // Read-only binding to the contract + FFFFFFFFTransactor // Write-only binding to the contract + FFFFFFFFFilterer // Log filterer for contract events +} + +// FFFFFFFFCaller is an auto generated read-only Go binding around an Ethereum contract. +type FFFFFFFFCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// FFFFFFFFTransactor is an auto generated write-only Go binding around an Ethereum contract. +type FFFFFFFFTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// FFFFFFFFFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type FFFFFFFFFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// FFFFFFFFSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type FFFFFFFFSession struct { + Contract *FFFFFFFF // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// FFFFFFFFCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type FFFFFFFFCallerSession struct { + Contract *FFFFFFFFCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// FFFFFFFFTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type FFFFFFFFTransactorSession struct { + Contract *FFFFFFFFTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// FFFFFFFFRaw is an auto generated low-level Go binding around an Ethereum contract. +type FFFFFFFFRaw struct { + Contract *FFFFFFFF // Generic contract binding to access the raw methods on +} + +// FFFFFFFFCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type FFFFFFFFCallerRaw struct { + Contract *FFFFFFFFCaller // Generic read-only contract binding to access the raw methods on +} + +// FFFFFFFFTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type FFFFFFFFTransactorRaw struct { + Contract *FFFFFFFFTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewFFFFFFFF creates a new instance of FFFFFFFF, bound to a specific deployed contract. 
+func NewFFFFFFFF(address common.Address, backend bind.ContractBackend) (*FFFFFFFF, error) { + contract, err := bindFFFFFFFF(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FFFFFFFF{FFFFFFFFCaller: FFFFFFFFCaller{contract: contract}, FFFFFFFFTransactor: FFFFFFFFTransactor{contract: contract}, FFFFFFFFFilterer: FFFFFFFFFilterer{contract: contract}}, nil +} + +// NewFFFFFFFFCaller creates a new read-only instance of FFFFFFFF, bound to a specific deployed contract. +func NewFFFFFFFFCaller(address common.Address, caller bind.ContractCaller) (*FFFFFFFFCaller, error) { + contract, err := bindFFFFFFFF(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FFFFFFFFCaller{contract: contract}, nil +} + +// NewFFFFFFFFTransactor creates a new write-only instance of FFFFFFFF, bound to a specific deployed contract. +func NewFFFFFFFFTransactor(address common.Address, transactor bind.ContractTransactor) (*FFFFFFFFTransactor, error) { + contract, err := bindFFFFFFFF(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FFFFFFFFTransactor{contract: contract}, nil +} + +// NewFFFFFFFFFilterer creates a new log filterer instance of FFFFFFFF, bound to a specific deployed contract. +func NewFFFFFFFFFilterer(address common.Address, filterer bind.ContractFilterer) (*FFFFFFFFFilterer, error) { + contract, err := bindFFFFFFFF(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FFFFFFFFFilterer{contract: contract}, nil +} + +// bindFFFFFFFF binds a generic wrapper to an already deployed contract. 
+func bindFFFFFFFF(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FFFFFFFFMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_FFFFFFFF *FFFFFFFFRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FFFFFFFF.Contract.FFFFFFFFCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_FFFFFFFF *FFFFFFFFRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FFFFFFFF.Contract.FFFFFFFFTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_FFFFFFFF *FFFFFFFFRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FFFFFFFF.Contract.FFFFFFFFTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_FFFFFFFF *FFFFFFFFCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FFFFFFFF.Contract.contract.Call(opts, result, method, params...) 
+} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_FFFFFFFF *FFFFFFFFTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FFFFFFFF.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_FFFFFFFF *FFFFFFFFTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FFFFFFFF.Contract.contract.Transact(opts, method, params...) +} diff --git a/test/contracts/bin/HasOpCode/HasOpCode.go b/test/contracts/bin/HasOpCode/HasOpCode.go new file mode 100644 index 0000000000..2da951f9c4 --- /dev/null +++ b/test/contracts/bin/HasOpCode/HasOpCode.go @@ -0,0 +1,245 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package HasOpCode + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// HasOpCodeMetaData contains all meta data concerning the HasOpCode contract. 
+var HasOpCodeMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"opBalance\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"opGasPrice\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6080604052600080556000600155348015601857600080fd5b506080806100276000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c80633ab08cf914603757806374c73639146042575b600080fd5b60403331600155565b005b60403a60005556fea264697066735822122086d3f33465f92e2f6ddc32c9acfb8512d8c86ff16e540197cd39d4f3aaf38ffc64736f6c634300080c0033", +} + +// HasOpCodeABI is the input ABI used to generate the binding from. +// Deprecated: Use HasOpCodeMetaData.ABI instead. +var HasOpCodeABI = HasOpCodeMetaData.ABI + +// HasOpCodeBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use HasOpCodeMetaData.Bin instead. +var HasOpCodeBin = HasOpCodeMetaData.Bin + +// DeployHasOpCode deploys a new Ethereum contract, binding an instance of HasOpCode to it. +func DeployHasOpCode(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *HasOpCode, error) { + parsed, err := HasOpCodeMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(HasOpCodeBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &HasOpCode{HasOpCodeCaller: HasOpCodeCaller{contract: contract}, HasOpCodeTransactor: HasOpCodeTransactor{contract: contract}, HasOpCodeFilterer: HasOpCodeFilterer{contract: contract}}, nil +} + +// HasOpCode is an auto generated Go binding around an Ethereum contract. 
+type HasOpCode struct { + HasOpCodeCaller // Read-only binding to the contract + HasOpCodeTransactor // Write-only binding to the contract + HasOpCodeFilterer // Log filterer for contract events +} + +// HasOpCodeCaller is an auto generated read-only Go binding around an Ethereum contract. +type HasOpCodeCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// HasOpCodeTransactor is an auto generated write-only Go binding around an Ethereum contract. +type HasOpCodeTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// HasOpCodeFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type HasOpCodeFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// HasOpCodeSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type HasOpCodeSession struct { + Contract *HasOpCode // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// HasOpCodeCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type HasOpCodeCallerSession struct { + Contract *HasOpCodeCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// HasOpCodeTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. 
+type HasOpCodeTransactorSession struct { + Contract *HasOpCodeTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// HasOpCodeRaw is an auto generated low-level Go binding around an Ethereum contract. +type HasOpCodeRaw struct { + Contract *HasOpCode // Generic contract binding to access the raw methods on +} + +// HasOpCodeCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type HasOpCodeCallerRaw struct { + Contract *HasOpCodeCaller // Generic read-only contract binding to access the raw methods on +} + +// HasOpCodeTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type HasOpCodeTransactorRaw struct { + Contract *HasOpCodeTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewHasOpCode creates a new instance of HasOpCode, bound to a specific deployed contract. +func NewHasOpCode(address common.Address, backend bind.ContractBackend) (*HasOpCode, error) { + contract, err := bindHasOpCode(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &HasOpCode{HasOpCodeCaller: HasOpCodeCaller{contract: contract}, HasOpCodeTransactor: HasOpCodeTransactor{contract: contract}, HasOpCodeFilterer: HasOpCodeFilterer{contract: contract}}, nil +} + +// NewHasOpCodeCaller creates a new read-only instance of HasOpCode, bound to a specific deployed contract. +func NewHasOpCodeCaller(address common.Address, caller bind.ContractCaller) (*HasOpCodeCaller, error) { + contract, err := bindHasOpCode(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &HasOpCodeCaller{contract: contract}, nil +} + +// NewHasOpCodeTransactor creates a new write-only instance of HasOpCode, bound to a specific deployed contract. 
+func NewHasOpCodeTransactor(address common.Address, transactor bind.ContractTransactor) (*HasOpCodeTransactor, error) { + contract, err := bindHasOpCode(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &HasOpCodeTransactor{contract: contract}, nil +} + +// NewHasOpCodeFilterer creates a new log filterer instance of HasOpCode, bound to a specific deployed contract. +func NewHasOpCodeFilterer(address common.Address, filterer bind.ContractFilterer) (*HasOpCodeFilterer, error) { + contract, err := bindHasOpCode(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &HasOpCodeFilterer{contract: contract}, nil +} + +// bindHasOpCode binds a generic wrapper to an already deployed contract. +func bindHasOpCode(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := HasOpCodeMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_HasOpCode *HasOpCodeRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _HasOpCode.Contract.HasOpCodeCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_HasOpCode *HasOpCodeRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _HasOpCode.Contract.HasOpCodeTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. 
+func (_HasOpCode *HasOpCodeRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _HasOpCode.Contract.HasOpCodeTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_HasOpCode *HasOpCodeCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _HasOpCode.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_HasOpCode *HasOpCodeTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _HasOpCode.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_HasOpCode *HasOpCodeTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _HasOpCode.Contract.contract.Transact(opts, method, params...) +} + +// OpBalance is a paid mutator transaction binding the contract method 0x3ab08cf9. +// +// Solidity: function opBalance() returns() +func (_HasOpCode *HasOpCodeTransactor) OpBalance(opts *bind.TransactOpts) (*types.Transaction, error) { + return _HasOpCode.contract.Transact(opts, "opBalance") +} + +// OpBalance is a paid mutator transaction binding the contract method 0x3ab08cf9. +// +// Solidity: function opBalance() returns() +func (_HasOpCode *HasOpCodeSession) OpBalance() (*types.Transaction, error) { + return _HasOpCode.Contract.OpBalance(&_HasOpCode.TransactOpts) +} + +// OpBalance is a paid mutator transaction binding the contract method 0x3ab08cf9. 
+// +// Solidity: function opBalance() returns() +func (_HasOpCode *HasOpCodeTransactorSession) OpBalance() (*types.Transaction, error) { + return _HasOpCode.Contract.OpBalance(&_HasOpCode.TransactOpts) +} + +// OpGasPrice is a paid mutator transaction binding the contract method 0x74c73639. +// +// Solidity: function opGasPrice() returns() +func (_HasOpCode *HasOpCodeTransactor) OpGasPrice(opts *bind.TransactOpts) (*types.Transaction, error) { + return _HasOpCode.contract.Transact(opts, "opGasPrice") +} + +// OpGasPrice is a paid mutator transaction binding the contract method 0x74c73639. +// +// Solidity: function opGasPrice() returns() +func (_HasOpCode *HasOpCodeSession) OpGasPrice() (*types.Transaction, error) { + return _HasOpCode.Contract.OpGasPrice(&_HasOpCode.TransactOpts) +} + +// OpGasPrice is a paid mutator transaction binding the contract method 0x74c73639. +// +// Solidity: function opGasPrice() returns() +func (_HasOpCode *HasOpCodeTransactorSession) OpGasPrice() (*types.Transaction, error) { + return _HasOpCode.Contract.OpGasPrice(&_HasOpCode.TransactOpts) +} diff --git a/test/docker-compose.yml b/test/docker-compose.yml index df57c0885a..e1d1fd44e5 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.5" networks: default: name: x1 - + services: grafana: container_name: grafana @@ -52,12 +52,16 @@ services: ports: - 9092:9091 # needed if metrics enabled - 6060:6060 + - 6900:6900 # Data stream server environment: - - ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db - ZKEVM_NODE_POOL_DB_HOST=x1-pool-db + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json + - ./:/datastreamer command: - "/bin/sh" - "-c" @@ -67,9 +71,11 @@ services: container_name: x1-sequence-sender image: x1-node environment: - - 
ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db - ZKEVM_NODE_POOL_DB_HOST=x1-pool-db - ZKEVM_NODE_SEQUENCER_SENDER_ADDRESS=0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} volumes: - ./sequencer.keystore:/pk/sequencer.keystore - ./config/test.node.config.toml:/app/config.toml @@ -87,8 +93,10 @@ services: - 8133:8133 # needed if WebSockets enabled - 9091:9091 # needed if metrics enabled environment: - - ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db - ZKEVM_NODE_POOL_DB_HOST=x1-pool-db + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json @@ -104,7 +112,7 @@ services: - 50081:50081 - 9093:9091 # needed if metrics enabled environment: - - ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db - ZKEVM_NODE_AGGREGATOR_SENDER_ADDRESS=0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 volumes: - ./config/test.node.config.toml:/app/config.toml @@ -120,7 +128,9 @@ services: ports: - 9095:9091 # needed if metrics enabled environment: - - ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json @@ -135,7 +145,7 @@ services: ports: - 9094:9091 # needed if metrics enabled environment: - - ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db volumes: - ./sequencer.keystore:/pk/sequencer.keystore - ./aggregator.keystore:/pk/aggregator.keystore @@ -285,7 +295,7 @@ services: - 8124:8124 - 8134:8134 # needed if WebSockets enabled environment: - - 
ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db - ZKEVM_NODE_POOL_DB_HOST=x1-pool-db - ZKEVM_NODE_RPC_PORT=8124 - ZKEVM_NODE_RPC_WEBSOCKETS_PORT=8134 @@ -310,7 +320,7 @@ services: x1-mock-l1-network: container_name: x1-mock-l1-network - image: okexchain/x1-geth:e2e_v0.1.0_20231113 + image: okexchain/x1-geth:e2e_v0.1.1_20231214 ports: - 8545:8545 - 8546:8546 @@ -340,7 +350,7 @@ services: x1-prover: container_name: x1-prover - image: hermeznetwork/zkevm-prover:v2.2.0 + image: hermeznetwork/zkevm-prover:v3.0.2 ports: # - 50051:50051 # Prover - 50052:50052 # Mock prover @@ -366,7 +376,7 @@ services: container_name: x1-approve image: x1-node environment: - - ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db volumes: - ./sequencer.keystore:/pk/keystore - ./config/test.node.config.toml:/app/config.toml @@ -398,24 +408,24 @@ services: - "-N" - "500" - x1-permissionless-node-forced-DAC: - container_name: x1-permissionless-node-forced-DAC + x1-permissionless-node: + container_name: x1-permissionless-node image: x1-node ports: - 8125:8125 environment: - ZKEVM_NODE_ISTRUSTEDSEQUENCER=false - - ZKEVM_NODE_STATEDB_USER=test_user - - ZKEVM_NODE_STATEDB_PASSWORD=test_password - - ZKEVM_NODE_STATEDB_NAME=state_db - - ZKEVM_NODE_STATEDB_HOST=x1-permissionless-db + - ZKEVM_NODE_STATE_DB_USER=test_user + - ZKEVM_NODE_STATE_DB_PASSWORD=test_password + - ZKEVM_NODE_STATE_DB_NAME=state_db + - ZKEVM_NODE_STATE_DB_HOST=x1-permissionless-db - ZKEVM_NODE_POOL_DB_USER=test_user - ZKEVM_NODE_POOL_DB_PASSWORD=test_password - ZKEVM_NODE_POOL_DB_NAME=pool_db - ZKEVM_NODE_POOL_DB_HOST=x1-permissionless-db - ZKEVM_NODE_RPC_PORT=8125 - ZKEVM_NODE_RPC_SEQUENCERNODEURI=http://x1-json-rpc:8123 - - ZKEVM_NODE_SYNCHRONIZER_TRUSTEDSEQUENCERURL=http://you-cant-touch-this:8123 + - ZKEVM_NODE_SYNCHRONIZER_TRUSTEDSEQUENCERURL=http://x1-json-rpc:8123 - ZKEVM_NODE_MTCLIENT_URI=x1-permissionless-prover:50061 - 
ZKEVM_NODE_EXECUTOR_URI=x1-permissionless-prover:50071 volumes: @@ -425,23 +435,26 @@ services: - "/bin/sh" - "-c" - "/app/x1-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components \"rpc,synchronizer\"" - x1-permissionless-node: - container_name: x1-permissionless-node + + + x1-permissionless-node-forced-DAC: + container_name: x1-permissionless-node-forced-DAC image: x1-node ports: - 8125:8125 environment: - ZKEVM_NODE_ISTRUSTEDSEQUENCER=false - - ZKEVM_NODE_STATEDB_USER=test_user - - ZKEVM_NODE_STATEDB_PASSWORD=test_password - - ZKEVM_NODE_STATEDB_NAME=state_db - - ZKEVM_NODE_STATEDB_HOST=x1-permissionless-db + - ZKEVM_NODE_STATE_DB_USER=test_user + - ZKEVM_NODE_STATE_DB_PASSWORD=test_password + - ZKEVM_NODE_STATE_DB_NAME=state_db + - ZKEVM_NODE_STATE_DB_HOST=x1-permissionless-db - ZKEVM_NODE_POOL_DB_USER=test_user - ZKEVM_NODE_POOL_DB_PASSWORD=test_password - ZKEVM_NODE_POOL_DB_NAME=pool_db - ZKEVM_NODE_POOL_DB_HOST=x1-permissionless-db - ZKEVM_NODE_RPC_PORT=8125 - ZKEVM_NODE_RPC_SEQUENCERNODEURI=http://x1-json-rpc:8123 + - ZKEVM_NODE_SYNCHRONIZER_TRUSTEDSEQUENCERURL=http://you-cant-touch-this:8123 - ZKEVM_NODE_MTCLIENT_URI=x1-permissionless-prover:50061 - ZKEVM_NODE_EXECUTOR_URI=x1-permissionless-prover:50071 volumes: @@ -452,9 +465,10 @@ services: - "-c" - "/app/x1-node run --network custom --custom-network-file /app/genesis.json --cfg /app/config.toml --components \"rpc,synchronizer\"" + x1-permissionless-prover: container_name: x1-permissionless-prover - image: hermeznetwork/zkevm-prover:v2.2.0 + image: hermeznetwork/zkevm-prover:v3.0.2 ports: # - 50058:50058 # Prover - 50059:50052 # Mock prover @@ -483,7 +497,7 @@ services: stdin_open: true tty: true environment: - - ZKEVM_NODE_STATEDB_HOST=x1-state-db + - ZKEVM_NODE_STATE_DB_HOST=x1-state-db - ZKEVM_NODE_POOL_DB_HOST=x1-pool-db volumes: - ./config/test.node.config.toml:/app/config.toml diff --git a/test/e2e/debug_calltracer_test.go 
b/test/e2e/debug_calltracer_test.go index aa86441140..26ad06c2e1 100644 --- a/test/e2e/debug_calltracer_test.go +++ b/test/e2e/debug_calltracer_test.go @@ -98,6 +98,8 @@ func TestDebugTraceTransactionCallTracer(t *testing.T) { // failed transactions {name: "sc deployment reverted", createSignedTx: createScDeployRevertedSignedTx}, + {name: "sc deployment out of gas", createSignedTx: createScDeployOutOfGasSignedTx}, + // PENDING {name: "sc creation storage out of gas", createSignedTx: createScCreationCodeStorageOutOfGasSignedTx}, {name: "sc call reverted", prepare: prepareScCallReverted, createSignedTx: createScCallRevertedSignedTx}, {name: "erc20 transfer reverted", prepare: prepareERC20TransferReverted, createSignedTx: createERC20TransferRevertedSignedTx}, {name: "invalid static call less parameters", prepare: prepareCalls, createSignedTx: createInvalidStaticCallLessParametersSignedTx}, @@ -229,7 +231,9 @@ func compareCallFrame(t *testing.T, referenceValueMap, resultMap map[string]inte require.Equal(t, referenceValueMap["value"], resultMap["value"], fmt.Sprintf("invalid `value` for network %s", networkName)) require.Equal(t, referenceValueMap["type"], resultMap["type"], fmt.Sprintf("invalid `type` for network %s", networkName)) require.Equal(t, referenceValueMap["error"], resultMap["error"], fmt.Sprintf("invalid `error` for network %s", networkName)) - require.Equal(t, referenceValueMap["revertReason"], resultMap["revertReason"], fmt.Sprintf("invalid `revertReason` for network %s", networkName)) + if _, found := referenceValueMap["revertReason"]; found { + require.Equal(t, referenceValueMap["revertReason"], resultMap["revertReason"], fmt.Sprintf("invalid `revertReason` for network %s", networkName)) + } referenceLogs, found := referenceValueMap["logs"].([]interface{}) if found { diff --git a/test/e2e/debug_shared.go b/test/e2e/debug_shared.go index 1c3ec4a773..12e689d4e3 100644 --- a/test/e2e/debug_shared.go +++ b/test/e2e/debug_shared.go @@ -163,6 +163,48 @@ func 
createScDeployRevertedSignedTx(t *testing.T, ctx context.Context, auth *bin return auth.Signer(auth.From, tx) } +func createScDeployOutOfGasSignedTx(t *testing.T, ctx context.Context, auth *bind.TransactOpts, client *ethclient.Client, customData map[string]interface{}) (*ethTypes.Transaction, error) { + nonce, err := client.PendingNonceAt(ctx, auth.From) + require.NoError(t, err) + + gasPrice, err := client.SuggestGasPrice(ctx) + require.NoError(t, err) + + scByteCode, err := testutils.ReadBytecode("ConstructorMap/ConstructorMap.bin") + require.NoError(t, err) + data := common.Hex2Bytes(scByteCode) + + tx := ethTypes.NewTx(ðTypes.LegacyTx{ + Nonce: nonce, + GasPrice: gasPrice, + Gas: uint64(2000000), + Data: data, + }) + + return auth.Signer(auth.From, tx) +} + +// func createScCreationCodeStorageOutOfGasSignedTx(t *testing.T, ctx context.Context, auth *bind.TransactOpts, client *ethclient.Client, customData map[string]interface{}) (*ethTypes.Transaction, error) { +// nonce, err := client.PendingNonceAt(ctx, auth.From) +// require.NoError(t, err) + +// gasPrice, err := client.SuggestGasPrice(ctx) +// require.NoError(t, err) + +// scByteCode, err := testutils.ReadBytecode("FFFFFFFF/FFFFFFFF.bin") +// require.NoError(t, err) +// data := common.Hex2Bytes(scByteCode) + +// tx := ethTypes.NewTx(ðTypes.LegacyTx{ +// Nonce: nonce, +// GasPrice: gasPrice, +// Gas: uint64(150000), +// Data: data, +// }) + +// return auth.Signer(auth.From, tx) +// } + func prepareScCallReverted(t *testing.T, ctx context.Context, auth *bind.TransactOpts, client *ethclient.Client) (map[string]interface{}, error) { _, tx, sc, err := Revert2.DeployRevert2(auth, client) require.NoError(t, err) diff --git a/test/e2e/debug_test.go b/test/e2e/debug_test.go index 4eed0ab18e..c46c36e8a9 100644 --- a/test/e2e/debug_test.go +++ b/test/e2e/debug_test.go @@ -294,6 +294,8 @@ func TestDebugTraceTransaction(t *testing.T) { // failed transactions {name: "sc deployment reverted", createSignedTx: 
createScDeployRevertedSignedTx}, + {name: "sc deployment out of gas", createSignedTx: createScDeployOutOfGasSignedTx}, + // PENDING {name: "sc creation storage out of gas", createSignedTx: createScCreationCodeStorageOutOfGasSignedTx}, {name: "sc call reverted", prepare: prepareScCallReverted, createSignedTx: createScCallRevertedSignedTx}, {name: "erc20 transfer reverted", prepare: prepareERC20TransferReverted, createSignedTx: createERC20TransferRevertedSignedTx}, {name: "invalid static call less parameters", prepare: prepareCalls, createSignedTx: createInvalidStaticCallLessParametersSignedTx}, @@ -623,7 +625,12 @@ func TestDebugTraceBlock(t *testing.T) { resultTransactionMap := resultTransactions[transactionIndex].(map[string]interface{}) resultResultMap := resultTransactionMap["result"].(map[string]interface{}) resultStructLogsMap := resultResultMap["structLogs"].([]interface{}) - + log.Debugf("test[%s] referenceStructLogsMap : L1_len=%d L2_len=%d", tc.name, len(referenceStructLogsMap), len(resultStructLogsMap)) + if len(referenceStructLogsMap) != len(resultStructLogsMap) { + log.Debugf("test[%s] referenceStructLogsMap not equal", tc.name) + log.Debug("L1 (referenceTransactions): ", referenceTransactions) + log.Debug("L2 (resultTransactions): ", resultTransactions) + } require.Equal(t, len(referenceStructLogsMap), len(resultStructLogsMap)) for structLogIndex := range referenceStructLogsMap { diff --git a/test/e2e/effectivegasprice_test.go b/test/e2e/effectivegasprice_test.go new file mode 100644 index 0000000000..439273da6f --- /dev/null +++ b/test/e2e/effectivegasprice_test.go @@ -0,0 +1,63 @@ +package e2e + +import ( + "context" + "math/big" + "testing" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/test/operations" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + 
"github.com/stretchr/testify/require" +) + +func TestEffectiveGasPrice(t *testing.T) { + if testing.Short() { + t.Skip() + } + + ctx := context.Background() + + opsCfg := operations.GetDefaultOperationsConfig() + opsCfg.State.MaxCumulativeGasUsed = 80000000000 + + // Load account with balance on local genesis + auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL2ChainID) + require.NoError(t, err) + + // Load eth client + client, err := ethclient.Dial(operations.DefaultL2NetworkURL) + require.NoError(t, err) + + // Send tx + amount := big.NewInt(10000) + toAddress := common.HexToAddress("0x70997970C51812dc3A010C7d01b50e0d17dc79C8") + senderBalance, err := client.BalanceAt(ctx, auth.From, nil) + require.NoError(t, err) + senderNonce, err := client.PendingNonceAt(ctx, auth.From) + require.NoError(t, err) + + log.Infof("Receiver Addr: %v", toAddress.String()) + log.Infof("Sender Addr: %v", auth.From.String()) + log.Infof("Sender Balance: %v", senderBalance.String()) + log.Infof("Sender Nonce: %v", senderNonce) + + gasLimit, err := client.EstimateGas(ctx, ethereum.CallMsg{From: auth.From, To: &toAddress, Value: amount}) + require.NoError(t, err) + + gasPrice, err := client.SuggestGasPrice(ctx) + require.NoError(t, err) + + nonce, err := client.PendingNonceAt(ctx, auth.From) + require.NoError(t, err) + + txs := make([]*types.Transaction, 0, 1) + tx := types.NewTransaction(nonce, toAddress, amount, gasLimit, gasPrice, nil) + txs = append(txs, tx) + + _, err = operations.ApplyL2Txs(ctx, txs, auth, client, operations.TrustedConfirmationLevel) + require.NoError(t, err) +} diff --git a/test/e2e/forced_batches_test.go b/test/e2e/forced_batches_test.go index 2d6324e3fd..67378ef387 100644 --- a/test/e2e/forced_batches_test.go +++ b/test/e2e/forced_batches_test.go @@ -3,10 +3,13 @@ package e2e import ( "context" "math/big" - "sync" "testing" "time" + "github.com/0xPolygonHermez/zkevm-node/config" + + 
"github.com/ethereum/go-ethereum/core/types" + "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm" "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevmglobalexitroot" "github.com/0xPolygonHermez/zkevm-node/log" @@ -16,7 +19,6 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/stretchr/testify/require" ) @@ -42,27 +44,21 @@ func TestForcedBatches(t *testing.T) { txs = append(txs, tx) } - wgNormalL2Transfers := new(sync.WaitGroup) - wgNormalL2Transfers.Add(1) var l2BlockNumbers []*big.Int - go func() { - defer wgNormalL2Transfers.Done() - l2BlockNumbers, err = operations.ApplyL2Txs(ctx, txs, auth, client, operations.VerifiedConfirmationLevel) - require.NoError(t, err) - }() + l2BlockNumbers, err = operations.ApplyL2Txs(ctx, txs, auth, client, operations.VerifiedConfirmationLevel) + require.NoError(t, err) time.Sleep(2 * time.Second) amount = big.NewInt(0).Add(amount, big.NewInt(10)) unsignedTx := types.NewTransaction(nonce, toAddress, amount, gasLimit, gasPrice, nil) signedTx, err := auth.Signer(auth.From, unsignedTx) require.NoError(t, err) - encodedTxs, err := state.EncodeTransactions([]types.Transaction{*signedTx}, constants.EffectivePercentage, forkID) + encodedTxs, err := state.EncodeTransactions([]types.Transaction{*signedTx}, constants.EffectivePercentage, forkID6) require.NoError(t, err) forcedBatch, err := sendForcedBatch(t, encodedTxs, opsman) require.NoError(t, err) // Checking if all txs sent before the forced batch were processed within previous closed batch - wgNormalL2Transfers.Wait() for _, l2blockNum := range l2BlockNumbers { batch, err := opsman.State().GetBatchByL2BlockNumber(ctx, l2blockNum.Uint64(), nil) require.NoError(t, err) @@ -75,8 +71,13 @@ func setupEnvironment(ctx context.Context, t *testing.T) 
(*operations.Manager, * require.NoError(t, err) opsCfg := operations.GetDefaultOperationsConfig() opsCfg.State.MaxCumulativeGasUsed = 80000000000 + genesisFileAsStr, err := config.LoadGenesisFileAsString("../../test/config/test.genesis.config.json") + require.NoError(t, err) + genesisConfig, err := config.LoadGenesisFromJSONString(genesisFileAsStr) + require.NoError(t, err) opsman, err := operations.NewManager(ctx, opsCfg) require.NoError(t, err) + require.NoError(t, opsman.SetForkID(genesisConfig.Genesis.GenesisBlockNum, forkID6)) err = opsman.Setup() require.NoError(t, err) time.Sleep(5 * time.Second) diff --git a/test/e2e/forced_batches_vector_test.go b/test/e2e/forced_batches_vector_group1_test.go similarity index 53% rename from test/e2e/forced_batches_vector_test.go rename to test/e2e/forced_batches_vector_group1_test.go index 05b0eab7f3..7bf6c70cf3 100644 --- a/test/e2e/forced_batches_vector_test.go +++ b/test/e2e/forced_batches_vector_group1_test.go @@ -9,32 +9,28 @@ import ( "testing" "time" - "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm" + "github.com/0xPolygonHermez/zkevm-node/config" "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/0xPolygonHermez/zkevm-node/test/constants" "github.com/0xPolygonHermez/zkevm-node/test/operations" "github.com/0xPolygonHermez/zkevm-node/test/vectors" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" "github.com/stretchr/testify/require" ) -const ( - forkID = 5 -) - -func TestForcedBatchesVectorFiles(t *testing.T) { +func TestForcedBatchesVectorFilesGroup1(t *testing.T) { if testing.Short() { t.Skip() } - vectorFilesDir := "./../vectors/src/state-transition/forced-tx" + vectorFilesDir := "./../vectors/src/state-transition/forced-tx/group1" ctx := context.Background() - 
err := filepath.Walk(vectorFilesDir, func(path string, info os.FileInfo, err error) error { + genesisFileAsStr, err := config.LoadGenesisFileAsString("../../test/config/test.genesis.config.json") + require.NoError(t, err) + genesisConfig, err := config.LoadGenesisFromJSONString(genesisFileAsStr) + require.NoError(t, err) + err = filepath.Walk(vectorFilesDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -63,7 +59,8 @@ func TestForcedBatchesVectorFiles(t *testing.T) { log.Info("# Setting Genesis #") log.Info("###################") genesisActions := vectors.GenerateGenesisActions(testCase.Genesis) - require.NoError(t, opsman.SetGenesis(genesisActions)) + require.NoError(t, opsman.SetGenesis(genesisConfig.Genesis.GenesisBlockNum, genesisActions)) + require.NoError(t, opsman.SetForkID(genesisConfig.Genesis.GenesisBlockNum, forkID6)) require.NoError(t, opsman.Setup()) // Check initial root @@ -75,7 +72,8 @@ func TestForcedBatchesVectorFiles(t *testing.T) { require.Equal(t, testCase.ExpectedOldStateRoot, actualOldStateRoot.Hex()) decodedData, err := hex.DecodeHex(testCase.BatchL2Data) require.NoError(t, err) - _, txBytes, _, err := state.DecodeTxs(decodedData, forkID) + _, txBytes, _, err := state.DecodeTxs(decodedData, forkID6) + require.NoError(t, err) forcedBatch, err := sendForcedBatchForVector(t, txBytes, opsman) require.NoError(t, err) actualNewStateRoot := forcedBatch.StateRoot @@ -135,98 +133,3 @@ func TestForcedBatchesVectorFiles(t *testing.T) { }) require.NoError(t, err) } - -func sendForcedBatchForVector(t *testing.T, txs []byte, opsman *operations.Manager) (*state.Batch, error) { - ctx := context.Background() - st := opsman.State() - // Connect to ethereum node - ethClient, err := ethclient.Dial(operations.DefaultL1NetworkURL) - require.NoError(t, err) - - // Create smc client - zkEvmAddr := common.HexToAddress(operations.DefaultL1ZkEVMSmartContract) - zkEvm, err := polygonzkevm.NewPolygonzkevm(zkEvmAddr, ethClient) - 
require.NoError(t, err) - - auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL1ChainID) - require.NoError(t, err) - - log.Info("Using address: ", auth.From) - num, err := zkEvm.LastForceBatch(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - log.Info("Number of forceBatches in the smc: ", num) - - // Get tip - tip, err := zkEvm.GetForcedBatchFee(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - disallowed, err := zkEvm.IsForcedBatchDisallowed(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - if disallowed { - tx, err := zkEvm.ActivateForceBatches(auth) - require.NoError(t, err) - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - } - - // Send forceBatch - tx, err := zkEvm.ForceBatch(auth, txs, tip) - require.NoError(t, err) - - log.Info("Forced Batch Submit to L1 TxHash: ", tx.Hash()) - time.Sleep(1 * time.Second) - - err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) - require.NoError(t, err) - - currentBlock, err := ethClient.BlockByNumber(ctx, nil) - require.NoError(t, err) - log.Debug("currentBlock.Time(): ", currentBlock.Time()) - - query := ethereum.FilterQuery{ - FromBlock: currentBlock.Number(), - Addresses: []common.Address{zkEvmAddr}, - } - logs, err := ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - - var forcedBatch *state.Batch - for _, vLog := range logs { - if vLog.Topics[0] != constants.ForcedBatchSignatureHash { - logs, err = ethClient.FilterLogs(ctx, query) - require.NoError(t, err) - continue - } - fb, err := zkEvm.ParseForceBatch(vLog) - if err != nil { - log.Errorf("failed to parse force batch log event, err: ", err) - } - log.Debugf("log decoded: %+v", fb) - ger := fb.LastGlobalExitRoot - log.Info("GlobalExitRoot: ", ger) - log.Info("Transactions: ", common.Bytes2Hex(fb.Transactions)) - fullBlock, err := ethClient.BlockByHash(ctx, vLog.BlockHash) - if 
err != nil { - log.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) - return nil, err - } - log.Info("MinForcedTimestamp: ", fullBlock.Time()) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - for err == state.ErrStateNotSynchronized { - time.Sleep(1 * time.Second) - forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) - } - require.NoError(t, err) - require.NotNil(t, forcedBatch) - - log.Info("Waiting Forced Batch to be virtualized ...") - err = operations.WaitBatchToBeVirtualized(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - - log.Info("Waiting Forced Batch to be consolidated ...") - err = operations.WaitBatchToBeConsolidated(forcedBatch.BatchNumber, 4*time.Minute, st) - require.NoError(t, err) - } - - return forcedBatch, nil -} diff --git a/test/e2e/forced_batches_vector_group2_test.go b/test/e2e/forced_batches_vector_group2_test.go new file mode 100644 index 0000000000..21070db52f --- /dev/null +++ b/test/e2e/forced_batches_vector_group2_test.go @@ -0,0 +1,134 @@ +package e2e + +import ( + "context" + "math/big" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/config" + "github.com/0xPolygonHermez/zkevm-node/hex" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/0xPolygonHermez/zkevm-node/test/operations" + "github.com/0xPolygonHermez/zkevm-node/test/vectors" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestForcedBatchesVectorFilesGroup2(t *testing.T) { + + if testing.Short() { + t.Skip() + } + vectorFilesDir := "./../vectors/src/state-transition/forced-tx/group2" + ctx := context.Background() + genesisFileAsStr, err := config.LoadGenesisFileAsString("../../test/config/test.genesis.config.json") + require.NoError(t, err) + genesisConfig, err := 
config.LoadGenesisFromJSONString(genesisFileAsStr) + require.NoError(t, err) + err = filepath.Walk(vectorFilesDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && !strings.HasSuffix(info.Name(), "list.json") { + + t.Run(info.Name(), func(t *testing.T) { + + defer func() { + require.NoError(t, operations.Teardown()) + }() + + // Load test vectors + log.Info("=====================================================================") + log.Info(path) + log.Info("=====================================================================") + testCase, err := vectors.LoadStateTransitionTestCaseV2(path) + require.NoError(t, err) + + opsCfg := operations.GetDefaultOperationsConfig() + opsCfg.State.MaxCumulativeGasUsed = 80000000000 + opsman, err := operations.NewManager(ctx, opsCfg) + require.NoError(t, err) + + // Setting Genesis + log.Info("###################") + log.Info("# Setting Genesis #") + log.Info("###################") + genesisActions := vectors.GenerateGenesisActions(testCase.Genesis) + require.NoError(t, opsman.SetGenesis(genesisConfig.Genesis.GenesisBlockNum, genesisActions)) + require.NoError(t, opsman.SetForkID(genesisConfig.Genesis.GenesisBlockNum, forkID6)) + require.NoError(t, opsman.Setup()) + + // Check initial root + log.Info("################################") + log.Info("# Verifying initial state root #") + log.Info("################################") + actualOldStateRoot, err := opsman.State().GetLastStateRoot(ctx, nil) + require.NoError(t, err) + require.Equal(t, testCase.ExpectedOldStateRoot, actualOldStateRoot.Hex()) + decodedData, err := hex.DecodeHex(testCase.BatchL2Data) + require.NoError(t, err) + _, txBytes, _, err := state.DecodeTxs(decodedData, forkID6) + forcedBatch, err := sendForcedBatchForVector(t, txBytes, opsman) + require.NoError(t, err) + actualNewStateRoot := forcedBatch.StateRoot + isClosed, err := opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) + 
require.NoError(t, err) + + // wait until is closed + for !isClosed { + time.Sleep(1 * time.Second) + isClosed, err = opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) + require.NoError(t, err) + } + + log.Info("#######################") + log.Info("# Verifying new leafs #") + log.Info("#######################") + merkleTree := opsman.State().GetTree() + for _, expectedNewLeaf := range testCase.ExpectedNewLeafs { + if expectedNewLeaf.IsSmartContract { + log.Info("Smart Contract Address: ", expectedNewLeaf.Address) + } else { + log.Info("Account Address: ", expectedNewLeaf.Address) + } + log.Info("Verifying Balance...") + actualBalance, err := merkleTree.GetBalance(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewLeaf.Balance.String(), actualBalance.String()) + + log.Info("Verifying Nonce...") + actualNonce, err := merkleTree.GetNonce(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewLeaf.Nonce, actualNonce.String()) + if expectedNewLeaf.IsSmartContract { + log.Info("Verifying Storage...") + for positionHex, expectedNewStorageHex := range expectedNewLeaf.Storage { + position, ok := big.NewInt(0).SetString(positionHex[2:], 16) + require.True(t, ok) + expectedNewStorage, ok := big.NewInt(0).SetString(expectedNewStorageHex[2:], 16) + require.True(t, ok) + actualStorage, err := merkleTree.GetStorageAt(ctx, common.HexToAddress(expectedNewLeaf.Address), position, actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewStorage, actualStorage) + } + + log.Info("Verifying HashBytecode...") + actualHashByteCode, err := merkleTree.GetCodeHash(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewLeaf.HashBytecode, common.BytesToHash(actualHashByteCode).String()) + } + } + return + }) + + 
return nil + } + return nil + }) + require.NoError(t, err) +} diff --git a/test/e2e/forced_batches_vector_group3_test.go b/test/e2e/forced_batches_vector_group3_test.go new file mode 100644 index 0000000000..ec01446114 --- /dev/null +++ b/test/e2e/forced_batches_vector_group3_test.go @@ -0,0 +1,134 @@ +package e2e + +import ( + "context" + "math/big" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/config" + "github.com/0xPolygonHermez/zkevm-node/hex" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/0xPolygonHermez/zkevm-node/test/operations" + "github.com/0xPolygonHermez/zkevm-node/test/vectors" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestForcedBatchesVectorFilesGroup3(t *testing.T) { + + if testing.Short() { + t.Skip() + } + vectorFilesDir := "./../vectors/src/state-transition/forced-tx/group3" + ctx := context.Background() + genesisFileAsStr, err := config.LoadGenesisFileAsString("../../test/config/test.genesis.config.json") + require.NoError(t, err) + genesisConfig, err := config.LoadGenesisFromJSONString(genesisFileAsStr) + require.NoError(t, err) + err = filepath.Walk(vectorFilesDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && !strings.HasSuffix(info.Name(), "list.json") { + + t.Run(info.Name(), func(t *testing.T) { + + defer func() { + require.NoError(t, operations.Teardown()) + }() + + // Load test vectors + log.Info("=====================================================================") + log.Info(path) + log.Info("=====================================================================") + testCase, err := vectors.LoadStateTransitionTestCaseV2(path) + require.NoError(t, err) + + opsCfg := operations.GetDefaultOperationsConfig() + opsCfg.State.MaxCumulativeGasUsed = 80000000000 + opsman, err := operations.NewManager(ctx, opsCfg) 
+ require.NoError(t, err) + + // Setting Genesis + log.Info("###################") + log.Info("# Setting Genesis #") + log.Info("###################") + genesisActions := vectors.GenerateGenesisActions(testCase.Genesis) + require.NoError(t, opsman.SetGenesis(genesisConfig.Genesis.GenesisBlockNum, genesisActions)) + require.NoError(t, opsman.SetForkID(genesisConfig.Genesis.GenesisBlockNum, forkID6)) + require.NoError(t, opsman.Setup()) + + // Check initial root + log.Info("################################") + log.Info("# Verifying initial state root #") + log.Info("################################") + actualOldStateRoot, err := opsman.State().GetLastStateRoot(ctx, nil) + require.NoError(t, err) + require.Equal(t, testCase.ExpectedOldStateRoot, actualOldStateRoot.Hex()) + decodedData, err := hex.DecodeHex(testCase.BatchL2Data) + require.NoError(t, err) + _, txBytes, _, err := state.DecodeTxs(decodedData, forkID6) + forcedBatch, err := sendForcedBatchForVector(t, txBytes, opsman) + require.NoError(t, err) + actualNewStateRoot := forcedBatch.StateRoot + isClosed, err := opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) + require.NoError(t, err) + + // wait until is closed + for !isClosed { + time.Sleep(1 * time.Second) + isClosed, err = opsman.State().IsBatchClosed(ctx, forcedBatch.BatchNumber, nil) + require.NoError(t, err) + } + + log.Info("#######################") + log.Info("# Verifying new leafs #") + log.Info("#######################") + merkleTree := opsman.State().GetTree() + for _, expectedNewLeaf := range testCase.ExpectedNewLeafs { + if expectedNewLeaf.IsSmartContract { + log.Info("Smart Contract Address: ", expectedNewLeaf.Address) + } else { + log.Info("Account Address: ", expectedNewLeaf.Address) + } + log.Info("Verifying Balance...") + actualBalance, err := merkleTree.GetBalance(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewLeaf.Balance.String(), 
actualBalance.String()) + + log.Info("Verifying Nonce...") + actualNonce, err := merkleTree.GetNonce(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewLeaf.Nonce, actualNonce.String()) + if expectedNewLeaf.IsSmartContract { + log.Info("Verifying Storage...") + for positionHex, expectedNewStorageHex := range expectedNewLeaf.Storage { + position, ok := big.NewInt(0).SetString(positionHex[2:], 16) + require.True(t, ok) + expectedNewStorage, ok := big.NewInt(0).SetString(expectedNewStorageHex[2:], 16) + require.True(t, ok) + actualStorage, err := merkleTree.GetStorageAt(ctx, common.HexToAddress(expectedNewLeaf.Address), position, actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewStorage, actualStorage) + } + + log.Info("Verifying HashBytecode...") + actualHashByteCode, err := merkleTree.GetCodeHash(ctx, common.HexToAddress(expectedNewLeaf.Address), actualNewStateRoot.Bytes()) + require.NoError(t, err) + require.Equal(t, expectedNewLeaf.HashBytecode, common.BytesToHash(actualHashByteCode).String()) + } + } + return + }) + + return nil + } + return nil + }) + require.NoError(t, err) +} diff --git a/test/e2e/gasless_test.go b/test/e2e/gasless_test.go new file mode 100644 index 0000000000..185383789a --- /dev/null +++ b/test/e2e/gasless_test.go @@ -0,0 +1,89 @@ +package e2e + +import ( + "context" + "math/big" + "os/exec" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/test/operations" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +func TestEthTransferGasless(t *testing.T) { + if testing.Short() { + t.Skip() + } + // Edit config + const path = "../../test/config/test.node.config.toml" + require.NoError(t, + exec.Command("sed", 
"-i", "s/DefaultMinGasPriceAllowed = 1000000000/DefaultMinGasPriceAllowed = 0/g", path).Run(), + ) + require.NoError(t, + exec.Command("sed", "-i", "s/EnableL2SuggestedGasPricePolling = true/EnableL2SuggestedGasPricePolling = false/g", path).Run(), + ) + // Undo edit config + defer func() { + require.NoError(t, + exec.Command("sed", "-i", "s/DefaultMinGasPriceAllowed = 0/DefaultMinGasPriceAllowed = 1000000000/g", path).Run(), + ) + require.NoError(t, + exec.Command("sed", "-i", "s/EnableL2SuggestedGasPricePolling = false/EnableL2SuggestedGasPricePolling = true/g", path).Run(), + ) + }() + + ctx := context.Background() + defer func() { require.NoError(t, operations.Teardown()) }() + + err := operations.Teardown() + require.NoError(t, err) + opsCfg := operations.GetDefaultOperationsConfig() + opsCfg.State.MaxCumulativeGasUsed = 80000000000 + opsman, err := operations.NewManager(ctx, opsCfg) + require.NoError(t, err) + err = opsman.Setup() + require.NoError(t, err) + time.Sleep(5 * time.Second) + // Load account with balance on local genesis + auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL2ChainID) + require.NoError(t, err) + // Load eth client + client, err := ethclient.Dial(operations.DefaultL2NetworkURL) + require.NoError(t, err) + // Send txs + nTxs := 10 + amount := big.NewInt(0) + toAddress := common.HexToAddress("0x70997970C51812dc3A010C7d01b50e0d17dc79C8") + senderBalance, err := client.BalanceAt(ctx, auth.From, nil) + require.NoError(t, err) + senderNonce, err := client.PendingNonceAt(ctx, auth.From) + require.NoError(t, err) + + log.Infof("Receiver Addr: %v", toAddress.String()) + log.Infof("Sender Addr: %v", auth.From.String()) + log.Infof("Sender Balance: %v", senderBalance.String()) + log.Infof("Sender Nonce: %v", senderNonce) + + gasLimit, err := client.EstimateGas(ctx, ethereum.CallMsg{From: auth.From, To: &toAddress, Value: amount}) + require.NoError(t, err) + + // Force gas price to be 0 + gasPrice := 
big.NewInt(0) + nonce, err := client.PendingNonceAt(ctx, auth.From) + require.NoError(t, err) + + txs := make([]*types.Transaction, 0, nTxs) + for i := 0; i < nTxs; i++ { + tx := types.NewTransaction(nonce+uint64(i), toAddress, amount, gasLimit, gasPrice, nil) + txs = append(txs, tx) + } + + _, err = operations.ApplyL2Txs(ctx, txs, auth, client, operations.VerifiedConfirmationLevel) + require.NoError(t, err) +} diff --git a/test/e2e/jsonrpc2_test.go b/test/e2e/jsonrpc2_test.go index 69b83ee3ef..64857adf65 100644 --- a/test/e2e/jsonrpc2_test.go +++ b/test/e2e/jsonrpc2_test.go @@ -486,11 +486,11 @@ func TestWebSocketsConcurrentWrites(t *testing.T) { log.Infof("Network %s", network.Name) wsConn, _, err := websocket.DefaultDialer.Dial(network.WebSocketURL, nil) + require.NoError(t, err) defer func() { err := wsConn.Close() require.NoError(t, err) }() - require.NoError(t, err) wg := sync.WaitGroup{} wg.Add(msgQty) @@ -527,6 +527,60 @@ func TestWebSocketsConcurrentWrites(t *testing.T) { } } +func TestWebSocketsReadLimit(t *testing.T) { + if testing.Short() { + t.Skip() + } + setup() + defer teardown() + + wsConn, _, err := websocket.DefaultDialer.Dial(operations.DefaultL2NetworkWebSocketURL, nil) + require.NoError(t, err) + defer func() { + err := wsConn.Close() + require.NoError(t, err) + }() + + jReq := make([]byte, 104857601) + err = wsConn.WriteMessage(websocket.TextMessage, jReq) + require.NoError(t, err) + + _, _, err = wsConn.ReadMessage() + require.NotNil(t, err) + require.Equal(t, websocket.CloseMessageTooBig, err.(*websocket.CloseError).Code) +} + +func TestEstimateTxWithDataBiggerThanMaxAllowed(t *testing.T) { + if testing.Short() { + t.Skip() + } + setup() + defer teardown() + + ctx := context.Background() + + ethereumClient, err := ethclient.Dial(operations.DefaultL2NetworkURL) + require.NoError(t, err) + + sender := common.HexToAddress(operations.DefaultSequencerAddress) + receiver := common.HexToAddress(operations.DefaultSequencerAddress) + + balance, err 
:= ethereumClient.BalanceAt(ctx, sender, nil) + require.NoError(t, err) + + _, err = ethereumClient.EstimateGas(ctx, ethereum.CallMsg{ + From: sender, + To: &receiver, + Value: new(big.Int), + Gas: balance.Uint64(), + GasPrice: new(big.Int).SetUint64(0), + Data: make([]byte, 120000), // large data + }) + rpcErr := err.(rpc.Error) + assert.Equal(t, -32000, rpcErr.ErrorCode()) + assert.Equal(t, "batch_l2_data is invalid", rpcErr.Error()) +} + // waitTimeout waits for the waitgroup for the specified max timeout. // Returns true if waiting timed out. func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { diff --git a/test/e2e/pool_test.go b/test/e2e/pool_test.go index ded7a5d0d5..f8df240943 100644 --- a/test/e2e/pool_test.go +++ b/test/e2e/pool_test.go @@ -4,8 +4,10 @@ import ( "context" "math/big" "testing" + "time" "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/HasOpCode" "github.com/0xPolygonHermez/zkevm-node/test/operations" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -194,3 +196,61 @@ func TestPendingNonce(t *testing.T) { } } } + +func TestHasOpCode(t *testing.T) { + if testing.Short() { + t.Skip() + } + + var err error + err = operations.Teardown() + require.NoError(t, err) + + defer func() { require.NoError(t, operations.Teardown()) }() + + ctx := context.Background() + opsCfg := operations.GetDefaultOperationsConfig() + opsMan, err := operations.NewManager(ctx, opsCfg) + require.NoError(t, err) + err = opsMan.Setup() + require.NoError(t, err) + + client := operations.MustGetClient(operations.DefaultL2NetworkURL) + auth := operations.MustGetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL2ChainID) + + time.Sleep(2 * time.Second) + + log.Debug("deploying HasOpCode SC") + _, scTx, sc, err := HasOpCode.DeployHasOpCode(auth, client) + require.NoError(t, err) + + logTx(scTx) + err = operations.WaitTxToBeMined(ctx, client, scTx, 
operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + log.Debug("calling opGasPrice") + scCallOpGasPrice, err := sc.OpGasPrice(auth) + require.NoError(t, err) + + logTx(scCallOpGasPrice) + err = operations.WaitTxToBeMined(ctx, client, scCallOpGasPrice, operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + log.Debug("get tx EGPLog") + egpLog, err := opsMan.State().PostgresStorage.GetTransactionEGPLogByHash(ctx, scCallOpGasPrice.Hash(), nil) + require.NoError(t, err) + require.Equal(t, egpLog.GasPriceOC, true) + + log.Debug("calling opBalance") + scCallBalance, err := sc.OpBalance(auth) + require.NoError(t, err) + + logTx(scCallBalance) + err = operations.WaitTxToBeMined(ctx, client, scCallBalance, operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + log.Debug("get tx EGPLog") + egpLog, err = opsMan.State().PostgresStorage.GetTransactionEGPLogByHash(ctx, scCallBalance.Hash(), nil) + require.NoError(t, err) + require.Equal(t, egpLog.BalanceOC, true) +} diff --git a/test/e2e/preEIP155_test.go b/test/e2e/preEIP155_test.go index 57ce1a536f..4f2a894bf7 100644 --- a/test/e2e/preEIP155_test.go +++ b/test/e2e/preEIP155_test.go @@ -2,6 +2,8 @@ package e2e import ( "context" + "crypto/ecdsa" + "math/big" "strings" "testing" "time" @@ -88,3 +90,67 @@ func TestPreEIP155Tx(t *testing.T) { } } } + +func TestFakeEIP155With_V_As35(t *testing.T) { + if testing.Short() { + t.Skip() + } + + var err error + err = operations.Teardown() + require.NoError(t, err) + + defer func() { + require.NoError(t, operations.Teardown()) + }() + + ctx := context.Background() + opsCfg := operations.GetDefaultOperationsConfig() + opsMan, err := operations.NewManager(ctx, opsCfg) + require.NoError(t, err) + err = opsMan.Setup() + require.NoError(t, err) + + for _, network := range networks { + log.Debugf(network.Name) + client := operations.MustGetClient(network.URL) + + privateKey, err := 
crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80") + require.NoError(t, err) + publicKey := privateKey.Public() + publicKeyECDSA, _ := publicKey.(*ecdsa.PublicKey) + fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA) + nonce, err := client.PendingNonceAt(context.Background(), fromAddress) + require.NoError(t, err) + + toAddress := common.HexToAddress("0x1234") + tx := &types.LegacyTx{ + Nonce: nonce, + To: &toAddress, + Value: big.NewInt(0), + Gas: uint64(21000), + + GasPrice: big.NewInt(10000000000000), + Data: nil, + } + + // set the chainID to 0 to fake a pre EIP155 tx + signer := types.NewEIP155Signer(big.NewInt(0)) + + // sign tx + h := signer.Hash(types.NewTx(tx)) + sig, err := crypto.Sign(h[:], privateKey) + require.NoError(t, err) + r, s, _, err := signer.SignatureValues(types.NewTx(tx), sig) + require.NoError(t, err) + + // set the value V of the signature to 35 + tx.V = big.NewInt(35) + tx.R = r + tx.S = s + + signedTx := types.NewTx(tx) + err = client.SendTransaction(context.Background(), signedTx) + require.Equal(t, "invalid sender", err.Error()) + } +} diff --git a/test/e2e/shared.go b/test/e2e/shared.go index 9e464d7dd1..e8ac83fdd6 100644 --- a/test/e2e/shared.go +++ b/test/e2e/shared.go @@ -5,6 +5,12 @@ import ( "context" "fmt" "math/big" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm" + "github.com/0xPolygonHermez/zkevm-node/test/constants" + "github.com/stretchr/testify/require" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" @@ -19,7 +25,8 @@ import ( const ( invalidParamsErrorCode = -32602 toAddressHex = "0x4d5Cf5032B2a844602278b01199ED191A86c93ff" - gerFinalityBlocks = uint64(250) + gerFinalityBlocks = uint64(2500) + forkID6 = 6 ) var ( @@ -123,3 +130,98 @@ func logTx(tx *ethTypes.Transaction) { //log.Debugf("RLP: ", hex.EncodeToHex(b)) log.Debugf("********************") } + +func sendForcedBatchForVector(t 
*testing.T, txs []byte, opsman *operations.Manager) (*state.Batch, error) { + ctx := context.Background() + st := opsman.State() + // Connect to ethereum node + ethClient, err := ethclient.Dial(operations.DefaultL1NetworkURL) + require.NoError(t, err) + + // Create smc client + zkEvmAddr := common.HexToAddress(operations.DefaultL1ZkEVMSmartContract) + zkEvm, err := polygonzkevm.NewPolygonzkevm(zkEvmAddr, ethClient) + require.NoError(t, err) + + auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL1ChainID) + require.NoError(t, err) + + log.Info("Using address: ", auth.From) + num, err := zkEvm.LastForceBatch(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + log.Info("Number of forceBatches in the smc: ", num) + + // Get tip + tip, err := zkEvm.GetForcedBatchFee(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + + disallowed, err := zkEvm.IsForcedBatchDisallowed(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + if disallowed { + tx, err := zkEvm.ActivateForceBatches(auth) + require.NoError(t, err) + err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + } + + // Send forceBatch + tx, err := zkEvm.ForceBatch(auth, txs, tip) + require.NoError(t, err) + + log.Info("Forced Batch Submit to L1 TxHash: ", tx.Hash()) + time.Sleep(1 * time.Second) + + err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + currentBlock, err := ethClient.BlockByNumber(ctx, nil) + require.NoError(t, err) + log.Debug("currentBlock.Time(): ", currentBlock.Time()) + + query := ethereum.FilterQuery{ + FromBlock: currentBlock.Number(), + Addresses: []common.Address{zkEvmAddr}, + } + logs, err := ethClient.FilterLogs(ctx, query) + require.NoError(t, err) + + var forcedBatch *state.Batch + for _, vLog := range logs { + if vLog.Topics[0] != constants.ForcedBatchSignatureHash { + logs, err = 
ethClient.FilterLogs(ctx, query) + require.NoError(t, err) + continue + } + fb, err := zkEvm.ParseForceBatch(vLog) + if err != nil { + log.Errorf("failed to parse force batch log event, err: ", err) + } + log.Debugf("log decoded: %+v", fb) + ger := fb.LastGlobalExitRoot + log.Info("GlobalExitRoot: ", ger) + log.Info("Transactions: ", common.Bytes2Hex(fb.Transactions)) + fullBlock, err := ethClient.BlockByHash(ctx, vLog.BlockHash) + if err != nil { + log.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) + return nil, err + } + log.Info("MinForcedTimestamp: ", fullBlock.Time()) + forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) + for err == state.ErrStateNotSynchronized { + time.Sleep(1 * time.Second) + forcedBatch, err = st.GetBatchByForcedBatchNum(ctx, fb.ForceBatchNum, nil) + } + require.NoError(t, err) + require.NotNil(t, forcedBatch) + + log.Info("Waiting Forced Batch to be virtualized ...") + err = operations.WaitBatchToBeVirtualized(forcedBatch.BatchNumber, 4*time.Minute, st) + require.NoError(t, err) + + log.Info("Waiting Forced Batch to be consolidated ...") + err = operations.WaitBatchToBeConsolidated(forcedBatch.BatchNumber, 4*time.Minute, st) + require.NoError(t, err) + } + + return forcedBatch, nil +} diff --git a/test/e2e/state_test.go b/test/e2e/state_test.go index c5771d979d..8ffb6f028d 100644 --- a/test/e2e/state_test.go +++ b/test/e2e/state_test.go @@ -7,6 +7,7 @@ import ( "strconv" "testing" + "github.com/0xPolygonHermez/zkevm-node/config" "github.com/0xPolygonHermez/zkevm-node/encoding" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/test/operations" @@ -30,6 +31,10 @@ func TestStateTransition(t *testing.T) { // Load test vectors testCases, err := vectors.LoadStateTransitionTestCases("./../vectors/src/state-transition/no-data/general.json") require.NoError(t, err) + genesisFileAsStr, err := 
config.LoadGenesisFileAsString("../config/test.genesis.config.json") + require.NoError(t, err) + genesisConfig, err := config.LoadGenesisFromJSONString(genesisFileAsStr) + require.NoError(t, err) for _, testCase := range testCases { t.Run(testCase.Description, func(t *testing.T) { @@ -55,7 +60,7 @@ func TestStateTransition(t *testing.T) { for _, gacc := range testCase.GenesisAccounts { genesisAccounts[gacc.Address] = gacc.Balance.Int } - require.NoError(t, opsman.SetGenesisAccountsBalance(genesisAccounts)) + require.NoError(t, opsman.SetGenesisAccountsBalance(genesisConfig.Genesis.GenesisBlockNum, genesisAccounts)) // Check initial root require.NoError(t, opsman.CheckVirtualRoot(testCase.ExpectedOldRoot)) diff --git a/test/e2e/uniswap_test.go b/test/e2e/uniswap_test.go index cfb58d7fca..d5ef999961 100644 --- a/test/e2e/uniswap_test.go +++ b/test/e2e/uniswap_test.go @@ -36,7 +36,7 @@ func TestUniswap(t *testing.T) { opsCfg := &operations.Config{ State: &state.Config{ - MaxCumulativeGasUsed: cfg.Sequencer.MaxCumulativeGasUsed, + MaxCumulativeGasUsed: cfg.State.Batch.Constraints.MaxCumulativeGasUsed, }, SequenceSender: &operations.SequenceSenderConfig{ SenderAddress: "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", diff --git a/test/operations/manager.go b/test/operations/manager.go index 446406378c..eb6c0ae5c1 100644 --- a/test/operations/manager.go +++ b/test/operations/manager.go @@ -142,7 +142,7 @@ func (m *Manager) CheckConsolidatedRoot(expectedRoot string) error { } // SetGenesisAccountsBalance creates the genesis block in the state. 
-func (m *Manager) SetGenesisAccountsBalance(genesisAccounts map[string]big.Int) error { +func (m *Manager) SetGenesisAccountsBalance(genesisBlockNumber uint64, genesisAccounts map[string]big.Int) error { var genesisActions []*state.GenesisAction for address, balanceValue := range genesisAccounts { action := &state.GenesisAction{ @@ -153,12 +153,12 @@ func (m *Manager) SetGenesisAccountsBalance(genesisAccounts map[string]big.Int) genesisActions = append(genesisActions, action) } - return m.SetGenesis(genesisActions) + return m.SetGenesis(genesisBlockNumber, genesisActions) } -func (m *Manager) SetGenesis(genesisActions []*state.GenesisAction) error { +func (m *Manager) SetGenesis(genesisBlockNumber uint64, genesisActions []*state.GenesisAction) error { genesisBlock := state.Block{ - BlockNumber: 303, + BlockNumber: genesisBlockNumber, BlockHash: state.ZeroHash, ParentHash: state.ZeroHash, ReceivedAt: time.Now(), @@ -183,7 +183,7 @@ func (m *Manager) SetGenesis(genesisActions []*state.GenesisAction) error { } // SetForkID sets the initial forkID in db for testing purposes -func (m *Manager) SetForkID(forkID uint64) error { +func (m *Manager) SetForkID(blockNum uint64, forkID uint64) error { dbTx, err := m.st.BeginStateTransaction(m.ctx) if err != nil { return err @@ -195,7 +195,7 @@ func (m *Manager) SetForkID(forkID uint64) error { ToBatchNumber: math.MaxUint64, ForkId: forkID, Version: "forkID", - BlockNumber: 303, + BlockNumber: blockNum, } err = m.st.AddForkIDInterval(m.ctx, fID, dbTx) @@ -474,16 +474,16 @@ func initState(maxCumulativeGasUsed uint64) (*state.State, error) { return nil, err } + stateCfg := state.Config{ + MaxCumulativeGasUsed: maxCumulativeGasUsed, + } + ctx := context.Background() - stateDb := state.NewPostgresStorage(sqlDB) + stateDb := state.NewPostgresStorage(stateCfg, sqlDB) executorClient, _, _ := executor.NewExecutorClient(ctx, executorConfig) stateDBClient, _, _ := merkletree.NewMTDBServiceClient(ctx, merkleTreeConfig) stateTree := 
merkletree.NewStateTree(stateDBClient) - stateCfg := state.Config{ - MaxCumulativeGasUsed: maxCumulativeGasUsed, - } - eventStorage, err := nileventstorage.NewNilEventStorage() if err != nil { return nil, err diff --git a/test/operations/token.go b/test/operations/token.go index 6b12fbef7d..27d8a26bd5 100644 --- a/test/operations/token.go +++ b/test/operations/token.go @@ -84,7 +84,7 @@ type TokenFilterer struct { // TokenSession is an auto generated Go binding around an Ethereum contract, // with pre-set call and transact options. type TokenSession struct { - Contract *Token // Generic contract binding to set the session for + Contract *Token // Generic contract binding to set the session for CallOpts bind.CallOpts // Call options to use throughout this session TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session } @@ -92,15 +92,15 @@ type TokenSession struct { // TokenCallerSession is an auto generated read-only Go binding around an Ethereum contract, // with pre-set call options. type TokenCallerSession struct { - Contract *TokenCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session + Contract *TokenCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session } // TokenTransactorSession is an auto generated write-only Go binding around an Ethereum contract, // with pre-set transact options. type TokenTransactorSession struct { - Contract *TokenTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session + Contract *TokenTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session } // TokenRaw is an auto generated low-level Go binding around an Ethereum contract. 
diff --git a/test/scripts/uniswap/main.go b/test/scripts/uniswap/main.go index fe740d532a..fe8b660284 100644 --- a/test/scripts/uniswap/main.go +++ b/test/scripts/uniswap/main.go @@ -3,275 +3,37 @@ package main import ( "context" "fmt" - "math/big" - "strings" - "time" - "github.com/0xPolygonHermez/zkevm-node/encoding" "github.com/0xPolygonHermez/zkevm-node/log" - ERC20 "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" - WETH "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/WETH" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/core/UniswapV2Factory" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/core/UniswapV2Pair" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/interface/UniswapInterfaceMulticall" - "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/periphery/UniswapV2Router02" "github.com/0xPolygonHermez/zkevm-node/test/operations" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" + uniswap "github.com/0xPolygonHermez/zkevm-node/test/scripts/uniswap/pkg" "github.com/ethereum/go-ethereum/ethclient" ) const ( // if you want to test using goerli network // replace this by your goerli infura url - networkURL = "http://localhost:8123" + //networkURL = "http://localhost:8123" + networkURL = "http://localhost:8545" // replace this by your account private key - pk = "0xdfd01798f92667dbf91df722434e8fbe96af0211d4d1b82bbbbc8f1def7a814f" - txTimeout = 60 * time.Second + //pk = "0xdfd01798f92667dbf91df722434e8fbe96af0211d4d1b82bbbbc8f1def7a814f" + pk = operations.DefaultSequencerPrivateKey ) func main() { ctx := context.Background() log.Infof("connecting to %v", networkURL) client, err := ethclient.Dial(networkURL) - chkErr(err) + uniswap.ChkErr(err) log.Infof("connected") chainID, err := client.ChainID(ctx) - chkErr(err) + 
uniswap.ChkErr(err) log.Infof("chainID: %v", chainID) - auth := getAuth(ctx, client, pk) + auth := uniswap.GetAuth(ctx, client, pk) fmt.Println() - balance, err := client.BalanceAt(ctx, auth.From, nil) - chkErr(err) - log.Debugf("ETH Balance for %v: %v", auth.From, balance) - // Deploy ERC20 Tokens to be swapped - aCoinAddr, aCoin := deployERC20(auth, client, "A COIN", "ACO") - fmt.Println() - bCoinAddr, bCoin := deployERC20(auth, client, "B COIN", "BCO") - fmt.Println() - cCoinAddr, cCoin := deployERC20(auth, client, "C COIN", "CCO") - fmt.Println() - // Deploy wETH Token, it's required by uniswap to swap ETH by tokens - log.Debugf("Deploying wEth SC") - wEthAddr, tx, wethSC, err := WETH.DeployWETH(auth, client) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) - log.Debugf("wEth SC tx: %v", tx.Hash().Hex()) - log.Debugf("wEth SC addr: %v", wEthAddr.Hex()) - fmt.Println() - // Deploy Uniswap Factory - log.Debugf("Deploying Uniswap Factory") - factoryAddr, tx, factory, err := UniswapV2Factory.DeployUniswapV2Factory(auth, client, auth.From) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) - log.Debugf("Uniswap Factory SC tx: %v", tx.Hash().Hex()) - log.Debugf("Uniswap Factory SC addr: %v", factoryAddr.Hex()) - fmt.Println() - // Deploy Uniswap Router - log.Debugf("Deploying Uniswap Router") - routerAddr, tx, router, err := UniswapV2Router02.DeployUniswapV2Router02(auth, client, factoryAddr, wEthAddr) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) - log.Debugf("Uniswap Router SC tx: %v", tx.Hash().Hex()) - log.Debugf("Uniswap Router SC addr: %v", routerAddr.Hex()) - fmt.Println() - // Deploy Uniswap Interface Multicall - log.Debugf("Deploying Uniswap Multicall") - multicallAddr, tx, _, err := UniswapInterfaceMulticall.DeployUniswapInterfaceMulticall(auth, client) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - 
chkErr(err) - log.Debugf("Uniswap Interface Multicall SC tx: %v", tx.Hash().Hex()) - log.Debugf("Uniswap Interface Multicall SC addr: %v", multicallAddr.Hex()) - fmt.Println() - // Mint balance to tokens - log.Debugf("Minting ERC20 Tokens") - aMintAmount := "1000000000000000000000" - tx = mintERC20(auth, client, aCoin, aMintAmount) - log.Debugf("Mint A Coin tx: %v", tx.Hash().Hex()) - fmt.Println() - bMintAmount := "1000000000000000000000" - tx = mintERC20(auth, client, bCoin, bMintAmount) - log.Debugf("Mint B Coin tx: %v", tx.Hash().Hex()) - fmt.Println() - cMintAmount := "1000000000000000000000" - tx = mintERC20(auth, client, cCoin, cMintAmount) - log.Debugf("Mint C Coin tx: %v", tx.Hash().Hex()) - fmt.Println() - // wrapping eth - wethDepositoAmount := "20000000000000000" - log.Debugf("Depositing %v ETH for account %v on token wEth", wethDepositoAmount, auth.From) - wAuth := getAuth(ctx, client, pk) - wAuth.Value, _ = big.NewInt(0).SetString(wethDepositoAmount, encoding.Base10) - tx, err = wethSC.Deposit(auth) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) - value, err := aCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before allowance aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before allowance bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before allowance cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - // Add allowance - approveERC20(auth, client, aCoin, routerAddr, aMintAmount) - fmt.Println() - approveERC20(auth, client, bCoin, routerAddr, bMintAmount) - fmt.Println() - approveERC20(auth, client, cCoin, routerAddr, cMintAmount) - fmt.Println() - approveERC20(auth, client, wethSC, routerAddr, wethDepositoAmount) - fmt.Println() - const liquidityAmount = "10000000000000000000" - value, err = 
aCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before adding liquidity A, B aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before adding liquidity A, B bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before adding liquidity A, B cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - // Add liquidity to the pool - tx = addLiquidity(auth, client, router, aCoinAddr, bCoinAddr, liquidityAmount) - log.Debugf("Add Liquidity to Pair A <-> B tx: %v", tx.Hash().Hex()) - fmt.Println() - value, err = aCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before adding liquidity B, C aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before adding liquidity B, C bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before adding liquidity B, C cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - tx = addLiquidity(auth, client, router, bCoinAddr, cCoinAddr, liquidityAmount) - log.Debugf("Add Liquidity to Pair B <-> C tx: %v", tx.Hash().Hex()) - fmt.Println() - // Execute swaps - const swapExactAmountInNumber = 1000000000000000000 - swapExactAmountIn := big.NewInt(swapExactAmountInNumber) - value, err = aCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before first swap aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before first swap bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("before first swap cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - log.Debugf("Swaping tokens from A <-> B") - 
swapExactTokensForTokens(auth, client, factory, router, aCoinAddr, bCoinAddr, swapExactAmountIn) - fmt.Println() - value, err = aCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("after first swap aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("after first swap bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) - chkErr(err) - log.Debugf("after first swap cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) - log.Debugf("Swaping tokens from B <-> C") - swapExactTokensForTokens(auth, client, factory, router, bCoinAddr, cCoinAddr, swapExactAmountIn) - fmt.Println() -} -func swapExactTokensForTokens(auth *bind.TransactOpts, client *ethclient.Client, - factory *UniswapV2Factory.UniswapV2Factory, router *UniswapV2Router02.UniswapV2Router02, - tokenA, tokenB common.Address, exactAmountIn *big.Int) { - ctx := context.Background() - logPrefix := fmt.Sprintf("swapExactTokensForTokens %v <-> %v", tokenA.Hex(), tokenB.Hex()) - pairAddr, err := factory.GetPair(nil, tokenA, tokenB) - chkErr(err) - log.Debug(logPrefix, " pair: ", pairAddr.Hex()) - pairSC, err := UniswapV2Pair.NewUniswapV2Pair(pairAddr, client) - chkErr(err) - pairReserves, err := pairSC.GetReserves(nil) - chkErr(err) - log.Debug(logPrefix, " reserves 0: ", pairReserves.Reserve0, " 1: ", pairReserves.Reserve1, " Block Timestamp: ", pairReserves.BlockTimestampLast) - amountOut, err := router.GetAmountOut(nil, exactAmountIn, pairReserves.Reserve0, pairReserves.Reserve1) - chkErr(err) - log.Debug(logPrefix, " exactAmountIn: ", exactAmountIn, " amountOut: ", amountOut) - tx, err := router.SwapExactTokensForTokens(auth, exactAmountIn, amountOut, []common.Address{tokenA, tokenB}, auth.From, getDeadline()) - chkErr(err) - log.Debug(logPrefix, " tx: ", tx.Hash().Hex()) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) -} -func 
getAuth(ctx context.Context, client *ethclient.Client, pkHex string) *bind.TransactOpts { - chainID, err := client.ChainID(ctx) - chkErr(err) - privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(pkHex, "0x")) - chkErr(err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) - chkErr(err) - return auth -} -func deployERC20(auth *bind.TransactOpts, client *ethclient.Client, name, symbol string) (common.Address, *ERC20.ERC20) { - ctx := context.Background() - log.Debugf("Deploying ERC20 Token: [%v]%v", symbol, name) - addr, tx, instance, err := ERC20.DeployERC20(auth, client, name, symbol) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) - log.Debugf("%v SC tx: %v", name, tx.Hash().Hex()) - log.Debugf("%v SC addr: %v", name, addr.Hex()) - return addr, instance -} -func mintERC20(auth *bind.TransactOpts, client *ethclient.Client, erc20sc *ERC20.ERC20, amount string) *types.Transaction { - ctx := context.Background() - name, err := erc20sc.Name(nil) - chkErr(err) - log.Debugf("Minting %v tokens for account %v on token %v", amount, auth.From, name) - mintAmount, _ := big.NewInt(0).SetString(amount, encoding.Base10) - tx, err := erc20sc.Mint(auth, mintAmount) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) - return tx -} -func approveERC20(auth *bind.TransactOpts, client *ethclient.Client, - sc interface { - Name(opts *bind.CallOpts) (string, error) - Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) - }, - routerAddr common.Address, - amount string) { - ctx := context.Background() - name, err := sc.Name(nil) - chkErr(err) - a, _ := big.NewInt(0).SetString(amount, encoding.Base10) - log.Debugf("Approving %v tokens to be used by the router for %v on behalf of account %v", a.Text(encoding.Base10), name, auth.From) - tx, err := sc.Approve(auth, routerAddr, a) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, 
client, tx, txTimeout) - chkErr(err) - log.Debugf("Approval %v tx: %v", name, tx.Hash().Hex()) -} -func addLiquidity(auth *bind.TransactOpts, client *ethclient.Client, router *UniswapV2Router02.UniswapV2Router02, tokenA, tokenB common.Address, amount string) *types.Transaction { - ctx := context.Background() - a, _ := big.NewInt(0).SetString(amount, encoding.Base10) - log.Debugf("Adding liquidity(%v) for tokens A: %v, B:%v, Recipient: %v", amount, tokenA.Hex(), tokenB.Hex(), auth.From.Hex()) - tx, err := router.AddLiquidity(auth, tokenA, tokenB, a, a, a, a, auth.From, getDeadline()) - chkErr(err) - err = operations.WaitTxToBeMined(ctx, client, tx, txTimeout) - chkErr(err) - return tx -} -func getDeadline() *big.Int { - const deadLinelimit = 5 * time.Minute - return big.NewInt(time.Now().UTC().Add(deadLinelimit).Unix()) -} -func chkErr(err error) { - if err != nil { - log.Fatal(err) + deployments := uniswap.DeployContractsAndAddLiquidity(client, auth) + for i := 0; i < 5; i++ { + uniswap.SwapTokens(client, auth, deployments) } + } diff --git a/test/scripts/uniswap/pkg/setup.go b/test/scripts/uniswap/pkg/setup.go new file mode 100644 index 0000000000..93d0253272 --- /dev/null +++ b/test/scripts/uniswap/pkg/setup.go @@ -0,0 +1,237 @@ +package pkg + +import ( + "context" + "fmt" + "math/big" + "strings" + "time" + + "github.com/0xPolygonHermez/zkevm-node/encoding" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/WETH" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/core/UniswapV2Factory" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/interface/UniswapInterfaceMulticall" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/periphery/UniswapV2Router02" + "github.com/0xPolygonHermez/zkevm-node/test/operations" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" +) + +const ( + txTimeout = 60 * time.Second +) + +var ( + executedTransctionsCount uint64 = 0 +) + +func DeployContractsAndAddLiquidity(client *ethclient.Client, auth *bind.TransactOpts) Deployments { + ctx := context.Background() + fmt.Println() + balance, err := client.BalanceAt(ctx, auth.From, nil) + ChkErr(err) + log.Debugf("ETH Balance for %v: %v", auth.From, balance) + // Deploy ERC20 Tokens to be swapped + aCoinAddr, aCoin := deployERC20(auth, client, "A COIN", "ACO") + fmt.Println() + bCoinAddr, bCoin := deployERC20(auth, client, "B COIN", "BCO") + fmt.Println() + cCoinAddr, cCoin := deployERC20(auth, client, "C COIN", "CCO") + fmt.Println() + // Deploy wETH Token, it's required by uniswap to swap ETH by tokens + log.Debugf("Deploying wEth SC") + wEthAddr, tx, wethSC, err := WETH.DeployWETH(auth, client) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + log.Debugf("wEth SC tx: %v", tx.Hash().Hex()) + log.Debugf("wEth SC addr: %v", wEthAddr.Hex()) + fmt.Println() + // Deploy Uniswap Factory + log.Debugf("Deploying Uniswap Factory") + factoryAddr, tx, factory, err := UniswapV2Factory.DeployUniswapV2Factory(auth, client, auth.From) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + log.Debugf("Uniswap Factory SC tx: %v", tx.Hash().Hex()) + log.Debugf("Uniswap Factory SC addr: %v", factoryAddr.Hex()) + fmt.Println() + // Deploy Uniswap Router + log.Debugf("Deploying Uniswap Router") + routerAddr, tx, router, err := UniswapV2Router02.DeployUniswapV2Router02(auth, client, factoryAddr, wEthAddr) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + log.Debugf("Uniswap Router SC tx: %v", tx.Hash().Hex()) + log.Debugf("Uniswap Router SC addr: %v", routerAddr.Hex()) + fmt.Println() + // Deploy Uniswap Interface Multicall + 
log.Debugf("Deploying Uniswap Multicall") + multicallAddr, tx, _, err := UniswapInterfaceMulticall.DeployUniswapInterfaceMulticall(auth, client) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + log.Debugf("Uniswap Interface Multicall SC tx: %v", tx.Hash().Hex()) + log.Debugf("Uniswap Interface Multicall SC addr: %v", multicallAddr.Hex()) + fmt.Println() + // Mint balance to tokens + log.Debugf("Minting ERC20 Tokens") + aMintAmount := "1000000000000000000000" + tx = mintERC20(auth, client, aCoin, aMintAmount) + log.Debugf("Mint A Coin tx: %v", tx.Hash().Hex()) + fmt.Println() + bMintAmount := "1000000000000000000000" + tx = mintERC20(auth, client, bCoin, bMintAmount) + log.Debugf("Mint B Coin tx: %v", tx.Hash().Hex()) + fmt.Println() + cMintAmount := "1000000000000000000000" + tx = mintERC20(auth, client, cCoin, cMintAmount) + log.Debugf("Mint C Coin tx: %v", tx.Hash().Hex()) + fmt.Println() + // wrapping eth + wethDepositoAmount := "0000000000000000" + log.Debugf("Depositing %v ETH for account %v on token wEth", wethDepositoAmount, auth.From) + auth.Value, _ = big.NewInt(0).SetString(wethDepositoAmount, encoding.Base10) + tx, err = wethSC.Deposit(auth) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + value, err := aCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before allowance aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before allowance bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before allowance cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + // Add allowance + approveERC20(auth, client, aCoin, routerAddr, aMintAmount) + fmt.Println() + approveERC20(auth, client, bCoin, routerAddr, bMintAmount) + fmt.Println() + approveERC20(auth, client, cCoin, routerAddr, cMintAmount) + fmt.Println() + 
approveERC20(auth, client, wethSC, routerAddr, wethDepositoAmount) + fmt.Println() + const liquidityAmount = "10000000000000000000" + value, err = aCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before adding liquidity A, B aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before adding liquidity A, B bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before adding liquidity A, B cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + // Add liquidity to the pool + tx = addLiquidity(auth, client, router, aCoinAddr, bCoinAddr, liquidityAmount) + log.Debugf("Add Liquidity to Pair A <-> B tx: %v", tx.Hash().Hex()) + fmt.Println() + value, err = aCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before adding liquidity B, C aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = bCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before adding liquidity B, C bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = cCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before adding liquidity B, C cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + tx = addLiquidity(auth, client, router, bCoinAddr, cCoinAddr, liquidityAmount) + log.Debugf("Add Liquidity to Pair B <-> C tx: %v", tx.Hash().Hex()) + fmt.Println() + + return Deployments{ + ACoin: aCoin, + ACoinAddr: aCoinAddr, + BCoin: bCoin, + BCoinAddr: bCoinAddr, + CCoin: cCoin, + CCoinAddr: cCoinAddr, + Router: router, + Factory: factory, + } +} + +func WaitForTransactionAndIncrementNonce(l2Client *ethclient.Client, auth *bind.TransactOpts, err error, ctx context.Context, tx *types.Transaction) error { + ChkErr(err) + err = operations.WaitTxToBeMined(ctx, l2Client, tx, txTimeout) + ChkErr(err) + executedTransctionsCount++ + auth.Nonce = nil + 
auth.Value = nil + + return err +} + +func GetAuth(ctx context.Context, client *ethclient.Client, pkHex string) *bind.TransactOpts { + chainID, err := client.ChainID(ctx) + ChkErr(err) + privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(pkHex, "0x")) + ChkErr(err) + auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) + ChkErr(err) + senderNonce, err := client.PendingNonceAt(ctx, auth.From) + if err != nil { + panic(err) + } + auth.Nonce = big.NewInt(int64(senderNonce)) + return auth +} + +func ChkErr(err error) { + if err != nil { + log.Fatal(err) + } +} + +func GetExecutedTransactionsCount() uint64 { + return executedTransctionsCount +} + +func deployERC20(auth *bind.TransactOpts, client *ethclient.Client, name, symbol string) (common.Address, *ERC20.ERC20) { + ctx := context.Background() + log.Debugf("Deploying ERC20 Token: [%v]%v", symbol, name) + addr, tx, instance, err := ERC20.DeployERC20(auth, client, name, symbol) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + log.Debugf("%v SC tx: %v", name, tx.Hash().Hex()) + log.Debugf("%v SC addr: %v", name, addr.Hex()) + return addr, instance +} + +func mintERC20(auth *bind.TransactOpts, client *ethclient.Client, erc20sc *ERC20.ERC20, amount string) *types.Transaction { + ctx := context.Background() + name, err := erc20sc.Name(nil) + ChkErr(err) + log.Debugf("Minting %v tokens for account %v on token %v", amount, auth.From, name) + mintAmount, _ := big.NewInt(0).SetString(amount, encoding.Base10) + tx, err := erc20sc.Mint(auth, mintAmount) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + return tx +} + +func approveERC20(auth *bind.TransactOpts, client *ethclient.Client, + sc interface { + Name(opts *bind.CallOpts) (string, error) + Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) + }, + routerAddr common.Address, + amount string) { + ctx := context.Background() + name, err := sc.Name(nil) + 
ChkErr(err) + a, _ := big.NewInt(0).SetString(amount, encoding.Base10) + log.Debugf("Approving %v tokens to be used by the router for %v on behalf of account %v", a.Text(encoding.Base10), name, auth.From) + tx, err := sc.Approve(auth, routerAddr, a) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + log.Debugf("Approval %v tx: %v", name, tx.Hash().Hex()) +} + +func addLiquidity(auth *bind.TransactOpts, client *ethclient.Client, router *UniswapV2Router02.UniswapV2Router02, tokenA, tokenB common.Address, amount string) *types.Transaction { + ctx := context.Background() + a, _ := big.NewInt(0).SetString(amount, encoding.Base10) + log.Debugf("Adding liquidity(%v) for tokens A: %v, B:%v, Recipient: %v", amount, tokenA.Hex(), tokenB.Hex(), auth.From.Hex()) + tx, err := router.AddLiquidity(auth, tokenA, tokenB, a, a, a, a, auth.From, getDeadline()) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + return tx +} + +func getDeadline() *big.Int { + const deadLinelimit = 5 * time.Minute + return big.NewInt(time.Now().UTC().Add(deadLinelimit).Unix()) +} diff --git a/test/scripts/uniswap/pkg/swap.go b/test/scripts/uniswap/pkg/swap.go new file mode 100644 index 0000000000..01bf4d13f1 --- /dev/null +++ b/test/scripts/uniswap/pkg/swap.go @@ -0,0 +1,74 @@ +package pkg + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/core/types" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/core/UniswapV2Factory" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/core/UniswapV2Pair" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/periphery/UniswapV2Router02" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" +) + +func SwapTokens(client *ethclient.Client, auth *bind.TransactOpts, deployments Deployments) 
[]*types.Transaction { + transactions := make([]*types.Transaction, 0, 2) + // Execute swaps + const swapExactAmountInNumber = 10 + swapExactAmountIn := big.NewInt(swapExactAmountInNumber) + swapExactAmountIn2 := big.NewInt(swapExactAmountInNumber - 1) + value, err := deployments.ACoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before first swap aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = deployments.BCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before first swap bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = deployments.CCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("before first swap cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + log.Debugf("Swaping tokens from A <-> B") + res := SwapExactTokensForTokens(auth, client, deployments.Factory, deployments.Router, deployments.ACoinAddr, deployments.BCoinAddr, swapExactAmountIn) + transactions = append(transactions, res...) + fmt.Println() + value, err = deployments.ACoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("after first swap aCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = deployments.BCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("after first swap bCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + value, err = deployments.CCoin.BalanceOf(&bind.CallOpts{}, auth.From) + ChkErr(err) + log.Debugf("after first swap cCoin.balanceOf[%s]: %d", auth.From.Hex(), value) + log.Debugf("Swaping tokens from B <-> C") + res = SwapExactTokensForTokens(auth, client, deployments.Factory, deployments.Router, deployments.BCoinAddr, deployments.CCoinAddr, swapExactAmountIn2) + transactions = append(transactions, res...) 
+ fmt.Println() + + return transactions +} + +func SwapExactTokensForTokens(auth *bind.TransactOpts, client *ethclient.Client, + factory *UniswapV2Factory.UniswapV2Factory, router *UniswapV2Router02.UniswapV2Router02, + tokenA, tokenB common.Address, exactAmountIn *big.Int) []*types.Transaction { + ctx := context.Background() + logPrefix := fmt.Sprintf("SwapExactTokensForTokens %v <-> %v", tokenA.Hex(), tokenB.Hex()) + pairAddr, err := factory.GetPair(nil, tokenA, tokenB) + ChkErr(err) + log.Debug(logPrefix, " pair: ", pairAddr.Hex()) + pairSC, err := UniswapV2Pair.NewUniswapV2Pair(pairAddr, client) + ChkErr(err) + pairReserves, err := pairSC.GetReserves(nil) + ChkErr(err) + log.Debug(logPrefix, " reserves 0: ", pairReserves.Reserve0, " 1: ", pairReserves.Reserve1, " Block Timestamp: ", pairReserves.BlockTimestampLast) + amountOut, err := router.GetAmountOut(nil, exactAmountIn, pairReserves.Reserve0, pairReserves.Reserve1) + ChkErr(err) + log.Debug(logPrefix, " exactAmountIn: ", exactAmountIn, " amountOut: ", amountOut) + tx, err := router.SwapExactTokensForTokens(auth, exactAmountIn, amountOut, []common.Address{tokenA, tokenB}, auth.From, getDeadline()) + err = WaitForTransactionAndIncrementNonce(client, auth, err, ctx, tx) + return []*types.Transaction{tx} +} diff --git a/test/scripts/uniswap/pkg/types.go b/test/scripts/uniswap/pkg/types.go new file mode 100644 index 0000000000..5da3549d4a --- /dev/null +++ b/test/scripts/uniswap/pkg/types.go @@ -0,0 +1,19 @@ +package pkg + +import ( + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/ERC20" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/core/UniswapV2Factory" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/uniswap/v2/periphery/UniswapV2Router02" + "github.com/ethereum/go-ethereum/common" +) + +type Deployments struct { + ACoin *ERC20.ERC20 + ACoinAddr common.Address + BCoin *ERC20.ERC20 + BCoinAddr common.Address + CCoin *ERC20.ERC20 + CCoinAddr common.Address + Router 
*UniswapV2Router02.UniswapV2Router02 + Factory *UniswapV2Factory.UniswapV2Factory +} diff --git a/test/vectors/vectors_v2.go b/test/vectors/vectors_v2.go index 80638894f9..5ddfb00363 100644 --- a/test/vectors/vectors_v2.go +++ b/test/vectors/vectors_v2.go @@ -71,7 +71,7 @@ func GenerateGenesisActions(genesis []GenesisEntity) []*state.GenesisAction { } genesisActions = append(genesisActions, action) } - + if genesisEntity.IsSmartContract && len(genesisEntity.Storage) > 0 { for storageKey, storageValue := range genesisEntity.Storage { action := &state.GenesisAction{ diff --git a/tools/datastreamer/Makefile b/tools/datastreamer/Makefile new file mode 100644 index 0000000000..5698417c34 --- /dev/null +++ b/tools/datastreamer/Makefile @@ -0,0 +1,59 @@ +# Check dependencies +# Check for Go +.PHONY: check-go +check-go: + @which go > /dev/null || (echo "Error: Go is not installed" && exit 1) + +# Targets that require the checks +generate-file: check-go +reprocess: check-go +decode-entry-offline: check-go +decode-l2block-offline: check-go +decode-entry: check-go +decode-l2block: check-go +truncate: check-go + +arguments := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + +.PHONY: generate-file +generate-file: ## Runs the tool to populate the binary file + go run main.go generate -cfg config/tool.config.toml + +.PHONY: decode-entry +decode-entry: ## Runs the tool to decode a given entry number + go run main.go decode-entry -cfg config/tool.config.toml -entry $(arguments) + +.PHONY: decode-l2block +decode-l2block: ## Runs the tool to decode a given L2 block + go run main.go decode-l2block -cfg config/tool.config.toml -l2block $(arguments) + +.PHONY: decode-entry-offline +decode-entry-offline: ## Runs the offline tool to decode a given entry number + go run main.go decode-entry-offline -cfg config/tool.config.toml -entry $(arguments) + +.PHONY: decode-l2block-offline +decode-l2block-offline: ## Runs the offline tool to decode a given L2 block + go run main.go 
decode-l2block-offline -cfg config/tool.config.toml -l2block $(arguments) + +.PHONY: truncate +truncate: ## Runs the offline tool to truncate the stream file + go run main.go truncate -cfg config/tool.config.toml -entry $(arguments) + +# .PHONY: reprocess +reprocess: ## Runs the tool to reprocess the information in the stream since a given l2 block + go run main.go reprocess -cfg config/tool.config.toml -genesis ../test/config/test.genesis.config.json -l2block $(arguments) + +## Help display. +## Pulls comments from beside commands and prints a nicely formatted +## display with the commands and their usage information. +.DEFAULT_GOAL := help + +.PHONY: help +help: ## Prints this help + @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) \ + | sort \ + | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.DEFAULT: + @echo "" + diff --git a/tools/datastreamer/config/config.go b/tools/datastreamer/config/config.go new file mode 100644 index 0000000000..8161a6ec81 --- /dev/null +++ b/tools/datastreamer/config/config.go @@ -0,0 +1,101 @@ +package config + +import ( + "bytes" + "path/filepath" + "strings" + + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/0xPolygonHermez/zkevm-data-streamer/log" + "github.com/0xPolygonHermez/zkevm-node/db" + "github.com/0xPolygonHermez/zkevm-node/merkletree" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" + "github.com/urfave/cli/v2" +) + +const ( + // FlagCfg is the flag for cfg + FlagCfg = "cfg" + // FlagGenesis is the flag for genesis file + FlagGenesis = "genesis" +) + +// OnlineConfig is the configuration for the online data streamer +type OnlineConfig struct { + URI string `mapstructure:"URI"` + StreamType datastreamer.StreamType `mapstructure:"StreamType"` +} + +// Config is the configuration for the tool +type Config struct { + ChainID uint64 `mapstructure:"ChainID"` + Online OnlineConfig 
`mapstructure:"Online"` + Offline datastreamer.Config `mapstructure:"Offline"` + StateDB db.Config `mapstructure:"StateDB"` + Executor executor.Config `mapstructure:"Executor"` + MerkeTree merkletree.Config `mapstructure:"MerkeTree"` + Log log.Config `mapstructure:"Log"` +} + +// Default parses the default configuration values. +func Default() (*Config, error) { + var cfg Config + viper.SetConfigType("toml") + + err := viper.ReadConfig(bytes.NewBuffer([]byte(DefaultValues))) + if err != nil { + return nil, err + } + err = viper.Unmarshal(&cfg, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())) + if err != nil { + return nil, err + } + return &cfg, nil +} + +// Load parses the configuration values from the config file and environment variables +func Load(ctx *cli.Context) (*Config, error) { + cfg, err := Default() + if err != nil { + return nil, err + } + configFilePath := ctx.String(FlagCfg) + if configFilePath != "" { + dirName, fileName := filepath.Split(configFilePath) + + fileExtension := strings.TrimPrefix(filepath.Ext(fileName), ".") + fileNameWithoutExtension := strings.TrimSuffix(fileName, "."+fileExtension) + + viper.AddConfigPath(dirName) + viper.SetConfigName(fileNameWithoutExtension) + viper.SetConfigType(fileExtension) + } + viper.AutomaticEnv() + replacer := strings.NewReplacer(".", "_") + viper.SetEnvKeyReplacer(replacer) + viper.SetEnvPrefix("ZKEVM_DATA_STREAMER") + err = viper.ReadInConfig() + if err != nil { + _, ok := err.(viper.ConfigFileNotFoundError) + if ok { + log.Infof("config file not found") + } else { + log.Infof("error reading config file: ", err) + return nil, err + } + } + + decodeHooks := []viper.DecoderConfigOption{ + // this allows arrays to be decoded from env var separated by ",", example: MY_VAR="value1,value2,value3" + viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(mapstructure.TextUnmarshallerHookFunc(), mapstructure.StringToSliceHookFunc(","))), + } + + err = viper.Unmarshal(&cfg, decodeHooks...) 
+ if err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/tools/datastreamer/config/default.go b/tools/datastreamer/config/default.go new file mode 100644 index 0000000000..7a2b8aea49 --- /dev/null +++ b/tools/datastreamer/config/default.go @@ -0,0 +1,35 @@ +package config + +// DefaultValues is the default configuration +const DefaultValues = ` +ChainID = 1440 + +[Online] +URI = "zkevm-sequencer:6900" +StreamType = 1 + +[Offline] +Port = 6901 +Filename = "datastreamer.bin" + +[StateDB] +User = "state_user" +Password = "state_password" +Name = "state_db" +Host = "localhost" +Port = "5432" +EnableLog = false +MaxConns = 200 + +[Executor] +URI = "zkevm-prover:50071" +MaxGRPCMessageSize = 100000000 + +[MerkeTree] +URI = "zkevm-prover:50061" + +[Log] +Environment = "development" # "production" or "development" +Level = "info" +Outputs = ["stderr"] +` diff --git a/tools/datastreamer/config/tool.config.toml b/tools/datastreamer/config/tool.config.toml new file mode 100644 index 0000000000..2326418375 --- /dev/null +++ b/tools/datastreamer/config/tool.config.toml @@ -0,0 +1,30 @@ +ChainID = 1440 + +[Online] +URI = "zkevm-sequencer:6900" +StreamType = 1 + +[Offline] +Port = 6901 +Filename = "datastream.bin" + +[StateDB] +User = "state_user" +Password = "state_password" +Name = "state_db" +Host = "localhost" +Port = "5432" +EnableLog = false +MaxConns = 200 + +[Executor] +URI = "zkevm-prover:50071" +MaxGRPCMessageSize = 100000000 + +[MerkeTree] +URI = "zkevm-prover:50061" + +[Log] +Environment = "development" +Level = "error" +Outputs = ["stdout"] diff --git a/tools/datastreamer/main.go b/tools/datastreamer/main.go new file mode 100644 index 0000000000..c0ba0e1923 --- /dev/null +++ b/tools/datastreamer/main.go @@ -0,0 +1,838 @@ +package main + +import ( + "context" + "encoding/binary" + "fmt" + "os" + "time" + + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/0xPolygonHermez/zkevm-data-streamer/log" + nodeConfig 
"github.com/0xPolygonHermez/zkevm-node/config" + "github.com/0xPolygonHermez/zkevm-node/db" + "github.com/0xPolygonHermez/zkevm-node/encoding" + "github.com/0xPolygonHermez/zkevm-node/hex" + "github.com/0xPolygonHermez/zkevm-node/merkletree" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" + "github.com/0xPolygonHermez/zkevm-node/tools/datastreamer/config" + "github.com/ethereum/go-ethereum/common" + "github.com/fatih/color" + "github.com/google/uuid" + "github.com/urfave/cli/v2" +) + +const ( + appName = "zkevm-data-streamer-tool" //nolint:gosec + appUsage = "zkevm datastream tool" +) + +var ( + configFileFlag = cli.StringFlag{ + Name: config.FlagCfg, + Aliases: []string{"c"}, + Usage: "Configuration `FILE`", + DefaultText: "./config/tool.config.toml", + Required: true, + } + + genesisFileFlag = cli.StringFlag{ + Name: config.FlagGenesis, + Aliases: []string{"g"}, + Usage: "Genesis `FILE`", + DefaultText: "./config/genesis.json", + Required: true, + } + + entryFlag = cli.Uint64Flag{ + Name: "entry", + Aliases: []string{"e"}, + Usage: "Entry `NUMBER`", + Required: true, + } + + l2blockFlag = cli.Uint64Flag{ + Name: "l2block", + Aliases: []string{"b"}, + Usage: "L2Block `NUMBER`", + Required: true, + } + + updateFileFlag = cli.BoolFlag{ + Name: "update", + Aliases: []string{"u"}, + Usage: "Update `FILE`", + Required: false, + } +) + +func main() { + app := cli.NewApp() + app.Name = appName + app.Usage = appUsage + + app.Commands = []*cli.Command{ + { + Name: "generate", + Aliases: []string{}, + Usage: "Generate stream file from scratch", + Action: generate, + Flags: []cli.Flag{ + &configFileFlag, + }, + }, + { + Name: "reprocess", + Aliases: []string{}, + Usage: "Reprocess l2block since a given l2block number", + Action: reprocess, + Flags: []cli.Flag{ + &configFileFlag, + &genesisFileFlag, + &l2blockFlag, + &updateFileFlag, + }, + }, + { + Name: "decode-entry-offline", + Aliases: []string{}, + Usage: 
"Decodes an entry offline", + Action: decodeEntryOffline, + Flags: []cli.Flag{ + &configFileFlag, + &entryFlag, + }, + }, + { + Name: "decode-l2block-offline", + Aliases: []string{}, + Usage: "Decodes a l2 block offline", + Action: decodeL2BlockOffline, + Flags: []cli.Flag{ + &configFileFlag, + &l2blockFlag, + }, + }, + { + Name: "decode-entry", + Aliases: []string{}, + Usage: "Decodes an entry", + Action: decodeEntry, + Flags: []cli.Flag{ + &configFileFlag, + &entryFlag, + }, + }, + { + Name: "decode-l2block", + Aliases: []string{}, + Usage: "Decodes a l2 block", + Action: decodeL2Block, + Flags: []cli.Flag{ + &configFileFlag, + &l2blockFlag, + }, + }, + { + Name: "truncate", + Aliases: []string{}, + Usage: "Truncates the stream file", + Action: truncate, + Flags: []cli.Flag{ + &configFileFlag, + &entryFlag, + }, + }, + } + + err := app.Run(os.Args) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } +} + +func initializeStreamServer(c *config.Config) (*datastreamer.StreamServer, error) { + // Create a stream server + streamServer, err := datastreamer.NewServer(c.Offline.Port, state.StreamTypeSequencer, c.Offline.Filename, &c.Log) + if err != nil { + return nil, err + } + + err = streamServer.Start() + if err != nil { + return nil, err + } + + return streamServer, nil +} + +func generate(cliCtx *cli.Context) error { + c, err := config.Load(cliCtx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + log.Init(c.Log) + + streamServer, err := initializeStreamServer(c) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + // Connect to the database + stateSqlDB, err := db.NewSQLDB(c.StateDB) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + defer stateSqlDB.Close() + stateDB := state.NewPostgresStorage(state.Config{}, stateSqlDB) + log.Debug("Connected to the database") + + err = state.GenerateDataStreamerFile(cliCtx.Context, streamServer, stateDB) + if err != nil { + fmt.Printf("Error: %v\n", 
err) + os.Exit(1) + } + + printColored(color.FgGreen, "Process finished\n") + + return nil +} + +func reprocess(cliCtx *cli.Context) error { + c, err := config.Load(cliCtx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + log.Init(c.Log) + + ctx := cliCtx.Context + + genesisFileAsStr, err := nodeConfig.LoadGenesisFileAsString(cliCtx.String(config.FlagGenesis)) + if err != nil { + fmt.Printf("failed to load genesis file. Error: %v", err) + os.Exit(1) + } + + networkConfig, err := nodeConfig.LoadGenesisFromJSONString(genesisFileAsStr) + if err != nil { + fmt.Printf("failed to load genesis configuration from file. Error: %v", err) + os.Exit(1) + } + + currentL2BlockNumber := cliCtx.Uint64("l2block") + var stateRoot []byte + + streamServer, err := initializeStreamServer(c) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + if currentL2BlockNumber == 0 { + printColored(color.FgHiYellow, "\n\nSetting Genesis block\n\n") + + mtDBServerConfig := merkletree.Config{URI: c.MerkeTree.URI} + var mtDBCancel context.CancelFunc + mtDBServiceClient, mtDBClientConn, mtDBCancel := merkletree.NewMTDBServiceClient(ctx, mtDBServerConfig) + defer func() { + mtDBCancel() + mtDBClientConn.Close() + }() + + stateTree := merkletree.NewStateTree(mtDBServiceClient) + + stateRoot, err = setGenesis(ctx, stateTree, networkConfig.Genesis) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + // Get Genesis block from the file and validate the state root + bookMark := state.DSBookMark{ + Type: state.BookMarkTypeL2Block, + L2BlockNumber: 0, + } + + firstEntry, err := streamServer.GetFirstEventAfterBookmark(bookMark.Encode()) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printEntry(firstEntry) + + secondEntry, err := streamServer.GetEntry(firstEntry.Number + 1) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printEntry(secondEntry) + + if common.Bytes2Hex(stateRoot) != 
common.Bytes2Hex(secondEntry.Data[40:72]) { + printColored(color.FgRed, "\nError: Genesis state root does not match\n\n") + os.Exit(1) + } else { + printColored(color.FgGreen, "\nGenesis state root matches\n\n") + } + currentL2BlockNumber++ + } + + // Connect to the executor + executorClient, executorClientConn, executorCancel := executor.NewExecutorClient(ctx, c.Executor) + defer func() { + executorCancel() + executorClientConn.Close() + }() + + bookMark := state.DSBookMark{ + Type: state.BookMarkTypeL2Block, + L2BlockNumber: currentL2BlockNumber, + } + + startEntry, err := streamServer.GetFirstEventAfterBookmark(bookMark.Encode()) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + var previousStateRoot = stateRoot + var maxEntry = streamServer.GetHeader().TotalEntries + + for x := startEntry.Number; x < maxEntry; x++ { + printColored(color.FgHiYellow, fmt.Sprintf("\nProcessing entity: %d\n", x)) + + currentEntry, err := streamServer.GetEntry(x) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + var processBatchRequest *executor.ProcessBatchRequest + var expectedNewRoot []byte + var entryToUpdate *datastreamer.FileEntry + + switch currentEntry.Type { + case state.EntryTypeBookMark: + printEntry(currentEntry) + entryToUpdate = nil + continue + case state.EntryTypeUpdateGER: + printEntry(currentEntry) + processBatchRequest = &executor.ProcessBatchRequest{ + OldBatchNum: binary.LittleEndian.Uint64(currentEntry.Data[0:8]) - 1, + Coinbase: common.Bytes2Hex(currentEntry.Data[48:68]), + BatchL2Data: nil, + OldStateRoot: previousStateRoot, + GlobalExitRoot: currentEntry.Data[16:48], + OldAccInputHash: []byte{}, + EthTimestamp: binary.LittleEndian.Uint64(currentEntry.Data[8:16]), + UpdateMerkleTree: uint32(1), + ChainId: c.ChainID, + ForkId: uint64(binary.LittleEndian.Uint16(currentEntry.Data[68:70])), + } + + expectedNewRoot = currentEntry.Data[70:102] + entryToUpdate = nil + case state.EntryTypeL2BlockStart: + startEntry = 
currentEntry + printEntry(startEntry) + + txEntry, err := streamServer.GetEntry(startEntry.Number + 1) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printEntry(txEntry) + + endEntry, err := streamServer.GetEntry(startEntry.Number + 2) //nolint:gomnd + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printEntry(endEntry) + + forkID := uint64(binary.LittleEndian.Uint16(startEntry.Data[76:78])) + + tx, err := state.DecodeTx(common.Bytes2Hex((txEntry.Data[6:]))) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + // Get the old state root + oldStateRoot := getOldStateRoot(startEntry.Number, streamServer) + + // RLP encode the transaction using the proper fork id + batchL2Data, err := state.EncodeTransaction(*tx, txEntry.Data[0], forkID) //nolint:gomnd + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + processBatchRequest = &executor.ProcessBatchRequest{ + OldBatchNum: binary.LittleEndian.Uint64(startEntry.Data[0:8]) - 1, + Coinbase: common.Bytes2Hex(startEntry.Data[56:76]), + BatchL2Data: batchL2Data, + OldStateRoot: oldStateRoot, + GlobalExitRoot: startEntry.Data[24:56], + OldAccInputHash: []byte{}, + EthTimestamp: binary.LittleEndian.Uint64(startEntry.Data[16:24]), + UpdateMerkleTree: uint32(1), + ChainId: c.ChainID, + ForkId: uint64(binary.LittleEndian.Uint16(startEntry.Data[76:78])), + } + + expectedNewRoot = endEntry.Data[40:72] + entryToUpdate = &endEntry + x += 2 //nolint:gomnd + } + + // Process batch + processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + if processBatchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { + fmt.Printf("Error: %v\n", processBatchResponse.Error) + os.Exit(1) + } + + if common.Bytes2Hex(processBatchResponse.NewStateRoot) != common.Bytes2Hex(expectedNewRoot) { + printColored(color.FgRed, "\nNew state root does not match\n\n") + 
printColored(color.FgRed, fmt.Sprintf("Old State Root.........: %s\n", "0x"+common.Bytes2Hex(processBatchRequest.GetOldStateRoot()))) + printColored(color.FgRed, fmt.Sprintf("New State Root.........: %s\n", "0x"+common.Bytes2Hex(processBatchResponse.NewStateRoot))) + printColored(color.FgRed, fmt.Sprintf("Expected New State Root: %s\n", "0x"+common.Bytes2Hex(expectedNewRoot))) + // Check if we must update the file with the new state root + if cliCtx.Bool("update") { + if entryToUpdate.Type != state.EntryTypeL2BlockEnd { + printColored(color.FgRed, "Error: Entry to update is not a L2BlockEnd\n") + os.Exit(1) + } + blockEnd := state.DSL2BlockEnd{}.Decode(entryToUpdate.Data) + blockEnd.StateRoot = common.BytesToHash(processBatchResponse.NewStateRoot) + err = streamServer.UpdateEntryData(entryToUpdate.Number, state.EntryTypeL2BlockEnd, blockEnd.Encode()) + if err != nil { + printColored(color.FgRed, fmt.Sprintf("Error: %v\n", err)) + os.Exit(1) + } + } else { + break + } + } else { + printColored(color.FgGreen, "New state root matches\n") + previousStateRoot = processBatchResponse.NewStateRoot + } + } + + return nil +} + +func decodeEntry(cliCtx *cli.Context) error { + c, err := config.Load(cliCtx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + log.Init(c.Log) + + client, err := datastreamer.NewClient(c.Online.URI, c.Online.StreamType) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + err = client.Start() + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + client.FromEntry = cliCtx.Uint64("entry") + err = client.ExecCommand(datastreamer.CmdEntry) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + printEntry(client.Entry) + return nil +} + +func decodeL2Block(cliCtx *cli.Context) error { + c, err := config.Load(cliCtx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + log.Init(c.Log) + + client, err := datastreamer.NewClient(c.Online.URI, c.Online.StreamType) + 
if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + err = client.Start() + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + l2BlockNumber := cliCtx.Uint64("l2block") + + bookMark := state.DSBookMark{ + Type: state.BookMarkTypeL2Block, + L2BlockNumber: l2BlockNumber, + } + + client.FromBookmark = bookMark.Encode() + err = client.ExecCommand(datastreamer.CmdBookmark) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + firstEntry := client.Entry + printEntry(firstEntry) + + client.FromEntry = firstEntry.Number + 1 + err = client.ExecCommand(datastreamer.CmdEntry) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + secondEntry := client.Entry + printEntry(secondEntry) + + if l2BlockNumber != 0 { + client.FromEntry = firstEntry.Number + 2 //nolint:gomnd + err = client.ExecCommand(datastreamer.CmdEntry) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + thirdEntry := client.Entry + printEntry(thirdEntry) + } + + return nil +} + +func decodeEntryOffline(cliCtx *cli.Context) error { + c, err := config.Load(cliCtx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + log.Init(c.Log) + + streamServer, err := initializeStreamServer(c) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + entry, err := streamServer.GetEntry(cliCtx.Uint64("entry")) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + printEntry(entry) + + return nil +} + +func decodeL2BlockOffline(cliCtx *cli.Context) error { + c, err := config.Load(cliCtx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + log.Init(c.Log) + + streamServer, err := initializeStreamServer(c) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + l2BlockNumber := cliCtx.Uint64("l2block") + + bookMark := state.DSBookMark{ + Type: state.BookMarkTypeL2Block, + L2BlockNumber: l2BlockNumber, + } + + firstEntry, err := 
streamServer.GetFirstEventAfterBookmark(bookMark.Encode()) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printEntry(firstEntry) + + secondEntry, err := streamServer.GetEntry(firstEntry.Number + 1) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printEntry(secondEntry) + + if l2BlockNumber != 0 { + thirdEntry, err := streamServer.GetEntry(firstEntry.Number + 2) //nolint:gomnd + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printEntry(thirdEntry) + } + + return nil +} + +func truncate(cliCtx *cli.Context) error { + c, err := config.Load(cliCtx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + log.Init(c.Log) + + streamServer, err := initializeStreamServer(c) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + err = streamServer.TruncateFile(cliCtx.Uint64("entry")) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + printColored(color.FgGreen, "File truncated\n") + + return nil +} + +func printEntry(entry datastreamer.FileEntry) { + switch entry.Type { + case state.EntryTypeBookMark: + bookmark := state.DSBookMark{}.Decode(entry.Data) + printColored(color.FgGreen, "Entry Type......: ") + printColored(color.FgHiYellow, "BookMark\n") + printColored(color.FgGreen, "Entry Number....: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", entry.Number)) + printColored(color.FgGreen, "L2 Block Number.: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", bookmark.L2BlockNumber)) + case state.EntryTypeL2BlockStart: + blockStart := state.DSL2BlockStart{}.Decode(entry.Data) + printColored(color.FgGreen, "Entry Type......: ") + printColored(color.FgHiYellow, "L2 Block Start\n") + printColored(color.FgGreen, "Entry Number....: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", entry.Number)) + printColored(color.FgGreen, "Batch Number....: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.BatchNumber)) + 
printColored(color.FgGreen, "L2 Block Number.: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.L2BlockNumber)) + printColored(color.FgGreen, "Timestamp.......: ") + printColored(color.FgHiWhite, fmt.Sprintf("%v (%d)\n", time.Unix(int64(blockStart.Timestamp), 0), blockStart.Timestamp)) + printColored(color.FgGreen, "Global Exit Root: ") + printColored(color.FgHiWhite, fmt.Sprintf("%s\n", blockStart.GlobalExitRoot)) + printColored(color.FgGreen, "Coinbase........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%s\n", blockStart.Coinbase)) + printColored(color.FgGreen, "Fork ID.........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.ForkID)) + case state.EntryTypeL2Tx: + dsTx := state.DSL2Transaction{}.Decode(entry.Data) + printColored(color.FgGreen, "Entry Type......: ") + printColored(color.FgHiYellow, "L2 Transaction\n") + printColored(color.FgGreen, "Entry Number....: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", entry.Number)) + printColored(color.FgGreen, "Effec. 
Gas Price: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", dsTx.EffectiveGasPricePercentage)) + printColored(color.FgGreen, "Is Valid........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%t\n", dsTx.IsValid == 1)) + printColored(color.FgGreen, "Encoded Length..: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", dsTx.EncodedLength)) + printColored(color.FgGreen, "Encoded.........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%s\n", "0x"+common.Bytes2Hex(dsTx.Encoded))) + + tx, err := state.DecodeTx(common.Bytes2Hex(dsTx.Encoded)) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printColored(color.FgGreen, "Decoded.........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%+v\n", tx)) + sender, err := state.GetSender(*tx) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + printColored(color.FgGreen, "Sender..........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%s\n", sender)) + nonce := tx.Nonce() + printColored(color.FgGreen, "Nonce...........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", nonce)) + case state.EntryTypeL2BlockEnd: + blockEnd := state.DSL2BlockEnd{}.Decode(entry.Data) + printColored(color.FgGreen, "Entry Type......: ") + printColored(color.FgHiYellow, "L2 Block End\n") + printColored(color.FgGreen, "Entry Number....: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", entry.Number)) + printColored(color.FgGreen, "L2 Block Number.: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockEnd.L2BlockNumber)) + printColored(color.FgGreen, "L2 Block Hash...: ") + printColored(color.FgHiWhite, fmt.Sprint(blockEnd.BlockHash.Hex()+"\n")) + printColored(color.FgGreen, "State Root......: ") + printColored(color.FgHiWhite, fmt.Sprint(blockEnd.StateRoot.Hex()+"\n")) + case state.EntryTypeUpdateGER: + updateGer := state.DSUpdateGER{}.Decode(entry.Data) + printColored(color.FgGreen, "Entry Type......: ") + printColored(color.FgHiYellow, "Update GER\n") + printColored(color.FgGreen, 
"Entry Number....: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", entry.Number)) + printColored(color.FgGreen, "Batch Number....: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", updateGer.BatchNumber)) + printColored(color.FgGreen, "Timestamp.......: ") + printColored(color.FgHiWhite, fmt.Sprintf("%v (%d)\n", time.Unix(int64(updateGer.Timestamp), 0), updateGer.Timestamp)) + printColored(color.FgGreen, "Global Exit Root: ") + printColored(color.FgHiWhite, fmt.Sprintf("%s\n", updateGer.GlobalExitRoot)) + printColored(color.FgGreen, "Coinbase........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%s\n", updateGer.Coinbase)) + printColored(color.FgGreen, "Fork ID.........: ") + printColored(color.FgHiWhite, fmt.Sprintf("%d\n", updateGer.ForkID)) + printColored(color.FgGreen, "State Root......: ") + printColored(color.FgHiWhite, fmt.Sprint(updateGer.StateRoot.Hex()+"\n")) + } +} + +func printColored(color color.Attribute, text string) { + colored := fmt.Sprintf("\x1b[%dm%s\x1b[0m", color, text) + fmt.Print(colored) +} + +// setGenesis populates state with genesis information +func setGenesis(ctx context.Context, tree *merkletree.StateTree, genesis state.Genesis) ([]byte, error) { + var ( + root common.Hash + newRoot []byte + err error + ) + + if tree == nil { + return newRoot, fmt.Errorf("state tree is nil") + } + + uuid := uuid.New().String() + + for _, action := range genesis.GenesisActions { + address := common.HexToAddress(action.Address) + switch action.Type { + case int(merkletree.LeafTypeBalance): + balance, err := encoding.DecodeBigIntHexOrDecimal(action.Value) + if err != nil { + return newRoot, err + } + newRoot, _, err = tree.SetBalance(ctx, address, balance, newRoot, uuid) + if err != nil { + return newRoot, err + } + case int(merkletree.LeafTypeNonce): + nonce, err := encoding.DecodeBigIntHexOrDecimal(action.Value) + if err != nil { + return newRoot, err + } + newRoot, _, err = tree.SetNonce(ctx, address, nonce, newRoot, uuid) + if err != 
nil { + return newRoot, err + } + case int(merkletree.LeafTypeCode): + code, err := hex.DecodeHex(action.Bytecode) + if err != nil { + return newRoot, fmt.Errorf("could not decode SC bytecode for address %q: %v", address, err) + } + newRoot, _, err = tree.SetCode(ctx, address, code, newRoot, uuid) + if err != nil { + return newRoot, err + } + case int(merkletree.LeafTypeStorage): + // Parse position and value + positionBI, err := encoding.DecodeBigIntHexOrDecimal(action.StoragePosition) + if err != nil { + return newRoot, err + } + valueBI, err := encoding.DecodeBigIntHexOrDecimal(action.Value) + if err != nil { + return newRoot, err + } + // Store + newRoot, _, err = tree.SetStorageAt(ctx, address, positionBI, valueBI, newRoot, uuid) + if err != nil { + return newRoot, err + } + default: + return newRoot, fmt.Errorf("unknown genesis action type %q", action.Type) + } + } + + root.SetBytes(newRoot) + + // flush state db + err = tree.Flush(ctx, uuid) + if err != nil { + fmt.Printf("error flushing state tree after genesis: %v", err) + return newRoot, err + } + + return newRoot, nil +} + +func getOldStateRoot(entityNumber uint64, streamServer *datastreamer.StreamServer) []byte { + var found = false + var entry datastreamer.FileEntry + var err error + + for !found && entityNumber > 1 { + entityNumber-- + entry, err = streamServer.GetEntry(entityNumber) + if err != nil { + fmt.Printf("Error: %v\n", err) + os.Exit(1) + } + + if entry.Type == state.EntryTypeL2BlockEnd || entry.Type == state.EntryTypeUpdateGER { + found = true + } + } + + if !found { + fmt.Printf("Error: Could not find old state root") + os.Exit(1) + } + + printColored(color.FgHiYellow, "Getting Old State Root from\n") + printEntry(entry) + + if entry.Type == state.EntryTypeUpdateGER { + return entry.Data[70:102] + } + + return entry.Data[40:72] +} diff --git a/tools/executor/main.go b/tools/executor/main.go index 8f28ae12f1..c9e3b3aa86 100644 --- a/tools/executor/main.go +++ b/tools/executor/main.go @@ 
-14,6 +14,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/0xPolygonHermez/zkevm-node/test/testutils" + "github.com/google/uuid" ) const ( @@ -108,11 +109,11 @@ func runTestCase(ctx context.Context, genesis []genesisItem, tc testCase) error } // Executor connection - xecutor, _, _ := executor.NewExecutorClient(ctx, executor.Config{URI: executorURL, MaxGRPCMessageSize: 100000000}) //nolint:gomnd + executorClient, _, _ := executor.NewExecutorClient(ctx, executor.Config{URI: executorURL, MaxGRPCMessageSize: 100000000}) //nolint:gomnd // Execute batches for i := 0; i < len(tc.Requests); i++ { pbr := executor.ProcessBatchRequest(tc.Requests[i]) //nolint - res, err := xecutor.ProcessBatch(ctx, &pbr) + res, err := executorClient.ProcessBatch(ctx, &pbr) if err != nil { return err } @@ -232,7 +233,7 @@ type testCase struct { type executorRequest executor.ProcessBatchRequest func (er *executorRequest) UnmarshalJSON(data []byte) error { - type jExecutorRequeststruct struct { + type jExecutorRequest struct { BatchL2Data string `json:"batchL2Data"` GlobalExitRoot string `json:"globalExitRoot"` OldBatchNum uint64 `json:"oldBatchNum"` @@ -241,7 +242,7 @@ func (er *executorRequest) UnmarshalJSON(data []byte) error { SequencerAddr string `json:"sequencerAddr"` Timestamp uint64 `json:"timestamp"` } - jer := jExecutorRequeststruct{} + jer := jExecutorRequest{} if err := json.Unmarshal(data, &jer); err != nil { return err } @@ -270,6 +271,7 @@ func (er *executorRequest) UnmarshalJSON(data []byte) error { OldStateRoot: oldStateRoot, Coinbase: jer.SequencerAddr, EthTimestamp: jer.Timestamp, + ContextId: uuid.NewString(), } *er = executorRequest(req) //nolint return nil diff --git a/tools/rlp/README.md b/tools/rlp/README.md index 0c612fbb0e..6e88dfbe50 100644 --- a/tools/rlp/README.md +++ b/tools/rlp/README.md @@ -23,7 +23,7 @@ V: 2038 R: 
9361089098880882477997043716401602752115554739533279717349253863857164548636 S: 57526471149217844688177159177322186288034197982071061451252570055448276801731 -##### Result: +##### Expected result: ``` ee80843b9aca00830186a094617b3a3528f9cdd6630fd3301b9c8911f7bf063d88016345785d8a0000808203e9808014b2304ca08457f37c4e81284398aa9c511875e47b37b1e6877a26c53c0e7a1c7f2ed42e23e503c446213de2ce077e5cfdc58f0a672dfc0a5d0aa7c0202098c31c ``` diff --git a/tools/state/README.md b/tools/state/README.md new file mode 100644 index 0000000000..76fc97824d --- /dev/null +++ b/tools/state/README.md @@ -0,0 +1,56 @@ +# state tool + +This tool allows to rerun a set of batches, you could set a flag to persist changes in hashDB + +# Usage + + + +## Network configuration +If you want to avoid passing network configuration (`--network` and `--custom-network-file`) you need to provide the L2ChainID (`--l2_chain_id`) + +## Reprocess a set of batches and compare with state database +This reexecute a batch/batches and check if match the data on DB. +It have some flags to allow: +- `--write_on_hash_db`: this for each execution create the corresponding MT if possible +it override state_db +- `--fist_batch`: first batch to process (default: 1) +- `--last_batch`: last batch to process (default: the highest batch on batch table) +- `--l2_chain_id`: Intead of asking to SMC you can set it +- `--dont_stop_on_error`: If a batch have an error the process doesn't stop +- `--prefer_execution_state_root`: The oldStateRoot used to process a batch is usually is the stateRoot of the previous batch on database but, with this flag, you could use the calculated stateRoot from the execution result from previous batch instead + +To see the full flags execute: +``` +go run ./tools/state/. reprocess +``` + +# Examples: + +- You need to set the right `State` config, `Executor` config and `MTClient`. 
You can override the parameters with environment variables: +``` +KEVM_NODE_MTCLIENT_URI="127.0.0.1:50061" ZKEVM_NODE_STATE_DB_HOST="127.0.0.1" ZKEVM_NODE_EXECUTOR_URI="127.0.0.1:50071" go run ./tools/state/. reprocess -cfg test/config/test.node.config.toml -l2_chain_id 1440 --last_batch_number 5000 +``` +- We are setting the `chain_id` direclty so we don't need the genesis data. +- All this examples redirect the log info to `/dev/null` for that reason if the command returns an error (`$? -ne 1`) relaunch without the redirection part (`2> /dev/null`) to see the full output + +### Rebuild hashdb entries for first 5000 batches + +``` +go run ./tools/state/. reprocess -cfg test/config/test.node.config.toml -l2_chain_id 1440 --last_batch_number 5000 --write_on_hash_db 2> /dev/null +``` +expected output: +``` + batch 91 1.80%: ... ntx: 1 WRITE (flush: 5955) ETA: 38s speed:127.8 batch/s StateRoot:0x9f2db3f7775f30f1e79b4c0d876b8094a839cdba2cc51a48359b817a1c07e09f [OK] + batch 92 1.82%: ... ntx: 0 WRITE (flush: 5956) ETA: 44s speed:112.7 batch/s StateRoot:0x9f2db3f7775f30f1e79b4c0d876b8094a839cdba2cc51a48359b817a1c07e09f [OK] + batch 93 1.84%: ... ntx: 11 WRITE (flush: 5957) ETA: 49s speed:99.3 batch/s StateRoot:0xf77f6df21cbb5455ebae4dd9275bf5753f6e7e94250afe537192e624b7291854 [OK] + batch 94 1.86%: ... ntx: 0 WRITE (flush: 5958) ETA: 54s speed:90.4 batch/s StateRoot:0xf77f6df21cbb5455ebae4dd9275bf5753f6e7e94250afe537192e624b7291854 [OK] + batch 95 1.88%: ... ntx: 0 WRITE (flush: 5959) ETA: 54s speed:91.2 batch/s StateRoot:0xf77f6df21cbb5455ebae4dd9275bf5753f6e7e94250afe537192e624b7291854 [OK] + batch 96 1.90%: ... ntx: 0 WRITE (flush: 5960) ETA: 53s speed:92.0 batch/s StateRoot:0xf77f6df21cbb5455ebae4dd9275bf5753f6e7e94250afe537192e624b7291854 [OK] + batch 97 1.92%: ... 
ntx: 0 WRITE (flush: 5961) ETA: 53s speed:92.7 batch/s StateRoot:0xf77f6df21cbb5455ebae4dd9275bf5753f6e7e94250afe537192e624b7291854 [OK] +``` + +### Check that the batches from 1000 to 5000 match stateRoot +``` + go run ./tools/state/. reprocess -cfg test/config/test.node.config.toml -l2_chain_id 1440 --first_batch_number 1000 --last_batch_number 5000 2> /dev/null +``` \ No newline at end of file diff --git a/tools/state/control_flush_id.go b/tools/state/control_flush_id.go new file mode 100644 index 0000000000..8817eda71e --- /dev/null +++ b/tools/state/control_flush_id.go @@ -0,0 +1,115 @@ +package main + +import ( + "context" + "time" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" +) + +// FlushIDController is an interface to control the flushID and ProverID +type flushIDController interface { + // UpdateAndCheckProverID check the incomming proverID from executor with the last one, if no match finalize synchronizer + // if there are no previous one it keep this value as the current one + UpdateAndCheckProverID(proverID string) + // BlockUntilLastFlushIDIsWritten blocks until the last flushID is written in DB. It keep in a loop asking to executor + // the flushid written, also check ProverID + BlockUntilLastFlushIDIsWritten() error + // SetPendingFlushIDAndCheckProverID set the pending flushID to be written in DB and check proverID + SetPendingFlushIDAndCheckProverID(flushID uint64, proverID string, callDescription string) +} + +// ClientFlushIDControl is a struct to control the flushID and ProverID, implements FlushIDController interface +type ClientFlushIDControl struct { + state *state.State + ctx context.Context + + // Id of the 'process' of the executor. 
Each time that it starts this value changes + // This value is obtained from the call state.GetStoredFlushID + // It starts as an empty string and it is filled in the first call + // later the value is checked to be the same (in function checkFlushID) + proverID string + // Previous value returned by state.GetStoredFlushID, is used for decide if write a log or not + previousExecutorFlushID uint64 + latestFlushID uint64 + // If true the lastFlushID is stored in DB and we don't need to check again + latestFlushIDIsFulfilled bool +} + +// NewFlushIDController create a new struct ClientFlushIDControl +func NewFlushIDController(state *state.State, ctx context.Context) *ClientFlushIDControl { + return &ClientFlushIDControl{ + state: state, + ctx: ctx, + proverID: "", + previousExecutorFlushID: 0, + } +} + +// SetPendingFlushIDAndCheckProverID set the pending flushID to be written in DB and check proverID +func (s *ClientFlushIDControl) SetPendingFlushIDAndCheckProverID(flushID uint64, proverID string, callDescription string) { + log.Infof("new executor [%s] pending flushID: %d", callDescription, flushID) + s.latestFlushID = flushID + s.latestFlushIDIsFulfilled = false + s.UpdateAndCheckProverID(proverID) +} + +// UpdateAndCheckProverID check the incomming proverID from executor with the last one, if no match finalize synchronizer +// if there are no previous one it keep this value as the current one +func (s *ClientFlushIDControl) UpdateAndCheckProverID(proverID string) { + if s.proverID == "" { + log.Infof("Current proverID is %s", proverID) + s.proverID = proverID + return + } + if s.proverID != proverID { + log.Fatal("restarting synchronizer because executor have restarted (old=%s, new=%s)", s.proverID, proverID) + } +} + +// BlockUntilLastFlushIDIsWritten blocks until the last flushID is written in DB. 
It keep in a loop asking to executor +// the flushid written, also check ProverID +func (s *ClientFlushIDControl) BlockUntilLastFlushIDIsWritten() error { + if s.latestFlushIDIsFulfilled { + log.Debugf("no pending flushID, nothing to do. Last pending fulfilled flushID: %d, last executor flushId received: %d", s.latestFlushID, s.latestFlushID) + return nil + } + storedFlushID, proverID, err := s.state.GetStoredFlushID(s.ctx) + if err != nil { + log.Error("error getting stored flushID. Error: ", err) + return err + } + if (s.previousExecutorFlushID != storedFlushID) || (s.proverID != proverID) { + log.Infof("executor vs local: flushid=%d/%d, proverID=%s/%s", storedFlushID, + s.latestFlushID, proverID, s.proverID) + } else { + log.Debugf("executor vs local: flushid=%d/%d, proverID=%s/%s", storedFlushID, + s.latestFlushID, proverID, s.proverID) + } + s.UpdateAndCheckProverID(proverID) + log.Debugf("storedFlushID (executor reported): %d, latestFlushID (pending): %d", storedFlushID, s.latestFlushID) + if storedFlushID < s.latestFlushID { + log.Infof("Synchornizer BLOCKED!: Wating for the flushID to be stored. FlushID to be stored: %d. Latest flushID stored: %d", + s.latestFlushID, + storedFlushID) + iteration := 0 + start := time.Now() + for storedFlushID < s.latestFlushID { + log.Debugf("Waiting for the flushID to be stored. FlushID to be stored: %d. Latest flushID stored: %d iteration:%d elpased:%s", + s.latestFlushID, storedFlushID, iteration, time.Since(start)) + time.Sleep(100 * time.Millisecond) //nolint:gomnd + storedFlushID, _, err = s.state.GetStoredFlushID(s.ctx) + if err != nil { + log.Error("error getting stored flushID. 
Error: ", err) + return err + } + iteration++ + } + log.Infof("Synchornizer resumed, flushID stored: %d", s.latestFlushID) + } + log.Infof("Pending Flushid fullfiled: %d, executor have write %d", s.latestFlushID, storedFlushID) + s.latestFlushIDIsFulfilled = true + s.previousExecutorFlushID = storedFlushID + return nil +} diff --git a/tools/state/estimated_time.go b/tools/state/estimated_time.go new file mode 100644 index 0000000000..e4ee6f59c6 --- /dev/null +++ b/tools/state/estimated_time.go @@ -0,0 +1,32 @@ +package main + +import "time" + +const conversionFactorPercentage = 100 + +type estimatedTimeOfArrival struct { + totalItems int + processedItems int + startTime time.Time + previousStepTime time.Time +} + +func (e *estimatedTimeOfArrival) start(totalItems int) { + e.totalItems = totalItems + e.processedItems = 0 + e.startTime = time.Now() + e.previousStepTime = e.startTime +} + +// return eta time.Duration, percent float64, itemsPerSecond float64 +func (e *estimatedTimeOfArrival) step(itemsProcessedInthisStep int) (time.Duration, float64, float64) { + e.processedItems += itemsProcessedInthisStep + + curentTime := time.Now() + elapsedTime := curentTime.Sub(e.startTime) + eta := time.Duration(float64(elapsedTime) / float64(e.processedItems) * float64(e.totalItems-e.processedItems)) + percent := float64(e.processedItems) / float64(e.totalItems) * conversionFactorPercentage + itemsPerSecond := float64(e.processedItems) / float64(elapsedTime.Seconds()) + e.previousStepTime = curentTime + return eta, percent, itemsPerSecond +} diff --git a/tools/state/main.go b/tools/state/main.go new file mode 100644 index 0000000000..7696dc87e0 --- /dev/null +++ b/tools/state/main.go @@ -0,0 +1,99 @@ +package main + +import ( + "log" + "os" + + "github.com/0xPolygonHermez/zkevm-node" + "github.com/0xPolygonHermez/zkevm-node/config" + "github.com/urfave/cli/v2" +) + +const ( + appName = "zkevm-statedb-tool" +) + +const ( + flagChainID = "l2_chain_id" +) + +var ( + configFileFlag 
= cli.StringFlag{ + Name: config.FlagCfg, + Aliases: []string{"c"}, + Usage: "Configuration `FILE`", + Required: false, + } + configChainIDFlag = cli.StringFlag{ + Name: flagChainID, + Usage: "forced L2 chain id instead of asking SMC", + Required: false, + } + networkFlag = cli.StringFlag{ + Name: config.FlagNetwork, + Aliases: []string{"net"}, + Usage: "Load default network configuration. Supported values: [`mainnet`, `testnet`, `custom`]", + Required: false, + } + customNetworkFlag = cli.StringFlag{ + Name: config.FlagCustomNetwork, + Aliases: []string{"net-file"}, + Usage: "Load the network configuration file if --network=custom", + Required: false, + } + firstBatchNumberFlag = cli.StringFlag{ + Name: "first_batch_number", + Aliases: []string{"start"}, + Usage: "First batch number (default:1)", + Required: false, + } + lastBatchNumberFlag = cli.StringFlag{ + Name: "last_batch_number", + Aliases: []string{"end"}, + Usage: "Last batch number (default:last one on batch table)", + Required: false, + } + writeOnHashDBFlag = cli.BoolFlag{ + Name: "write_on_hash_db", + Usage: "When process batches say to exectuor to write on the MT in a persistent way (default:false)", + Required: false, + } + dontStopOnErrorFlag = cli.BoolFlag{ + Name: "dont_stop_on_error", + Usage: "Keep processing even if a batch have an error (default:false)", + Required: false, + } + preferExecutionStateRootFlag = cli.BoolFlag{ + Name: "prefer_execution_state_root", + Usage: "Instaed of using the state_root from previous batch use the stateRoot from previous execution (default:false)", + Required: false, + } +) + +func main() { + app := cli.NewApp() + app.Name = appName + app.Version = zkevm.Version + + app.Commands = []*cli.Command{ + { + Name: "version", + Aliases: []string{}, + Usage: "Application version and build", + Action: versionCmd, + }, + { + Name: "reprocess", + Aliases: []string{}, + Usage: "reprocess batches", + Action: reprocessCmd, + Flags: []cli.Flag{&configFileFlag, &networkFlag, 
&customNetworkFlag, &configChainIDFlag, &firstBatchNumberFlag, + &lastBatchNumberFlag, &writeOnHashDBFlag, &dontStopOnErrorFlag, &preferExecutionStateRootFlag}, + }, + } + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + os.Exit(1) + } +} diff --git a/tools/state/output_interface.go b/tools/state/output_interface.go new file mode 100644 index 0000000000..b29873cf93 --- /dev/null +++ b/tools/state/output_interface.go @@ -0,0 +1,13 @@ +package main + +import "github.com/ethereum/go-ethereum/common" + +type reprocessingOutputer interface { + start(fromBatchNumber uint64, toBatchNumber uint64, l2ChainId uint64) + startProcessingBatch(current_batch_number uint64) + numOfTransactionsInBatch(numOfTrs int) + addTransactionError(trxIndex int, err error) + isWrittenOnHashDB(isWritten bool, flushid uint64) + finishProcessingBatch(stateRoot common.Hash, err error) + end(err error) +} diff --git a/tools/state/output_pretty.go b/tools/state/output_pretty.go new file mode 100644 index 0000000000..92a529a034 --- /dev/null +++ b/tools/state/output_pretty.go @@ -0,0 +1,78 @@ +package main + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" +) + +type trxErrorEntry struct { + trxIndex int + err error +} + +type reprocessingOutputPretty struct { + fromBatchNumber uint64 + toBatchNumber uint64 + l2ChainId uint64 + timeStart time.Time + trxErrors []trxErrorEntry + thereisABatchProcessingInProgress bool + currentBatchNumber uint64 + estimatedTime estimatedTimeOfArrival +} + +func (o *reprocessingOutputPretty) start(fromBatchNumber uint64, toBatchNumber uint64, l2ChainId uint64) { + o.fromBatchNumber = fromBatchNumber + o.toBatchNumber = toBatchNumber + o.l2ChainId = l2ChainId + o.timeStart = time.Now() + o.trxErrors = make([]trxErrorEntry, 0) + o.estimatedTime.start(int(toBatchNumber - fromBatchNumber)) + fmt.Printf("START: batches [%d to %d] l2ChainId=[%d]\n", fromBatchNumber, toBatchNumber, l2ChainId) +} + +func (o *reprocessingOutputPretty) 
numOfTransactionsInBatch(numOfTrs int) { + fmt.Printf(" ntx: %3d", numOfTrs) +} + +func (o *reprocessingOutputPretty) startProcessingBatch(current_batch_number uint64) { + fmt.Printf("\t batch %6d %2.2f%%: ...", current_batch_number, float64(current_batch_number-o.fromBatchNumber)*100/float64(o.toBatchNumber-o.fromBatchNumber)) + o.currentBatchNumber = current_batch_number + o.thereisABatchProcessingInProgress = true +} +func (o *reprocessingOutputPretty) addTransactionError(trxIndex int, err error) { + o.trxErrors = append(o.trxErrors, trxErrorEntry{trxIndex: trxIndex, err: err}) +} + +func (o *reprocessingOutputPretty) finishProcessingBatch(stateRoot common.Hash, err error) { + estimatedTime, _, itemsPerSecond := o.estimatedTime.step(1) + fmt.Printf(" ETA:%10s speed:%3.1f batch/s ", estimatedTime.Round(time.Second), itemsPerSecond) + if err == nil { + fmt.Printf(" StateRoot:%30s [OK]\n", stateRoot) + } else { + fmt.Printf(" StateRoot:%30s [ERROR] %v\n", "", err) + } + for _, trxError := range o.trxErrors { + fmt.Printf("\t\t[ERROR] trx %d: %v\n", trxError.trxIndex, trxError.err) + } + o.trxErrors = make([]trxErrorEntry, 0) + o.thereisABatchProcessingInProgress = false +} + +func (o *reprocessingOutputPretty) isWrittenOnHashDB(isWritten bool, flushid uint64) { + if isWritten { + fmt.Printf(" WRITE (flush:%5d) ", flushid) + } +} + +func (o *reprocessingOutputPretty) end(err error) { + if err != nil { + if o.thereisABatchProcessingInProgress { + o.finishProcessingBatch(common.Hash{}, err) + } else { + fmt.Printf("\n[ERROR] %v", err) + } + } +} diff --git a/tools/state/reprocess_action.go b/tools/state/reprocess_action.go new file mode 100644 index 0000000000..83c3cac0d4 --- /dev/null +++ b/tools/state/reprocess_action.go @@ -0,0 +1,160 @@ +package main + +import ( + "context" + "fmt" + + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/common" +) + +type reprocessAction struct { + 
firstBatchNumber uint64 + lastBatchNumber uint64 + l2ChainId uint64 + // If true, when execute a batch write the MT in hashDB + updateHasbDB bool + stopOnError bool + preferExecutionStateRoot bool + + st *state.State + ctx context.Context + output reprocessingOutputer + flushIdCtrl flushIDController +} + +func (r *reprocessAction) start() error { + lastBatch := r.lastBatchNumber + firstBatchNumber := r.firstBatchNumber + + batch, err := getBatchByNumber(r.ctx, r.st, firstBatchNumber-1) + if err != nil { + log.Errorf("no batch %d. Error: %v", 0, err) + return err + } + oldStateRoot := batch.StateRoot + oldAccInputHash := batch.AccInputHash + + for i := uint64(firstBatchNumber); i < lastBatch; i++ { + r.output.startProcessingBatch(i) + batchOnDB, response, err := r.stepWithFlushId(i, oldStateRoot, oldAccInputHash) + if response != nil { + r.output.finishProcessingBatch(response.NewStateRoot, err) + } else { + r.output.finishProcessingBatch(common.Hash{}, err) + } + if batchOnDB != nil { + oldStateRoot = batchOnDB.StateRoot + oldAccInputHash = batchOnDB.AccInputHash + } + + if r.preferExecutionStateRoot && response != nil { + // If there is a response use that instead of the batch on DB + log.Infof("Using as oldStateRoot the execution state root: %s", response.NewStateRoot) + oldStateRoot = response.NewStateRoot + oldAccInputHash = response.NewAccInputHash + } + if r.stopOnError && err != nil { + log.Fatalf("error processing batch %d. 
Error: %v", i, err) + } + } + return nil +} + +func (r *reprocessAction) stepWithFlushId(i uint64, oldStateRoot common.Hash, oldAccInputHash common.Hash) (*state.Batch, *state.ProcessBatchResponse, error) { + if r.updateHasbDB { + err := r.flushIdCtrl.BlockUntilLastFlushIDIsWritten() + if err != nil { + return nil, nil, err + } + } + batchOnDB, response, err := r.step(i, oldStateRoot, oldAccInputHash) + if r.updateHasbDB && err == nil && response != nil { + r.flushIdCtrl.SetPendingFlushIDAndCheckProverID(response.FlushID, response.ProverID, "reprocessAction") + } + return batchOnDB, response, err +} + +// returns: +// - state.Batch -> batch on DB +// - *ProcessBatchResponse -> response of reprocessing batch with EXECTOR +func (r *reprocessAction) step(i uint64, oldStateRoot common.Hash, oldAccInputHash common.Hash) (*state.Batch, *state.ProcessBatchResponse, error) { + dbTx, err := r.st.BeginStateTransaction(r.ctx) + if err != nil { + log.Errorf("error creating db transaction to get latest block. Error: %v", err) + return nil, nil, err + } + + batch2, err := r.st.GetBatchByNumber(r.ctx, i, dbTx) + if err != nil { + log.Errorf("no batch %d. Error: %v", i, err) + return batch2, nil, err + } + + request := state.ProcessRequest{ + BatchNumber: batch2.BatchNumber, + OldStateRoot: oldStateRoot, + OldAccInputHash: oldAccInputHash, + Coinbase: batch2.Coinbase, + Timestamp: batch2.Timestamp, + + GlobalExitRoot: batch2.GlobalExitRoot, + Transactions: batch2.BatchL2Data, + } + log.Debugf("Processing batch %d: ntx:%d StateRoot:%s", batch2.BatchNumber, len(batch2.BatchL2Data), batch2.StateRoot) + forkID := r.st.GetForkIDByBatchNumber(batch2.BatchNumber) + syncedTxs, _, _, err := state.DecodeTxs(batch2.BatchL2Data, forkID) + if err != nil { + log.Errorf("error decoding synced txs from trustedstate. 
Error: %v, TrustedBatchL2Data: %s", err, batch2.BatchL2Data) + return batch2, nil, err + } else { + r.output.numOfTransactionsInBatch(len(syncedTxs)) + } + var response *state.ProcessBatchResponse + + log.Infof("id:%d len_trs:%d oldStateRoot:%s", batch2.BatchNumber, len(syncedTxs), request.OldStateRoot) + response, err = r.st.ProcessBatch(r.ctx, request, r.updateHasbDB) + for tx_i, txresponse := range response.Responses { + if txresponse.RomError != nil { + r.output.addTransactionError(tx_i, txresponse.RomError) + log.Errorf("error processing batch %d. tx:%d Error: %v stateroot:%s", i, tx_i, txresponse.RomError, response.NewStateRoot) + //return txresponse.RomError + } + } + + if err != nil { + r.output.isWrittenOnHashDB(false, response.FlushID) + if rollbackErr := dbTx.Rollback(r.ctx); rollbackErr != nil { + return batch2, response, fmt.Errorf( + "failed to rollback dbTx: %s. Rollback err: %w", + rollbackErr.Error(), err, + ) + } + log.Errorf("error processing batch %d. Error: %v", i, err) + return batch2, response, err + } else { + r.output.isWrittenOnHashDB(r.updateHasbDB, response.FlushID) + } + if response.NewStateRoot != batch2.StateRoot { + if rollbackErr := dbTx.Rollback(r.ctx); rollbackErr != nil { + return batch2, response, fmt.Errorf( + "failed to rollback dbTx: %s. Rollback err: %w", + rollbackErr.Error(), err, + ) + } + log.Errorf("error processing batch %d. Error: state root differs: calculated: %s != expected: %s", i, response.NewStateRoot, batch2.StateRoot) + return batch2, response, fmt.Errorf("missmatch state root calculated: %s != expected: %s", response.NewStateRoot, batch2.StateRoot) + } + + if commitErr := dbTx.Commit(r.ctx); commitErr != nil { + return batch2, response, fmt.Errorf( + "failed to commit dbTx: %s. 
Commit err: %w", + commitErr.Error(), err, + ) + } + + log.Infof("Verified batch %d: ntx:%d StateRoot:%s", i, len(syncedTxs), batch2.StateRoot) + + return batch2, response, nil +} diff --git a/tools/state/reprocess_cmd.go b/tools/state/reprocess_cmd.go new file mode 100644 index 0000000000..88f03afb2a --- /dev/null +++ b/tools/state/reprocess_cmd.go @@ -0,0 +1,183 @@ +package main + +import ( + "context" + "errors" + "fmt" + + "github.com/0xPolygonHermez/zkevm-node/config" + "github.com/0xPolygonHermez/zkevm-node/db" + "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/0xPolygonHermez/zkevm-node/event" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/merkletree" + "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" + "github.com/jackc/pgx/v4/pgxpool" + "github.com/urfave/cli/v2" +) + +func isNetworkConfigNeeded(cliCtx *cli.Context) bool { + // The only reason if to obtain the chainId from the SMC + return cliCtx.Uint64(flagChainID) == 0 +} + +func reprocessCmd(cliCtx *cli.Context) error { + cfg, err := config.Load(cliCtx, isNetworkConfigNeeded(cliCtx)) + if err != nil { + return err + } + log.Init(cfg.Log) + // This connect to database + stateSqlDB, err := db.NewSQLDB(cfg.State.DB) + if err != nil { + log.Fatal(err) + } + l2ChainID := getL2ChainID(cliCtx, cfg) + needsExecutor := true + needsStateTree := true + + st := newState(cliCtx.Context, cfg, l2ChainID, []state.ForkIDInterval{}, stateSqlDB, nil, needsExecutor, needsStateTree) + + forksIdIntervals, err := getforkIDIntervalsFromDB(context.Background(), st) + log.Debugf("forkids: %v err:%s", forksIdIntervals, err) + if err != nil { + log.Errorf("error getting forkIDs from db. 
Error: %v", err) + return err + } + st.UpdateForkIDIntervalsInMemory(forksIdIntervals) + + action := reprocessAction{ + firstBatchNumber: getFirstBatchNumber(cliCtx), + lastBatchNumber: getLastBatchNumber(cliCtx, cliCtx.Context, st), + l2ChainId: l2ChainID, + updateHasbDB: getUpdateHashDB(cliCtx), + st: st, + ctx: cliCtx.Context, + output: &reprocessingOutputPretty{}, + flushIdCtrl: NewFlushIDController(st, cliCtx.Context), + stopOnError: !cliCtx.Bool(dontStopOnErrorFlag.Name), + preferExecutionStateRoot: cliCtx.Bool(preferExecutionStateRootFlag.Name), + } + action.output.start(action.firstBatchNumber, action.lastBatchNumber, l2ChainID) + log.Infof("Reprocessing batches from %d to %d", action.firstBatchNumber, action.lastBatchNumber) + err = action.start() + action.output.end(err) + + if err != nil { + log.Errorf("error reprocessing batches. Error: %v", err) + return err + } + return nil +} + +func getUpdateHashDB(cliCtx *cli.Context) bool { + return cliCtx.Bool(writeOnHashDBFlag.Name) +} + +func newEtherman(c config.Config) (*etherman.Client, error) { + etherman, err := etherman.NewClient(c.Etherman, c.NetworkConfig.L1Config) + if err != nil { + return nil, err + } + return etherman, nil +} + +func getFirstBatchNumber(cliCtx *cli.Context) uint64 { + res := cliCtx.Uint64(firstBatchNumberFlag.Name) + if res == 0 { + return 1 + } + return res +} +func getforkIDIntervalsFromDB(ctx context.Context, st *state.State) ([]state.ForkIDInterval, error) { + log.Debug("getting forkIDs from db") + forkIDIntervals, err := st.GetForkIDs(ctx, nil) + if err != nil && !errors.Is(err, state.ErrStateNotSynchronized) { + return []state.ForkIDInterval{}, fmt.Errorf("error getting forkIDs from db. 
Error: %v", err) + } + return forkIDIntervals, nil +} + +func getLastBatchNumber(cliCtx *cli.Context, ctx context.Context, st *state.State) uint64 { + res := cliCtx.Uint64(lastBatchNumberFlag.Name) + if res == 0 { + dbTx, err := st.BeginStateTransaction(ctx) + if err != nil { + log.Fatalf("error creating db transaction to get latest block. Error: %v", err) + } + lastBatch, err := st.GetLastBatchNumber(ctx, dbTx) + if err != nil { + log.Fatalf("no last batch. Error: %v", err) + } + return lastBatch + } + return res +} + +func getL2ChainID(cliCtx *cli.Context, c *config.Config) uint64 { + flagL2chainID := cliCtx.Uint64(flagChainID) + if flagL2chainID != 0 { + log.Debugf("Using L2ChainID from flag: %d", flagL2chainID) + return flagL2chainID + } + + etherman, err := newEtherman(*c) + if err != nil { + log.Fatal(err) + } + + // READ CHAIN ID FROM POE SC + log.Debug("Reading L2ChainID from SMC") + l2ChainID, err := etherman.GetL2ChainID() + if err != nil { + log.Fatal(err) + } + log.Debugf("Using L2ChainID from SMC: %d", l2ChainID) + return l2ChainID +} + +func newState(ctx context.Context, c *config.Config, l2ChainID uint64, forkIDIntervals []state.ForkIDInterval, sqlDB *pgxpool.Pool, eventLog *event.EventLog, needsExecutor, needsStateTree bool) *state.State { + stateDb := state.NewPostgresStorage(state.Config{}, sqlDB) + + // Executor + var executorClient executor.ExecutorServiceClient + if needsExecutor { + executorClient, _, _ = executor.NewExecutorClient(ctx, c.Executor) + } + + // State Tree + var stateTree *merkletree.StateTree + if needsStateTree { + stateDBClient, _, _ := merkletree.NewMTDBServiceClient(ctx, c.MTClient) + stateTree = merkletree.NewStateTree(stateDBClient) + } + + stateCfg := state.Config{ + MaxCumulativeGasUsed: c.State.Batch.Constraints.MaxCumulativeGasUsed, + ChainID: l2ChainID, + ForkIDIntervals: forkIDIntervals, + MaxResourceExhaustedAttempts: c.Executor.MaxResourceExhaustedAttempts, + WaitOnResourceExhaustion: 
c.Executor.WaitOnResourceExhaustion, + ForkUpgradeBatchNumber: c.ForkUpgradeBatchNumber, + ForkUpgradeNewForkId: c.ForkUpgradeNewForkId, + } + + st := state.NewState(stateCfg, stateDb, executorClient, stateTree, eventLog) + return st +} + +func getBatchByNumber(ctx context.Context, st *state.State, batchNumber uint64) (*state.Batch, error) { + dbTx, err := st.BeginStateTransaction(ctx) + if err != nil { + log.Errorf("error creating db transaction to get latest block. Error: %v", err) + return nil, err + } + batch, err := st.GetBatchByNumber(ctx, batchNumber, dbTx) + if err != nil { + log.Errorf("no batch %d. Error: %v", 0, err) + return nil, err + } + _ = dbTx.Commit(ctx) + return batch, nil +} diff --git a/tools/state/version.go b/tools/state/version.go new file mode 100644 index 0000000000..cd85629245 --- /dev/null +++ b/tools/state/version.go @@ -0,0 +1,13 @@ +package main + +import ( + "os" + + "github.com/0xPolygonHermez/zkevm-node" + "github.com/urfave/cli/v2" +) + +func versionCmd(*cli.Context) error { + zkevm.PrintVersion(os.Stdout) + return nil +} diff --git a/tools/zkevmprovermock/Dockerfile b/tools/zkevmprovermock/Dockerfile deleted file mode 100644 index d4d21e0d8c..0000000000 --- a/tools/zkevmprovermock/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.19-alpine AS build - -ENV CGO_ENABLED=0 -WORKDIR /app -COPY . . -RUN apk add --no-cache build-base && \ - go build -ldflags '-extldflags "-static"' -o ./zkprover-mock ./tools/zkevmprovermock/cmd/... - -FROM alpine:3.16.0 -COPY --from=build /app/zkprover-mock /app/zkprover-mock -CMD ["/bin/sh", "-c", "/app/zkprover-mock server"]