From 7914028045c67b1800a9d6f0a2caf1354393bb20 Mon Sep 17 00:00:00 2001
From: Samuel Furter
Date: Fri, 12 Jan 2024 14:54:55 +0100
Subject: [PATCH 01/54] draft integration of beethoven endpoint for proof settlement

---
 aggregator/aggregator.go | 157 ++++++++++++++++++++++++++++++++++-----
 aggregator/config.go     |  18 +++++
 go.mod                   |  43 +++++------
 go.sum                   |  26 +++++++
 4 files changed, 203 insertions(+), 41 deletions(-)

diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go
index ff9998a18a..02853948b6 100644
--- a/aggregator/aggregator.go
+++ b/aggregator/aggregator.go
@@ -2,9 +2,11 @@ package aggregator

 import (
     "context"
+    "crypto/ecdsa"
     "encoding/json"
     "errors"
     "fmt"
+    "github.com/0xPolygon/beethoven/tx"
     "math/big"
     "net"
     "strconv"
@@ -13,6 +15,8 @@
     "time"
     "unicode"

+    "github.com/0xPolygon/beethoven/client"
+    beethovenTypes "github.com/0xPolygon/beethoven/rpc/types"
     "github.com/0xPolygonHermez/zkevm-node/aggregator/metrics"
     "github.com/0xPolygonHermez/zkevm-node/aggregator/prover"
     "github.com/0xPolygonHermez/zkevm-node/config/types"
@@ -65,6 +69,9 @@ type Aggregator struct {
     srv  *grpc.Server
     ctx  context.Context
     exit context.CancelFunc
+
+    BeethovenClient     client.ClientInterface
+    sequencerPrivateKey *ecdsa.PrivateKey
 }

 // New creates a new aggregator.
@@ -73,6 +80,8 @@ func New(
     stateInterface stateInterface,
     ethTxManager ethTxManager,
     etherman etherman,
+    beethovenClient client.ClientInterface,
+    sequencerPrivateKey *ecdsa.PrivateKey,
 ) (Aggregator, error) {
     var profitabilityChecker aggregatorTxProfitabilityChecker
     switch cfg.TxProfitabilityCheckerType {
@@ -94,6 +103,9 @@
         TimeCleanupLockedProofs: cfg.CleanupLockedProofsInterval,

         finalProof: make(chan finalProofMsg),
+
+        BeethovenClient:     beethovenClient,
+        sequencerPrivateKey: sequencerPrivateKey,
     }

     return a, nil
@@ -267,34 +279,139 @@ func (a *Aggregator) sendFinalProof() {

             log.Infof("Final proof inputs: NewLocalExitRoot [%#x], NewStateRoot [%#x]", inputs.NewLocalExitRoot, inputs.NewStateRoot)

-            // add batch verification to be monitored
-            sender := common.HexToAddress(a.cfg.SenderAddress)
-            to, data, err := a.Ethman.BuildTrustedVerifyBatchesTxData(proof.BatchNumber-1, proof.BatchNumberFinal, &inputs, sender)
-            if err != nil {
-                log.Errorf("Error estimating batch verification to add to eth tx manager: %v", err)
-                a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof)
-                continue
-            }
-            monitoredTxID := buildMonitoredTxID(proof.BatchNumber, proof.BatchNumberFinal)
-            err = a.EthTxManager.Add(ctx, ethTxManagerOwner, monitoredTxID, sender, to, nil, data, a.cfg.GasOffset, nil)
-            if err != nil {
-                mTxLogger := ethtxmanager.CreateLogger(ethTxManagerOwner, monitoredTxID, sender, to)
-                mTxLogger.Errorf("Error to add batch verification tx to eth tx manager: %v", err)
-                a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof)
-                continue
+            switch a.cfg.SettlementBackend {
+            case Beethoven:
+                if success := a.settleWithBeethoven(ctx, proof, inputs); !success {
+                    continue
+                }
+            default:
+                if success := a.settleDirect(ctx, proof, inputs); !success {
+                    continue
+                }
             }

-            // process monitored batch verifications before starting a next cycle
-            a.EthTxManager.ProcessPendingMonitoredTxs(ctx, ethTxManagerOwner, func(result ethtxmanager.MonitoredTxResult, dbTx pgx.Tx) {
-                a.handleMonitoredTxResult(result)
-            }, nil)
-
             a.resetVerifyProofTime()
             a.endProofVerification()
         }
     }
 }

+func (a *Aggregator) settleDirect(
+    ctx context.Context,
+    proof *state.Proof,
+    inputs ethmanTypes.FinalProofInputs,
+) (success bool) {
+    // add batch verification to be monitored
+    sender := common.HexToAddress(a.cfg.SenderAddress)
+
+    to, data, err := a.Ethman.BuildTrustedVerifyBatchesTxData(
+        proof.BatchNumber-1,
+        proof.BatchNumberFinal,
+        &inputs,
+        sender,
+    )
+    if err != nil {
+        log.Errorf("Error estimating batch verification to add to eth tx manager: %v", err)
+        a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof)
+
+        return false
+    }
+
+    monitoredTxID := buildMonitoredTxID(proof.BatchNumber, proof.BatchNumberFinal)
+    err = a.EthTxManager.Add(
+        ctx,
+        ethTxManagerOwner,
+        monitoredTxID,
+        sender,
+        to,
+        nil,
+        data,
+        a.cfg.GasOffset,
+        nil,
+    )
+    if err != nil {
+        mTxLogger := ethtxmanager.CreateLogger(ethTxManagerOwner, monitoredTxID, sender, to)
+        mTxLogger.Errorf("Error to add batch verification tx to eth tx manager: %v", err)
+        a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof)
+
+        return false
+    }
+
+    // process monitored batch verifications before starting a next cycle
+    a.EthTxManager.ProcessPendingMonitoredTxs(
+        ctx,
+        ethTxManagerOwner,
+        func(result ethtxmanager.MonitoredTxResult, dbTx pgx.Tx) {
+            a.handleMonitoredTxResult(result)
+        },
+        nil,
+    )
+
+    return true
+}
+
+func (a *Aggregator) settleWithBeethoven(
+    ctx context.Context,
+    proof *state.Proof,
+    inputs ethmanTypes.FinalProofInputs,
+) (success bool) {
+    proofStrNo0x := strings.TrimPrefix(inputs.FinalProof.Proof, "0x")
+    proofBytes := common.Hex2Bytes(proofStrNo0x)
+    tx := tx.Tx{
+        LastVerifiedBatch: beethovenTypes.ArgUint64(proof.BatchNumber - 1),
+        NewVerifiedBatch:  beethovenTypes.ArgUint64(proof.BatchNumberFinal),
+        ZKP: tx.ZKP{
+            NewStateRoot:     common.BytesToHash(inputs.NewStateRoot),
+            NewLocalExitRoot: common.BytesToHash(inputs.NewLocalExitRoot),
+            Proof:            beethovenTypes.ArgBytes(proofBytes),
+        },
+        RollupID: a.Ethman.RollupID,
+    }
+    signedTx, err := tx.Sign(a.sequencerPrivateKey) // No other way to sign? Dont want to use the PK at too many places...
+
+    if err != nil {
+        log.Errorf("failed to sign tx: %v", err)
+        a.handleFailureToSendToBeethoven(ctx, proof)
+
+        return false
+    }
+
+    log.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex())
+    txHash, err := a.BeethovenClient.SendTx(*signedTx)
+    if err != nil {
+        log.Errorf("failed to send tx to the interop: %v", err)
+        a.handleFailureToSendToBeethoven(ctx, proof)
+
+        return false
+    }
+
+    log.Infof("tx %s sent to beethoven, waiting to be mined", txHash.Hex())
+    log.Debugf("Timeout set to %f seconds", a.cfg.BeethovenTxTimeout.Duration.Seconds())
+    waitCtx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(a.cfg.BeethovenTxTimeout.Duration))
+    defer cancelFunc()
+    if err := a.BeethovenClient.WaitTxToBeMined(txHash, waitCtx); err != nil {
+        log.Errorf("interop didn't mine the tx: %v", err)
+        a.handleFailureToSendToBeethoven(ctx, proof)
+
+        return false
+    }
+
+    // TODO: wait for synchronizer to catch up
+    return true
+}
+
+func (a *Aggregator) handleFailureToSendToBeethoven(ctx context.Context, proof *state.Proof) {
+    log := log.WithFields("proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal))
+    proof.GeneratingSince = nil
+
+    err := a.State.UpdateGeneratedProof(ctx, proof, nil)
+    if err != nil {
+        log.Errorf("Failed updating proof state (false): %v", err)
+    }
+
+    a.endProofVerification()
+}
+
 func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Context, proof *state.Proof) {
     log := log.WithFields("proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal))
     proof.GeneratingSince = nil
diff --git a/aggregator/config.go b/aggregator/config.go
index fd966f2223..db71994566 100644
--- a/aggregator/config.go
+++ b/aggregator/config.go
@@ -8,6 +8,15 @@ import (
     "github.com/0xPolygonHermez/zkevm-node/encoding"
 )

+type SettlementBackend string
+
+const (
+    // To define the beethoven service for settlement
+    Beethoven SettlementBackend = "beethoven"
+    // To define the direct settlement to L1
+    L1 SettlementBackend = "l1"
+)
+
 // TokenAmountWithDecimals is a wrapper type that parses token amount with decimals to big int
 type TokenAmountWithDecimals struct {
     *big.Int `validate:"required"`
@@ -85,4 +94,13 @@
     // gas offset: 100
     // final gas: 1100
     GasOffset uint64 `mapstructure:"GasOffset"`
+
+    // SettlementBackend configuration defines how a final ZKP should be settled. Directly to L1 or over the Beethoven service.
+ SettlementBackend SettlementBackend `mapstructure:"SettlementBackend"` + + // BeethovenTxTimeout is the interval time to wait for a tx to be mined from the beethoven + BeethovenTxTimeout types.Duration `mapstructure:"BeethovenTxTimeout"` + + // BeethovenURL url of the beethoven service + BeethovenURL string `mapstructure:"BeethovenURL"` } diff --git a/go.mod b/go.mod index eb2343fcfc..0f2e3fe131 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18 github.com/didip/tollbooth/v6 v6.1.2 github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 - github.com/ethereum/go-ethereum v1.13.2 + github.com/ethereum/go-ethereum v1.13.8 github.com/go-git/go-billy/v5 v5.5.0 github.com/go-git/go-git/v5 v5.11.0 github.com/gobuffalo/packr/v2 v2.8.3 @@ -20,14 +20,14 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 - github.com/rubenv/sql-migrate v1.5.2 + github.com/rubenv/sql-migrate v1.6.0 github.com/spf13/afero v1.11.0 - github.com/spf13/viper v1.17.0 + github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.8.4 - github.com/umbracle/ethgo v0.1.3 + github.com/umbracle/ethgo v0.1.4-0.20230712173909-df37dddf16f0 github.com/urfave/cli/v2 v2.26.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.16.0 + golang.org/x/crypto v0.17.0 golang.org/x/net v0.19.0 golang.org/x/sync v0.5.0 google.golang.org/grpc v1.60.0 @@ -38,34 +38,35 @@ require ( require ( dario.cat/mergo v1.0.0 // indirect + github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/StackExchange/wmi v1.2.1 // indirect - github.com/VictoriaMetrics/fastcache v1.6.0 // indirect + github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.12.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 // indirect + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.0 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/ethereum/c-kzg-4844 v0.3.1 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/ethereum/c-kzg-4844 
v0.4.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect @@ -108,7 +109,7 @@ require ( github.com/markbates/safe v1.0.1 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-runewidth v0.0.13 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect @@ -116,17 +117,17 @@ require ( github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/skeema/knownhosts v1.2.1 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/stretchr/objx v0.5.0 // indirect @@ -141,12 +142,12 @@ require ( github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/mod v0.14.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/tools v0.15.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect @@ -162,6 +163,6 @@ require ( require ( github.com/fatih/color v1.16.0 github.com/joho/godotenv v1.5.1 - github.com/prometheus/client_golang v1.17.0 - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 + github.com/prometheus/client_golang v1.18.0 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa ) diff --git a/go.sum b/go.sum index 2c816a8982..5e1f4c4758 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc h1:yUoC5oDBHbriJSRfVYWNji+rYJdjxqIibmVpzoXUM3g= +github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc/go.mod h1:V+tv5idj5g9yO/sTPzi9j4waUOcGAC2VakTBWGHB3lw= github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18 
h1:InqeTcHrNbfj1OUfn2aFplFay7ibd7KhYqvmMZYZfn0= github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= @@ -66,6 +68,7 @@ github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDO github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= @@ -86,6 +89,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= @@ -127,6 +131,7 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= @@ -134,6 +139,7 @@ github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/Yj github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/gnark-crypto v0.12.0 h1:1OnSpOykNkUIBIBJKdhwy2p0JlW5o+Az02ICzZmvvdg= github.com/consensys/gnark-crypto v0.12.0/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b h1:pik3LX++5O3UiNWv45wfP/WT81l7ukBJzd3uUiifbSU= github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -149,6 +155,7 @@ github.com/cpuguy83/go-md2man/v2 
v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= @@ -163,6 +170,7 @@ github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/didip/tollbooth/v6 v6.1.2 h1:Kdqxmqw9YTv0uKajBUiWQg+GURL/k4vy9gmLCL01PjQ= @@ -195,8 +203,10 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.13.2 h1:g9mCpfPWqCA1OL4e6C98PeVttb0HadfBRuKTGvMnOvw= github.com/ethereum/go-ethereum v1.13.2/go.mod h1:gkQ5Ygi64ZBh9M/4iXY1R8WqoNCx1Ey0CkYn2BD4/fw= +github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= @@ -211,6 +221,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= @@ -567,6 +578,7 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9 
h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= @@ -648,6 +660,8 @@ github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= @@ -655,6 +669,9 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= @@ -670,6 +687,7 @@ github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OK github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/rubenv/sql-migrate v1.6.0/go.mod h1:m3ilnKP7sNb4eYkLsp6cGdPOl4OBcXM6rcbzU+Oqc5k= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -678,6 +696,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim 
v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -717,6 +736,7 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -728,6 +748,7 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -769,6 +790,7 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/umbracle/ethgo v0.1.3 h1:s8D7Rmphnt71zuqrgsGTMS5gTNbueGO1zKLh7qsFzTM= github.com/umbracle/ethgo v0.1.3/go.mod h1:g9zclCLixH8liBI27Py82klDkW7Oo33AxUOr+M9lzrU= +github.com/umbracle/ethgo v0.1.4-0.20230712173909-df37dddf16f0/go.mod h1:J+OZNfRCtbaYW3AEc0m47GhwAzlNJjcr9vO86nzOr6E= github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 h1:10Nbw6cACsnQm7r34zlpJky+IzxVLRk6MKTS2d3Vp0E= github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722/go.mod h1:c8J0h9aULj2i3umrfyestM6jCq0LK0U6ly6bWy96nd4= github.com/urfave/cli/v2 v2.26.0 h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI= @@ -864,6 +886,7 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -876,6 +899,7 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod 
h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -905,6 +929,7 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1162,6 +1187,7 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 05b033d0ef116b43395c4649d856de21cb8d630b Mon Sep 17 00:00:00 2001 From: Samuel Furter Date: Fri, 12 Jan 2024 15:16:54 +0100 Subject: [PATCH 02/54] Getter added for RollupID to the etherman struct and updated the depending aggreagtor/aggregator.go code --- aggregator/aggregator.go | 2 +- aggregator/interfaces.go | 1 + etherman/etherman.go | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 02853948b6..8ecf1159e8 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -365,7 +365,7 @@ func (a *Aggregator) settleWithBeethoven( NewLocalExitRoot: common.BytesToHash(inputs.NewLocalExitRoot), Proof: beethovenTypes.ArgBytes(proofBytes), }, - RollupID: a.Ethman.RollupID, + RollupID: a.Ethman.GetRollupId(), } signedTx, err := tx.Sign(a.sequencerPrivateKey) // No other way to sign? Dont want to use the PK at too many places... 
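The change above (reading the rollup ID through a.Ethman.GetRollupId() instead of reaching into the exported RollupID field) works together with the interface method added to aggregator/interfaces.go below: the aggregator keeps talking to its narrow etherman interface, so the rollup ID can be stubbed in unit tests, which is also why the mockery-generated Etherman mock gains a GetRollupId stub in PATCH 05. The following is a minimal, self-contained Go sketch of that pattern only; all names in it are illustrative stand-ins, not the actual zkevm-node types.

package main

import "fmt"

// etherman mirrors the shape of the aggregator's consumer-side interface:
// the aggregator only needs the rollup ID, so it asks for it via a getter.
type etherman interface {
	GetRollupId() uint32
}

// client stands in for the concrete etherman client, which carries the
// rollup ID resolved while connecting to L1.
type client struct {
	rollupID uint32
}

func (c *client) GetRollupId() uint32 { return c.rollupID }

// fakeEtherman is what a generated mock boils down to for this one method;
// tests can return any rollup ID without touching L1.
type fakeEtherman struct {
	id uint32
}

func (f *fakeEtherman) GetRollupId() uint32 { return f.id }

// describeSettlement is aggregator-side code that only ever sees the interface.
func describeSettlement(e etherman) string {
	return fmt.Sprintf("settling proof for rollup %d", e.GetRollupId())
}

func main() {
	fmt.Println(describeSettlement(&client{rollupID: 1})) // production wiring
	fmt.Println(describeSettlement(&fakeEtherman{id: 42})) // test wiring
}

The same dependency-injection idea is what lets the tests touched in PATCH 04 pass nil for the beethoven client and sequencer key when constructing the aggregator.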
diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go
index 3bc9c2fe8a..d91acf0b30 100644
--- a/aggregator/interfaces.go
+++ b/aggregator/interfaces.go
@@ -37,6 +37,7 @@

 // etherman contains the methods required to interact with ethereum
 type etherman interface {
+    GetRollupId() uint32
     GetLatestVerifiedBatchNum() (uint64, error)
     BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address) (to *common.Address, data []byte, err error)
 }
diff --git a/etherman/etherman.go b/etherman/etherman.go
index 59b0e267e2..edc5a27300 100644
--- a/etherman/etherman.go
+++ b/etherman/etherman.go
@@ -1753,3 +1753,7 @@ func (etherMan *Client) generateRandomAuth() (bind.TransactOpts, error) {

     return *auth, nil
 }
+
+func (etherMan *Client) GetRollupId() uint32 {
+    return etherMan.RollupID
+}

From 0888768068255b8ddbf8e1bc06e5230e19ee735f Mon Sep 17 00:00:00 2001
From: Samuel Furter
Date: Tue, 16 Jan 2024 11:24:18 +0100
Subject: [PATCH 03/54] trusted sequencer private key added to cmd/run, aggregator/aggregator, and aggregator/config

---
 aggregator/aggregator.go | 2 +-
 aggregator/config.go     | 4 ++++
 cmd/run.go               | 9 ++++++++-
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go
index 8ecf1159e8..7bee2ffa50 100644
--- a/aggregator/aggregator.go
+++ b/aggregator/aggregator.go
@@ -367,7 +367,7 @@ func (a *Aggregator) settleWithBeethoven(
         },
         RollupID: a.Ethman.GetRollupId(),
     }
-    signedTx, err := tx.Sign(a.sequencerPrivateKey) // No other way to sign? Dont want to use the PK at too many places...
+    signedTx, err := tx.Sign(a.sequencerPrivateKey)

     if err != nil {
         log.Errorf("failed to sign tx: %v", err)
diff --git a/aggregator/config.go b/aggregator/config.go
index db71994566..81a9819d93 100644
--- a/aggregator/config.go
+++ b/aggregator/config.go
@@ -1,6 +1,7 @@
 package aggregator

 import (
+    "crypto/ecdsa"
     "fmt"
     "math/big"

@@ -103,4 +104,7 @@ type Config struct {

     // BeethovenURL url of the beethoven service
     BeethovenURL string `mapstructure:"BeethovenURL"`
+
+    // SequencerPrivateKey Private key of the trusted sequencer
+    SequencerPrivateKey *ecdsa.PrivateKey `mapstructure:"SequencerPrivateKey"`
 }
diff --git a/cmd/run.go b/cmd/run.go
index 247501bbcc..cac65b4958 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -12,6 +12,7 @@ import (
     "runtime"
     "time"

+    beethovenClient "github.com/0xPolygon/beethoven/client"
     datastreamerlog "github.com/0xPolygonHermez/zkevm-data-streamer/log"
     "github.com/0xPolygonHermez/zkevm-node"
     "github.com/0xPolygonHermez/zkevm-node/aggregator"
@@ -424,7 +425,13 @@ func createSequenceSender(cfg config.Config, pool *pool.Pool, etmStorage *ethtxm
 }

 func runAggregator(ctx context.Context, c aggregator.Config, etherman *etherman.Client, ethTxManager *ethtxmanager.Client, st *state.State) {
-    agg, err := aggregator.New(c, st, ethTxManager, etherman)
+    var beethCli *beethovenClient.Client
+
+    if c.SettlementBackend == aggregator.Beethoven {
+        beethCli = beethovenClient.New(c.BeethovenURL)
+    }
+
+    agg, err := aggregator.New(c, st, ethTxManager, etherman, beethCli, c.SequencerPrivateKey)
     if err != nil {
         log.Fatal(err)
     }

From 4abe334ad4ddf619f93cbde3e6b3321d32193bce Mon Sep 17 00:00:00 2001
From: Samuel Furter
Date: Tue, 16 Jan 2024 11:25:20 +0100
Subject: [PATCH 04/54] took over the aggregator_test changes from Arnau's PR

---
 aggregator/aggregator_test.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index 700b95bd80..7580ac71d4 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -190,7 +190,7 @@ func TestSendFinalProof(t *testing.T) { stateMock := mocks.NewStateMock(t) ethTxManager := mocks.NewEthTxManager(t) etherman := mocks.NewEtherman(t) - a, err := New(cfg, stateMock, ethTxManager, etherman) + a, err := New(cfg, stateMock, ethTxManager, etherman, nil, nil) require.NoError(err) a.ctx, a.exit = context.WithCancel(context.Background()) m := mox{ @@ -685,7 +685,7 @@ func TestTryAggregateProofs(t *testing.T) { ethTxManager := mocks.NewEthTxManager(t) etherman := mocks.NewEtherman(t) proverMock := mocks.NewProverMock(t) - a, err := New(cfg, stateMock, ethTxManager, etherman) + a, err := New(cfg, stateMock, ethTxManager, etherman, nil, nil) require.NoError(err) aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) @@ -958,7 +958,7 @@ func TestTryGenerateBatchProof(t *testing.T) { ethTxManager := mocks.NewEthTxManager(t) etherman := mocks.NewEtherman(t) proverMock := mocks.NewProverMock(t) - a, err := New(cfg, stateMock, ethTxManager, etherman) + a, err := New(cfg, stateMock, ethTxManager, etherman, nil, nil) require.NoError(err) aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) @@ -1235,7 +1235,7 @@ func TestTryBuildFinalProof(t *testing.T) { ethTxManager := mocks.NewEthTxManager(t) etherman := mocks.NewEtherman(t) proverMock := mocks.NewProverMock(t) - a, err := New(cfg, stateMock, ethTxManager, etherman) + a, err := New(cfg, stateMock, ethTxManager, etherman, nil, nil) require.NoError(err) aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) @@ -1365,7 +1365,7 @@ func TestIsSynced(t *testing.T) { ethTxManager := mocks.NewEthTxManager(t) etherman := mocks.NewEtherman(t) proverMock := mocks.NewProverMock(t) - a, err := New(cfg, stateMock, ethTxManager, etherman) + a, err := New(cfg, stateMock, ethTxManager, etherman, nil, nil) require.NoError(err) aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) From a04fa54efcaf003105854202dcd73b984c9a1db4 Mon Sep 17 00:00:00 2001 From: Samuel Furter Date: Tue, 16 Jan 2024 11:26:02 +0100 Subject: [PATCH 05/54] etherman mock for aggregator test updated with mockery --- aggregator/mocks/mock_etherman.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/aggregator/mocks/mock_etherman.go b/aggregator/mocks/mock_etherman.go index 4f0f97c61e..bde6db5c0d 100644 --- a/aggregator/mocks/mock_etherman.go +++ b/aggregator/mocks/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -81,6 +81,24 @@ func (_m *Etherman) GetLatestVerifiedBatchNum() (uint64, error) { return r0, r1 } +// GetRollupId provides a mock function with given fields: +func (_m *Etherman) GetRollupId() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetRollupId") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + // NewEtherman creates a new instance of Etherman. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewEtherman(t interface { From bd64c7b6c3e6a6d8a109b0b9db9f8cad9bb5f857 Mon Sep 17 00:00:00 2001 From: Samuel Furter Date: Tue, 16 Jan 2024 11:26:41 +0100 Subject: [PATCH 06/54] dependencies updated --- go.mod | 7 ++-- go.sum | 105 +++++++++++++++++++++++---------------------------------- 2 files changed, 47 insertions(+), 65 deletions(-) diff --git a/go.mod b/go.mod index 0f2e3fe131..9b1f90f081 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,6 @@ require ( require ( dario.cat/mergo v1.0.0 // indirect - github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect @@ -55,9 +54,11 @@ require ( github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -68,13 +69,13 @@ require ( github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect + github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/go-stack/stack v1.8.1 // indirect github.com/gobuffalo/logger v1.0.7 // indirect github.com/gobuffalo/packd v1.0.2 // indirect github.com/gofrs/flock v0.8.1 // indirect @@ -118,6 +119,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -161,6 +163,7 @@ require ( ) require ( + github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc github.com/fatih/color v1.16.0 github.com/joho/godotenv v1.5.1 
github.com/prometheus/client_golang v1.18.0 diff --git a/go.sum b/go.sum index 5e1f4c4758..19b5e146ab 100644 --- a/go.sum +++ b/go.sum @@ -66,8 +66,7 @@ github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjA github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= -github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= @@ -87,8 +86,7 @@ github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xW github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= @@ -104,7 +102,6 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -122,26 +119,26 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= -github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven 
v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= -github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.0 h1:1OnSpOykNkUIBIBJKdhwy2p0JlW5o+Az02ICzZmvvdg= -github.com/consensys/gnark-crypto v0.12.0/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= -github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b h1:pik3LX++5O3UiNWv45wfP/WT81l7ukBJzd3uUiifbSU= -github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -153,8 +150,9 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= -github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 
h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -166,10 +164,9 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -201,11 +198,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= -github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.13.2 h1:g9mCpfPWqCA1OL4e6C98PeVttb0HadfBRuKTGvMnOvw= -github.com/ethereum/go-ethereum v1.13.2/go.mod h1:gkQ5Ygi64ZBh9M/4iXY1R8WqoNCx1Ey0CkYn2BD4/fw= +github.com/ethereum/go-ethereum v1.13.8 h1:1od+thJel3tM52ZUNQwvpYOeRHlbkVFZ5S8fhi0Lgsg= github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -214,17 +209,18 @@ github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4Nij github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 
h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= @@ -267,8 +263,6 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= github.com/gobuffalo/logger v1.0.7 h1:LTLwWelETXDYyqF/ASf0nxaIcdEOIJNxRokPcfI/xbU= @@ -294,8 +288,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= -github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -329,7 +323,6 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -576,8 +569,8 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= @@ -630,12 +623,12 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= 
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -658,8 +651,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -667,10 +658,9 @@ github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cY github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -685,8 +675,7 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= -github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/rubenv/sql-migrate v1.6.0 h1:IZpcTlAx/VKXphWEpwWJ7BaMq05tYtE80zYz+8a5Il8= github.com/rubenv/sql-migrate v1.6.0/go.mod h1:m3ilnKP7sNb4eYkLsp6cGdPOl4OBcXM6rcbzU+Oqc5k= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -694,8 +683,7 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= -github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= @@ -734,8 +722,7 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= @@ -746,8 +733,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= -github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= @@ -788,8 +774,7 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/umbracle/ethgo v0.1.3 h1:s8D7Rmphnt71zuqrgsGTMS5gTNbueGO1zKLh7qsFzTM= -github.com/umbracle/ethgo v0.1.3/go.mod h1:g9zclCLixH8liBI27Py82klDkW7Oo33AxUOr+M9lzrU= +github.com/umbracle/ethgo v0.1.4-0.20230712173909-df37dddf16f0 h1:wE2g4ydxJk8kdRIRq69zZvnhJ49ShS2BJSzFBFdMv6I= github.com/umbracle/ethgo v0.1.4-0.20230712173909-df37dddf16f0/go.mod h1:J+OZNfRCtbaYW3AEc0m47GhwAzlNJjcr9vO86nzOr6E= github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 h1:10Nbw6cACsnQm7r34zlpJky+IzxVLRk6MKTS2d3Vp0E= github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722/go.mod h1:c8J0h9aULj2i3umrfyestM6jCq0LK0U6ly6bWy96nd4= @@ -884,8 +869,7 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod 
h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -897,8 +881,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -927,8 +910,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1064,7 +1046,6 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ 
-1082,7 +1063,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1185,8 +1165,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 5c48881bd0dd34944b4fe2194f4782e27bc273c5 Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Fri, 19 Jan 2024 15:48:28 +0100 Subject: [PATCH 07/54] Go get --- go.sum | 1 + 1 file changed, 1 insertion(+) diff --git a/go.sum b/go.sum index 5a6f9c42d0..7223dd0fad 100644 --- a/go.sum +++ b/go.sum @@ -570,6 +570,7 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= From 44a23ee2cdbbdbfe6b3c53a8541cf17ef3d82dfb Mon Sep 17 00:00:00 2001 From: agnusmor Date: Fri, 19 Jan 2024 17:19:22 +0100 Subject: [PATCH 08/54] fix smtProof --- aggregator/aggregator.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index ff9998a18a..11b5d4f4ad 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -1022,17 +1022,31 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. 
 	aLeaves := make([][32]byte, len(leaves))
 	for i, leaf := range leaves {
 		aLeaves[i] = l1infotree.HashLeafData(leaf.GlobalExitRoot.GlobalExitRoot, leaf.PreviousBlockHash, uint64(leaf.Timestamp.Unix()))
+		log.Debugf("aLeaves[%d]: %s", i, common.Bytes2Hex(aLeaves[i][:]))
 	}

+	log.Debugf("IndexL1InfoTree: %d", l2blockRaw.IndexL1InfoTree)
+
 	// Calculate smt proof
-	smtProof, _, err := tree.ComputeMerkleProof(l2blockRaw.IndexL1InfoTree, aLeaves)
+	smtProof, l1InfoRoot, err := tree.ComputeMerkleProof(l2blockRaw.IndexL1InfoTree, aLeaves)
 	if err != nil {
 		return nil, err
 	}

+	log.Debugf("L1InfoRoot: %s", l1InfoRoot.String())
+
+	for i, proof := range smtProof {
+		log.Debugf("smtProof[%d]: %s", i, common.Bytes2Hex(proof[:]))
+	}
+
 	protoProof := make([][]byte, len(smtProof))
 	for i, proof := range smtProof {
-		protoProof[i] = proof[:]
+		tmpProof := proof
+		protoProof[i] = tmpProof[:]
+	}
+
+	for i, proof := range protoProof {
+		log.Debugf("proof[%d]: %s", i, common.Bytes2Hex(proof))
 	}

 	l1InfoTreeData[l2blockRaw.IndexL1InfoTree] = &prover.L1Data{

From e2879d232efd515604ed115480ace9b703ed10be Mon Sep 17 00:00:00 2001
From: Victor Castell <0x@vcastellm.xyz>
Date: Fri, 19 Jan 2024 17:37:32 +0100
Subject: [PATCH 09/54] Go sum

---
 go.sum | 1 +
 1 file changed, 1 insertion(+)

diff --git a/go.sum b/go.sum
index 7223dd0fad..c553b3c9b1 100644
--- a/go.sum
+++ b/go.sum
@@ -570,6 +570,7 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=
 github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=

From 54312566ce0ca82eb6a98993c6066381255fca0b Mon Sep 17 00:00:00 2001
From: John Hilliard
Date: Mon, 22 Jan 2024 09:59:38 -0500
Subject: [PATCH 10/54] fix: using constructor to create StackTrie

In some circumstances, we've encountered panics when using the empty
struct for the stacktrie because the hasher is nil. This commit switches
to using `NewStackTrie` to create the object and avoids those panics.

---
 state/transaction.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/state/transaction.go b/state/transaction.go
index 2f306dc2a9..986d4dd9b6 100644
--- a/state/transaction.go
+++ b/state/transaction.go
@@ -307,7 +307,8 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P
 	}

 	// Create block to be able to calculate its hash
-	block := NewL2Block(l2Header, transactions, []*L2Header{}, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	block := NewL2Block(l2Header, transactions, []*L2Header{}, receipts, st)
 	block.ReceivedAt = time.Unix(int64(l2Block.Timestamp), 0)

 	for _, receipt := range receipts {

From 6a468ecab1f0219db925a4b0d22198456992a280 Mon Sep 17 00:00:00 2001
From: Victor Castell <0x@vcastellm.xyz>
Date: Tue, 23 Jan 2024 17:13:35 +0100
Subject: [PATCH 11/54] Load PK from Keystore file config

Not sure if this was already implemented and can be reused.
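Note on the keystore loading this patch introduces: the `NewKeyFromKeystore` helper added to config/config.go below decrypts a go-ethereum keystore file referenced by a `types.KeystoreFileConfig` (Path and Password) and hands the resulting `*ecdsa.PrivateKey` to `aggregator.New` in cmd/run.go. A minimal standalone sketch of the same decrypt-and-use flow is shown here; the file path, password, and the address printout are illustrative placeholders, not part of the patch.

// Illustrative sketch (not part of the patch): decrypt a go-ethereum keystore
// file and derive the sequencer address from the resulting ECDSA key.
// The path and password are placeholder values.
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Read the encrypted keystore JSON from disk (placeholder path).
	keystoreEncrypted, err := os.ReadFile(filepath.Clean("/pk/sequencer.keystore"))
	if err != nil {
		log.Fatal(err)
	}
	// Decrypt it with the configured password (placeholder value).
	key, err := keystore.DecryptKey(keystoreEncrypted, "testonly")
	if err != nil {
		log.Fatal(err)
	}
	// key.PrivateKey is the *ecdsa.PrivateKey the aggregator receives in this patch.
	fmt.Println("sequencer address:", crypto.PubkeyToAddress(key.PrivateKey.PublicKey).Hex())
}

As added, the helper returns a nil key when both Path and Password are empty, so deployments that do not settle through Beethoven can leave `SequencerPrivateKey` unset.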
--- aggregator/config.go | 3 +-- cmd/run.go | 8 +++++++- config/config.go | 20 ++++++++++++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/aggregator/config.go b/aggregator/config.go index 81a9819d93..9866938a5b 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -1,7 +1,6 @@ package aggregator import ( - "crypto/ecdsa" "fmt" "math/big" @@ -106,5 +105,5 @@ type Config struct { BeethovenURL string `mapstructure:"BeethovenURL"` // SequencerPrivateKey Private key of the trusted sequencer - SequencerPrivateKey *ecdsa.PrivateKey `mapstructure:"SequencerPrivateKey"` + SequencerPrivateKey types.KeystoreFileConfig `mapstructure:"SequencerPrivateKey"` } diff --git a/cmd/run.go b/cmd/run.go index 27cb55696c..006ebc3046 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -432,7 +432,13 @@ func runAggregator(ctx context.Context, c aggregator.Config, etherman *etherman. beethCli = beethovenClient.New(c.BeethovenURL) } - agg, err := aggregator.New(c, st, ethTxManager, etherman, beethCli, c.SequencerPrivateKey) + // Load private key + pk, err := config.NewKeyFromKeystore(c.SequencerPrivateKey) + if err != nil { + log.Fatal(err) + } + + agg, err := aggregator.New(c, st, ethTxManager, etherman, beethCli, pk) if err != nil { log.Fatal(err) } diff --git a/config/config.go b/config/config.go index acfb2186f8..a370c89235 100644 --- a/config/config.go +++ b/config/config.go @@ -2,10 +2,13 @@ package config import ( "bytes" + "crypto/ecdsa" + "os" "path/filepath" "strings" "github.com/0xPolygonHermez/zkevm-node/aggregator" + "github.com/0xPolygonHermez/zkevm-node/config/types" "github.com/0xPolygonHermez/zkevm-node/db" "github.com/0xPolygonHermez/zkevm-node/etherman" "github.com/0xPolygonHermez/zkevm-node/ethtxmanager" @@ -21,6 +24,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/0xPolygonHermez/zkevm-node/synchronizer" + "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/mitchellh/mapstructure" "github.com/spf13/viper" "github.com/urfave/cli/v2" @@ -183,3 +187,19 @@ func Load(ctx *cli.Context, loadNetworkConfig bool) (*Config, error) { } return cfg, nil } + +// NewKeyFromKeystore creates a private key from a keystore file +func NewKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) { + if cfg.Path == "" && cfg.Password == "" { + return nil, nil + } + keystoreEncrypted, err := os.ReadFile(filepath.Clean(cfg.Path)) + if err != nil { + return nil, err + } + key, err := keystore.DecryptKey(keystoreEncrypted, cfg.Password) + if err != nil { + return nil, err + } + return key.PrivateKey, nil +} From 6e8ad9eacf37c696e5bc16e84c188633843455fd Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Tue, 23 Jan 2024 18:35:34 +0100 Subject: [PATCH 12/54] fix migration batch wip field. 
fix sequence when no wip batch (#3126) --- db/migrations/state/0013.sql | 2 +- sequencer/l2block.go | 2 +- sequencesender/sequencesender.go | 20 +++++++++++++++----- state/batch.go | 2 +- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/db/migrations/state/0013.sql b/db/migrations/state/0013.sql index cd18a7b14a..fca1ffefbb 100644 --- a/db/migrations/state/0013.sql +++ b/db/migrations/state/0013.sql @@ -11,7 +11,7 @@ ALTER TABLE state.transaction CREATE INDEX IF NOT EXISTS idx_transaction_l2_hash ON state.transaction (l2_hash); ALTER TABLE state.batch - ADD COLUMN IF NOT EXISTS wip BOOLEAN NOT NULL; + ADD COLUMN IF NOT EXISTS wip BOOLEAN NOT NULL DEFAULT FALSE; ALTER TABLE state.virtual_batch ADD COLUMN IF NOT EXISTS timestamp_batch_etrog TIMESTAMP WITH TIME ZONE NULL, diff --git a/sequencer/l2block.go b/sequencer/l2block.go index 37f9d89777..9ea43f03b4 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -504,7 +504,7 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp time.Ti } } - log.Infof("new WIP L2 block [%d] created, batch: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfTreeIndexChanged: %s, oldStateRoot: %s, stateRoot: %s, used counters: %s", + log.Infof("new WIP L2 block [%d] created, batch: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfTreeIndexChanged: %v, oldStateRoot: %s, stateRoot: %s, used counters: %s", f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.timestamp.Unix(), f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, f.wipL2Block.l1InfoTreeExitRootChanged, f.wipBatch.imStateRoot, batchResponse.NewStateRoot, f.logZKCounters(f.wipL2Block.usedResources.ZKCounters)) } diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index aa92886f80..22c362cc2a 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -187,8 +187,11 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) ([]types.Sequen if err != nil { return nil, fmt.Errorf("failed to get last virtual batch num, err: %w", err) } + log.Debugf("last virtual batch number: %d", lastVirtualBatchNum) currentBatchNumToSequence := lastVirtualBatchNum + 1 + log.Debugf("current batch number to sequence: %d", currentBatchNumToSequence) + sequences := []types.Sequence{} // var estimatedGas uint64 @@ -202,20 +205,27 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) ([]types.Sequen return nil, fmt.Errorf("aborting sequencing process as we reached the batch %d where a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber+1) } + // Add new sequence + batch, err := s.state.GetBatchByNumber(ctx, currentBatchNumToSequence, nil) + if err != nil { + if err == state.ErrNotFound { + break + } + log.Debugf("failed to get batch by number %d, err: %w", currentBatchNumToSequence, err) + return nil, err + } + // Check if batch is closed isClosed, err := s.state.IsBatchClosed(ctx, currentBatchNumToSequence, nil) if err != nil { + log.Debugf("failed to check if batch %d is closed, err: %w", currentBatchNumToSequence, err) return nil, err } + if !isClosed { // Reached current (WIP) batch break } - // Add new sequence - batch, err := s.state.GetBatchByNumber(ctx, currentBatchNumToSequence, nil) - if err != nil { - return nil, err - } seq := types.Sequence{ GlobalExitRoot: batch.GlobalExitRoot, //TODO: set empty for regular batches diff --git a/state/batch.go b/state/batch.go index 174492c957..040a991ae0 100644 --- a/state/batch.go +++ b/state/batch.go @@ -70,7 +70,7 @@ const ( // 
ForcedBatchDeadlineClosingReason is the closing reason used when forced batch deadline is reached ForcedBatchDeadlineClosingReason ClosingReason = "Forced batch deadline" // MaxDeltaTimestampClosingReason is the closing reason used when max delta batch timestamp is reached - MaxDeltaTimestampClosingReason ClosingReason = "Max delta timestamp delta" + MaxDeltaTimestampClosingReason ClosingReason = "Max delta timestamp" // NoTxFitsClosingReason is the closing reason used when any of the txs in the pool (worker) fits in the remaining resources of the batch NoTxFitsClosingReason ClosingReason = "No transactions fits" ) From 22b2651997531ee385dcc4e8dd47329a218392fe Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Tue, 23 Jan 2024 19:07:33 +0100 Subject: [PATCH 13/54] fix Leaves smtproof (#3127) * fix Leaves smtproof * pointer protection * fix unit test --- aggregator/aggregator.go | 30 +++++++++++++++++++----------- aggregator/aggregator_test.go | 5 +++++ 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index b30cd005ff..74aed60c3b 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -1004,6 +1004,15 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. if err != nil { return nil, err } + leaves, err := a.State.GetLeafsByL1InfoRoot(ctx, *l1InfoRoot, nil) + if err != nil { + return nil, err + } + + aLeaves := make([][32]byte, len(leaves)) + for i, leaf := range leaves { + aLeaves[i] = l1infotree.HashLeafData(leaf.GlobalExitRoot.GlobalExitRoot, leaf.PreviousBlockHash, uint64(leaf.Timestamp.Unix())) + } for _, l2blockRaw := range batchRawData.Blocks { _, contained := l1InfoTreeData[l2blockRaw.IndexL1InfoTree] @@ -1013,21 +1022,20 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. return nil, err } - leaves, err := a.State.GetLeafsByL1InfoRoot(ctx, l1InfoTreeExitRootStorageEntry.L1InfoTreeRoot, nil) - if err != nil { - return nil, err - } - - aLeaves := make([][32]byte, len(leaves)) - for i, leaf := range leaves { - aLeaves[i] = l1infotree.HashLeafData(leaf.GlobalExitRoot.GlobalExitRoot, leaf.PreviousBlockHash, uint64(leaf.Timestamp.Unix())) - } - // Calculate smt proof - smtProof, _, err := tree.ComputeMerkleProof(l2blockRaw.IndexL1InfoTree, aLeaves) + smtProof, calculatedL1InfoRoot, err := tree.ComputeMerkleProof(l2blockRaw.IndexL1InfoTree, aLeaves) if err != nil { return nil, err } + if l1InfoRoot != nil && *l1InfoRoot != calculatedL1InfoRoot { + for i, l := range aLeaves { + log.Info("AllLeaves[%d]: %s", i, common.Bytes2Hex(l[:])) + } + for i, s := range smtProof { + log.Info("smtProof[%d]: %s", i, common.Bytes2Hex(s[:])) + } + return nil, fmt.Errorf("error: l1InfoRoot mismatch. L1InfoRoot: %s, calculatedL1InfoRoot: %s. 
l1InfoTreeIndex: %d", l1InfoRoot.String(), calculatedL1InfoRoot.String(), l2blockRaw.IndexL1InfoTree) + } protoProof := make([][]byte, len(smtProof)) for i, proof := range smtProof { diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index 1dc49448ce..eb51a09381 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -799,6 +799,7 @@ func TestTryGenerateBatchProof(t *testing.T) { TimestampBatchEtrog: &t, } m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() + m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(nil, errBanana).Once() @@ -840,6 +841,7 @@ func TestTryGenerateBatchProof(t *testing.T) { TimestampBatchEtrog: &t, } m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() + m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -882,6 +884,7 @@ func TestTryGenerateBatchProof(t *testing.T) { TimestampBatchEtrog: &t, } m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() + m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -924,6 +927,7 @@ func TestTryGenerateBatchProof(t *testing.T) { TimestampBatchEtrog: &t, } m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() + m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -980,6 +984,7 @@ func TestTryGenerateBatchProof(t *testing.T) { TimestampBatchEtrog: &t, } m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() + m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() From 1962ca1ca27f96b47ef5cb34f902c950001d1803 Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Wed, 24 Jan 2024 09:48:10 +0100 Subject: [PATCH 14/54] Add l2block log dump when storing error. Refactor ProcessBatchV2 logs (#3129) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add l2block log dump when storing error. 
refactor ProcessBatchV2 logs * add AddL2Block log * fix index AddL2Block log * use aux variable to avoid reusing poiner --------- Co-authored-by: Toni Ramírez --- sequencer/l2block.go | 25 ++++++- state/batchV2.go | 121 +++++++++++++++----------------- state/convertersV2.go | 12 ---- state/pgstatestorage/l2block.go | 5 +- 4 files changed, 83 insertions(+), 80 deletions(-) diff --git a/sequencer/l2block.go b/sequencer/l2block.go index 9ea43f03b4..bfcafe2305 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -138,6 +138,8 @@ func (f *finalizer) storePendingL2Blocks(ctx context.Context) { err := f.storeL2Block(ctx, l2Block) if err != nil { + // Dump L2Block info + f.logL2Block(l2Block) f.Halt(ctx, fmt.Errorf("error storing L2 block %d [%d], error: %v", l2Block.batchResponse.BlockResponses[0].BlockNumber, l2Block.trackingNum, err)) } @@ -444,8 +446,8 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp time.Ti newL2Block := &L2Block{} // Tracking number - newL2Block.trackingNum = f.l2BlockCounter f.l2BlockCounter++ + newL2Block.trackingNum = f.l2BlockCounter log.Debugf("opening new WIP L2 block [%d]", newL2Block.trackingNum) @@ -553,3 +555,24 @@ func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBat return batchResponse, nil } + +func (f *finalizer) logL2Block(l2Block *L2Block) { + var blockResp *state.ProcessBlockResponse + if l2Block.batchResponse != nil { + if len(l2Block.batchResponse.BlockResponses) > 0 { + blockResp = l2Block.batchResponse.BlockResponses[0] + } + } + + if blockResp != nil { + log.Infof("DUMP L2 block %d [%d], Timestamp: %d, ParentHash: %s, Coinbase: %s, GER: %s, BlockHashL1: %s, GasUsed: %d, BlockInfoRoot: %s, BlockHash: %s", + blockResp.BlockNumber, l2Block.trackingNum, blockResp.Timestamp, blockResp.ParentHash, blockResp.Coinbase, blockResp.GlobalExitRoot, blockResp.BlockHashL1, + blockResp.GasUsed, blockResp.BlockInfoRoot, blockResp.BlockHash) + + for i, txResp := range blockResp.TransactionResponses { + log.Infof(" tx[%d] Hash: %s, HashL2: %s, StateRoot: %s, Type: %d, GasLeft: %d, GasUsed: %d, GasRefund: %d, CreateAddress: %s, ChangesStateRoot: %v, EGP: %s, EGPPct: %d, HasGaspriceOpcode: %v, HasBalanceOpcode: %v", + i, txResp.TxHash, txResp.TxHashL2_V2, txResp.StateRoot, txResp.Type, txResp.GasLeft, txResp.GasUsed, txResp.GasRefunded, txResp.CreateAddress, txResp.ChangesStateRoot, txResp.EffectiveGasPrice, + txResp.EffectivePercentage, txResp.HasGaspriceOpcode, txResp.HasBalanceOpcode) + } + } +} diff --git a/state/batchV2.go b/state/batchV2.go index 5a3d9cd6be..53b1f25128 100644 --- a/state/batchV2.go +++ b/state/batchV2.go @@ -262,7 +262,7 @@ func (s *State) sendBatchRequestToExecutorV2(ctx context.Context, batchRequest * return nil, ErrExecutorNil } - request := "BatchNum: %v, OldBatchNum: %v, From: %v, OldStateRoot: %v, L1InfoRoot: %v, OldAccInputHash: %v, TimestampLimit: %v, Coinbase: %v, UpdateMerkleTree: %v, SkipFirstChangeL2Block: %v, SkipWriteBlockInfoRoot: %v, ChainId: %v, ForkId: %v, ContextId: %v, SkipVerifyL1InfoRoot: %v, ForcedBlockhashL1: %v, L1InfoTreeData: %+v, BatchL2Data: %v" + batchRequestLog := "BatchNum: %v, OldBatchNum: %v, From: %v, OldStateRoot: %v, L1InfoRoot: %v, OldAccInputHash: %v, TimestampLimit: %v, Coinbase: %v, UpdateMerkleTree: %v, SkipFirstChangeL2Block: %v, SkipWriteBlockInfoRoot: %v, ChainId: %v, ForkId: %v, ContextId: %v, SkipVerifyL1InfoRoot: %v, ForcedBlockhashL1: %v, L1InfoTreeData: %+v, BatchL2Data: %v" l1DataStr := "" for i, l1Data := range 
batchRequest.L1InfoTreeData { @@ -272,91 +272,80 @@ func (s *State) sendBatchRequestToExecutorV2(ctx context.Context, batchRequest * l1DataStr = l1DataStr[:len(l1DataStr)-1] } - request = fmt.Sprintf(request, batchRequest.OldBatchNum+1, batchRequest.OldBatchNum, batchRequest.From, hex.EncodeToHex(batchRequest.OldStateRoot), hex.EncodeToHex(batchRequest.L1InfoRoot), + batchRequestLog = fmt.Sprintf(batchRequestLog, batchRequest.OldBatchNum+1, batchRequest.OldBatchNum, batchRequest.From, hex.EncodeToHex(batchRequest.OldStateRoot), hex.EncodeToHex(batchRequest.L1InfoRoot), hex.EncodeToHex(batchRequest.OldAccInputHash), batchRequest.TimestampLimit, batchRequest.Coinbase, batchRequest.UpdateMerkleTree, batchRequest.SkipFirstChangeL2Block, batchRequest.SkipWriteBlockInfoRoot, batchRequest.ChainId, batchRequest.ForkId, batchRequest.ContextId, batchRequest.SkipVerifyL1InfoRoot, hex.EncodeToHex(batchRequest.ForcedBlockhashL1), l1DataStr, hex.EncodeToHex(batchRequest.BatchL2Data)) - log.Debugf("executor batch request: %s", request) + log.Debugf("executor batchRequest: %s", batchRequestLog) now := time.Now() - res, err := s.executorClient.ProcessBatchV2(ctx, batchRequest) + batchResponse, err := s.executorClient.ProcessBatchV2(ctx, batchRequest) if err != nil { - log.Errorf("Error s.executorClient.ProcessBatchV2: %v", err) - log.Errorf("Error s.executorClient.ProcessBatchV2: %s", err.Error()) - log.Errorf("Error s.executorClient.ProcessBatchV2 response: %v", res) - } else if res.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { - log.Debug(processBatchResponseToString(res, "")) - err = executor.ExecutorErr(res.Error) - s.eventLog.LogExecutorErrorV2(ctx, res.Error, batchRequest) - } else if res.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR && executor.IsROMOutOfCountersError(res.ErrorRom) { - log.Warn("OOC error: ", processBatchResponseToString(res, "")) - } else if res.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR { - log.Warn(processBatchResponseToString(res, "")) - err = executor.RomErr(res.ErrorRom) + log.Errorf("error executor ProcessBatchV2: %v", err) + log.Errorf("error executor ProcessBatchV2: %s", err.Error()) + log.Errorf("error executor ProcessBatchV2 response: %v", batchResponse) + } else { + batchResponseToString := processBatchResponseToString(batchResponse) + if batchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { + err = executor.ExecutorErr(batchResponse.Error) + log.Debug("executor batchRequest, executor error: %v", err) + log.Debug(batchResponseToString) + s.eventLog.LogExecutorErrorV2(ctx, batchResponse.Error, batchRequest) + } else if batchResponse.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR && executor.IsROMOutOfCountersError(batchResponse.ErrorRom) { + log.Warn("executor batchRequest, ROM OOC, error: %v", err) + log.Warn(batchResponseToString) + } else if batchResponse.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR { + err = executor.RomErr(batchResponse.ErrorRom) + log.Warn("executor batchRequest, ROM error: %v", err) + log.Warn(batchResponseToString) + } else { + log.Debug(batchResponseToString) + } } + //workarroundDuplicatedBlock(res) elapsed := time.Since(now) if caller != metrics.DiscardCallerLabel { metrics.ExecutorProcessingTime(string(caller), elapsed) } + log.Infof("batch %d took %v to be processed by the executor ", batchRequest.OldBatchNum+1, elapsed) - return res, err + return batchResponse, err } -func processBatchResponseToString(r *executor.ProcessBatchResponseV2, prefix string) string { - res := prefix + 
"ProcessBatchResponseV2: \n" - res += prefix + fmt.Sprintf("NewStateRoot: %v\n", hex.EncodeToHex(r.NewStateRoot)) - res += prefix + fmt.Sprintf("NewAccInputHash: %v\n", hex.EncodeToHex(r.NewAccInputHash)) - res += prefix + fmt.Sprintf("NewLocalExitRoot: %v\n", hex.EncodeToHex(r.NewLocalExitRoot)) - res += prefix + fmt.Sprintf("NewBatchNumber: %v\n", r.NewBatchNum) - res += prefix + fmt.Sprintf("Error: %v\n", r.Error) - res += prefix + fmt.Sprintf("FlushId: %v\n", r.FlushId) - res += prefix + fmt.Sprintf("StoredFlushId: %v\n", r.StoredFlushId) - res += prefix + fmt.Sprintf("ProverId: %v\n", r.ProverId) - res += prefix + fmt.Sprintf("GasUsed: %v\n", r.GasUsed) - res += prefix + fmt.Sprintf("ForkId: %v\n", r.ForkId) - for blockIndex, block := range r.BlockResponses { - newPrefix := prefix + " " + fmt.Sprintf("BlockResponse[%v]: ", blockIndex) - res += blockResponseToString(block, newPrefix) - } - return res +func processBatchResponseToString(batchResponse *executor.ProcessBatchResponseV2) string { + batchResponseLog := "executor batchResponse: NewStateRoot: %v, NewAccInputHash: %v, NewLocalExitRoot: %v, NewBatchNumber: %v, GasUsed: %v, FlushId: %v, StoredFlushId: %v, ProverId:%v, ForkId:%v, Error: %v\n" + batchResponseLog = fmt.Sprintf(batchResponseLog, hex.EncodeToHex(batchResponse.NewStateRoot), hex.EncodeToHex(batchResponse.NewAccInputHash), hex.EncodeToHex(batchResponse.NewLocalExitRoot), + batchResponse.NewBatchNum, batchResponse.GasUsed, batchResponse.FlushId, batchResponse.StoredFlushId, batchResponse.ProverId, batchResponse.ForkId, batchResponse.Error) + + for blockIndex, block := range batchResponse.BlockResponses { + prefix := " " + fmt.Sprintf("block[%v]: ", blockIndex) + batchResponseLog += blockResponseToString(block, prefix) + } + + return batchResponseLog } -func blockResponseToString(r *executor.ProcessBlockResponseV2, prefix string) string { - res := prefix + "ProcessBlockResponseV2:----------------------------- \n" - res += prefix + fmt.Sprintf("ParentHash: %v\n", common.BytesToHash(r.ParentHash)) - res += prefix + fmt.Sprintf("Coinbase: %v\n", r.Coinbase) - res += prefix + fmt.Sprintf("GasLimit: %v\n", r.GasLimit) - res += prefix + fmt.Sprintf("BlockNumber: %v\n", r.BlockNumber) - res += prefix + fmt.Sprintf("Timestamp: %v\n", r.Timestamp) - res += prefix + fmt.Sprintf("GlobalExitRoot:%v\n", common.BytesToHash(r.Ger)) - res += prefix + fmt.Sprintf("BlockHashL1: %v\n", common.BytesToHash(r.BlockHashL1)) - res += prefix + fmt.Sprintf("GasUsed: %v\n", r.GasUsed) - res += prefix + fmt.Sprintf("BlockInfoRoot: %v\n", common.BytesToHash(r.BlockInfoRoot)) - res += prefix + fmt.Sprintf("BlockHash: %v\n", common.BytesToHash(r.BlockHash)) - for txIndex, tx := range r.Responses { - newPrefix := prefix + " " + fmt.Sprintf("TransactionResponse[%v]: ", txIndex) - res += transactionResponseToString(tx, newPrefix) - } - res += prefix + "----------------------------------------------------------------- [Block]\n" - - return res +func blockResponseToString(blockResponse *executor.ProcessBlockResponseV2, prefix string) string { + blockResponseLog := prefix + "ParentHash: %v, Coinbase: %v, GasLimit: %v, BlockNumber: %v, Timestamp: %v, GlobalExitRoot: %v, BlockHashL1: %v, GasUsed: %v, BlockInfoRoot: %v, BlockHash: %v\n" + blockResponseLog = fmt.Sprintf(blockResponseLog, common.BytesToHash(blockResponse.ParentHash), blockResponse.Coinbase, blockResponse.GasLimit, blockResponse.BlockNumber, blockResponse.Timestamp, + common.BytesToHash(blockResponse.Ger), 
common.BytesToHash(blockResponse.BlockHashL1), blockResponse.GasUsed, common.BytesToHash(blockResponse.BlockInfoRoot), common.BytesToHash(blockResponse.BlockHash)) + + for txIndex, tx := range blockResponse.Responses { + prefix := " " + fmt.Sprintf("tx[%v]: ", txIndex) + blockResponseLog += transactionResponseToString(tx, prefix) + } + + return blockResponseLog } -func transactionResponseToString(r *executor.ProcessTransactionResponseV2, prefix string) string { - res := prefix + "ProcessTransactionResponseV2:----------------------------------- \n" - res += prefix + fmt.Sprintf("TxHash: %v\n", common.BytesToHash(r.TxHash)) - res += prefix + fmt.Sprintf("TxHashL2: %v\n", common.BytesToHash(r.TxHashL2)) - res += prefix + fmt.Sprintf("Type: %v\n", r.Type) - res += prefix + fmt.Sprintf("Error: %v\n", r.Error) - res += prefix + fmt.Sprintf("GasUsed: %v\n", r.GasUsed) - res += prefix + fmt.Sprintf("GasLeft: %v\n", r.GasLeft) - res += prefix + fmt.Sprintf("GasRefund:%v\n", r.GasRefunded) - res += prefix + fmt.Sprintf("StateRoot:%v\n", common.BytesToHash(r.StateRoot)) - res += prefix + "----------------------------------------------------------------- [Transaction]\n" - - return res +func transactionResponseToString(txResponse *executor.ProcessTransactionResponseV2, prefix string) string { + txResponseLog := prefix + "TxHash: %v, TxHashL2: %v, Type: %v, StateRoot:%v, GasUsed: %v, GasLeft: %v, GasRefund: %v, Error: %v\n" + txResponseLog = fmt.Sprintf(txResponseLog, common.BytesToHash(txResponse.TxHash), common.BytesToHash(txResponse.TxHashL2), txResponse.Type, + common.BytesToHash(txResponse.StateRoot), txResponse.GasUsed, txResponse.GasLeft, txResponse.GasRefunded, txResponse.Error) + + return txResponseLog } // ProcessAndStoreClosedBatchV2 is used by the Synchronizer to add a closed batch into the data base. 
Values returned are the new stateRoot, diff --git a/state/convertersV2.go b/state/convertersV2.go index eb95d0379b..eb83c32b36 100644 --- a/state/convertersV2.go +++ b/state/convertersV2.go @@ -168,18 +168,6 @@ func (s *State) convertToProcessTransactionResponseV2(responses []*executor.Proc if tx != nil { result.Tx = *tx - log.Debugf("ProcessTransactionResponseV2[TxHash]: %v", result.TxHash) - if response.Error == executor.RomError_ROM_ERROR_NO_ERROR { - log.Debugf("ProcessTransactionResponseV2[Nonce]: %v", result.Tx.Nonce()) - } - log.Debugf("ProcessTransactionResponseV2[StateRoot]: %v", result.StateRoot.String()) - log.Debugf("ProcessTransactionResponseV2[Error]: %v", result.RomError) - log.Debugf("ProcessTransactionResponseV2[GasUsed]: %v", result.GasUsed) - log.Debugf("ProcessTransactionResponseV2[GasLeft]: %v", result.GasLeft) - log.Debugf("ProcessTransactionResponseV2[GasRefunded]: %v", result.GasRefunded) - log.Debugf("ProcessTransactionResponseV2[ChangesStateRoot]: %v", result.ChangesStateRoot) - log.Debugf("ProcessTransactionResponseV2[EffectiveGasPrice]: %v", result.EffectiveGasPrice) - log.Debugf("ProcessTransactionResponseV2[EffectivePercentage]: %v", result.EffectivePercentage) } results = append(results, result) diff --git a/state/pgstatestorage/l2block.go b/state/pgstatestorage/l2block.go index 8c9416bce1..ae2f80d77f 100644 --- a/state/pgstatestorage/l2block.go +++ b/state/pgstatestorage/l2block.go @@ -206,11 +206,14 @@ func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2 return err } - l2TxHash, err := state.GetL2Hash(*tx) + aux := *tx + l2TxHash, err := state.GetL2Hash(aux) if err != nil { return err } + log.Debugf("[AddL2Block] L2Block num: %d, tx[%d] = txHash: %s, txHashL2: %s", l2Block.Number().Uint64(), idx, tx.Hash().String(), l2TxHash.String()) + txRow := []interface{}{tx.Hash().String(), encoded, decoded, l2Block.Number().Uint64(), txsEGPData[idx].EffectivePercentage, egpLogBytes, l2TxHash.String()} txRows = append(txRows, txRow) } From c3dddf79b228c8f190c8cad71807a56599d6ef18 Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Wed, 24 Jan 2024 14:49:43 +0100 Subject: [PATCH 15/54] fix panic etherman (#3133) --- etherman/etherman.go | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/etherman/etherman.go b/etherman/etherman.go index 95e99820df..c605b53ad6 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -215,6 +215,10 @@ func NewClient(cfg Config, l1Config L1Config) (*Client, error) { if err != nil { return nil, err } + oldGlobalExitRoot, err := oldpolygonzkevmglobalexitroot.NewOldpolygonzkevmglobalexitroot(l1Config.GlobalExitRootManagerAddr, ethClient) + if err != nil { + return nil, err + } pol, err := pol.NewPol(l1Config.PolAddr, ethClient) if err != nil { return nil, err @@ -241,14 +245,15 @@ func NewClient(cfg Config, l1Config L1Config) (*Client, error) { log.Debug("rollupID: ", rollupID) return &Client{ - EthClient: ethClient, - ZkEVM: zkevm, - OldZkEVM: oldZkevm, - RollupManager: rollupManager, - Pol: pol, - GlobalExitRootManager: globalExitRoot, - SCAddresses: scAddresses, - RollupID: rollupID, + EthClient: ethClient, + ZkEVM: zkevm, + OldZkEVM: oldZkevm, + RollupManager: rollupManager, + Pol: pol, + GlobalExitRootManager: globalExitRoot, + OldGlobalExitRootManager: oldGlobalExitRoot, + SCAddresses: scAddresses, + RollupID: rollupID, GasProviders: externalGasProviders{ MultiGasProvider: cfg.MultiGasProvider, Providers: gProviders, From 8e1e5e94e17458522a41a61a44bd5c2b54360607 
Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Wed, 24 Jan 2024 21:02:23 +0100 Subject: [PATCH 16/54] Fix GetL2Hash function. Use tx L2 hash returned by the executor when storing L2Block. Logs changes (#3134) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix l2 hash when to is nill * get l2 hash from executor * fix use tx L2 hash returned by the executor when storing L2Block. Logs changes * fix linter * add L2block tracking number to process tx logs * fix ProcessBatchV2 log format * fix UTs. set useMainExecGenerated to true * set prover image to v4.0.0-RC30 * test setting useMainExecGenerated = false * fix independent_test * fix non-e2e tests --------- Co-authored-by: Toni Ramírez --- docker-compose.yml | 2 +- sequencer/finalizer.go | 11 ++-- sequencer/interfaces.go | 2 +- sequencer/l2block.go | 15 ++--- sequencer/mock_state.go | 10 ++-- sequencer/worker.go | 2 +- state/batchV2.go | 4 +- state/convertersV2.go | 1 + state/genesis.go | 16 ++++-- state/interfaces.go | 2 +- state/mocks/mock_storage.go | 21 +++---- state/pgstatestorage/l2block.go | 32 ++++++----- state/pgstatestorage/pgstatestorage_test.go | 56 +++++++++++++------ .../forkid_dragonfruit/dragonfruit_test.go | 14 +++-- .../forkid_independent/independent_test.go | 18 ++++-- state/test/l2txhash_test.go | 46 +++++++++++++-- state/transaction.go | 27 +++++---- test/docker-compose.yml | 4 +- 18 files changed, 188 insertions(+), 95 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 6b5508ba33..77bcce0313 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -107,7 +107,7 @@ services: zkevm-prover: container_name: zkevm-prover restart: unless-stopped - image: hermeznetwork/zkevm-prover:v4.0.0-RC29 + image: hermeznetwork/zkevm-prover:v4.0.0-RC30 depends_on: zkevm-state-db: condition: service_healthy diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 082febc089..5cb3e705cb 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -337,6 +337,9 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first metrics.ProcessingTime(time.Since(start)) }() + log.Infof("processing tx %s, batchNumber: %d, l2Block: [%d], oldStateRoot: %s, L1InfoRootIndex: %d", + tx.HashStr, f.wipBatch.batchNumber, f.wipL2Block.trackingNum, f.wipBatch.imStateRoot, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex) + batchRequest := state.ProcessRequest{ BatchNumber: f.wipBatch.batchNumber, OldStateRoot: f.wipBatch.imStateRoot, @@ -425,9 +428,6 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first batchRequest.Transactions = append(batchRequest.Transactions, effectivePercentageAsDecodedHex...) - log.Infof("processing tx %s, wipBatch.BatchNumber: %d, batchNumber: %d, oldStateRoot: %s, L1InfoRootIndex: %d", - tx.HashStr, f.wipBatch.batchNumber, batchRequest.BatchNumber, batchRequest.OldStateRoot, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex) - batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false) if err != nil && (errors.Is(err, runtime.ErrExecutorDBError) || errors.Is(err, runtime.ErrInvalidTxChangeL2BlockMinTimestamp)) { @@ -463,9 +463,8 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first // Update imStateRoot f.wipBatch.imStateRoot = batchResponse.NewStateRoot - log.Infof("processed tx %s. 
Batch.batchNumber: %d, batchNumber: %d, newStateRoot: %s, oldStateRoot: %s, used counters: %s", - tx.HashStr, f.wipBatch.batchNumber, batchRequest.BatchNumber, batchResponse.NewStateRoot.String(), - batchRequest.OldStateRoot.String(), f.logZKCounters(batchResponse.UsedZkCounters)) + log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, used counters: %s", + tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(), f.logZKCounters(batchResponse.UsedZkCounters)) return nil, nil } diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go index 2b89c8e9f4..54b8276a41 100644 --- a/sequencer/interfaces.go +++ b/sequencer/interfaces.go @@ -84,7 +84,7 @@ type stateInterface interface { FlushMerkleTree(ctx context.Context, newStateRoot common.Hash) error GetStoredFlushID(ctx context.Context) (uint64, string, error) GetForkIDByBatchNumber(batchNumber uint64) uint64 - AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error + AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*state.DSL2Block, error) GetDSBatches(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, readWIPBatch bool, dbTx pgx.Tx) ([]*state.DSBatch, error) GetDSL2Blocks(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*state.DSL2Block, error) diff --git a/sequencer/l2block.go b/sequencer/l2block.go index bfcafe2305..5aab392767 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -139,7 +139,7 @@ func (f *finalizer) storePendingL2Blocks(ctx context.Context) { if err != nil { // Dump L2Block info - f.logL2Block(l2Block) + f.dumpL2Block(l2Block) f.Halt(ctx, fmt.Errorf("error storing L2 block %d [%d], error: %v", l2Block.batchResponse.BlockResponses[0].BlockNumber, l2Block.trackingNum, err)) } @@ -556,7 +556,7 @@ func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBat return batchResponse, nil } -func (f *finalizer) logL2Block(l2Block *L2Block) { +func (f *finalizer) dumpL2Block(l2Block *L2Block) { var blockResp *state.ProcessBlockResponse if l2Block.batchResponse != nil { if len(l2Block.batchResponse.BlockResponses) > 0 { @@ -564,15 +564,16 @@ func (f *finalizer) logL2Block(l2Block *L2Block) { } } + txsLog := "" if blockResp != nil { - log.Infof("DUMP L2 block %d [%d], Timestamp: %d, ParentHash: %s, Coinbase: %s, GER: %s, BlockHashL1: %s, GasUsed: %d, BlockInfoRoot: %s, BlockHash: %s", - blockResp.BlockNumber, l2Block.trackingNum, blockResp.Timestamp, blockResp.ParentHash, blockResp.Coinbase, blockResp.GlobalExitRoot, blockResp.BlockHashL1, - blockResp.GasUsed, blockResp.BlockInfoRoot, blockResp.BlockHash) - for i, txResp := range blockResp.TransactionResponses { - log.Infof(" tx[%d] Hash: %s, HashL2: %s, StateRoot: %s, Type: %d, GasLeft: %d, GasUsed: %d, GasRefund: %d, CreateAddress: %s, ChangesStateRoot: %v, EGP: %s, EGPPct: %d, HasGaspriceOpcode: %v, HasBalanceOpcode: %v", + txsLog += fmt.Sprintf(" tx[%d] Hash: %s, HashL2: %s, StateRoot: %s, Type: %d, GasLeft: %d, GasUsed: %d, GasRefund: %d, CreateAddress: %s, ChangesStateRoot: %v, EGP: %s, EGPPct: %d, HasGaspriceOpcode: %v, HasBalanceOpcode: %v\n", i, txResp.TxHash, txResp.TxHashL2_V2, txResp.StateRoot, 
txResp.Type, txResp.GasLeft, txResp.GasUsed, txResp.GasRefunded, txResp.CreateAddress, txResp.ChangesStateRoot, txResp.EffectiveGasPrice, txResp.EffectivePercentage, txResp.HasGaspriceOpcode, txResp.HasBalanceOpcode) } + + log.Infof("DUMP L2 block %d [%d], Timestamp: %d, ParentHash: %s, Coinbase: %s, GER: %s, BlockHashL1: %s, GasUsed: %d, BlockInfoRoot: %s, BlockHash: %s\n%s", + blockResp.BlockNumber, l2Block.trackingNum, blockResp.Timestamp, blockResp.ParentHash, blockResp.Coinbase, blockResp.GlobalExitRoot, blockResp.BlockHashL1, + blockResp.GasUsed, blockResp.BlockInfoRoot, blockResp.BlockHash, txsLog) } } diff --git a/sequencer/mock_state.go b/sequencer/mock_state.go index c4d5c58f6c..0e0a1aaf85 100644 --- a/sequencer/mock_state.go +++ b/sequencer/mock_state.go @@ -26,17 +26,17 @@ type StateMock struct { mock.Mock } -// AddL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx -func (_m *StateMock) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error { - ret := _m.Called(ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx) +// AddL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, dbTx +func (_m *StateMock) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, dbTx) if len(ret) == 0 { panic("no return value specified for AddL2Block") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.L2Block, []*types.Receipt, []state.StoreTxEGPData, pgx.Tx) error); ok { - r0 = rf(ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.L2Block, []*types.Receipt, []common.Hash, []state.StoreTxEGPData, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, dbTx) } else { r0 = ret.Error(0) } diff --git a/sequencer/worker.go b/sequencer/worker.go index 64feb05b64..4e277b7b84 100644 --- a/sequencer/worker.go +++ b/sequencer/worker.go @@ -338,7 +338,7 @@ func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, e wg.Wait() if foundAt != -1 { - log.Debugf("best fitting tx found: tx %s at index %d with gasPrice %d", tx.HashStr, foundAt, tx.GasPrice) + log.Debugf("best fitting tx %s found at index %d with gasPrice %d", tx.HashStr, foundAt, tx.GasPrice) return tx, nil } else { return nil, ErrNoFittingTransaction diff --git a/state/batchV2.go b/state/batchV2.go index 53b1f25128..bc7ed63ca8 100644 --- a/state/batchV2.go +++ b/state/batchV2.go @@ -277,7 +277,7 @@ func (s *State) sendBatchRequestToExecutorV2(ctx context.Context, batchRequest * batchRequest.SkipWriteBlockInfoRoot, batchRequest.ChainId, batchRequest.ForkId, batchRequest.ContextId, batchRequest.SkipVerifyL1InfoRoot, hex.EncodeToHex(batchRequest.ForcedBlockhashL1), l1DataStr, hex.EncodeToHex(batchRequest.BatchL2Data)) - log.Debugf("executor batchRequest: %s", batchRequestLog) + log.Debugf("executor batchRequest, %s", batchRequestLog) now := time.Now() batchResponse, err := s.executorClient.ProcessBatchV2(ctx, batchRequest) @@ -316,7 +316,7 @@ func (s *State) sendBatchRequestToExecutorV2(ctx context.Context, batchRequest * } func processBatchResponseToString(batchResponse 
*executor.ProcessBatchResponseV2) string { - batchResponseLog := "executor batchResponse: NewStateRoot: %v, NewAccInputHash: %v, NewLocalExitRoot: %v, NewBatchNumber: %v, GasUsed: %v, FlushId: %v, StoredFlushId: %v, ProverId:%v, ForkId:%v, Error: %v\n" + batchResponseLog := "executor batchResponse, NewStateRoot: %v, NewAccInputHash: %v, NewLocalExitRoot: %v, NewBatchNumber: %v, GasUsed: %v, FlushId: %v, StoredFlushId: %v, ProverId:%v, ForkId:%v, Error: %v\n" batchResponseLog = fmt.Sprintf(batchResponseLog, hex.EncodeToHex(batchResponse.NewStateRoot), hex.EncodeToHex(batchResponse.NewAccInputHash), hex.EncodeToHex(batchResponse.NewLocalExitRoot), batchResponse.NewBatchNum, batchResponse.GasUsed, batchResponse.FlushId, batchResponse.StoredFlushId, batchResponse.ProverId, batchResponse.ForkId, batchResponse.Error) diff --git a/state/convertersV2.go b/state/convertersV2.go index eb83c32b36..b841417ad0 100644 --- a/state/convertersV2.go +++ b/state/convertersV2.go @@ -102,6 +102,7 @@ func (s *State) convertToProcessTransactionResponseV2(responses []*executor.Proc isRomOOCError := false results := make([]*ProcessTransactionResponse, 0, len(responses)) + for _, response := range responses { if response.Error != executor.RomError_ROM_ERROR_NO_ERROR { isRomLevelError = true diff --git a/state/genesis.go b/state/genesis.go index be3c9c8ad4..29e2a87e74 100644 --- a/state/genesis.go +++ b/state/genesis.go @@ -190,12 +190,20 @@ func (s *State) SetGenesis(ctx context.Context, block Block, genesis Genesis, m l2Block := NewL2Block(header, []*types.Transaction{}, []*L2Header{}, receipts, &trie.StackTrie{}) l2Block.ReceivedAt = block.ReceivedAt - storeTxsEGPData := []StoreTxEGPData{} - for range l2Block.Transactions() { - storeTxsEGPData = append(storeTxsEGPData, StoreTxEGPData{EGPLog: nil, EffectivePercentage: MaxEffectivePercentage}) + numTxs := len(l2Block.Transactions()) + storeTxsEGPData := make([]StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i, txTmp := range l2Block.Transactions() { + storeTxsEGPData[i] = StoreTxEGPData{EGPLog: nil, EffectivePercentage: MaxEffectivePercentage} + aux := *txTmp + l2TxHash, err := GetL2Hash(aux) + if err != nil { + return common.Hash{}, err + } + txsL2Hash[i] = l2TxHash } - err = s.AddL2Block(ctx, batch.BatchNumber, l2Block, receipts, storeTxsEGPData, dbTx) + err = s.AddL2Block(ctx, batch.BatchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) if err != nil { return common.Hash{}, err } diff --git a/state/interfaces.go b/state/interfaces.go index 29c3ff1ce7..8100fd25a6 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -70,7 +70,7 @@ type storage interface { GetL2BlockTransactionCountByHash(ctx context.Context, blockHash common.Hash, dbTx pgx.Tx) (uint64, error) GetL2BlockTransactionCountByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (uint64, error) GetTransactionEGPLogByHash(ctx context.Context, transactionHash common.Hash, dbTx pgx.Tx) (*EffectiveGasPriceLog, error) - AddL2Block(ctx context.Context, batchNumber uint64, l2Block *L2Block, receipts []*types.Receipt, txsEGPData []StoreTxEGPData, dbTx pgx.Tx) error + AddL2Block(ctx context.Context, batchNumber uint64, l2Block *L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []StoreTxEGPData, dbTx pgx.Tx) error GetLastVirtualizedL2BlockNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) GetLastConsolidatedL2BlockNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) GetLastVerifiedL2BlockNumberUntilL1Block(ctx context.Context, 
l1FinalizedBlockNumber uint64, dbTx pgx.Tx) (uint64, error) diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go index 88f1da4e86..51a6284c3c 100644 --- a/state/mocks/mock_storage.go +++ b/state/mocks/mock_storage.go @@ -418,17 +418,17 @@ func (_c *StorageMock_AddL1InfoRootToExitRoot_Call) RunAndReturn(run func(contex return _c } -// AddL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx -func (_m *StorageMock) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error { - ret := _m.Called(ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx) +// AddL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, dbTx +func (_m *StorageMock) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, dbTx) if len(ret) == 0 { panic("no return value specified for AddL2Block") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.L2Block, []*types.Receipt, []state.StoreTxEGPData, pgx.Tx) error); ok { - r0 = rf(ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.L2Block, []*types.Receipt, []common.Hash, []state.StoreTxEGPData, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, dbTx) } else { r0 = ret.Error(0) } @@ -446,15 +446,16 @@ type StorageMock_AddL2Block_Call struct { // - batchNumber uint64 // - l2Block *state.L2Block // - receipts []*types.Receipt +// - txsL2Hash []common.Hash // - txsEGPData []state.StoreTxEGPData // - dbTx pgx.Tx -func (_e *StorageMock_Expecter) AddL2Block(ctx interface{}, batchNumber interface{}, l2Block interface{}, receipts interface{}, txsEGPData interface{}, dbTx interface{}) *StorageMock_AddL2Block_Call { - return &StorageMock_AddL2Block_Call{Call: _e.mock.On("AddL2Block", ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx)} +func (_e *StorageMock_Expecter) AddL2Block(ctx interface{}, batchNumber interface{}, l2Block interface{}, receipts interface{}, txsL2Hash interface{}, txsEGPData interface{}, dbTx interface{}) *StorageMock_AddL2Block_Call { + return &StorageMock_AddL2Block_Call{Call: _e.mock.On("AddL2Block", ctx, batchNumber, l2Block, receipts, txsL2Hash, txsEGPData, dbTx)} } -func (_c *StorageMock_AddL2Block_Call) Run(run func(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx)) *StorageMock_AddL2Block_Call { +func (_c *StorageMock_AddL2Block_Call) Run(run func(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx)) *StorageMock_AddL2Block_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(*state.L2Block), args[3].([]*types.Receipt), args[4].([]state.StoreTxEGPData), args[5].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint64), args[2].(*state.L2Block), args[3].([]*types.Receipt), args[4].([]common.Hash), args[5].([]state.StoreTxEGPData), args[6].(pgx.Tx)) }) return _c } @@ -464,7 +465,7 @@ func (_c *StorageMock_AddL2Block_Call) Return(_a0 error) 
*StorageMock_AddL2Block return _c } -func (_c *StorageMock_AddL2Block_Call) RunAndReturn(run func(context.Context, uint64, *state.L2Block, []*types.Receipt, []state.StoreTxEGPData, pgx.Tx) error) *StorageMock_AddL2Block_Call { +func (_c *StorageMock_AddL2Block_Call) RunAndReturn(run func(context.Context, uint64, *state.L2Block, []*types.Receipt, []common.Hash, []state.StoreTxEGPData, pgx.Tx) error) *StorageMock_AddL2Block_Call { _c.Call.Return(run) return _c } diff --git a/state/pgstatestorage/l2block.go b/state/pgstatestorage/l2block.go index ae2f80d77f..a01853931d 100644 --- a/state/pgstatestorage/l2block.go +++ b/state/pgstatestorage/l2block.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "time" "github.com/0xPolygonHermez/zkevm-node/hex" @@ -146,9 +147,9 @@ func (p *PostgresStorage) GetL2BlockTransactionCountByNumber(ctx context.Context } // AddL2Block adds a new L2 block to the State Store -func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error { +func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsL2Hash []common.Hash, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error { //TODO: Optmize this function using only one SQL (with several values) to insert all the txs, receips and logs - log.Debugf("[AddL2Block] adding l2 block: %v", l2Block.NumberU64()) + log.Debugf("[AddL2Block] adding L2 block %d", l2Block.NumberU64()) start := time.Now() e := p.getExecQuerier(dbTx) @@ -182,6 +183,9 @@ func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2 return err } + logTxsL2Hash := "" + forkId := p.GetForkIDByBatchNumber(batchNumber) + if len(l2Block.Transactions()) > 0 { txRows := [][]interface{}{} @@ -206,21 +210,21 @@ func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2 return err } - aux := *tx - l2TxHash, err := state.GetL2Hash(aux) - if err != nil { - return err - } - - log.Debugf("[AddL2Block] L2Block num: %d, tx[%d] = txHash: %s, txHashL2: %s", l2Block.Number().Uint64(), idx, tx.Hash().String(), l2TxHash.String()) + logTxsL2Hash += fmt.Sprintf("tx[%d] txHash: %s, txHashL2: %s\n", idx, tx.Hash().String(), txsL2Hash[idx].String()) - txRow := []interface{}{tx.Hash().String(), encoded, decoded, l2Block.Number().Uint64(), txsEGPData[idx].EffectivePercentage, egpLogBytes, l2TxHash.String()} + txRow := []interface{}{tx.Hash().String(), encoded, decoded, l2Block.Number().Uint64(), txsEGPData[idx].EffectivePercentage, egpLogBytes} + if forkId >= state.FORKID_ETROG { + txRow = append(txRow, txsL2Hash[idx].String()) + } txRows = append(txRows, txRow) } - _, err := dbTx.CopyFrom(ctx, pgx.Identifier{"state", "transaction"}, - []string{"hash", "encoded", "decoded", "l2_block_num", "effective_percentage", "egp_log", "l2_hash"}, - pgx.CopyFromRows(txRows)) + txFields := []string{"hash", "encoded", "decoded", "l2_block_num", "effective_percentage", "egp_log"} + if forkId >= state.FORKID_ETROG { + txFields = append(txFields, "l2_hash") + } + + _, err := dbTx.CopyFrom(ctx, pgx.Identifier{"state", "transaction"}, txFields, pgx.CopyFromRows(txRows)) if err != nil { return err @@ -237,7 +241,7 @@ func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2 p.AddLogs(ctx, logs, dbTx) } - log.Debugf("[AddL2Block] l2 block %v took %v to be added", l2Block.NumberU64(), time.Since(start)) + log.Debugf("[AddL2Block] 
added L2 block %d, time: %v\n%s", l2Block.NumberU64(), time.Since(start), logTxsL2Hash) return nil } diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go index 88882d3681..6a4c395c0d 100644 --- a/state/pgstatestorage/pgstatestorage_test.go +++ b/state/pgstatestorage/pgstatestorage_test.go @@ -193,12 +193,18 @@ func TestGetBatchByL2BlockNumber(t *testing.T) { l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, &trie.StackTrie{}) receipt.BlockHash = l2Block.Hash() - storeTxsEGPData := []state.StoreTxEGPData{} - for range transactions { - storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + numTxs := len(transactions) + storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i, txTmp := range transactions { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} + aux := *txTmp + l2TxHash, err := state.GetL2Hash(aux) + require.NoError(t, err) + txsL2Hash[i] = l2TxHash } - err = pgStateStorage.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) + err = pgStateStorage.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) require.NoError(t, err) result, err := pgStateStorage.BatchNumberByL2BlockNumber(ctx, l2Block.Number().Uint64(), dbTx) require.NoError(t, err) @@ -712,12 +718,18 @@ func TestGetLastVerifiedL2BlockNumberUntilL1Block(t *testing.T) { l2Header := state.NewL2Header(&types.Header{Number: big.NewInt(0).SetUint64(blockNumber + uint64(10))}) l2Block := state.NewL2BlockWithHeader(l2Header) - storeTxsEGPData := []state.StoreTxEGPData{} - for range l2Block.Transactions() { - storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(0)}) + numTxs := len(l2Block.Transactions()) + storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i, txTmp := range l2Block.Transactions() { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(0)} + aux := *txTmp + l2TxHash, err := state.GetL2Hash(aux) + require.NoError(t, err) + txsL2Hash[i] = l2TxHash } - err = testState.AddL2Block(ctx, batchNumber, l2Block, []*types.Receipt{}, storeTxsEGPData, dbTx) + err = testState.AddL2Block(ctx, batchNumber, l2Block, []*types.Receipt{}, txsL2Hash, storeTxsEGPData, dbTx) require.NoError(t, err) virtualBatch := state.VirtualBatch{BlockNumber: blockNumber, BatchNumber: batchNumber, Coinbase: addr, SequencerAddr: addr, TxHash: hash} @@ -932,12 +944,18 @@ func TestGetLogs(t *testing.T) { receipt.BlockHash = l2Block.Hash() } - storeTxsEGPData := []state.StoreTxEGPData{} - for range transactions { - storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + numTxs := len(transactions) + storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i, txTmp := range transactions { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} + aux := *txTmp + l2TxHash, err := state.GetL2Hash(aux) + require.NoError(t, err) + txsL2Hash[i] = l2TxHash } - err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) 
require.NoError(t, err) } @@ -1059,12 +1077,18 @@ func TestGetNativeBlockHashesInRange(t *testing.T) { receipt.BlockHash = l2Block.Hash() } - storeTxsEGPData := []state.StoreTxEGPData{} - for range transactions { - storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + numTxs := len(transactions) + storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i, txTmp := range transactions { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} + aux := *txTmp + l2TxHash, err := state.GetL2Hash(aux) + require.NoError(t, err) + txsL2Hash[i] = l2TxHash } - err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) require.NoError(t, err) nativeBlockHashes = append(nativeBlockHashes, l2Block.Header().Root) diff --git a/state/test/forkid_dragonfruit/dragonfruit_test.go b/state/test/forkid_dragonfruit/dragonfruit_test.go index 27f684373b..806bc9aba7 100644 --- a/state/test/forkid_dragonfruit/dragonfruit_test.go +++ b/state/test/forkid_dragonfruit/dragonfruit_test.go @@ -1496,12 +1496,18 @@ func TestExecutorRevert(t *testing.T) { receipt.BlockHash = l2Block.Hash() receipt1.BlockHash = l2Block.Hash() - storeTxsEGPData := []state.StoreTxEGPData{} - for range transactions { - storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) + numTxs := len(transactions) + storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i, txTmp := range transactions { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} + aux := *txTmp + l2TxHash, err := state.GetL2Hash(aux) + require.NoError(t, err) + txsL2Hash[i] = l2TxHash } - err = testState.AddL2Block(ctx, 0, l2Block, receipts, storeTxsEGPData, dbTx) + err = testState.AddL2Block(ctx, 0, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) require.NoError(t, err) l2Block, err = testState.GetL2BlockByHash(ctx, l2Block.Hash(), dbTx) require.NoError(t, err) diff --git a/state/test/forkid_independent/independent_test.go b/state/test/forkid_independent/independent_test.go index 3e385fffe8..4ef0b04a72 100644 --- a/state/test/forkid_independent/independent_test.go +++ b/state/test/forkid_independent/independent_test.go @@ -650,12 +650,18 @@ func TestAddGetL2Block(t *testing.T) { receipt.BlockHash = l2Block.Hash() - storeTxsEGPData := []state.StoreTxEGPData{} - for range transactions { - storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage}) - } - - err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx) + numTxs := len(transactions) + storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i, txTmp := range transactions { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} + aux := *txTmp + l2TxHash, err := state.GetL2Hash(aux) + require.NoError(t, err) + txsL2Hash[i] = l2TxHash + } + + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) require.NoError(t, err) result, err := testState.GetL2BlockByHash(ctx, l2Block.Hash(), dbTx) require.NoError(t, err) 
diff --git a/state/test/l2txhash_test.go b/state/test/l2txhash_test.go index c510b581c1..f91c3268bf 100644 --- a/state/test/l2txhash_test.go +++ b/state/test/l2txhash_test.go @@ -35,7 +35,27 @@ const vectorString = `[ "l2TxHash": "0x8f9b0375a6b0f1bd9d54ff499921766828ae8e5314fc44a494736b5c4cc3bb56" }, { - "nonce": "0x01", + "nonce": "0x00", + "gasPrice": "0x3b9aca00", + "gasLimit": "0x186a0", + "to": "", + "value": "0x00", + "data": "0x56d5be740000000000000000000000001275fbb540c8efc58b812ba83b0d0b8b9917ae98", + "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", + "l2TxHash": "0x380cb737b37d06e6d7e6a24ba2b1a3e29e18b28d367fc5a4fa57ddcf60a12a54" + }, + { + "nonce": "0x00", + "gasPrice": "0x3b9aca00", + "gasLimit": "0x186a0", + "to": "", + "value": "0x00", + "data": "", + "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", + "l2TxHash": "0x822ab53546a48dcbc25cdccc883e378b911554b9f00599a0d783e6376246732d" + }, + { + "nonce": "0x1", "gasPrice": "0x3b9aca00", "gasLimit": "0x186a0", "to": "0x1275fbb540c8efc58b812ba83b0d0b8b9917ae98", @@ -45,7 +65,7 @@ const vectorString = `[ "l2TxHash": "0xaa8f08e5bee683718f3f14fa352aaeb8e7de49f8b0e59f03128ef37fa6ac18e3" }, { - "nonce": "0x01", + "nonce": "0x1", "gasPrice": "0x3b9aca00", "gasLimit": "v186a0", "to": "0x1275fbb540c8efc58b812ba83b0d0b8b9917ae98", @@ -91,10 +111,17 @@ func TestL2TxHash(t *testing.T) { // Create types.Transaction from test vector for x, testVector := range testVectors { + to := &common.Address{} nonce := new(big.Int).SetBytes(common.FromHex(testVector.Nonce)).Uint64() gasPrice := new(big.Int).SetBytes(common.FromHex(testVector.GasPrice)) gasLimit := new(big.Int).SetBytes(common.FromHex(testVector.GasLimit)).Uint64() - to := common.HexToAddress(testVector.To) + if testVector.To != "" { + aux := common.HexToAddress(testVector.To) + to = &aux + } else { + to = nil + } + value := new(big.Int).SetBytes(common.FromHex(testVector.Value)) data := common.FromHex(testVector.Data) from := common.HexToAddress(testVector.From) @@ -104,12 +131,21 @@ func TestL2TxHash(t *testing.T) { log.Debugf("nonce: %x", nonce) log.Debugf("gasPrice: %x", gasPrice) log.Debugf("gasLimit: %x", gasLimit) - log.Debugf("to: %s", to.String()) + if to != nil { + log.Debugf("to: %s", to.String()) + } else { + log.Debugf("to: nil") + } log.Debugf("value: %x", value) log.Debugf("data: %s", common.Bytes2Hex(data)) log.Debugf("from: %s", from.String()) - tx := types.NewTransaction(nonce, to, value, gasLimit, gasPrice, data) + tx := types.NewTx(&types.LegacyTx{Nonce: nonce, + GasPrice: gasPrice, + Gas: gasLimit, + To: to, + Value: value, + Data: data}) require.NoError(t, err) hash, err := state.TestGetL2Hash(*tx, from) diff --git a/state/transaction.go b/state/transaction.go index 2f306dc2a9..bb74ca2e1f 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -44,7 +44,9 @@ func getL2Hash(tx types.Transaction, sender common.Address) (common.Hash, error) input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Nonce())) input += formatL2TxHashParam(fmt.Sprintf("%x", tx.GasPrice())) input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Gas())) - input += pad20Bytes(formatL2TxHashParam(fmt.Sprintf("%x", tx.To()))) + if tx.To() != nil { + input += pad20Bytes(formatL2TxHashParam(fmt.Sprintf("%x", tx.To()))) + } input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Value())) if len(tx.Data()) > 0 { input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Data())) @@ -246,9 +248,10 @@ func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, proce if txsEGPLog != nil { 
storeTxsEGPData[0].EGPLog = txsEGPLog[i] } + txsL2Hash := []common.Hash{processedTx.TxHashL2_V2} // Store L2 block and its transaction - if err := s.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx); err != nil { + if err := s.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx); err != nil { return err } } @@ -280,9 +283,11 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P l2Header.GlobalExitRoot = l2Block.GlobalExitRoot l2Header.BlockInfoRoot = l2Block.BlockInfoRoot - transactions := []*types.Transaction{} - storeTxsEGPData := []StoreTxEGPData{} - receipts := []*types.Receipt{} + numTxs := len(l2Block.TransactionResponses) + transactions := make([]*types.Transaction, numTxs) + storeTxsEGPData := make([]StoreTxEGPData, numTxs) + receipts := make([]*types.Receipt, numTxs) + txsL2Hash := make([]common.Hash, numTxs) for i, txResponse := range l2Block.TransactionResponses { // if the transaction has an intrinsic invalid tx error it means @@ -295,15 +300,16 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P } txResp := *txResponse - transactions = append(transactions, &txResp.Tx) + transactions[i] = &txResp.Tx + txsL2Hash[i] = txResp.TxHashL2_V2 - storeTxsEGPData = append(storeTxsEGPData, StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(txResponse.EffectivePercentage)}) + storeTxsEGPData[i] = StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(txResponse.EffectivePercentage)} if txsEGPLog != nil { storeTxsEGPData[i].EGPLog = txsEGPLog[i] } receipt := GenerateReceipt(header.Number, txResponse, uint(i)) - receipts = append(receipts, receipt) + receipts[i] = receipt } // Create block to be able to calculate its hash @@ -315,7 +321,7 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P } // Store L2 block and its transactions - if err := s.AddL2Block(ctx, batchNumber, block, receipts, storeTxsEGPData, dbTx); err != nil { + if err := s.AddL2Block(ctx, batchNumber, block, receipts, txsL2Hash, storeTxsEGPData, dbTx); err != nil { return err } @@ -697,9 +703,10 @@ func (s *State) StoreTransaction(ctx context.Context, batchNumber uint64, proces receipt.BlockHash = l2Block.Hash() storeTxsEGPData := []StoreTxEGPData{{EGPLog: egpLog, EffectivePercentage: uint8(processedTx.EffectivePercentage)}} + txsL2Hash := []common.Hash{processedTx.TxHashL2_V2} // Store L2 block and its transaction - if err := s.AddL2Block(ctx, batchNumber, l2Block, receipts, storeTxsEGPData, dbTx); err != nil { + if err := s.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx); err != nil { return nil, err } diff --git a/test/docker-compose.yml b/test/docker-compose.yml index 38f8b8c644..3390e8ed36 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -513,7 +513,7 @@ services: zkevm-prover: container_name: zkevm-prover - image: hermeznetwork/zkevm-prover:v4.0.0-RC29 + image: hermeznetwork/zkevm-prover:v4.0.0-RC30 ports: - 50061:50061 # MT - 50071:50071 # Executor @@ -602,7 +602,7 @@ services: zkevm-permissionless-prover: container_name: zkevm-permissionless-prover - image: hermeznetwork/zkevm-prover:v4.0.0-RC29 + image: hermeznetwork/zkevm-prover:v4.0.0-RC30 ports: # - 50058:50058 # Prover - 50059:50052 # Mock prover From 19d493e438f66c73fce526427f01b0590aea2a2c Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Thu, 25 Jan 2024 10:00:57 -0300 Subject: [PATCH 17/54] remove l2 tx hash computation and use data stored in the state 
returned by the executor (#3139) --- jsonrpc/endpoints_eth.go | 45 ++++- jsonrpc/endpoints_eth_test.go | 15 +- jsonrpc/endpoints_zkevm.go | 24 ++- jsonrpc/endpoints_zkevm_test.go | 36 +++- jsonrpc/mocks/mock_state.go | 30 ++++ jsonrpc/types/interfaces.go | 1 + jsonrpc/types/types.go | 44 ++--- state/genesis.go | 17 +- state/interfaces.go | 1 + state/mocks/mock_storage.go | 60 +++++++ state/pgstatestorage/pgstatestorage_test.go | 28 +--- state/pgstatestorage/transaction.go | 18 ++ .../forkid_dragonfruit/dragonfruit_test.go | 7 +- .../forkid_independent/independent_test.go | 7 +- state/test/l2txhash_test.go | 157 ------------------ state/transaction.go | 65 -------- 16 files changed, 248 insertions(+), 307 deletions(-) delete mode 100644 state/test/l2txhash_test.go diff --git a/jsonrpc/endpoints_eth.go b/jsonrpc/endpoints_eth.go index e1c1aaa9fa..55c7698427 100644 --- a/jsonrpc/endpoints_eth.go +++ b/jsonrpc/endpoints_eth.go @@ -305,7 +305,7 @@ func (e *EthEndpoints) GetBlockByHash(hash types.ArgHash, fullTx bool, includeEx receipts = append(receipts, *receipt) } - rpcBlock, err := types.NewBlock(state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, false, includeExtraInfo) + rpcBlock, err := types.NewBlock(ctx, e.state, state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, false, includeExtraInfo, dbTx) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by hash %v", hash.Hash()), err, true) } @@ -329,7 +329,7 @@ func (e *EthEndpoints) GetBlockByNumber(number types.BlockNumber, fullTx bool, i UncleHash: ethTypes.EmptyUncleHash, }) l2Block := state.NewL2BlockWithHeader(l2Header) - rpcBlock, err := types.NewBlock(nil, l2Block, nil, fullTx, false, includeExtraInfo) + rpcBlock, err := types.NewBlock(ctx, e.state, nil, l2Block, nil, fullTx, false, includeExtraInfo, dbTx) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err, true) } @@ -359,7 +359,7 @@ func (e *EthEndpoints) GetBlockByNumber(number types.BlockNumber, fullTx bool, i receipts = append(receipts, *receipt) } - rpcBlock, err := types.NewBlock(state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, false, includeExtraInfo) + rpcBlock, err := types.NewBlock(ctx, e.state, state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, false, includeExtraInfo, dbTx) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by number %v", blockNumber), err, true) } @@ -563,7 +563,16 @@ func (e *EthEndpoints) GetTransactionByBlockHashAndIndex(hash types.ArgHash, ind return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction receipt", err, true) } - res, err := types.NewTransaction(*tx, receipt, false, includeExtraInfo) + var l2Hash *common.Hash + if includeExtraInfo != nil && *includeExtraInfo { + l2h, err := e.state.GetL2TxHashByTxHash(ctx, tx.Hash(), dbTx) + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get l2 transaction hash", err, true) + } + l2Hash = &l2h + } + + res, err := types.NewTransaction(*tx, receipt, false, l2Hash) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } @@ -596,7 +605,16 @@ func (e *EthEndpoints) GetTransactionByBlockNumberAndIndex(number *types.BlockNu return RPCErrorResponse(types.DefaultErrorCode, "failed to get transaction receipt", err, true) } - res, err := types.NewTransaction(*tx, receipt, false, includeExtraInfo) + var l2Hash 
*common.Hash + if includeExtraInfo != nil && *includeExtraInfo { + l2h, err := e.state.GetL2TxHashByTxHash(ctx, tx.Hash(), dbTx) + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get l2 transaction hash", err, true) + } + l2Hash = &l2h + } + + res, err := types.NewTransaction(*tx, receipt, false, l2Hash) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } @@ -621,7 +639,16 @@ func (e *EthEndpoints) GetTransactionByHash(hash types.ArgHash, includeExtraInfo return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction receipt from state", err, true) } - res, err := types.NewTransaction(*tx, receipt, false, includeExtraInfo) + var l2Hash *common.Hash + if includeExtraInfo != nil && *includeExtraInfo { + l2h, err := e.state.GetL2TxHashByTxHash(ctx, hash.Hash(), dbTx) + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get l2 transaction hash", err, true) + } + l2Hash = &l2h + } + + res, err := types.NewTransaction(*tx, receipt, false, l2Hash) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } @@ -641,7 +668,7 @@ func (e *EthEndpoints) GetTransactionByHash(hash types.ArgHash, includeExtraInfo } if poolTx.Status == pool.TxStatusPending { tx = &poolTx.Transaction - res, err := types.NewTransaction(*tx, nil, false, includeExtraInfo) + res, err := types.NewTransaction(*tx, nil, false, nil) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } @@ -812,7 +839,7 @@ func (e *EthEndpoints) GetTransactionReceipt(hash types.ArgHash) (interface{}, t return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx receipt from state", err, true) } - receipt, err := types.NewReceipt(*tx, r, state.Ptr(false)) + receipt, err := types.NewReceipt(*tx, r, nil) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build the receipt response", err, true) } @@ -1089,7 +1116,7 @@ func (e *EthEndpoints) notifyNewHeads(wg *sync.WaitGroup, event state.NewL2Block defer wg.Done() start := time.Now() - b, err := types.NewBlock(state.Ptr(event.Block.Hash()), &event.Block, nil, false, false, state.Ptr(false)) + b, err := types.NewBlock(context.Background(), e.state, state.Ptr(event.Block.Hash()), &event.Block, nil, false, false, state.Ptr(false), nil) if err != nil { log.Errorf("failed to build block response to subscription: %v", err) return diff --git a/jsonrpc/endpoints_eth_test.go b/jsonrpc/endpoints_eth_test.go index 0bc941b09c..e3dc38e9d8 100644 --- a/jsonrpc/endpoints_eth_test.go +++ b/jsonrpc/endpoints_eth_test.go @@ -1305,6 +1305,12 @@ func TestGetL2BlockByNumber(t *testing.T) { Return(receipt, nil). Once() } + for _, signedTx := range signedTransactions { + m.State. + On("GetL2TxHashByTxHash", context.Background(), signedTx.Hash(), m.DbTx). + Return(signedTx.Hash(), nil). + Once() + } }, }, { @@ -1339,6 +1345,12 @@ func TestGetL2BlockByNumber(t *testing.T) { Return(receipt, nil). Once() } + for _, signedTx := range signedTransactions { + m.State. + On("GetL2TxHashByTxHash", context.Background(), signedTx.Hash(), m.DbTx). + Return(signedTx.Hash(), nil). 
+ Once() + } }, }, { @@ -3260,8 +3272,7 @@ func TestGetTransactionReceipt(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - l2Hash, err := state.GetL2Hash(*signedTx) - require.NoError(t, err) + l2Hash := common.HexToHash("0x987654321") log := ðTypes.Log{Topics: []common.Hash{common.HexToHash("0x1")}, Data: []byte{}} logs := []*ethTypes.Log{log} diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go index dcecce0e60..8fffa106a9 100644 --- a/jsonrpc/endpoints_zkevm.go +++ b/jsonrpc/endpoints_zkevm.go @@ -195,7 +195,7 @@ func (z *ZKEVMEndpoints) GetBatchByNumber(batchNumber types.BatchNumber, fullTx } batch.Transactions = txs - rpcBatch, err := types.NewBatch(batch, virtualBatch, verifiedBatch, blocks, receipts, fullTx, true, ger) + rpcBatch, err := types.NewBatch(ctx, z.state, batch, virtualBatch, verifiedBatch, blocks, receipts, fullTx, true, ger, dbTx) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build the batch %v response", batchNumber), err, true) } @@ -218,7 +218,7 @@ func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx b UncleHash: ethTypes.EmptyUncleHash, }) l2Block := state.NewL2BlockWithHeader(l2Header) - rpcBlock, err := types.NewBlock(nil, l2Block, nil, fullTx, false, state.Ptr(true)) + rpcBlock, err := types.NewBlock(ctx, z.state, nil, l2Block, nil, fullTx, false, state.Ptr(true), dbTx) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err, true) } @@ -248,7 +248,7 @@ func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx b receipts = append(receipts, *receipt) } - rpcBlock, err := types.NewBlock(state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, true, state.Ptr(true)) + rpcBlock, err := types.NewBlock(ctx, z.state, state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, true, state.Ptr(true), dbTx) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by number %v", blockNumber), err, true) } @@ -277,7 +277,7 @@ func (z *ZKEVMEndpoints) GetFullBlockByHash(hash types.ArgHash, fullTx bool) (in receipts = append(receipts, *receipt) } - rpcBlock, err := types.NewBlock(state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, true, state.Ptr(true)) + rpcBlock, err := types.NewBlock(ctx, z.state, state.Ptr(l2Block.Hash()), l2Block, receipts, fullTx, true, state.Ptr(true), dbTx) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, fmt.Sprintf("couldn't build block response for block by hash %v", hash.Hash()), err, true) } @@ -324,7 +324,12 @@ func (z *ZKEVMEndpoints) GetTransactionByL2Hash(hash types.ArgHash) (interface{} return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction receipt from state", err, true) } - res, err := types.NewTransaction(*tx, receipt, false, state.Ptr(true)) + l2Hash, err := z.state.GetL2TxHashByTxHash(ctx, tx.Hash(), dbTx) + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get l2 transaction hash", err, true) + } + + res, err := types.NewTransaction(*tx, receipt, false, &l2Hash) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } @@ -344,7 +349,7 @@ func (z *ZKEVMEndpoints) GetTransactionByL2Hash(hash types.ArgHash) (interface{} } if poolTx.Status == pool.TxStatusPending { tx = &poolTx.Transaction - res, err := types.NewTransaction(*tx, nil, false, state.Ptr(true)) + res, err := 
types.NewTransaction(*tx, nil, false, nil) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build transaction response", err, true) } @@ -371,7 +376,12 @@ func (z *ZKEVMEndpoints) GetTransactionReceiptByL2Hash(hash types.ArgHash) (inte return RPCErrorResponse(types.DefaultErrorCode, "failed to get tx receipt from state", err, true) } - receipt, err := types.NewReceipt(*tx, r, state.Ptr(true)) + l2Hash, err := z.state.GetL2TxHashByTxHash(ctx, tx.Hash(), dbTx) + if err != nil { + return RPCErrorResponse(types.DefaultErrorCode, "failed to get l2 transaction hash", err, true) + } + + receipt, err := types.NewReceipt(*tx, r, &l2Hash) if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to build the receipt response", err, true) } diff --git a/jsonrpc/endpoints_zkevm_test.go b/jsonrpc/endpoints_zkevm_test.go index bef21a342f..9b56552dca 100644 --- a/jsonrpc/endpoints_zkevm_test.go +++ b/jsonrpc/endpoints_zkevm_test.go @@ -775,6 +775,10 @@ func TestGetBatchByNumber(t *testing.T) { On("GetTransactionReceipt", context.Background(), tx.Hash(), m.DbTx). Return(receipts[i], nil). Once() + m.State. + On("GetL2TxHashByTxHash", context.Background(), tx.Hash(), m.DbTx). + Return(tx.Hash(), nil). + Once() } m.State. On("GetTransactionsByBatchNumber", context.Background(), hex.DecodeBig(tc.Number).Uint64(), m.DbTx). @@ -966,8 +970,9 @@ func TestGetBatchByNumber(t *testing.T) { receipts = append(receipts, receipt) from, _ := state.GetSender(*tx) V, R, S := tx.RawSignatureValues() + l2Hash := common.HexToHash("0x987654321") - rpcReceipt, err := types.NewReceipt(*tx, receipt, state.Ptr(true)) + rpcReceipt, err := types.NewReceipt(*tx, receipt, &l2Hash) require.NoError(t, err) tc.ExpectedResult.Transactions = append(tc.ExpectedResult.Transactions, @@ -990,6 +995,7 @@ func TestGetBatchByNumber(t *testing.T) { R: types.ArgBig(*R), S: types.ArgBig(*S), Receipt: &rpcReceipt, + L2Hash: &l2Hash, }, }, ) @@ -1054,7 +1060,13 @@ func TestGetBatchByNumber(t *testing.T) { On("GetTransactionReceipt", context.Background(), tx.Hash(), m.DbTx). Return(receipts[i], nil). Once() + + m.State. + On("GetL2TxHashByTxHash", context.Background(), tx.Hash(), m.DbTx). + Return(tx.Hash(), nil). + Once() } + m.State. On("GetTransactionsByBatchNumber", context.Background(), uint64(tc.ExpectedResult.Number), m.DbTx). Return(batchTxs, effectivePercentages, nil). @@ -1911,8 +1923,7 @@ func TestGetTransactionByL2Hash(t *testing.T) { txV, txR, txS := signedTx.RawSignatureValues() - l2Hash, err := state.GetL2Hash(*signedTx) - require.NoError(t, err) + l2Hash := common.HexToHash("0x987654321") rpcTransaction := types.Transaction{ Nonce: types.ArgUint64(signedTx.Nonce()), @@ -1962,6 +1973,11 @@ func TestGetTransactionByL2Hash(t *testing.T) { On("GetTransactionReceipt", context.Background(), tc.Hash, m.DbTx). Return(receipt, nil). Once() + + m.State. + On("GetL2TxHashByTxHash", context.Background(), signedTx.Hash(), m.DbTx). + Return(l2Hash, nil). + Once() }, }, { @@ -1974,6 +1990,7 @@ func TestGetTransactionByL2Hash(t *testing.T) { tc.ExpectedResult.BlockHash = nil tc.ExpectedResult.BlockNumber = nil tc.ExpectedResult.TxIndex = nil + tc.ExpectedResult.L2Hash = nil m.DbTx. On("Commit", context.Background()). 
@@ -2201,8 +2218,7 @@ func TestGetTransactionReceiptByL2Hash(t *testing.T) { signedTx, err := auth.Signer(auth.From, tx) require.NoError(t, err) - l2Hash, err := state.GetL2Hash(*signedTx) - require.NoError(t, err) + l2Hash := common.HexToHash("0x987654321") log := ðTypes.Log{Topics: []common.Hash{common.HexToHash("0x1")}, Data: []byte{}} logs := []*ethTypes.Log{log} @@ -2273,6 +2289,11 @@ func TestGetTransactionReceiptByL2Hash(t *testing.T) { On("GetTransactionReceipt", context.Background(), tc.Hash, m.DbTx). Return(receipt, nil). Once() + + m.State. + On("GetL2TxHashByTxHash", context.Background(), signedTx.Hash(), m.DbTx). + Return(l2Hash, nil). + Once() }, }, { @@ -2398,6 +2419,11 @@ func TestGetTransactionReceiptByL2Hash(t *testing.T) { On("GetTransactionReceipt", context.Background(), tc.Hash, m.DbTx). Return(ethTypes.NewReceipt([]byte{}, false, 0), nil). Once() + + m.State. + On("GetL2TxHashByTxHash", context.Background(), tx.Hash(), m.DbTx). + Return(l2Hash, nil). + Once() }, }, } diff --git a/jsonrpc/mocks/mock_state.go b/jsonrpc/mocks/mock_state.go index 632440ad8b..a01634ace7 100644 --- a/jsonrpc/mocks/mock_state.go +++ b/jsonrpc/mocks/mock_state.go @@ -507,6 +507,36 @@ func (_m *StateMock) GetL2BlocksByBatchNumber(ctx context.Context, batchNumber u return r0, r1 } +// GetL2TxHashByTxHash provides a mock function with given fields: ctx, hash, dbTx +func (_m *StateMock) GetL2TxHashByTxHash(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (common.Hash, error) { + ret := _m.Called(ctx, hash, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetL2TxHashByTxHash") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, pgx.Tx) (common.Hash, error)); ok { + return rf(ctx, hash, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, pgx.Tx) common.Hash); ok { + r0 = rf(ctx, hash, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, pgx.Tx) error); ok { + r1 = rf(ctx, hash, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLastBatchNumber provides a mock function with given fields: ctx, dbTx func (_m *StateMock) GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) { ret := _m.Called(ctx, dbTx) diff --git a/jsonrpc/types/interfaces.go b/jsonrpc/types/interfaces.go index 3a9011d99f..651034bba2 100644 --- a/jsonrpc/types/interfaces.go +++ b/jsonrpc/types/interfaces.go @@ -75,6 +75,7 @@ type StateInterface interface { GetLastVerifiedBatchNumberUntilL1Block(ctx context.Context, l1BlockNumber uint64, dbTx pgx.Tx) (uint64, error) GetBatchTimestamp(ctx context.Context, batchNumber uint64, forcedForkId *uint64, dbTx pgx.Tx) (*time.Time, error) GetLatestBatchGlobalExitRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) + GetL2TxHashByTxHash(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (common.Hash, error) } // EthermanInterface provides integration with L1 diff --git a/jsonrpc/types/types.go b/jsonrpc/types/types.go index 167d100351..8f3745c7b7 100644 --- a/jsonrpc/types/types.go +++ b/jsonrpc/types/types.go @@ -264,7 +264,7 @@ type Block struct { } // NewBlock creates a Block instance -func NewBlock(hash *common.Hash, b *state.L2Block, receipts []types.Receipt, fullTx, includeReceipts bool, includeExtraInfo *bool) (*Block, error) { +func NewBlock(ctx context.Context, st StateInterface, hash *common.Hash, b *state.L2Block, receipts []types.Receipt, fullTx, 
includeReceipts bool, includeExtraInfo *bool, dbTx pgx.Tx) (*Block, error) { h := b.Header() var miner *common.Address @@ -323,7 +323,16 @@ func NewBlock(hash *common.Hash, b *state.L2Block, receipts []types.Receipt, ful receiptPtr = &receipt } - rpcTx, err := NewTransaction(*tx, receiptPtr, includeReceipts, includeExtraInfo) + var l2Hash *common.Hash + if includeExtraInfo != nil && *includeExtraInfo { + l2h, err := st.GetL2TxHashByTxHash(ctx, tx.Hash(), dbTx) + if err != nil { + return nil, err + } + l2Hash = &l2h + } + + rpcTx, err := NewTransaction(*tx, receiptPtr, includeReceipts, l2Hash) if err != nil { return nil, err } @@ -368,7 +377,7 @@ type Batch struct { } // NewBatch creates a Batch instance -func NewBatch(batch *state.Batch, virtualBatch *state.VirtualBatch, verifiedBatch *state.VerifiedBatch, blocks []state.L2Block, receipts []types.Receipt, fullTx, includeReceipts bool, ger *state.GlobalExitRoot) (*Batch, error) { +func NewBatch(ctx context.Context, st StateInterface, batch *state.Batch, virtualBatch *state.VirtualBatch, verifiedBatch *state.VerifiedBatch, blocks []state.L2Block, receipts []types.Receipt, fullTx, includeReceipts bool, ger *state.GlobalExitRoot, dbTx pgx.Tx) (*Batch, error) { batchL2Data := batch.BatchL2Data closed := !batch.WIP res := &Batch{ @@ -409,7 +418,11 @@ func NewBatch(batch *state.Batch, virtualBatch *state.VirtualBatch, verifiedBatc if receipt, found := receiptsMap[tx.Hash()]; found { receiptPtr = &receipt } - rpcTx, err := NewTransaction(tx, receiptPtr, includeReceipts, state.Ptr(true)) + l2Hash, err := st.GetL2TxHashByTxHash(ctx, tx.Hash(), dbTx) + if err != nil { + return nil, err + } + rpcTx, err := NewTransaction(tx, receiptPtr, includeReceipts, &l2Hash) if err != nil { return nil, err } @@ -423,7 +436,7 @@ func NewBatch(batch *state.Batch, virtualBatch *state.VirtualBatch, verifiedBatc for _, b := range blocks { b := b if fullTx { - block, err := NewBlock(state.Ptr(b.Hash()), &b, nil, false, false, state.Ptr(true)) + block, err := NewBlock(ctx, st, state.Ptr(b.Hash()), &b, nil, false, false, state.Ptr(true), dbTx) if err != nil { return nil, err } @@ -550,11 +563,10 @@ func (t Transaction) CoreTx() *types.Transaction { func NewTransaction( tx types.Transaction, receipt *types.Receipt, - includeReceipt bool, includeExtraInfo *bool, + includeReceipt bool, l2Hash *common.Hash, ) (*Transaction, error) { v, r, s := tx.RawSignatureValues() from, _ := state.GetSender(tx) - l2Hash, _ := state.GetL2Hash(tx) res := &Transaction{ Nonce: ArgUint64(tx.Nonce()), @@ -570,10 +582,7 @@ func NewTransaction( From: from, ChainID: ArgBig(*tx.ChainId()), Type: ArgUint64(tx.Type()), - } - - if includeExtraInfo != nil && *includeExtraInfo { - res.L2Hash = &l2Hash + L2Hash: l2Hash, } if receipt != nil { @@ -582,7 +591,7 @@ func NewTransaction( res.BlockHash = &receipt.BlockHash ti := ArgUint64(receipt.TransactionIndex) res.TxIndex = &ti - rpcReceipt, err := NewReceipt(tx, receipt, includeExtraInfo) + rpcReceipt, err := NewReceipt(tx, receipt, l2Hash) if err != nil { return nil, err } @@ -615,7 +624,7 @@ type Receipt struct { } // NewReceipt creates a new Receipt instance -func NewReceipt(tx types.Transaction, r *types.Receipt, includeExtraInfo *bool) (Receipt, error) { +func NewReceipt(tx types.Transaction, r *types.Receipt, l2Hash *common.Hash) (Receipt, error) { to := tx.To() logs := r.Logs if logs == nil { @@ -637,10 +646,6 @@ func NewReceipt(tx types.Transaction, r *types.Receipt, includeExtraInfo *bool) if err != nil { return Receipt{}, err } - l2Hash, err := 
state.GetL2Hash(tx) - if err != nil { - return Receipt{}, err - } receipt := Receipt{ Root: common.BytesToHash(r.PostState), CumulativeGasUsed: ArgUint64(r.CumulativeGasUsed), @@ -656,16 +661,13 @@ func NewReceipt(tx types.Transaction, r *types.Receipt, includeExtraInfo *bool) FromAddr: from, ToAddr: to, Type: ArgUint64(r.Type), + TxL2Hash: l2Hash, } if r.EffectiveGasPrice != nil { egp := ArgBig(*r.EffectiveGasPrice) receipt.EffectiveGasPrice = &egp } - if includeExtraInfo != nil && *includeExtraInfo { - receipt.TxL2Hash = &l2Hash - } - return receipt, nil } diff --git a/state/genesis.go b/state/genesis.go index 29e2a87e74..5abc927c49 100644 --- a/state/genesis.go +++ b/state/genesis.go @@ -190,19 +190,14 @@ func (s *State) SetGenesis(ctx context.Context, block Block, genesis Genesis, m l2Block := NewL2Block(header, []*types.Transaction{}, []*L2Header{}, receipts, &trie.StackTrie{}) l2Block.ReceivedAt = block.ReceivedAt - numTxs := len(l2Block.Transactions()) - storeTxsEGPData := make([]StoreTxEGPData, numTxs) - txsL2Hash := make([]common.Hash, numTxs) - for i, txTmp := range l2Block.Transactions() { - storeTxsEGPData[i] = StoreTxEGPData{EGPLog: nil, EffectivePercentage: MaxEffectivePercentage} - aux := *txTmp - l2TxHash, err := GetL2Hash(aux) - if err != nil { - return common.Hash{}, err - } - txsL2Hash[i] = l2TxHash + // Sanity check + if len(l2Block.Transactions()) > 0 { + return common.Hash{}, fmt.Errorf("genesis L2Block contains %d transactions and should have 0", len(l2Block.Transactions())) } + storeTxsEGPData := []StoreTxEGPData{} + txsL2Hash := []common.Hash{} + err = s.AddL2Block(ctx, batch.BatchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) if err != nil { return common.Hash{}, err diff --git a/state/interfaces.go b/state/interfaces.go index 8100fd25a6..6b0cf82e68 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -149,4 +149,5 @@ type storage interface { GetVirtualBatchParentHash(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetForcedBatchParentHash(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetLatestBatchGlobalExitRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) + GetL2TxHashByTxHash(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (common.Hash, error) } diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go index 51a6284c3c..eac22d7cf5 100644 --- a/state/mocks/mock_storage.go +++ b/state/mocks/mock_storage.go @@ -3244,6 +3244,66 @@ func (_c *StorageMock_GetL2BlocksByBatchNumber_Call) RunAndReturn(run func(conte return _c } +// GetL2TxHashByTxHash provides a mock function with given fields: ctx, hash, dbTx +func (_m *StorageMock) GetL2TxHashByTxHash(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (common.Hash, error) { + ret := _m.Called(ctx, hash, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetL2TxHashByTxHash") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, pgx.Tx) (common.Hash, error)); ok { + return rf(ctx, hash, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, pgx.Tx) common.Hash); ok { + r0 = rf(ctx, hash, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, pgx.Tx) error); ok { + r1 = rf(ctx, hash, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StorageMock_GetL2TxHashByTxHash_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'GetL2TxHashByTxHash' +type StorageMock_GetL2TxHashByTxHash_Call struct { + *mock.Call +} + +// GetL2TxHashByTxHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) GetL2TxHashByTxHash(ctx interface{}, hash interface{}, dbTx interface{}) *StorageMock_GetL2TxHashByTxHash_Call { + return &StorageMock_GetL2TxHashByTxHash_Call{Call: _e.mock.On("GetL2TxHashByTxHash", ctx, hash, dbTx)} +} + +func (_c *StorageMock_GetL2TxHashByTxHash_Call) Run(run func(ctx context.Context, hash common.Hash, dbTx pgx.Tx)) *StorageMock_GetL2TxHashByTxHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_GetL2TxHashByTxHash_Call) Return(_a0 common.Hash, _a1 error) *StorageMock_GetL2TxHashByTxHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StorageMock_GetL2TxHashByTxHash_Call) RunAndReturn(run func(context.Context, common.Hash, pgx.Tx) (common.Hash, error)) *StorageMock_GetL2TxHashByTxHash_Call { + _c.Call.Return(run) + return _c +} + // GetLastBatchNumber provides a mock function with given fields: ctx, dbTx func (_m *StorageMock) GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) { ret := _m.Called(ctx, dbTx) diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go index 6a4c395c0d..b37689e709 100644 --- a/state/pgstatestorage/pgstatestorage_test.go +++ b/state/pgstatestorage/pgstatestorage_test.go @@ -196,12 +196,9 @@ func TestGetBatchByL2BlockNumber(t *testing.T) { numTxs := len(transactions) storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) txsL2Hash := make([]common.Hash, numTxs) - for i, txTmp := range transactions { + for i := range transactions { storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} - aux := *txTmp - l2TxHash, err := state.GetL2Hash(aux) - require.NoError(t, err) - txsL2Hash[i] = l2TxHash + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) } err = pgStateStorage.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) @@ -721,12 +718,9 @@ func TestGetLastVerifiedL2BlockNumberUntilL1Block(t *testing.T) { numTxs := len(l2Block.Transactions()) storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) txsL2Hash := make([]common.Hash, numTxs) - for i, txTmp := range l2Block.Transactions() { + for i := range l2Block.Transactions() { storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(0)} - aux := *txTmp - l2TxHash, err := state.GetL2Hash(aux) - require.NoError(t, err) - txsL2Hash[i] = l2TxHash + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) } err = testState.AddL2Block(ctx, batchNumber, l2Block, []*types.Receipt{}, txsL2Hash, storeTxsEGPData, dbTx) @@ -947,12 +941,9 @@ func TestGetLogs(t *testing.T) { numTxs := len(transactions) storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) txsL2Hash := make([]common.Hash, numTxs) - for i, txTmp := range transactions { + for i := range transactions { storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} - aux := *txTmp - l2TxHash, err := state.GetL2Hash(aux) - require.NoError(t, err) - txsL2Hash[i] = l2TxHash + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) } err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, 
storeTxsEGPData, dbTx) @@ -1080,12 +1071,9 @@ func TestGetNativeBlockHashesInRange(t *testing.T) { numTxs := len(transactions) storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) txsL2Hash := make([]common.Hash, numTxs) - for i, txTmp := range transactions { + for i := range transactions { storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} - aux := *txTmp - l2TxHash, err := state.GetL2Hash(aux) - require.NoError(t, err) - txsL2Hash[i] = l2TxHash + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) } err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) diff --git a/state/pgstatestorage/transaction.go b/state/pgstatestorage/transaction.go index 17490e766c..5ef4ad9485 100644 --- a/state/pgstatestorage/transaction.go +++ b/state/pgstatestorage/transaction.go @@ -554,3 +554,21 @@ func (p *PostgresStorage) GetTransactionEGPLogByHash(ctx context.Context, transa return &egpLog, nil } + +// GetL2TxHashByTxHash gets the L2 Hash from the tx found by the provided tx hash +func (p *PostgresStorage) GetL2TxHashByTxHash(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (common.Hash, error) { + const getTransactionByHashSQL = "SELECT transaction.l2_hash FROM state.transaction WHERE hash = $1" + + var l2HashHex string + q := p.getExecQuerier(dbTx) + err := q.QueryRow(ctx, getTransactionByHashSQL, hash.String()).Scan(&l2HashHex) + + if errors.Is(err, pgx.ErrNoRows) { + return common.Hash{}, state.ErrNotFound + } else if err != nil { + return common.Hash{}, err + } + + l2Hash := common.HexToHash(l2HashHex) + return l2Hash, nil +} diff --git a/state/test/forkid_dragonfruit/dragonfruit_test.go b/state/test/forkid_dragonfruit/dragonfruit_test.go index 806bc9aba7..791804137a 100644 --- a/state/test/forkid_dragonfruit/dragonfruit_test.go +++ b/state/test/forkid_dragonfruit/dragonfruit_test.go @@ -1499,12 +1499,9 @@ func TestExecutorRevert(t *testing.T) { numTxs := len(transactions) storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) txsL2Hash := make([]common.Hash, numTxs) - for i, txTmp := range transactions { + for i := range transactions { storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} - aux := *txTmp - l2TxHash, err := state.GetL2Hash(aux) - require.NoError(t, err) - txsL2Hash[i] = l2TxHash + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) } err = testState.AddL2Block(ctx, 0, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) diff --git a/state/test/forkid_independent/independent_test.go b/state/test/forkid_independent/independent_test.go index 4ef0b04a72..a7dec58a10 100644 --- a/state/test/forkid_independent/independent_test.go +++ b/state/test/forkid_independent/independent_test.go @@ -653,12 +653,9 @@ func TestAddGetL2Block(t *testing.T) { numTxs := len(transactions) storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) txsL2Hash := make([]common.Hash, numTxs) - for i, txTmp := range transactions { + for i := range transactions { storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} - aux := *txTmp - l2TxHash, err := state.GetL2Hash(aux) - require.NoError(t, err) - txsL2Hash[i] = l2TxHash + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) } err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, dbTx) diff --git a/state/test/l2txhash_test.go b/state/test/l2txhash_test.go deleted file mode 100644 index f91c3268bf..0000000000 --- 
a/state/test/l2txhash_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package test - -import ( - "encoding/json" - "math/big" - "testing" - - "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/stretchr/testify/require" -) - -const vectorString = `[ - { - "nonce": "", - "gasPrice": "0x3b9aca00", - "gasLimit": "186a0", - "to": "0x1275fbb540c8efc58b812ba83b0d0b8b9917ae98", - "value": "0x100", - "data": "0xs5b8e9959000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000da608060405234801561001057600080fd5b5060bb8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063f6d97e8c14602d575b600080fd5b60336047565b604051603e91906062565b60405180910390f35b6000806003s90508091505090565b605c81607b565b82525050565b6000602082019050607560008301846055565b92915050565b600081905091905056fea2646970667358221220a33fdecaf587db45fa0e1fe4bfca25de09e35bb9a45fa6dab1bf1964244a929164736f6c63430008070033000000000000", - "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", - "l2TxHash": "failed", - "reason": "Invalid nonce value" - }, - { - "nonce": "0x00", - "gasPrice": "0x3b9aca00", - "gasLimit": "0x186a0", - "to": "0x005Cf5032B2a844602278b01199ED191A86c93ff", - "value": "0x00", - "data": "0x56d5be740000000000000000000000001275fbb540c8efc58b812ba83b0d0b8b9917ae98", - "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", - "l2TxHash": "0x8f9b0375a6b0f1bd9d54ff499921766828ae8e5314fc44a494736b5c4cc3bb56" - }, - { - "nonce": "0x00", - "gasPrice": "0x3b9aca00", - "gasLimit": "0x186a0", - "to": "", - "value": "0x00", - "data": "0x56d5be740000000000000000000000001275fbb540c8efc58b812ba83b0d0b8b9917ae98", - "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", - "l2TxHash": "0x380cb737b37d06e6d7e6a24ba2b1a3e29e18b28d367fc5a4fa57ddcf60a12a54" - }, - { - "nonce": "0x00", - "gasPrice": "0x3b9aca00", - "gasLimit": "0x186a0", - "to": "", - "value": "0x00", - "data": "", - "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", - "l2TxHash": "0x822ab53546a48dcbc25cdccc883e378b911554b9f00599a0d783e6376246732d" - }, - { - "nonce": "0x1", - "gasPrice": "0x3b9aca00", - "gasLimit": "0x186a0", - "to": "0x1275fbb540c8efc58b812ba83b0d0b8b9917ae98", - "value": "0x100", - "data": "0x5b8e9959000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000da608060405234801561001057600080fd5b5060bb8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063f6d97e8c14602d575b600080fd5b60336047565b604051603e91906062565b60405180910390f35b600080600390508091505090565b605c81607b565b82525050565b6000602082019050607560008301846055565b92915050565b600081905091905056fea2646970667358221220a33fdecaf587db45fa0e1fe4bfca25de09e35bb9a45fa6dab1bf1964244a929164736f6c63430008070033000000000000", - "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", - "l2TxHash": "0xaa8f08e5bee683718f3f14fa352aaeb8e7de49f8b0e59f03128ef37fa6ac18e3" - }, - { - "nonce": "0x1", - "gasPrice": "0x3b9aca00", - "gasLimit": "v186a0", - "to": "0x1275fbb540c8efc58b812ba83b0d0b8b9917ae98", - "value": "0x100", - "data": 
"0x5b8e9959000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000da608060405234801561001057600080fd5b5060bb8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063f6d97e8c14602d575b600080fd5b60336047565b604051603e91906062565b60405180910390f35b600080600390508091505090565b605c81607b565b82525050565b6000602082019050607560008301846055565b92915050565b600081905091905056fea2646970667358221220a33fdecaf587db45fa0e1fe4bfca25de09e35bb9a45fa6dab1bf1964244a929164736f6c63430008070033000000000000", - "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", - "l2TxHash": "failed", - "reason": "Invalid gasLimit value" - }, - { - "nonce": "0x21", - "gasPrice": "0x3b9aca00", - "gasLimit": "186a0", - "to": "0x1275fbb540c8efc58b812ba83b0d0b8b9917ae98", - "value": "0x100", - "data": "0xs5b8e9959000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000da608060405234801561001057600080fd5b5060bb8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063f6d97e8c14602d575b600080fd5b60336047565b604051603e91906062565b60405180910390f35b6000806003s90508091505090565b605c81607b565b82525050565b6000602082019050607560008301846055565b92915050565b600081905091905056fea2646970667358221220a33fdecaf587db45fa0e1fe4bfca25de09e35bb9a45fa6dab1bf1964244a929164736f6c63430008070033000000000000", - "from": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", - "l2TxHash": "failed", - "reason": "Invalid data value" - } -]` - -type testVector struct { - Nonce string `json:"nonce"` - GasPrice string `json:"gasPrice"` - GasLimit string `json:"gasLimit"` - To string `json:"to"` - Value string `json:"value"` - Data string `json:"data"` - From string `json:"from"` - L2TxHash string `json:"l2TxHash"` - Reason string `json:"reason"` - Signature string `json:"signature"` -} - -func TestL2TxHash(t *testing.T) { - // Unmarshall the test vector - var testVectors []testVector - err := json.Unmarshal([]byte(vectorString), &testVectors) - if err != nil { - require.NoError(t, err) - } - - // Create types.Transaction from test vector - for x, testVector := range testVectors { - to := &common.Address{} - nonce := new(big.Int).SetBytes(common.FromHex(testVector.Nonce)).Uint64() - gasPrice := new(big.Int).SetBytes(common.FromHex(testVector.GasPrice)) - gasLimit := new(big.Int).SetBytes(common.FromHex(testVector.GasLimit)).Uint64() - if testVector.To != "" { - aux := common.HexToAddress(testVector.To) - to = &aux - } else { - to = nil - } - - value := new(big.Int).SetBytes(common.FromHex(testVector.Value)) - data := common.FromHex(testVector.Data) - from := common.HexToAddress(testVector.From) - - if testVector.L2TxHash != "failed" { - log.Debug("Test vector: ", x) - log.Debugf("nonce: %x", nonce) - log.Debugf("gasPrice: %x", gasPrice) - log.Debugf("gasLimit: %x", gasLimit) - if to != nil { - log.Debugf("to: %s", to.String()) - } else { - log.Debugf("to: nil") - } - log.Debugf("value: %x", value) - log.Debugf("data: %s", common.Bytes2Hex(data)) - log.Debugf("from: %s", from.String()) - - tx := types.NewTx(&types.LegacyTx{Nonce: nonce, - GasPrice: gasPrice, - Gas: gasLimit, - To: to, - Value: value, - Data: data}) - require.NoError(t, err) - - hash, err := state.TestGetL2Hash(*tx, from) - require.NoError(t, err) - - require.Equal(t, 
testVector.L2TxHash, hash.String()) - } - } -} diff --git a/state/transaction.go b/state/transaction.go index bb74ca2e1f..d393956bd6 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -5,13 +5,11 @@ import ( "errors" "fmt" "math/big" - "strings" "time" "github.com/0xPolygonHermez/zkevm-node/event" "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/merkletree" "github.com/0xPolygonHermez/zkevm-node/state/runtime" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ethereum/go-ethereum/common" @@ -24,69 +22,6 @@ import ( "google.golang.org/grpc/status" ) -// TestGetL2Hash computes the l2 hash of a transaction for testing purposes -func TestGetL2Hash(tx types.Transaction, sender common.Address) (common.Hash, error) { - return getL2Hash(tx, sender) -} - -// GetL2Hash computes the l2 hash of a transaction -func GetL2Hash(tx types.Transaction) (common.Hash, error) { - sender, err := GetSender(tx) - if err != nil { - log.Debugf("error getting sender: %v", err) - } - - return getL2Hash(tx, sender) -} - -func getL2Hash(tx types.Transaction, sender common.Address) (common.Hash, error) { - var input string - input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Nonce())) - input += formatL2TxHashParam(fmt.Sprintf("%x", tx.GasPrice())) - input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Gas())) - if tx.To() != nil { - input += pad20Bytes(formatL2TxHashParam(fmt.Sprintf("%x", tx.To()))) - } - input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Value())) - if len(tx.Data()) > 0 { - input += formatL2TxHashParam(fmt.Sprintf("%x", tx.Data())) - } - if sender != ZeroAddress { - input += pad20Bytes(formatL2TxHashParam(fmt.Sprintf("%x", sender))) - } - - h4Hash, err := merkletree.HashContractBytecode(common.Hex2Bytes(input)) - if err != nil { - return common.Hash{}, err - } - - return common.HexToHash(merkletree.H4ToString(h4Hash)), nil -} - -// pad20Bytes pads the given address with 0s to make it 20 bytes long -func pad20Bytes(address string) string { - const addressLength = 40 - - if len(address) < addressLength { - address = strings.Repeat("0", addressLength-len(address)) + address - } - return address -} - -func formatL2TxHashParam(param string) string { - param = strings.TrimLeft(param, "0x") - - if param == "00" || param == "" { - return "00" - } - - if len(param)%2 != 0 { - param = "0" + param - } - - return param -} - // GetSender gets the sender from the transaction's signature func GetSender(tx types.Transaction) (common.Address, error) { signer := types.NewEIP155Signer(tx.ChainId()) From 8124e5232ed77e1c0a4e233647f1e098ed409e4c Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:06:45 +0100 Subject: [PATCH 18/54] etrog: fix permissionless errors (#3140) * if got an error getting lastBlock produce a SIGSEGV * reduce info logs * fix unittest --- .../l1_rollup_info_producer.go | 34 ++++++++------ synchronizer/l1_parallel_sync/l1_workers.go | 4 ++ .../mock_workers_interface.go | 45 +++++++++++++++++++ 3 files changed, 70 insertions(+), 13 deletions(-) diff --git a/synchronizer/l1_parallel_sync/l1_rollup_info_producer.go b/synchronizer/l1_parallel_sync/l1_rollup_info_producer.go index dd2fb58ef6..a297ffe04d 100644 --- a/synchronizer/l1_parallel_sync/l1_rollup_info_producer.go +++ b/synchronizer/l1_parallel_sync/l1_rollup_info_producer.go @@ -21,7 +21,6 @@ import ( "sync/atomic" "time" - "github.com/0xPolygonHermez/zkevm-node" 
"github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/synchronizer/common" ) @@ -90,6 +89,7 @@ type workersInterface interface { requestLastBlockWithRetries(ctx context.Context, timeout time.Duration, maxPermittedRetries int) responseL1LastBlock getResponseChannelForRollupInfo() chan responseRollupInfoByBlockRange String() string + ToStringBrief() string howManyRunningWorkers() int } @@ -229,7 +229,7 @@ func (l *L1RollupInfoProducer) Reset(startingBlockNumber uint64) { } func (l *L1RollupInfoProducer) resetUnsafe(startingBlockNumber uint64) { - log.Infof("producer: Reset L1 sync process to blockNumber %d st=%s", startingBlockNumber, l.toStringBrief()) + log.Debugf("producer: Reset L1 sync process to blockNumber %d st=%s", startingBlockNumber, l.toStringBrief()) l.setStatusReseting() log.Debugf("producer: Reset(%d): stop previous run (state=%s)", startingBlockNumber, l.getStatus().String()) log.Debugf("producer: Reset(%d): syncStatus.reset", startingBlockNumber) @@ -243,7 +243,7 @@ func (l *L1RollupInfoProducer) resetUnsafe(startingBlockNumber uint64) { log.Debugf("producer: Reset(%d): reset Filter", startingBlockNumber) l.filterToSendOrdererResultsToConsumer.Reset(startingBlockNumber) l.setStatus(producerIdle) - log.Infof("producer: Reset(%d): reset done!", startingBlockNumber) + log.Infof("producer: Reset(%d): reset producer done!", startingBlockNumber) } func (l *L1RollupInfoProducer) isProducerRunning() bool { @@ -351,13 +351,13 @@ func (l *L1RollupInfoProducer) step(waitDuration *time.Duration) bool { if atomic.CompareAndSwapInt32((*int32)(&l.status), int32(producerNoRunning), int32(producerIdle)) { // l.getStatus() == producerNoRunning log.Info("producer: step: status is no running, changing to idle %s", l.getStatus().String()) } - log.Infof("producer: build_time:%s step: status:%s", zkevm.BuildDate, l.toStringBrief()) + log.Debugf("producer: step: status:%s", l.toStringBrief()) select { case <-l.ctxWithCancel.Done(): log.Debugf("producer: context canceled") return false case cmd := <-l.channelCmds: - log.Infof("producer: received a command") + log.Debugf("producer: received a command") res := l.executeCmd(cmd) if !res { log.Info("producer: cmd %s stop the process", cmd.cmd.String()) @@ -438,7 +438,7 @@ func (l *L1RollupInfoProducer) step(waitDuration *time.Duration) bool { func (l *L1RollupInfoProducer) executeCmd(cmd producerCmd) bool { switch cmd.cmd { case producerStop: - log.Infof("producer: received a stop, so it stops processing") + log.Infof("producer: received a stop, so it stops requesting new rollup info and stop current requests") l.stopUnsafe() return false case producerReset: @@ -534,7 +534,7 @@ func (l *L1RollupInfoProducer) launchWork() (int, error) { blockRangeMsg := br.String() + unsafeAreaMsg _, err := l.workers.asyncRequestRollupInfoByBlockRange(l.ctxWithCancel.ctx, request) if err != nil { - if errors.Is(err, errAllWorkersBusy) { + if !errors.Is(err, errAllWorkersBusy) { accDebugStr += fmt.Sprintf(" segment %s -> [Error:%s] ", blockRangeMsg, err.Error()) } break @@ -545,7 +545,10 @@ func (l *L1RollupInfoProducer) launchWork() (int, error) { log.Debugf("producer: launch_worker: Launched worker for segment %s, num_workers_in_this_iteration: %d", blockRangeMsg, launchedWorker) l.syncStatus.OnStartedNewWorker(*br) } - log.Infof("producer: launch_worker: num of launched workers: %d result: %s status_comm:%s", launchedWorker, accDebugStr, l.outgoingPackageStatusDebugString()) + if launchedWorker > 0 { + log.Infof("producer: launch_worker: 
num of launched workers: %d (%s) result: %s ", launchedWorker, l.workers.ToStringBrief(), accDebugStr) + } + log.Debugf("producer: launch_worker: num of launched workers: %d result: %s status_comm:%s", launchedWorker, accDebugStr, l.outgoingPackageStatusDebugString()) return launchedWorker, nil } @@ -559,13 +562,13 @@ func (l *L1RollupInfoProducer) renewLastBlockOnL1IfNeeded(reason string) { ttl := l.ttlOfLastBlockOnL1() oldBlock := l.syncStatus.GetLastBlockOnL1() if elapsed > ttl { - log.Infof("producer: Need a new value for Last Block On L1, doing the request reason:%s", reason) + log.Debugf("producer: Need a new value for Last Block On L1, doing the request reason:%s", reason) result := l.workers.requestLastBlockWithRetries(l.ctxWithCancel.ctx, l.cfg.TimeoutForRequestLastBlockOnL1, l.cfg.NumOfAllowedRetriesForRequestLastBlockOnL1) - log.Infof("producer: Need a new value for Last Block On L1, doing the request old_block:%v -> new block:%v", oldBlock, result.result.block) if result.generic.err != nil { - log.Error(result.generic.err) return } + log.Infof("producer: Need a new value for Last Block On L1, doing the request old_block:%v -> new block:%v", oldBlock, result.result.block) + l.onNewLastBlock(result.result.block) } } @@ -588,7 +591,12 @@ func (l *L1RollupInfoProducer) onResponseRollupInfo(result responseRollupInfoByB } if isOk { outgoingPackages := l.filterToSendOrdererResultsToConsumer.Filter(*newL1SyncMessageData(result.result)) - log.Infof("producer: filtered Br[%s/%d], outgoing %d filter_status:%s", result.result.blockRange.String(), result.result.getHighestBlockNumberInResponse(), len(outgoingPackages), l.filterToSendOrdererResultsToConsumer.ToStringBrief()) + log.Debugf("producer: filtered Br[%s/%d], outgoing %d filter_status:%s", result.result.blockRange.String(), result.result.getHighestBlockNumberInResponse(), len(outgoingPackages), l.filterToSendOrdererResultsToConsumer.ToStringBrief()) + if len(outgoingPackages) > 0 { + for idx, msg := range outgoingPackages { + log.Infof("producer: sendind data to consumer: [%d/%d] -> range:[%s] Sending results [data] to consumer:%s ", idx, len(outgoingPackages), result.result.blockRange.String(), msg.toStringBrief()) + } + } l.sendPackages(outgoingPackages) } else { if errors.Is(result.generic.err, context.Canceled) { @@ -601,7 +609,7 @@ func (l *L1RollupInfoProducer) onResponseRollupInfo(result responseRollupInfoByB func (l *L1RollupInfoProducer) sendPackages(outgoingPackages []L1SyncMessage) { for _, pkg := range outgoingPackages { - log.Infof("producer: Sending results [data] to consumer:%s: status_comm:%s", pkg.toStringBrief(), l.outgoingPackageStatusDebugString()) + log.Debugf("producer: Sending results [data] to consumer:%s: status_comm:%s", pkg.toStringBrief(), l.outgoingPackageStatusDebugString()) l.outgoingChannel <- pkg } } diff --git a/synchronizer/l1_parallel_sync/l1_workers.go b/synchronizer/l1_parallel_sync/l1_workers.go index a55951434f..4f2e65421a 100644 --- a/synchronizer/l1_parallel_sync/l1_workers.go +++ b/synchronizer/l1_parallel_sync/l1_workers.go @@ -67,6 +67,10 @@ func (w *workers) String() string { return result } +func (w *workers) ToStringBrief() string { + return fmt.Sprintf(" working: %d of %d ", w.howManyRunningWorkers(), len(w.workers)) +} + func newWorkers(ethermans []L1ParallelEthermanInterface, cfg workersConfig) *workers { result := workers{chIncommingRollupInfo: make(chan responseRollupInfoByBlockRange, len(ethermans)+1), cfg: cfg} diff --git a/synchronizer/l1_parallel_sync/mock_workers_interface.go 
b/synchronizer/l1_parallel_sync/mock_workers_interface.go index 46327d594d..af162aba67 100644 --- a/synchronizer/l1_parallel_sync/mock_workers_interface.go +++ b/synchronizer/l1_parallel_sync/mock_workers_interface.go @@ -67,6 +67,51 @@ func (_c *workersInterfaceMock_String_Call) RunAndReturn(run func() string) *wor return _c } +// ToStringBrief provides a mock function with given fields: +func (_m *workersInterfaceMock) ToStringBrief() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ToStringBrief") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// workersInterfaceMock_ToStringBrief_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ToStringBrief' +type workersInterfaceMock_ToStringBrief_Call struct { + *mock.Call +} + +// ToStringBrief is a helper method to define mock.On call +func (_e *workersInterfaceMock_Expecter) ToStringBrief() *workersInterfaceMock_ToStringBrief_Call { + return &workersInterfaceMock_ToStringBrief_Call{Call: _e.mock.On("ToStringBrief")} +} + +func (_c *workersInterfaceMock_ToStringBrief_Call) Run(run func()) *workersInterfaceMock_ToStringBrief_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *workersInterfaceMock_ToStringBrief_Call) Return(_a0 string) *workersInterfaceMock_ToStringBrief_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *workersInterfaceMock_ToStringBrief_Call) RunAndReturn(run func() string) *workersInterfaceMock_ToStringBrief_Call { + _c.Call.Return(run) + return _c +} + // asyncRequestRollupInfoByBlockRange provides a mock function with given fields: ctx, request func (_m *workersInterfaceMock) asyncRequestRollupInfoByBlockRange(ctx context.Context, request requestRollupInfoByBlockRange) (chan responseRollupInfoByBlockRange, error) { ret := _m.Called(ctx, request) From 895c1e057f57e539848a5c8e86170ad588390121 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:19:47 +0100 Subject: [PATCH 19/54] Fix missing batch bookmark (#3122) * fix missingBookMark * fix missingBookMark * fixes * fix * fix * fix seq-sender config * fixes * doc --- docs/config-file/node-config-doc.html | 2 +- docs/config-file/node-config-doc.md | 84 ------------------------ docs/config-file/node-config-schema.json | 49 -------------- sequencer/sequencer.go | 13 ++-- sequencesender/config.go | 12 ---- state/datastream.go | 34 +++++++--- state/pgstatestorage/datastream.go | 5 +- 7 files changed, 35 insertions(+), 164 deletions(-) diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index 959c636b8c..d1f7eb383c 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -56,7 +56,7 @@
"300ms"
 

Default: "30s"Type: string

L1BlockTimestampMargin is the time difference (margin) that must exists between last L1 block and last L2 block in the sequence before
to send the sequence to L1. If the difference is lower than this value then sequencesender will wait until the difference is equal or greater


Examples:

"1m"
 
"300ms"
-

Default: 131072Type: integer

MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
non-trivial consequences: larger transactions than 128KB are significantly harder and
more expensive to propagate; larger transactions also take more resources
to validate whether they fit into the pool or not.


Type: array of integer

SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Default: "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"Type: array of integer

L2Coinbase defines which address is going to receive the fees

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


PrivateKey defines all the key store files that are going to be read in order to provide the private keys to sign the L1 txs
Default: "/pk/sequencer.keystore"Type: string

Path is the file path for the key store file


Default: "testonly"Type: string

Password is the password to decrypt the key store file


Default: 0Type: integer

Batch number where there is a forkid change (fork upgrade)


Default: 80000Type: integer

GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.

ex:
gas estimation: 1000
gas offset: 100
final gas: 1100


StreamClientCfg is the config for the stream client
Default: ""Type: string

Datastream server to connect


Log is the log configuration
Default: ""Type: enum (of string)

Environment defining the log format ("production" or "development").
In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.
Check here

Must be one of:

  • "production"
  • "development"

Default: ""Type: enum (of string)

Level of log. As lower value more logs are going to be generated

Must be one of:

  • "debug"
  • "info"
  • "warn"
  • "error"
  • "dpanic"
  • "panic"
  • "fatal"

Type: array of string

Outputs

Each item of this array must be:


Configuration of the aggregator service
Default: "0.0.0.0"Type: string

Host for the grpc server


Default: 50081Type: integer

Port for the grpc server


Default: "5s"Type: string

RetryTime is the time the aggregator main loop sleeps if there are no proofs to aggregate
or batches to generate proofs. It is also used in the isSynced loop


Examples:

"1m"
+

Default: 131072Type: integer

MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
non-trivial consequences: larger transactions than 128KB are significantly harder and
more expensive to propagate; larger transactions also take more resources
to validate whether they fit into the pool or not.


Type: array of integer

SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Default: "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"Type: array of integer

L2Coinbase defines which address is going to receive the fees

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


PrivateKey defines all the key store files that are going to be read in order to provide the private keys to sign the L1 txs
Default: "/pk/sequencer.keystore"Type: string

Path is the file path for the key store file


Default: "testonly"Type: string

Password is the password to decrypt the key store file


Default: 0Type: integer

Batch number where there is a forkid change (fork upgrade)


Default: 80000Type: integer

GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.

ex:
gas estimation: 1000
gas offset: 100
final gas: 1100


Configuration of the aggregator service
Default: "0.0.0.0"Type: string

Host for the grpc server


Default: 50081Type: integer

Port for the grpc server


Default: "5s"Type: string

RetryTime is the time the aggregator main loop sleeps if there are no proofs to aggregate
or batches to generate proofs. It is also used in the isSynced loop


Examples:

"1m"
 
"300ms"
 

Default: "1m30s"Type: string

VerifyProofInterval is the interval of time to verify/send an proof in L1


Examples:

"1m"
 
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 56341f187b..abeaa062ab 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -2139,7 +2139,6 @@ Must be one of:
 | - [PrivateKey](#SequenceSender_PrivateKey )                                                             | No      | object           | No         | -          | PrivateKey defines all the key store files that are going
to be read in order to provide the private keys to sign the L1 txs | | - [ForkUpgradeBatchNumber](#SequenceSender_ForkUpgradeBatchNumber ) | No | integer | No | - | Batch number where there is a forkid change (fork upgrade) | | - [GasOffset](#SequenceSender_GasOffset ) | No | integer | No | - | GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.

ex:
gas estimation: 1000
gas offset: 100
final gas: 1100 | -| - [StreamClient](#SequenceSender_StreamClient ) | No | object | No | - | StreamClientCfg is the config for the stream client | ### 11.1. `SequenceSender.WaitPeriodSendSequence` @@ -2334,89 +2333,6 @@ final gas: 1100 GasOffset=80000 ``` -### 11.10. `[SequenceSender.StreamClient]` - -**Type:** : `object` -**Description:** StreamClientCfg is the config for the stream client - -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| ------------------------------------------------ | ------- | ------ | ---------- | ---------- | ---------------------------- | -| - [Server](#SequenceSender_StreamClient_Server ) | No | string | No | - | Datastream server to connect | -| - [Log](#SequenceSender_StreamClient_Log ) | No | object | No | - | Log is the log configuration | - -#### 11.10.1. `SequenceSender.StreamClient.Server` - -**Type:** : `string` - -**Default:** `""` - -**Description:** Datastream server to connect - -**Example setting the default value** (""): -``` -[SequenceSender.StreamClient] -Server="" -``` - -#### 11.10.2. `[SequenceSender.StreamClient.Log]` - -**Type:** : `object` -**Description:** Log is the log configuration - -| Property | Pattern | Type | Deprecated | Definition | Title/Description | -| -------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| - [Environment](#SequenceSender_StreamClient_Log_Environment ) | No | enum (of string) | No | - | Environment defining the log format ("production" or "development").
In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.
Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig) | -| - [Level](#SequenceSender_StreamClient_Log_Level ) | No | enum (of string) | No | - | Level of log. As lower value more logs are going to be generated | -| - [Outputs](#SequenceSender_StreamClient_Log_Outputs ) | No | array of string | No | - | Outputs | - -##### 11.10.2.1. `SequenceSender.StreamClient.Log.Environment` - -**Type:** : `enum (of string)` - -**Default:** `""` - -**Description:** Environment defining the log format ("production" or "development"). -In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above. -Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig) - -**Example setting the default value** (""): -``` -[SequenceSender.StreamClient.Log] -Environment="" -``` - -Must be one of: -* "production" -* "development" - -##### 11.10.2.2. `SequenceSender.StreamClient.Log.Level` - -**Type:** : `enum (of string)` - -**Default:** `""` - -**Description:** Level of log. As lower value more logs are going to be generated - -**Example setting the default value** (""): -``` -[SequenceSender.StreamClient.Log] -Level="" -``` - -Must be one of: -* "debug" -* "info" -* "warn" -* "error" -* "dpanic" -* "panic" -* "fatal" - -##### 11.10.2.3. `SequenceSender.StreamClient.Log.Outputs` - -**Type:** : `array of string` -**Description:** Outputs - ## 12. `[Aggregator]` **Type:** : `object` diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json index 3a423746b9..2ffad27300 100644 --- a/docs/config-file/node-config-schema.json +++ b/docs/config-file/node-config-schema.json @@ -898,55 +898,6 @@ "type": "integer", "description": "GasOffset is the amount of gas to be added to the gas estimation in order\nto provide an amount that is higher than the estimated one. This is used\nto avoid the TX getting reverted in case something has changed in the network\nstate after the estimation which can cause the TX to require more gas to be\nexecuted.\n\nex:\ngas estimation: 1000\ngas offset: 100\nfinal gas: 1100", "default": 80000 - }, - "StreamClient": { - "properties": { - "Server": { - "type": "string", - "description": "Datastream server to connect", - "default": "" - }, - "Log": { - "properties": { - "Environment": { - "type": "string", - "enum": [ - "production", - "development" - ], - "description": "Environment defining the log format (\"production\" or \"development\").\nIn development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.\nCheck [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig)", - "default": "" - }, - "Level": { - "type": "string", - "enum": [ - "debug", - "info", - "warn", - "error", - "dpanic", - "panic", - "fatal" - ], - "description": "Level of log. 
As lower value more logs are going to be generated", - "default": "" - }, - "Outputs": { - "items": { - "type": "string" - }, - "type": "array", - "description": "Outputs" - } - }, - "additionalProperties": false, - "type": "object", - "description": "Log is the log configuration" - } - }, - "additionalProperties": false, - "type": "object", - "description": "StreamClientCfg is the config for the stream client" } }, "additionalProperties": false, diff --git a/sequencer/sequencer.go b/sequencer/sequencer.go index 7e19fbef46..0e04de2905 100644 --- a/sequencer/sequencer.go +++ b/sequencer/sequencer.go @@ -58,7 +58,9 @@ func New(cfg Config, batchCfg state.BatchConfig, poolCfg pool.Config, txPool txP eventLog: eventLog, } - sequencer.dataToStream = make(chan interface{}, batchCfg.Constraints.MaxTxsPerBatch*datastreamChannelMultiplier) + // TODO: Make configurable + channelBufferSize := 200 * datastreamChannelMultiplier // nolint:gomnd + sequencer.dataToStream = make(chan interface{}, channelBufferSize) return sequencer, nil } @@ -235,11 +237,10 @@ func (s *Sequencer) sendDataToStreamer() { dataStream := <-s.dataToStream if s.streamServer != nil { - switch t := dataStream.(type) { + switch data := dataStream.(type) { // Stream a complete L2 block with its transactions case state.DSL2FullBlock: - l2Block := t - l2Transactions := t.Txs + l2Block := data err = s.streamServer.StartAtomicOp() if err != nil { @@ -274,7 +275,7 @@ func (s *Sequencer) sendDataToStreamer() { continue } - for _, l2Transaction := range l2Transactions { + for _, l2Transaction := range l2Block.Txs { // Populate intermediate state root position := state.GetSystemSCPosition(blockStart.L2BlockNumber) imStateRoot, err := s.stateIntf.GetStorageAt(context.Background(), common.HexToAddress(state.SystemSC), big.NewInt(0).SetBytes(position), l2Block.StateRoot) @@ -310,7 +311,7 @@ func (s *Sequencer) sendDataToStreamer() { // Stream a bookmark case state.DSBookMark: - bookmark := t + bookmark := data err = s.streamServer.StartAtomicOp() if err != nil { diff --git a/sequencesender/config.go b/sequencesender/config.go index a99a299100..56da9f14ab 100644 --- a/sequencesender/config.go +++ b/sequencesender/config.go @@ -2,7 +2,6 @@ package sequencesender import ( "github.com/0xPolygonHermez/zkevm-node/config/types" - "github.com/0xPolygonHermez/zkevm-node/log" "github.com/ethereum/go-ethereum/common" ) @@ -42,15 +41,4 @@ type Config struct { // gas offset: 100 // final gas: 1100 GasOffset uint64 `mapstructure:"GasOffset"` - - // StreamClientCfg is the config for the stream client - StreamClient StreamClientCfg `mapstructure:"StreamClient"` -} - -// StreamClientCfg contains the data streamer's configuration properties -type StreamClientCfg struct { - // Datastream server to connect - Server string `mapstructure:"Server"` - // Log is the log configuration - Log log.Config `mapstructure:"Log"` } diff --git a/state/datastream.go b/state/datastream.go index 260587db4a..2ce8516397 100644 --- a/state/datastream.go +++ b/state/datastream.go @@ -230,7 +230,8 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St header := streamServer.GetHeader() var currentBatchNumber uint64 = 0 - var lastAddedL2Block uint64 = 0 + var lastAddedL2BlockNumber uint64 = 0 + var lastAddedBatchNumber uint64 = 0 if header.TotalEntries == 0 { // Get Genesis block @@ -323,6 +324,7 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St return err } currentBatchNumber = binary.LittleEndian.Uint64(firstEntry.Data[0:8]) 
+ lastAddedL2BlockNumber = currentL2BlockNumber case EntryTypeBookMark: log.Info("Latest entry type is BookMark") bookMark := DSBookMark{} @@ -349,6 +351,7 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St log.Infof("Current entry number: %d", entry) log.Infof("Current batch number: %d", currentBatchNumber) + log.Infof("Last added L2 block number: %d", lastAddedL2BlockNumber) for err == nil { // Get Next Batch @@ -382,10 +385,16 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St } // Generate full batches - fullBatches := computeFullBatches(batches, l2Blocks, l2Txs) + fullBatches := computeFullBatches(batches, l2Blocks, l2Txs, lastAddedL2BlockNumber) currentBatchNumber += limit for b, batch := range fullBatches { + if batch.BatchNumber <= lastAddedBatchNumber && lastAddedBatchNumber != 0 { + continue + } else { + lastAddedBatchNumber = batch.BatchNumber + } + err = streamServer.StartAtomicOp() if err != nil { return err @@ -396,15 +405,15 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St Value: batch.BatchNumber, } - missingBookMark := false + missingBatchBookMark := true if b == 0 { _, err = streamServer.GetBookmark(bookMark.Encode()) - if err != nil { - missingBookMark = true + if err == nil { + missingBatchBookMark = false } } - if missingBookMark { + if missingBatchBookMark { _, err = streamServer.AddStreamBookmark(bookMark.Encode()) if err != nil { return err @@ -432,10 +441,10 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St } } else { for blockIndex, l2block := range batch.L2Blocks { - if l2block.L2BlockNumber <= lastAddedL2Block && lastAddedL2Block != 0 { + if l2block.L2BlockNumber <= lastAddedL2BlockNumber && lastAddedL2BlockNumber != 0 { continue } else { - lastAddedL2Block = l2block.L2BlockNumber + lastAddedL2BlockNumber = l2block.L2BlockNumber } l1BlockHash := common.Hash{} @@ -496,6 +505,12 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St Value: blockStart.L2BlockNumber, } + // Check if l2 block was already added + _, err = streamServer.GetBookmark(bookMark.Encode()) + if err == nil { + continue + } + _, err = streamServer.AddStreamBookmark(bookMark.Encode()) if err != nil { return err @@ -566,8 +581,7 @@ func GetSystemSCPosition(blockNumber uint64) []byte { } // computeFullBatches computes the full batches -func computeFullBatches(batches []*DSBatch, l2Blocks []*DSL2Block, l2Txs []*DSL2Transaction) []*DSFullBatch { - prevL2BlockNumber := uint64(0) +func computeFullBatches(batches []*DSBatch, l2Blocks []*DSL2Block, l2Txs []*DSL2Transaction, prevL2BlockNumber uint64) []*DSFullBatch { currentL2Tx := 0 currentL2Block := uint64(0) diff --git a/state/pgstatestorage/datastream.go b/state/pgstatestorage/datastream.go index a2a2b4f953..706ecfa6f6 100644 --- a/state/pgstatestorage/datastream.go +++ b/state/pgstatestorage/datastream.go @@ -139,12 +139,12 @@ func scanDSL2Transaction(row pgx.Row) (*state.DSL2Transaction, error) { // GetDSBatches returns the DS batches func (p *PostgresStorage) GetDSBatches(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, readWIPBatch bool, dbTx pgx.Tx) ([]*state.DSBatch, error) { var getBatchByNumberSQL = ` - SELECT b.batch_num, b.global_exit_root, b.local_exit_root, b.acc_input_hash, b.state_root, b.timestamp, b.coinbase, b.raw_txs_data, b.forced_batch_num, f.fork_id + SELECT b.batch_num, b.global_exit_root, b.local_exit_root, b.acc_input_hash, b.state_root, b.timestamp, 
b.coinbase, b.raw_txs_data, b.forced_batch_num, b.wip, f.fork_id FROM state.batch b, state.fork_id f WHERE b.batch_num >= $1 AND b.batch_num <= $2 AND batch_num between f.from_batch_num AND f.to_batch_num` if !readWIPBatch { - getBatchByNumberSQL += " AND b.wip = false" + getBatchByNumberSQL += " AND b.wip is false" } getBatchByNumberSQL += " ORDER BY b.batch_num ASC" @@ -191,6 +191,7 @@ func scanDSBatch(row pgx.Row) (state.DSBatch, error) { &coinbaseStr, &batch.BatchL2Data, &batch.ForcedBatchNum, + &batch.WIP, &batch.ForkID, ) if err != nil { From 6feb7b88433672f0fc70711e07913d750f71c5f1 Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:38:07 +0100 Subject: [PATCH 20/54] fix wipL2Block deltaTimestamp (#3142) * improve open new wipL2Block logs * fix wipL2Block deltaTimestamp --- sequencer/finalizer.go | 4 ++-- sequencer/l2block.go | 47 +++++++++++++++++++++--------------------- state/batchV2.go | 6 +++--- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 5cb3e705cb..11af7d7c4d 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -268,7 +268,7 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { for { start := now() // We have reached the L2 block time, we need to close the current L2 block and open a new one - if !f.wipL2Block.timestamp.Add(f.cfg.L2BlockMaxDeltaTimestamp.Duration).After(time.Now()) { + if f.wipL2Block.timestamp+uint64(f.cfg.L2BlockMaxDeltaTimestamp.Seconds()) <= uint64(time.Now().Unix()) { f.finalizeWIPL2Block(ctx) } @@ -345,7 +345,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first OldStateRoot: f.wipBatch.imStateRoot, Coinbase: f.wipBatch.coinbase, L1InfoRoot_V2: mockL1InfoRoot, - TimestampLimit_V2: uint64(f.wipL2Block.timestamp.Unix()), + TimestampLimit_V2: f.wipL2Block.timestamp, Caller: stateMetrics.SequencerCallerLabel, ForkID: f.stateIntf.GetForkIDByBatchNumber(f.wipBatch.batchNumber), Transactions: tx.RawTx, diff --git a/sequencer/l2block.go b/sequencer/l2block.go index 5aab392767..7ab4423b0c 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -17,7 +17,7 @@ import ( // L2Block represents a wip or processed L2 block type L2Block struct { trackingNum uint64 - timestamp time.Time + timestamp uint64 deltaTimestamp uint32 initialStateRoot common.Hash l1InfoTreeExitRoot state.L1InfoTreeExitRootStorageEntry @@ -61,7 +61,7 @@ func (f *finalizer) initWIPL2Block(ctx context.Context) { log.Fatalf("failed to get last L2 block number, error: %v", err) } - f.openNewWIPL2Block(ctx, lastL2Block.ReceivedAt, nil) + f.openNewWIPL2Block(ctx, uint64(lastL2Block.ReceivedAt.Unix()), nil) } // addPendingL2BlockToProcess adds a pending L2 block that is closed and ready to be processed by the executor @@ -160,8 +160,9 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error l2Block.initialStateRoot = f.wipBatch.finalStateRoot - log.Infof("processing L2 block [%d], batch: %d, initialStateRoot: %s txs: %d, l1InfoTreeIndex: %d", - l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.initialStateRoot, len(l2Block.transactions), l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex) + log.Infof("processing L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s txs: %d", + l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + 
l2Block.l1InfoTreeExitRootChanged, l2Block.initialStateRoot, len(l2Block.transactions)) batchResponse, batchL2DataSize, err := f.executeL2Block(ctx, l2Block) @@ -203,10 +204,9 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error endProcessing := time.Now() - log.Infof("processed L2 block %d [%d], batch: %d, initialStateRoot: %s, stateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v, used counters: %s", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.initialStateRoot, l2Block.batchResponse.NewStateRoot, len(l2Block.transactions), - len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot, endProcessing.Sub(startProcessing), - f.logZKCounters(batchResponse.UsedZkCounters)) + log.Infof("processed L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s, stateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v, used counters: %s", + blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, l2Block.initialStateRoot, + l2Block.batchResponse.NewStateRoot, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot, endProcessing.Sub(startProcessing), f.logZKCounters(batchResponse.UsedZkCounters)) return nil } @@ -245,7 +245,7 @@ func (f *finalizer) executeL2Block(ctx context.Context, l2Block *L2Block) (*stat OldStateRoot: l2Block.initialStateRoot, Coinbase: f.wipBatch.coinbase, L1InfoRoot_V2: mockL1InfoRoot, - TimestampLimit_V2: uint64(l2Block.timestamp.Unix()), + TimestampLimit_V2: l2Block.timestamp, Transactions: batchL2Data, SkipFirstChangeL2Block_V2: false, SkipWriteBlockInfoRoot_V2: false, @@ -298,9 +298,9 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { // If the L2 block has txs now f.storedFlushID >= l2BlockToStore.flushId, we can store tx blockResponse := l2Block.batchResponse.BlockResponses[0] - log.Infof("storing L2 block %d [%d], batch: %d, txs: %d/%d, blockHash: %s, infoRoot: %s", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, len(l2Block.transactions), len(blockResponse.TransactionResponses), - blockResponse.BlockHash, blockResponse.BlockInfoRoot.String()) + log.Infof("storing L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s", + blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + l2Block.l1InfoTreeExitRootChanged, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot.String()) dbTx, err := f.stateIntf.BeginStateTransaction(ctx) if err != nil { @@ -402,9 +402,9 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { endStoring := time.Now() - log.Infof("stored L2 block: %d [%d], batch: %d, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, len(l2Block.transactions), len(blockResponse.TransactionResponses), - blockResponse.BlockHash, blockResponse.BlockInfoRoot.String(), endStoring.Sub(startStoring)) + log.Infof("stored L2 block: %d [%d], batch: %d, 
deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v", + blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + l2Block.l1InfoTreeExitRootChanged, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot.String(), endStoring.Sub(startStoring)) return nil } @@ -442,17 +442,15 @@ func (f *finalizer) closeWIPL2Block(ctx context.Context) { } // openNewWIPL2Block opens a new wip L2 block -func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp time.Time, prevL1InfoTreeIndex *uint32) { +func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, prevL1InfoTreeIndex *uint32) { newL2Block := &L2Block{} // Tracking number f.l2BlockCounter++ newL2Block.trackingNum = f.l2BlockCounter - log.Debugf("opening new WIP L2 block [%d]", newL2Block.trackingNum) - - newL2Block.timestamp = now() - newL2Block.deltaTimestamp = uint32(newL2Block.timestamp.Sub(prevTimestamp).Truncate(time.Second).Seconds()) + newL2Block.deltaTimestamp = uint32(uint64(now().Unix()) - prevTimestamp) + newL2Block.timestamp = prevTimestamp + uint64(newL2Block.deltaTimestamp) newL2Block.transactions = []*TxTracker{} @@ -477,6 +475,9 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp time.Ti f.wipL2Block = newL2Block + log.Debugf("creating new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v", + f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, f.wipL2Block.l1InfoTreeExitRootChanged) + // We process (execute) the new wip L2 block to update the imStateRoot and also get the counters used by the wip l2block batchResponse, err := f.executeNewWIPL2Block(ctx) if err != nil { @@ -506,8 +507,8 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp time.Ti } } - log.Infof("new WIP L2 block [%d] created, batch: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfTreeIndexChanged: %v, oldStateRoot: %s, stateRoot: %s, used counters: %s", - f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.timestamp.Unix(), f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + log.Infof("created new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, stateRoot: %s, used counters: %s", + f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, f.wipL2Block.l1InfoTreeExitRootChanged, f.wipBatch.imStateRoot, batchResponse.NewStateRoot, f.logZKCounters(f.wipL2Block.usedResources.ZKCounters)) } @@ -523,7 +524,7 @@ func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBat OldStateRoot: f.wipBatch.imStateRoot, Coinbase: f.wipBatch.coinbase, L1InfoRoot_V2: mockL1InfoRoot, - TimestampLimit_V2: uint64(f.wipL2Block.timestamp.Unix()), + TimestampLimit_V2: f.wipL2Block.timestamp, Caller: stateMetrics.SequencerCallerLabel, ForkID: f.stateIntf.GetForkIDByBatchNumber(f.wipBatch.batchNumber), SkipWriteBlockInfoRoot_V2: true, diff --git a/state/batchV2.go b/state/batchV2.go index bc7ed63ca8..c43f216906 100644 --- a/state/batchV2.go +++ b/state/batchV2.go @@ -289,15 +289,15 @@ func (s *State) 
sendBatchRequestToExecutorV2(ctx context.Context, batchRequest * batchResponseToString := processBatchResponseToString(batchResponse) if batchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(batchResponse.Error) - log.Debug("executor batchRequest, executor error: %v", err) + log.Debugf("executor batchResponse, executor error: %v", err) log.Debug(batchResponseToString) s.eventLog.LogExecutorErrorV2(ctx, batchResponse.Error, batchRequest) } else if batchResponse.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR && executor.IsROMOutOfCountersError(batchResponse.ErrorRom) { - log.Warn("executor batchRequest, ROM OOC, error: %v", err) + log.Warnf("executor batchResponse, ROM OOC, error: %v", err) log.Warn(batchResponseToString) } else if batchResponse.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR { err = executor.RomErr(batchResponse.ErrorRom) - log.Warn("executor batchRequest, ROM error: %v", err) + log.Warnf("executor batchResponse, ROM error: %v", err) log.Warn(batchResponseToString) } else { log.Debug(batchResponseToString) From 646603e7012fa79d65c0f967932d0399bfe11438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 25 Jan 2024 17:15:39 +0100 Subject: [PATCH 21/54] ensure tx order in stream (#3144) --- state/pgstatestorage/datastream.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/state/pgstatestorage/datastream.go b/state/pgstatestorage/datastream.go index 706ecfa6f6..891732d128 100644 --- a/state/pgstatestorage/datastream.go +++ b/state/pgstatestorage/datastream.go @@ -86,9 +86,9 @@ func scanL2Block(row pgx.Row) (*state.DSL2Block, error) { // GetDSL2Transactions returns the L2 transactions func (p *PostgresStorage) GetDSL2Transactions(ctx context.Context, firstL2Block, lastL2Block uint64, dbTx pgx.Tx) ([]*state.DSL2Transaction, error) { const l2TxSQL = `SELECT l2_block_num, t.effective_percentage, t.encoded - FROM state.transaction t - WHERE l2_block_num BETWEEN $1 AND $2 - ORDER BY t.l2_block_num ASC` + FROM state.transaction t, state.receipt r + WHERE l2_block_num BETWEEN $1 AND $2 AND r.tx_hash = t.hash + ORDER BY t.l2_block_num ASC, r.tx_index ASC` e := p.getExecQuerier(dbTx) rows, err := e.Query(ctx, l2TxSQL, firstL2Block, lastL2Block) From 4ddba20eb371582a7f4c80a740d4d7d8cfb3efb6 Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Thu, 25 Jan 2024 21:07:27 +0100 Subject: [PATCH 22/54] fix wipL2Block stateRoot (#3145) --- sequencer/batch.go | 1 + sequencer/finalizer.go | 20 ++++++++++---------- sequencer/l2block.go | 8 +++++--- state/batch.go | 2 +- state/batchV2.go | 38 +++++++++++++++++++------------------- 5 files changed, 36 insertions(+), 33 deletions(-) diff --git a/sequencer/batch.go b/sequencer/batch.go index 3c007209c2..10a5bd3ceb 100644 --- a/sequencer/batch.go +++ b/sequencer/batch.go @@ -202,6 +202,7 @@ func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context, closeReason sta } if f.wipL2Block != nil { + f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot // Subtract the WIP L2 block used resources to batch overflow, overflowResource := batch.imRemainingResources.Sub(f.wipL2Block.usedResources) if overflow { diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 11af7d7c4d..9e9f5f5b19 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -479,16 +479,6 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx return errWg, 
result.BlockResponses[0].TransactionResponses[0].RomError } - // Check remaining resources - overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: result.UsedZkCounters, Bytes: uint64(len(tx.RawTx))}) - if overflow { - log.Infof("current tx %s exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing", tx.HashStr, overflowResource) - start := time.Now() - f.workerIntf.UpdateTxZKCounters(result.BlockResponses[0].TransactionResponses[0].TxHash, tx.From, result.UsedZkCounters) - metrics.WorkerProcessingTime(time.Since(start)) - return nil, err - } - egpEnabled := f.effectiveGasPrice.IsEnabled() if !tx.IsLastExecution { @@ -531,6 +521,16 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx } } + // Check remaining resources + overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: result.UsedZkCounters, Bytes: uint64(len(tx.RawTx))}) + if overflow { + log.Infof("current tx %s exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing", tx.HashStr, overflowResource) + start := time.Now() + f.workerIntf.UpdateTxZKCounters(result.BlockResponses[0].TransactionResponses[0].TxHash, tx.From, result.UsedZkCounters) + metrics.WorkerProcessingTime(time.Since(start)) + return nil, err + } + // Save Enabled, GasPriceOC, BalanceOC and final effective gas price for later logging tx.EGPLog.Enabled = egpEnabled tx.EGPLog.GasPriceOC = result.BlockResponses[0].TransactionResponses[0].HasGaspriceOpcode diff --git a/sequencer/l2block.go b/sequencer/l2block.go index 7ab4423b0c..28ccd40b2c 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -20,6 +20,7 @@ type L2Block struct { timestamp uint64 deltaTimestamp uint32 initialStateRoot common.Hash + imStateRoot common.Hash l1InfoTreeExitRoot state.L1InfoTreeExitRootStorageEntry l1InfoTreeExitRootChanged bool usedResources state.BatchResources @@ -402,7 +403,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { endStoring := time.Now() - log.Infof("stored L2 block: %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v", + log.Infof("stored L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v", blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot.String(), endStoring.Sub(startStoring)) @@ -488,8 +489,9 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, f.Halt(ctx, fmt.Errorf("number of L2 block [%d] responses returned by the executor is %d and must be 1", f.wipL2Block.trackingNum, len(batchResponse.BlockResponses))) } - // Update imStateRoot and wip L2 block number - f.wipBatch.imStateRoot = batchResponse.NewStateRoot + // Update imStateRoot + f.wipL2Block.imStateRoot = batchResponse.NewStateRoot + f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot // Save and sustract the resources used by the new WIP L2 block from the wip batch // We need to increase the poseidon hashes to reserve in the batch the hashes needed 
to write the L1InfoRoot when processing the final L2 Block (SkipWriteBlockInfoRoot_V2=false) diff --git a/state/batch.go b/state/batch.go index 040a991ae0..f9a30637c4 100644 --- a/state/batch.go +++ b/state/batch.go @@ -72,7 +72,7 @@ const ( // MaxDeltaTimestampClosingReason is the closing reason used when max delta batch timestamp is reached MaxDeltaTimestampClosingReason ClosingReason = "Max delta timestamp" // NoTxFitsClosingReason is the closing reason used when any of the txs in the pool (worker) fits in the remaining resources of the batch - NoTxFitsClosingReason ClosingReason = "No transactions fits" + NoTxFitsClosingReason ClosingReason = "No transaction fits" ) // ProcessingReceipt indicates the outcome (StateRoot, AccInputHash) of processing a batch diff --git a/state/batchV2.go b/state/batchV2.go index c43f216906..d93ec7daf8 100644 --- a/state/batchV2.go +++ b/state/batchV2.go @@ -262,7 +262,7 @@ func (s *State) sendBatchRequestToExecutorV2(ctx context.Context, batchRequest * return nil, ErrExecutorNil } - batchRequestLog := "BatchNum: %v, OldBatchNum: %v, From: %v, OldStateRoot: %v, L1InfoRoot: %v, OldAccInputHash: %v, TimestampLimit: %v, Coinbase: %v, UpdateMerkleTree: %v, SkipFirstChangeL2Block: %v, SkipWriteBlockInfoRoot: %v, ChainId: %v, ForkId: %v, ContextId: %v, SkipVerifyL1InfoRoot: %v, ForcedBlockhashL1: %v, L1InfoTreeData: %+v, BatchL2Data: %v" + batchRequestLog := "OldBatchNum: %v, From: %v, OldStateRoot: %v, L1InfoRoot: %v, OldAccInputHash: %v, TimestampLimit: %v, Coinbase: %v, UpdateMerkleTree: %v, SkipFirstChangeL2Block: %v, SkipWriteBlockInfoRoot: %v, ChainId: %v, ForkId: %v, ContextId: %v, SkipVerifyL1InfoRoot: %v, ForcedBlockhashL1: %v, L1InfoTreeData: %+v, BatchL2Data: %v" l1DataStr := "" for i, l1Data := range batchRequest.L1InfoTreeData { @@ -272,52 +272,52 @@ func (s *State) sendBatchRequestToExecutorV2(ctx context.Context, batchRequest * l1DataStr = l1DataStr[:len(l1DataStr)-1] } - batchRequestLog = fmt.Sprintf(batchRequestLog, batchRequest.OldBatchNum+1, batchRequest.OldBatchNum, batchRequest.From, hex.EncodeToHex(batchRequest.OldStateRoot), hex.EncodeToHex(batchRequest.L1InfoRoot), + batchRequestLog = fmt.Sprintf(batchRequestLog, batchRequest.OldBatchNum, batchRequest.From, hex.EncodeToHex(batchRequest.OldStateRoot), hex.EncodeToHex(batchRequest.L1InfoRoot), hex.EncodeToHex(batchRequest.OldAccInputHash), batchRequest.TimestampLimit, batchRequest.Coinbase, batchRequest.UpdateMerkleTree, batchRequest.SkipFirstChangeL2Block, batchRequest.SkipWriteBlockInfoRoot, batchRequest.ChainId, batchRequest.ForkId, batchRequest.ContextId, batchRequest.SkipVerifyL1InfoRoot, hex.EncodeToHex(batchRequest.ForcedBlockhashL1), l1DataStr, hex.EncodeToHex(batchRequest.BatchL2Data)) - log.Debugf("executor batchRequest, %s", batchRequestLog) + newBatchNum := batchRequest.OldBatchNum + 1 + log.Debugf("executor batch %d request, %s", newBatchNum, batchRequestLog) now := time.Now() batchResponse, err := s.executorClient.ProcessBatchV2(ctx, batchRequest) + elapsed := time.Since(now) + + //workarroundDuplicatedBlock(res) + if caller != metrics.DiscardCallerLabel { + metrics.ExecutorProcessingTime(string(caller), elapsed) + } + if err != nil { log.Errorf("error executor ProcessBatchV2: %v", err) log.Errorf("error executor ProcessBatchV2: %s", err.Error()) log.Errorf("error executor ProcessBatchV2 response: %v", batchResponse) } else { - batchResponseToString := processBatchResponseToString(batchResponse) + batchResponseToString := processBatchResponseToString(newBatchNum, 
batchResponse, elapsed) if batchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(batchResponse.Error) - log.Debugf("executor batchResponse, executor error: %v", err) - log.Debug(batchResponseToString) + log.Warnf("executor batch %d response, executor error: %v", newBatchNum, err) + log.Warn(batchResponseToString) s.eventLog.LogExecutorErrorV2(ctx, batchResponse.Error, batchRequest) } else if batchResponse.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR && executor.IsROMOutOfCountersError(batchResponse.ErrorRom) { - log.Warnf("executor batchResponse, ROM OOC, error: %v", err) + log.Warnf("executor batch %d response, ROM OOC, error: %v", newBatchNum, err) log.Warn(batchResponseToString) } else if batchResponse.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR { err = executor.RomErr(batchResponse.ErrorRom) - log.Warnf("executor batchResponse, ROM error: %v", err) + log.Warnf("executor batch %d response, ROM error: %v", newBatchNum, err) log.Warn(batchResponseToString) } else { log.Debug(batchResponseToString) } } - //workarroundDuplicatedBlock(res) - elapsed := time.Since(now) - if caller != metrics.DiscardCallerLabel { - metrics.ExecutorProcessingTime(string(caller), elapsed) - } - - log.Infof("batch %d took %v to be processed by the executor ", batchRequest.OldBatchNum+1, elapsed) - return batchResponse, err } -func processBatchResponseToString(batchResponse *executor.ProcessBatchResponseV2) string { - batchResponseLog := "executor batchResponse, NewStateRoot: %v, NewAccInputHash: %v, NewLocalExitRoot: %v, NewBatchNumber: %v, GasUsed: %v, FlushId: %v, StoredFlushId: %v, ProverId:%v, ForkId:%v, Error: %v\n" - batchResponseLog = fmt.Sprintf(batchResponseLog, hex.EncodeToHex(batchResponse.NewStateRoot), hex.EncodeToHex(batchResponse.NewAccInputHash), hex.EncodeToHex(batchResponse.NewLocalExitRoot), +func processBatchResponseToString(batchNum uint64, batchResponse *executor.ProcessBatchResponseV2, executionTime time.Duration) string { + batchResponseLog := "executor batch %d response, Time: %v, NewStateRoot: %v, NewAccInputHash: %v, NewLocalExitRoot: %v, NewBatchNumber: %v, GasUsed: %v, FlushId: %v, StoredFlushId: %v, ProverId:%v, ForkId:%v, Error: %v\n" + batchResponseLog = fmt.Sprintf(batchResponseLog, batchNum, executionTime, hex.EncodeToHex(batchResponse.NewStateRoot), hex.EncodeToHex(batchResponse.NewAccInputHash), hex.EncodeToHex(batchResponse.NewLocalExitRoot), batchResponse.NewBatchNum, batchResponse.GasUsed, batchResponse.FlushId, batchResponse.StoredFlushId, batchResponse.ProverId, batchResponse.ForkId, batchResponse.Error) for blockIndex, block := range batchResponse.BlockResponses { From 18251645a592c3147a421ac53a796c42a89af100 Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Thu, 25 Jan 2024 23:15:57 +0100 Subject: [PATCH 23/54] fix panic NewStackTrie (#3146) --- state/transaction.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/state/transaction.go b/state/transaction.go index d393956bd6..7d77265000 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -248,7 +248,8 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P } // Create block to be able to calculate its hash - block := NewL2Block(l2Header, transactions, []*L2Header{}, receipts, &trie.StackTrie{}) + st := trie.NewStackTrie(nil) + block := NewL2Block(l2Header, transactions, []*L2Header{}, receipts, st) block.ReceivedAt = time.Unix(int64(l2Block.Timestamp), 0) for _, receipt := range receipts { From 
bc518133e7d627a9853fc74b03b8ef5d4711a488 Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Thu, 25 Jan 2024 20:05:06 -0300 Subject: [PATCH 24/54] fix sync initalization for etrog (#3147) --- state/transaction.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/state/transaction.go b/state/transaction.go index 7d77265000..cca22989b6 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -219,10 +219,10 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P l2Header.BlockInfoRoot = l2Block.BlockInfoRoot numTxs := len(l2Block.TransactionResponses) - transactions := make([]*types.Transaction, numTxs) - storeTxsEGPData := make([]StoreTxEGPData, numTxs) - receipts := make([]*types.Receipt, numTxs) - txsL2Hash := make([]common.Hash, numTxs) + transactions := make([]*types.Transaction, 0, numTxs) + storeTxsEGPData := make([]StoreTxEGPData, 0, numTxs) + receipts := make([]*types.Receipt, 0, numTxs) + txsL2Hash := make([]common.Hash, 0, numTxs) for i, txResponse := range l2Block.TransactionResponses { // if the transaction has an intrinsic invalid tx error it means @@ -235,16 +235,18 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P } txResp := *txResponse - transactions[i] = &txResp.Tx - txsL2Hash[i] = txResp.TxHashL2_V2 + transactions = append(transactions, &txResp.Tx) + txsL2Hash = append(txsL2Hash, txResp.TxHashL2_V2) - storeTxsEGPData[i] = StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(txResponse.EffectivePercentage)} + storeTxEGPData := StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(txResponse.EffectivePercentage)} if txsEGPLog != nil { - storeTxsEGPData[i].EGPLog = txsEGPLog[i] + storeTxEGPData.EGPLog = txsEGPLog[i] } + storeTxsEGPData = append(storeTxsEGPData, storeTxEGPData) + receipt := GenerateReceipt(header.Number, txResponse, uint(i)) - receipts[i] = receipt + receipts = append(receipts, receipt) } // Create block to be able to calculate its hash From 4dc6258fdba6a9eb4162e76849730a4b61898e10 Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Fri, 26 Jan 2024 04:40:07 +0100 Subject: [PATCH 25/54] fix wipL2block imStateRoot (#3148) --- sequencer/batch.go | 9 ++++----- sequencer/errors.go | 2 ++ sequencer/finalizer.go | 6 +++++- sequencer/l2block.go | 5 +++-- sequencer/worker.go | 2 +- 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/sequencer/batch.go b/sequencer/batch.go index 10a5bd3ceb..47b79cdf81 100644 --- a/sequencer/batch.go +++ b/sequencer/batch.go @@ -65,6 +65,7 @@ func (f *finalizer) setWIPBatch(ctx context.Context, wipStateBatch *state.Batch) initialStateRoot: prevStateBatch.StateRoot, finalStateRoot: wipStateBatch.StateRoot, timestamp: wipStateBatch.Timestamp, + countOfL2Blocks: len(wipStateBatchBlocks.Blocks), countOfTxs: wipStateBatchCountOfTxs, imRemainingResources: remainingResources, finalRemainingResources: remainingResources, @@ -196,7 +197,7 @@ func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context, closeReason sta f.initWIPL2Block(ctx) } - batch, err := f.openNewWIPBatch(ctx, lastBatchNumber+1, stateRoot) + f.wipBatch, err = f.openNewWIPBatch(ctx, lastBatchNumber+1, stateRoot) if err != nil { return fmt.Errorf("failed to open new wip batch, error: %v", err) } @@ -204,15 +205,13 @@ func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context, closeReason sta if f.wipL2Block != nil { f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot // Subtract the WIP L2 block used 
resources to batch - overflow, overflowResource := batch.imRemainingResources.Sub(f.wipL2Block.usedResources) + overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(f.wipL2Block.usedResources) if overflow { return fmt.Errorf("failed to subtract L2 block [%d] used resources to new wip batch %d, overflow resource: %s", - f.wipL2Block.trackingNum, batch.batchNumber, overflowResource) + f.wipL2Block.trackingNum, f.wipBatch.batchNumber, overflowResource) } } - f.wipBatch = batch - log.Infof("new WIP batch %d", f.wipBatch.batchNumber) return nil diff --git a/sequencer/errors.go b/sequencer/errors.go index 44fbc8bdd0..10c87aaa2f 100644 --- a/sequencer/errors.go +++ b/sequencer/errors.go @@ -27,6 +27,8 @@ var ( ErrExecutorError = errors.New("executor error") // ErrNoFittingTransaction happens when there is not a tx (from the txSortedList) that fits in the remaining batch resources ErrNoFittingTransaction = errors.New("no fit transaction") + // ErrBatchResourceUnderFlow happens when there is batch resoure underflow after sustract the resources from a tx + ErrBatchResourceUnderFlow = errors.New("batch resource underflow") // ErrTransactionsListEmpty happens when txSortedList is empty ErrTransactionsListEmpty = errors.New("transactions list empty") ) diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 9e9f5f5b19..c7021bdcd1 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -277,6 +277,7 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { // If we have txs pending to process but none of them fits into the wip batch, we close the wip batch and open a new one if err == ErrNoFittingTransaction { f.finalizeWIPBatch(ctx, state.NoTxFitsClosingReason) + continue } metrics.WorkerProcessingTime(time.Since(start)) @@ -293,6 +294,9 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { firstTxProcess = false log.Infof("reprocessing tx %s because of effective gas price calculation", tx.HashStr) continue + } else if err == ErrBatchResourceUnderFlow { + log.Infof("skipping tx %s due to a batch resource underflow", tx.HashStr) + break } else { log.Errorf("failed to process tx %s, error: %v", err) break @@ -528,7 +532,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx start := time.Now() f.workerIntf.UpdateTxZKCounters(result.BlockResponses[0].TransactionResponses[0].TxHash, tx.From, result.UsedZkCounters) metrics.WorkerProcessingTime(time.Since(start)) - return nil, err + return nil, ErrBatchResourceUnderFlow } // Save Enabled, GasPriceOC, BalanceOC and final effective gas price for later logging diff --git a/sequencer/l2block.go b/sequencer/l2block.go index 28ccd40b2c..8ae00be9db 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -490,6 +490,7 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, } // Update imStateRoot + oldIMStateRoot := f.wipBatch.imStateRoot f.wipL2Block.imStateRoot = batchResponse.NewStateRoot f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot @@ -509,9 +510,9 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, } } - log.Infof("created new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, stateRoot: %s, used counters: %s", + log.Infof("created new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, imStateRoot: %s, used counters: %s", f.wipL2Block.trackingNum, 
f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, - f.wipL2Block.l1InfoTreeExitRootChanged, f.wipBatch.imStateRoot, batchResponse.NewStateRoot, f.logZKCounters(f.wipL2Block.usedResources.ZKCounters)) + f.wipL2Block.l1InfoTreeExitRootChanged, oldIMStateRoot, f.wipL2Block.imStateRoot, f.logZKCounters(f.wipL2Block.usedResources.ZKCounters)) } // executeNewWIPL2Block executes an empty L2 Block in the executor and returns the batch response from the executor diff --git a/sequencer/worker.go b/sequencer/worker.go index 4e277b7b84..b0f07a2311 100644 --- a/sequencer/worker.go +++ b/sequencer/worker.go @@ -365,7 +365,7 @@ func (w *Worker) ExpireTransactions(maxTime time.Duration) []*TxTracker { delete(w.pool, addrQueue.fromStr) } } - log.Debug("expire transactions ended, addrQueue length: %d, delete count: %d ", len(w.pool), len(txs)) + log.Debugf("expire transactions ended, addrQueue length: %d, delete count: %d ", len(w.pool), len(txs)) return txs } From 0a92869ea831fa9503edf68ed031cb0e3390dea0 Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Fri, 26 Jan 2024 09:12:09 +0100 Subject: [PATCH 26/54] go mod tidy --- go.mod | 5 +++-- go.sum | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 5be29a54ed..369d6b98d5 100644 --- a/go.mod +++ b/go.mod @@ -119,6 +119,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -143,7 +144,7 @@ require ( github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/mod v0.14.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect @@ -166,5 +167,5 @@ require ( github.com/fatih/color v1.16.0 github.com/joho/godotenv v1.5.1 github.com/prometheus/client_golang v1.18.0 - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa ) diff --git a/go.sum b/go.sum index cc8919aca2..5896013fbf 100644 --- a/go.sum +++ b/go.sum @@ -660,6 +660,8 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= From ece38193514751ec0b2c940280ac4cb70a9c3f89 Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Fri, 26 Jan 2024 14:27:34 +0100 Subject: [PATCH 27/54] Update refs to agglayer --- aggregator/aggregator.go | 
43 ++++++++++++++++++++-------------------- aggregator/config.go | 14 ++++++------- cmd/run.go | 8 ++++---- go.mod | 1 + go.sum | 2 ++ 5 files changed, 36 insertions(+), 32 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index ceca3db281..2a79219f09 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/0xPolygon/beethoven/tx" "math/big" "net" "strconv" @@ -15,8 +14,10 @@ import ( "time" "unicode" - "github.com/0xPolygon/beethoven/client" - beethovenTypes "github.com/0xPolygon/beethoven/rpc/types" + "github.com/0xPolygon/agglayer/tx" + + "github.com/0xPolygon/agglayer/client" + agglayerTypes "github.com/0xPolygon/agglayer/rpc/types" "github.com/0xPolygonHermez/zkevm-node/aggregator/metrics" "github.com/0xPolygonHermez/zkevm-node/aggregator/prover" "github.com/0xPolygonHermez/zkevm-node/config/types" @@ -70,7 +71,7 @@ type Aggregator struct { ctx context.Context exit context.CancelFunc - BeethovenClient client.ClientInterface + AggLayerClient client.ClientInterface sequencerPrivateKey *ecdsa.PrivateKey } @@ -80,7 +81,7 @@ func New( stateInterface stateInterface, ethTxManager ethTxManager, etherman etherman, - beethovenClient client.ClientInterface, + agglayerClient client.ClientInterface, sequencerPrivateKey *ecdsa.PrivateKey, ) (Aggregator, error) { var profitabilityChecker aggregatorTxProfitabilityChecker @@ -104,7 +105,7 @@ func New( finalProof: make(chan finalProofMsg), - BeethovenClient: beethovenClient, + AggLayerClient: agglayerClient, sequencerPrivateKey: sequencerPrivateKey, } @@ -280,8 +281,8 @@ func (a *Aggregator) sendFinalProof() { log.Infof("Final proof inputs: NewLocalExitRoot [%#x], NewStateRoot [%#x]", inputs.NewLocalExitRoot, inputs.NewStateRoot) switch a.cfg.SettlementBackend { - case Beethoven: - if success := a.settleWithBeethoven(ctx, proof, inputs); !success { + case AggLayer: + if success := a.settleWithAggLayer(ctx, proof, inputs); !success { continue } default: @@ -350,7 +351,7 @@ func (a *Aggregator) settleDirect( return true } -func (a *Aggregator) settleWithBeethoven( +func (a *Aggregator) settleWithAggLayer( ctx context.Context, proof *state.Proof, inputs ethmanTypes.FinalProofInputs, @@ -358,12 +359,12 @@ func (a *Aggregator) settleWithBeethoven( proofStrNo0x := strings.TrimPrefix(inputs.FinalProof.Proof, "0x") proofBytes := common.Hex2Bytes(proofStrNo0x) tx := tx.Tx{ - LastVerifiedBatch: beethovenTypes.ArgUint64(proof.BatchNumber - 1), - NewVerifiedBatch: beethovenTypes.ArgUint64(proof.BatchNumberFinal), + LastVerifiedBatch: agglayerTypes.ArgUint64(proof.BatchNumber - 1), + NewVerifiedBatch: agglayerTypes.ArgUint64(proof.BatchNumberFinal), ZKP: tx.ZKP{ NewStateRoot: common.BytesToHash(inputs.NewStateRoot), NewLocalExitRoot: common.BytesToHash(inputs.NewLocalExitRoot), - Proof: beethovenTypes.ArgBytes(proofBytes), + Proof: agglayerTypes.ArgBytes(proofBytes), }, RollupID: a.Ethman.GetRollupId(), } @@ -371,27 +372,27 @@ func (a *Aggregator) settleWithBeethoven( if err != nil { log.Errorf("failed to sign tx: %v", err) - a.handleFailureToSendToBeethoven(ctx, proof) + a.handleFailureToSendToAggLayer(ctx, proof) return false } log.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) - txHash, err := a.BeethovenClient.SendTx(*signedTx) + txHash, err := a.AggLayerClient.SendTx(*signedTx) if err != nil { log.Errorf("failed to send tx to the interop: %v", err) - a.handleFailureToSendToBeethoven(ctx, proof) + a.handleFailureToSendToAggLayer(ctx, 
proof) return false } - log.Infof("tx %s sent to beethoven, waiting to be mined", txHash.Hex()) - log.Debugf("Timeout set to %f seconds", a.cfg.BeethovenTxTimeout.Duration.Seconds()) - waitCtx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(a.cfg.BeethovenTxTimeout.Duration)) + log.Infof("tx %s sent to agglayer, waiting to be mined", txHash.Hex()) + log.Debugf("Timeout set to %f seconds", a.cfg.AggLayerTxTimeout.Duration.Seconds()) + waitCtx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(a.cfg.AggLayerTxTimeout.Duration)) defer cancelFunc() - if err := a.BeethovenClient.WaitTxToBeMined(txHash, waitCtx); err != nil { + if err := a.AggLayerClient.WaitTxToBeMined(txHash, waitCtx); err != nil { log.Errorf("interop didn't mine the tx: %v", err) - a.handleFailureToSendToBeethoven(ctx, proof) + a.handleFailureToSendToAggLayer(ctx, proof) return false } @@ -400,7 +401,7 @@ func (a *Aggregator) settleWithBeethoven( return true } -func (a *Aggregator) handleFailureToSendToBeethoven(ctx context.Context, proof *state.Proof) { +func (a *Aggregator) handleFailureToSendToAggLayer(ctx context.Context, proof *state.Proof) { log := log.WithFields("proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal)) proof.GeneratingSince = nil diff --git a/aggregator/config.go b/aggregator/config.go index 3ab60e64e9..1cf9a0b0cf 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -11,8 +11,8 @@ import ( type SettlementBackend string const ( - // To define the beethoven service for settlement - Beethoven SettlementBackend = "beethoven" + // To define the agglayer service for settlement + AggLayer SettlementBackend = "agglayer" // To define the direct settlement to L1 L1 SettlementBackend = "l1" ) @@ -95,14 +95,14 @@ type Config struct { // final gas: 1100 GasOffset uint64 `mapstructure:"GasOffset"` - // SettlementBackend configuration defines how a final ZKP should be settled. Directly to L1 or over the Beethoven service. + // SettlementBackend configuration defines how a final ZKP should be settled. Directly to L1 or over the AggLayer service. 
SettlementBackend SettlementBackend `mapstructure:"SettlementBackend"` - // BeethovenTxTimeout is the interval time to wait for a tx to be mined from the beethoven - BeethovenTxTimeout types.Duration `mapstructure:"BeethovenTxTimeout"` + // AggLayerTxTimeout is the interval time to wait for a tx to be mined from the agglayer + AggLayerTxTimeout types.Duration `mapstructure:"AggLayerTxTimeout"` - // BeethovenURL url of the beethoven service - BeethovenURL string `mapstructure:"BeethovenURL"` + // AggLayerURL url of the agglayer service + AggLayerURL string `mapstructure:"AggLayerURL"` // SequencerPrivateKey Private key of the trusted sequencer SequencerPrivateKey types.KeystoreFileConfig `mapstructure:"SequencerPrivateKey"` diff --git a/cmd/run.go b/cmd/run.go index 006ebc3046..06f3879892 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -12,7 +12,7 @@ import ( "runtime" "time" - beethovenClient "github.com/0xPolygon/beethoven/client" + agglayerClient "github.com/0xPolygon/agglayer/client" datastreamerlog "github.com/0xPolygonHermez/zkevm-data-streamer/log" "github.com/0xPolygonHermez/zkevm-node" "github.com/0xPolygonHermez/zkevm-node/aggregator" @@ -426,10 +426,10 @@ func createSequenceSender(cfg config.Config, pool *pool.Pool, etmStorage *ethtxm } func runAggregator(ctx context.Context, c aggregator.Config, etherman *etherman.Client, ethTxManager *ethtxmanager.Client, st *state.State) { - var beethCli *beethovenClient.Client + var beethCli *agglayerClient.Client - if c.SettlementBackend == aggregator.Beethoven { - beethCli = beethovenClient.New(c.BeethovenURL) + if c.SettlementBackend == aggregator.AggLayer { + beethCli = agglayerClient.New(c.AggLayerURL) } // Load private key diff --git a/go.mod b/go.mod index 369d6b98d5..d24a5dd125 100644 --- a/go.mod +++ b/go.mod @@ -38,6 +38,7 @@ require ( require ( dario.cat/mergo v1.0.0 // indirect + github.com/0xPolygon/agglayer v0.0.0-20240126091628-9016453dc02b // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect diff --git a/go.sum b/go.sum index 5896013fbf..3f8be260dc 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/0xPolygon/agglayer v0.0.0-20240126091628-9016453dc02b h1:zwZP+YFDOLWL8qgWDu/D7E09MuLwIKXz6EqyT/CYBEY= +github.com/0xPolygon/agglayer v0.0.0-20240126091628-9016453dc02b/go.mod h1:grNx11QYbBuhO8UrYjcWAam96cOKgCd3UC+s3Y60KGE= github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc h1:yUoC5oDBHbriJSRfVYWNji+rYJdjxqIibmVpzoXUM3g= github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc/go.mod h1:V+tv5idj5g9yO/sTPzi9j4waUOcGAC2VakTBWGHB3lw= github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18 h1:InqeTcHrNbfj1OUfn2aFplFay7ibd7KhYqvmMZYZfn0= From 86fcb5b6134f70c0e5f3e00006445fca039d4ecc Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Fri, 26 Jan 2024 11:15:04 -0300 Subject: [PATCH 28/54] fix tracer to work by block stateroot instead of by tx (#3150) --- db/migrations/state/0015.sql | 6 ++++ db/migrations/state/0015_test.go | 49 +++++++++++++++++++++++++++++ state/pgstatestorage/transaction.go | 8 ++++- state/trace.go | 41 
+++++++++++------------- 4 files changed, 80 insertions(+), 24 deletions(-) create mode 100644 db/migrations/state/0015.sql create mode 100644 db/migrations/state/0015_test.go diff --git a/db/migrations/state/0015.sql b/db/migrations/state/0015.sql new file mode 100644 index 0000000000..05657826cc --- /dev/null +++ b/db/migrations/state/0015.sql @@ -0,0 +1,6 @@ +-- +migrate Up +CREATE INDEX IF NOT EXISTS idx_receipt_tx_index ON state.receipt (block_num, tx_index); + +-- +migrate Down +DROP INDEX IF EXISTS state.idx_receipt_tx_index; + diff --git a/db/migrations/state/0015_test.go b/db/migrations/state/0015_test.go new file mode 100644 index 0000000000..20f34bdbf9 --- /dev/null +++ b/db/migrations/state/0015_test.go @@ -0,0 +1,49 @@ +package migrations_test + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/assert" +) + +// this migration changes length of the token name +type migrationTest0015 struct{} + +func (m migrationTest0015) InsertData(db *sql.DB) error { + return nil +} + +func (m migrationTest0015) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + indexes := []string{ + "idx_receipt_tx_index", + } + // Check indexes adding + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) + } +} + +func (m migrationTest0015) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + indexes := []string{ + "idx_receipt_tx_index", + } + // Check indexes removing + for _, idx := range indexes { + // getIndex + const getIndex = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;` + row := db.QueryRow(getIndex, idx) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) + } +} + +func TestMigration0015(t *testing.T) { + runMigrationTest(t, 15, migrationTest0015{}) +} diff --git a/state/pgstatestorage/transaction.go b/state/pgstatestorage/transaction.go index 5ef4ad9485..9d8fe15efa 100644 --- a/state/pgstatestorage/transaction.go +++ b/state/pgstatestorage/transaction.go @@ -377,7 +377,13 @@ func scanLogs(rows pgx.Rows) ([]*types.Log, error) { // GetTxsByBlockNumber returns all the txs in a given block func (p *PostgresStorage) GetTxsByBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) ([]*types.Transaction, error) { - const getTxsByBlockNumSQL = "SELECT encoded FROM state.transaction WHERE l2_block_num = $1" + const getTxsByBlockNumSQL = `SELECT t.encoded + FROM state.transaction t + JOIN state.receipt r + ON t.hash = r.tx_hash + WHERE t.l2_block_num = $1 + AND r.block_num = $1 + ORDER by r.tx_index ASC` q := p.getExecQuerier(dbTx) rows, err := q.Query(ctx, getTxsByBlockNumSQL, blockNumber) diff --git a/state/trace.go b/state/trace.go index 22cc5b5306..c567343c82 100644 --- a/state/trace.go +++ b/state/trace.go @@ -49,28 +49,23 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has return nil, err } - // if tx index is zero, we need to get the state root from the previous block - // else we need to get the state root from the previous tx + // the old state root is the previous block state root var oldStateRoot common.Hash - if receipt.TransactionIndex == 0 { - // get the previous L2 Block - previousL2BlockNumber := uint64(0) - if receipt.BlockNumber.Uint64() > 0 { - previousL2BlockNumber = receipt.BlockNumber.Uint64() - 1 - } - previousL2Block, err := s.GetL2BlockByNumber(ctx, previousL2BlockNumber, dbTx) - 
if err != nil { - return nil, err - } - oldStateRoot = previousL2Block.Root() - } else { - previousTx := l2Block.Transactions()[receipt.TransactionIndex-1] - // gets the tx receipt - previousReceipt, err := s.GetTransactionReceipt(ctx, previousTx.Hash(), dbTx) - if err != nil { - return nil, err - } - oldStateRoot = common.BytesToHash(previousReceipt.PostState) + previousL2BlockNumber := uint64(0) + if receipt.BlockNumber.Uint64() > 0 { + previousL2BlockNumber = receipt.BlockNumber.Uint64() - 1 + } + previousL2Block, err := s.GetL2BlockByNumber(ctx, previousL2BlockNumber, dbTx) + if err != nil { + return nil, err + } + oldStateRoot = previousL2Block.Root() + + // since the executor only stores the state roots by block, we need to + // execute all the txs in the block until the tx we want to trace + var txsToEncode []types.Transaction + for i := 0; i <= int(receipt.TransactionIndex); i++ { + txsToEncode = append(txsToEncode, *l2Block.Transactions()[i]) } // gets batch that including the l2 block @@ -111,7 +106,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has } } // generate batch l2 data for the transaction - batchL2Data, err := EncodeTransactions([]types.Transaction{*tx}, []uint8{MaxEffectivePercentage}, forkId) + batchL2Data, err := EncodeTransactions(txsToEncode, []uint8{MaxEffectivePercentage}, forkId) if err != nil { return nil, err } @@ -192,7 +187,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has deltaTimestamp := uint32(uint64(time.Now().Unix()) - l2Block.Time()) transactions := s.BuildChangeL2Block(deltaTimestamp, uint32(0)) - batchL2Data, err := EncodeTransactions([]types.Transaction{*tx}, []uint8{MaxEffectivePercentage}, forkId) + batchL2Data, err := EncodeTransactions(txsToEncode, []uint8{MaxEffectivePercentage}, forkId) if err != nil { log.Errorf("error encoding transaction ", err) return nil, err From 75a957ffa9c8df5ad5184fa3043643f184972c88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Fri, 26 Jan 2024 15:33:18 +0100 Subject: [PATCH 29/54] update prover image (#3151) --- docker-compose.yml | 2 +- test/config/test.prover.config.json | 2 +- test/docker-compose.yml | 4 ++-- test/vectors/src/etrog/balances.json | 24 ++++++++++++------------ test/vectors/src/etrog/chain-ids.json | 16 ++++++++-------- test/vectors/src/etrog/general.json | 12 ++++++------ test/vectors/src/etrog/nonces.json | 12 ++++++------ test/vectors/src/etrog/seq-fees.json | 8 ++++---- 8 files changed, 40 insertions(+), 40 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 77bcce0313..e592405fcd 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -107,7 +107,7 @@ services: zkevm-prover: container_name: zkevm-prover restart: unless-stopped - image: hermeznetwork/zkevm-prover:v4.0.0-RC30 + image: hermeznetwork/zkevm-prover:v4.0.0-RC31 depends_on: zkevm-state-db: condition: service_healthy diff --git a/test/config/test.prover.config.json b/test/config/test.prover.config.json index fa4f4bd45e..91685ace2f 100644 --- a/test/config/test.prover.config.json +++ b/test/config/test.prover.config.json @@ -27,7 +27,7 @@ "runBlakeTest": false, "executeInParallel": true, - "useMainExecGenerated": false, + "useMainExecGenerated": true, "saveRequestToFile": false, "saveInputToFile": false, "saveDbReadsToFile": false, diff --git a/test/docker-compose.yml b/test/docker-compose.yml index 3390e8ed36..e9d1992c50 100644 --- a/test/docker-compose.yml +++ 
b/test/docker-compose.yml @@ -513,7 +513,7 @@ services: zkevm-prover: container_name: zkevm-prover - image: hermeznetwork/zkevm-prover:v4.0.0-RC30 + image: hermeznetwork/zkevm-prover:v4.0.0-RC31 ports: - 50061:50061 # MT - 50071:50071 # Executor @@ -602,7 +602,7 @@ services: zkevm-permissionless-prover: container_name: zkevm-permissionless-prover - image: hermeznetwork/zkevm-prover:v4.0.0-RC30 + image: hermeznetwork/zkevm-prover:v4.0.0-RC31 ports: # - 50058:50058 # Prover - 50059:50052 # Mock prover diff --git a/test/vectors/src/etrog/balances.json b/test/vectors/src/etrog/balances.json index d2c1b6e9c9..27eb8efc5c 100644 --- a/test/vectors/src/etrog/balances.json +++ b/test/vectors/src/etrog/balances.json @@ -64,7 +64,7 @@ "reason": "" } ], - "expectedNewRoot": "0x0777fa5bf4f4e196922ad075559c70e618fbef9f3cebb54373bd8fe5b5d9d7dd", + "expectedNewRoot": "0xeb4e96c476272380fc34c95085a0c22b62ca6acc35aca899f412b12400fa7b43", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -87,7 +87,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x76b362a9afd679ea13a456ab103786492c65946be653589c1fd627841d0c6fdd", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x2188fcec35b2d0bf0f25b46af7332b00659c5dd02a62a5e3f6b326d03d0940dc" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0xf0ce5654efe502ad230660a7fb8cae50f6e44c201ff4f19ea3df024312964796" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -164,7 +164,7 @@ "reason": "TX INVALID: Not enough funds to pay total transaction cost" } ], - "expectedNewRoot": "0xc779695ab569ed314668df881d04d23d0bd6723e6543c46a07b3f29309a5f24d", + "expectedNewRoot": "0x7419a71f9923fca08275c77c2603de4ecec62f37f4c1d00229b38e17424320ba", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -187,7 +187,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x329a29043c3cc4295020538645d79fc1569c5daaeb13acd503c3d750f5e47561" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x9675adb76a5d1a0776067b07073f74735f2cb4a55732e6396c1322c143c4bc68" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -264,7 +264,7 @@ "reason": "" } ], - "expectedNewRoot": "0xe25a4fe9b531278192e00bbb7704ec097a6020c8609c66af3ba8e08970ccd2cd", + "expectedNewRoot": "0x27967154319c4e7b25aead5b1d8ec0cee661a09a3ac36d198f85e76d6b073206", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "200000000000000000000", @@ -287,7 +287,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0xa88244596a08b2d267f764ef93304d682aac646f8603eaaad50d68b303eb9fe2", - 
"0x0000000000000000000000000000000000000000000000000000000000000003": "0xe9bb3f72261a9eecfee637bfcffef4e3bccfe9cb65803e086e02e11b9922e67d" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x63d044d976dc4898eb710a7178deb3826cd8c76a8d2727550b4abbab7e41a035" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -364,7 +364,7 @@ "reason": "TX INVALID: Not enough funds to pay total transaction cost" } ], - "expectedNewRoot": "0xda3921b9fb60b8961e0ef86f69867abb4b0d90ebbaf28575c6804ccfc9e43d9e", + "expectedNewRoot": "0x049e6807497377c06b07125357279757fe68327f9173a2de513ae510a843f515", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "199999999999999999999", @@ -387,7 +387,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0xa88244596a08b2d267f764ef93304d682aac646f8603eaaad50d68b303eb9fe2", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0xc02ff54ce8707133eacca2d47b0b8f2a9d303108168e1836b9b05abecafcca1c" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x77707cb7d024a57c97320e6999898d5d62adc0ea2396865f56c840bc96d6d824" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -464,7 +464,7 @@ "reason": "TX INVALID: Not enough funds to pay total transaction cost" } ], - "expectedNewRoot": "0xda3921b9fb60b8961e0ef86f69867abb4b0d90ebbaf28575c6804ccfc9e43d9e", + "expectedNewRoot": "0x049e6807497377c06b07125357279757fe68327f9173a2de513ae510a843f515", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "199999999999999999999", @@ -487,7 +487,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0xa88244596a08b2d267f764ef93304d682aac646f8603eaaad50d68b303eb9fe2", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0xc02ff54ce8707133eacca2d47b0b8f2a9d303108168e1836b9b05abecafcca1c" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x77707cb7d024a57c97320e6999898d5d62adc0ea2396865f56c840bc96d6d824" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -557,7 +557,7 @@ "reason": "" } ], - "expectedNewRoot": "0x0777fa5bf4f4e196922ad075559c70e618fbef9f3cebb54373bd8fe5b5d9d7dd", + "expectedNewRoot": "0xeb4e96c476272380fc34c95085a0c22b62ca6acc35aca899f412b12400fa7b43", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -580,7 +580,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x76b362a9afd679ea13a456ab103786492c65946be653589c1fd627841d0c6fdd", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x2188fcec35b2d0bf0f25b46af7332b00659c5dd02a62a5e3f6b326d03d0940dc" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0xf0ce5654efe502ad230660a7fb8cae50f6e44c201ff4f19ea3df024312964796" }, "hashBytecode": 
"0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 diff --git a/test/vectors/src/etrog/chain-ids.json b/test/vectors/src/etrog/chain-ids.json index cd63b6ae11..97e514033a 100644 --- a/test/vectors/src/etrog/chain-ids.json +++ b/test/vectors/src/etrog/chain-ids.json @@ -45,7 +45,7 @@ "reason": "" } ], - "expectedNewRoot": "0xc779695ab569ed314668df881d04d23d0bd6723e6543c46a07b3f29309a5f24d", + "expectedNewRoot": "0x7419a71f9923fca08275c77c2603de4ecec62f37f4c1d00229b38e17424320ba", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -68,7 +68,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x329a29043c3cc4295020538645d79fc1569c5daaeb13acd503c3d750f5e47561" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x9675adb76a5d1a0776067b07073f74735f2cb4a55732e6396c1322c143c4bc68" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -145,7 +145,7 @@ "reason": "" } ], - "expectedNewRoot": "0x8127c019e957b239c27197afaefbb6306324e9ed215e97bb770e974bac5839db", + "expectedNewRoot": "0x1a6a11bd02788a7dec426f4ca37ab7ed597a9b5932d338a94e82537d7a375447", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99800000000000000000", @@ -168,7 +168,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0xe3dcdd4a0a7e3e0323ba1c8d72a354806e6c6f2eceb0ef9c94263e02f47194f2" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0xeea4404d01c82d7a732219206dba26f6f574ac45405cebc618c90e2ca176928b" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -245,7 +245,7 @@ "reason": "TX INVALID: Chain ID does not match" } ], - "expectedNewRoot": "0xc779695ab569ed314668df881d04d23d0bd6723e6543c46a07b3f29309a5f24d", + "expectedNewRoot": "0x7419a71f9923fca08275c77c2603de4ecec62f37f4c1d00229b38e17424320ba", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -268,7 +268,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x329a29043c3cc4295020538645d79fc1569c5daaeb13acd503c3d750f5e47561" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x9675adb76a5d1a0776067b07073f74735f2cb4a55732e6396c1322c143c4bc68" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -358,7 +358,7 @@ "reason": "" } ], - "expectedNewRoot": 
"0x8127c019e957b239c27197afaefbb6306324e9ed215e97bb770e974bac5839db", + "expectedNewRoot": "0x1a6a11bd02788a7dec426f4ca37ab7ed597a9b5932d338a94e82537d7a375447", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99800000000000000000", @@ -381,7 +381,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0xe3dcdd4a0a7e3e0323ba1c8d72a354806e6c6f2eceb0ef9c94263e02f47194f2" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0xeea4404d01c82d7a732219206dba26f6f574ac45405cebc618c90e2ca176928b" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 diff --git a/test/vectors/src/etrog/general.json b/test/vectors/src/etrog/general.json index 33d501f0a9..f49468d6e7 100644 --- a/test/vectors/src/etrog/general.json +++ b/test/vectors/src/etrog/general.json @@ -45,7 +45,7 @@ "reason": "" } ], - "expectedNewRoot": "0xc779695ab569ed314668df881d04d23d0bd6723e6543c46a07b3f29309a5f24d", + "expectedNewRoot": "0x7419a71f9923fca08275c77c2603de4ecec62f37f4c1d00229b38e17424320ba", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -68,7 +68,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x329a29043c3cc4295020538645d79fc1569c5daaeb13acd503c3d750f5e47561" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x9675adb76a5d1a0776067b07073f74735f2cb4a55732e6396c1322c143c4bc68" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -202,7 +202,7 @@ "reason": "TX INVALID: Chain ID does not match" } ], - "expectedNewRoot": "0x0a4dc70ff8b88768cef30e3ee65f9b702fbf78aa363fd661d678afe865feec9b", + "expectedNewRoot": "0x4369f0637a7da0ff4c671a84ce53198a8203e697d350dd75ab64f9041ba4e876", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900021000000000000", @@ -246,7 +246,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x76b362a9afd679ea13a456ab103786492c65946be653589c1fd627841d0c6fdd", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0xc1648043ad67f347b98e0e70a0fa4d9cc977a1df9421396bf4710b7326aeb4da" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0xad44dbf296830f69ee8c761c6efe3534ee86ee65e0ee70ae532f56d4f1bb7df1" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -613,7 +613,7 @@ "reason": "" } ], - "expectedNewRoot": "0xc283fd835d2a1aa390c1965569ab992aad2d81420d64c0675b729a2a5ebdbc1c", + "expectedNewRoot": "0xd1152a73c3849ad6ae615572af23a3ff623401f23d309a597a7e76081e7b0535", "expectedNewLeafs": { 
"0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99700000000000000000", @@ -636,7 +636,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0xa6db087bb38b34847d7abe47e81c892917916fb7a3e4104b273664ccb2a754c0" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0xd71042c20553e93c7e74537a148a683ec1eafffe44ac19d581d73e8abbdac925" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 diff --git a/test/vectors/src/etrog/nonces.json b/test/vectors/src/etrog/nonces.json index e787aa5b57..e24cdee669 100644 --- a/test/vectors/src/etrog/nonces.json +++ b/test/vectors/src/etrog/nonces.json @@ -58,7 +58,7 @@ "reason": "TX INVALID: Invalid nonce" } ], - "expectedNewRoot": "0xc779695ab569ed314668df881d04d23d0bd6723e6543c46a07b3f29309a5f24d", + "expectedNewRoot": "0x7419a71f9923fca08275c77c2603de4ecec62f37f4c1d00229b38e17424320ba", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -81,7 +81,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x329a29043c3cc4295020538645d79fc1569c5daaeb13acd503c3d750f5e47561" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x9675adb76a5d1a0776067b07073f74735f2cb4a55732e6396c1322c143c4bc68" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -158,7 +158,7 @@ "reason": "TX INVALID: Invalid nonce" } ], - "expectedNewRoot": "0xc779695ab569ed314668df881d04d23d0bd6723e6543c46a07b3f29309a5f24d", + "expectedNewRoot": "0x7419a71f9923fca08275c77c2603de4ecec62f37f4c1d00229b38e17424320ba", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99900000000000000000", @@ -181,7 +181,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x329a29043c3cc4295020538645d79fc1569c5daaeb13acd503c3d750f5e47561" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x9675adb76a5d1a0776067b07073f74735f2cb4a55732e6396c1322c143c4bc68" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -284,7 +284,7 @@ "reason": "" } ], - "expectedNewRoot": "0x8127c019e957b239c27197afaefbb6306324e9ed215e97bb770e974bac5839db", + "expectedNewRoot": "0x1a6a11bd02788a7dec426f4ca37ab7ed597a9b5932d338a94e82537d7a375447", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "99800000000000000000", @@ -307,7 +307,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", 
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0xe3dcdd4a0a7e3e0323ba1c8d72a354806e6c6f2eceb0ef9c94263e02f47194f2" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0xeea4404d01c82d7a732219206dba26f6f574ac45405cebc618c90e2ca176928b" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 diff --git a/test/vectors/src/etrog/seq-fees.json b/test/vectors/src/etrog/seq-fees.json index 0c9e18d1c0..cc0fd67063 100644 --- a/test/vectors/src/etrog/seq-fees.json +++ b/test/vectors/src/etrog/seq-fees.json @@ -44,7 +44,7 @@ "reason": "" } ], - "expectedNewRoot": "0x49d957444a1efadc63e03a2165d7797ad29c59704ce456cbf6ca5bff846679e1", + "expectedNewRoot": "0xdcbc4f7af20a522c9bae09dfa040f5689d8aa0157ad89e53bec6f0b82c0e568d", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "100000000000000000000", @@ -67,7 +67,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x4a9bfcb163ec91c5beb22e6aca41592433092c8c7821b01d37fd0de483f9265d", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x4131fda7ac3a06a611ce9087f07744f5f1dc0ceccc59e7d302df0778052a0a76" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x5ba4d7b4f761bc8971885fdd21a89cb3d7b4568547e121adcb0702b247d00955" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 @@ -144,7 +144,7 @@ "customRawTx": "0xe88001830186a0944d5cf5032b2a844602278b01199ed191a86c93ff8612309ce54000808203e88080d888c8279ee16a7089a1365d72d86590ce9693b9cfe245c3f929197845085b273b8c51009f0bd7d1fbdcd7f91e3397e27bf263437fc0b097afefb1040cc635231bff" } ], - "expectedNewRoot": "0xd62be8855f11fa9c83e3fbe8975d5214f60b98a373346a69f706d6984d64041a", + "expectedNewRoot": "0x75ef75b63f3e7022ad66f69779c80b17d0d887426914522e39db96fe63cd33b3", "expectedNewLeafs": { "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D": { "balance": "1000000000000", @@ -167,7 +167,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000000": "0x01", "0x0000000000000000000000000000000000000000000000000000000000000002": "0x73e6af6f", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49": "0x8397a02db0909df274170a8a1a4e45f6d7f24e66606976cd00960d8c6c850c97", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0d5e3907b92bc4b51ef293caf60423c0222ea2c71234256cf84c745d464cd560" + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x3e675ac2f8896ab5ec58b767b6e32a4b1c0888ab8be203abc5d1327adcc9e937" }, "hashBytecode": "0x0000000000000000000000000000000000000000000000000000000000000000", "bytecodeLength": 0 From f69f0f9c6287d6dad4ddc224b8d37cc059ce5c0b Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 26 Jan 2024 19:28:52 +0100 Subject: [PATCH 30/54] if a trusted batch is empty and WIP just create the entry in state.batch (#3152) --- etherman/etherman.go | 7 ++++ synchronizer/common/converters.go | 29 +++++++++++++++++ .../executor_trusted_batch_sync.go | 32 ++++++++++++++++--- 3 files 
changed, 64 insertions(+), 4 deletions(-) create mode 100644 synchronizer/common/converters.go diff --git a/etherman/etherman.go b/etherman/etherman.go index c605b53ad6..d384d45652 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -201,26 +201,32 @@ func NewClient(cfg Config, l1Config L1Config) (*Client, error) { // Create smc clients zkevm, err := polygonzkevm.NewPolygonzkevm(l1Config.ZkEVMAddr, ethClient) if err != nil { + log.Errorf("error creating Polygonzkevm client (%s). Error: %w", l1Config.ZkEVMAddr.String(), err) return nil, err } oldZkevm, err := oldpolygonzkevm.NewOldpolygonzkevm(l1Config.RollupManagerAddr, ethClient) if err != nil { + log.Errorf("error creating NewOldpolygonzkevm client (%s). Error: %w", l1Config.RollupManagerAddr.String(), err) return nil, err } rollupManager, err := polygonrollupmanager.NewPolygonrollupmanager(l1Config.RollupManagerAddr, ethClient) if err != nil { + log.Errorf("error creating NewPolygonrollupmanager client (%s). Error: %w", l1Config.RollupManagerAddr.String(), err) return nil, err } globalExitRoot, err := polygonzkevmglobalexitroot.NewPolygonzkevmglobalexitroot(l1Config.GlobalExitRootManagerAddr, ethClient) if err != nil { + log.Errorf("error creating NewPolygonzkevmglobalexitroot client (%s). Error: %w", l1Config.GlobalExitRootManagerAddr.String(), err) return nil, err } oldGlobalExitRoot, err := oldpolygonzkevmglobalexitroot.NewOldpolygonzkevmglobalexitroot(l1Config.GlobalExitRootManagerAddr, ethClient) if err != nil { + log.Errorf("error creating NewOldpolygonzkevmglobalexitroot client (%s). Error: %w", l1Config.GlobalExitRootManagerAddr.String(), err) return nil, err } pol, err := pol.NewPol(l1Config.PolAddr, ethClient) if err != nil { + log.Errorf("error creating NewPol client (%s). Error: %w", l1Config.PolAddr.String(), err) return nil, err } var scAddresses []common.Address @@ -240,6 +246,7 @@ func NewClient(cfg Config, l1Config L1Config) (*Client, error) { // Get RollupID rollupID, err := rollupManager.RollupAddressToID(&bind.CallOpts{Pending: false}, l1Config.ZkEVMAddr) if err != nil { + log.Errorf("error rollupManager.cRollupAddressToID(%s). 
Error: %w", l1Config.RollupManagerAddr, err) return nil, err } log.Debug("rollupID: ", rollupID) diff --git a/synchronizer/common/converters.go b/synchronizer/common/converters.go new file mode 100644 index 0000000000..116f1f3076 --- /dev/null +++ b/synchronizer/common/converters.go @@ -0,0 +1,29 @@ +package common + +import ( + "time" + + "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" + "github.com/0xPolygonHermez/zkevm-node/state" +) + +// RpcBatchToStateBatch converts a rpc batch to a state batch +func RpcBatchToStateBatch(rpcBatch *types.Batch) *state.Batch { + if rpcBatch == nil { + return nil + } + batch := &state.Batch{ + BatchNumber: uint64(rpcBatch.Number), + Coinbase: rpcBatch.Coinbase, + StateRoot: rpcBatch.StateRoot, + BatchL2Data: rpcBatch.BatchL2Data, + GlobalExitRoot: rpcBatch.GlobalExitRoot, + LocalExitRoot: rpcBatch.MainnetExitRoot, + Timestamp: time.Unix(int64(rpcBatch.Timestamp), 0), + WIP: !rpcBatch.Closed, + } + if rpcBatch.ForcedBatchNumber != nil { + batch.ForcedBatchNum = (*uint64)(rpcBatch.ForcedBatchNumber) + } + return batch +} diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go index 0b8c3818a0..ae330fe628 100644 --- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go +++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go @@ -104,9 +104,32 @@ func (b *SyncTrustedBatchExecutorForEtrog) NothingProcess(ctx context.Context, d return &res, nil } +// CreateEmptyBatch create a new empty batch (no batchL2Data and WIP) +func (b *SyncTrustedBatchExecutorForEtrog) CreateEmptyBatch(ctx context.Context, data *l2_shared.ProcessData, dbTx pgx.Tx) (*l2_shared.ProcessResponse, error) { + log.Debugf("%s The Batch is a WIP empty, so just creating a DB entry", data.DebugPrefix) + err := b.openBatch(ctx, data.TrustedBatch, dbTx, data.DebugPrefix) + if err != nil { + log.Errorf("%s error openning batch. Error: %v", data.DebugPrefix, err) + return nil, err + } + log.Debugf("%s updateWIPBatch", data.DebugPrefix) + err = b.updateWIPBatch(ctx, data, data.TrustedBatch.StateRoot, dbTx) + if err != nil { + log.Errorf("%s error updateWIPBatch. Error: ", data.DebugPrefix, err) + return nil, err + } + res := l2_shared.NewProcessResponse() + stateBatch := syncCommon.RpcBatchToStateBatch(data.TrustedBatch) + res.UpdateCurrentBatch(stateBatch) + return &res, nil +} + // FullProcess process a batch that is not on database, so is the first time we process it func (b *SyncTrustedBatchExecutorForEtrog) FullProcess(ctx context.Context, data *l2_shared.ProcessData, dbTx pgx.Tx) (*l2_shared.ProcessResponse, error) { log.Debugf("%s FullProcess", data.DebugPrefix) + if len(data.TrustedBatch.BatchL2Data) == 0 && data.BatchMustBeClosed { + return b.CreateEmptyBatch(ctx, data, dbTx) + } err := b.checkIfWeAreSyncedFromL1ToProcessGlobalExitRoot(ctx, data, dbTx) if err != nil { log.Errorf("%s error checkIfWeAreSyncedFromL1ToProcessGlobalExitRoot. Error: %v", data.DebugPrefix, err) @@ -117,6 +140,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) FullProcess(ctx context.Context, data log.Errorf("%s error openning batch. Error: %v", data.DebugPrefix, err) return nil, err } + leafs, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, data.TrustedBatch.BatchL2Data, dbTx) if err != nil { log.Errorf("%s error getting GetL1InfoTreeDataFromBatchL2Data: %v. 
Error:%w", data.DebugPrefix, l1InfoRoot, err) @@ -144,7 +168,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) FullProcess(ctx context.Context, data } } else { log.Debugf("%s updateWIPBatch", data.DebugPrefix) - err = b.updateWIPBatch(ctx, data, processBatchResp, dbTx) + err = b.updateWIPBatch(ctx, data, processBatchResp.NewStateRoot, dbTx) if err != nil { log.Errorf("%s error updateWIPBatch. Error: ", data.DebugPrefix, err) return nil, err @@ -214,7 +238,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) IncrementalProcess(ctx context.Contex } } else { log.Debugf("%s updateWIPBatch", data.DebugPrefix) - err = b.updateWIPBatch(ctx, data, processBatchResp, dbTx) + err = b.updateWIPBatch(ctx, data, processBatchResp.NewStateRoot, dbTx) if err != nil { log.Errorf("%s error updateWIPBatch. Error: ", data.DebugPrefix, err) return nil, err @@ -237,10 +261,10 @@ func (b *SyncTrustedBatchExecutorForEtrog) checkIfWeAreSyncedFromL1ToProcessGlob return b.l1SyncChecker.CheckL1SyncStatusEnoughToProcessBatch(ctx, data.BatchNumber, data.TrustedBatch.GlobalExitRoot, dbTx) } -func (b *SyncTrustedBatchExecutorForEtrog) updateWIPBatch(ctx context.Context, data *l2_shared.ProcessData, processBatchResp *state.ProcessBatchResponse, dbTx pgx.Tx) error { +func (b *SyncTrustedBatchExecutorForEtrog) updateWIPBatch(ctx context.Context, data *l2_shared.ProcessData, NewStateRoot common.Hash, dbTx pgx.Tx) error { receipt := state.ProcessingReceipt{ BatchNumber: data.BatchNumber, - StateRoot: processBatchResp.NewStateRoot, + StateRoot: NewStateRoot, LocalExitRoot: data.TrustedBatch.LocalExitRoot, BatchL2Data: data.TrustedBatch.BatchL2Data, AccInputHash: data.TrustedBatch.AccInputHash, From 65a507e6abb18fdf84e6a8fcc83a1e187f43a9c4 Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Fri, 26 Jan 2024 16:46:46 -0300 Subject: [PATCH 31/54] fix trace for tx index greater than 0 (#3153) --- state/trace.go | 9 ++++-- test/e2e/debug_calltracer_test.go | 46 +++++++++++++++++++++------- test/e2e/debug_shared.go | 34 +++++++++++++++++++++ test/e2e/debug_test.go | 50 +++++++++++++++++++++++-------- 4 files changed, 112 insertions(+), 27 deletions(-) diff --git a/state/trace.go b/state/trace.go index c567343c82..c227ad85c0 100644 --- a/state/trace.go +++ b/state/trace.go @@ -64,8 +64,11 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has // since the executor only stores the state roots by block, we need to // execute all the txs in the block until the tx we want to trace var txsToEncode []types.Transaction + var effectivePercentage []uint8 for i := 0; i <= int(receipt.TransactionIndex); i++ { txsToEncode = append(txsToEncode, *l2Block.Transactions()[i]) + effectivePercentage = append(effectivePercentage, MaxEffectivePercentage) + log.Debugf("trace will reprocess tx: %v", l2Block.Transactions()[i].Hash().String()) } // gets batch that including the l2 block @@ -106,7 +109,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has } } // generate batch l2 data for the transaction - batchL2Data, err := EncodeTransactions(txsToEncode, []uint8{MaxEffectivePercentage}, forkId) + batchL2Data, err := EncodeTransactions(txsToEncode, effectivePercentage, forkId) if err != nil { return nil, err } @@ -187,7 +190,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has deltaTimestamp := uint32(uint64(time.Now().Unix()) - l2Block.Time()) transactions := s.BuildChangeL2Block(deltaTimestamp, uint32(0)) - batchL2Data, err := EncodeTransactions(txsToEncode, 
[]uint8{MaxEffectivePercentage}, forkId) + batchL2Data, err := EncodeTransactions(txsToEncode, effectivePercentage, forkId) if err != nil { log.Errorf("error encoding transaction ", err) return nil, err @@ -243,7 +246,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has if err != nil { return nil, err } - response = convertedResponse.BlockResponses[0].TransactionResponses[0] + response = convertedResponse.BlockResponses[0].TransactionResponses[len(convertedResponse.BlockResponses[0].TransactionResponses)-1] } // Sanity check diff --git a/test/e2e/debug_calltracer_test.go b/test/e2e/debug_calltracer_test.go index f45f362881..d145c6b3e5 100644 --- a/test/e2e/debug_calltracer_test.go +++ b/test/e2e/debug_calltracer_test.go @@ -15,6 +15,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/test/operations" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" @@ -179,19 +180,42 @@ func TestDebugTraceTransactionCallTracer(t *testing.T) { require.NoError(t, err) } - signedTx, err := tc.createSignedTx(t, ctx, auth, ethereumClient, customData) - require.NoError(t, err) - - err = ethereumClient.SendTransaction(ctx, signedTx) - require.NoError(t, err) - - log.Debugf("tx sent: %v", signedTx.Hash().String()) + var receipt *ethTypes.Receipt + var signedTx *ethTypes.Transaction + forceTxIndexDifferentFromZero := tcIdx%2 == 0 + for { + log.Debugf("forceTxIndexDifferentFromZero: %v", forceTxIndexDifferentFromZero) + var err error + if forceTxIndexDifferentFromZero { + // send eth transfers txs to make the trace tx to not be the index 0 in the block + sendEthTransfersWithoutWaiting(t, ctx, ethereumClient, auth, common.HexToAddress(operations.DefaultSequencerAddress), big.NewInt(1), 3) + } + signedTx, err = tc.createSignedTx(t, ctx, auth, ethereumClient, customData) + require.NoError(t, err) - err = operations.WaitTxToBeMined(ctx, ethereumClient, signedTx, operations.DefaultTimeoutTxToBeMined) - if err != nil && !strings.HasPrefix(err.Error(), "transaction has failed, reason:") { + err = ethereumClient.SendTransaction(ctx, signedTx) require.NoError(t, err) - } + log.Debugf("tx sent: %v", signedTx.Hash().String()) + + err = operations.WaitTxToBeMined(ctx, ethereumClient, signedTx, operations.DefaultTimeoutTxToBeMined) + if err != nil && !strings.HasPrefix(err.Error(), "transaction has failed, reason:") { + require.NoError(t, err) + } + + if forceTxIndexDifferentFromZero { + receipt, err = ethereumClient.TransactionReceipt(ctx, signedTx.Hash()) + require.NoError(t, err) + if receipt.TransactionIndex != 0 { + log.Debugf("tx receipt has tx index %v, accepted", receipt.TransactionIndex) + break + } else { + log.Debugf("tx receipt has tx index 0, retrying") + } + } else { + break + } + } debugOptions := map[string]interface{}{ "tracer": "callTracer", "tracerConfig": map[string]interface{}{ @@ -415,7 +439,7 @@ func TestDebugTraceBlockCallTracer(t *testing.T) { require.NoError(t, err) require.Nil(t, response.Error) require.NotNil(t, response.Result) - log.Debugf("%s response:%s", debugID, string(response.Result)) + // log.Debugf("%s response:%s", debugID, string(response.Result)) txHash := signedTx.Hash().String() resultForTx := findTxInResponse(t, response.Result, txHash, debugID) diff --git a/test/e2e/debug_shared.go b/test/e2e/debug_shared.go index 9db03a8631..08a0367ffb 
100644 --- a/test/e2e/debug_shared.go +++ b/test/e2e/debug_shared.go @@ -31,6 +31,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/Revert2" "github.com/0xPolygonHermez/zkevm-node/test/operations" "github.com/0xPolygonHermez/zkevm-node/test/testutils" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" @@ -849,3 +850,36 @@ func createDeployCreate0SignedTx(t *testing.T, ctx context.Context, auth *bind.T return auth.Signer(auth.From, tx) } + +func sendEthTransfersWithoutWaiting(t *testing.T, ctx context.Context, client *ethclient.Client, auth *bind.TransactOpts, to common.Address, value *big.Int, howMany int) { + nonce, err := client.PendingNonceAt(ctx, auth.From) + require.NoError(t, err) + + gasPrice, err := client.SuggestGasPrice(ctx) + require.NoError(t, err) + + gas, err := client.EstimateGas(ctx, ethereum.CallMsg{ + From: auth.From, + To: &auth.From, + GasPrice: gasPrice, + Value: value, + }) + require.NoError(t, err) + + for i := 0; i < howMany; i++ { + tx := ethTypes.NewTx(ðTypes.LegacyTx{ + To: &to, + Nonce: nonce + uint64(i), + GasPrice: gasPrice, + Value: value, + Gas: gas, + }) + + signedTx, err := auth.Signer(auth.From, tx) + require.NoError(t, err) + + err = client.SendTransaction(ctx, signedTx) + require.NoError(t, err) + log.Debugf("sending eth transfer: %v", signedTx.Hash().String()) + } +} diff --git a/test/e2e/debug_test.go b/test/e2e/debug_test.go index 63b513a93e..a514136ad2 100644 --- a/test/e2e/debug_test.go +++ b/test/e2e/debug_test.go @@ -396,24 +396,48 @@ func TestDebugTraceTransaction(t *testing.T) { require.NoError(t, err) } - signedTx, err := tc.createSignedTx(t, ctx, auth, ethereumClient, customData) - require.NoError(t, err) - - balance, err := ethereumClient.BalanceAt(ctx, auth.From, nil) - require.NoError(t, err) + var receipt *ethTypes.Receipt + var signedTx *ethTypes.Transaction + forceTxIndexDifferentFromZero := tcIdx%2 == 0 + for { + log.Debugf("forceTxIndexDifferentFromZero: %v", forceTxIndexDifferentFromZero) + var err error + if forceTxIndexDifferentFromZero { + // send eth transfers txs to make the trace tx to not be the index 0 in the block + sendEthTransfersWithoutWaiting(t, ctx, ethereumClient, auth, common.HexToAddress(operations.DefaultSequencerAddress), big.NewInt(1), 3) + } - log.Debugf("%s balance of %v: %v", debugID, auth.From, balance.String()) + signedTx, err = tc.createSignedTx(t, ctx, auth, ethereumClient, customData) + require.NoError(t, err) - err = ethereumClient.SendTransaction(ctx, signedTx) - require.NoError(t, err) + balance, err := ethereumClient.BalanceAt(ctx, auth.From, nil) + require.NoError(t, err) - log.Debugf("%s tx sent: %v", debugID, signedTx.Hash().String()) + log.Debugf("%s balance of %v: %v", debugID, auth.From, balance.String()) - err = operations.WaitTxToBeMined(ctx, ethereumClient, signedTx, operations.DefaultTimeoutTxToBeMined) - if err != nil && !strings.HasPrefix(err.Error(), "transaction has failed, reason:") { + err = ethereumClient.SendTransaction(ctx, signedTx) require.NoError(t, err) - } + log.Debugf("%s tx sent: %v", debugID, signedTx.Hash().String()) + + err = operations.WaitTxToBeMined(ctx, ethereumClient, signedTx, operations.DefaultTimeoutTxToBeMined) + if err != nil && !strings.HasPrefix(err.Error(), "transaction has failed, reason:") { + require.NoError(t, err) + } + + if forceTxIndexDifferentFromZero { + receipt, err = 
ethereumClient.TransactionReceipt(ctx, signedTx.Hash()) + require.NoError(t, err) + if receipt.TransactionIndex != 0 { + log.Debugf("tx receipt has tx index %v, accepted", receipt.TransactionIndex) + break + } else { + log.Debugf("tx receipt has tx index 0, retrying") + } + } else { + break + } + } debugOptions := map[string]interface{}{ "disableStorage": false, "disableStack": false, @@ -425,7 +449,7 @@ func TestDebugTraceTransaction(t *testing.T) { require.NoError(t, err) require.Nil(t, response.Error) require.NotNil(t, response.Result) - log.Debugf("%s response:%s", debugID, string(response.Result)) + // log.Debugf("%s response:%s", debugID, string(response.Result)) resultForTx := convertJson(t, response.Result, debugID) results[network.Name] = resultForTx From 8f79b21f45ab9340b95d433231b03b009d3eeed6 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 26 Jan 2024 20:48:40 +0100 Subject: [PATCH 32/54] add cardona.zip (#3154) --- .github/workflows/release.yml | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 364c3a73de..f72164cc09 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 #v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -33,8 +33,18 @@ jobs: uses: olegtarasov/get-tag@v2.1.2 id: tagName - - name: Put testnet and mainnet artifacts into a single zip + - name: Put cardona, testnet and mainnet artifacts into a single zip run: | + # CARDONA + mkdir -p cardona/config/environments/cardona + mkdir -p cardona/db/scripts + cp config/environments/cardona/* cardona/config/environments/cardona + cp docker-compose.yml cardona + sed -i 's/\/config\/environments\/${ZKEVM_NETWORK}/\/config\/environments\/cardona/g' cardona/docker-compose.yml + cp db/scripts/init_prover_db.sql cardona/db/scripts + mv cardona/config/environments/cardona/example.env cardona + sed -i -e "s/image: zkevm-node/image: hermeznetwork\/zkevm-node:$GIT_TAG_NAME/g" cardona/docker-compose.yml + zip -r cardona.zip cardona # TESTNET mkdir -p testnet/config/environments/testnet mkdir -p testnet/db/scripts @@ -56,10 +66,10 @@ jobs: sed -i -e "s/image: zkevm-node/image: hermeznetwork\/zkevm-node:$GIT_TAG_NAME/g" mainnet/docker-compose.yml zip -r mainnet.zip mainnet - - name: Publish testnet and mainnet zip into release + - name: Publish cardona, testnet and mainnet zip into release uses: AButler/upload-release-assets@v2.0 with: - files: 'testnet.zip;mainnet.zip' + files: 'cardona.zip;testnet.zip;mainnet.zip' repo-token: ${{ secrets.TOKEN_RELEASE }} release-tag: ${{ steps.tagName.outputs.tag }} From c100d407c39dae04b910846312b389a1fef022e7 Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Fri, 26 Jan 2024 21:46:11 +0100 Subject: [PATCH 33/54] Fix default value when creating transaction.used_sha256_hashes field in the pool (#3156) * set DEFAULT 0 when creatingtransaction.used_sha256_hashes field in the pool * update prover image to v4.0.0 --- db/migrations/pool/0012.sql | 2 +- docker-compose.yml | 2 +- test/docker-compose.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/db/migrations/pool/0012.sql b/db/migrations/pool/0012.sql index 29de6aca41..1c8f0a0d5b 100644 --- a/db/migrations/pool/0012.sql +++ b/db/migrations/pool/0012.sql @@ -1,7 +1,7 @@ 
-- +migrate Up ALTER TABLE pool.transaction ADD COLUMN l2_hash VARCHAR UNIQUE, - ADD COLUMN used_sha256_hashes INTEGER; + ADD COLUMN used_sha256_hashes INTEGER DEFAULT 0; CREATE INDEX IF NOT EXISTS idx_transaction_l2_hash ON pool.transaction (l2_hash); -- +migrate Down diff --git a/docker-compose.yml b/docker-compose.yml index e592405fcd..73a4372685 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -107,7 +107,7 @@ services: zkevm-prover: container_name: zkevm-prover restart: unless-stopped - image: hermeznetwork/zkevm-prover:v4.0.0-RC31 + image: hermeznetwork/zkevm-prover:v4.0.0 depends_on: zkevm-state-db: condition: service_healthy diff --git a/test/docker-compose.yml b/test/docker-compose.yml index e9d1992c50..16eb52a75a 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -513,7 +513,7 @@ services: zkevm-prover: container_name: zkevm-prover - image: hermeznetwork/zkevm-prover:v4.0.0-RC31 + image: hermeznetwork/zkevm-prover:v4.0.0 ports: - 50061:50061 # MT - 50071:50071 # Executor @@ -602,7 +602,7 @@ services: zkevm-permissionless-prover: container_name: zkevm-permissionless-prover - image: hermeznetwork/zkevm-prover:v4.0.0-RC31 + image: hermeznetwork/zkevm-prover:v4.0.0 ports: # - 50058:50058 # Prover - 50059:50052 # Mock prover From 1a8dd7d8a7f8fdfc23b862af062f410a5dfdf703 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:25:42 +0100 Subject: [PATCH 34/54] add cardona testnet (#2909) (#3155) * add cardona testnet (#2909) * update config * update cardona genesis --------- Co-authored-by: Thiago Coimbra Lemos --- cmd/main.go | 2 +- config/cardonagenesis.go | 109 +++ config/config.go | 2 +- config/environments/cardona/example.env | 9 + config/environments/cardona/node.config.toml | 97 +++ config/environments/cardona/postgresql.conf | 815 ++++++++++++++++++ .../environments/cardona/prover.config.json | 117 +++ config/environments/mainnet/node.config.toml | 3 +- config/environments/testnet/node.config.toml | 7 +- config/network.go | 5 +- config/network_test.go | 13 + docs/networks.md | 3 +- docs/production-setup.md | 4 +- tools/state/main.go | 2 +- 14 files changed, 1178 insertions(+), 10 deletions(-) create mode 100644 config/cardonagenesis.go create mode 100644 config/environments/cardona/example.env create mode 100644 config/environments/cardona/node.config.toml create mode 100644 config/environments/cardona/postgresql.conf create mode 100644 config/environments/cardona/prover.config.json diff --git a/cmd/main.go b/cmd/main.go index 7086e8994a..c37395d532 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -47,7 +47,7 @@ var ( networkFlag = cli.StringFlag{ Name: config.FlagNetwork, Aliases: []string{"net"}, - Usage: "Load default network configuration. Supported values: [`mainnet`, `testnet`, `custom`]", + Usage: "Load default network configuration. 
Supported values: [`mainnet`, `testnet`, `cardona`, `custom`]", Required: true, } customNetworkFlag = cli.StringFlag{ diff --git a/config/cardonagenesis.go b/config/cardonagenesis.go new file mode 100644 index 0000000000..0a77b9ca30 --- /dev/null +++ b/config/cardonagenesis.go @@ -0,0 +1,109 @@ +package config + +// CardonaNetworkConfigJSON is the hardcoded network configuration to be used for the official mainnet setup +const CardonaNetworkConfigJSON = ` +{ + "l1Config": { + "polygonZkEVMAddress": "0xA13Ddb14437A8F34897131367ad3ca78416d6bCa", + "polygonZkEVMBridgeAddress": "0x528e26b25a34a4A5d0dbDa1d57D318153d2ED582", + "polygonZkEVMGlobalExitRootAddress": "0xAd1490c248c5d3CbAE399Fd529b79B42984277DF", + "polTokenAddress": "0x6a7c3F4B0651d6DA389AD1d11D962ea458cDCA70", + "polygonRollupManagerAddress": "0x32d33D5137a7cFFb54c5Bf8371172bcEc5f310ff", + "chainId": 11155111 + }, + "genesisBlockNumber": 4789190, + "root": "0x91dfcdeb628dfdc51f3a2ee38cb17c78581e4e7ff91bcc2e327d24a9dfa46982", + "genesis": [ + { + "contractName": "PolygonZkEVMDeployer", + "balance": "0", + "nonce": "4", + "address": "0x36810012486fc134D0679c07f85fe5ba5A087D8C", + "bytecode": "0x6080604052600436106100705760003560e01c8063715018a61161004e578063715018a6146100e65780638da5cb5b146100fb578063e11ae6cb14610126578063f2fde38b1461013957600080fd5b80632b79805a146100755780634a94d4871461008a5780636d07dbf81461009d575b600080fd5b610088610083366004610927565b610159565b005b6100886100983660046109c7565b6101cb565b3480156100a957600080fd5b506100bd6100b8366004610a1e565b61020d565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100f257600080fd5b50610088610220565b34801561010757600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff166100bd565b610088610134366004610a40565b610234565b34801561014557600080fd5b50610088610154366004610a90565b61029b565b610161610357565b600061016e8585856103d8565b905061017a8183610537565b5060405173ffffffffffffffffffffffffffffffffffffffff821681527fba82f25fed02cd2a23d9f5d11c2ef588d22af5437cbf23bfe61d87257c480e4c9060200160405180910390a15050505050565b6101d3610357565b6101de83838361057b565b506040517f25adb19089b6a549831a273acdf7908cff8b7ee5f551f8d1d37996cf01c5df5b90600090a1505050565b600061021983836105a9565b9392505050565b610228610357565b61023260006105b6565b565b61023c610357565b60006102498484846103d8565b60405173ffffffffffffffffffffffffffffffffffffffff821681529091507fba82f25fed02cd2a23d9f5d11c2ef588d22af5437cbf23bfe61d87257c480e4c9060200160405180910390a150505050565b6102a3610357565b73ffffffffffffffffffffffffffffffffffffffff811661034b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b610354816105b6565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314610232576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65726044820152606401610342565b600083471015610444576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f437265617465323a20696e73756666696369656e742062616c616e63650000006044820152606401610342565b81516000036104af576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f437265617465323a2062797465636f6465206c656e677468206973207a65726f604482015260
6401610342565b8282516020840186f5905073ffffffffffffffffffffffffffffffffffffffff8116610219576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f437265617465323a204661696c6564206f6e206465706c6f79000000000000006044820152606401610342565b6060610219838360006040518060400160405280601e81526020017f416464726573733a206c6f772d6c6576656c2063616c6c206661696c6564000081525061062b565b60606105a1848484604051806060016040528060298152602001610b3d6029913961062b565b949350505050565b6000610219838330610744565b6000805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6060824710156106bd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f60448201527f722063616c6c00000000000000000000000000000000000000000000000000006064820152608401610342565b6000808673ffffffffffffffffffffffffffffffffffffffff1685876040516106e69190610acf565b60006040518083038185875af1925050503d8060008114610723576040519150601f19603f3d011682016040523d82523d6000602084013e610728565b606091505b50915091506107398783838761076e565b979650505050505050565b6000604051836040820152846020820152828152600b8101905060ff815360559020949350505050565b606083156108045782516000036107fd5773ffffffffffffffffffffffffffffffffffffffff85163b6107fd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401610342565b50816105a1565b6105a183838151156108195781518083602001fd5b806040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103429190610aeb565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f83011261088d57600080fd5b813567ffffffffffffffff808211156108a8576108a861084d565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019082821181831017156108ee576108ee61084d565b8160405283815286602085880101111561090757600080fd5b836020870160208301376000602085830101528094505050505092915050565b6000806000806080858703121561093d57600080fd5b8435935060208501359250604085013567ffffffffffffffff8082111561096357600080fd5b61096f8883890161087c565b9350606087013591508082111561098557600080fd5b506109928782880161087c565b91505092959194509250565b803573ffffffffffffffffffffffffffffffffffffffff811681146109c257600080fd5b919050565b6000806000606084860312156109dc57600080fd5b6109e58461099e565b9250602084013567ffffffffffffffff811115610a0157600080fd5b610a0d8682870161087c565b925050604084013590509250925092565b60008060408385031215610a3157600080fd5b50508035926020909101359150565b600080600060608486031215610a5557600080fd5b8335925060208401359150604084013567ffffffffffffffff811115610a7a57600080fd5b610a868682870161087c565b9150509250925092565b600060208284031215610aa257600080fd5b6102198261099e565b60005b83811015610ac6578181015183820152602001610aae565b50506000910152565b60008251610ae1818460208701610aab565b9190910192915050565b6020815260008251806020840152610b0a816040850160208701610aab565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016919091016040019291505056fe416464726573733a206c6f772d6c6576656c2063616c6c20776974682076616c7565206661696c6564a26469706673582212203e70ce334e8ec9d8d03e87415afd36dce4e82633bd277b08937095a6bd66367764736f6c6343000811003
3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000ff6250d0e86a2465b0c1bf8e36409503d6a26963" + } + }, + { + "contractName": "ProxyAdmin", + "balance": "0", + "nonce": "1", + "address": "0x85cEB41028B1a5ED2b88E395145344837308b251", + "bytecode": "0x60806040526004361061007b5760003560e01c80639623609d1161004e5780639623609d1461012b57806399a88ec41461013e578063f2fde38b1461015e578063f3b7dead1461017e57600080fd5b8063204e1c7a14610080578063715018a6146100c95780637eff275e146100e05780638da5cb5b14610100575b600080fd5b34801561008c57600080fd5b506100a061009b366004610608565b61019e565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100d557600080fd5b506100de610255565b005b3480156100ec57600080fd5b506100de6100fb36600461062c565b610269565b34801561010c57600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff166100a0565b6100de610139366004610694565b6102f7565b34801561014a57600080fd5b506100de61015936600461062c565b61038c565b34801561016a57600080fd5b506100de610179366004610608565b6103e8565b34801561018a57600080fd5b506100a0610199366004610608565b6104a4565b60008060008373ffffffffffffffffffffffffffffffffffffffff166040516101ea907f5c60da1b00000000000000000000000000000000000000000000000000000000815260040190565b600060405180830381855afa9150503d8060008114610225576040519150601f19603f3d011682016040523d82523d6000602084013e61022a565b606091505b50915091508161023957600080fd5b8080602001905181019061024d9190610788565b949350505050565b61025d6104f0565b6102676000610571565b565b6102716104f0565b6040517f8f28397000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8281166004830152831690638f283970906024015b600060405180830381600087803b1580156102db57600080fd5b505af11580156102ef573d6000803e3d6000fd5b505050505050565b6102ff6104f0565b6040517f4f1ef28600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690634f1ef28690349061035590869086906004016107a5565b6000604051808303818588803b15801561036e57600080fd5b505af1158015610382573d6000803e3d6000fd5b5050505050505050565b6103946104f0565b6040517f3659cfe600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8281166004830152831690633659cfe6906024016102c1565b6103f06104f0565b73ffffffffffffffffffffffffffffffffffffffff8116610498576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b6104a181610571565b50565b60008060008373ffffffffffffffffffffffffffffffffffffffff166040516101ea907ff851a44000000000000000000000000000000000000000000000000000000000815260040190565b60005473ffffffffffffffffffffffffffffffffffffffff163314610267576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015260640161048f565b6000805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b73ffffffffffffffffffffffffffffffffffffffff811681146104a157600080fd5b60006020828403121561061a57600080fd5b8135610625816105e6565b9392505050565b6000806040838503121561063f57600080fd5b823561064a816105e6565b915060208301356106
5a816105e6565b809150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156106a957600080fd5b83356106b4816105e6565b925060208401356106c4816105e6565b9150604084013567ffffffffffffffff808211156106e157600080fd5b818601915086601f8301126106f557600080fd5b81358181111561070757610707610665565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561074d5761074d610665565b8160405282815289602084870101111561076657600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b60006020828403121561079a57600080fd5b8151610625816105e6565b73ffffffffffffffffffffffffffffffffffffffff8316815260006020604081840152835180604085015260005b818110156107ef578581018301518582016060015282016107d3565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010192505050939250505056fea2646970667358221220372a0e10eebea1b7fa43ae4c976994e6ed01d85eedc3637b83f01d3f06be442064736f6c63430008110033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000dbc6981a11fc2b000c635bfa7c47676b25c87d39" + } + }, + { + "contractName": "PolygonZkEVMBridge implementation", + "balance": "0", + "nonce": "1", + "address": "0x8BD36ca1A55e389335004872aA3C3Be0969D3aA7", + "bytecode": "0x6080604052600436106200019f5760003560e01c8063647c576c11620000e7578063be5831c71162000089578063dbc169761162000060578063dbc169761462000639578063ee25560b1462000651578063fb570834146200068257600080fd5b8063be5831c714620005ae578063cd58657914620005ea578063d02103ca146200060157600080fd5b80639e34070f11620000be5780639e34070f146200050a578063aaa13cc2146200054f578063bab161bf146200057457600080fd5b8063647c576c146200048657806379e2cf9714620004ab57806381b1c17414620004c357600080fd5b80632d2c9d94116200015157806334ac9cf2116200012857806334ac9cf2146200034b5780633ae05047146200037a5780633e197043146200039257600080fd5b80632d2c9d9414620002765780632dfdf0b5146200029b578063318aee3d14620002c257600080fd5b806322e95f2c116200018657806322e95f2c14620001ef578063240ff378146200023a5780632cffd02e146200025157600080fd5b806315064c9614620001a45780632072f6c514620001d5575b600080fd5b348015620001b157600080fd5b50606854620001c09060ff1681565b60405190151581526020015b60405180910390f35b348015620001e257600080fd5b50620001ed620006a7565b005b348015620001fc57600080fd5b50620002146200020e366004620032db565b62000705565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001620001cc565b620001ed6200024b36600462003372565b620007a8565b3480156200025e57600080fd5b50620001ed6200027036600462003409565b620009d0565b3480156200028357600080fd5b50620001ed6200029536600462003409565b62000f74565b348015620002a857600080fd5b50620002b360535481565b604051908152602001620001cc565b348015620002cf57600080fd5b5062000319620002e1366004620034ef565b606b6020526000908152604090205463ffffffff811690640100000000900473ffffffffffffffffffffffffffffffffffffffff1682565b6040805163ffffffff909316835273ffffffffffffffffffffffffffffffffffffffff909116602083015201620001cc565b3480156200035857600080fd5b50606c54620002149073ffffffffffffffffffffffffffffffffffffffff1681565b3480156200038757600080fd5b50620002b362001178565b3480156200039f57600080fd5b50620002b3620003b136600462003526565b6040517fff0000000000000000000000000000000000000000000000000000000000000060f889901b1660208201527fffffffff0000000000000000000000000000000000000000000000000000000060e088811b821660218401527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606089811b82166025
8601529188901b909216603984015285901b16603d8201526051810183905260718101829052600090609101604051602081830303815290604052805190602001209050979650505050505050565b3480156200049357600080fd5b50620001ed620004a5366004620035b0565b6200125e565b348015620004b857600080fd5b50620001ed620014ad565b348015620004d057600080fd5b5062000214620004e236600462003600565b606a6020526000908152604090205473ffffffffffffffffffffffffffffffffffffffff1681565b3480156200051757600080fd5b50620001c06200052936600462003600565b600881901c600090815260696020526040902054600160ff9092169190911b9081161490565b3480156200055c57600080fd5b50620002146200056e3660046200361a565b620014e7565b3480156200058157600080fd5b506068546200059890610100900463ffffffff1681565b60405163ffffffff9091168152602001620001cc565b348015620005bb57600080fd5b506068546200059890790100000000000000000000000000000000000000000000000000900463ffffffff1681565b620001ed620005fb366004620036ce565b620016d3565b3480156200060e57600080fd5b50606854620002149065010000000000900473ffffffffffffffffffffffffffffffffffffffff1681565b3480156200064657600080fd5b50620001ed62001c37565b3480156200065e57600080fd5b50620002b36200067036600462003600565b60696020526000908152604090205481565b3480156200068f57600080fd5b50620001c0620006a136600462003770565b62001c93565b606c5473ffffffffffffffffffffffffffffffffffffffff163314620006f9576040517fe2e8106b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6200070362001d7c565b565b6040805160e084901b7fffffffff0000000000000000000000000000000000000000000000000000000016602080830191909152606084901b7fffffffffffffffffffffffffffffffffffffffff00000000000000000000000016602483015282516018818403018152603890920183528151918101919091206000908152606a909152205473ffffffffffffffffffffffffffffffffffffffff165b92915050565b60685460ff1615620007e6576040517f2f0047fc00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60685463ffffffff8681166101009092041614806200080c5750600263ffffffff861610155b1562000844576040517f0595ea2e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f501781209a1f8899323b96b4ef08b168df93e0a90c673d1e4cce39366cb62f9b6001606860019054906101000a900463ffffffff163388883488886053546040516200089a9998979695949392919062003806565b60405180910390a1620009b8620009b26001606860019054906101000a900463ffffffff16338989348989604051620008d592919062003881565b60405180910390206040517fff0000000000000000000000000000000000000000000000000000000000000060f889901b1660208201527fffffffff0000000000000000000000000000000000000000000000000000000060e088811b821660218401527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606089811b821660258601529188901b909216603984015285901b16603d8201526051810183905260718101829052600090609101604051602081830303815290604052805190602001209050979650505050505050565b62001e10565b8215620009c957620009c962001f27565b5050505050565b60685460ff161562000a0e576040517f2f0047fc00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b62000a258b8b8b8b8b8b8b8b8b8b8b600062001ffc565b73ffffffffffffffffffffffffffffffffffffffff861662000b01576040805160008082526020820190925273ffffffffffffffffffffffffffffffffffffffff861690859060405162000a7a9190620038e6565b60006040518083038185875af1925050503d806000811462000ab9576040519150601f19603f3d011682016040523d82523d6000602084013e62000abe565b606091505b505090508062000afa576040517f6747a28800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5062000efc565b60685463ffffffff61010090910481169088160362000b435762000b3d73fffffffffffffffffff
fffffffffffffffffffff87168585620021ed565b62000efc565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e089901b1660208201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606088901b166024820152600090603801604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291815281516020928301206000818152606a90935291205490915073ffffffffffffffffffffffffffffffffffffffff168062000e6e576000808062000c1886880188620039fb565b92509250925060008584848460405162000c329062003292565b62000c409392919062003abd565b8190604051809103906000f590508015801562000c61573d6000803e3d6000fd5b506040517f40c10f1900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8c81166004830152602482018c9052919250908216906340c10f1990604401600060405180830381600087803b15801562000cd757600080fd5b505af115801562000cec573d6000803e3d6000fd5b5050505080606a600088815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060405180604001604052808e63ffffffff1681526020018d73ffffffffffffffffffffffffffffffffffffffff16815250606b60008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050507f490e59a1701b938786ac72570a1efeac994a3dbe96e2e883e19e902ace6e6a398d8d838b8b60405162000e5c95949392919062003afa565b60405180910390a15050505062000ef9565b6040517f40c10f1900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8781166004830152602482018790528216906340c10f1990604401600060405180830381600087803b15801562000edf57600080fd5b505af115801562000ef4573d6000803e3d6000fd5b505050505b50505b6040805163ffffffff8c811682528916602082015273ffffffffffffffffffffffffffffffffffffffff88811682840152861660608201526080810185905290517f25308c93ceeed162da955b3f7ce3e3f93606579e40fb92029faa9efe275459839181900360a00190a15050505050505050505050565b60685460ff161562000fb2576040517f2f0047fc00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b62000fc98b8b8b8b8b8b8b8b8b8b8b600162001ffc565b60008473ffffffffffffffffffffffffffffffffffffffff1684888a868660405160240162000ffc949392919062003b42565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f1806b5f200000000000000000000000000000000000000000000000000000000179052516200107f9190620038e6565b60006040518083038185875af1925050503d8060008114620010be576040519150601f19603f3d011682016040523d82523d6000602084013e620010c3565b606091505b5050905080620010ff576040517f37e391c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805163ffffffff8d811682528a16602082015273ffffffffffffffffffffffffffffffffffffffff89811682840152871660608201526080810186905290517f25308c93ceeed162da955b3f7ce3e3f93606579e40fb92029faa9efe275459839181900360a00190a1505050505050505050505050565b605354600090819081805b602081101562001255578083901c600116600103620011e65760338160208110620011b257620011b262003b8a565b0154604080516020810192909252810185905260600160405160208183030381529060405280519060200120935062001213565b60408051602081018690529081018390526060016040516020818303038152906040528051906020012093505b60408051602081
0184905290810183905260600160405160208183030381529060405280519060200120915080806200124c9062003be8565b91505062001183565b50919392505050565b600054610100900460ff16158080156200127f5750600054600160ff909116105b806200129b5750303b1580156200129b575060005460ff166001145b6200132d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905580156200138c57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b606880547fffffffffffffff000000000000000000000000000000000000000000000000ff1661010063ffffffff8716027fffffffffffffff0000000000000000000000000000000000000000ffffffffff16176501000000000073ffffffffffffffffffffffffffffffffffffffff8681169190910291909117909155606c80547fffffffffffffffffffffffff00000000000000000000000000000000000000001691841691909117905562001443620022c3565b8015620014a757600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b50505050565b605354606854790100000000000000000000000000000000000000000000000000900463ffffffff16101562000703576200070362001f27565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e089901b1660208201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606088901b1660248201526000908190603801604051602081830303815290604052805190602001209050600060ff60f81b3083604051806020016200157d9062003292565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f909101166040819052620015c8908d908d908d908d908d9060200162003c23565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529082905262001606929160200162003c64565b604051602081830303815290604052805190602001206040516020016200168f94939291907fff0000000000000000000000000000000000000000000000000000000000000094909416845260609290921b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001660018401526015830152603582015260550190565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815291905280516020909101209a9950505050505050505050565b60685460ff161562001711576040517f2f0047fc00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6200171b62002366565b60685463ffffffff888116610100909204161480620017415750600263ffffffff881610155b1562001779576040517f0595ea2e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008060608773ffffffffffffffffffffffffffffffffffffffff8816620017df57883414620017d5576040517fb89240f500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000925062001ad9565b341562001818576040517f798ee6f100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8089166000908152606b602090815260409182902082518084019093525463ffffffff811683526401000000009004909216918101829052901562001908576040517f9dc29fac000000000000000000000000000000000000000000000000000000008152336004820152602481018b905273ffffffffffffffffffffffffffffffffffffffff8a1690639dc29fac90604401600060405180830381600087803b158015620018db57600080fd5b505af1158015620018f0573d6000803e3d6000fd5b50505050806020015194508060000151935062001ad7565b8515620
0191d576200191d898b8989620023db565b6040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015260009073ffffffffffffffffffffffffffffffffffffffff8b16906370a0823190602401602060405180830381865afa1580156200198b573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620019b1919062003c97565b9050620019d773ffffffffffffffffffffffffffffffffffffffff8b1633308e620028f9565b6040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015260009073ffffffffffffffffffffffffffffffffffffffff8c16906370a0823190602401602060405180830381865afa15801562001a45573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062001a6b919062003c97565b905062001a79828262003cb1565b6068548c9850610100900463ffffffff169650935062001a998762002959565b62001aa48c62002a71565b62001aaf8d62002b7e565b60405160200162001ac39392919062003abd565b604051602081830303815290604052945050505b505b7f501781209a1f8899323b96b4ef08b168df93e0a90c673d1e4cce39366cb62f9b600084868e8e868860535460405162001b1b98979695949392919062003cc7565b60405180910390a162001c0f620009b2600085878f8f8789805190602001206040517fff0000000000000000000000000000000000000000000000000000000000000060f889901b1660208201527fffffffff0000000000000000000000000000000000000000000000000000000060e088811b821660218401527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606089811b821660258601529188901b909216603984015285901b16603d8201526051810183905260718101829052600090609101604051602081830303815290604052805190602001209050979650505050505050565b861562001c205762001c2062001f27565b5050505062001c2e60018055565b50505050505050565b606c5473ffffffffffffffffffffffffffffffffffffffff16331462001c89576040517fe2e8106b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6200070362002c80565b600084815b602081101562001d6e57600163ffffffff8616821c8116900362001d0a5785816020811062001ccb5762001ccb62003b8a565b60200201358260405160200162001cec929190918252602082015260400190565b60405160208183030381529060405280519060200120915062001d59565b8186826020811062001d205762001d2062003b8a565b602002013560405160200162001d40929190918252602082015260400190565b6040516020818303038152906040528051906020012091505b8062001d658162003be8565b91505062001c98565b50821490505b949350505050565b60685460ff161562001dba576040517f2f0047fc00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556040517f2261efe5aef6fedc1fd1550b25facc9181745623049c7901287030b9ad1a549790600090a1565b80600162001e216020600262003e79565b62001e2d919062003cb1565b6053541062001e68576040517fef5ccf6600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600060536000815462001e7b9062003be8565b9182905550905060005b602081101562001f17578082901c60011660010362001ebd57826033826020811062001eb55762001eb562003b8a565b015550505050565b6033816020811062001ed35762001ed362003b8a565b01546040805160208101929092528101849052606001604051602081830303815290604052805190602001209250808062001f0e9062003be8565b91505062001e85565b5062001f2262003e87565b505050565b6053546068805463ffffffff909216790100000000000000000000000000000000000000000000000000027fffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffffff909216919091179081905573ffffffffffffffffffffffffffffffffffffffff65010000000000909104166333d6247d62001fad62001178565b6040518263ffffffff1660e01b815260040162001fcc91815260200190565b600060405180830381600087803b15801562001fe757600080fd5b505af1158015620014a7573d6000803e3d6000fd5b62
00200d8b63ffffffff1662002d10565b6068546040805160208082018e90528183018d9052825180830384018152606083019384905280519101207f257b363200000000000000000000000000000000000000000000000000000000909252606481019190915260009165010000000000900473ffffffffffffffffffffffffffffffffffffffff169063257b3632906084016020604051808303816000875af1158015620020b0573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620020d6919062003c97565b90508060000362002112576040517e2f6fad00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60685463ffffffff88811661010090920416146200215c576040517f0595ea2e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606854600090610100900463ffffffff166200217a5750896200217d565b508a5b620021a66200219d848c8c8c8c8c8c8c604051620008d592919062003881565b8f8f8462001c93565b620021dd576040517fe0417cec00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050505050505050505050505050565b60405173ffffffffffffffffffffffffffffffffffffffff831660248201526044810182905262001f229084907fa9059cbb00000000000000000000000000000000000000000000000000000000906064015b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915262002d75565b600054610100900460ff166200235c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840162001324565b6200070362002e88565b600260015403620023d4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c00604482015260640162001324565b6002600155565b6000620023ec600482848662003eb6565b620023f79162003ee2565b90507f2afa5331000000000000000000000000000000000000000000000000000000007fffffffff00000000000000000000000000000000000000000000000000000000821601620026765760008080808080806200245a896004818d62003eb6565b81019062002469919062003f2b565b96509650965096509650965096503373ffffffffffffffffffffffffffffffffffffffff168773ffffffffffffffffffffffffffffffffffffffff1614620024dd576040517f912ecce700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff861630146200252d576040517f750643af00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8a851462002567576040517f03fffc4b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805173ffffffffffffffffffffffffffffffffffffffff89811660248301528881166044830152606482018890526084820187905260ff861660a483015260c4820185905260e48083018590528351808403909101815261010490920183526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fd505accf000000000000000000000000000000000000000000000000000000001790529151918e1691620026229190620038e6565b6000604051808303816000865af19150503d806000811462002661576040519150601f19603f3d011682016040523d82523d6000602084013e62002666565b606091505b50505050505050505050620009c9565b7fffffffff0000000000000000000000000000000000000000000000000000000081167f8fcbaf0c0000000000000000000000000000000000000000000000000000000014620026f2576040517fe282c0ba00000000000000000000000000000000000000000000000000000000815260040160405180910
390fd5b6000808080808080806200270a8a6004818e62003eb6565b81019062002719919062003f86565b975097509750975097509750975097503373ffffffffffffffffffffffffffffffffffffffff168873ffffffffffffffffffffffffffffffffffffffff16146200278f576040517f912ecce700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff87163014620027df576040517f750643af00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805173ffffffffffffffffffffffffffffffffffffffff8a811660248301528981166044830152606482018990526084820188905286151560a483015260ff861660c483015260e482018590526101048083018590528351808403909101815261012490920183526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f8fcbaf0c000000000000000000000000000000000000000000000000000000001790529151918f1691620028a39190620038e6565b6000604051808303816000865af19150503d8060008114620028e2576040519150601f19603f3d011682016040523d82523d6000602084013e620028e7565b606091505b50505050505050505050505050505050565b60405173ffffffffffffffffffffffffffffffffffffffff80851660248301528316604482015260648101829052620014a79085907f23b872dd000000000000000000000000000000000000000000000000000000009060840162002240565b60408051600481526024810182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f06fdde03000000000000000000000000000000000000000000000000000000001790529051606091600091829173ffffffffffffffffffffffffffffffffffffffff861691620029dd9190620038e6565b600060405180830381855afa9150503d806000811462002a1a576040519150601f19603f3d011682016040523d82523d6000602084013e62002a1f565b606091505b50915091508162002a66576040518060400160405280600781526020017f4e4f5f4e414d450000000000000000000000000000000000000000000000000081525062001d74565b62001d748162002f21565b60408051600481526024810182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f95d89b41000000000000000000000000000000000000000000000000000000001790529051606091600091829173ffffffffffffffffffffffffffffffffffffffff86169162002af59190620038e6565b600060405180830381855afa9150503d806000811462002b32576040519150601f19603f3d011682016040523d82523d6000602084013e62002b37565b606091505b50915091508162002a66576040518060400160405280600981526020017f4e4f5f53594d424f4c000000000000000000000000000000000000000000000081525062001d74565b60408051600481526024810182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f313ce5670000000000000000000000000000000000000000000000000000000017905290516000918291829173ffffffffffffffffffffffffffffffffffffffff86169162002c019190620038e6565b600060405180830381855afa9150503d806000811462002c3e576040519150601f19603f3d011682016040523d82523d6000602084013e62002c43565b606091505b509150915081801562002c57575080516020145b62002c6457601262001d74565b8080602001905181019062001d74919062004012565b60018055565b60685460ff1662002cbd576040517f5386698100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556040517f1e5e34eea33501aecf2ebec9fe0e884a40804275ea7fe10b2ba084c8374308b390600090a1565b600881901c60008181526069602052604081208054600160ff861690811b91821892839055929091908183169003620009c9576040517f646cf55800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600062002dd9826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c65648152508573ffffffffffffffffffffffffffffffffffffffff16620031119092919063ffffffff16565b80519091501562
001f22578080602001905181019062002dfa919062004032565b62001f22576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e60448201527f6f74207375636365656400000000000000000000000000000000000000000000606482015260840162001324565b600054610100900460ff1662002c7a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840162001324565b6060604082511062002f435781806020019051810190620007a2919062004052565b8151602003620030d35760005b60208110801562002f9b575082818151811062002f715762002f7162003b8a565b01602001517fff000000000000000000000000000000000000000000000000000000000000001615155b1562002fb6578062002fad8162003be8565b91505062002f50565b8060000362002ffa57505060408051808201909152601281527f4e4f545f56414c49445f454e434f44494e4700000000000000000000000000006020820152919050565b60008167ffffffffffffffff81111562003018576200301862003891565b6040519080825280601f01601f19166020018201604052801562003043576020820181803683370190505b50905060005b82811015620030cb5784818151811062003067576200306762003b8a565b602001015160f81c60f81b82828151811062003087576200308762003b8a565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535080620030c28162003be8565b91505062003049565b509392505050565b505060408051808201909152601281527f4e4f545f56414c49445f454e434f44494e470000000000000000000000000000602082015290565b919050565b606062001d748484600085856000808673ffffffffffffffffffffffffffffffffffffffff168587604051620031489190620038e6565b60006040518083038185875af1925050503d806000811462003187576040519150601f19603f3d011682016040523d82523d6000602084013e6200318c565b606091505b50915091506200319f87838387620031aa565b979650505050505050565b60608315620032455782516000036200323d5773ffffffffffffffffffffffffffffffffffffffff85163b6200323d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640162001324565b508162001d74565b62001d7483838151156200325c5781518083602001fd5b806040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620013249190620040d2565b611b6680620040e883390190565b803563ffffffff811681146200310c57600080fd5b73ffffffffffffffffffffffffffffffffffffffff81168114620032d857600080fd5b50565b60008060408385031215620032ef57600080fd5b620032fa83620032a0565b915060208301356200330c81620032b5565b809150509250929050565b8015158114620032d857600080fd5b60008083601f8401126200333957600080fd5b50813567ffffffffffffffff8111156200335257600080fd5b6020830191508360208285010111156200336b57600080fd5b9250929050565b6000806000806000608086880312156200338b57600080fd5b6200339686620032a0565b94506020860135620033a881620032b5565b93506040860135620033ba8162003317565b9250606086013567ffffffffffffffff811115620033d757600080fd5b620033e58882890162003326565b969995985093965092949392505050565b806104008101831015620007a257600080fd5b60008060008060008060008060008060006105208c8e0312156200342c57600080fd5b620034388d8d620033f6565b9a50620034496104008d01620032a0565b99506104208c013598506104408c013597506200346a6104608d01620032a0565b96506104808c01356200347d81620032b5565b95506200348e6104a08d01620032a0565b94506104c08c0135620034a181620032b5565b93506104e08c013592506105008c013567ffffffffffffffff811115620034c757600080fd5b620034d58e828f0162003
326565b915080935050809150509295989b509295989b9093969950565b6000602082840312156200350257600080fd5b81356200350f81620032b5565b9392505050565b60ff81168114620032d857600080fd5b600080600080600080600060e0888a0312156200354257600080fd5b87356200354f8162003516565b96506200355f60208901620032a0565b955060408801356200357181620032b5565b94506200358160608901620032a0565b935060808801356200359381620032b5565b9699959850939692959460a0840135945060c09093013592915050565b600080600060608486031215620035c657600080fd5b620035d184620032a0565b92506020840135620035e381620032b5565b91506040840135620035f581620032b5565b809150509250925092565b6000602082840312156200361357600080fd5b5035919050565b600080600080600080600060a0888a0312156200363657600080fd5b6200364188620032a0565b965060208801356200365381620032b5565b9550604088013567ffffffffffffffff808211156200367157600080fd5b6200367f8b838c0162003326565b909750955060608a01359150808211156200369957600080fd5b50620036a88a828b0162003326565b9094509250506080880135620036be8162003516565b8091505092959891949750929550565b600080600080600080600060c0888a031215620036ea57600080fd5b620036f588620032a0565b965060208801356200370781620032b5565b95506040880135945060608801356200372081620032b5565b93506080880135620037328162003317565b925060a088013567ffffffffffffffff8111156200374f57600080fd5b6200375d8a828b0162003326565b989b979a50959850939692959293505050565b60008060008061046085870312156200378857600080fd5b843593506200379b8660208701620033f6565b9250620037ac6104208601620032a0565b939692955092936104400135925050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b600061010060ff8c16835263ffffffff808c16602085015273ffffffffffffffffffffffffffffffffffffffff808c166040860152818b166060860152808a166080860152508760a08501528160c0850152620038678285018789620037bd565b925080851660e085015250509a9950505050505050505050565b8183823760009101908152919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60005b83811015620038dd578181015183820152602001620038c3565b50506000910152565b60008251620038fa818460208701620038c0565b9190910192915050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156200394e576200394e62003891565b604052919050565b600067ffffffffffffffff82111562003973576200397362003891565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f830112620039b157600080fd5b8135620039c8620039c28262003956565b62003904565b818152846020838601011115620039de57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060006060848603121562003a1157600080fd5b833567ffffffffffffffff8082111562003a2a57600080fd5b62003a38878388016200399f565b9450602086013591508082111562003a4f57600080fd5b5062003a5e868287016200399f565b9250506040840135620035f58162003516565b6000815180845262003a8b816020860160208601620038c0565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60608152600062003ad2606083018662003a71565b828103602084015262003ae6818662003a71565b91505060ff83166040830152949350505050565b63ffffffff86168152600073ffffffffffffffffffffffffffffffffffffffff8087166020840152808616604084015250608060608301526200319f608083018486620037bd565b73ffffffffffffffffffffffffffffffffffffffff8516815263ffffffff8416602082015260606040820152600062003b80606083018486620037bd565b9695505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b710000000000
0000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820362003c1c5762003c1c62003bb9565b5060010190565b60608152600062003c39606083018789620037bd565b828103602084015262003c4e818688620037bd565b91505060ff831660408301529695505050505050565b6000835162003c78818460208801620038c0565b83519083019062003c8e818360208801620038c0565b01949350505050565b60006020828403121562003caa57600080fd5b5051919050565b81810381811115620007a257620007a262003bb9565b600061010060ff8b16835263ffffffff808b16602085015273ffffffffffffffffffffffffffffffffffffffff808b166040860152818a1660608601528089166080860152508660a08501528160c085015262003d278285018762003a71565b925080851660e085015250509998505050505050505050565b600181815b8085111562003d9f57817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111562003d835762003d8362003bb9565b8085161562003d9157918102915b93841c939080029062003d45565b509250929050565b60008262003db857506001620007a2565b8162003dc757506000620007a2565b816001811462003de0576002811462003deb5762003e0b565b6001915050620007a2565b60ff84111562003dff5762003dff62003bb9565b50506001821b620007a2565b5060208310610133831016604e8410600b841016171562003e30575081810a620007a2565b62003e3c838362003d40565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111562003e715762003e7162003bb9565b029392505050565b60006200350f838362003da7565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052600160045260246000fd5b6000808585111562003ec757600080fd5b8386111562003ed557600080fd5b5050820193919092039150565b7fffffffff00000000000000000000000000000000000000000000000000000000813581811691600485101562003f235780818660040360031b1b83161692505b505092915050565b600080600080600080600060e0888a03121562003f4757600080fd5b873562003f5481620032b5565b9650602088013562003f6681620032b5565b955060408801359450606088013593506080880135620035938162003516565b600080600080600080600080610100898b03121562003fa457600080fd5b883562003fb181620032b5565b9750602089013562003fc381620032b5565b96506040890135955060608901359450608089013562003fe38162003317565b935060a089013562003ff58162003516565b979a969950949793969295929450505060c08201359160e0013590565b6000602082840312156200402557600080fd5b81516200350f8162003516565b6000602082840312156200404557600080fd5b81516200350f8162003317565b6000602082840312156200406557600080fd5b815167ffffffffffffffff8111156200407d57600080fd5b8201601f810184136200408f57600080fd5b8051620040a0620039c28262003956565b818152856020838501011115620040b657600080fd5b620040c9826020830160208601620038c0565b95945050505050565b6020815260006200350f602083018462003a7156fe6101006040523480156200001257600080fd5b5060405162001b6638038062001b6683398101604081905262000035916200028d565b82826003620000458382620003a1565b506004620000548282620003a1565b50503360c0525060ff811660e052466080819052620000739062000080565b60a052506200046d915050565b60007f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f620000ad6200012e565b805160209182012060408051808201825260018152603160f81b90840152805192830193909352918101919091527fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc66060820152608081018390523060a082015260c001604051602081830303815290604052805190602001209050919050565b6060600380546200013f9062000312565b80601f01602080910402602001604051908101604052809291908181526020018280546200016d9062000312565b8015620001be5780601f106200019257610100808354040283529160200191620001be565b820191906000526020600020905b815481529060010190602001808311620001a057829003601f168201915b5050505050905090565b634e487b7
160e01b600052604160045260246000fd5b600082601f830112620001f057600080fd5b81516001600160401b03808211156200020d576200020d620001c8565b604051601f8301601f19908116603f01168101908282118183101715620002385762000238620001c8565b816040528381526020925086838588010111156200025557600080fd5b600091505b838210156200027957858201830151818301840152908201906200025a565b600093810190920192909252949350505050565b600080600060608486031215620002a357600080fd5b83516001600160401b0380821115620002bb57600080fd5b620002c987838801620001de565b94506020860151915080821115620002e057600080fd5b50620002ef86828701620001de565b925050604084015160ff811681146200030757600080fd5b809150509250925092565b600181811c908216806200032757607f821691505b6020821081036200034857634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200039c57600081815260208120601f850160051c81016020861015620003775750805b601f850160051c820191505b81811015620003985782815560010162000383565b5050505b505050565b81516001600160401b03811115620003bd57620003bd620001c8565b620003d581620003ce845462000312565b846200034e565b602080601f8311600181146200040d5760008415620003f45750858301515b600019600386901b1c1916600185901b17855562000398565b600085815260208120601f198616915b828110156200043e578886015182559484019460019091019084016200041d565b50858210156200045d5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b60805160a05160c05160e0516116aa620004bc6000396000610237015260008181610307015281816105c001526106a70152600061053a015260008181610379015261050401526116aa6000f3fe608060405234801561001057600080fd5b50600436106101775760003560e01c806370a08231116100d8578063a457c2d71161008c578063d505accf11610066578063d505accf1461039b578063dd62ed3e146103ae578063ffa1ad74146103f457600080fd5b8063a457c2d71461034e578063a9059cbb14610361578063cd0d00961461037457600080fd5b806395d89b41116100bd57806395d89b41146102e75780639dc29fac146102ef578063a3c573eb1461030257600080fd5b806370a08231146102915780637ecebe00146102c757600080fd5b806330adf81f1161012f5780633644e515116101145780633644e51514610261578063395093511461026957806340c10f191461027c57600080fd5b806330adf81f14610209578063313ce5671461023057600080fd5b806318160ddd1161016057806318160ddd146101bd57806320606b70146101cf57806323b872dd146101f657600080fd5b806306fdde031461017c578063095ea7b31461019a575b600080fd5b610184610430565b60405161019191906113e4565b60405180910390f35b6101ad6101a8366004611479565b6104c2565b6040519015158152602001610191565b6002545b604051908152602001610191565b6101c17f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f81565b6101ad6102043660046114a3565b6104dc565b6101c17f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c981565b60405160ff7f0000000000000000000000000000000000000000000000000000000000000000168152602001610191565b6101c1610500565b6101ad610277366004611479565b61055c565b61028f61028a366004611479565b6105a8565b005b6101c161029f3660046114df565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b6101c16102d53660046114df565b60056020526000908152604090205481565b610184610680565b61028f6102fd366004611479565b61068f565b6103297f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610191565b6101ad61035c366004611479565b61075e565b6101ad61036f366004611479565b61082f565b6101c17f000000000000000000000000000000000000000000000000000000000000000081565b61028f6103a9366004611501565b61083d565b6101c16103bc366004611574565b73ffffffffffffffffffffffffffffffffffffffff918216600090815260016020908152604080832093909416825291909152205490565b6101846040518060400160405280
600181526020017f310000000000000000000000000000000000000000000000000000000000000081525081565b60606003805461043f906115a7565b80601f016020809104026020016040519081016040528092919081815260200182805461046b906115a7565b80156104b85780601f1061048d576101008083540402835291602001916104b8565b820191906000526020600020905b81548152906001019060200180831161049b57829003601f168201915b5050505050905090565b6000336104d0818585610b73565b60019150505b92915050565b6000336104ea858285610d27565b6104f5858585610dfe565b506001949350505050565b60007f00000000000000000000000000000000000000000000000000000000000000004614610537576105324661106d565b905090565b507f000000000000000000000000000000000000000000000000000000000000000090565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684529091528120549091906104d090829086906105a3908790611629565b610b73565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610672576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f546f6b656e577261707065643a3a6f6e6c794272696467653a204e6f7420506f60448201527f6c79676f6e5a6b45564d4272696467650000000000000000000000000000000060648201526084015b60405180910390fd5b61067c8282611135565b5050565b60606004805461043f906115a7565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610754576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f546f6b656e577261707065643a3a6f6e6c794272696467653a204e6f7420506f60448201527f6c79676f6e5a6b45564d427269646765000000000000000000000000000000006064820152608401610669565b61067c8282611228565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716845290915281205490919083811015610822576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760448201527f207a65726f0000000000000000000000000000000000000000000000000000006064820152608401610669565b6104f58286868403610b73565b6000336104d0818585610dfe565b834211156108cc576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f546f6b656e577261707065643a3a7065726d69743a204578706972656420706560448201527f726d6974000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff8716600090815260056020526040812080547f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c9918a918a918a9190866109268361163c565b9091555060408051602081019690965273ffffffffffffffffffffffffffffffffffffffff94851690860152929091166060840152608083015260a082015260c0810186905260e0016040516020818303038152906040528051906020012090506000610991610500565b6040517f19010000000000000000000000000000000000000000000000000000000000006020820152602281019190915260428101839052606201604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120600080855291840180845281905260ff89169284019290925260608301879052608083018690529092509060019060a0016020604051602081039080840390855afa158015610a55573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015191505073ffffffffffffffffffffffffffffffffffffffff811615801590610ad057508973ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16145b610b5c576040517f08c379a000000000000000000000000000000000000000000000000
000000000815260206004820152602760248201527f546f6b656e577261707065643a3a7065726d69743a20496e76616c696420736960448201527f676e6174757265000000000000000000000000000000000000000000000000006064820152608401610669565b610b678a8a8a610b73565b50505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff8316610c15576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f72657373000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff8216610cb8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f73730000000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591015b60405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff8381166000908152600160209081526040808320938616835292905220547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114610df85781811015610deb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606401610669565b610df88484848403610b73565b50505050565b73ffffffffffffffffffffffffffffffffffffffff8316610ea1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f20616460448201527f64726573730000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff8216610f44576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201527f65737300000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff831660009081526020819052604090205481811015610ffa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e742065786365656473206260448201527f616c616e636500000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff848116600081815260208181526040808320878703905593871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610df8565b60007f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f611098610430565b8051602091820120604080518082018252600181527f310000000000000000000000000000000000000000000000000000000000000090840152805192830193909352918101919091527fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc66060820152608081018390523060a082015260c001604051602081830303815290604052805190602001209050919050565b73ffffffffffffffffffffffffffffffffffffffff82166111b2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152606401610669565b80600260008282546111c49190611629565b909155505073ffffffffffffffffffffffffffffffffffffffff82166000818152
60208181526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b73ffffffffffffffffffffffffffffffffffffffff82166112cb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f2061646472657360448201527f73000000000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff821660009081526020819052604090205481811015611381576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e60448201527f63650000000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff83166000818152602081815260408083208686039055600280548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9101610d1a565b600060208083528351808285015260005b81811015611411578581018301518582016040015282016113f5565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461147457600080fd5b919050565b6000806040838503121561148c57600080fd5b61149583611450565b946020939093013593505050565b6000806000606084860312156114b857600080fd5b6114c184611450565b92506114cf60208501611450565b9150604084013590509250925092565b6000602082840312156114f157600080fd5b6114fa82611450565b9392505050565b600080600080600080600060e0888a03121561151c57600080fd5b61152588611450565b965061153360208901611450565b95506040880135945060608801359350608088013560ff8116811461155757600080fd5b9699959850939692959460a0840135945060c09093013592915050565b6000806040838503121561158757600080fd5b61159083611450565b915061159e60208401611450565b90509250929050565b600181811c908216806115bb57607f821691505b6020821081036115f4577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b808201808211156104d6576104d66115fa565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361166d5761166d6115fa565b506001019056fea26469706673582212208d88fee561cff7120d381c345cfc534cef8229a272dc5809d4bbb685ad67141164736f6c63430008110033a2646970667358221220d9b3ca7b13ec80ac58634ddf0ecebe71e209a71f532614949b9e720413f50c8364736f6c63430008110033" + }, + { + "contractName": "PolygonZkEVMBridge proxy", + "balance": "200000000000000000000000000", + "nonce": "1", + "address": "0x528e26b25a34a4A5d0dbDa1d57D318153d2ED582", + "bytecode": 
"0x60806040526004361061005e5760003560e01c80635c60da1b116100435780635c60da1b146100a85780638f283970146100e6578063f851a440146101065761006d565b80633659cfe6146100755780634f1ef286146100955761006d565b3661006d5761006b61011b565b005b61006b61011b565b34801561008157600080fd5b5061006b61009036600461088b565b610135565b61006b6100a33660046108a6565b61017f565b3480156100b457600080fd5b506100bd6101f3565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100f257600080fd5b5061006b61010136600461088b565b610231565b34801561011257600080fd5b506100bd61025e565b6101236102d4565b61013361012e6103ab565b6103b5565b565b61013d6103d9565b73ffffffffffffffffffffffffffffffffffffffff1633036101775761017481604051806020016040528060008152506000610419565b50565b61017461011b565b6101876103d9565b73ffffffffffffffffffffffffffffffffffffffff1633036101eb576101e68383838080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525060019250610419915050565b505050565b6101e661011b565b60006101fd6103d9565b73ffffffffffffffffffffffffffffffffffffffff163303610226576102216103ab565b905090565b61022e61011b565b90565b6102396103d9565b73ffffffffffffffffffffffffffffffffffffffff1633036101775761017481610444565b60006102686103d9565b73ffffffffffffffffffffffffffffffffffffffff163303610226576102216103d9565b60606102b183836040518060600160405280602781526020016109bb602791396104a5565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff163b151590565b6102dc6103d9565b73ffffffffffffffffffffffffffffffffffffffff163303610133576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f7879207461726760648201527f6574000000000000000000000000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b600061022161052a565b3660008037600080366000845af43d6000803e8080156103d4573d6000f35b3d6000fd5b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b5473ffffffffffffffffffffffffffffffffffffffff16919050565b61042283610552565b60008251118061042f5750805b156101e65761043e838361028c565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f61046d6103d9565b6040805173ffffffffffffffffffffffffffffffffffffffff928316815291841660208301520160405180910390a16101748161059f565b60606000808573ffffffffffffffffffffffffffffffffffffffff16856040516104cf919061094d565b600060405180830381855af49150503d806000811461050a576040519150601f19603f3d011682016040523d82523d6000602084013e61050f565b606091505b5091509150610520868383876106ab565b9695505050505050565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6103fd565b61055b81610753565b60405173ffffffffffffffffffffffffffffffffffffffff8216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b73ffffffffffffffffffffffffffffffffffffffff8116610642576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016103a2565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9290921691909117905550565b6060831561074157825160000361073a5773ffffffffffffffffffffffffffffffffffffffff85163b61073a576040517f08c379a0000000000000000000000000000000
00000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e747261637400000060448201526064016103a2565b508161074b565b61074b838361081e565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff81163b6107f7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201527f6f74206120636f6e74726163740000000000000000000000000000000000000060648201526084016103a2565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc610665565b81511561082e5781518083602001fd5b806040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103a29190610969565b803573ffffffffffffffffffffffffffffffffffffffff8116811461088657600080fd5b919050565b60006020828403121561089d57600080fd5b6102b182610862565b6000806000604084860312156108bb57600080fd5b6108c484610862565b9250602084013567ffffffffffffffff808211156108e157600080fd5b818601915086601f8301126108f557600080fd5b81358181111561090457600080fd5b87602082850101111561091657600080fd5b6020830194508093505050509250925092565b60005b8381101561094457818101518382015260200161092c565b50506000910152565b6000825161095f818460208701610929565b9190910192915050565b6020815260008251806020840152610988816040850160208701610929565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a2646970667358221220a1af0d6cb4f1e31496a4c5c1448913bce4bd6ad3a39e47c6f7190c114d6f9bf464736f6c63430008110033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000068": "0x00000000000000a40d5f56745a118d0906a34e69aec8c0db1cb8fa0000000100", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000085ceb41028b1a5ed2b88e395145344837308b251", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000008bd36ca1a55e389335004872aa3c3be0969d3aa7" + } + }, + { + "contractName": "PolygonZkEVMGlobalExitRootL2 implementation", + "balance": "0", + "nonce": "1", + "address": "0x282a631D9F3Ef04Bf1A44B4C9e8bDC8EB278917f", + "bytecode": "0x608060405234801561001057600080fd5b506004361061004c5760003560e01c806301fd904414610051578063257b36321461006d57806333d6247d1461008d578063a3c573eb146100a2575b600080fd5b61005a60015481565b6040519081526020015b60405180910390f35b61005a61007b366004610162565b60006020819052908152604090205481565b6100a061009b366004610162565b6100ee565b005b6100c97f000000000000000000000000528e26b25a34a4a5d0dbda1d57d318153d2ed58281565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610064565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000528e26b25a34a4a5d0dbda1d57d318153d2ed582161461015d576040517fb49365dd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600155565b60006020828403121561017457600080fd5b503591905056fea2646970667358221220a187fc278346c1b61c449ea3641002b6eac2bda3351a122a12c35099f933696864736f6c63430008110033" + }, + { + "contractName": "PolygonZkEVMGlobalExitRootL2 proxy", + "balance": "0", + "nonce": "1", + "address": "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa", 
+ "bytecode": "0x60806040523661001357610011610017565b005b6100115b61001f6101b7565b6001600160a01b0316336001600160a01b0316141561016f5760606001600160e01b031960003516631b2ce7f360e11b8114156100655761005e6101ea565b9150610167565b6001600160e01b0319811663278f794360e11b14156100865761005e610241565b6001600160e01b031981166308f2839760e41b14156100a75761005e610287565b6001600160e01b031981166303e1469160e61b14156100c85761005e6102b8565b6001600160e01b03198116635c60da1b60e01b14156100e95761005e6102f8565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b61017761030c565b565b606061019e83836040518060600160405280602781526020016108576027913961031c565b9392505050565b90565b6001600160a01b03163b151590565b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101f4610394565b600061020336600481846106a2565b81019061021091906106e8565b905061022d8160405180602001604052806000815250600061039f565b505060408051602081019091526000815290565b606060008061025336600481846106a2565b8101906102609190610719565b915091506102708282600161039f565b604051806020016040528060008152509250505090565b6060610291610394565b60006102a036600481846106a2565b8101906102ad91906106e8565b905061022d816103cb565b60606102c2610394565b60006102cc6101b7565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b6060610302610394565b60006102cc610422565b610177610317610422565b610431565b6060600080856001600160a01b0316856040516103399190610807565b600060405180830381855af49150503d8060008114610374576040519150601f19603f3d011682016040523d82523d6000602084013e610379565b606091505b509150915061038a86838387610455565b9695505050505050565b341561017757600080fd5b6103a8836104d3565b6000825111806103b55750805b156103c6576103c48383610179565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f6103f46101b7565b604080516001600160a01b03928316815291841660208301520160405180910390a161041f81610513565b50565b600061042c6105bc565b905090565b3660008037600080366000845af43d6000803e808015610450573d6000f35b3d6000fd5b606083156104c15782516104ba576001600160a01b0385163b6104ba5760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161015e565b50816104cb565b6104cb83836105e4565b949350505050565b6104dc8161060e565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105785760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161015e565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6101db565b8151156105f45781518083602001fd5b8060405162461bcd60e51b815260040161015e9190610823565b6001600160a01b0381163b61067b5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161015e565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61059b565b600080858511156106b257600080fd5b838611156106bf57600080fd5b5050820193919092039150565b80356001600160a01b03811681146106e357600080fd5b919050565b
6000602082840312156106fa57600080fd5b61019e826106cc565b634e487b7160e01b600052604160045260246000fd5b6000806040838503121561072c57600080fd5b610735836106cc565b9150602083013567ffffffffffffffff8082111561075257600080fd5b818501915085601f83011261076657600080fd5b81358181111561077857610778610703565b604051601f8201601f19908116603f011681019083821181831017156107a0576107a0610703565b816040528281528860208487010111156107b957600080fd5b8260208601602083013760006020848301015280955050505050509250929050565b60005b838110156107f65781810151838201526020016107de565b838111156103c45750506000910152565b600082516108198184602087016107db565b9190910192915050565b60208152600082518060208401526108428160408501602087016107db565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a264697066735822122012bb4f564f73959a03513dc74fc3c6e40e8386e6f02c16b78d6db00ce0aa16af64736f6c63430008090033", + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000085ceb41028b1a5ed2b88e395145344837308b251", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x000000000000000000000000282a631d9f3ef04bf1a44b4c9e8bdc8eb278917f" + } + }, + { + "contractName": "PolygonZkEVMTimelock", + "balance": "0", + "nonce": "1", + "address": "0xdbC6981a11fc2B000c635bFA7C47676b25C87D39", + "bytecode": "0x6080604052600436106101c65760003560e01c806364d62353116100f7578063b1c5f42711610095578063d547741f11610064578063d547741f14610661578063e38335e514610681578063f23a6e6114610694578063f27a0c92146106d957600080fd5b8063b1c5f427146105af578063bc197c81146105cf578063c4d252f514610614578063d45c44351461063457600080fd5b80638f61f4f5116100d15780638f61f4f5146104e157806391d1485414610515578063a217fddf14610566578063b08e51c01461057b57600080fd5b806364d62353146104815780638065657f146104a15780638f2a0bb0146104c157600080fd5b8063248a9ca31161016457806331d507501161013e57806331d50750146103c857806336568abe146103e85780633a6aae7214610408578063584b153e1461046157600080fd5b8063248a9ca3146103475780632ab0f529146103775780632f2ff15d146103a857600080fd5b80630d3cf6fc116101a05780630d3cf6fc1461026b578063134008d31461029f57806313bc9f20146102b2578063150b7a02146102d257600080fd5b806301d5062a146101d257806301ffc9a7146101f457806307bd02651461022957600080fd5b366101cd57005b600080fd5b3480156101de57600080fd5b506101f26101ed366004611c52565b6106ee565b005b34801561020057600080fd5b5061021461020f366004611cc7565b610783565b60405190151581526020015b60405180910390f35b34801561023557600080fd5b5061025d7fd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e6381565b604051908152602001610220565b34801561027757600080fd5b5061025d7f5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca581565b6101f26102ad366004611d09565b6107df565b3480156102be57600080fd5b506102146102cd366004611d75565b6108d7565b3480156102de57600080fd5b506103166102ed366004611e9a565b7f150b7a0200000000000000000000000000000000000000000000000000000000949350505050565b6040517fffffffff000000000000000000000000000000000000000000000000000000009091168152602001610220565b34801561035357600080fd5b5061025d610362366004611d75565b60009081526020819052604090206001015490565b34801561038357600080fd5b50610214610392366004611d75565b6000908152600160208190526040909120541490565b3480156103b457600080fd5b506101f26103c3366004611f02565b6108fd565b3480156103d457600080fd5b506102146103e3366004611d75565b610927565b3480156103f457600080fd5b506101f2610403366004611f02565b610940565b34801561041457600080fd5b5061043c7f000000000000000000000000000000000000000000000000000000000000000081565b604
05173ffffffffffffffffffffffffffffffffffffffff9091168152602001610220565b34801561046d57600080fd5b5061021461047c366004611d75565b6109f8565b34801561048d57600080fd5b506101f261049c366004611d75565b610a0e565b3480156104ad57600080fd5b5061025d6104bc366004611d09565b610ade565b3480156104cd57600080fd5b506101f26104dc366004611f73565b610b1d565b3480156104ed57600080fd5b5061025d7fb09aa5aeb3702cfd50b6b62bc4532604938f21248a27a1d5ca736082b6819cc181565b34801561052157600080fd5b50610214610530366004611f02565b60009182526020828152604080842073ffffffffffffffffffffffffffffffffffffffff93909316845291905290205460ff1690565b34801561057257600080fd5b5061025d600081565b34801561058757600080fd5b5061025d7ffd643c72710c63c0180259aba6b2d05451e3591a24e58b62239378085726f78381565b3480156105bb57600080fd5b5061025d6105ca366004612025565b610d4f565b3480156105db57600080fd5b506103166105ea36600461214e565b7fbc197c810000000000000000000000000000000000000000000000000000000095945050505050565b34801561062057600080fd5b506101f261062f366004611d75565b610d94565b34801561064057600080fd5b5061025d61064f366004611d75565b60009081526001602052604090205490565b34801561066d57600080fd5b506101f261067c366004611f02565b610e8f565b6101f261068f366004612025565b610eb4565b3480156106a057600080fd5b506103166106af3660046121f8565b7ff23a6e610000000000000000000000000000000000000000000000000000000095945050505050565b3480156106e557600080fd5b5061025d611161565b7fb09aa5aeb3702cfd50b6b62bc4532604938f21248a27a1d5ca736082b6819cc161071881611244565b6000610728898989898989610ade565b90506107348184611251565b6000817f4cf4410cc57040e44862ef0f45f3dd5a5e02db8eb8add648d4b0e236f1d07dca8b8b8b8b8b8a604051610770969594939291906122a6565b60405180910390a3505050505050505050565b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f4e2312e00000000000000000000000000000000000000000000000000000000014806107d957506107d98261139e565b92915050565b600080527fdae2aa361dfd1ca020a396615627d436107c35eff9fe7738a3512819782d70696020527f5ba6852781629bcdcd4bdaa6de76d786f1c64b16acdac474e55bebc0ea157951547fd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e639060ff1661085c5761085c8133611435565b600061086c888888888888610ade565b905061087881856114ed565b6108848888888861162a565b6000817fc2617efa69bab66782fa219543714338489c4e9e178271560a91b82c3f612b588a8a8a8a6040516108bc94939291906122f1565b60405180910390a36108cd8161172e565b5050505050505050565b6000818152600160205260408120546001811180156108f65750428111155b9392505050565b60008281526020819052604090206001015461091881611244565b61092283836117d7565b505050565b60008181526001602052604081205481905b1192915050565b73ffffffffffffffffffffffffffffffffffffffff811633146109ea576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602f60248201527f416363657373436f6e74726f6c3a2063616e206f6e6c792072656e6f756e636560448201527f20726f6c657320666f722073656c66000000000000000000000000000000000060648201526084015b60405180910390fd5b6109f482826118c7565b5050565b6000818152600160208190526040822054610939565b333014610a9d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f54696d656c6f636b436f6e74726f6c6c65723a2063616c6c6572206d7573742060448201527f62652074696d656c6f636b00000000000000000000000000000000000000000060648201526084016109e1565b60025460408051918252602082018390527f11c24f4ead16507c69ac467fbd5e4eed5fb5c699626d2cc6d66421df253886d5910160405180910390a1600255565b6000868686868686604051602001610afb969594939291906122a6565b6040516020818303038152906040528051906020012090509695505050505050565b7fb09aa5aeb3702cfd50b6b62bc453260493
8f21248a27a1d5ca736082b6819cc1610b4781611244565b888714610bd6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109e1565b888514610c65576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109e1565b6000610c778b8b8b8b8b8b8b8b610d4f565b9050610c838184611251565b60005b8a811015610d415780827f4cf4410cc57040e44862ef0f45f3dd5a5e02db8eb8add648d4b0e236f1d07dca8e8e85818110610cc357610cc3612331565b9050602002016020810190610cd89190612360565b8d8d86818110610cea57610cea612331565b905060200201358c8c87818110610d0357610d03612331565b9050602002810190610d15919061237b565b8c8b604051610d29969594939291906122a6565b60405180910390a3610d3a8161240f565b9050610c86565b505050505050505050505050565b60008888888888888888604051602001610d709897969594939291906124f7565b60405160208183030381529060405280519060200120905098975050505050505050565b7ffd643c72710c63c0180259aba6b2d05451e3591a24e58b62239378085726f783610dbe81611244565b610dc7826109f8565b610e53576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603160248201527f54696d656c6f636b436f6e74726f6c6c65723a206f7065726174696f6e20636160448201527f6e6e6f742062652063616e63656c6c656400000000000000000000000000000060648201526084016109e1565b6000828152600160205260408082208290555183917fbaa1eb22f2a492ba1a5fea61b8df4d27c6c8b5f3971e63bb58fa14ff72eedb7091a25050565b600082815260208190526040902060010154610eaa81611244565b61092283836118c7565b600080527fdae2aa361dfd1ca020a396615627d436107c35eff9fe7738a3512819782d70696020527f5ba6852781629bcdcd4bdaa6de76d786f1c64b16acdac474e55bebc0ea157951547fd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e639060ff16610f3157610f318133611435565b878614610fc0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109e1565b87841461104f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109e1565b60006110618a8a8a8a8a8a8a8a610d4f565b905061106d81856114ed565b60005b8981101561114b5760008b8b8381811061108c5761108c612331565b90506020020160208101906110a19190612360565b905060008a8a848181106110b7576110b7612331565b9050602002013590503660008a8a868181106110d5576110d5612331565b90506020028101906110e7919061237b565b915091506110f78484848461162a565b84867fc2617efa69bab66782fa219543714338489c4e9e178271560a91b82c3f612b588686868660405161112e94939291906122f1565b60405180910390a350505050806111449061240f565b9050611070565b506111558161172e565b50505050505050505050565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff161580159061123257507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166315064c966040518163ffffffff1660e01b8152600401602060405180830381865afa15801561120e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018
06040525081019061123291906125be565b1561123d5750600090565b5060025490565b61124e8133611435565b50565b61125a82610927565b156112e7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602f60248201527f54696d656c6f636b436f6e74726f6c6c65723a206f7065726174696f6e20616c60448201527f7265616479207363686564756c6564000000000000000000000000000000000060648201526084016109e1565b6112ef611161565b81101561137e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f54696d656c6f636b436f6e74726f6c6c65723a20696e73756666696369656e7460448201527f2064656c6179000000000000000000000000000000000000000000000000000060648201526084016109e1565b61138881426125e0565b6000928352600160205260409092209190915550565b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f7965db0b0000000000000000000000000000000000000000000000000000000014806107d957507f01ffc9a7000000000000000000000000000000000000000000000000000000007fffffffff000000000000000000000000000000000000000000000000000000008316146107d9565b60008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915290205460ff166109f4576114738161197e565b61147e83602061199d565b60405160200161148f929190612617565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290527f08c379a00000000000000000000000000000000000000000000000000000000082526109e191600401612698565b6114f6826108d7565b611582576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f54696d656c6f636b436f6e74726f6c6c65723a206f7065726174696f6e20697360448201527f206e6f742072656164790000000000000000000000000000000000000000000060648201526084016109e1565b80158061159e5750600081815260016020819052604090912054145b6109f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f54696d656c6f636b436f6e74726f6c6c65723a206d697373696e67206465706560448201527f6e64656e6379000000000000000000000000000000000000000000000000000060648201526084016109e1565b60008473ffffffffffffffffffffffffffffffffffffffff168484846040516116549291906126e9565b60006040518083038185875af1925050503d8060008114611691576040519150601f19603f3d011682016040523d82523d6000602084013e611696565b606091505b5050905080611727576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603360248201527f54696d656c6f636b436f6e74726f6c6c65723a20756e6465726c79696e67207460448201527f72616e73616374696f6e2072657665727465640000000000000000000000000060648201526084016109e1565b5050505050565b611737816108d7565b6117c3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f54696d656c6f636b436f6e74726f6c6c65723a206f7065726174696f6e20697360448201527f206e6f742072656164790000000000000000000000000000000000000000000060648201526084016109e1565b600090815260016020819052604090912055565b60008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915290205460ff166109f45760008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff85168452909152902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556118693390565b73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16837f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a45050565b60008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915290205460ff16156109f45760008281526020818152604080832073ffffffffff
ffffffffffffffffffffffffffffff8516808552925280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905551339285917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a45050565b60606107d973ffffffffffffffffffffffffffffffffffffffff831660145b606060006119ac8360026126f9565b6119b79060026125e0565b67ffffffffffffffff8111156119cf576119cf611d8e565b6040519080825280601f01601f1916602001820160405280156119f9576020820181803683370190505b5090507f300000000000000000000000000000000000000000000000000000000000000081600081518110611a3057611a30612331565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053507f780000000000000000000000000000000000000000000000000000000000000081600181518110611a9357611a93612331565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053506000611acf8460026126f9565b611ada9060016125e0565b90505b6001811115611b77577f303132333435363738396162636465660000000000000000000000000000000085600f1660108110611b1b57611b1b612331565b1a60f81b828281518110611b3157611b31612331565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535060049490941c93611b7081612710565b9050611add565b5083156108f6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f537472696e67733a20686578206c656e67746820696e73756666696369656e7460448201526064016109e1565b803573ffffffffffffffffffffffffffffffffffffffff81168114611c0457600080fd5b919050565b60008083601f840112611c1b57600080fd5b50813567ffffffffffffffff811115611c3357600080fd5b602083019150836020828501011115611c4b57600080fd5b9250929050565b600080600080600080600060c0888a031215611c6d57600080fd5b611c7688611be0565b965060208801359550604088013567ffffffffffffffff811115611c9957600080fd5b611ca58a828b01611c09565b989b979a50986060810135976080820135975060a09091013595509350505050565b600060208284031215611cd957600080fd5b81357fffffffff00000000000000000000000000000000000000000000000000000000811681146108f657600080fd5b60008060008060008060a08789031215611d2257600080fd5b611d2b87611be0565b955060208701359450604087013567ffffffffffffffff811115611d4e57600080fd5b611d5a89828a01611c09565b979a9699509760608101359660809091013595509350505050565b600060208284031215611d8757600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611e0457611e04611d8e565b604052919050565b600082601f830112611e1d57600080fd5b813567ffffffffffffffff811115611e3757611e37611d8e565b611e6860207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601611dbd565b818152846020838601011115611e7d57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060808587031215611eb057600080fd5b611eb985611be0565b9350611ec760208601611be0565b925060408501359150606085013567ffffffffffffffff811115611eea57600080fd5b611ef687828801611e0c565b91505092959194509250565b60008060408385031215611f1557600080fd5b82359150611f2560208401611be0565b90509250929050565b60008083601f840112611f4057600080fd5b50813567ffffffffffffffff811115611f5857600080fd5b6020830191508360208260051b8501011115611c4b57600080fd5b600080600080600080600080600060c08a8c031215611f9157600080fd5b893567ffffffffffffffff80821115611fa957600080fd5b611fb58d838e01611f2e565b909b50995060208c0135915080821115611fce57600080fd5b611fda8d838e01611f2e565b909950975060408c0135915080821115611ff357600080fd5b506120008c828d01611f2e565b9a9d999
c50979a969997986060880135976080810135975060a0013595509350505050565b60008060008060008060008060a0898b03121561204157600080fd5b883567ffffffffffffffff8082111561205957600080fd5b6120658c838d01611f2e565b909a50985060208b013591508082111561207e57600080fd5b61208a8c838d01611f2e565b909850965060408b01359150808211156120a357600080fd5b506120b08b828c01611f2e565b999c989b509699959896976060870135966080013595509350505050565b600082601f8301126120df57600080fd5b8135602067ffffffffffffffff8211156120fb576120fb611d8e565b8160051b61210a828201611dbd565b928352848101820192828101908785111561212457600080fd5b83870192505b848310156121435782358252918301919083019061212a565b979650505050505050565b600080600080600060a0868803121561216657600080fd5b61216f86611be0565b945061217d60208701611be0565b9350604086013567ffffffffffffffff8082111561219a57600080fd5b6121a689838a016120ce565b945060608801359150808211156121bc57600080fd5b6121c889838a016120ce565b935060808801359150808211156121de57600080fd5b506121eb88828901611e0c565b9150509295509295909350565b600080600080600060a0868803121561221057600080fd5b61221986611be0565b945061222760208701611be0565b93506040860135925060608601359150608086013567ffffffffffffffff81111561225157600080fd5b6121eb88828901611e0c565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b73ffffffffffffffffffffffffffffffffffffffff8716815285602082015260a0604082015260006122dc60a08301868861225d565b60608301949094525060800152949350505050565b73ffffffffffffffffffffffffffffffffffffffff8516815283602082015260606040820152600061232760608301848661225d565b9695505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60006020828403121561237257600080fd5b6108f682611be0565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126123b057600080fd5b83018035915067ffffffffffffffff8211156123cb57600080fd5b602001915036819003821315611c4b57600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203612440576124406123e0565b5060010190565b81835260006020808501808196508560051b810191508460005b878110156124ea57828403895281357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18836030181126124a057600080fd5b8701858101903567ffffffffffffffff8111156124bc57600080fd5b8036038213156124cb57600080fd5b6124d686828461225d565b9a87019a9550505090840190600101612461565b5091979650505050505050565b60a0808252810188905260008960c08301825b8b8110156125455773ffffffffffffffffffffffffffffffffffffffff61253084611be0565b1682526020928301929091019060010161250a565b5083810360208501528881527f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff89111561257e57600080fd5b8860051b9150818a602083013701828103602090810160408501526125a69082018789612447565b60608401959095525050608001529695505050505050565b6000602082840312156125d057600080fd5b815180151581146108f657600080fd5b808201808211156107d9576107d96123e0565b60005b8381101561260e5781810151838201526020016125f6565b50506000910152565b7f416363657373436f6e74726f6c3a206163636f756e742000000000000000000081526000835161264f8160178501602088016125f3565b7f206973206d697373696e6720726f6c6520000000000000000000000000000000601791840191820152835161268c8160288401602088016125f3565b01602801949350505050565b60208152600082518060208401526126b78160408501602087016125f3565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b818382376000910190815291
9050565b80820281158282048414176107d9576107d96123e0565b60008161271f5761271f6123e0565b507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff019056fea26469706673582212206416c4e08f97752b4bb06159524dac058d3dccd8775e57ef1b01505751ebf7af64736f6c63430008110033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000e10", + "0xf587dde6f8846415188f807710a3304f72092565918b30307d60efdc8014f20b": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x07020fe9de9b8274d1e6cc0668a6f6344a870f35e5a847590c8069dfa85ac78f": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x64494413541ff93b31aa309254e3fed72a7456e9845988b915b4c7a7ceba8814": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5", + "0xc8e266e0814671642b74f3807affd27009fcc23f713ea92d1743e0ee0c1e7603": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x3412d5605ac6cd444957cedb533e5dacad6378b4bc819ebe3652188a665066d6": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5", + "0x9b3efc411c5f69533db363941e091f6f3af8b7e306525413577a56d27e5dbe73": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xdae2aa361dfd1ca020a396615627d436107c35eff9fe7738a3512819782d706a": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5", + "0xa2001bdd6a5944149e83176d089ee9a8246bd56aecf38fe4d6c66f5fbac18675": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xc3ad33e20b0c56a223ad5104fff154aa010f8715b9c981fd38fdc60a4d1a52fc": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5" + } + }, + { + "accountName": "keyless Deployer", + "balance": "0", + "nonce": "1", + "address": "0x1754175c450BEbB9B6E14dEe542649c0402A25d2" + }, + { + "accountName": "deployer", + "balance": "100000000000000000000000", + "nonce": "8", + "address": "0xff6250d0E86A2465B0C1bF8e36409503d6a26963" + } + ] + } +` diff --git a/config/config.go b/config/config.go index acfb2186f8..56321ab485 100644 --- a/config/config.go +++ b/config/config.go @@ -31,7 +31,7 @@ const ( FlagYes = "yes" // FlagCfg is the flag for cfg. FlagCfg = "cfg" - // FlagNetwork is the flag for the network name. Valid values: ["testnet", "mainnet", "custom"]. + // FlagNetwork is the flag for the network name. Valid values: ["testnet", "mainnet", "cardona", "custom"]. FlagNetwork = "network" // FlagCustomNetwork is the flag for the custom network file. 
This is required if --network=custom FlagCustomNetwork = "custom-network-file" diff --git a/config/environments/cardona/example.env b/config/environments/cardona/example.env new file mode 100644 index 0000000000..a78f9b52cc --- /dev/null +++ b/config/environments/cardona/example.env @@ -0,0 +1,9 @@ +ZKEVM_NETWORK = "cardona" +# URL of a JSON RPC for Goerli +ZKEVM_NODE_ETHERMAN_URL = "http://your.L1node.url" +# PATH WHERE THE STATEDB POSTGRES CONTAINER WILL STORE PERSISTENT DATA +ZKEVM_NODE_STATEDB_DATA_DIR = "/path/to/persistent/data/statedb" +# PATH WHERE THE POOLDB POSTGRES CONTAINER WILL STORE PERSISTENT DATA +ZKEVM_NODE_POOLDB_DATA_DIR = "/path/to/persistent/data/pooldb" +# OPTIONAL, UNCOMMENT IF YOU WANT TO DO ADVANCED CONFIG +# ZKEVM_ADVANCED_CONFIG_DIR = "/should/be/same/path/as/ZKEVM_CONFIG_DIR" \ No newline at end of file diff --git a/config/environments/cardona/node.config.toml b/config/environments/cardona/node.config.toml new file mode 100644 index 0000000000..4896815a38 --- /dev/null +++ b/config/environments/cardona/node.config.toml @@ -0,0 +1,97 @@ +[Log] +Environment = "development" # "production" or "development" +Level = "info" +Outputs = ["stderr"] + +[State] + [State.DB] + User = "state_user" + Password = "state_password" + Name = "state_db" + Host = "zkevm-state-db" + Port = "5432" + EnableLog = false + MaxConns = 200 + [State.Batch] + [State.Batch.Constraints] + MaxTxsPerBatch = 300 + MaxBatchBytesSize = 120000 + MaxCumulativeGasUsed = 30000000 + MaxKeccakHashes = 2145 + MaxPoseidonHashes = 252357 + MaxPoseidonPaddings = 135191 + MaxMemAligns = 236585 + MaxArithmetics = 236585 + MaxBinaries = 473170 + MaxSteps = 7570538 + MaxSHA256Hashes = 1596 + +[Pool] +IntervalToRefreshBlockedAddresses = "5m" +IntervalToRefreshGasPrices = "5s" +MaxTxBytesSize=100132 +MaxTxDataBytesSize=100000 +DefaultMinGasPriceAllowed = 1000000000 +MinAllowedGasPriceInterval = "5m" +PollMinAllowedGasPriceInterval = "15s" +AccountQueue = 64 +GlobalQueue = 1024 + [Pool.DB] + User = "pool_user" + Password = "pool_password" + Name = "pool_db" + Host = "zkevm-pool-db" + Port = "5432" + EnableLog = false + MaxConns = 200 + +[Etherman] +URL = "http://your.L1node.url" +ForkIDChunkSize = 20000 +MultiGasProvider = false + [Etherman.Etherscan] + ApiKey = "" + +[RPC] +Host = "0.0.0.0" +Port = 8545 +ReadTimeout = "60s" +WriteTimeout = "60s" +MaxRequestsPerIPAndSecond = 5000 +SequencerNodeURI = "https://rpc.devnet.zkevm-rpc.com" +EnableL2SuggestedGasPricePolling = false + [RPC.WebSockets] + Enabled = true + Port = 8546 + +[Synchronizer] +SyncInterval = "2s" +SyncChunkSize = 100 +TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +L1SynchronizationMode = "sequential" + +[MTClient] +URI = "zkevm-prover:50061" + +[Executor] +URI = "zkevm-prover:50071" +MaxResourceExhaustedAttempts = 3 +WaitOnResourceExhaustion = "1s" +MaxGRPCMessageSize = 100000000 + +[Metrics] +Host = "0.0.0.0" +Port = 9091 +Enabled = false +ProfilingHost = "0.0.0.0" +ProfilingPort = 6060 +ProfilingEnabled = false + +[HashDB] +User = "prover_user" +Password = "prover_pass" +Name = "prover_db" +Host = "zkevm-state-db" +Port = "5432" +EnableLog = false +MaxConns = 200 diff --git a/config/environments/cardona/postgresql.conf b/config/environments/cardona/postgresql.conf new file mode 100644 index 0000000000..51dff68697 --- /dev/null +++ b/config/environments/cardona/postgresql.conf @@ -0,0 +1,815 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# 
This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = scram-sha-256 # scram-sha-256 or md5 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 8GB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +temp_buffers = 64MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+work_mem = 104857kB # min 64kB +#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem +maintenance_work_mem = 2GB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is usually the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +effective_io_concurrency = 300 # 1-1000; 0 disables prefetching +#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching +max_worker_processes = 16 # (change requires restart) +max_parallel_workers_per_gather = 4 # taken from max_parallel_workers +max_parallel_maintenance_workers = 4 # taken from max_parallel_workers +max_parallel_workers = 16 # maximum number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enables compression of full-page writes; + # off, pglz, lz4, zstd, or on +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +wal_buffers = 16MB # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - 
Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +max_wal_size = 8GB +min_wal_size = 2GB + +# - Prefetching during recovery - + +#recovery_prefetch = try # prefetch pages referenced in the WAL? +#wal_decode_buffer_size = 512kB # lookahead window used for prefetching + # (change requires restart) + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_library = '' # library to use to archive a logfile segment + # (empty string indicates archive_command should + # be used) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived logfile segment + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. 
+ +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a primary server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#promote_trigger_file = '' # file name whose presence ends recovery +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +random_page_cost = 1.1 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +effective_cache_size = 24GB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # 
selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan +#recursive_worktable_factor = 10.0 # range 0.001-1000000 + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, jsonlog, syslog, and + # eventlog, depending on platform. + # csvlog and jsonlog require + # logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr, jsonlog, + # and csvlog into log files. Required + # to be on for csvlogs and jsonlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. 
+ +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +#log_startup_progress_interval = 10s # Time between progress updates for + # long-running startup operations. + # 0 disables the feature, > 0 indicates + # the interval in milliseconds. + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = 10min # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#log_checkpoints = on +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'Etc/UTC' + + +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Cumulative Query and Index Statistics - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_fetch_consistency = cache + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table + # size before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'Etc/UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'en_US.utf8' # locale for system error message + # strings +lc_monetary = 'en_US.utf8' # locale for monetary formatting +lc_numeric = 'en_US.utf8' # locale for number formatting +lc_time = 'en_US.utf8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +#shared_preload_libraries = '' # (change requires restart) +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#extension_destdir = '' # prepend path when loading extensions + # and shared objects (added by Debian) +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/config/environments/cardona/prover.config.json b/config/environments/cardona/prover.config.json new file mode 100644 index 0000000000..15c999b37b --- /dev/null +++ b/config/environments/cardona/prover.config.json @@ -0,0 +1,117 @@ +{ + "runProverServer": false, + "runProverServerMock": false, + "runProverClient": false, + + "runExecutorServer": true, + "runExecutorClient": false, + "runExecutorClientMultithread": false, + + "runHashDBServer": true, + "runHashDBTest": false, + + "runAggregatorServer": false, + "runAggregatorClient": false, + + "runFileGenProof": false, + "runFileGenBatchProof": false, + "runFileGenAggregatedProof": false, + "runFileGenFinalProof": false, + "runFileProcessBatch": false, + "runFileProcessBatchMultithread": false, + + "runKeccakScriptGenerator": false, + "runKeccakTest": false, + "runStorageSMTest": false, + "runBinarySMTest": false, + "runMemAlignSMTest": false, + "runSHA256Test": false, + "runBlakeTest": false, + + "executeInParallel": true, + "useMainExecGenerated": true, + "saveRequestToFile": false, + "saveInputToFile": false, + "saveDbReadsToFile": false, + "saveDbReadsToFileOnChange": false, + "saveOutputToFile": false, + "saveResponseToFile": false, + "loadDBToMemCache": true, + "opcodeTracer": false, + "logRemoteDbReads": false, + "logExecutorServerResponses": false, + + "proverServerPort": 50051, + "proverServerMockPort": 50052, + "proverServerMockTimeout": 10000000, + "proverClientPort": 50051, + "proverClientHost": "127.0.0.1", + + "executorServerPort": 50071, + "executorROMLineTraces": false, + "executorClientPort": 50071, + "executorClientHost": "127.0.0.1", + + "hashDBServerPort": 50061, + "hashDBURL": "local", + + "aggregatorServerPort": 50081, + "aggregatorClientPort": 50081, + "aggregatorClientHost": "127.0.0.1", + + "inputFile": "input_executor.json", + "outputPath": "output", + "cmPolsFile_disabled": "zkevm.commit", + "cmPolsFileC12a_disabled": "zkevm.c12a.commit", + "cmPolsFileRecursive1_disabled": "zkevm.recursive1.commit", + "constPolsFile": "zkevm.const", + "constPolsC12aFile": "zkevm.c12a.const", + "constPolsRecursive1File": "zkevm.recursive1.const", + "mapConstPolsFile": false, + "constantsTreeFile": "zkevm.consttree", + "constantsTreeC12aFile": "zkevm.c12a.consttree", + "constantsTreeRecursive1File": "zkevm.recursive1.consttree", + "mapConstantsTreeFile": false, + "starkFile": "zkevm.prove.json", + "starkZkIn": "zkevm.proof.zkin.json", + "starkZkInC12a":"zkevm.c12a.zkin.proof.json", + "starkFileRecursive1": "zkevm.recursive1.proof.json", + "verifierFile": "zkevm.verifier.dat", + "verifierFileRecursive1": "zkevm.recursive1.verifier.dat", + "witnessFile_disabled": "zkevm.witness.wtns", + "witnessFileRecursive1": "zkevm.recursive1.witness.wtns", + "execC12aFile": "zkevm.c12a.exec", + "execRecursive1File": "zkevm.recursive1.exec", + "starkVerifierFile": "zkevm.g16.0001.zkey", + "publicStarkFile": "zkevm.public.json", + "publicFile": "public.json", + "proofFile": "proof.json", + "keccakScriptFile": "keccak_script.json", + "keccakPolsFile_DISABLED": "keccak_pols.json", + "keccakConnectionsFile": "keccak_connections.json", + "starkInfoFile": "zkevm.starkinfo.json", + "starkInfoC12aFile": "zkevm.c12a.starkinfo.json", + "starkInfoRecursive1File": "zkevm.recursive1.starkinfo.json", + 
"databaseURL": "postgresql://prover_user:prover_pass@zkevm-state-db:5432/prover_db", + "dbNodesTableName": "state.nodes", + "dbProgramTableName": "state.program", + "dbAsyncWrite": false, + "dbMultiWrite": true, + "dbConnectionsPool": true, + "dbNumberOfPoolConnections": 30, + "dbMetrics": true, + "dbClearCache": false, + "dbGetTree": true, + "dbReadOnly": false, + "dbMTCacheSize": 8192, + "dbProgramCacheSize": 1024, + "cleanerPollingPeriod": 600, + "requestsPersistence": 3600, + "maxExecutorThreads": 20, + "maxProverThreads": 8, + "maxHashDBThreads": 8, + "ECRecoverPrecalc": false, + "ECRecoverPrecalcNThreads": 4, + "stateManager": true, + "useAssociativeCache" : false +} diff --git a/config/environments/mainnet/node.config.toml b/config/environments/mainnet/node.config.toml index abd79f3d5e..64d2644974 100644 --- a/config/environments/mainnet/node.config.toml +++ b/config/environments/mainnet/node.config.toml @@ -66,6 +66,7 @@ EnableL2SuggestedGasPricePolling = false SyncInterval = "2s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +L1SynchronizationMode = "sequential" [MTClient] URI = "zkevm-prover:50061" @@ -91,4 +92,4 @@ Name = "prover_db" Host = "zkevm-state-db" Port = "5432" EnableLog = false -MaxConns = 200 \ No newline at end of file +MaxConns = 200 diff --git a/config/environments/testnet/node.config.toml b/config/environments/testnet/node.config.toml index 485eaedb85..ed9cb7401d 100644 --- a/config/environments/testnet/node.config.toml +++ b/config/environments/testnet/node.config.toml @@ -4,7 +4,6 @@ Level = "info" Outputs = ["stderr"] [State] -AccountQueue = 64 [State.DB] User = "state_user" Password = "state_password" @@ -35,6 +34,8 @@ MaxTxDataBytesSize=100000 DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" +AccountQueue = 64 +GlobalQueue = 1024 [Pool.DB] User = "pool_user" Password = "pool_password" @@ -66,6 +67,8 @@ EnableL2SuggestedGasPricePolling = false [Synchronizer] SyncInterval = "2s" SyncChunkSize = 100 +TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +L1SynchronizationMode = "sequential" [MTClient] URI = "zkevm-prover:50061" @@ -91,4 +94,4 @@ Name = "prover_db" Host = "zkevm-state-db" Port = "5432" EnableLog = false -MaxConns = 200 \ No newline at end of file +MaxConns = 200 diff --git a/config/network.go b/config/network.go index d66728638b..20094ee8ab 100644 --- a/config/network.go +++ b/config/network.go @@ -27,6 +27,7 @@ type network string const mainnet network = "mainnet" const testnet network = "testnet" +const cardona network = "cardona" const custom network = "custom" // GenesisFromJSON is the config file for network_custom @@ -63,6 +64,8 @@ func (cfg *Config) loadNetworkConfig(ctx *cli.Context) { networkJSON = MainnetNetworkConfigJSON case string(testnet): networkJSON = TestnetNetworkConfigJSON + case string(cardona): + networkJSON = CardonaNetworkConfigJSON case string(custom): var err error cfgPath := ctx.String(FlagCustomNetwork) @@ -71,7 +74,7 @@ func (cfg *Config) loadNetworkConfig(ctx *cli.Context) { panic(err.Error()) } default: - log.Fatalf("unsupported --network value. Must be one of: [%s, %s, %s]", mainnet, testnet, custom) + log.Fatalf("unsupported --network value. 
Must be one of: [%s, %s, %s]", mainnet, testnet, cardona, custom) } config, err := LoadGenesisFromJSONString(networkJSON) if err != nil { diff --git a/config/network_test.go b/config/network_test.go index 533d89ff38..1d126ce986 100644 --- a/config/network_test.go +++ b/config/network_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/merkletree" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/ethereum/go-ethereum/common" @@ -13,6 +14,18 @@ import ( "github.com/urfave/cli/v2" ) +func TestCardona(t *testing.T) { + cfg := Config{} + fs := flag.NewFlagSet("", flag.ExitOnError) + fs.String(FlagNetwork, string(cardona), string(cardona)) + err := fs.Set(FlagNetwork, string(cardona)) + require.NoError(t, err) + app := cli.NewApp() + ctx := cli.NewContext(app, fs, nil) + + log.Info("flag=", ctx.String(FlagNetwork)) + cfg.loadNetworkConfig(ctx) +} func TestLoadCustomNetworkConfig(t *testing.T) { tcs := []struct { description string diff --git a/docs/networks.md b/docs/networks.md index d4ce7c907c..9f194102ee 100644 --- a/docs/networks.md +++ b/docs/networks.md @@ -2,4 +2,5 @@ | Network Name | ChainID | RPC URL | Explorer | Bridge Info | |--------------|---------|---------|----------|------------------| -| Public Testnet | `1402` | https://rpc.public.zkevm-test.net | https://explorer.public.zkevm-test.net | https://public.zkevm-test.net/ \ No newline at end of file +| Public Testnet | `1402` | https://rpc.public.zkevm-test.net | https://explorer.public.zkevm-test.net | https://public.zkevm-test.net/ +| Cardona Testnet | `2442` | https://rpc.cardona.zkevm-rpc.com/ | https://etherscan.cardona.zkevm-rpc.com/ | https://bridge-ui.cardona.zkevm-rpc.com/ \ No newline at end of file diff --git a/docs/production-setup.md b/docs/production-setup.md index 50c6637273..50480a68ed 100644 --- a/docs/production-setup.md +++ b/docs/production-setup.md @@ -12,7 +12,7 @@ Note that sequencing and proving functionalities are not covered in this documen - A machine to run the zkEVM node with the following requirements: - Hardware: 32G RAM, 4 cores, 128G Disk with high IOPS (as the network is super young the current disk requirements are quite low, but they will increase over time. Also note that this requirement is true if the DBs run on the same machine, but it's recommended to run Postgres on dedicated infra). Currently ARM-based CPUs are not supported - Software: Ubuntu 22.04, Docker -- A L1 node: we recommend using geth, but what it's actually needed is access to a JSON RPC interface for the L1 network (Goerli for zkEVM testnet, Ethereum mainnet for zkEVM mainnet) +- A L1 node: we recommend using geth, but what it's actually needed is access to a JSON RPC interface for the L1 network (Sepolia for Cardona zkEVM testnet, Goerli for zkEVM testnet, Ethereum mainnet for zkEVM mainnet) ## Setup @@ -37,7 +37,7 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker ### Explained step by step: -1. Define network: `ZKEVM_NET=testnet` or `ZKEVM_NET=mainnet` +1. Define network: `ZKEVM_NET=cardona` or `ZKEVM_NET=testnet` or `ZKEVM_NET=mainnet` 2. Define installation path: `ZKEVM_DIR=./path/to/install` 3. Define a config directory: `ZKEVM_CONFIG_DIR=./path/to/config` 4. 
It's recommended to source this env vars in your `~/.bashrc`, `~/.zshrc` or whatever you're using diff --git a/tools/state/main.go b/tools/state/main.go index 7696dc87e0..dfce883d19 100644 --- a/tools/state/main.go +++ b/tools/state/main.go @@ -32,7 +32,7 @@ var ( networkFlag = cli.StringFlag{ Name: config.FlagNetwork, Aliases: []string{"net"}, - Usage: "Load default network configuration. Supported values: [`mainnet`, `testnet`, `custom`]", + Usage: "Load default network configuration. Supported values: [`mainnet`, `testnet`, `cardona`, `custom`]", Required: false, } customNetworkFlag = cli.StringFlag{ From f3b86d49c5b84200e8bc79e6135af601abb822a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Fri, 26 Jan 2024 23:54:37 +0100 Subject: [PATCH 35/54] new fields in stream (#3149) * new fields in stream * update test * remove unused constant * add UpgradeEtrogBatchNumber handling to stream * doc * add log to test * add log to test * add protection * add check * fix * fix --- docs/config-file/node-config-doc.html | 2 +- docs/config-file/node-config-doc.md | 31 +++- docs/config-file/node-config-schema.json | 5 + sequencer/config.go | 2 + sequencer/datastreamer.go | 21 +-- sequencer/forcedbatch.go | 2 +- sequencer/l2block.go | 2 +- sequencer/sequencer.go | 44 ++++-- state/datastream.go | 175 ++++++++++++--------- state/test/datastream_test.go | 39 +++-- test/config/test.node.config.toml | 2 + test/e2e/jsonrpc2_test.go | 1 + tools/datastreamer/config/config.go | 2 + tools/datastreamer/config/default.go | 1 + tools/datastreamer/config/tool.config.toml | 1 + tools/datastreamer/main.go | 24 ++- 16 files changed, 226 insertions(+), 128 deletions(-) diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index d1f7eb383c..71988b24ae 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -50,7 +50,7 @@
"300ms"
 

Default: "3s"Type: string

L2BlockMaxDeltaTimestamp is the resolution of the timestamp used to close a L2 block


Examples:

"1m"
 
"300ms"
-

Default: 0Type: integer

HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number


Default: falseType: boolean

SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel)


Default: trueType: boolean

SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func


StreamServerCfg is the config for the stream server
Default: 0Type: integer

Port to listen on


Default: ""Type: string

Filename of the binary data file


Default: 0Type: integer

Version of the binary data file


Default: 0Type: integer

ChainID is the chain ID


Default: falseType: boolean

Enabled is a flag to enable/disable the data streamer


Log is the log configuration
Default: ""Type: enum (of string)

Must be one of:

  • "production"
  • "development"

Default: ""Type: enum (of string)

Must be one of:

  • "debug"
  • "info"
  • "warn"
  • "error"
  • "dpanic"
  • "panic"
  • "fatal"

Type: array of string

Each item of this array must be:


Configuration of the sequence sender service
Default: "5s"Type: string

WaitPeriodSendSequence is the time the sequencer waits until
trying to send a sequence to L1


Examples:

"1m"
+

Default: 0Type: integer

HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number


Default: falseType: boolean

SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel)


Default: trueType: boolean

SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func


StreamServerCfg is the config for the stream server
Default: 0Type: integer

Port to listen on


Default: ""Type: string

Filename of the binary data file


Default: 0Type: integer

Version of the binary data file


Default: 0Type: integer

ChainID is the chain ID


Default: falseType: boolean

Enabled is a flag to enable/disable the data streamer


Log is the log configuration
Default: ""Type: enum (of string)

Must be one of:

  • "production"
  • "development"

Default: ""Type: enum (of string)

Must be one of:

  • "debug"
  • "info"
  • "warn"
  • "error"
  • "dpanic"
  • "panic"
  • "fatal"

Type: array of string

Each item of this array must be:


Default: 0Type: integer

UpgradeEtrogBatchNumber is the batch number of the upgrade etrog


Configuration of the sequence sender service
Default: "5s"Type: string

WaitPeriodSendSequence is the time the sequencer waits until
trying to send a sequence to L1


Examples:

"1m"
 
"300ms"
 

Default: "5s"Type: string

LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent


Examples:

"1m"
 
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index abeaa062ab..7994f637f5 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -1992,14 +1992,15 @@ SequentialProcessL2Block=true
 **Type:** : `object`
 **Description:** StreamServerCfg is the config for the stream server
 
-| Property                                        | Pattern | Type    | Deprecated | Definition | Title/Description                                     |
-| ----------------------------------------------- | ------- | ------- | ---------- | ---------- | ----------------------------------------------------- |
-| - [Port](#Sequencer_StreamServer_Port )         | No      | integer | No         | -          | Port to listen on                                     |
-| - [Filename](#Sequencer_StreamServer_Filename ) | No      | string  | No         | -          | Filename of the binary data file                      |
-| - [Version](#Sequencer_StreamServer_Version )   | No      | integer | No         | -          | Version of the binary data file                       |
-| - [ChainID](#Sequencer_StreamServer_ChainID )   | No      | integer | No         | -          | ChainID is the chain ID                               |
-| - [Enabled](#Sequencer_StreamServer_Enabled )   | No      | boolean | No         | -          | Enabled is a flag to enable/disable the data streamer |
-| - [Log](#Sequencer_StreamServer_Log )           | No      | object  | No         | -          | Log is the log configuration                          |
+| Property                                                                      | Pattern | Type    | Deprecated | Definition | Title/Description                                                |
+| ----------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------------- |
+| - [Port](#Sequencer_StreamServer_Port )                                       | No      | integer | No         | -          | Port to listen on                                                |
+| - [Filename](#Sequencer_StreamServer_Filename )                               | No      | string  | No         | -          | Filename of the binary data file                                 |
+| - [Version](#Sequencer_StreamServer_Version )                                 | No      | integer | No         | -          | Version of the binary data file                                  |
+| - [ChainID](#Sequencer_StreamServer_ChainID )                                 | No      | integer | No         | -          | ChainID is the chain ID                                          |
+| - [Enabled](#Sequencer_StreamServer_Enabled )                                 | No      | boolean | No         | -          | Enabled is a flag to enable/disable the data streamer            |
+| - [Log](#Sequencer_StreamServer_Log )                                         | No      | object  | No         | -          | Log is the log configuration                                     |
+| - [UpgradeEtrogBatchNumber](#Sequencer_StreamServer_UpgradeEtrogBatchNumber ) | No      | integer | No         | -          | UpgradeEtrogBatchNumber is the batch number of the upgrade etrog |
 
 #### 10.8.1. `Sequencer.StreamServer.Port`
 
@@ -2123,6 +2124,20 @@ Must be one of:
 
 **Type:** : `array of string`
 
+#### 10.8.7. `Sequencer.StreamServer.UpgradeEtrogBatchNumber`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Description:** UpgradeEtrogBatchNumber is the batch number of the upgrade etrog
+
+**Example setting the default value** (0):
+```
+[Sequencer.StreamServer]
+UpgradeEtrogBatchNumber=0
+```
+
 ## 11. `[SequenceSender]`
 
 **Type:** : `object`
diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json
index 2ffad27300..73f9750306 100644
--- a/docs/config-file/node-config-schema.json
+++ b/docs/config-file/node-config-schema.json
@@ -805,6 +805,11 @@
 							"additionalProperties": false,
 							"type": "object",
 							"description": "Log is the log configuration"
+						},
+						"UpgradeEtrogBatchNumber": {
+							"type": "integer",
+							"description": "UpgradeEtrogBatchNumber is the batch number of the upgrade etrog",
+							"default": 0
 						}
 					},
 					"additionalProperties": false,
diff --git a/sequencer/config.go b/sequencer/config.go
index b560d4aa98..f7cb0101bb 100644
--- a/sequencer/config.go
+++ b/sequencer/config.go
@@ -46,6 +46,8 @@ type StreamServerCfg struct {
 	Enabled bool `mapstructure:"Enabled"`
 	// Log is the log configuration
 	Log log.Config `mapstructure:"Log"`
+	// UpgradeEtrogBatchNumber is the batch number of the upgrade etrog
+	UpgradeEtrogBatchNumber uint64 `mapstructure:"UpgradeEtrogBatchNumber"`
 }
 
 // FinalizerCfg contains the finalizer's configuration properties
diff --git a/sequencer/datastreamer.go b/sequencer/datastreamer.go
index 314e192686..9feac914e9 100644
--- a/sequencer/datastreamer.go
+++ b/sequencer/datastreamer.go
@@ -4,21 +4,22 @@ import (
 	"github.com/0xPolygonHermez/zkevm-node/state"
 )
 
-func (f *finalizer) DSSendL2Block(batchNumber uint64, blockResponse *state.ProcessBlockResponse) error {
+func (f *finalizer) DSSendL2Block(batchNumber uint64, blockResponse *state.ProcessBlockResponse, l1InfoTreeIndex uint32) error {
 	forkID := f.stateIntf.GetForkIDByBatchNumber(batchNumber)
 
 	// Send data to streamer
 	if f.streamServer != nil {
 		l2Block := state.DSL2Block{
-			BatchNumber:    batchNumber,
-			L2BlockNumber:  blockResponse.BlockNumber,
-			Timestamp:      int64(blockResponse.Timestamp),
-			L1BlockHash:    blockResponse.BlockHashL1,
-			GlobalExitRoot: blockResponse.GlobalExitRoot,
-			Coinbase:       f.sequencerAddress,
-			ForkID:         uint16(forkID),
-			BlockHash:      blockResponse.BlockHash,
-			StateRoot:      blockResponse.BlockHash, //From etrog, the blockhash is the block root
+			BatchNumber:     batchNumber,
+			L2BlockNumber:   blockResponse.BlockNumber,
+			Timestamp:       int64(blockResponse.Timestamp),
+			L1InfoTreeIndex: l1InfoTreeIndex,
+			L1BlockHash:     blockResponse.BlockHashL1,
+			GlobalExitRoot:  blockResponse.GlobalExitRoot,
+			Coinbase:        f.sequencerAddress,
+			ForkID:          uint16(forkID),
+			BlockHash:       blockResponse.BlockHash,
+			StateRoot:       blockResponse.BlockHash, // From Etrog, the block hash is the block root
 		}
 
 		l2Transactions := []state.DSL2Transaction{}
diff --git a/sequencer/forcedbatch.go b/sequencer/forcedbatch.go
index 211d9a6374..9ad384761d 100644
--- a/sequencer/forcedbatch.go
+++ b/sequencer/forcedbatch.go
@@ -195,7 +195,7 @@ func (f *finalizer) handleProcessForcedBatchResponse(ctx context.Context, batchR
 		}
 
 		// Send L2 block to data streamer
-		err = f.DSSendL2Block(batchResponse.NewBatchNumber, forcedL2BlockResponse)
+		err = f.DSSendL2Block(batchResponse.NewBatchNumber, forcedL2BlockResponse, 0)
 		if err != nil {
 			//TODO: we need to halt/rollback the L2 block if we had an error sending to the data streamer?
 			log.Errorf("error sending L2 block %d to data streamer, error: %v", forcedL2BlockResponse.BlockNumber, err)
diff --git a/sequencer/l2block.go b/sequencer/l2block.go
index 8ae00be9db..772c4ec662 100644
--- a/sequencer/l2block.go
+++ b/sequencer/l2block.go
@@ -390,7 +390,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
 	}
 
 	// Send L2 block to data streamer
-	err = f.DSSendL2Block(f.wipBatch.batchNumber, blockResponse)
+	err = f.DSSendL2Block(f.wipBatch.batchNumber, blockResponse, l2Block.getL1InfoTreeIndex())
 	if err != nil {
 		//TODO: we need to halt/rollback the L2 block if we had an error sending to the data streamer?
 		log.Errorf("error sending L2 block %d [%d] to data streamer, error: %v", blockResponse.BlockNumber, l2Block.trackingNum, err)
diff --git a/sequencer/sequencer.go b/sequencer/sequencer.go
index 0e04de2905..165d0721e5 100644
--- a/sequencer/sequencer.go
+++ b/sequencer/sequencer.go
@@ -90,13 +90,13 @@ func (s *Sequencer) Start(ctx context.Context) {
 			log.Fatalf("failed to start stream server, error: %v", err)
 		}
 
-		s.updateDataStreamerFile(ctx)
+		s.updateDataStreamerFile(ctx, s.cfg.StreamServer.ChainID)
 	}
 
 	go s.loadFromPool(ctx)
 
 	if s.streamServer != nil {
-		go s.sendDataToStreamer()
+		go s.sendDataToStreamer(s.cfg.StreamServer.ChainID)
 	}
 
 	s.worker = NewWorker(s.stateIntf, s.batchCfg.Constraints)
@@ -129,8 +129,8 @@ func (s *Sequencer) checkStateInconsistency(ctx context.Context) {
 	}
 }
 
-func (s *Sequencer) updateDataStreamerFile(ctx context.Context) {
-	err := state.GenerateDataStreamerFile(ctx, s.streamServer, s.stateIntf, true, nil)
+func (s *Sequencer) updateDataStreamerFile(ctx context.Context, chainID uint64) {
+	err := state.GenerateDataStreamerFile(ctx, s.streamServer, s.stateIntf, true, nil, chainID, s.cfg.StreamServer.UpgradeEtrogBatchNumber)
 	if err != nil {
 		log.Fatalf("failed to generate data streamer file, error: %v", err)
 	}
@@ -221,7 +221,7 @@ func (s *Sequencer) addTxToWorker(ctx context.Context, tx pool.Transaction) erro
 }
 
 // sendDataToStreamer sends data to the data stream server
-func (s *Sequencer) sendDataToStreamer() {
+func (s *Sequencer) sendDataToStreamer(chainID uint64) {
 	var err error
 	for {
 		// Read error from previous iteration
@@ -259,14 +259,34 @@ func (s *Sequencer) sendDataToStreamer() {
 					continue
 				}
 
+				// Get previous block timestamp to calculate delta timestamp
+				previousL2Block := state.DSL2BlockStart{}
+				if l2Block.L2BlockNumber > 0 {
+					bookMark = state.DSBookMark{
+						Type:  state.BookMarkTypeL2Block,
+						Value: l2Block.L2BlockNumber - 1,
+					}
+
+					previousL2BlockEntry, err := s.streamServer.GetFirstEventAfterBookmark(bookMark.Encode())
+					if err != nil {
+						log.Errorf("failed to get previous l2block %d, error: %v", l2Block.L2BlockNumber-1, err)
+						continue
+					}
+
+					previousL2Block = state.DSL2BlockStart{}.Decode(previousL2BlockEntry.Data)
+				}
+
 				blockStart := state.DSL2BlockStart{
-					BatchNumber:    l2Block.BatchNumber,
-					L2BlockNumber:  l2Block.L2BlockNumber,
-					Timestamp:      l2Block.Timestamp,
-					L1BlockHash:    l2Block.L1BlockHash,
-					GlobalExitRoot: l2Block.GlobalExitRoot,
-					Coinbase:       l2Block.Coinbase,
-					ForkID:         l2Block.ForkID,
+					BatchNumber:     l2Block.BatchNumber,
+					L2BlockNumber:   l2Block.L2BlockNumber,
+					Timestamp:       l2Block.Timestamp,
+					DeltaTimestamp:  uint32(l2Block.Timestamp - previousL2Block.Timestamp),
+					L1InfoTreeIndex: l2Block.L1InfoTreeIndex,
+					L1BlockHash:     l2Block.L1BlockHash,
+					GlobalExitRoot:  l2Block.GlobalExitRoot,
+					Coinbase:        l2Block.Coinbase,
+					ForkID:          l2Block.ForkID,
+					ChainID:         uint32(chainID),
 				}
 
 				_, err = s.streamServer.AddStreamEntry(state.EntryTypeL2BlockStart, blockStart.Encode())
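
The hunk above derives DeltaTimestamp by bookmarking the previous L2 block and re-reading its block-start entry from the stream. Below is a minimal, self-contained sketch (not part of the patch) of that arithmetic and of the 9-byte bookmark layout (one type byte followed by a big-endian block number); the bookmark type value and the timestamps are made-up placeholders, and the stream lookup itself is left out.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// bookmarkForBlock mirrors DSBookMark.Encode for an L2-block bookmark:
// one type byte followed by the block number as a big-endian uint64.
func bookmarkForBlock(bookMarkType byte, blockNumber uint64) []byte {
	buf := []byte{bookMarkType}
	return binary.BigEndian.AppendUint64(buf, blockNumber)
}

// deltaTimestamp is the value stored in DSL2BlockStart.DeltaTimestamp:
// the current block timestamp minus the previous block's timestamp.
func deltaTimestamp(current, previous int64) uint32 {
	return uint32(current - previous)
}

func main() {
	bm := bookmarkForBlock(1 /* placeholder for BookMarkTypeL2Block */, 41)
	fmt.Printf("bookmark: %x (%d bytes)\n", bm, len(bm))          // 9 bytes
	fmt.Println("delta:", deltaTimestamp(1706300010, 1706300007)) // 3
}
```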
diff --git a/state/datastream.go b/state/datastream.go
index 2ce8516397..74b5e7b1a8 100644
--- a/state/datastream.go
+++ b/state/datastream.go
@@ -55,50 +55,63 @@ type DSL2FullBlock struct {
 
 // DSL2Block is a full l2 block
 type DSL2Block struct {
-	BatchNumber    uint64         // 8 bytes
-	L2BlockNumber  uint64         // 8 bytes
-	Timestamp      int64          // 8 bytes
-	L1BlockHash    common.Hash    // 32 bytes
-	GlobalExitRoot common.Hash    // 32 bytes
-	Coinbase       common.Address // 20 bytes
-	ForkID         uint16         // 2 bytes
-	BlockHash      common.Hash    // 32 bytes
-	StateRoot      common.Hash    // 32 bytes
+	BatchNumber     uint64         // 8 bytes
+	L2BlockNumber   uint64         // 8 bytes
+	Timestamp       int64          // 8 bytes
+	L1InfoTreeIndex uint32         // 4 bytes
+	L1BlockHash     common.Hash    // 32 bytes
+	GlobalExitRoot  common.Hash    // 32 bytes
+	Coinbase        common.Address // 20 bytes
+	ForkID          uint16         // 2 bytes
+	ChainID         uint32         // 4 bytes
+	BlockHash       common.Hash    // 32 bytes
+	StateRoot       common.Hash    // 32 bytes
 }
 
 // DSL2BlockStart represents a data stream L2 block start
 type DSL2BlockStart struct {
-	BatchNumber    uint64         // 8 bytes
-	L2BlockNumber  uint64         // 8 bytes
-	Timestamp      int64          // 8 bytes
-	L1BlockHash    common.Hash    // 32 bytes
-	GlobalExitRoot common.Hash    // 32 bytes
-	Coinbase       common.Address // 20 bytes
-	ForkID         uint16         // 2 bytes
+	BatchNumber     uint64         // 8 bytes
+	L2BlockNumber   uint64         // 8 bytes
+	Timestamp       int64          // 8 bytes
+	DeltaTimestamp  uint32         // 4 bytes
+	L1InfoTreeIndex uint32         // 4 bytes
+	L1BlockHash     common.Hash    // 32 bytes
+	GlobalExitRoot  common.Hash    // 32 bytes
+	Coinbase        common.Address // 20 bytes
+	ForkID          uint16         // 2 bytes
+	ChainID         uint32         // 4 bytes
+
 }
 
 // Encode returns the encoded DSL2BlockStart as a byte slice
 func (b DSL2BlockStart) Encode() []byte {
 	bytes := make([]byte, 0)
-	bytes = binary.LittleEndian.AppendUint64(bytes, b.BatchNumber)
-	bytes = binary.LittleEndian.AppendUint64(bytes, b.L2BlockNumber)
-	bytes = binary.LittleEndian.AppendUint64(bytes, uint64(b.Timestamp))
+	bytes = binary.BigEndian.AppendUint64(bytes, b.BatchNumber)
+	bytes = binary.BigEndian.AppendUint64(bytes, b.L2BlockNumber)
+	bytes = binary.BigEndian.AppendUint64(bytes, uint64(b.Timestamp))
+	bytes = binary.BigEndian.AppendUint32(bytes, b.DeltaTimestamp)
+	bytes = binary.BigEndian.AppendUint32(bytes, b.L1InfoTreeIndex)
 	bytes = append(bytes, b.L1BlockHash.Bytes()...)
 	bytes = append(bytes, b.GlobalExitRoot.Bytes()...)
 	bytes = append(bytes, b.Coinbase.Bytes()...)
-	bytes = binary.LittleEndian.AppendUint16(bytes, b.ForkID)
+	bytes = binary.BigEndian.AppendUint16(bytes, b.ForkID)
+	bytes = binary.BigEndian.AppendUint32(bytes, b.ChainID)
 	return bytes
 }
 
 // Decode decodes the DSL2BlockStart from a byte slice
 func (b DSL2BlockStart) Decode(data []byte) DSL2BlockStart {
-	b.BatchNumber = binary.LittleEndian.Uint64(data[0:8])
-	b.L2BlockNumber = binary.LittleEndian.Uint64(data[8:16])
-	b.Timestamp = int64(binary.LittleEndian.Uint64(data[16:24]))
-	b.L1BlockHash = common.BytesToHash(data[24:56])
-	b.GlobalExitRoot = common.BytesToHash(data[56:88])
-	b.Coinbase = common.BytesToAddress(data[88:108])
-	b.ForkID = binary.LittleEndian.Uint16(data[108:110])
+	b.BatchNumber = binary.BigEndian.Uint64(data[0:8])
+	b.L2BlockNumber = binary.BigEndian.Uint64(data[8:16])
+	b.Timestamp = int64(binary.BigEndian.Uint64(data[16:24]))
+	b.DeltaTimestamp = binary.BigEndian.Uint32(data[24:28])
+	b.L1InfoTreeIndex = binary.BigEndian.Uint32(data[28:32])
+	b.L1BlockHash = common.BytesToHash(data[32:64])
+	b.GlobalExitRoot = common.BytesToHash(data[64:96])
+	b.Coinbase = common.BytesToAddress(data[96:116])
+	b.ForkID = binary.BigEndian.Uint16(data[116:118])
+	b.ChainID = binary.BigEndian.Uint32(data[118:122])
+
 	return b
 }
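
For reference, the new DSL2BlockStart payload is 122 bytes (8+8+8+4+4+32+32+20+2+4), with every multi-byte field big-endian. The standalone sketch below round-trips that layout with a mirror struct; field order and offsets are taken from the hunk above, and the sample values are arbitrary.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// blockStart mirrors the new DSL2BlockStart wire layout (122 bytes total).
type blockStart struct {
	BatchNumber     uint64   // bytes 0:8
	L2BlockNumber   uint64   // bytes 8:16
	Timestamp       int64    // bytes 16:24
	DeltaTimestamp  uint32   // bytes 24:28
	L1InfoTreeIndex uint32   // bytes 28:32
	L1BlockHash     [32]byte // bytes 32:64
	GlobalExitRoot  [32]byte // bytes 64:96
	Coinbase        [20]byte // bytes 96:116
	ForkID          uint16   // bytes 116:118
	ChainID         uint32   // bytes 118:122
}

func (b blockStart) encode() []byte {
	out := make([]byte, 0, 122)
	out = binary.BigEndian.AppendUint64(out, b.BatchNumber)
	out = binary.BigEndian.AppendUint64(out, b.L2BlockNumber)
	out = binary.BigEndian.AppendUint64(out, uint64(b.Timestamp))
	out = binary.BigEndian.AppendUint32(out, b.DeltaTimestamp)
	out = binary.BigEndian.AppendUint32(out, b.L1InfoTreeIndex)
	out = append(out, b.L1BlockHash[:]...)
	out = append(out, b.GlobalExitRoot[:]...)
	out = append(out, b.Coinbase[:]...)
	out = binary.BigEndian.AppendUint16(out, b.ForkID)
	out = binary.BigEndian.AppendUint32(out, b.ChainID)
	return out
}

func decode(data []byte) blockStart {
	var b blockStart
	b.BatchNumber = binary.BigEndian.Uint64(data[0:8])
	b.L2BlockNumber = binary.BigEndian.Uint64(data[8:16])
	b.Timestamp = int64(binary.BigEndian.Uint64(data[16:24]))
	b.DeltaTimestamp = binary.BigEndian.Uint32(data[24:28])
	b.L1InfoTreeIndex = binary.BigEndian.Uint32(data[28:32])
	copy(b.L1BlockHash[:], data[32:64])
	copy(b.GlobalExitRoot[:], data[64:96])
	copy(b.Coinbase[:], data[96:116])
	b.ForkID = binary.BigEndian.Uint16(data[116:118])
	b.ChainID = binary.BigEndian.Uint32(data[118:122])
	return b
}

func main() {
	in := blockStart{BatchNumber: 7, L2BlockNumber: 41, Timestamp: 1706300010,
		DeltaTimestamp: 3, L1InfoTreeIndex: 2, ForkID: 7, ChainID: 2442}
	enc := in.encode()
	fmt.Println(len(enc), decode(enc) == in) // 122 true
}
```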
 
@@ -118,7 +131,7 @@ func (l DSL2Transaction) Encode() []byte {
 	bytes = append(bytes, l.EffectiveGasPricePercentage)
 	bytes = append(bytes, l.IsValid)
 	bytes = append(bytes, l.StateRoot[:]...)
-	bytes = binary.LittleEndian.AppendUint32(bytes, l.EncodedLength)
+	bytes = binary.BigEndian.AppendUint32(bytes, l.EncodedLength)
 	bytes = append(bytes, l.Encoded...)
 	return bytes
 }
@@ -128,7 +141,7 @@ func (l DSL2Transaction) Decode(data []byte) DSL2Transaction {
 	l.EffectiveGasPricePercentage = data[0]
 	l.IsValid = data[1]
 	l.StateRoot = common.BytesToHash(data[2:34])
-	l.EncodedLength = binary.LittleEndian.Uint32(data[34:38])
+	l.EncodedLength = binary.BigEndian.Uint32(data[34:38])
 	l.Encoded = data[38:]
 	return l
 }
@@ -143,7 +156,7 @@ type DSL2BlockEnd struct {
 // Encode returns the encoded DSL2BlockEnd as a byte slice
 func (b DSL2BlockEnd) Encode() []byte {
 	bytes := make([]byte, 0)
-	bytes = binary.LittleEndian.AppendUint64(bytes, b.L2BlockNumber)
+	bytes = binary.BigEndian.AppendUint64(bytes, b.L2BlockNumber)
 	bytes = append(bytes, b.BlockHash[:]...)
 	bytes = append(bytes, b.StateRoot[:]...)
 	return bytes
@@ -151,7 +164,7 @@ func (b DSL2BlockEnd) Encode() []byte {
 
 // Decode decodes the DSL2BlockEnd from a byte slice
 func (b DSL2BlockEnd) Decode(data []byte) DSL2BlockEnd {
-	b.L2BlockNumber = binary.LittleEndian.Uint64(data[0:8])
+	b.L2BlockNumber = binary.BigEndian.Uint64(data[0:8])
 	b.BlockHash = common.BytesToHash(data[8:40])
 	b.StateRoot = common.BytesToHash(data[40:72])
 	return b
@@ -167,14 +180,14 @@ type DSBookMark struct {
 func (b DSBookMark) Encode() []byte {
 	bytes := make([]byte, 0)
 	bytes = append(bytes, b.Type)
-	bytes = binary.LittleEndian.AppendUint64(bytes, b.Value)
+	bytes = binary.BigEndian.AppendUint64(bytes, b.Value)
 	return bytes
 }
 
 // Decode decodes the DSBookMark from a byte slice
 func (b DSBookMark) Decode(data []byte) DSBookMark {
 	b.Type = data[0]
-	b.Value = binary.LittleEndian.Uint64(data[1:9])
+	b.Value = binary.BigEndian.Uint64(data[1:9])
 	return b
 }
 
@@ -185,29 +198,32 @@ type DSUpdateGER struct {
 	GlobalExitRoot common.Hash    // 32 bytes
 	Coinbase       common.Address // 20 bytes
 	ForkID         uint16         // 2 bytes
+	ChainID        uint32         // 4 bytes
 	StateRoot      common.Hash    // 32 bytes
 }
 
 // Encode returns the encoded DSUpdateGER as a byte slice
 func (g DSUpdateGER) Encode() []byte {
 	bytes := make([]byte, 0)
-	bytes = binary.LittleEndian.AppendUint64(bytes, g.BatchNumber)
-	bytes = binary.LittleEndian.AppendUint64(bytes, uint64(g.Timestamp))
+	bytes = binary.BigEndian.AppendUint64(bytes, g.BatchNumber)
+	bytes = binary.BigEndian.AppendUint64(bytes, uint64(g.Timestamp))
 	bytes = append(bytes, g.GlobalExitRoot[:]...)
 	bytes = append(bytes, g.Coinbase[:]...)
-	bytes = binary.LittleEndian.AppendUint16(bytes, g.ForkID)
+	bytes = binary.BigEndian.AppendUint16(bytes, g.ForkID)
+	bytes = binary.BigEndian.AppendUint32(bytes, g.ChainID)
 	bytes = append(bytes, g.StateRoot[:]...)
 	return bytes
 }
 
 // Decode decodes the DSUpdateGER from a byte slice
 func (g DSUpdateGER) Decode(data []byte) DSUpdateGER {
-	g.BatchNumber = binary.LittleEndian.Uint64(data[0:8])
-	g.Timestamp = int64(binary.LittleEndian.Uint64(data[8:16]))
+	g.BatchNumber = binary.BigEndian.Uint64(data[0:8])
+	g.Timestamp = int64(binary.BigEndian.Uint64(data[8:16]))
 	g.GlobalExitRoot = common.BytesToHash(data[16:48])
 	g.Coinbase = common.BytesToAddress(data[48:68])
-	g.ForkID = binary.LittleEndian.Uint16(data[68:70])
-	g.StateRoot = common.BytesToHash(data[70:102])
+	g.ForkID = binary.BigEndian.Uint16(data[68:70])
+	g.ChainID = binary.BigEndian.Uint32(data[70:74])
+	g.StateRoot = common.BytesToHash(data[74:106])
 	return g
 }
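
The added ChainID widens the DSUpdateGER payload from 102 to 106 bytes, which is why StateRoot moves from data[70:102] to data[74:106] in the Decode hunk above. A tiny sketch of the size arithmetic, with widths copied from the field comments:

```go
package main

import "fmt"

func main() {
	// Field widths in bytes, in wire order (ChainID is the new field).
	widths := []int{8 /*BatchNumber*/, 8 /*Timestamp*/, 32, /*GlobalExitRoot*/
		20 /*Coinbase*/, 2 /*ForkID*/, 4 /*ChainID*/, 32 /*StateRoot*/}
	total := 0
	for _, w := range widths {
		total += w
	}
	fmt.Println("DSUpdateGER payload:", total, "bytes") // 106
}
```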
 
@@ -226,12 +242,13 @@ type DSState interface {
 }
 
 // GenerateDataStreamerFile generates or resumes a data stream file
-func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.StreamServer, stateDB DSState, readWIPBatch bool, imStateRoots *map[uint64][]byte) error {
+func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.StreamServer, stateDB DSState, readWIPBatch bool, imStateRoots *map[uint64][]byte, chainID uint64, upgradeEtrogBatchNumber uint64) error {
 	header := streamServer.GetHeader()
 
 	var currentBatchNumber uint64 = 0
 	var lastAddedL2BlockNumber uint64 = 0
 	var lastAddedBatchNumber uint64 = 0
+	var previousTimestamp int64 = 0
 
 	if header.TotalEntries == 0 {
 		// Get Genesis block
@@ -266,12 +283,15 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 		}
 
 		genesisBlock := DSL2BlockStart{
-			BatchNumber:    genesisL2Block.BatchNumber,
-			L2BlockNumber:  genesisL2Block.L2BlockNumber,
-			Timestamp:      genesisL2Block.Timestamp,
-			GlobalExitRoot: genesisL2Block.GlobalExitRoot,
-			Coinbase:       genesisL2Block.Coinbase,
-			ForkID:         genesisL2Block.ForkID,
+			BatchNumber:     genesisL2Block.BatchNumber,
+			L2BlockNumber:   genesisL2Block.L2BlockNumber,
+			Timestamp:       genesisL2Block.Timestamp,
+			DeltaTimestamp:  0,
+			L1InfoTreeIndex: 0,
+			GlobalExitRoot:  genesisL2Block.GlobalExitRoot,
+			Coinbase:        genesisL2Block.Coinbase,
+			ForkID:          genesisL2Block.ForkID,
+			ChainID:         uint32(chainID),
 		}
 
 		log.Infof("Genesis block: %+v", genesisBlock)
@@ -308,11 +328,12 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 		switch latestEntry.Type {
 		case EntryTypeUpdateGER:
 			log.Info("Latest entry type is UpdateGER")
-			currentBatchNumber = binary.LittleEndian.Uint64(latestEntry.Data[0:8])
+			currentBatchNumber = binary.BigEndian.Uint64(latestEntry.Data[0:8])
 			currentBatchNumber++
 		case EntryTypeL2BlockEnd:
 			log.Info("Latest entry type is L2BlockEnd")
-			currentL2BlockNumber := binary.LittleEndian.Uint64(latestEntry.Data[0:8])
+			blockEnd := DSL2BlockEnd{}.Decode(latestEntry.Data)
+			currentL2BlockNumber := blockEnd.L2BlockNumber
 
 			bookMark := DSBookMark{
 				Type:  BookMarkTypeL2Block,
@@ -323,7 +344,11 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 			if err != nil {
 				return err
 			}
-			currentBatchNumber = binary.LittleEndian.Uint64(firstEntry.Data[0:8])
+
+			blockStart := DSL2BlockStart{}.Decode(firstEntry.Data)
+
+			currentBatchNumber = blockStart.BatchNumber
+			previousTimestamp = blockStart.Timestamp
 			lastAddedL2BlockNumber = currentL2BlockNumber
 		case EntryTypeBookMark:
 			log.Info("Latest entry type is BookMark")
@@ -335,7 +360,7 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 				log.Fatalf("Latest entry type is an unexpected bookmark type: %v", bookMark.Type)
 			}
 		default:
-			log.Fatalf("Latest entry type is not am expected one: %v", latestEntry.Type)
+			log.Fatalf("Latest entry type is not an expected one: %v", latestEntry.Type)
 		}
 	}
 
@@ -430,6 +455,7 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 						GlobalExitRoot: batch.GlobalExitRoot,
 						Coinbase:       batch.Coinbase,
 						ForkID:         batch.ForkID,
+						ChainID:        uint32(chainID),
 						StateRoot:      batch.StateRoot,
 					}
 
@@ -440,21 +466,22 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 					currentGER = batch.GlobalExitRoot
 				}
 			} else {
-				for blockIndex, l2block := range batch.L2Blocks {
-					if l2block.L2BlockNumber <= lastAddedL2BlockNumber && lastAddedL2BlockNumber != 0 {
+				for blockIndex, l2Block := range batch.L2Blocks {
+					if l2Block.L2BlockNumber <= lastAddedL2BlockNumber && lastAddedL2BlockNumber != 0 {
 						continue
 					} else {
-						lastAddedL2BlockNumber = l2block.L2BlockNumber
+						lastAddedL2BlockNumber = l2Block.L2BlockNumber
 					}
 
 					l1BlockHash := common.Hash{}
+					l1InfoTreeIndex := uint32(0)
 
 					// Get L1 block hash
-					if l2block.ForkID >= FORKID_ETROG {
+					if l2Block.ForkID >= FORKID_ETROG {
 						isForcedBatch := false
 						batchRawData := &BatchRawV2{}
 
-						if batch.BatchNumber == 1 || batch.ForcedBatchNum != nil {
+						if batch.BatchNumber == 1 || (upgradeEtrogBatchNumber != 0 && batch.BatchNumber == upgradeEtrogBatchNumber) || batch.ForcedBatchNum != nil {
 							isForcedBatch = true
 						} else {
 							batchRawData, err = DecodeBatchV2(batch.BatchL2Data)
@@ -467,6 +494,7 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 						if !isForcedBatch {
 							// Get current block by index
 							l2blockRaw := batchRawData.Blocks[blockIndex]
+							l1InfoTreeIndex = l2blockRaw.IndexL1InfoTree
 							if l2blockRaw.IndexL1InfoTree != 0 {
 								l1InfoTreeExitRootStorageEntry, err := stateDB.GetL1InfoRootLeafByIndex(ctx, l2blockRaw.IndexL1InfoTree, nil)
 								if err != nil {
@@ -476,7 +504,7 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 							}
 						} else {
 							// Initial batch must be handled differently
-							if batch.BatchNumber == 1 {
+							if batch.BatchNumber == 1 || (upgradeEtrogBatchNumber != 0 && batch.BatchNumber == upgradeEtrogBatchNumber) {
 								l1BlockHash, err = stateDB.GetVirtualBatchParentHash(ctx, batch.BatchNumber, nil)
 								if err != nil {
 									return err
@@ -491,15 +519,20 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 					}
 
 					blockStart := DSL2BlockStart{
-						BatchNumber:    l2block.BatchNumber,
-						L2BlockNumber:  l2block.L2BlockNumber,
-						Timestamp:      l2block.Timestamp,
-						L1BlockHash:    l1BlockHash,
-						GlobalExitRoot: l2block.GlobalExitRoot,
-						Coinbase:       l2block.Coinbase,
-						ForkID:         l2block.ForkID,
+						BatchNumber:     l2Block.BatchNumber,
+						L2BlockNumber:   l2Block.L2BlockNumber,
+						Timestamp:       l2Block.Timestamp,
+						DeltaTimestamp:  uint32(l2Block.Timestamp - previousTimestamp),
+						L1InfoTreeIndex: l1InfoTreeIndex,
+						L1BlockHash:     l1BlockHash,
+						GlobalExitRoot:  l2Block.GlobalExitRoot,
+						Coinbase:        l2Block.Coinbase,
+						ForkID:          l2Block.ForkID,
+						ChainID:         uint32(chainID),
 					}
 
+					previousTimestamp = l2Block.Timestamp
+
 					bookMark := DSBookMark{
 						Type:  BookMarkTypeL2Block,
 						Value: blockStart.L2BlockNumber,
@@ -521,11 +554,11 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 						return err
 					}
 
-					for _, tx := range l2block.Txs {
+					for _, tx := range l2Block.Txs {
 						// Populate intermediate state root
 						if imStateRoots == nil || (*imStateRoots)[blockStart.L2BlockNumber] == nil {
-							position := GetSystemSCPosition(l2block.L2BlockNumber)
-							imStateRoot, err := stateDB.GetStorageAt(ctx, common.HexToAddress(SystemSC), big.NewInt(0).SetBytes(position), l2block.StateRoot)
+							position := GetSystemSCPosition(l2Block.L2BlockNumber)
+							imStateRoot, err := stateDB.GetStorageAt(ctx, common.HexToAddress(SystemSC), big.NewInt(0).SetBytes(position), l2Block.StateRoot)
 							if err != nil {
 								return err
 							}
@@ -541,16 +574,16 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
 					}
 
 					blockEnd := DSL2BlockEnd{
-						L2BlockNumber: l2block.L2BlockNumber,
-						BlockHash:     l2block.BlockHash,
-						StateRoot:     l2block.StateRoot,
+						L2BlockNumber: l2Block.L2BlockNumber,
+						BlockHash:     l2Block.BlockHash,
+						StateRoot:     l2Block.StateRoot,
 					}
 
 					_, err = streamServer.AddStreamEntry(EntryTypeL2BlockEnd, blockEnd.Encode())
 					if err != nil {
 						return err
 					}
-					currentGER = l2block.GlobalExitRoot
+					currentGER = l2Block.GlobalExitRoot
 				}
 			}
 			// Commit at the end of each batch group
diff --git a/state/test/datastream_test.go b/state/test/datastream_test.go
index 87e8b16feb..8860cf9740 100644
--- a/state/test/datastream_test.go
+++ b/state/test/datastream_test.go
@@ -12,23 +12,30 @@ import (
 
 func TestL2BlockStartEncode(t *testing.T) {
 	l2BlockStart := state.DSL2BlockStart{
-		BatchNumber:    1,                           // 8 bytes
-		L2BlockNumber:  2,                           // 8 bytes
-		Timestamp:      3,                           // 8 bytes
-		L1BlockHash:    common.HexToHash("0x04"),    // 32 bytes
-		GlobalExitRoot: common.HexToHash("0x05"),    // 32 bytes
-		Coinbase:       common.HexToAddress("0x06"), // 20 bytes
-		ForkID:         5,
+		BatchNumber:     1,
+		L2BlockNumber:   2,
+		Timestamp:       3,
+		DeltaTimestamp:  4,
+		L1InfoTreeIndex: 5,
+		L1BlockHash:     common.HexToHash("0x06"),
+		GlobalExitRoot:  common.HexToHash("0x07"),
+		Coinbase:        common.HexToAddress("0x08"),
+		ForkID:          9,
+		ChainID:         10,
 	}
 
 	encoded := l2BlockStart.Encode()
-	expected := []byte{1, 0, 0, 0, 0, 0, 0, 0,
-		2, 0, 0, 0, 0, 0, 0, 0,
-		3, 0, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4,
-		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
-		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
-		5, 0}
+	expected := []byte{
+		0, 0, 0, 0, 0, 0, 0, 1,
+		0, 0, 0, 0, 0, 0, 0, 2,
+		0, 0, 0, 0, 0, 0, 0, 3,
+		0, 0, 0, 4,
+		0, 0, 0, 5,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,
+		0, 9,
+		0, 0, 0, 10}
 
 	assert.Equal(t, expected, encoded)
 }
@@ -43,7 +50,7 @@ func TestL2TransactionEncode(t *testing.T) {
 	}
 
 	encoded := l2Transaction.Encode()
-	expected := []byte{128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 5, 0, 0, 0, 1, 2, 3, 4, 5}
+	expected := []byte{128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 5, 1, 2, 3, 4, 5}
 	assert.Equal(t, expected, encoded)
 }
 
@@ -55,7 +62,7 @@ func TestL2BlockEndEncode(t *testing.T) {
 	}
 
 	encoded := l2BlockEnd.Encode()
-	expected := []byte{1, 0, 0, 0, 0, 0, 0, 0,
+	expected := []byte{0, 0, 0, 0, 0, 0, 0, 1,
 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,
 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}
 
diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml
index 822f21740c..f95df2d849 100644
--- a/test/config/test.node.config.toml
+++ b/test/config/test.node.config.toml
@@ -121,7 +121,9 @@ StateConsistencyCheckInterval = "5s"
 		Port = 6900
 		Filename = "/datastreamer/datastream.bin"
 		Version = 1
+		ChainID = 1337
 		Enabled = true
+		
 
 [SequenceSender]
 WaitPeriodSendSequence = "15s"
diff --git a/test/e2e/jsonrpc2_test.go b/test/e2e/jsonrpc2_test.go
index 80f5f5d18b..54b30e194a 100644
--- a/test/e2e/jsonrpc2_test.go
+++ b/test/e2e/jsonrpc2_test.go
@@ -280,6 +280,7 @@ func Test_RevertOnConstructorTransaction(t *testing.T) {
 
 		err = operations.WaitTxToBeMined(ctx, client, scTx, operations.DefaultTimeoutTxToBeMined)
 		errMsg := err.Error()
+		log.Debugf("Error: %v", errMsg)
 		prefix := "transaction has failed, reason: execution reverted: Today is not juernes"
 		hasPrefix := strings.HasPrefix(errMsg, prefix)
 		require.True(t, hasPrefix)
diff --git a/tools/datastreamer/config/config.go b/tools/datastreamer/config/config.go
index 7fe41d7518..0acb225cf9 100644
--- a/tools/datastreamer/config/config.go
+++ b/tools/datastreamer/config/config.go
@@ -46,6 +46,8 @@ type StreamServerCfg struct {
 	ChainID uint64 `mapstructure:"ChainID"`
 	// Log is the log configuration
 	Log log.Config `mapstructure:"Log"`
+	// UpgradeEtrogBatchNumber is the batch number of the upgrade etrog
+	UpgradeEtrogBatchNumber uint64 `mapstructure:"UpgradeEtrogBatchNumber"`
 }
 
 // Config is the configuration for the tool
diff --git a/tools/datastreamer/config/default.go b/tools/datastreamer/config/default.go
index dc2860b55f..74c6f35b24 100644
--- a/tools/datastreamer/config/default.go
+++ b/tools/datastreamer/config/default.go
@@ -11,6 +11,7 @@ Port = 6901
 Filename = "datastreamer.bin"
 Version = 1
 ChainID = 1440
+UpgradeEtrogBatchNumber = 0
 
 [StateDB]
 User = "state_user"
diff --git a/tools/datastreamer/config/tool.config.toml b/tools/datastreamer/config/tool.config.toml
index bb45f619fa..c497f3362f 100644
--- a/tools/datastreamer/config/tool.config.toml
+++ b/tools/datastreamer/config/tool.config.toml
@@ -7,6 +7,7 @@ Port = 6901
 Filename = "datastream.bin"
 Version = 1
 ChainID = 1440
+UpgradeEtrogBatchNumber = 0
 
 [StateDB]
 User = "state_user"
diff --git a/tools/datastreamer/main.go b/tools/datastreamer/main.go
index 9e38a5c19b..0321c4c21d 100644
--- a/tools/datastreamer/main.go
+++ b/tools/datastreamer/main.go
@@ -272,7 +272,7 @@ func generate(cliCtx *cli.Context) error {
 		}
 	}
 
-	err = state.GenerateDataStreamerFile(cliCtx.Context, streamServer, stateDB, false, &imStateRoots)
+	err = state.GenerateDataStreamerFile(cliCtx.Context, streamServer, stateDB, false, &imStateRoots, c.Offline.ChainID, c.Offline.UpgradeEtrogBatchNumber) // nolint:gomnd
 	if err != nil {
 		log.Error(err)
 		os.Exit(1)
@@ -420,16 +420,16 @@ func reprocess(cliCtx *cli.Context) error {
 		case state.EntryTypeUpdateGER:
 			printEntry(currentEntry)
 			processBatchRequest = &executor.ProcessBatchRequest{
-				OldBatchNum:      binary.LittleEndian.Uint64(currentEntry.Data[0:8]) - 1,
+				OldBatchNum:      binary.BigEndian.Uint64(currentEntry.Data[0:8]) - 1,
 				Coinbase:         common.Bytes2Hex(currentEntry.Data[48:68]),
 				BatchL2Data:      nil,
 				OldStateRoot:     previousStateRoot,
 				GlobalExitRoot:   currentEntry.Data[16:48],
 				OldAccInputHash:  []byte{},
-				EthTimestamp:     binary.LittleEndian.Uint64(currentEntry.Data[8:16]),
+				EthTimestamp:     binary.BigEndian.Uint64(currentEntry.Data[8:16]),
 				UpdateMerkleTree: uint32(1),
 				ChainId:          c.Offline.ChainID,
-				ForkId:           uint64(binary.LittleEndian.Uint16(currentEntry.Data[68:70])),
+				ForkId:           uint64(binary.BigEndian.Uint16(currentEntry.Data[68:70])),
 			}
 
 			expectedNewRoot = currentEntry.Data[70:102]
@@ -452,7 +452,7 @@ func reprocess(cliCtx *cli.Context) error {
 			}
 			printEntry(endEntry)
 
-			forkID := uint64(binary.LittleEndian.Uint16(startEntry.Data[76:78]))
+			forkID := uint64(binary.BigEndian.Uint16(startEntry.Data[76:78]))
 
 			tx, err := state.DecodeTx(common.Bytes2Hex((txEntry.Data[6:])))
 			if err != nil {
@@ -471,16 +471,16 @@ func reprocess(cliCtx *cli.Context) error {
 			}
 
 			processBatchRequest = &executor.ProcessBatchRequest{
-				OldBatchNum:      binary.LittleEndian.Uint64(startEntry.Data[0:8]) - 1,
+				OldBatchNum:      binary.BigEndian.Uint64(startEntry.Data[0:8]) - 1,
 				Coinbase:         common.Bytes2Hex(startEntry.Data[56:76]),
 				BatchL2Data:      batchL2Data,
 				OldStateRoot:     oldStateRoot,
 				GlobalExitRoot:   startEntry.Data[24:56],
 				OldAccInputHash:  []byte{},
-				EthTimestamp:     binary.LittleEndian.Uint64(startEntry.Data[16:24]),
+				EthTimestamp:     binary.BigEndian.Uint64(startEntry.Data[16:24]),
 				UpdateMerkleTree: uint32(1),
 				ChainId:          c.Offline.ChainID,
-				ForkId:           uint64(binary.LittleEndian.Uint16(startEntry.Data[76:78])),
+				ForkId:           uint64(binary.BigEndian.Uint16(startEntry.Data[76:78])),
 			}
 
 			expectedNewRoot = endEntry.Data[40:72]
@@ -757,6 +757,10 @@ func printEntry(entry datastreamer.FileEntry) {
 		printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.L2BlockNumber))
 		printColored(color.FgGreen, "Timestamp.......: ")
 		printColored(color.FgHiWhite, fmt.Sprintf("%v (%d)\n", time.Unix(blockStart.Timestamp, 0), blockStart.Timestamp))
+		printColored(color.FgGreen, "Delta Timestamp.: ")
+		printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.DeltaTimestamp))
+		printColored(color.FgGreen, "L1 InfoTree Idx.: ")
+		printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.L1InfoTreeIndex))
 		printColored(color.FgGreen, "L1 Block Hash...: ")
 		printColored(color.FgHiWhite, fmt.Sprintf("%s\n", blockStart.L1BlockHash))
 		printColored(color.FgGreen, "Global Exit Root: ")
@@ -765,6 +769,8 @@ func printEntry(entry datastreamer.FileEntry) {
 		printColored(color.FgHiWhite, fmt.Sprintf("%s\n", blockStart.Coinbase))
 		printColored(color.FgGreen, "Fork ID.........: ")
 		printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.ForkID))
+		printColored(color.FgGreen, "Chain ID........: ")
+		printColored(color.FgHiWhite, fmt.Sprintf("%d\n", blockStart.ChainID))
 	case state.EntryTypeL2Tx:
 		dsTx := state.DSL2Transaction{}.Decode(entry.Data)
 		printColored(color.FgGreen, "Entry Type......: ")
@@ -827,6 +833,8 @@ func printEntry(entry datastreamer.FileEntry) {
 		printColored(color.FgHiWhite, fmt.Sprintf("%s\n", updateGer.Coinbase))
 		printColored(color.FgGreen, "Fork ID.........: ")
 		printColored(color.FgHiWhite, fmt.Sprintf("%d\n", updateGer.ForkID))
+		printColored(color.FgGreen, "Chain ID........: ")
+		printColored(color.FgHiWhite, fmt.Sprintf("%d\n", updateGer.ChainID))
 		printColored(color.FgGreen, "State Root......: ")
 		printColored(color.FgHiWhite, fmt.Sprint(updateGer.StateRoot.Hex()+"\n"))
 	}

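Note on the encoding change above: the data stream entries switch from little-endian to big-endian and DSL2BlockStart gains DeltaTimestamp, L1InfoTreeIndex and ChainID fields, so a block-start record is now a fixed-width, 122-byte big-endian concatenation (as asserted by the updated TestL2BlockStartEncode). The following standalone Go sketch mirrors that byte layout for illustration only; the local blockStart type and encode helper are hypothetical stand-ins, not the repository's DSL2BlockStart.Encode.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// blockStart is a hypothetical local copy of the DSL2BlockStart fields after this patch.
type blockStart struct {
	BatchNumber     uint64
	L2BlockNumber   uint64
	Timestamp       int64
	DeltaTimestamp  uint32
	L1InfoTreeIndex uint32
	L1BlockHash     common.Hash
	GlobalExitRoot  common.Hash
	Coinbase        common.Address
	ForkID          uint16
	ChainID         uint32
}

// encode appends the fields in the big-endian, fixed-width order asserted by the test.
func (b blockStart) encode() []byte {
	out := make([]byte, 0, 122)
	out = binary.BigEndian.AppendUint64(out, b.BatchNumber)
	out = binary.BigEndian.AppendUint64(out, b.L2BlockNumber)
	out = binary.BigEndian.AppendUint64(out, uint64(b.Timestamp))
	out = binary.BigEndian.AppendUint32(out, b.DeltaTimestamp)
	out = binary.BigEndian.AppendUint32(out, b.L1InfoTreeIndex)
	out = append(out, b.L1BlockHash[:]...)
	out = append(out, b.GlobalExitRoot[:]...)
	out = append(out, b.Coinbase[:]...)
	out = binary.BigEndian.AppendUint16(out, b.ForkID)
	out = binary.BigEndian.AppendUint32(out, b.ChainID)
	return out
}

func main() {
	b := blockStart{BatchNumber: 1, L2BlockNumber: 2, Timestamp: 3, DeltaTimestamp: 4, L1InfoTreeIndex: 5, ForkID: 9, ChainID: 10}
	fmt.Println(len(b.encode())) // 122 bytes per block-start entry
}
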
From b47986abc1edfff567f5f95a81b14a1733443e4d Mon Sep 17 00:00:00 2001
From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com>
Date: Fri, 26 Jan 2024 23:57:01 +0100
Subject: [PATCH 36/54] set stateManagerPurge to false in Cardona (#3158)

---
 config/environments/cardona/prover.config.json | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/config/environments/cardona/prover.config.json b/config/environments/cardona/prover.config.json
index 15c999b37b..3f433246b8 100644
--- a/config/environments/cardona/prover.config.json
+++ b/config/environments/cardona/prover.config.json
@@ -3,6 +3,8 @@
     "runProverServerMock": false,
     "runProverClient": false,
 
+    "stateManagerPurge": false,
+
     "runExecutorServer": true,
     "runExecutorClient": false,
     "runExecutorClientMultithread": false,

From eea3833ee0d2ea579f49db2318949de89f66e33d Mon Sep 17 00:00:00 2001
From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com>
Date: Sat, 27 Jan 2024 13:09:33 +0100
Subject: [PATCH 37/54] synchronizer: fix empty batch case and unittest (#3159)

* fix empty batch case and unittest

* fix error in conversion

* fix unittest
---
 synchronizer/common/converters.go             |  2 +-
 .../executor_trusted_batch_sync.go            |  2 +-
 .../executor_trusted_batch_sync_test.go       | 48 +++++++++++++++++--
 3 files changed, 47 insertions(+), 5 deletions(-)

diff --git a/synchronizer/common/converters.go b/synchronizer/common/converters.go
index 116f1f3076..d0a31083ca 100644
--- a/synchronizer/common/converters.go
+++ b/synchronizer/common/converters.go
@@ -18,7 +18,7 @@ func RpcBatchToStateBatch(rpcBatch *types.Batch) *state.Batch {
 		StateRoot:      rpcBatch.StateRoot,
 		BatchL2Data:    rpcBatch.BatchL2Data,
 		GlobalExitRoot: rpcBatch.GlobalExitRoot,
-		LocalExitRoot:  rpcBatch.MainnetExitRoot,
+		LocalExitRoot:  rpcBatch.LocalExitRoot,
 		Timestamp:      time.Unix(int64(rpcBatch.Timestamp), 0),
 		WIP:            !rpcBatch.Closed,
 	}
diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
index ae330fe628..e74424fa0e 100644
--- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
+++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
@@ -127,7 +127,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) CreateEmptyBatch(ctx context.Context,
 // FullProcess process a batch that is not on database, so is the first time we process it
 func (b *SyncTrustedBatchExecutorForEtrog) FullProcess(ctx context.Context, data *l2_shared.ProcessData, dbTx pgx.Tx) (*l2_shared.ProcessResponse, error) {
 	log.Debugf("%s FullProcess", data.DebugPrefix)
-	if len(data.TrustedBatch.BatchL2Data) == 0 && data.BatchMustBeClosed {
+	if len(data.TrustedBatch.BatchL2Data) == 0 && !data.BatchMustBeClosed {
 		return b.CreateEmptyBatch(ctx, data, dbTx)
 	}
 	err := b.checkIfWeAreSyncedFromL1ToProcessGlobalExitRoot(ctx, data, dbTx)
diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
index 342430b313..c62a95ae02 100644
--- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
+++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
@@ -182,14 +182,16 @@ func TestNothingProcessDoesntMatchBatchReprocess(t *testing.T) {
 		StateBatch: &state.Batch{
 			BatchNumber: 123,
 			StateRoot:   common.HexToHash(hashExamplesValues[1]),
+			BatchL2Data: []byte{1, 2, 3, 4},
 			WIP:         true,
 		},
 		TrustedBatch: &types.Batch{
-			Number:    123,
-			StateRoot: common.HexToHash(hashExamplesValues[0]),
+			Number:      123,
+			StateRoot:   common.HexToHash(hashExamplesValues[0]),
+			BatchL2Data: []byte{1, 2, 3, 4},
 		},
 		PreviousStateBatch: &state.Batch{
-			BatchNumber: 123,
+			BatchNumber: 122,
 			StateRoot:   common.HexToHash(hashExamplesValues[2]),
 		},
 	}
@@ -261,3 +263,43 @@ func TestCloseBatchGivenAlreadyClosedAndTheDataAreRightThenNoError(t *testing.T)
 	res := testData.sut.CloseBatch(testData.ctx, data.TrustedBatch, nil, "test")
 	require.NoError(t, res)
 }
+
+func TestEmptyBatch(t *testing.T) {
+	testData := newTestData(t)
+	// Arrange
+	expectedBatch := state.Batch{
+		BatchNumber:    123,
+		Coinbase:       common.HexToAddress("0x01"),
+		StateRoot:      common.HexToHash("0x02"),
+		GlobalExitRoot: common.HexToHash("0x03"),
+		LocalExitRoot:  common.HexToHash("0x04"),
+		Timestamp:      time.Now().Truncate(time.Second),
+		WIP:            true,
+	}
+	data := l2_shared.ProcessData{
+		BatchNumber:       123,
+		Mode:              l2_shared.FullProcessMode,
+		BatchMustBeClosed: false,
+		DebugPrefix:       "test",
+		StateBatch:        nil,
+		TrustedBatch: &types.Batch{
+			Number:         123,
+			Coinbase:       expectedBatch.Coinbase,
+			StateRoot:      expectedBatch.StateRoot,
+			GlobalExitRoot: expectedBatch.GlobalExitRoot,
+			LocalExitRoot:  expectedBatch.LocalExitRoot,
+			Timestamp:      (types.ArgUint64)(expectedBatch.Timestamp.Unix()),
+			Closed:         false,
+		},
+	}
+	testData.stateMock.EXPECT().OpenBatch(testData.ctx, mock.Anything, mock.Anything).Return(nil).Once()
+	testData.stateMock.EXPECT().UpdateWIPBatch(testData.ctx, mock.Anything, mock.Anything).Return(nil).Once()
+
+	response, err := testData.sut.FullProcess(testData.ctx, &data, nil)
+	require.NoError(t, err)
+	require.Equal(t, false, response.ClearCache)
+	require.Equal(t, false, response.UpdateBatchWithProcessBatchResponse)
+	require.Equal(t, true, response.UpdateBatch.WIP)
+	require.Equal(t, 0, len(response.UpdateBatch.BatchL2Data))
+	require.Equal(t, expectedBatch, *response.UpdateBatch)
+}

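The fix above is easy to misread: a trusted batch with no BatchL2Data is now routed to CreateEmptyBatch only when the batch does not have to be closed (the condition flips from BatchMustBeClosed to !BatchMustBeClosed), and the converter now maps LocalExitRoot instead of MainnetExitRoot. A minimal sketch of the corrected branching, using a hypothetical processData type in place of l2_shared.ProcessData:

package main

import "fmt"

// processData is a hypothetical stand-in for the fields of l2_shared.ProcessData used here.
type processData struct {
	BatchL2Data       []byte
	BatchMustBeClosed bool
}

// fullProcessPath mirrors the fixed condition: only open an empty WIP batch when
// there is no batch data and the batch does not have to be closed.
func fullProcessPath(d processData) string {
	if len(d.BatchL2Data) == 0 && !d.BatchMustBeClosed {
		return "CreateEmptyBatch" // open an empty, still-WIP batch
	}
	return "process batch data" // regular execution path
}

func main() {
	fmt.Println(fullProcessPath(processData{}))                          // CreateEmptyBatch
	fmt.Println(fullProcessPath(processData{BatchMustBeClosed: true}))   // process batch data
	fmt.Println(fullProcessPath(processData{BatchL2Data: []byte{0x0b}})) // process batch data
}
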
From 7497d07cfb2dc3b4da9eca9feb09156e84f7d132 Mon Sep 17 00:00:00 2001
From: agnusmor <100322135+agnusmor@users.noreply.github.com>
Date: Sat, 27 Jan 2024 14:21:17 +0100
Subject: [PATCH 38/54] update prover image to v4.0.1 (#3160)

---
 docker-compose.yml      | 2 +-
 test/docker-compose.yml | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index 73a4372685..2e2ca9c55e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -107,7 +107,7 @@ services:
   zkevm-prover:
     container_name: zkevm-prover
     restart: unless-stopped
-    image: hermeznetwork/zkevm-prover:v4.0.0
+    image: hermeznetwork/zkevm-prover:v4.0.1
     depends_on:
       zkevm-state-db:
         condition: service_healthy
diff --git a/test/docker-compose.yml b/test/docker-compose.yml
index 16eb52a75a..8c6270ecd2 100644
--- a/test/docker-compose.yml
+++ b/test/docker-compose.yml
@@ -513,7 +513,7 @@ services:
 
   zkevm-prover:
     container_name: zkevm-prover
-    image: hermeznetwork/zkevm-prover:v4.0.0
+    image: hermeznetwork/zkevm-prover:v4.0.1
     ports:
       - 50061:50061 # MT
       - 50071:50071 # Executor
@@ -602,7 +602,7 @@ services:
 
   zkevm-permissionless-prover:
     container_name: zkevm-permissionless-prover
-    image: hermeznetwork/zkevm-prover:v4.0.0
+    image: hermeznetwork/zkevm-prover:v4.0.1
     ports:
       # - 50058:50058 # Prover
       - 50059:50052 # Mock prover

From 12022c2c9604f28b221e83601c715ca8c5855c64 Mon Sep 17 00:00:00 2001
From: agnusmor <100322135+agnusmor@users.noreply.github.com>
Date: Sat, 27 Jan 2024 19:43:31 +0100
Subject: [PATCH 39/54] update prover image to v4.0.2 (#3162)

---
 docker-compose.yml      | 2 +-
 test/docker-compose.yml | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index 2e2ca9c55e..77f49766cf 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -107,7 +107,7 @@ services:
   zkevm-prover:
     container_name: zkevm-prover
     restart: unless-stopped
-    image: hermeznetwork/zkevm-prover:v4.0.1
+    image: hermeznetwork/zkevm-prover:v4.0.2
     depends_on:
       zkevm-state-db:
         condition: service_healthy
diff --git a/test/docker-compose.yml b/test/docker-compose.yml
index 8c6270ecd2..13ae22f73b 100644
--- a/test/docker-compose.yml
+++ b/test/docker-compose.yml
@@ -513,7 +513,7 @@ services:
 
   zkevm-prover:
     container_name: zkevm-prover
-    image: hermeznetwork/zkevm-prover:v4.0.1
+    image: hermeznetwork/zkevm-prover:v4.0.2
     ports:
       - 50061:50061 # MT
       - 50071:50071 # Executor
@@ -602,7 +602,7 @@ services:
 
   zkevm-permissionless-prover:
     container_name: zkevm-permissionless-prover
-    image: hermeznetwork/zkevm-prover:v4.0.1
+    image: hermeznetwork/zkevm-prover:v4.0.2
     ports:
       # - 50058:50058 # Prover
       - 50059:50052 # Mock prover

From a2964847ff7664d04a0112f6537fcf43f1774c23 Mon Sep 17 00:00:00 2001
From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com>
Date: Sat, 27 Jan 2024 22:56:51 +0100
Subject: [PATCH 40/54] Add a note in deployment instructions to restart rpc
 after forkId 7 (#3163)

* note to restart zkevm-rpc

* update comment
---
 docs/production-setup.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/production-setup.md b/docs/production-setup.md
index 50480a68ed..9c4481789f 100644
--- a/docs/production-setup.md
+++ b/docs/production-setup.md
@@ -61,6 +61,11 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker
 If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing:
 
 `curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545`
+10. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`)
+
+`docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`
+
+> **NOTE**: You only need to restart one time
 
 ## Troubleshooting
 

From 5dfe003d68f1e02e2b6f6a269274759967c514ad Mon Sep 17 00:00:00 2001
From: agnusmor 
Date: Sat, 27 Jan 2024 23:13:12 +0100
Subject: [PATCH 41/54] change position of point 10

---
 docs/production-setup.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/production-setup.md b/docs/production-setup.md
index 9c4481789f..96407f9321 100644
--- a/docs/production-setup.md
+++ b/docs/production-setup.md
@@ -57,13 +57,13 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker
    3. zkevm-state-db
    4. zkevm-pool-db
    5. zkevm-prover
+10. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`)
+`docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`
 
 If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing:
 
 `curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545`
-10. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`)
 
-`docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`
 
 > **NOTE**: You only need to restart one time
 

From 479ba46a12973d13923237cf0dfdca7ed69cdbcd Mon Sep 17 00:00:00 2001
From: agnusmor 
Date: Sat, 27 Jan 2024 23:14:21 +0100
Subject: [PATCH 42/54] add point 11

---
 docs/production-setup.md | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/docs/production-setup.md b/docs/production-setup.md
index 96407f9321..d5e739bca5 100644
--- a/docs/production-setup.md
+++ b/docs/production-setup.md
@@ -57,12 +57,11 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker
    3. zkevm-state-db
    4. zkevm-pool-db
    5. zkevm-prover
-10. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`)
+10. If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing:
+`curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545`
+11. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`)
 `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`
 
-If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing:
-
-`curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545`
 
 
 > **NOTE**: You only need to restart one time

From 9e2a95dbadda0f39655c8483ee036aba81033d78 Mon Sep 17 00:00:00 2001
From: agnusmor 
Date: Sat, 27 Jan 2024 23:15:40 +0100
Subject: [PATCH 43/54] add lines

---
 docs/production-setup.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/production-setup.md b/docs/production-setup.md
index d5e739bca5..14c1a0de84 100644
--- a/docs/production-setup.md
+++ b/docs/production-setup.md
@@ -58,8 +58,11 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker
    4. zkevm-pool-db
    5. zkevm-prover
 10. If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing:
+
 `curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545`
-11. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`)
+
+11. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`):
+
 `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`
 
 

From b216df0d443a05987af10e346e1e4e24e97a9804 Mon Sep 17 00:00:00 2001
From: agnusmor 
Date: Sat, 27 Jan 2024 23:16:47 +0100
Subject: [PATCH 44/54] add breakline

---
 docs/production-setup.md | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/docs/production-setup.md b/docs/production-setup.md
index 14c1a0de84..551ff5b8ce 100644
--- a/docs/production-setup.md
+++ b/docs/production-setup.md
@@ -57,11 +57,9 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker
    3. zkevm-state-db
    4. zkevm-pool-db
    5. zkevm-prover
-10. If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing:
-
-`curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545`
-
-11. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`):
+10. If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing: 
+`curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545` 
+11. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`): 
 
 `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`
 

From f5bc98307977411e409d4d03c4f9b5bbfb41d8cc Mon Sep 17 00:00:00 2001
From: agnusmor 
Date: Sat, 27 Jan 2024 23:18:21 +0100
Subject: [PATCH 45/54] add lines

---
 docs/production-setup.md | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/docs/production-setup.md b/docs/production-setup.md
index 551ff5b8ce..8ff9d8f4f4 100644
--- a/docs/production-setup.md
+++ b/docs/production-setup.md
@@ -58,15 +58,12 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker
    4. zkevm-pool-db
    5. zkevm-prover
 10. If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing: 
+
 `curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545` 
 11. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`): 
 
 `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`
 
-
-
-> **NOTE**: You only need to restart one time
-
 ## Troubleshooting
 
 - It's possible that the machine you're using already uses some of the necessary ports. In this case you can change them directly at `$ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml`

From 87bb73154ba3d7dff8a79c085bfbff24335b1b13 Mon Sep 17 00:00:00 2001
From: agnusmor 
Date: Sat, 27 Jan 2024 23:18:47 +0100
Subject: [PATCH 46/54] add line

---
 docs/production-setup.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/production-setup.md b/docs/production-setup.md
index 8ff9d8f4f4..a1565b0674 100644
--- a/docs/production-setup.md
+++ b/docs/production-setup.md
@@ -60,6 +60,7 @@ docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker
 10. If everything has gone as expected you should be able to run queries to the JSON RPC at `http://localhost:8545`. For instance you can run the following query that fetches the latest synchronized L2 block, if you call this every few seconds, you should see the number increasing: 
 
 `curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":83}' http://localhost:8545` 
+
 11. Restart zkevm-rpc when the permission less node is full synced for first time (you could use the next endpoint of the rpc to check if the node is synced: `eth_syncing`): 
 
 `docker compose --env-file $ZKEVM_CONFIG_DIR/.env -f $ZKEVM_DIR/$ZKEVM_NET/docker-compose.yml restart zkevm-rpc`

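The instructions above keep the sync check manual via curl and eth_syncing. If you prefer to script the wait before restarting zkevm-rpc, something along the lines of the sketch below should work against the same endpoint; the URL and polling interval are assumptions, and go-ethereum's ethclient is used only for convenience:

package main

import (
	"context"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumed local JSON-RPC endpoint of the permissionless node.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	for {
		// eth_syncing reports nil progress once the node is fully synced.
		progress, err := client.SyncProgress(context.Background())
		if err != nil {
			log.Printf("eth_syncing failed: %v", err)
		} else if progress == nil {
			log.Println("node is synced; safe to restart zkevm-rpc (only needed once)")
			return
		}
		time.Sleep(30 * time.Second)
	}
}
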
From fd05d50a2d190a0429e09464a4d70a1f19b599ea Mon Sep 17 00:00:00 2001
From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com>
Date: Mon, 29 Jan 2024 10:17:59 +0100
Subject: [PATCH 47/54] EncodeBatchV2 allow to set Tx in binary (#3143)

* EncodeBatchV2 allows setting Tx in binary
* bump actions/setup-go to v5 to avoid using node 16
---
 .github/workflows/jsonschema.yml        |   4 +-
 .github/workflows/lint.yml              |   4 +-
 .github/workflows/release.yml           |   2 +-
 .github/workflows/test-e2e.yml          |   4 +-
 .github/workflows/test-from-prover.yml  |   4 +-
 .github/workflows/test-full-non-e2e.yml |   4 +-
 state/batchV2.go                        |  11 ++
 state/encoding_batch_v2.go              | 166 +++++++++++++++++-------
 state/encoding_batch_v2_test.go         |  50 ++++++-
 state/helper.go                         |   6 +-
 state/interfaces.go                     |   1 -
 state/pgstatestorage/batch.go           |  20 ---
 state/test/forkid_etrog/etrog_test.go   |   8 +-
 synchronizer/synchronizer_test.go       |   8 +-
 14 files changed, 195 insertions(+), 97 deletions(-)

diff --git a/.github/workflows/jsonschema.yml b/.github/workflows/jsonschema.yml
index 003263e725..cd04090435 100644
--- a/.github/workflows/jsonschema.yml
+++ b/.github/workflows/jsonschema.yml
@@ -19,14 +19,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout code
-      uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 #v3
+      uses: actions/checkout@v4
       # https://github.com/actions/checkout#Checkout-pull-request-HEAD-commit-instead-of-merge-commit
       # Checkout pull request HEAD commit instead of merge commit
       with:
         ref: ${{ github.event.pull_request.head.sha }}
 
     - name: Install Go
-      uses: actions/setup-go@v3
+      uses: actions/setup-go@v5
       with:
         go-version: ${{ matrix.go-version }}
       env:
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index b722b6b05a..c07f5ed505 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -13,11 +13,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: 1.21.x
       - name: Checkout code
-        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+        uses: actions/checkout@v4
       - name: Lint
         run: |
           make install-linter
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index f72164cc09..cb4f6881a2 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -14,7 +14,7 @@ jobs:
           fetch-depth: 0
 
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: 1.21
 
diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml
index a04d80f335..4f499053c5 100644
--- a/.github/workflows/test-e2e.yml
+++ b/.github/workflows/test-e2e.yml
@@ -21,10 +21,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout code
-      uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
+      uses: actions/checkout@v4
 
     - name: Install Go
-      uses: actions/setup-go@v3
+      uses: actions/setup-go@v5
       with:
         go-version: ${{ matrix.go-version }}
       env:
diff --git a/.github/workflows/test-from-prover.yml b/.github/workflows/test-from-prover.yml
index eb4b07871b..4f423fbcfd 100644
--- a/.github/workflows/test-from-prover.yml
+++ b/.github/workflows/test-from-prover.yml
@@ -23,12 +23,12 @@ jobs:
 
     steps:
     - name: Checkout code
-      uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 #v3
+      uses: actions/checkout@v4
       with:
         repository: 0xPolygonHermez/zkevm-node
 
     - name: Install Go
-      uses: actions/setup-go@v3
+      uses: actions/setup-go@v5
       with:
         go-version: ${{ matrix.go-version }}
       env:
diff --git a/.github/workflows/test-full-non-e2e.yml b/.github/workflows/test-full-non-e2e.yml
index 237b03c749..163c5052f7 100644
--- a/.github/workflows/test-full-non-e2e.yml
+++ b/.github/workflows/test-full-non-e2e.yml
@@ -19,10 +19,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout code
-      uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
+      uses: actions/checkout@v4
 
     - name: Install Go
-      uses: actions/setup-go@v3
+      uses: actions/setup-go@v5
       with:
         go-version: ${{ matrix.go-version }}
       env:
diff --git a/state/batchV2.go b/state/batchV2.go
index d93ec7daf8..0079491ea1 100644
--- a/state/batchV2.go
+++ b/state/batchV2.go
@@ -409,3 +409,14 @@ func (s *State) ProcessAndStoreClosedBatchV2(ctx context.Context, processingCtx
 		BatchL2Data:   *BatchL2Data,
 	}, dbTx)
 }
+
+// BuildChangeL2Block returns a changeL2Block tx to use in the BatchL2Data
+func (p *State) BuildChangeL2Block(deltaTimestamp uint32, l1InfoTreeIndex uint32) []byte {
+	l2block := ChangeL2BlockHeader{
+		DeltaTimestamp:  deltaTimestamp,
+		IndexL1InfoTree: l1InfoTreeIndex,
+	}
+	var data []byte
+	data = l2block.Encode(data)
+	return data
+}
diff --git a/state/encoding_batch_v2.go b/state/encoding_batch_v2.go
index d8a9a90649..750ff8ab72 100644
--- a/state/encoding_batch_v2.go
+++ b/state/encoding_batch_v2.go
@@ -2,7 +2,13 @@
 This file provide functions to work with ETROG batches:
 - EncodeBatchV2 (equivalent to EncodeTransactions)
 - DecodeBatchV2 (equivalent to DecodeTxs)
-- DecodeForcedBatchV2)
+- DecodeForcedBatchV2
+
+Also provides a builder class to create batches (BatchV2Encoder):
+ This approach does not perform any validation, so it is more flexible, but you need to know what you are doing
+ - `builder := NewBatchV2Encoder()` : creates a new `BatchV2Encoder`
+ - Call `AddBlockHeader` or `AddTransaction` to add a block header or a transaction as needed
+ - Call `GetResult` to get the batch data
 
 
 // batch data format:
@@ -27,11 +33,25 @@ This file provide functions to work with ETROG batches:
 // 0x00							   | 32 | V
 // 0x00							   | 1  | efficiencyPercentage
 // Repeat Transaction
+//
+// Usage:
+// There are two ways to use this module: direct calls or a builder class:
+// 1) Direct calls:
+// - EncodeBatchV2: Encode a batch of transactions
+// - DecodeBatchV2: Decode a batch of transactions
+//
+// 2) Builder class:
+//  This approach does not perform any validation, so it is more flexible, but you need to know what you are doing
+// - builder := NewBatchV2Encoder(): Create a new BatchV2Encoder
+//    - Call `AddBlockHeader` or `AddTransaction` to add a block header or a transaction as needed
+//    - Call `GetResult` to get the batch data
+
 */
 
 package state
 
 import (
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"strconv"
@@ -42,11 +62,16 @@ import (
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
-// L2BlockRaw is the raw representation of a L2 block.
-type L2BlockRaw struct {
+// ChangeL2BlockHeader is the header of a L2 block.
+type ChangeL2BlockHeader struct {
 	DeltaTimestamp  uint32
 	IndexL1InfoTree uint32
-	Transactions    []L2TxRaw
+}
+
+// L2BlockRaw is the raw representation of a L2 block.
+type L2BlockRaw struct {
+	ChangeL2BlockHeader
+	Transactions []L2TxRaw
 }
 
 // BatchRawV2 is the  representation of a batch of transactions.
@@ -61,12 +86,15 @@ type ForcedBatchRawV2 struct {
 
 // L2TxRaw is the raw representation of a L2 transaction  inside a L2 block.
 type L2TxRaw struct {
-	Tx                   types.Transaction
-	EfficiencyPercentage uint8
+	EfficiencyPercentage uint8             // valid always
+	TxAlreadyEncoded     bool              // If true the tx is already encoded (data field is used)
+	Tx                   types.Transaction // valid if TxAlreadyEncoded == false
+	Data                 []byte            // valid if TxAlreadyEncoded == true
 }
 
 const (
 	changeL2Block = uint8(0x0b)
+	sizeUInt32    = 4
 )
 
 var (
@@ -92,57 +120,88 @@ func (b *BatchRawV2) String() string {
 
 // EncodeBatchV2 encodes a batch of transactions into a byte slice.
 func EncodeBatchV2(batch *BatchRawV2) ([]byte, error) {
-	var err error
-	var batchData []byte
 	if batch == nil {
 		return nil, fmt.Errorf("batch is nil: %w", ErrInvalidBatchV2)
 	}
-	blocks := batch.Blocks
-	if len(blocks) == 0 {
+	if len(batch.Blocks) == 0 {
 		return nil, fmt.Errorf("a batch need minimum a L2Block: %w", ErrInvalidBatchV2)
 	}
-	for _, block := range blocks {
-		batchData, err = EncodeBlockHeaderV2(batchData, block)
+
+	encoder := NewBatchV2Encoder()
+	for _, block := range batch.Blocks {
+		encoder.AddBlockHeader(block.ChangeL2BlockHeader)
+		err := encoder.AddTransactions(block.Transactions)
 		if err != nil {
-			return nil, fmt.Errorf("can't encode block header: %w", err)
+			return nil, fmt.Errorf("can't encode tx: %w", err)
 		}
-		for _, tx := range block.Transactions {
-			batchData, err = encodeTxRLP(batchData, tx)
-			if err != nil {
-				return nil, fmt.Errorf("can't encode tx: %w", err)
-			}
+	}
+	return encoder.GetResult(), nil
+}
+
+// BatchV2Encoder is a builder for the BatchL2Data used by EncodeBatchV2
+type BatchV2Encoder struct {
+	batchData []byte
+}
+
+// NewBatchV2Encoder creates a new BatchV2Encoder.
+func NewBatchV2Encoder() *BatchV2Encoder {
+	return &BatchV2Encoder{}
+}
+
+// AddBlockHeader adds a block header to the batch.
+func (b *BatchV2Encoder) AddBlockHeader(l2BlockHeader ChangeL2BlockHeader) {
+	b.batchData = l2BlockHeader.Encode(b.batchData)
+}
+
+// AddTransactions adds a set of transactions to the batch.
+func (b *BatchV2Encoder) AddTransactions(transactions []L2TxRaw) error {
+	for _, tx := range transactions {
+		err := b.AddTransaction(tx)
+		if err != nil {
+			return fmt.Errorf("can't encode tx: %w", err)
 		}
 	}
-	return batchData, nil
+	return nil
+}
+
+// AddTransaction adds a transaction to the batch.
+func (b *BatchV2Encoder) AddTransaction(transaction L2TxRaw) error {
+	var err error
+	b.batchData, err = transaction.Encode(b.batchData)
+	if err != nil {
+		return fmt.Errorf("can't encode tx: %w", err)
+	}
+	return nil
 }
 
-// EncodeBlockHeaderV2 encodes a batch of l2blocks header into a byte slice.
-func EncodeBlockHeaderV2(batchData []byte, block L2BlockRaw) ([]byte, error) {
+// GetResult returns the batch data.
+func (b *BatchV2Encoder) GetResult() []byte {
+	return b.batchData
+}
+
+// Encode encodes a batch of l2blocks header into a byte slice.
+func (c ChangeL2BlockHeader) Encode(batchData []byte) []byte {
 	batchData = append(batchData, changeL2Block)
-	batchData = append(batchData, serializeUint32(block.DeltaTimestamp)...)
-	batchData = append(batchData, serializeUint32(block.IndexL1InfoTree)...)
-	return batchData, nil
+	batchData = append(batchData, encodeUint32(c.DeltaTimestamp)...)
+	batchData = append(batchData, encodeUint32(c.IndexL1InfoTree)...)
+	return batchData
 }
 
-func encodeTxRLP(batchData []byte, tx L2TxRaw) ([]byte, error) {
-	rlpTx, err := prepareRPLTxData(tx.Tx)
-	if err != nil {
-		return nil, fmt.Errorf("can't encode tx to RLP: %w", err)
+// Encode encodes a transaction into a byte slice.
+func (tx L2TxRaw) Encode(batchData []byte) ([]byte, error) {
+	if tx.TxAlreadyEncoded {
+		batchData = append(batchData, tx.Data...)
+	} else {
+		rlpTx, err := prepareRLPTxData(tx.Tx)
+		if err != nil {
+			return nil, fmt.Errorf("can't encode tx to RLP: %w", err)
+		}
+		batchData = append(batchData, rlpTx...)
 	}
-	batchData = append(batchData, rlpTx...)
 	batchData = append(batchData, tx.EfficiencyPercentage)
 	return batchData, nil
 }
 
-func serializeUint32(value uint32) []byte {
-	return []byte{
-		byte(value >> 24), // nolint:gomnd
-		byte(value >> 16), // nolint:gomnd
-		byte(value >> 8),  // nolint:gomnd
-		byte(value),
-	} // nolint:gomnd
-}
-
 // DecodeBatchV2 decodes a batch of transactions from a byte slice.
 func DecodeBatchV2(txsData []byte) (*BatchRawV2, error) {
 	// The transactions is not RLP encoded. Is the raw bytes in this form: 1 byte for the transaction type (always 0b for changeL2Block) + 4 bytes for deltaTimestamp + for bytes for indexL1InfoTree
@@ -164,7 +223,7 @@ func DecodeBatchV2(txsData []byte) (*BatchRawV2, error) {
 		// is a tx
 		default:
 			if currentBlock == nil {
-				_, _, err := decodeTxRLP(txsData, pos)
+				_, _, err := DecodeTxRLP(txsData, pos)
 				if err == nil {
 					// There is no changeL2Block but have a valid RLP transaction
 					return nil, ErrBatchV2DontStartWithChangeL2Block
@@ -174,7 +233,7 @@ func DecodeBatchV2(txsData []byte) (*BatchRawV2, error) {
 				}
 			}
 			var tx *L2TxRaw
-			pos, tx, err = decodeTxRLP(txsData, pos)
+			pos, tx, err = DecodeTxRLP(txsData, pos)
 			if err != nil {
 				return nil, fmt.Errorf("can't decode transactions: %w", err)
 			}
@@ -215,11 +274,11 @@ func DecodeForcedBatchV2(txsData []byte) (*ForcedBatchRawV2, error) {
 func decodeBlockHeader(txsData []byte, pos int) (int, *L2BlockRaw, error) {
 	var err error
 	currentBlock := &L2BlockRaw{}
-	pos, currentBlock.DeltaTimestamp, err = deserializeUint32(txsData, pos)
+	pos, currentBlock.DeltaTimestamp, err = decodeUint32(txsData, pos)
 	if err != nil {
 		return 0, nil, fmt.Errorf("can't get deltaTimestamp: %w", err)
 	}
-	pos, currentBlock.IndexL1InfoTree, err = deserializeUint32(txsData, pos)
+	pos, currentBlock.IndexL1InfoTree, err = decodeUint32(txsData, pos)
 	if err != nil {
 		return 0, nil, fmt.Errorf("can't get leafIndex: %w", err)
 	}
@@ -227,7 +286,8 @@ func decodeBlockHeader(txsData []byte, pos int) (int, *L2BlockRaw, error) {
 	return pos, currentBlock, nil
 }
 
-func decodeTxRLP(txsData []byte, offset int) (int, *L2TxRaw, error) {
+// DecodeTxRLP decodes a transaction from a byte slice.
+func DecodeTxRLP(txsData []byte, offset int) (int, *L2TxRaw, error) {
 	var err error
 	length, err := decodeRLPListLengthFromOffset(txsData, offset)
 	if err != nil {
@@ -265,13 +325,6 @@ func decodeTxRLP(txsData []byte, offset int) (int, *L2TxRaw, error) {
 	return int(endPos), l2Tx, err
 }
 
-func deserializeUint32(txsData []byte, pos int) (int, uint32, error) {
-	if len(txsData)-pos < 4 { // nolint:gomnd
-		return 0, 0, fmt.Errorf("can't get u32 because not enough data: %w", ErrInvalidBatchV2)
-	}
-	return pos + 4, uint32(txsData[pos])<<24 | uint32(txsData[pos+1])<<16 | uint32(txsData[pos+2])<<8 | uint32(txsData[pos+3]), nil // nolint:gomnd
-}
-
 // It returns the length of data from the param offset
 // ex:
 // 0xc0 -> empty data -> 1 byte because it include the 0xc0
@@ -302,3 +355,16 @@ func decodeRLPListLengthFromOffset(txsData []byte, offset int) (uint64, error) {
 	}
 	return length + headerByteLength, nil
 }
+
+func encodeUint32(value uint32) []byte {
+	data := make([]byte, sizeUInt32)
+	binary.BigEndian.PutUint32(data, value)
+	return data
+}
+
+func decodeUint32(txsData []byte, pos int) (int, uint32, error) {
+	if len(txsData)-pos < sizeUInt32 {
+		return 0, 0, fmt.Errorf("can't get u32 because not enough data: %w", ErrInvalidBatchV2)
+	}
+	return pos + sizeUInt32, binary.BigEndian.Uint32(txsData[pos : pos+sizeUInt32]), nil
+}
diff --git a/state/encoding_batch_v2_test.go b/state/encoding_batch_v2_test.go
index fa87ed6b9a..cdf1588b31 100644
--- a/state/encoding_batch_v2_test.go
+++ b/state/encoding_batch_v2_test.go
@@ -186,14 +186,19 @@ func TestDecodeRLPLength(t *testing.T) {
 
 func TestEncodeBatchV2(t *testing.T) {
 	block1 := L2BlockRaw{
-		DeltaTimestamp:  123,
-		IndexL1InfoTree: 456,
-		Transactions:    []L2TxRaw{},
+		ChangeL2BlockHeader: ChangeL2BlockHeader{
+			DeltaTimestamp:  123,
+			IndexL1InfoTree: 456,
+		},
+		Transactions: []L2TxRaw{},
 	}
+
 	block2 := L2BlockRaw{
-		DeltaTimestamp:  789,
-		IndexL1InfoTree: 101112,
-		Transactions:    []L2TxRaw{},
+		ChangeL2BlockHeader: ChangeL2BlockHeader{
+			DeltaTimestamp:  789,
+			IndexL1InfoTree: 101112,
+		},
+		Transactions: []L2TxRaw{},
 	}
 	blocks := []L2BlockRaw{block1, block2}
 
@@ -239,3 +244,36 @@ func TestDecodeForcedBatchV2WithRegularBatch(t *testing.T) {
 	_, err = DecodeForcedBatchV2(batchL2Data)
 	require.Error(t, err)
 }
+
+func TestEncodeBatchV2WithTxInBinary(t *testing.T) {
+	block1 := L2BlockRaw{
+		ChangeL2BlockHeader: ChangeL2BlockHeader{
+			DeltaTimestamp:  123,
+			IndexL1InfoTree: 456,
+		},
+		Transactions: []L2TxRaw{
+			{
+				EfficiencyPercentage: 255,
+				TxAlreadyEncoded:     true,
+				Data:                 []byte{0x01, 0x02, 0x03},
+			},
+		},
+	}
+
+	block2 := L2BlockRaw{
+		ChangeL2BlockHeader: ChangeL2BlockHeader{
+			DeltaTimestamp:  789,
+			IndexL1InfoTree: 101112,
+		},
+		Transactions: []L2TxRaw{},
+	}
+	blocks := []L2BlockRaw{block1, block2}
+
+	expectedBatchData := []byte{
+		0xb, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x1, 0xc8, 0x1, 0x2, 0x3, 0xff, 0xb, 0x0, 0x0, 0x3, 0x15, 0x0, 0x1, 0x8a, 0xf8,
+	}
+
+	batchData, err := EncodeBatchV2(&BatchRawV2{Blocks: blocks})
+	require.NoError(t, err)
+	require.Equal(t, expectedBatchData, batchData)
+}
diff --git a/state/helper.go b/state/helper.go
index 473725b33d..9c89440ca9 100644
--- a/state/helper.go
+++ b/state/helper.go
@@ -39,7 +39,7 @@ func EncodeTransactions(txs []types.Transaction, effectivePercentages []uint8, f
 	var batchL2Data []byte
 
 	for i, tx := range txs {
-		txData, err := prepareRPLTxData(tx)
+		txData, err := prepareRLPTxData(tx)
 		if err != nil {
 			return nil, err
 		}
@@ -57,7 +57,7 @@ func EncodeTransactions(txs []types.Transaction, effectivePercentages []uint8, f
 	return batchL2Data, nil
 }
 
-func prepareRPLTxData(tx types.Transaction) ([]byte, error) {
+func prepareRLPTxData(tx types.Transaction) ([]byte, error) {
 	v, r, s := tx.RawSignatureValues()
 	sign := 1 - (v.Uint64() & 1)
 
@@ -99,7 +99,7 @@ func EncodeTransactionsWithoutEffectivePercentage(txs []types.Transaction) ([]by
 	var batchL2Data []byte
 
 	for _, tx := range txs {
-		txData, err := prepareRPLTxData(tx)
+		txData, err := prepareRLPTxData(tx)
 		if err != nil {
 			return nil, err
 		}
diff --git a/state/interfaces.go b/state/interfaces.go
index 6b0cf82e68..3f6873d3eb 100644
--- a/state/interfaces.go
+++ b/state/interfaces.go
@@ -140,7 +140,6 @@ type storage interface {
 	GetForkIDByBlockNumber(blockNumber uint64) uint64
 	GetForkIDByBatchNumber(batchNumber uint64) uint64
 	GetLatestIndex(ctx context.Context, dbTx pgx.Tx) (uint32, error)
-	BuildChangeL2Block(deltaTimestamp uint32, l1InfoTreeIndex uint32) []byte
 	GetRawBatchTimestamps(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*time.Time, *time.Time, error)
 	GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntry, error)
 	GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntry, error)
diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go
index cb9cbc9bb9..7bcf118d3f 100644
--- a/state/pgstatestorage/batch.go
+++ b/state/pgstatestorage/batch.go
@@ -2,7 +2,6 @@ package pgstatestorage
 
 import (
 	"context"
-	"encoding/binary"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -936,25 +935,6 @@ func (p *PostgresStorage) GetBlockNumVirtualBatchByBatchNum(ctx context.Context,
 	return blockNum, nil
 }
 
-// BuildChangeL2Block returns a changeL2Block tx to use in the BatchL2Data
-func (p *PostgresStorage) BuildChangeL2Block(deltaTimestamp uint32, l1InfoTreeIndex uint32) []byte {
-	changeL2BlockMark := []byte{0x0B}
-	changeL2Block := []byte{}
-
-	// changeL2Block transaction mark
-	changeL2Block = append(changeL2Block, changeL2BlockMark...)
-	// changeL2Block deltaTimeStamp
-	deltaTimestampBytes := make([]byte, 4) //nolint:gomnd
-	binary.BigEndian.PutUint32(deltaTimestampBytes, deltaTimestamp)
-	changeL2Block = append(changeL2Block, deltaTimestampBytes...)
-	// changeL2Block l1InfoTreeIndexBytes
-	l1InfoTreeIndexBytes := make([]byte, 4) //nolint:gomnd
-	binary.BigEndian.PutUint32(l1InfoTreeIndexBytes, l1InfoTreeIndex)
-	changeL2Block = append(changeL2Block, l1InfoTreeIndexBytes...)
-
-	return changeL2Block
-}
-
 // GetRawBatchTimestamps returns the timestamp of the batch with the given number.
 // it returns batch_num.tstamp and virtual_batch.batch_timestamp
 func (p *PostgresStorage) GetRawBatchTimestamps(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*time.Time, *time.Time, error) {
diff --git a/state/test/forkid_etrog/etrog_test.go b/state/test/forkid_etrog/etrog_test.go
index d8dfd77398..cea81fa86b 100644
--- a/state/test/forkid_etrog/etrog_test.go
+++ b/state/test/forkid_etrog/etrog_test.go
@@ -98,9 +98,11 @@ func TestStateTransition(t *testing.T) {
 			if len(txs) > 0 {
 				// Generate batchdata from the txs in the test and compared with the vector
 				l2block := state.L2BlockRaw{
-					DeltaTimestamp:  uint32(timestampLimit.Uint64()),
-					IndexL1InfoTree: testCase.Txs[0].IndexL1InfoTree,
-					Transactions:    txs,
+					ChangeL2BlockHeader: state.ChangeL2BlockHeader{
+						DeltaTimestamp:  uint32(timestampLimit.Uint64()),
+						IndexL1InfoTree: testCase.Txs[0].IndexL1InfoTree,
+					},
+					Transactions: txs,
 				}
 
 				batch := state.BatchRawV2{
diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go
index f89f23d06a..b9fbc72de5 100644
--- a/synchronizer/synchronizer_test.go
+++ b/synchronizer/synchronizer_test.go
@@ -696,9 +696,11 @@ func createBatchL2DataEtrog(howManyBlocks int, howManyTx int) ([]byte, []types.T
 	transactions := []types.TransactionOrHash{}
 	for nBlock := 0; nBlock < howManyBlocks; nBlock++ {
 		block := state.L2BlockRaw{
-			DeltaTimestamp:  123,
-			IndexL1InfoTree: 456,
-			Transactions:    []state.L2TxRaw{},
+			ChangeL2BlockHeader: state.ChangeL2BlockHeader{
+				DeltaTimestamp:  123,
+				IndexL1InfoTree: 456,
+			},
+			Transactions: []state.L2TxRaw{},
 		}
 		for i := 0; i < howManyTx; i++ {
 			tx := createTransaction(uint64(i + 1))
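
For reference, the two test hunks above switch DeltaTimestamp and IndexL1InfoTree to the embedded state.ChangeL2BlockHeader, and the storage-layer BuildChangeL2Block helper is deleted. Below is a minimal standalone sketch of the changeL2Block byte layout that the deleted helper produced (0x0B marker, then big-endian deltaTimestamp and l1InfoTreeIndex); the package and function names here are illustrative, the layout is taken from the removed code.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeChangeL2Block reproduces the byte layout of the deleted
// PostgresStorage.BuildChangeL2Block helper: a 0x0B changeL2Block marker,
// the 4-byte big-endian deltaTimestamp and the 4-byte big-endian l1InfoTreeIndex.
func encodeChangeL2Block(deltaTimestamp, l1InfoTreeIndex uint32) []byte {
	changeL2Block := []byte{0x0B} // changeL2Block transaction mark

	deltaTimestampBytes := make([]byte, 4)
	binary.BigEndian.PutUint32(deltaTimestampBytes, deltaTimestamp)
	changeL2Block = append(changeL2Block, deltaTimestampBytes...)

	l1InfoTreeIndexBytes := make([]byte, 4)
	binary.BigEndian.PutUint32(l1InfoTreeIndexBytes, l1InfoTreeIndex)
	changeL2Block = append(changeL2Block, l1InfoTreeIndexBytes...)

	return changeL2Block
}

func main() {
	// Same values as createBatchL2DataEtrog in the synchronizer test above.
	fmt.Printf("%x\n", encodeChangeL2Block(123, 456)) // 0b0000007b000001c8
}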

From b938572f138ba6cc40ef6736153c469afeb11c96 Mon Sep 17 00:00:00 2001
From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com>
Date: Tue, 30 Jan 2024 09:20:25 +0100
Subject: [PATCH 48/54] synchronizer: unit tests and remove pool_reorg (#3111)

* removed reorgPool call
* added tests for L1 sequenced batches
* removed the panic after the halt call, as requested in PR comments
---
 .../etrog/processor_l1_sequence_batches.go    |  68 +----
 .../processor_l1_sequence_batches_test.go     | 263 +++++++++++++++---
 synchronizer/default_l1processors.go          |   2 +-
 3 files changed, 235 insertions(+), 98 deletions(-)

diff --git a/synchronizer/actions/etrog/processor_l1_sequence_batches.go b/synchronizer/actions/etrog/processor_l1_sequence_batches.go
index 46133a66a6..162ce0edd8 100644
--- a/synchronizer/actions/etrog/processor_l1_sequence_batches.go
+++ b/synchronizer/actions/etrog/processor_l1_sequence_batches.go
@@ -18,7 +18,6 @@ import (
 	syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
 	"github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
 	"github.com/ethereum/go-ethereum/common"
-	ethTypes "github.com/ethereum/go-ethereum/core/types"
 	"github.com/jackc/pgx/v4"
 )
 
@@ -32,19 +31,9 @@ type stateProcessSequenceBatches interface {
 	AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error
 	AddVirtualBatch(ctx context.Context, virtualBatch *state.VirtualBatch, dbTx pgx.Tx) error
 	AddTrustedReorg(ctx context.Context, trustedReorg *state.TrustedReorg, dbTx pgx.Tx) error
-	GetReorgedTransactions(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]*ethTypes.Transaction, error)
 	GetL1InfoTreeDataFromBatchL2Data(ctx context.Context, batchL2Data []byte, dbTx pgx.Tx) (map[uint32]state.L1DataV2, common.Hash, common.Hash, error)
 }
 
-type ethermanProcessSequenceBatches interface {
-	GetLatestBatchNumber() (uint64, error)
-}
-
-type poolProcessSequenceBatchesInterface interface {
-	DeleteReorgedTransactions(ctx context.Context, txs []*ethTypes.Transaction) error
-	StoreTx(ctx context.Context, tx ethTypes.Transaction, ip string, isWIP bool) error
-}
-
 type syncProcessSequenceBatchesInterface interface {
 	PendingFlushID(flushID uint64, proverID string)
 	IsTrustedSequencer() bool
@@ -55,8 +44,6 @@ type syncProcessSequenceBatchesInterface interface {
 type ProcessorL1SequenceBatchesEtrog struct {
 	actions.ProcessorBase[ProcessorL1SequenceBatchesEtrog]
 	state        stateProcessSequenceBatches
-	etherMan     ethermanProcessSequenceBatches
-	pool         poolProcessSequenceBatchesInterface
 	sync         syncProcessSequenceBatchesInterface
 	timeProvider syncCommon.TimeProvider
 	halter       syncinterfaces.CriticalErrorHandler
@@ -64,8 +51,6 @@ type ProcessorL1SequenceBatchesEtrog struct {
 
 // NewProcessorL1SequenceBatches returns instance of a processor for SequenceBatchesOrder
 func NewProcessorL1SequenceBatches(state stateProcessSequenceBatches,
-	etherMan ethermanProcessSequenceBatches,
-	pool poolProcessSequenceBatchesInterface,
 	sync syncProcessSequenceBatchesInterface,
 	timeProvider syncCommon.TimeProvider,
 	halter syncinterfaces.CriticalErrorHandler) *ProcessorL1SequenceBatchesEtrog {
@@ -74,8 +59,6 @@ func NewProcessorL1SequenceBatches(state stateProcessSequenceBatches,
 			SupportedEvent:    []etherman.EventOrder{etherman.SequenceBatchesOrder},
 			SupportedForkdIds: &ForksIdOnlyEtrog},
 		state:        state,
-		etherMan:     etherMan,
-		pool:         pool,
 		sync:         sync,
 		timeProvider: timeProvider,
 		halter:       halter,
@@ -294,18 +277,6 @@ func (p *ProcessorL1SequenceBatchesEtrog) processSequenceBatches(ctx context.Con
 		// Call the check trusted state method to compare trusted and virtual state
 		status := p.checkTrustedState(ctx, batch, tBatch, newRoot, dbTx)
 		if status {
-			// Reorg Pool
-			err := p.reorgPool(ctx, dbTx)
-			if err != nil {
-				rollbackErr := dbTx.Rollback(ctx)
-				if rollbackErr != nil {
-					log.Errorf("error rolling back state. BatchNumber: %d, BlockNumber: %d, rollbackErr: %s, error : %v", tBatch.BatchNumber, blockNumber, rollbackErr.Error(), err)
-					return rollbackErr
-				}
-				log.Errorf("error: %v. BatchNumber: %d, BlockNumber: %d", err, tBatch.BatchNumber, blockNumber)
-				return err
-			}
-
 			// Clean trustedState sync variables to avoid sync the trusted state from the wrong starting point.
 			// This wrong starting point would force the trusted sync to clean the virtualization of the batch reaching an inconsistency.
 			p.sync.CleanTrustedState()
@@ -377,43 +348,6 @@ func (p *ProcessorL1SequenceBatchesEtrog) processSequenceBatches(ctx context.Con
 	return nil
 }
 
-func (p *ProcessorL1SequenceBatchesEtrog) reorgPool(ctx context.Context, dbTx pgx.Tx) error {
-	latestBatchNum, err := p.etherMan.GetLatestBatchNumber()
-	if err != nil {
-		log.Error("error getting the latestBatchNumber virtualized in the smc. Error: ", err)
-		return err
-	}
-	batchNumber := latestBatchNum + 1
-	// Get transactions that have to be included in the pool again
-	txs, err := p.state.GetReorgedTransactions(ctx, batchNumber, dbTx)
-	if err != nil {
-		log.Errorf("error getting txs from trusted state. BatchNumber: %d, error: %v", batchNumber, err)
-		return err
-	}
-	log.Debug("Reorged transactions: ", txs)
-
-	// Remove txs from the pool
-	err = p.pool.DeleteReorgedTransactions(ctx, txs)
-	if err != nil {
-		log.Errorf("error deleting txs from the pool. BatchNumber: %d, error: %v", batchNumber, err)
-		return err
-	}
-	log.Debug("Delete reorged transactions")
-
-	// Add txs to the pool
-	for _, tx := range txs {
-		// Insert tx in WIP status to avoid the sequencer to grab them before it gets restarted
-		// When the sequencer restarts, it will update the status to pending non-wip
-		err = p.pool.StoreTx(ctx, *tx, "", true)
-		if err != nil {
-			log.Errorf("error storing tx into the pool again. TxHash: %s. BatchNumber: %d, error: %v", tx.Hash().String(), batchNumber, err)
-			return err
-		}
-		log.Debug("Reorged transactions inserted in the pool: ", tx.Hash())
-	}
-	return nil
-}
-
 func (p *ProcessorL1SequenceBatchesEtrog) checkTrustedState(ctx context.Context, batch state.Batch, tBatch *state.Batch, newRoot common.Hash, dbTx pgx.Tx) bool {
 	//Compare virtual state with trusted state
 	var reorgReasons strings.Builder
@@ -449,7 +383,9 @@ func (p *ProcessorL1SequenceBatchesEtrog) checkTrustedState(ctx context.Context,
 
 		if p.sync.IsTrustedSequencer() {
 			log.Errorf("TRUSTED REORG DETECTED! Batch: %d reson:%s", batch.BatchNumber, reason)
+			// The halt function must never return: it has to block the process
 			p.halt(ctx, fmt.Errorf("TRUSTED REORG DETECTED! Batch: %d", batch.BatchNumber))
+			log.Errorf("CRITICAL: this code must never execute; the halt function must never return, it has to block the process")
 		}
 		if !tBatch.WIP {
 			log.Warnf("missmatch in trusted state detected for Batch Number: %d. Reasons: %s", tBatch.BatchNumber, reason)
diff --git a/synchronizer/actions/etrog/processor_l1_sequence_batches_test.go b/synchronizer/actions/etrog/processor_l1_sequence_batches_test.go
index c5ecb33af4..4d6a47e95f 100644
--- a/synchronizer/actions/etrog/processor_l1_sequence_batches_test.go
+++ b/synchronizer/actions/etrog/processor_l1_sequence_batches_test.go
@@ -3,10 +3,12 @@ package etrog
 import (
 	"context"
 	"testing"
+	"time"
 
 	"github.com/0xPolygonHermez/zkevm-node/etherman"
 	"github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevm"
 	"github.com/0xPolygonHermez/zkevm-node/state"
+	"github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
 	"github.com/0xPolygonHermez/zkevm-node/synchronizer/actions"
 	syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
 	mock_syncinterfaces "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces/mocks"
@@ -32,7 +34,6 @@ var (
 type mocksEtrogProcessorL1 struct {
 	Etherman             *mock_syncinterfaces.EthermanFullInterface
 	State                *mock_syncinterfaces.StateFullInterface
-	Pool                 *mock_syncinterfaces.PoolInterface
 	Synchronizer         *mock_syncinterfaces.SynchronizerFullInterface
 	DbTx                 *syncMocks.DbTxMock
 	TimeProvider         *syncCommon.MockTimerProvider
@@ -41,21 +42,18 @@ type mocksEtrogProcessorL1 struct {
 
 func createMocks(t *testing.T) *mocksEtrogProcessorL1 {
 	mocks := &mocksEtrogProcessorL1{
-		Etherman:     mock_syncinterfaces.NewEthermanFullInterface(t),
-		State:        mock_syncinterfaces.NewStateFullInterface(t),
-		Pool:         mock_syncinterfaces.NewPoolInterface(t),
-		Synchronizer: mock_syncinterfaces.NewSynchronizerFullInterface(t),
-		DbTx:         syncMocks.NewDbTxMock(t),
-		//ZKEVMClient:          mock_syncinterfaces.NewZKEVMClientInterface(t),
+		Etherman:             mock_syncinterfaces.NewEthermanFullInterface(t),
+		State:                mock_syncinterfaces.NewStateFullInterface(t),
+		Synchronizer:         mock_syncinterfaces.NewSynchronizerFullInterface(t),
+		DbTx:                 syncMocks.NewDbTxMock(t),
 		TimeProvider:         &syncCommon.MockTimerProvider{},
 		CriticalErrorHandler: mock_syncinterfaces.NewCriticalErrorHandler(t),
-		//EventLog:     &eventLogMock{},
 	}
 	return mocks
 }
 
 func createSUT(mocks *mocksEtrogProcessorL1) *ProcessorL1SequenceBatchesEtrog {
-	return NewProcessorL1SequenceBatches(mocks.State, mocks.Etherman, mocks.Pool, mocks.Synchronizer,
+	return NewProcessorL1SequenceBatches(mocks.State, mocks.Synchronizer,
 		mocks.TimeProvider, mocks.CriticalErrorHandler)
 }
 
@@ -82,34 +80,237 @@ func TestL1SequenceBatchesPermissionlessNewBatchSequenced(t *testing.T) {
 	mocks := createMocks(t)
 	sut := createSUT(mocks)
 	ctx := context.Background()
+	batch := newStateBatch(3)
+	l1InfoRoot := common.HexToHash(hashExamplesValues[0])
+	expectationsPreExecution(t, mocks, ctx, batch, state.ErrNotFound)
+	executionResponse := newProcessBatchResponseV2(batch)
+	expectationsProcessAndStoreClosedBatchV2(t, mocks, ctx, executionResponse, nil)
+	expectationsAddSequencedBatch(t, mocks, ctx, executionResponse)
+	mocks.Synchronizer.EXPECT().PendingFlushID(mock.Anything, mock.Anything)
+	err := sut.Process(ctx, etherman.Order{Pos: 1}, newL1Block(mocks, batch, l1InfoRoot), mocks.DbTx)
+	require.NoError(t, err)
+}
+
+func TestL1SequenceBatchesTrustedBatchSequencedThatAlreadyExistsHappyPath(t *testing.T) {
+	mocks := createMocks(t)
+	sut := createSUT(mocks)
+	ctx := context.Background()
+	batch := newStateBatch(3)
+	l1InfoRoot := common.HexToHash(hashExamplesValues[0])
+	l1Block := newL1Block(mocks, batch, l1InfoRoot)
+	expectationsPreExecution(t, mocks, ctx, batch, nil)
+	executionResponse := newProcessBatchResponseV2(batch)
+	expectationsForExecution(t, mocks, ctx, l1Block.SequencedBatches[1][0], l1Block.ReceivedAt, executionResponse)
+	mocks.State.EXPECT().AddAccumulatedInputHash(ctx, executionResponse.NewBatchNum, common.BytesToHash(executionResponse.NewAccInputHash), mocks.DbTx).Return(nil)
+	expectationsAddSequencedBatch(t, mocks, ctx, executionResponse)
+	err := sut.Process(ctx, etherman.Order{Pos: 1}, l1Block, mocks.DbTx)
+	require.NoError(t, err)
+}
+
+func TestL1SequenceBatchesPermissionlessBatchSequencedThatAlreadyExistsHappyPath(t *testing.T) {
+	mocks := createMocks(t)
+	sut := createSUT(mocks)
+	ctx := context.Background()
+	batch := newStateBatch(3)
+	l1InfoRoot := common.HexToHash(hashExamplesValues[0])
+	l1Block := newL1Block(mocks, batch, l1InfoRoot)
+	expectationsPreExecution(t, mocks, ctx, batch, nil)
+	executionResponse := newProcessBatchResponseV2(batch)
+	expectationsForExecution(t, mocks, ctx, l1Block.SequencedBatches[1][0], l1Block.ReceivedAt, executionResponse)
+	mocks.State.EXPECT().AddAccumulatedInputHash(ctx, executionResponse.NewBatchNum, common.BytesToHash(executionResponse.NewAccInputHash), mocks.DbTx).Return(nil)
+	expectationsAddSequencedBatch(t, mocks, ctx, executionResponse)
+	err := sut.Process(ctx, etherman.Order{Pos: 1}, l1Block, mocks.DbTx)
+	require.NoError(t, err)
+}
+
+// CASE: A permissionless node processes an L1 sequenced batch that is already in state (presumably synced from the trusted node)
+// - Execute it
+// - Check whether it matches the state batch
+// - If it doesn't match -> reset the trusted state
+// - Reprocess it again as a new batch
+func TestL1SequenceBatchesPermissionlessBatchSequencedThatAlreadyExistsMismatch(t *testing.T) {
+	mocks := createMocks(t)
+	sut := createSUT(mocks)
+	ctx := context.Background()
+	batch := newStateBatch(3)
+	l1InfoRoot := common.HexToHash(hashExamplesValues[0])
+	l1Block := newL1Block(mocks, batch, l1InfoRoot)
+	expectationsPreExecution(t, mocks, ctx, batch, nil)
+	executionResponse := newProcessBatchResponseV2(batch)
+	executionResponse.NewStateRoot = common.HexToHash(hashExamplesValues[2]).Bytes()
+	expectationsForExecution(t, mocks, ctx, l1Block.SequencedBatches[1][0], l1Block.ReceivedAt, executionResponse)
+	mocks.State.EXPECT().AddAccumulatedInputHash(ctx, executionResponse.NewBatchNum, common.BytesToHash(executionResponse.NewAccInputHash), mocks.DbTx).Return(nil)
+	mocks.Synchronizer.EXPECT().IsTrustedSequencer().Return(false)
+	mocks.State.EXPECT().AddTrustedReorg(ctx, mock.Anything, mocks.DbTx).Return(nil)
+	mocks.State.EXPECT().ResetTrustedState(ctx, batch.BatchNumber-1, mocks.DbTx).Return(nil)
+	mocks.Synchronizer.EXPECT().CleanTrustedState()
+
+	// Reexecute it as a new batch
+	expectationsProcessAndStoreClosedBatchV2(t, mocks, ctx, executionResponse, nil)
+	expectationsAddSequencedBatch(t, mocks, ctx, executionResponse)
+	mocks.Synchronizer.EXPECT().PendingFlushID(mock.Anything, mock.Anything)
+	err := sut.Process(ctx, etherman.Order{Pos: 1}, l1Block, mocks.DbTx)
+	require.NoError(t, err)
+}
+
+type CriticalErrorHandlerPanic struct {
+}
+
+func (c CriticalErrorHandlerPanic) CriticalError(ctx context.Context, err error) {
+	panic("CriticalError")
+}
+
+// CASE: A TRUSTED SYNCHRONIZER processes an L1 sequenced batch that is already in state but doesn't match the trusted state
+// - Execute it
+// - Check whether it matches the state batch
+// - If it doesn't match -> HALT
+func TestL1SequenceBatchesTrustedBatchSequencedThatAlreadyExistsMismatch(t *testing.T) {
+	mocks := createMocks(t)
+	CriticalErrorHandlerPanic := CriticalErrorHandlerPanic{}
+	sut := NewProcessorL1SequenceBatches(mocks.State, mocks.Synchronizer,
+		mocks.TimeProvider, CriticalErrorHandlerPanic)
+	ctx := context.Background()
+	batch := newStateBatch(3)
+	l1InfoRoot := common.HexToHash(hashExamplesValues[0])
+	l1Block := newL1Block(mocks, batch, l1InfoRoot)
+	expectationsPreExecution(t, mocks, ctx, batch, nil)
+	executionResponse := newProcessBatchResponseV2(batch)
+	executionResponse.NewStateRoot = common.HexToHash(hashExamplesValues[2]).Bytes()
+	expectationsForExecution(t, mocks, ctx, l1Block.SequencedBatches[1][0], l1Block.ReceivedAt, executionResponse)
+	mocks.State.EXPECT().AddAccumulatedInputHash(ctx, executionResponse.NewBatchNum, common.BytesToHash(executionResponse.NewAccInputHash), mocks.DbTx).Return(nil)
+	mocks.Synchronizer.EXPECT().IsTrustedSequencer().Return(true)
+
+	// CriticalError in a real implementation is a blocking call; in the test it panics instead
+	assertPanic(t, func() { sut.Process(ctx, etherman.Order{Pos: 1}, l1Block, mocks.DbTx) }) //nolint
+}
+
+func TestL1SequenceForcedBatchesNum1TrustedBatch(t *testing.T) {
+	mocks := createMocks(t)
+	sut := createSUT(mocks)
+	ctx := context.Background()
+	batch := newStateBatch(3)
+	forcedTime := mocks.TimeProvider.Now()
+	l1InfoRoot := common.HexToHash(hashExamplesValues[0])
+	forcedGlobalExitRoot := common.HexToHash(hashExamplesValues[1])
+	forcedBlockHash := common.HexToHash(hashExamplesValues[2])
+	sequencedForcedBatch := newForcedSequenceBatch(batch, l1InfoRoot, forcedTime, forcedGlobalExitRoot, forcedBlockHash)
+
+	l1Block := newComposedL1Block(mocks, sequencedForcedBatch, l1InfoRoot)
+
+	mocks.State.EXPECT().GetNextForcedBatches(ctx, int(1), mocks.DbTx).Return([]state.ForcedBatch{
+		{
+			BlockNumber:       32,
+			ForcedBatchNumber: 4,
+			Sequencer:         common.HexToAddress(addrExampleValues[0]),
+			GlobalExitRoot:    forcedGlobalExitRoot,
+			RawTxsData:        []byte{},
+			ForcedAt:          forcedTime,
+		},
+	}, nil)
+	expectationsPreExecution(t, mocks, ctx, batch, state.ErrNotFound)
+
+	executionResponse := newProcessBatchResponseV2(batch)
+	executionResponse.NewStateRoot = common.HexToHash(hashExamplesValues[2]).Bytes()
+
+	expectationsProcessAndStoreClosedBatchV2(t, mocks, ctx, executionResponse, nil)
+	expectationsAddSequencedBatch(t, mocks, ctx, executionResponse)
+	mocks.Synchronizer.EXPECT().PendingFlushID(mock.Anything, mock.Anything)
+
+	err := sut.Process(ctx, etherman.Order{Pos: 1}, l1Block, mocks.DbTx)
+	require.NoError(t, err)
+}
+
+// --------------------- Helper functions ----------------------------------------------------------------------------------------------------
+
+func expectationsPreExecution(t *testing.T, mocks *mocksEtrogProcessorL1, ctx context.Context, trustedBatch *state.Batch, responseError error) {
+	mocks.State.EXPECT().GetL1InfoTreeDataFromBatchL2Data(ctx, mock.Anything, mocks.DbTx).Return(map[uint32]state.L1DataV2{}, state.ZeroHash, state.ZeroHash, nil).Maybe()
+	mocks.State.EXPECT().GetBatchByNumber(ctx, trustedBatch.BatchNumber, mocks.DbTx).Return(trustedBatch, responseError)
+}
+
+func expectationsAddSequencedBatch(t *testing.T, mocks *mocksEtrogProcessorL1, ctx context.Context, response *executor.ProcessBatchResponseV2) {
+	mocks.State.EXPECT().AddVirtualBatch(ctx, mock.Anything, mocks.DbTx).Return(nil)
+	mocks.State.EXPECT().AddSequence(ctx, state.Sequence{FromBatchNumber: 3, ToBatchNumber: 3}, mocks.DbTx).Return(nil)
+}
+
+func expectationsProcessAndStoreClosedBatchV2(t *testing.T, mocks *mocksEtrogProcessorL1, ctx context.Context, response *executor.ProcessBatchResponseV2, responseError error) {
+	newStateRoot := common.BytesToHash(response.NewStateRoot)
+	mocks.State.EXPECT().ProcessAndStoreClosedBatchV2(ctx, mock.Anything, mocks.DbTx, mock.Anything).Return(newStateRoot, response.FlushId, response.ProverId, responseError)
+}
+
+func expectationsForExecution(t *testing.T, mocks *mocksEtrogProcessorL1, ctx context.Context, sequencedBatch etherman.SequencedBatch, timestampLimit time.Time, response *executor.ProcessBatchResponseV2) {
+	mocks.State.EXPECT().ExecuteBatchV2(ctx,
+		mock.Anything, *sequencedBatch.L1InfoRoot, mock.Anything, timestampLimit, false,
+		uint32(1), (*common.Hash)(nil), mocks.DbTx).Return(response, nil)
+}
+
+func newProcessBatchResponseV2(batch *state.Batch) *executor.ProcessBatchResponseV2 {
+	return &executor.ProcessBatchResponseV2{
+		NewBatchNum:     batch.BatchNumber,
+		NewAccInputHash: batch.AccInputHash[:],
+		NewStateRoot:    batch.StateRoot[:],
+		FlushId:         uint64(1234),
+		ProverId:        "prover-id",
+	}
+}
+
+func newStateBatch(number uint64) *state.Batch {
+	return &state.Batch{
+		BatchNumber: number,
+		StateRoot:   common.HexToHash(hashExamplesValues[3]),
+		Coinbase:    common.HexToAddress(addrExampleValues[0]),
+	}
+}
+
+func newForcedSequenceBatch(batch *state.Batch, l1InfoRoot common.Hash, forcedTimestamp time.Time, forcedGlobalExitRoot, forcedBlockHashL1 common.Hash) *etherman.SequencedBatch {
+	return &etherman.SequencedBatch{
+		BatchNumber:   batch.BatchNumber,
+		L1InfoRoot:    &l1InfoRoot,
+		TxHash:        state.HashByteArray(batch.BatchL2Data),
+		Coinbase:      batch.Coinbase,
+		SequencerAddr: common.HexToAddress(addrExampleValues[0]),
+		PolygonRollupBaseEtrogBatchData: &polygonzkevm.PolygonRollupBaseEtrogBatchData{
+			Transactions:         []byte{},
+			ForcedTimestamp:      uint64(forcedTimestamp.Unix()),
+			ForcedGlobalExitRoot: forcedGlobalExitRoot,
+			ForcedBlockHashL1:    forcedBlockHashL1,
+		},
+	}
+}
+
+func newL1Block(mocks *mocksEtrogProcessorL1, batch *state.Batch, l1InfoRoot common.Hash) *etherman.Block {
+	sbatch := etherman.SequencedBatch{
+		BatchNumber:   batch.BatchNumber,
+		L1InfoRoot:    &l1InfoRoot,
+		TxHash:        state.HashByteArray(batch.BatchL2Data),
+		Coinbase:      batch.Coinbase,
+		SequencerAddr: common.HexToAddress(addrExampleValues[0]),
+		PolygonRollupBaseEtrogBatchData: &polygonzkevm.PolygonRollupBaseEtrogBatchData{
+			Transactions: []byte{},
+		},
+	}
+
+	return newComposedL1Block(mocks, &sbatch, l1InfoRoot)
+}
+
+func newComposedL1Block(mocks *mocksEtrogProcessorL1, forcedBatch *etherman.SequencedBatch, l1InfoRoot common.Hash) *etherman.Block {
 	l1Block := etherman.Block{
 		BlockNumber:      123,
 		ReceivedAt:       mocks.TimeProvider.Now(),
 		SequencedBatches: [][]etherman.SequencedBatch{},
 	}
-	l1InfoRoot := common.HexToHash(hashExamplesValues[0])
 	l1Block.SequencedBatches = append(l1Block.SequencedBatches, []etherman.SequencedBatch{})
 	l1Block.SequencedBatches = append(l1Block.SequencedBatches, []etherman.SequencedBatch{
-		{
-			BatchNumber:   3,
-			L1InfoRoot:    &l1InfoRoot,
-			TxHash:        common.HexToHash(hashExamplesValues[1]),
-			Coinbase:      common.HexToAddress(addrExampleValues[0]),
-			SequencerAddr: common.HexToAddress(addrExampleValues[1]),
-			PolygonRollupBaseEtrogBatchData: &polygonzkevm.PolygonRollupBaseEtrogBatchData{
-				Transactions: []byte{},
-			},
-		},
+		*forcedBatch,
 	})
-	mocks.State.EXPECT().GetL1InfoTreeDataFromBatchL2Data(ctx, mock.Anything, mocks.DbTx).Return(map[uint32]state.L1DataV2{}, state.ZeroHash, state.ZeroHash, nil)
-	mocks.State.EXPECT().GetBatchByNumber(ctx, uint64(3), mocks.DbTx).Return(nil, state.ErrNotFound)
-	mocks.Synchronizer.EXPECT().PendingFlushID(mock.Anything, mock.Anything)
-	mocks.State.EXPECT().AddVirtualBatch(ctx, mock.Anything, mocks.DbTx).Return(nil)
-	mocks.State.EXPECT().AddSequence(ctx, mock.Anything, mocks.DbTx).Return(nil)
-	newStateRoot := common.HexToHash(hashExamplesValues[2])
-	flushID := uint64(1234)
-	proverID := "prover-id"
-	mocks.State.EXPECT().ProcessAndStoreClosedBatchV2(ctx, mock.Anything, mocks.DbTx, mock.Anything).Return(newStateRoot, flushID, proverID, nil)
-	err := sut.Process(ctx, etherman.Order{Pos: 1}, &l1Block, mocks.DbTx)
-	require.NoError(t, err)
+	return &l1Block
+}
+
+// https://stackoverflow.com/questions/31595791/how-to-test-panics
+func assertPanic(t *testing.T, f func()) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("The code did not panic")
+		}
+	}()
+	f()
 }
diff --git a/synchronizer/default_l1processors.go b/synchronizer/default_l1processors.go
index 591b4fe334..5b6bd7fb6f 100644
--- a/synchronizer/default_l1processors.go
+++ b/synchronizer/default_l1processors.go
@@ -15,7 +15,7 @@ func defaultsL1EventProcessors(sync *ClientSynchronizer) *processor_manager.L1Ev
 	p.Register(incaberry.NewProcessL1SequenceForcedBatches(sync.state, sync))
 	p.Register(incaberry.NewProcessorForkId(sync.state, sync))
 	p.Register(etrog.NewProcessorL1InfoTreeUpdate(sync.state))
-	p.Register(etrog.NewProcessorL1SequenceBatches(sync.state, sync.etherMan, sync.pool, sync, common.DefaultTimeProvider{}, sync.halter))
+	p.Register(etrog.NewProcessorL1SequenceBatches(sync.state, sync, common.DefaultTimeProvider{}, sync.halter))
 	p.Register(incaberry.NewProcessorL1VerifyBatch(sync.state))
 	p.Register(etrog.NewProcessorL1UpdateEtrogSequence(sync.state, sync, common.DefaultTimeProvider{}))
 	return p.Build()
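
A minimal, self-contained sketch (illustrative names, not repository code) of the panic-based halter pattern the new tests rely on: in production the CriticalError call is expected to block forever, so the unit tests substitute CriticalErrorHandlerPanic, whose panic assertPanic can observe with a deferred recover.

package main

import (
	"context"
	"errors"
	"fmt"
)

// panicHalter mirrors the CriticalErrorHandlerPanic test double added above:
// the production halter must block forever, so tests swap in a panic that a
// deferred recover (what assertPanic does) can detect.
type panicHalter struct{}

func (panicHalter) CriticalError(_ context.Context, err error) {
	panic(fmt.Sprintf("halt requested: %v", err))
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("halter was invoked:", r)
		}
	}()
	panicHalter{}.CriticalError(context.Background(), errors.New("TRUSTED REORG DETECTED"))
}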

From 3f537afb79030fa4c44c604051f86949ac36f9c4 Mon Sep 17 00:00:00 2001
From: Thiago Coimbra Lemos 
Date: Tue, 30 Jan 2024 13:38:27 -0300
Subject: [PATCH 49/54] avoid unnecessary get code call when address doesn't
 have code hash (#3170)

---
 merkletree/tree.go      |  3 ++
 merkletree/tree_test.go | 86 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+)
 create mode 100644 merkletree/tree_test.go

diff --git a/merkletree/tree.go b/merkletree/tree.go
index 8294435324..4a42f08096 100644
--- a/merkletree/tree.go
+++ b/merkletree/tree.go
@@ -93,6 +93,9 @@ func (tree *StateTree) GetCode(ctx context.Context, address common.Address, root
 	}
 
 	k := new(big.Int).SetBytes(scCodeHash)
+	if k.Cmp(big.NewInt(0)) == 0 {
+		return []byte{}, nil
+	}
 
 	// this code gets actual smart contract code from sc code storage
 	scCode, err := tree.getProgram(ctx, scalarToh4(k))
diff --git a/merkletree/tree_test.go b/merkletree/tree_test.go
new file mode 100644
index 0000000000..32b2b4a36b
--- /dev/null
+++ b/merkletree/tree_test.go
@@ -0,0 +1,86 @@
+package merkletree
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/0xPolygonHermez/zkevm-node/hex"
+	"github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/EmitLog2"
+	"github.com/0xPolygonHermez/zkevm-node/test/testutils"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/google/uuid"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGetCode(t *testing.T) {
+	ctx := context.Background()
+	zkProverURI := testutils.GetEnv("ZKPROVER_URI", "localhost")
+
+	cfg := Config{URI: fmt.Sprintf("%s:50061", zkProverURI)}
+	c, _, _ := NewMTDBServiceClient(ctx, cfg)
+	sTree := NewStateTree(c)
+
+	type testCase struct {
+		name           string
+		addr           common.Address
+		root           []byte
+		expectedResult []byte
+		expectedError  error
+		setup          func(*testing.T, *testCase, *StateTree)
+	}
+
+	testCases := []testCase{
+		{
+			name:           "get existent code successfully",
+			addr:           common.HexToAddress("0x1"),
+			root:           common.HexToHash("0x0").Bytes(),
+			expectedResult: hex.DecodeBig(EmitLog2.EmitLog2Bin).Bytes(),
+			expectedError:  nil,
+			setup: func(t *testing.T, tc *testCase, sTree *StateTree) {
+				txID := uuid.NewString()
+
+				err := sTree.StartBlock(ctx, common.Hash(tc.root), txID)
+				require.NoError(t, err)
+
+				newRoot, _, err := sTree.SetCode(ctx, tc.addr, tc.expectedResult, tc.root, txID)
+				require.NoError(t, err)
+				tc.root = newRoot
+
+				err = sTree.FinishBlock(ctx, common.Hash(tc.root), txID)
+				require.NoError(t, err)
+
+				err = sTree.Flush(ctx, common.Hash(newRoot), txID)
+				require.NoError(t, err)
+			},
+		},
+		{
+			name:           "get non-existent code successfully",
+			addr:           common.HexToAddress("0x2"),
+			root:           common.HexToHash("0x0").Bytes(),
+			expectedResult: []byte{},
+			expectedError:  nil,
+			setup: func(t *testing.T, tc *testCase, sTree *StateTree) {
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			tc := tc
+			tc.setup(t, &tc, sTree)
+
+			result, err := sTree.GetCode(ctx, tc.addr, tc.root)
+			require.NoError(t, err)
+
+			if tc.expectedResult != nil || result != nil {
+				require.Equal(t, len(tc.expectedResult), len(result))
+				require.ElementsMatch(t, tc.expectedResult, result)
+			}
+
+			if tc.expectedError != nil || err != nil {
+				require.Equal(t, tc.expectedError, err)
+			}
+		})
+	}
+}
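
The fix to StateTree.GetCode is a single guard: when the code hash stored under the address is all zeroes, the address holds no contract code, so the getProgram round-trip can be skipped. A small stdlib-only sketch of that check (names here are illustrative):

package main

import (
	"fmt"
	"math/big"
)

// hasCode reflects the guard added to GetCode: a zero code hash means the
// address holds no contract code, so an empty slice can be returned without
// querying the program (code) storage.
func hasCode(scCodeHash []byte) bool {
	k := new(big.Int).SetBytes(scCodeHash)
	return k.Cmp(big.NewInt(0)) != 0
}

func main() {
	fmt.Println(hasCode(make([]byte, 32)))               // false: skip the lookup, return []byte{}
	fmt.Println(hasCode([]byte{0xde, 0xad, 0xbe, 0xef})) // true: fetch the code
}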

From 188969bb20b1a15daa21d24d623a72d29dc1d9d8 Mon Sep 17 00:00:00 2001
From: agnusmor <100322135+agnusmor@users.noreply.github.com>
Date: Wed, 31 Jan 2024 15:39:12 +0100
Subject: [PATCH 50/54] fix parentHash when storing a new L2 block (#3171)
 (#3174)

---
 state/interfaces.go             |  1 +
 state/mocks/mock_storage.go     | 60 +++++++++++++++++++++++++++++++++
 state/pgstatestorage/l2block.go | 21 ++++++++++++
 state/transaction.go            |  9 +++--
 4 files changed, 89 insertions(+), 2 deletions(-)

diff --git a/state/interfaces.go b/state/interfaces.go
index 3f6873d3eb..8949275a37 100644
--- a/state/interfaces.go
+++ b/state/interfaces.go
@@ -60,6 +60,7 @@ type storage interface {
 	GetBatchNumberOfL2Block(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (uint64, error)
 	BatchNumberByL2BlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (uint64, error)
 	GetL2BlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*L2Block, error)
+	GetL2BlockHashByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (common.Hash, error)
 	GetL2BlocksByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]L2Block, error)
 	GetLastL2BlockCreatedAt(ctx context.Context, dbTx pgx.Tx) (*time.Time, error)
 	GetTransactionByHash(ctx context.Context, transactionHash common.Hash, dbTx pgx.Tx) (*types.Transaction, error)
diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go
index eac22d7cf5..5f943ec7d3 100644
--- a/state/mocks/mock_storage.go
+++ b/state/mocks/mock_storage.go
@@ -2888,6 +2888,66 @@ func (_c *StorageMock_GetL2BlockByNumber_Call) RunAndReturn(run func(context.Con
 	return _c
 }
 
+// GetL2BlockHashByNumber provides a mock function with given fields: ctx, blockNumber, dbTx
+func (_m *StorageMock) GetL2BlockHashByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (common.Hash, error) {
+	ret := _m.Called(ctx, blockNumber, dbTx)
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetL2BlockHashByNumber")
+	}
+
+	var r0 common.Hash
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (common.Hash, error)); ok {
+		return rf(ctx, blockNumber, dbTx)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) common.Hash); ok {
+		r0 = rf(ctx, blockNumber, dbTx)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(common.Hash)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok {
+		r1 = rf(ctx, blockNumber, dbTx)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// StorageMock_GetL2BlockHashByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL2BlockHashByNumber'
+type StorageMock_GetL2BlockHashByNumber_Call struct {
+	*mock.Call
+}
+
+// GetL2BlockHashByNumber is a helper method to define mock.On call
+//   - ctx context.Context
+//   - blockNumber uint64
+//   - dbTx pgx.Tx
+func (_e *StorageMock_Expecter) GetL2BlockHashByNumber(ctx interface{}, blockNumber interface{}, dbTx interface{}) *StorageMock_GetL2BlockHashByNumber_Call {
+	return &StorageMock_GetL2BlockHashByNumber_Call{Call: _e.mock.On("GetL2BlockHashByNumber", ctx, blockNumber, dbTx)}
+}
+
+func (_c *StorageMock_GetL2BlockHashByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetL2BlockHashByNumber_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx))
+	})
+	return _c
+}
+
+func (_c *StorageMock_GetL2BlockHashByNumber_Call) Return(_a0 common.Hash, _a1 error) *StorageMock_GetL2BlockHashByNumber_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+func (_c *StorageMock_GetL2BlockHashByNumber_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (common.Hash, error)) *StorageMock_GetL2BlockHashByNumber_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // GetL2BlockHashesSince provides a mock function with given fields: ctx, since, dbTx
 func (_m *StorageMock) GetL2BlockHashesSince(ctx context.Context, since time.Time, dbTx pgx.Tx) ([]common.Hash, error) {
 	ret := _m.Called(ctx, since, dbTx)
diff --git a/state/pgstatestorage/l2block.go b/state/pgstatestorage/l2block.go
index a01853931d..fe68559387 100644
--- a/state/pgstatestorage/l2block.go
+++ b/state/pgstatestorage/l2block.go
@@ -424,6 +424,27 @@ func (p *PostgresStorage) GetL2BlockHeaderByNumber(ctx context.Context, blockNum
 	return header, nil
 }
 
+// GetL2BlockHashByNumber gets the block hash by block number
+func (p *PostgresStorage) GetL2BlockHashByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (common.Hash, error) {
+	const getL2BlockHeaderByNumberSQL = "SELECT block_hash FROM state.l2block b WHERE b.block_num = $1"
+
+	blockHash := state.ZeroHash
+
+	var blockHashStr string
+	q := p.getExecQuerier(dbTx)
+	err := q.QueryRow(ctx, getL2BlockHeaderByNumberSQL, blockNumber).Scan(&blockHashStr)
+
+	if errors.Is(err, pgx.ErrNoRows) {
+		return blockHash, state.ErrNotFound
+	} else if err != nil {
+		return blockHash, err
+	}
+
+	blockHash = common.HexToHash(blockHashStr)
+
+	return blockHash, nil
+}
+
 // GetL2BlockHashesSince gets the block hashes added since the provided date
 func (p *PostgresStorage) GetL2BlockHashesSince(ctx context.Context, since time.Time, dbTx pgx.Tx) ([]common.Hash, error) {
 	const getL2BlockHashesSinceSQL = "SELECT block_hash FROM state.l2block WHERE created_at >= $1"
diff --git a/state/transaction.go b/state/transaction.go
index cca22989b6..083fd2e3be 100644
--- a/state/transaction.go
+++ b/state/transaction.go
@@ -203,11 +203,16 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P
 	log.Debugf("storing l2 block %d, txs %d, hash %s", l2Block.BlockNumber, len(l2Block.TransactionResponses), l2Block.BlockHash.String())
 	start := time.Now()
 
+	prevL2BlockHash, err := s.GetL2BlockHashByNumber(ctx, l2Block.BlockNumber-1, dbTx)
+	if err != nil {
+		return err
+	}
+
 	header := &types.Header{
 		Number:     new(big.Int).SetUint64(l2Block.BlockNumber),
-		ParentHash: l2Block.ParentHash,
+		ParentHash: prevL2BlockHash,
 		Coinbase:   l2Block.Coinbase,
-		Root:       l2Block.BlockHash, //BlockHash is the StateRoot in Etrog
+		Root:       l2Block.BlockHash, //BlockHash returned by the executor is the StateRoot in Etrog
 		GasUsed:    l2Block.GasUsed,
 		GasLimit:   s.cfg.MaxCumulativeGasUsed,
 		Time:       l2Block.Timestamp,
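
The parentHash fix makes StoreL2Block derive ParentHash from what is already persisted at blockNumber-1 (via the new GetL2BlockHashByNumber) instead of the executor-reported value, since in Etrog the executor's BlockHash field carries the state root. A hedged, stdlib-only sketch of that lookup flow; the interface and names below are illustrative, not the repository's types:

package main

import (
	"context"
	"errors"
	"fmt"
)

// errNotFound stands in for state.ErrNotFound, which the real
// GetL2BlockHashByNumber returns when pgx reports ErrNoRows.
var errNotFound = errors.New("object not found")

// l2BlockHashReader is an illustrative slice of the storage interface that
// StoreL2Block now depends on.
type l2BlockHashReader interface {
	GetL2BlockHashByNumber(ctx context.Context, blockNumber uint64) (string, error)
}

type memStore map[uint64]string

func (m memStore) GetL2BlockHashByNumber(_ context.Context, n uint64) (string, error) {
	h, ok := m[n]
	if !ok {
		return "", errNotFound
	}
	return h, nil
}

// parentHashFor mirrors the new flow: the header's ParentHash is the stored
// hash of block blockNumber-1, not the executor-reported ParentHash.
func parentHashFor(ctx context.Context, s l2BlockHashReader, blockNumber uint64) (string, error) {
	return s.GetL2BlockHashByNumber(ctx, blockNumber-1)
}

func main() {
	store := memStore{41: "0x1f0c...e2"}
	fmt.Println(parentHashFor(context.Background(), store, 42)) // 0x1f0c...e2 <nil>
	_, err := parentHashFor(context.Background(), store, 100)
	fmt.Println(errors.Is(err, errNotFound)) // true: previous block not persisted yet
}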

From aed508822134b3f618cb4f37086f42b2bc60f0f7 Mon Sep 17 00:00:00 2001
From: Alonso Rodriguez 
Date: Wed, 31 Jan 2024 18:11:41 +0100
Subject: [PATCH 51/54] new geth version v1.13.11 (#3179)

* new geth version v1.13.11
---
 etherman/etherman_test.go                     | 33 ++++-----
 etherman/simulated.go                         | 39 +++++-----
 go.mod                                        | 36 ++++++----
 go.sum                                        | 72 ++++++++++---------
 jsonrpc/endpoints_eth_test.go                 | 11 +--
 jsonrpc/endpoints_zkevm_test.go               | 12 ++--
 jsonrpc/server_test.go                        |  3 +-
 state/genesis.go                              |  3 +-
 state/pgstatestorage/pgstatestorage_test.go   |  9 ++-
 .../forkid_dragonfruit/dragonfruit_test.go    |  3 +-
 .../forkid_independent/independent_test.go    |  3 +-
 state/transaction.go                          |  6 +-
 12 files changed, 132 insertions(+), 98 deletions(-)

diff --git a/etherman/etherman_test.go b/etherman/etherman_test.go
index a321adc70c..653035e331 100644
--- a/etherman/etherman_test.go
+++ b/etherman/etherman_test.go
@@ -18,10 +18,10 @@ import (
 	"github.com/0xPolygonHermez/zkevm-node/test/constants"
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
-	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethclient/simulated"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -38,7 +38,7 @@ func init() {
 }
 
 // This function prepare the blockchain, the wallet with funds and deploy the smc
-func newTestingEnv() (ethman *Client, ethBackend *backends.SimulatedBackend, auth *bind.TransactOpts, polAddr common.Address, br *polygonzkevmbridge.Polygonzkevmbridge) {
+func newTestingEnv() (ethman *Client, ethBackend *simulated.Backend, auth *bind.TransactOpts, polAddr common.Address, br *polygonzkevmbridge.Polygonzkevmbridge) {
 	privateKey, err := crypto.GenerateKey()
 	if err != nil {
 		log.Fatal(err)
@@ -82,7 +82,7 @@ func TestGEREvent(t *testing.T) {
 	blocks, _, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
 	require.NoError(t, err)
 	t.Logf("Blocks: %+v", blocks)
-	assert.Equal(t, uint64(5), blocks[0].L1InfoTree[0].BlockNumber)
+	assert.Equal(t, uint64(8), blocks[0].L1InfoTree[0].BlockNumber)
 	assert.NotEqual(t, common.Hash{}, blocks[0].L1InfoTree[0].MainnetExitRoot)
 	assert.Equal(t, common.Hash{}, blocks[0].L1InfoTree[0].RollupExitRoot)
 }
@@ -114,8 +114,8 @@ func TestForcedBatchEvent(t *testing.T) {
 	blocks, _, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
 	require.NoError(t, err)
 	t.Logf("Blocks: %+v", blocks)
-	assert.Equal(t, uint64(5), blocks[0].BlockNumber)
-	assert.Equal(t, uint64(5), blocks[0].ForcedBatches[0].BlockNumber)
+	assert.Equal(t, uint64(8), blocks[0].BlockNumber)
+	assert.Equal(t, uint64(8), blocks[0].ForcedBatches[0].BlockNumber)
 	assert.NotEqual(t, common.Hash{}, blocks[0].ForcedBatches[0].GlobalExitRoot)
 	assert.NotEqual(t, time.Time{}, blocks[0].ForcedBatches[0].ForcedAt)
 	assert.Equal(t, uint64(1), blocks[0].ForcedBatches[0].ForcedBatchNumber)
@@ -219,7 +219,7 @@ func TestVerifyBatchEvent(t *testing.T) {
 	blocks, order, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
 	require.NoError(t, err)
 	t.Logf("Blocks: %+v, \nOrder: %+v", blocks, order)
-	assert.Equal(t, uint64(6), blocks[1].BlockNumber)
+	assert.Equal(t, uint64(9), blocks[1].BlockNumber)
 	assert.Equal(t, uint64(1), blocks[1].VerifiedBatches[0].BatchNumber)
 	assert.NotEqual(t, common.Address{}, blocks[1].VerifiedBatches[0].Aggregator)
 	assert.NotEqual(t, common.Hash{}, blocks[1].VerifiedBatches[0].TxHash)
@@ -246,6 +246,7 @@ func TestSequenceForceBatchesEvent(t *testing.T) {
 	_, err = etherman.ZkEVM.ForceBatch(auth, data, amount)
 	require.NoError(t, err)
 	ethBackend.Commit()
+	ethBackend.Commit()
 
 	err = ethBackend.AdjustTime((24*7 + 1) * time.Hour)
 	require.NoError(t, err)
@@ -281,7 +282,7 @@ func TestSequenceForceBatchesEvent(t *testing.T) {
 	blocks, order, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
 	require.NoError(t, err)
 	t.Logf("Blocks: %+v", blocks)
-	assert.Equal(t, uint64(7), blocks[1].BlockNumber)
+	assert.Equal(t, uint64(12), blocks[1].BlockNumber)
 	assert.Equal(t, uint64(2), blocks[1].SequencedForceBatches[0][0].BatchNumber)
 	assert.Equal(t, forcedGer, common.BytesToHash(blocks[1].SequencedForceBatches[0][0].ForcedGlobalExitRoot[:]))
 	assert.Equal(t, forcedTimestamp, blocks[1].SequencedForceBatches[0][0].ForcedTimestamp)
@@ -341,15 +342,15 @@ func TestGasPrice(t *testing.T) {
 	etherman.GasProviders.Providers = []ethereum.GasPricer{etherman.EthClient, etherscanM, ethGasStationM}
 	ctx := context.Background()
 
-	etherscanM.On("SuggestGasPrice", ctx).Return(big.NewInt(765625003), nil)
-	ethGasStationM.On("SuggestGasPrice", ctx).Return(big.NewInt(765625002), nil)
+	etherscanM.On("SuggestGasPrice", ctx).Return(big.NewInt(1448795322), nil)
+	ethGasStationM.On("SuggestGasPrice", ctx).Return(big.NewInt(1448795321), nil)
 	gp := etherman.GetL1GasPrice(ctx)
-	assert.Equal(t, big.NewInt(765625003), gp)
+	assert.Equal(t, big.NewInt(1448795322), gp)
 
 	etherman.GasProviders.Providers = []ethereum.GasPricer{etherman.EthClient, ethGasStationM}
 
 	gp = etherman.GetL1GasPrice(ctx)
-	assert.Equal(t, big.NewInt(765625002), gp)
+	assert.Equal(t, big.NewInt(1448795321), gp)
 }
 
 func TestErrorEthGasStationPrice(t *testing.T) {
@@ -361,14 +362,14 @@ func TestErrorEthGasStationPrice(t *testing.T) {
 
 	ethGasStationM.On("SuggestGasPrice", ctx).Return(big.NewInt(0), fmt.Errorf("error getting gasPrice from ethGasStation"))
 	gp := etherman.GetL1GasPrice(ctx)
-	assert.Equal(t, big.NewInt(512908937), gp)
+	assert.Equal(t, big.NewInt(1392695906), gp)
 
 	etherscanM := new(etherscanMock)
 	etherman.GasProviders.Providers = []ethereum.GasPricer{etherman.EthClient, etherscanM, ethGasStationM}
 
-	etherscanM.On("SuggestGasPrice", ctx).Return(big.NewInt(765625003), nil)
+	etherscanM.On("SuggestGasPrice", ctx).Return(big.NewInt(1448795322), nil)
 	gp = etherman.GetL1GasPrice(ctx)
-	assert.Equal(t, big.NewInt(765625003), gp)
+	assert.Equal(t, big.NewInt(1448795322), gp)
 }
 
 func TestErrorEtherScanPrice(t *testing.T) {
@@ -380,9 +381,9 @@ func TestErrorEtherScanPrice(t *testing.T) {
 	ctx := context.Background()
 
 	etherscanM.On("SuggestGasPrice", ctx).Return(big.NewInt(0), fmt.Errorf("error getting gasPrice from etherscan"))
-	ethGasStationM.On("SuggestGasPrice", ctx).Return(big.NewInt(765625002), nil)
+	ethGasStationM.On("SuggestGasPrice", ctx).Return(big.NewInt(1448795321), nil)
 	gp := etherman.GetL1GasPrice(ctx)
-	assert.Equal(t, big.NewInt(765625002), gp)
+	assert.Equal(t, big.NewInt(1448795321), gp)
 }
 
 func TestGetForks(t *testing.T) {
diff --git a/etherman/simulated.go b/etherman/simulated.go
index 11604fb2f0..3e1d0a54d2 100644
--- a/etherman/simulated.go
+++ b/etherman/simulated.go
@@ -15,15 +15,15 @@ import (
 	"github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/proxy"
 	"github.com/0xPolygonHermez/zkevm-node/log"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
-	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethclient/simulated"
 )
 
 // NewSimulatedEtherman creates an etherman that uses a simulated blockchain. It's important to notice that the ChainID of the auth
 // must be 1337. The address that holds the auth will have an initial balance of 10 ETH
-func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (etherman *Client, ethBackend *backends.SimulatedBackend, polAddr common.Address, br *polygonzkevmbridge.Polygonzkevmbridge, err error) {
+func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (*Client, *simulated.Backend, common.Address, *polygonzkevmbridge.Polygonzkevmbridge, error) {
 	if auth == nil {
 		// read only client
 		return &Client{}, nil, common.Address{}, nil, nil
@@ -37,21 +37,21 @@ func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (etherman *Client
 		},
 	}
 	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
-	client := backends.NewSimulatedBackend(genesisAlloc, blockGasLimit)
+	client := simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
 
 	// Deploy contracts
 	const polDecimalPlaces = 18
 	totalSupply, _ := new(big.Int).SetString("10000000000000000000000000000", 10) //nolint:gomnd
-	polAddr, _, polContract, err := pol.DeployPol(auth, client, "Pol Token", "POL", polDecimalPlaces, totalSupply)
+	polAddr, _, polContract, err := pol.DeployPol(auth, client.Client(), "Pol Token", "POL", polDecimalPlaces, totalSupply)
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
-	rollupVerifierAddr, _, _, err := mockverifier.DeployMockverifier(auth, client)
+	rollupVerifierAddr, _, _, err := mockverifier.DeployMockverifier(auth, client.Client())
 	if err != nil {
 		return nil, nil, common.Address{}, nil, err
 	}
-	nonce, err := client.PendingNonceAt(context.TODO(), auth.From)
+	nonce, err := client.Client().PendingNonceAt(context.TODO(), auth.From)
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
@@ -61,28 +61,29 @@ func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (etherman *Client
 	const posRollupManager = 4
 	calculatedRollupManagerAddr := crypto.CreateAddress(auth.From, nonce+posRollupManager)
 	genesis := common.HexToHash("0xfd3434cd8f67e59d73488a2b8da242dd1f02849ea5dd99f0ca22c836c3d5b4a9") // Random value. Needs to be different to 0x0
-	exitManagerAddr, _, globalExitRoot, err := polygonzkevmglobalexitroot.DeployPolygonzkevmglobalexitroot(auth, client, calculatedRollupManagerAddr, calculatedBridgeAddr)
+	exitManagerAddr, _, globalExitRoot, err := polygonzkevmglobalexitroot.DeployPolygonzkevmglobalexitroot(auth, client.Client(), calculatedRollupManagerAddr, calculatedBridgeAddr)
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
-	implementationBridgeAddr, _, _, err := polygonzkevmbridge.DeployPolygonzkevmbridge(auth, client)
+	implementationBridgeAddr, _, _, err := polygonzkevmbridge.DeployPolygonzkevmbridge(auth, client.Client())
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
 
-	implementationMockRollupManagerAddr, _, _, err := mockpolygonrollupmanager.DeployMockpolygonrollupmanager(auth, client, exitManagerAddr, polAddr, calculatedBridgeAddr)
+	implementationMockRollupManagerAddr, _, _, err := mockpolygonrollupmanager.DeployMockpolygonrollupmanager(auth, client.Client(), exitManagerAddr, polAddr, calculatedBridgeAddr)
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
-	bridgeAddr, _, _, err := proxy.DeployProxy(auth, client, implementationBridgeAddr, implementationBridgeAddr, []byte{})
+	client.Commit()
+	bridgeAddr, _, _, err := proxy.DeployProxy(auth, client.Client(), implementationBridgeAddr, implementationBridgeAddr, []byte{})
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
-	mockRollupManagerAddr, _, _, err := proxy.DeployProxy(auth, client, implementationMockRollupManagerAddr, implementationMockRollupManagerAddr, []byte{})
+	mockRollupManagerAddr, _, _, err := proxy.DeployProxy(auth, client.Client(), implementationMockRollupManagerAddr, implementationMockRollupManagerAddr, []byte{})
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
@@ -91,31 +92,34 @@ func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (etherman *Client
 		return nil, nil, common.Address{}, nil, fmt.Errorf("RollupManagerAddr (%s) is different from the expected contract address (%s)",
 			mockRollupManagerAddr.String(), calculatedRollupManagerAddr.String())
 	}
-	initZkevmAddr, _, _, err := polygonzkevm.DeployPolygonzkevm(auth, client, exitManagerAddr, polAddr, bridgeAddr, mockRollupManagerAddr)
+	initZkevmAddr, _, _, err := polygonzkevm.DeployPolygonzkevm(auth, client.Client(), exitManagerAddr, polAddr, bridgeAddr, mockRollupManagerAddr)
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
-	mockRollupManager, err := mockpolygonrollupmanager.NewMockpolygonrollupmanager(mockRollupManagerAddr, client)
+	mockRollupManager, err := mockpolygonrollupmanager.NewMockpolygonrollupmanager(mockRollupManagerAddr, client.Client())
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
-	br, err = polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, client)
+	br, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, client.Client())
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
+	client.Commit()
 	_, err = br.Initialize(auth, 0, common.Address{}, 0, exitManagerAddr, mockRollupManagerAddr, []byte{})
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
+	client.Commit()
 	_, err = mockRollupManager.Initialize(auth, auth.From, 10000, 10000, auth.From, auth.From, auth.From, common.Address{}, common.Address{}, 0, 0) //nolint:gomnd
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
+	client.Commit()
 	_, err = mockRollupManager.AddNewRollupType(auth, initZkevmAddr, rollupVerifierAddr, 6, 0, genesis, "PolygonZkEvm Rollup") //nolint:gomnd
 	if err != nil {
 		log.Error("error: ", err)
@@ -153,13 +157,13 @@ func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (etherman *Client
 			bridgeAddr.String(), calculatedBridgeAddr.String())
 	}
 
-	rollupManager, err := polygonrollupmanager.NewPolygonrollupmanager(mockRollupManagerAddr, client)
+	rollupManager, err := polygonrollupmanager.NewPolygonrollupmanager(mockRollupManagerAddr, client.Client())
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
 	}
 
-	trueZkevm, err := polygonzkevm.NewPolygonzkevm(zkevmAddr, client) //nolint
+	trueZkevm, err := polygonzkevm.NewPolygonzkevm(zkevmAddr, client.Client()) //nolint
 	if err != nil {
 		log.Error("error: ", err)
 		return nil, nil, common.Address{}, nil, err
@@ -185,9 +189,8 @@ func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (etherman *Client
 	}
 	client.Commit()
 
-	client.Commit()
 	c := &Client{
-		EthClient:             client,
+		EthClient:             client.Client(),
 		ZkEVM:                 trueZkevm,
 		RollupManager:         rollupManager,
 		Pol:                   polContract,
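
With geth v1.13.11 the old accounts/abi/bind/backends.SimulatedBackend is replaced by ethclient/simulated.Backend: contract bindings and RPC-style calls go through backend.Client(), and each Commit() mines a block so pending deployments become visible, which is why the hunk above adds extra Commit() calls between deployments. Below is a hedged sketch of that pattern under the v1.13.11 API assumed by this patch; the key, balance, and gas-limit values are illustrative.

package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// The simulated chain ID is 1337, as noted in NewSimulatedEtherman.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		log.Fatal(err)
	}

	balance, _ := new(big.Int).SetString("10000000000000000000", 10) // 10 ETH, illustrative
	backend := simulated.NewBackend(
		core.GenesisAlloc{auth.From: {Balance: balance}},
		simulated.WithBlockGasLimit(999999999999999999),
	)
	defer backend.Close()

	// Bindings and queries now take backend.Client() instead of the backend itself.
	nonce, err := backend.Client().PendingNonceAt(context.TODO(), auth.From)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pending nonce:", nonce)

	// Commit mines a block; transactions sent before it stay pending until then.
	backend.Commit()
}
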
diff --git a/go.mod b/go.mod
index 01c401f986..afb6ff9d5a 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ require (
 	github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18
 	github.com/didip/tollbooth/v6 v6.1.2
 	github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127
-	github.com/ethereum/go-ethereum v1.13.2
+	github.com/ethereum/go-ethereum v1.13.11
 	github.com/go-git/go-billy/v5 v5.5.0
 	github.com/go-git/go-git/v5 v5.11.0
 	github.com/gobuffalo/packr/v2 v2.8.3
@@ -42,48 +42,54 @@ require (
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
 	github.com/StackExchange/wmi v1.2.1 // indirect
-	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
+	github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
 	github.com/bahlo/generic-list-go v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bits-and-blooms/bitset v1.7.0 // indirect
+	github.com/bits-and-blooms/bitset v1.10.0 // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
 	github.com/buger/jsonparser v1.1.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cloudflare/circl v1.3.3 // indirect
 	github.com/cockroachdb/errors v1.9.1 // indirect
 	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
-	github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 // indirect
+	github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect
 	github.com/cockroachdb/redact v1.1.3 // indirect
+	github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
 	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.12.0 // indirect
+	github.com/consensys/gnark-crypto v0.12.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
-	github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect
+	github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect
+	github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/deckarep/golang-set/v2 v2.1.0 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
 	github.com/dlclark/regexp2 v1.7.0 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
-	github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
+	github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
+	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
+	github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
 	github.com/getsentry/sentry-go v0.18.0 // indirect
 	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
-	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/go-pkgz/expirable-cache v0.0.3 // indirect
 	github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
-	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/gobuffalo/logger v1.0.7 // indirect
 	github.com/gobuffalo/packd v1.0.2 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
+	github.com/hashicorp/go-bexpr v0.1.10 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
@@ -108,8 +114,9 @@ require (
 	github.com/markbates/safe v1.0.1 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/mattn/go-runewidth v0.0.9 // indirect
+	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
+	github.com/mitchellh/pointerstructure v1.2.0 // indirect
 	github.com/mmcloughlin/addchain v0.4.0 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
@@ -117,7 +124,9 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/rogpeppe/go-internal v1.11.0 // indirect
+	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sagikazarmark/locafero v0.3.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@@ -141,14 +150,15 @@ require (
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	go.uber.org/multierr v1.10.0 // indirect
-	golang.org/x/mod v0.12.0 // indirect
+	golang.org/x/mod v0.14.0 // indirect
 	golang.org/x/sys v0.16.0 // indirect
 	golang.org/x/term v0.16.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.13.0 // indirect
+	golang.org/x/tools v0.15.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	rsc.io/tmplfunc v0.0.3 // indirect
 )
@@ -163,5 +173,5 @@ require (
 	github.com/fatih/color v1.16.0
 	github.com/joho/godotenv v1.5.1
 	github.com/prometheus/client_golang v1.18.0
-	golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
 )
diff --git a/go.sum b/go.sum
index e11cd6f36d..579d0d5b6b 100644
--- a/go.sum
+++ b/go.sum
@@ -45,6 +45,8 @@ github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOv
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
 github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
@@ -64,8 +66,8 @@ github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjA
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
 github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
 github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
-github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
-github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
+github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
+github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@@ -84,8 +86,8 @@ github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xW
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
-github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
+github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c=
 github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
@@ -100,7 +102,6 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -118,22 +119,24 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
 github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
-github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o=
-github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
+github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
 github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
 github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
 github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
 github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
 github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
-github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w=
-github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s=
+github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A=
+github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo=
 github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
 github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
 github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
 github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.12.0 h1:1OnSpOykNkUIBIBJKdhwy2p0JlW5o+Az02ICzZmvvdg=
-github.com/consensys/gnark-crypto v0.12.0/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
+github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
+github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
 github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b h1:pik3LX++5O3UiNWv45wfP/WT81l7ukBJzd3uUiifbSU=
 github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -147,8 +150,10 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A=
-github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
+github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ=
+github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
+github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
+github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
@@ -193,10 +198,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=
-github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
-github.com/ethereum/go-ethereum v1.13.2 h1:g9mCpfPWqCA1OL4e6C98PeVttb0HadfBRuKTGvMnOvw=
-github.com/ethereum/go-ethereum v1.13.2/go.mod h1:gkQ5Ygi64ZBh9M/4iXY1R8WqoNCx1Ey0CkYn2BD4/fw=
+github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
+github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/go-ethereum v1.13.11 h1:b51Dsm+rEg7anFRUMGB8hODXHvNfcRKzz9vcj8wSdUs=
+github.com/ethereum/go-ethereum v1.13.11/go.mod h1:gFtlVORuUcT+UUIcJ/veCNjkuOSujCi338uSHJrYAew=
 github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
@@ -214,6 +219,8 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS
 github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
+github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE=
+github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc=
 github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
 github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
 github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
@@ -243,8 +250,8 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
-github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
 github.com/go-pg/pg/v10 v10.11.0 h1:CMKJqLgTrfpE/aOVeLdybezR2om071Vh38OLZjsyMI0=
 github.com/go-pg/pg/v10 v10.11.0/go.mod h1:4BpHRoxE61y4Onpof3x1a2SQvi9c+q1dJnrNdMjsroA=
 github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU=
@@ -256,8 +263,6 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
-github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs=
 github.com/gobuffalo/logger v1.0.7 h1:LTLwWelETXDYyqF/ASf0nxaIcdEOIJNxRokPcfI/xbU=
@@ -281,10 +286,9 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
-github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
 github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
-github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
-github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -318,7 +322,6 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -565,8 +568,9 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
+github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=
 github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
@@ -655,6 +659,8 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne
 github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -874,8 +880,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
+golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
+golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -903,8 +909,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1039,7 +1045,6 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1058,6 +1063,7 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1160,8 +1166,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
-golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
+golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
 golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/jsonrpc/endpoints_eth_test.go b/jsonrpc/endpoints_eth_test.go
index e3dc38e9d8..64e7997978 100644
--- a/jsonrpc/endpoints_eth_test.go
+++ b/jsonrpc/endpoints_eth_test.go
@@ -996,7 +996,7 @@ func TestGetL2BlockByHash(t *testing.T) {
 		ExpectedError  interface{}
 		SetupMocks     func(*mocksWrapper, *testCase)
 	}
-
+	st := trie.NewStackTrie(nil)
 	testCases := []testCase{
 		{
 			Name:           "Block not found",
@@ -1049,7 +1049,7 @@ func TestGetL2BlockByHash(t *testing.T) {
 				[]*ethTypes.Transaction{ethTypes.NewTransaction(1, common.Address{}, big.NewInt(1), 1, big.NewInt(1), []byte{})},
 				nil,
 				[]*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))},
-				&trie.StackTrie{},
+				st,
 			),
 			ExpectedError: nil,
 			SetupMocks: func(m *mocksWrapper, tc *testCase) {
@@ -1057,7 +1057,7 @@ func TestGetL2BlockByHash(t *testing.T) {
 				for _, uncle := range tc.ExpectedResult.Uncles() {
 					uncles = append(uncles, state.NewL2Header(uncle))
 				}
-				block := state.NewL2Block(state.NewL2Header(tc.ExpectedResult.Header()), tc.ExpectedResult.Transactions(), uncles, []*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))}, &trie.StackTrie{})
+				block := state.NewL2Block(state.NewL2Header(tc.ExpectedResult.Header()), tc.ExpectedResult.Transactions(), uncles, []*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))}, st)
 
 				m.DbTx.
 					On("Commit", context.Background()).
@@ -1182,7 +1182,8 @@ func TestGetL2BlockByNumber(t *testing.T) {
 	l2Header := state.NewL2Header(header)
 	l2Header.GlobalExitRoot = common.HexToHash("0x16")
 	l2Header.BlockInfoRoot = common.HexToHash("0x17")
-	l2Block := state.NewL2Block(l2Header, signedTransactions, uncles, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	l2Block := state.NewL2Block(l2Header, signedTransactions, uncles, receipts, st)
 
 	for _, receipt := range receipts {
 		receipt.BlockHash = l2Block.Hash()
@@ -1410,7 +1411,7 @@ func TestGetL2BlockByNumber(t *testing.T) {
 			SetupMocks: func(m *mocksWrapper, tc *testCase) {
 				lastBlockHeader := &ethTypes.Header{Number: big.NewInt(0).SetUint64(uint64(rpcBlock.Number))}
 				lastBlockHeader.Number.Sub(lastBlockHeader.Number, big.NewInt(1))
-				lastBlock := state.NewL2Block(state.NewL2Header(lastBlockHeader), nil, nil, nil, &trie.StackTrie{})
+				lastBlock := state.NewL2Block(state.NewL2Header(lastBlockHeader), nil, nil, nil, st)
 
 				tc.ExpectedResult = &types.Block{}
 				tc.ExpectedResult.ParentHash = lastBlock.Hash()
diff --git a/jsonrpc/endpoints_zkevm_test.go b/jsonrpc/endpoints_zkevm_test.go
index 9b56552dca..9983455d67 100644
--- a/jsonrpc/endpoints_zkevm_test.go
+++ b/jsonrpc/endpoints_zkevm_test.go
@@ -1210,6 +1210,7 @@ func TestGetL2FullBlockByHash(t *testing.T) {
 		SetupMocks     func(*mocksWrapper, *testCase)
 	}
 
+	st := trie.NewStackTrie(nil)
 	testCases := []testCase{
 		{
 			Name:           "Block not found",
@@ -1262,7 +1263,7 @@ func TestGetL2FullBlockByHash(t *testing.T) {
 				[]*ethTypes.Transaction{ethTypes.NewTransaction(1, common.Address{}, big.NewInt(1), 1, big.NewInt(1), []byte{})},
 				nil,
 				[]*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))},
-				&trie.StackTrie{},
+				st,
 			),
 			ExpectedError: nil,
 			SetupMocks: func(m *mocksWrapper, tc *testCase) {
@@ -1270,7 +1271,8 @@ func TestGetL2FullBlockByHash(t *testing.T) {
 				for _, uncle := range tc.ExpectedResult.Uncles() {
 					uncles = append(uncles, state.NewL2Header(uncle))
 				}
-				block := state.NewL2Block(state.NewL2Header(tc.ExpectedResult.Header()), tc.ExpectedResult.Transactions(), uncles, []*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))}, &trie.StackTrie{})
+				st := trie.NewStackTrie(nil)
+				block := state.NewL2Block(state.NewL2Header(tc.ExpectedResult.Header()), tc.ExpectedResult.Transactions(), uncles, []*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))}, st)
 
 				m.DbTx.
 					On("Commit", context.Background()).
@@ -1402,7 +1404,8 @@ func TestGetL2FullBlockByNumber(t *testing.T) {
 	l2Header := state.NewL2Header(header)
 	l2Header.GlobalExitRoot = common.HexToHash("0x16")
 	l2Header.BlockInfoRoot = common.HexToHash("0x17")
-	l2Block := state.NewL2Block(l2Header, signedTransactions, uncles, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	l2Block := state.NewL2Block(l2Header, signedTransactions, uncles, receipts, st)
 
 	for _, receipt := range receipts {
 		receipt.BlockHash = l2Block.Hash()
@@ -1620,7 +1623,8 @@ func TestGetL2FullBlockByNumber(t *testing.T) {
 			SetupMocks: func(m *mocksWrapper, tc *testCase) {
 				lastBlockHeader := &ethTypes.Header{Number: big.NewInt(0).SetUint64(uint64(rpcBlock.Number))}
 				lastBlockHeader.Number.Sub(lastBlockHeader.Number, big.NewInt(1))
-				lastBlock := state.NewL2Block(state.NewL2Header(lastBlockHeader), nil, nil, nil, &trie.StackTrie{})
+				st := trie.NewStackTrie(nil)
+				lastBlock := state.NewL2Block(state.NewL2Header(lastBlockHeader), nil, nil, nil, st)
 
 				tc.ExpectedResult = &types.Block{}
 				tc.ExpectedResult.ParentHash = lastBlock.Hash()
diff --git a/jsonrpc/server_test.go b/jsonrpc/server_test.go
index db5715fee2..ffb60f8717 100644
--- a/jsonrpc/server_test.go
+++ b/jsonrpc/server_test.go
@@ -228,12 +228,13 @@ func TestBatchRequests(t *testing.T) {
 		SetupMocks           func(m *mocksWrapper, tc testCase)
 	}
 
+	st := trie.NewStackTrie(nil)
 	block := state.NewL2Block(
 		state.NewL2Header(&ethTypes.Header{Number: big.NewInt(2), UncleHash: ethTypes.EmptyUncleHash, Root: ethTypes.EmptyRootHash}),
 		[]*ethTypes.Transaction{ethTypes.NewTransaction(1, common.Address{}, big.NewInt(1), 1, big.NewInt(1), []byte{})},
 		nil,
 		[]*ethTypes.Receipt{ethTypes.NewReceipt([]byte{}, false, uint64(0))},
-		&trie.StackTrie{},
+		st,
 	)
 
 	testCases := []testCase{
diff --git a/state/genesis.go b/state/genesis.go
index 5abc927c49..52cbb35812 100644
--- a/state/genesis.go
+++ b/state/genesis.go
@@ -187,7 +187,8 @@ func (s *State) SetGenesis(ctx context.Context, block Block, genesis Genesis, m
 	log.Info("Genesis root ", rootHex)
 
 	receipts := []*types.Receipt{}
-	l2Block := NewL2Block(header, []*types.Transaction{}, []*L2Header{}, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	l2Block := NewL2Block(header, []*types.Transaction{}, []*L2Header{}, receipts, st)
 	l2Block.ReceivedAt = block.ReceivedAt
 
 	// Sanity check
diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go
index b37689e709..9407701b17 100644
--- a/state/pgstatestorage/pgstatestorage_test.go
+++ b/state/pgstatestorage/pgstatestorage_test.go
@@ -190,7 +190,8 @@ func TestGetBatchByL2BlockNumber(t *testing.T) {
 	receipts := []*types.Receipt{receipt}
 
 	// Create block to be able to calculate its hash
-	l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, st)
 	receipt.BlockHash = l2Block.Hash()
 
 	numTxs := len(transactions)
@@ -933,7 +934,8 @@ func TestGetLogs(t *testing.T) {
 			Time:       uint64(time.Unix()),
 		})
 
-		l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, &trie.StackTrie{})
+		st := trie.NewStackTrie(nil)
+		l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, st)
 		for _, receipt := range receipts {
 			receipt.BlockHash = l2Block.Hash()
 		}
@@ -1063,7 +1065,8 @@ func TestGetNativeBlockHashesInRange(t *testing.T) {
 			Time:       uint64(time.Unix()),
 		})
 
-		l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, &trie.StackTrie{})
+		st := trie.NewStackTrie(nil)
+		l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, st)
 		for _, receipt := range receipts {
 			receipt.BlockHash = l2Block.Hash()
 		}
diff --git a/state/test/forkid_dragonfruit/dragonfruit_test.go b/state/test/forkid_dragonfruit/dragonfruit_test.go
index 791804137a..64adbf042f 100644
--- a/state/test/forkid_dragonfruit/dragonfruit_test.go
+++ b/state/test/forkid_dragonfruit/dragonfruit_test.go
@@ -1490,7 +1490,8 @@ func TestExecutorRevert(t *testing.T) {
 	transactions := []*types.Transaction{signedTx0, signedTx1}
 
 	// Create block to be able to calculate its hash
-	l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, st)
 	l2Block.ReceivedAt = time.Now()
 
 	receipt.BlockHash = l2Block.Hash()
diff --git a/state/test/forkid_independent/independent_test.go b/state/test/forkid_independent/independent_test.go
index a7dec58a10..7b2eb0bdf1 100644
--- a/state/test/forkid_independent/independent_test.go
+++ b/state/test/forkid_independent/independent_test.go
@@ -645,7 +645,8 @@ func TestAddGetL2Block(t *testing.T) {
 	receipts := []*types.Receipt{receipt}
 
 	// Create block to be able to calculate its hash
-	l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, st)
 	l2Block.ReceivedAt = time
 
 	receipt.BlockHash = l2Block.Hash()
diff --git a/state/transaction.go b/state/transaction.go
index 083fd2e3be..11c223a7f8 100644
--- a/state/transaction.go
+++ b/state/transaction.go
@@ -174,7 +174,8 @@ func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, proce
 			receipts := []*types.Receipt{receipt}
 
 			// Create l2Block to be able to calculate its hash
-			l2Block := NewL2Block(header, transactions, []*L2Header{}, receipts, &trie.StackTrie{})
+			st := trie.NewStackTrie(nil)
+			l2Block := NewL2Block(header, transactions, []*L2Header{}, receipts, st)
 			l2Block.ReceivedAt = processingContext.Timestamp
 
 			receipt.BlockHash = l2Block.Hash()
@@ -640,7 +641,8 @@ func (s *State) StoreTransaction(ctx context.Context, batchNumber uint64, proces
 	receipts := []*types.Receipt{receipt}
 
 	// Create l2Block to be able to calculate its hash
-	l2Block := NewL2Block(header, transactions, []*L2Header{}, receipts, &trie.StackTrie{})
+	st := trie.NewStackTrie(nil)
+	l2Block := NewL2Block(header, transactions, []*L2Header{}, receipts, st)
 	l2Block.ReceivedAt = time.Unix(int64(timestamp), 0)
 
 	receipt.BlockHash = l2Block.Hash()
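
The hunks above repeat one mechanical change across the tree: every place that derives a block hash now obtains its hasher from trie.NewStackTrie(nil) instead of the zero-value literal &trie.StackTrie{}, presumably because a zero-value StackTrie is not a properly initialised hasher under the go-ethereum v1.13.x release pulled in by this series. A minimal sketch of the resulting call pattern (the helper name buildL2Block is illustrative, not part of the patch):

package sketch

import (
	"github.com/0xPolygonHermez/zkevm-node/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// buildL2Block mirrors the pattern applied in the hunks above: the StackTrie
// used to hash transactions and receipts comes from its constructor rather
// than from a struct literal.
func buildL2Block(header *state.L2Header, txs []*types.Transaction, receipts []*types.Receipt) *state.L2Block {
	st := trie.NewStackTrie(nil) // previously &trie.StackTrie{}
	return state.NewL2Block(header, txs, []*state.L2Header{}, receipts, st)
}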

From 2f51cda3c830a7a69438901b757b7822db58053f Mon Sep 17 00:00:00 2001
From: Alonso Rodriguez 
Date: Thu, 1 Feb 2024 08:00:06 +0100
Subject: [PATCH 52/54] Fix/rom error (#3177) (#3178)

* fix rom error

* linter

* fix
---
 state/batchV2.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/state/batchV2.go b/state/batchV2.go
index 0079491ea1..e6e3e941bd 100644
--- a/state/batchV2.go
+++ b/state/batchV2.go
@@ -378,7 +378,7 @@ func (s *State) ProcessAndStoreClosedBatchV2(ctx context.Context, processingCtx
 		return common.Hash{}, noFlushID, noProverID, err
 	}
 	processed, err := s.processBatchV2(ctx, &processingCtx, caller, dbTx)
-	if err != nil {
+	if err != nil && processed.ErrorRom == executor.RomError_ROM_ERROR_NO_ERROR {
 		log.Errorf("%s error processBatchV2: %v", debugPrefix, err)
 		return common.Hash{}, noFlushID, noProverID, err
 	}
@@ -392,7 +392,7 @@ func (s *State) ProcessAndStoreClosedBatchV2(ctx context.Context, processingCtx
 		log.Errorf("%s error isRomOOCError: %v", debugPrefix, err)
 	}
 
-	if len(processedBatch.BlockResponses) > 0 && !processedBatch.IsRomOOCError {
+	if len(processedBatch.BlockResponses) > 0 && !processedBatch.IsRomOOCError && processedBatch.RomError_V2 == nil {
 		for _, blockResponse := range processedBatch.BlockResponses {
 			err = s.StoreL2Block(ctx, processingCtx.BatchNumber, blockResponse, nil, dbTx)
 			if err != nil {

From cb2424add59b860cd31f96f10586fe8cd71c7271 Mon Sep 17 00:00:00 2001
From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com>
Date: Thu, 1 Feb 2024 14:31:18 +0100
Subject: [PATCH 53/54] synchronizer: update fromTrusted fix cache update
 (#3169) (#3183)

* fix cache update and sync from Trusted of closed batch
---
 .../l2_shared/processor_trusted_batch_sync.go | 16 ++---
 .../processor_trusted_batch_sync_test.go      | 65 +++++++++++++++++++
 .../l2_sync/l2_shared/trusted_state.go        | 16 +++++
 .../executor_trusted_batch_sync.go            |  8 +--
 .../executor_trusted_batch_sync_test.go       |  3 +-
 5 files changed, 95 insertions(+), 13 deletions(-)

diff --git a/synchronizer/l2_sync/l2_shared/processor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_shared/processor_trusted_batch_sync.go
index aa012ea8b0..e9f85b8a63 100644
--- a/synchronizer/l2_sync/l2_shared/processor_trusted_batch_sync.go
+++ b/synchronizer/l2_sync/l2_shared/processor_trusted_batch_sync.go
@@ -193,14 +193,10 @@ func (s *ProcessorTrustedBatchSync) GetNextStatus(status TrustedState, processBa
 			log.Warnf("%s error checking sanity of processBatchResp. Error: ", debugPrefix, err)
 		}
 	}
-	if processBatchResp != nil && !processBatchResp.ClearCache {
-		newStatus := updateStatus(status, processBatchResp, closedBatch)
-		log.Debugf("%s Batch synchronized, updated cache for next run", debugPrefix)
-		return &newStatus, nil
-	} else {
-		log.Debugf("%s Batch synchronized -> clear cache", debugPrefix)
-		return nil, nil
-	}
+
+	newStatus := updateStatus(status, processBatchResp, closedBatch)
+	log.Debugf("%s Batch synchronized, updated cache for next run", debugPrefix)
+	return &newStatus, nil
 }
 
 // ExecuteProcessBatch execute the batch and process it
@@ -239,6 +235,10 @@ func updateStatus(status TrustedState, response *ProcessResponse, closedBatch bo
 	if response == nil || response.ClearCache {
 		return res
 	}
+
+	res.LastTrustedBatches[0] = status.GetCurrentBatch()
+	res.LastTrustedBatches[1] = status.GetPreviousBatch()
+
 	if response.UpdateBatch != nil {
 		res.LastTrustedBatches[0] = response.UpdateBatch
 	}
diff --git a/synchronizer/l2_sync/l2_shared/tests/processor_trusted_batch_sync_test.go b/synchronizer/l2_sync/l2_shared/tests/processor_trusted_batch_sync_test.go
index 26f05a2fcf..5730dfad1b 100644
--- a/synchronizer/l2_sync/l2_shared/tests/processor_trusted_batch_sync_test.go
+++ b/synchronizer/l2_sync/l2_shared/tests/processor_trusted_batch_sync_test.go
@@ -256,3 +256,68 @@ func TestGetNextStatusUpdate(t *testing.T) {
 	require.Nil(t, res.LastTrustedBatches[0])
 	require.Equal(t, processBatchResp.ProcessBatchResponse.NewStateRoot, res.LastTrustedBatches[1].StateRoot)
 }
+
+func TestGetNextStatusUpdateNothing(t *testing.T) {
+	testData := newTestDataForProcessorTrustedBatchSync(t)
+
+	batch0 := state.Batch{
+		BatchNumber: 123,
+	}
+	batch1 := state.Batch{
+		BatchNumber: 122,
+	}
+	previousStatus := l2_shared.TrustedState{
+		LastTrustedBatches: []*state.Batch{&batch0, &batch1},
+	}
+	ProcessResponse := l2_shared.NewProcessResponse()
+	newStatus, err := testData.sut.GetNextStatus(previousStatus, &ProcessResponse, false, "test")
+	require.NoError(t, err)
+	require.Equal(t, &previousStatus, newStatus)
+	// If the batch is closed, move the current batch to the previous slot
+	newStatus, err = testData.sut.GetNextStatus(previousStatus, &ProcessResponse, true, "test")
+	require.NoError(t, err)
+	require.Equal(t, &l2_shared.TrustedState{
+		LastTrustedBatches: []*state.Batch{nil, &batch0},
+	}, newStatus)
+}
+
+func TestGetNextStatusDiscardCache(t *testing.T) {
+	testData := newTestDataForProcessorTrustedBatchSync(t)
+	ProcessResponse := l2_shared.NewProcessResponse()
+	ProcessResponse.DiscardCache()
+	newStatus, err := testData.sut.GetNextStatus(l2_shared.TrustedState{}, &ProcessResponse, false, "test")
+	require.NoError(t, err)
+	require.True(t, newStatus.IsEmpty())
+}
+
+func TestGetNextStatusUpdateCurrentBatch(t *testing.T) {
+	testData := newTestDataForProcessorTrustedBatchSync(t)
+	ProcessResponse := l2_shared.NewProcessResponse()
+	batch := state.Batch{
+		BatchNumber: 123,
+	}
+	ProcessResponse.UpdateCurrentBatch(&batch)
+	newStatus, err := testData.sut.GetNextStatus(l2_shared.TrustedState{}, &ProcessResponse, false, "test")
+	require.NoError(t, err)
+	require.Equal(t, &l2_shared.TrustedState{
+		LastTrustedBatches: []*state.Batch{&batch, nil},
+	}, newStatus)
+}
+
+func TestGetNextStatusUpdateExecutionResult(t *testing.T) {
+	testData := newTestDataForProcessorTrustedBatchSync(t)
+	ProcessResponse := l2_shared.NewProcessResponse()
+	batch := state.Batch{
+		BatchNumber: 123,
+	}
+	previousStatus := l2_shared.TrustedState{
+		LastTrustedBatches: []*state.Batch{nil, nil},
+	}
+
+	ProcessResponse.UpdateCurrentBatchWithExecutionResult(&batch, &state.ProcessBatchResponse{
+		NewStateRoot: common.HexToHash("0x123"),
+	})
+	newStatus, err := testData.sut.GetNextStatus(previousStatus, &ProcessResponse, false, "test")
+	require.NoError(t, err)
+	require.Equal(t, common.HexToHash("0x123"), newStatus.LastTrustedBatches[0].StateRoot)
+}
diff --git a/synchronizer/l2_sync/l2_shared/trusted_state.go b/synchronizer/l2_sync/l2_shared/trusted_state.go
index dd652ba201..0f44d226f2 100644
--- a/synchronizer/l2_sync/l2_shared/trusted_state.go
+++ b/synchronizer/l2_sync/l2_shared/trusted_state.go
@@ -31,6 +31,22 @@ func (ts *TrustedState) IsEmpty() bool {
 	return false
 }
 
+// GetCurrentBatch returns the current batch or nil
+func (ts *TrustedState) GetCurrentBatch() *state.Batch {
+	if ts == nil || len(ts.LastTrustedBatches) == 0 {
+		return nil
+	}
+	return ts.LastTrustedBatches[0]
+}
+
+// GetPreviousBatch returns the previous batch or nil
+func (ts *TrustedState) GetPreviousBatch() *state.Batch {
+	if ts == nil || len(ts.LastTrustedBatches) < 2 {
+		return nil
+	}
+	return ts.LastTrustedBatches[1]
+}
+
 // TrustedStateManager is the trusted state manager, basically contains the batch cache and create the TrustedState
 type TrustedStateManager struct {
 	Cache *common.Cache[uint64, *state.Batch]
diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
index e74424fa0e..9bd2f161cf 100644
--- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
+++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go
@@ -89,7 +89,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) NothingProcess(ctx context.Context, d
 			return nil, ErrCriticalClosedBatchDontContainExpectedData
 		}
 	}
-
+	res := l2_shared.NewProcessResponse()
 	if data.BatchMustBeClosed {
 		log.Debugf("%s Closing batch", data.DebugPrefix)
 		err := b.CloseBatch(ctx, data.TrustedBatch, dbTx, data.DebugPrefix)
@@ -97,10 +97,10 @@ func (b *SyncTrustedBatchExecutorForEtrog) NothingProcess(ctx context.Context, d
 			log.Error("%s error closing batch. Error: ", data.DebugPrefix, err)
 			return nil, err
 		}
+		data.StateBatch.WIP = false
+		res.UpdateCurrentBatch(data.StateBatch)
 	}
-	data.StateBatch.WIP = !data.BatchMustBeClosed
-	res := l2_shared.NewProcessResponse()
-	res.UpdateCurrentBatch(data.StateBatch)
+
 	return &res, nil
 }
 
diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
index c62a95ae02..706fb204ec 100644
--- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
+++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go
@@ -130,13 +130,14 @@ func newData() l2_shared.ProcessData {
 
 func TestNothingProcessDontCloseBatch(t *testing.T) {
 	testData := newTestData(t)
+
 	// Arrange
 	data := l2_shared.ProcessData{
 		BatchNumber:       123,
 		Mode:              l2_shared.NothingProcessMode,
 		BatchMustBeClosed: false,
 		DebugPrefix:       "test",
-		StateBatch:        &state.Batch{},
+		StateBatch:        &state.Batch{WIP: true},
 		TrustedBatch:      &types.Batch{},
 	}
 

From 36930bb941b6bd34c22bde90df5836b897185923 Mon Sep 17 00:00:00 2001
From: Victor Castell <0x@vcastellm.xyz>
Date: Thu, 1 Feb 2024 17:08:41 +0100
Subject: [PATCH 54/54] Merge latest changes

---
 go.mod |  7 +++----
 go.sum | 11 ++++-------
 2 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/go.mod b/go.mod
index 8766f77bc5..0f87e70d12 100644
--- a/go.mod
+++ b/go.mod
@@ -38,7 +38,6 @@ require (
 
 require (
 	dario.cat/mergo v1.0.0 // indirect
-	github.com/0xPolygon/agglayer v0.0.0-20240126091628-9016453dc02b // indirect
 	github.com/DataDog/zstd v1.5.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
@@ -46,7 +45,7 @@ require (
 	github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
 	github.com/bahlo/generic-list-go v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bits-and-blooms/bitset v1.10.0 // indirect
+	github.com/bits-and-blooms/bitset v1.12.0 // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
 	github.com/buger/jsonparser v1.1.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -69,7 +68,7 @@ require (
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
 	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
-	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
 	github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
 	github.com/getsentry/sentry-go v0.18.0 // indirect
@@ -171,7 +170,7 @@ require (
 )
 
 require (
-	github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc
+	github.com/0xPolygon/agglayer v0.0.0-20240126091628-9016453dc02b
 	github.com/fatih/color v1.16.0
 	github.com/joho/godotenv v1.5.1
 	github.com/prometheus/client_golang v1.18.0
diff --git a/go.sum b/go.sum
index e4c0bc40b8..51422835c6 100644
--- a/go.sum
+++ b/go.sum
@@ -41,8 +41,6 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/0xPolygon/agglayer v0.0.0-20240126091628-9016453dc02b h1:zwZP+YFDOLWL8qgWDu/D7E09MuLwIKXz6EqyT/CYBEY=
 github.com/0xPolygon/agglayer v0.0.0-20240126091628-9016453dc02b/go.mod h1:grNx11QYbBuhO8UrYjcWAam96cOKgCd3UC+s3Y60KGE=
-github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc h1:yUoC5oDBHbriJSRfVYWNji+rYJdjxqIibmVpzoXUM3g=
-github.com/0xPolygon/beethoven v0.0.0-20240112113018-0275d183b7bc/go.mod h1:V+tv5idj5g9yO/sTPzi9j4waUOcGAC2VakTBWGHB3lw=
 github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18 h1:InqeTcHrNbfj1OUfn2aFplFay7ibd7KhYqvmMZYZfn0=
 github.com/0xPolygonHermez/zkevm-data-streamer v0.1.18/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE=
 github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
@@ -90,8 +88,8 @@ github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xW
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
-github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
+github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c=
 github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
@@ -141,8 +139,8 @@ github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/Yj
 github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
 github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
 github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
-github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b h1:pik3LX++5O3UiNWv45wfP/WT81l7ukBJzd3uUiifbSU=
-github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
+github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -1066,7 +1064,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=