From 5abad0dadacb6d43ba3d959b555c6fe1428ae76c Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Mon, 19 Feb 2024 23:20:59 +0100 Subject: [PATCH 1/9] add aggregator.BatchProofL1BlockConfirmations config parameter (#3302) --- aggregator/aggregator.go | 17 ++++++++- aggregator/aggregator_test.go | 16 ++++++--- aggregator/config.go | 3 ++ aggregator/interfaces.go | 4 ++- aggregator/mocks/mock_etherman.go | 35 +++++++++++++++++++ aggregator/mocks/mock_state.go | 18 +++++----- config/config_test.go | 4 +++ config/default.go | 1 + .../environments/local/local.node.config.toml | 1 + docs/config-file/node-config-doc.html | 2 +- docs/config-file/node-config-doc.md | 15 ++++++++ docs/config-file/node-config-schema.json | 5 +++ state/interfaces.go | 2 +- state/mocks/mock_storage.go | 29 +++++++-------- state/pgstatestorage/batch.go | 5 +-- test/config/debug.node.config.toml | 1 + test/config/test.node.config.toml | 1 + 17 files changed, 125 insertions(+), 34 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 74aed60c3b..1b23c11202 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -756,8 +756,23 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn return nil, nil, err } + // Get header of the last L1 block + lastL1BlockHeader, err := a.Ethman.GetLatestBlockHeader(ctx) + if err != nil { + log.Errorf("Failed to get last L1 block header, err: %v", err) + return nil, nil, err + } + lastL1BlockNumber := lastL1BlockHeader.Number.Uint64() + + // Calculate max L1 block number for getting next virtual batch to prove + maxL1BlockNumber := uint64(0) + if a.cfg.BatchProofL1BlockConfirmations <= lastL1BlockNumber { + maxL1BlockNumber = lastL1BlockNumber - a.cfg.BatchProofL1BlockConfirmations + } + log.Debugf("Max L1 block number for getting next virtual batch to prove: %d", maxL1BlockNumber) + // Get virtual batch pending to generate proof - batchToVerify, err := 
a.State.GetVirtualBatchToProve(ctx, lastVerifiedBatch.BatchNumber, nil) + batchToVerify, err := a.State.GetVirtualBatchToProve(ctx, lastVerifiedBatch.BatchNumber, maxL1BlockNumber, nil) if err != nil { return nil, nil, err } diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index eb51a09381..a071828a16 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -17,6 +17,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/test/testutils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -775,7 +776,7 @@ func TestTryGenerateBatchProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") m.stateMock.On("GetLastVerifiedBatch", mock.MatchedBy(matchProverCtxFn), nil).Return(&lastVerifiedBatch, nil).Once() - m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, nil).Return(&batchToProve, nil).Once() + m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, mock.Anything, nil).Return(&batchToProve, nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { proof := args[1].(*state.Proof) @@ -798,6 +799,7 @@ func TestTryGenerateBatchProof(t *testing.T) { L1InfoRoot: &l1InfoRoot, TimestampBatchEtrog: &t, } + m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := 
a.buildInputProver(context.Background(), &batchToProve) @@ -817,7 +819,7 @@ func TestTryGenerateBatchProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") m.stateMock.On("GetLastVerifiedBatch", mock.MatchedBy(matchProverCtxFn), nil).Return(&lastVerifiedBatch, nil).Once() - m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, nil).Return(&batchToProve, nil).Once() + m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, mock.Anything, nil).Return(&batchToProve, nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { proof := args[1].(*state.Proof) @@ -840,6 +842,7 @@ func TestTryGenerateBatchProof(t *testing.T) { L1InfoRoot: &l1InfoRoot, TimestampBatchEtrog: &t, } + m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) @@ -860,7 +863,7 @@ func TestTryGenerateBatchProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return(proverID) m.stateMock.On("GetLastVerifiedBatch", mock.MatchedBy(matchProverCtxFn), nil).Return(&lastVerifiedBatch, nil).Once() - m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, nil).Return(&batchToProve, nil).Once() + m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, mock.Anything, nil).Return(&batchToProve, nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( 
func(args mock.Arguments) { proof := args[1].(*state.Proof) @@ -883,6 +886,7 @@ func TestTryGenerateBatchProof(t *testing.T) { L1InfoRoot: &l1InfoRoot, TimestampBatchEtrog: &t, } + m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) @@ -903,7 +907,7 @@ func TestTryGenerateBatchProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Times(3) m.proverMock.On("Addr").Return("addr") m.stateMock.On("GetLastVerifiedBatch", mock.MatchedBy(matchProverCtxFn), nil).Return(&lastVerifiedBatch, nil).Once() - m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, nil).Return(&batchToProve, nil).Once() + m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, mock.Anything, nil).Return(&batchToProve, nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { proof := args[1].(*state.Proof) @@ -926,6 +930,7 @@ func TestTryGenerateBatchProof(t *testing.T) { L1InfoRoot: &l1InfoRoot, TimestampBatchEtrog: &t, } + m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) @@ -960,7 +965,7 @@ func TestTryGenerateBatchProof(t *testing.T) { 
m.proverMock.On("ID").Return(proverID).Times(3) m.proverMock.On("Addr").Return("addr") m.stateMock.On("GetLastVerifiedBatch", mock.MatchedBy(matchProverCtxFn), nil).Return(&lastVerifiedBatch, nil).Once() - m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, nil).Return(&batchToProve, nil).Once() + m.stateMock.On("GetVirtualBatchToProve", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum, mock.Anything, nil).Return(&batchToProve, nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { proof := args[1].(*state.Proof) @@ -995,6 +1000,7 @@ func TestTryGenerateBatchProof(t *testing.T) { On("GetLastVerifiedBatch", mock.MatchedBy(matchProverCtxFn), nil). Return(&state.VerifiedBatch{BatchNumber: uint64(42)}, nil).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(42), nil).Once() + m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() // make tryBuildFinalProof fail ASAP m.stateMock.On("GetLastVerifiedBatch", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, errBanana).Once().NotBefore(isSyncedCall) m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( diff --git a/aggregator/config.go b/aggregator/config.go index 9099df1931..420d6dcd2a 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -88,4 +88,7 @@ type Config struct { // UpgradeEtrogBatchNumber is the number of the first batch after upgrading to etrog UpgradeEtrogBatchNumber uint64 `mapstructure:"UpgradeEtrogBatchNumber"` + + // BatchProofL1BlockConfirmations is number of L1 blocks to consider we can generate the proof for a virtual batch + BatchProofL1BlockConfirmations uint64 `mapstructure:"BatchProofL1BlockConfirmations"` } diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 3bc9c2fe8a..0d6b11b7ed 100644 --- 
a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/ethtxmanager" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" ) @@ -39,6 +40,7 @@ type ethTxManager interface { type etherman interface { GetLatestVerifiedBatchNum() (uint64, error) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address) (to *common.Address, data []byte, err error) + GetLatestBlockHeader(ctx context.Context) (*types.Header, error) } // aggregatorTxProfitabilityChecker interface for different profitability @@ -53,7 +55,7 @@ type stateInterface interface { CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx) (*state.VerifiedBatch, error) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) - GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) + GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, maxL1Block uint64, dbTx pgx.Tx) (*state.Batch, error) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error diff --git a/aggregator/mocks/mock_etherman.go b/aggregator/mocks/mock_etherman.go index 4f0f97c61e..077771c339 100644 --- a/aggregator/mocks/mock_etherman.go +++ b/aggregator/mocks/mock_etherman.go @@ -3,7 +3,12 @@ package mocks import ( + context "context" + common "github.com/ethereum/go-ethereum/common" + + coretypes "github.com/ethereum/go-ethereum/core/types" + mock 
"github.com/stretchr/testify/mock" types "github.com/0xPolygonHermez/zkevm-node/etherman/types" @@ -53,6 +58,36 @@ func (_m *Etherman) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch uint64, ne return r0, r1, r2 } +// GetLatestBlockHeader provides a mock function with given fields: ctx +func (_m *Etherman) GetLatestBlockHeader(ctx context.Context) (*coretypes.Header, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + + var r0 *coretypes.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.Header, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.Header); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLatestVerifiedBatchNum provides a mock function with given fields: func (_m *Etherman) GetLatestVerifiedBatchNum() (uint64, error) { ret := _m.Called() diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go index 24b2ffd61e..cfc5b66e7d 100644 --- a/aggregator/mocks/mock_state.go +++ b/aggregator/mocks/mock_state.go @@ -454,9 +454,9 @@ func (_m *StateMock) GetVirtualBatchParentHash(ctx context.Context, batchNumber return r0, r1 } -// GetVirtualBatchToProve provides a mock function with given fields: ctx, lastVerfiedBatchNumber, dbTx -func (_m *StateMock) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) { - ret := _m.Called(ctx, lastVerfiedBatchNumber, dbTx) +// GetVirtualBatchToProve provides a mock function with given fields: ctx, lastVerfiedBatchNumber, maxL1Block, dbTx +func (_m *StateMock) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, maxL1Block uint64, dbTx pgx.Tx) (*state.Batch, error) { + ret := _m.Called(ctx, 
lastVerfiedBatchNumber, maxL1Block, dbTx) if len(ret) == 0 { panic("no return value specified for GetVirtualBatchToProve") @@ -464,19 +464,19 @@ func (_m *StateMock) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatc var r0 *state.Batch var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Batch, error)); ok { - return rf(ctx, lastVerfiedBatchNumber, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) (*state.Batch, error)); ok { + return rf(ctx, lastVerfiedBatchNumber, maxL1Block, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Batch); ok { - r0 = rf(ctx, lastVerfiedBatchNumber, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) *state.Batch); ok { + r0 = rf(ctx, lastVerfiedBatchNumber, maxL1Block, dbTx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*state.Batch) } } - if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { - r1 = rf(ctx, lastVerfiedBatchNumber, dbTx) + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, lastVerfiedBatchNumber, maxL1Block, dbTx) } else { r1 = ret.Error(1) } diff --git a/config/config_test.go b/config/config_test.go index a031f88a68..2efaaf0616 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -477,6 +477,10 @@ func Test_Defaults(t *testing.T) { path: "Aggregator.UpgradeEtrogBatchNumber", expectedValue: uint64(0), }, + { + path: "Aggregator.BatchProofL1BlockConfirmations", + expectedValue: uint64(2), + }, { path: "State.Batch.Constraints.MaxTxsPerBatch", expectedValue: uint64(300), diff --git a/config/default.go b/config/default.go index a18586be4e..4bca8b1c9a 100644 --- a/config/default.go +++ b/config/default.go @@ -164,6 +164,7 @@ CleanupLockedProofsInterval = "2m" GeneratingProofCleanupThreshold = "10m" GasOffset = 0 UpgradeEtrogBatchNumber = 0 +BatchProofL1BlockConfirmations = 2 [L2GasPriceSuggester] Type = 
"follower" diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index ebf5b2544d..9fefe6362b 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -128,6 +128,7 @@ SenderAddress = "0x70997970c51812dc3a010c7d01b50e0d17dc79c8" CleanupLockedProofsInterval = "2m" GeneratingProofCleanupThreshold = "10m" UpgradeEtrogBatchNumber = 0 +BatchProofL1BlockConfirmations = 2 [EthTxManager] ForcedGas = 0 diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index 8f9e04279a..f578dbcd6a 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -66,7 +66,7 @@
"300ms"
ChainID is the L2 ChainID provided by the Network Config
ForkID is the L2 ForkID provided by the Network Config
SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs
CleanupLockedProofsInterval is the interval of time to clean up locked proofs.
"1m"
"300ms"
-
GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared.
GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.
ex:
gas estimation: 1000
gas offset: 100
final gas: 1100
UpgradeEtrogBatchNumber is the number of the first batch after upgrading to etrog
Chain ID of the L1 network
ZkEVMAddr Address of the L1 contract polygonZkEVMAddress
Must contain a minimum of 20
items
Must contain a maximum of 20
items
RollupManagerAddr Address of the L1 contract
Must contain a minimum of 20
items
Must contain a maximum of 20
items
PolAddr Address of the L1 Pol token Contract
Must contain a minimum of 20
items
Must contain a maximum of 20
items
GlobalExitRootManagerAddr Address of the L1 GlobalExitRootManager contract
Must contain a minimum of 20
items
Must contain a maximum of 20
items
BlockNumber is the block number where the polygonZKEVM smc was deployed on L1
Root hash of the genesis block
Must contain a minimum of 32
items
Must contain a maximum of 32
items
Actions is the data to populate into the state trie
DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimim gas price by the follower gas pricer.
MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0.
"1m"
+
GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared.
GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.
ex:
gas estimation: 1000
gas offset: 100
final gas: 1100
UpgradeEtrogBatchNumber is the number of the first batch after upgrading to etrog
BatchProofL1BlockConfirmations is number of L1 blocks to consider we can generate the proof for a virtual batch
Chain ID of the L1 network
ZkEVMAddr Address of the L1 contract polygonZkEVMAddress
Must contain a minimum of 20
items
Must contain a maximum of 20
items
RollupManagerAddr Address of the L1 contract
Must contain a minimum of 20
items
Must contain a maximum of 20
items
PolAddr Address of the L1 Pol token Contract
Must contain a minimum of 20
items
Must contain a maximum of 20
items
GlobalExitRootManagerAddr Address of the L1 GlobalExitRootManager contract
Must contain a minimum of 20
items
Must contain a maximum of 20
items
BlockNumber is the block number where the polygonZKEVM smc was deployed on L1
Root hash of the genesis block
Must contain a minimum of 32
items
Must contain a maximum of 32
items
Actions is the data to populate into the state trie
DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimim gas price by the follower gas pricer.
MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0.
"1m"
"300ms"
"1m"
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 37a3d6563a..249daf8660 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -2370,6 +2370,7 @@ GasOffset=80000
| - [GeneratingProofCleanupThreshold](#Aggregator_GeneratingProofCleanupThreshold ) | No | string | No | - | GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared. |
| - [GasOffset](#Aggregator_GasOffset ) | No | integer | No | - | GasOffset is the amount of gas to be added to the gas estimation in order
to provide an amount that is higher than the estimated one. This is used
to avoid the TX getting reverted in case something has changed in the network
state after the estimation which can cause the TX to require more gas to be
executed.
ex:
gas estimation: 1000
gas offset: 100
final gas: 1100 |
| - [UpgradeEtrogBatchNumber](#Aggregator_UpgradeEtrogBatchNumber ) | No | integer | No | - | UpgradeEtrogBatchNumber is the number of the first batch after upgrading to etrog |
+| - [BatchProofL1BlockConfirmations](#Aggregator_BatchProofL1BlockConfirmations ) | No | integer | No | - | BatchProofL1BlockConfirmations is number of L1 blocks to consider we can generate the proof for a virtual batch |
### 12.1. `Aggregator.Host`
@@ -2647,6 +2648,20 @@ GasOffset=0
UpgradeEtrogBatchNumber=0
```
+### 12.16. `Aggregator.BatchProofL1BlockConfirmations`
+
+**Type:** : `integer`
+
+**Default:** `2`
+
+**Description:** BatchProofL1BlockConfirmations is number of L1 blocks to consider we can generate the proof for a virtual batch
+
+**Example setting the default value** (2):
+```
+[Aggregator]
+BatchProofL1BlockConfirmations=2
+```
+
## 13. `[NetworkConfig]`
**Type:** : `object`
diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json
index b28e8ea462..95e2e2b248 100644
--- a/docs/config-file/node-config-schema.json
+++ b/docs/config-file/node-config-schema.json
@@ -1011,6 +1011,11 @@
"type": "integer",
"description": "UpgradeEtrogBatchNumber is the number of the first batch after upgrading to etrog",
"default": 0
+ },
+ "BatchProofL1BlockConfirmations": {
+ "type": "integer",
+ "description": "BatchProofL1BlockConfirmations is number of L1 blocks to consider we can generate the proof for a virtual batch",
+ "default": 2
}
},
"additionalProperties": false,
diff --git a/state/interfaces.go b/state/interfaces.go
index 17636f5ce1..f6b5859a80 100644
--- a/state/interfaces.go
+++ b/state/interfaces.go
@@ -98,7 +98,7 @@ type storage interface {
GetExitRootByGlobalExitRoot(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*GlobalExitRoot, error)
AddSequence(ctx context.Context, sequence Sequence, dbTx pgx.Tx) error
GetSequences(ctx context.Context, lastVerifiedBatchNumber uint64, dbTx pgx.Tx) ([]Sequence, error)
- GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*Batch, error)
+ GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, maxL1Block uint64, dbTx pgx.Tx) (*Batch, error)
CheckProofContainsCompleteSequences(ctx context.Context, proof *Proof, dbTx pgx.Tx) (bool, error)
GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*Proof, error)
GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*Proof, *Proof, error)
diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go
index 559e73a930..4856aa7e41 100644
--- a/state/mocks/mock_storage.go
+++ b/state/mocks/mock_storage.go
@@ -6950,9 +6950,9 @@ func (_c *StorageMock_GetVirtualBatchParentHash_Call) RunAndReturn(run func(cont
return _c
}
-// GetVirtualBatchToProve provides a mock function with given fields: ctx, lastVerfiedBatchNumber, dbTx
-func (_m *StorageMock) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) {
- ret := _m.Called(ctx, lastVerfiedBatchNumber, dbTx)
+// GetVirtualBatchToProve provides a mock function with given fields: ctx, lastVerfiedBatchNumber, maxL1Block, dbTx
+func (_m *StorageMock) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, maxL1Block uint64, dbTx pgx.Tx) (*state.Batch, error) {
+ ret := _m.Called(ctx, lastVerfiedBatchNumber, maxL1Block, dbTx)
if len(ret) == 0 {
panic("no return value specified for GetVirtualBatchToProve")
@@ -6960,19 +6960,19 @@ func (_m *StorageMock) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBa
var r0 *state.Batch
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Batch, error)); ok {
- return rf(ctx, lastVerfiedBatchNumber, dbTx)
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) (*state.Batch, error)); ok {
+ return rf(ctx, lastVerfiedBatchNumber, maxL1Block, dbTx)
}
- if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Batch); ok {
- r0 = rf(ctx, lastVerfiedBatchNumber, dbTx)
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) *state.Batch); ok {
+ r0 = rf(ctx, lastVerfiedBatchNumber, maxL1Block, dbTx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*state.Batch)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok {
- r1 = rf(ctx, lastVerfiedBatchNumber, dbTx)
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, lastVerfiedBatchNumber, maxL1Block, dbTx)
} else {
r1 = ret.Error(1)
}
@@ -6988,14 +6988,15 @@ type StorageMock_GetVirtualBatchToProve_Call struct {
// GetVirtualBatchToProve is a helper method to define mock.On call
// - ctx context.Context
// - lastVerfiedBatchNumber uint64
+// - maxL1Block uint64
// - dbTx pgx.Tx
-func (_e *StorageMock_Expecter) GetVirtualBatchToProve(ctx interface{}, lastVerfiedBatchNumber interface{}, dbTx interface{}) *StorageMock_GetVirtualBatchToProve_Call {
- return &StorageMock_GetVirtualBatchToProve_Call{Call: _e.mock.On("GetVirtualBatchToProve", ctx, lastVerfiedBatchNumber, dbTx)}
+func (_e *StorageMock_Expecter) GetVirtualBatchToProve(ctx interface{}, lastVerfiedBatchNumber interface{}, maxL1Block interface{}, dbTx interface{}) *StorageMock_GetVirtualBatchToProve_Call {
+ return &StorageMock_GetVirtualBatchToProve_Call{Call: _e.mock.On("GetVirtualBatchToProve", ctx, lastVerfiedBatchNumber, maxL1Block, dbTx)}
}
-func (_c *StorageMock_GetVirtualBatchToProve_Call) Run(run func(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx)) *StorageMock_GetVirtualBatchToProve_Call {
+func (_c *StorageMock_GetVirtualBatchToProve_Call) Run(run func(ctx context.Context, lastVerfiedBatchNumber uint64, maxL1Block uint64, dbTx pgx.Tx)) *StorageMock_GetVirtualBatchToProve_Call {
_c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx))
+ run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(pgx.Tx))
})
return _c
}
@@ -7005,7 +7006,7 @@ func (_c *StorageMock_GetVirtualBatchToProve_Call) Return(_a0 *state.Batch, _a1
return _c
}
-func (_c *StorageMock_GetVirtualBatchToProve_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Batch, error)) *StorageMock_GetVirtualBatchToProve_Call {
+func (_c *StorageMock_GetVirtualBatchToProve_Call) RunAndReturn(run func(context.Context, uint64, uint64, pgx.Tx) (*state.Batch, error)) *StorageMock_GetVirtualBatchToProve_Call {
_c.Call.Return(run)
return _c
}
diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go
index 214be8a685..83bfad3cdb 100644
--- a/state/pgstatestorage/batch.go
+++ b/state/pgstatestorage/batch.go
@@ -764,7 +764,7 @@ func (p *PostgresStorage) GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx)
// GetVirtualBatchToProve return the next batch that is not proved, neither in
// proved process.
-func (p *PostgresStorage) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) {
+func (p *PostgresStorage) GetVirtualBatchToProve(ctx context.Context, lastVerfiedBatchNumber uint64, maxL1Block uint64, dbTx pgx.Tx) (*state.Batch, error) {
const query = `
SELECT
b.batch_num,
@@ -783,6 +783,7 @@ func (p *PostgresStorage) GetVirtualBatchToProve(ctx context.Context, lastVerfie
state.virtual_batch v
WHERE
b.batch_num > $1 AND b.batch_num = v.batch_num AND
+ v.block_num <= $2 AND
NOT EXISTS (
SELECT p.batch_num FROM state.proof p
WHERE v.batch_num >= p.batch_num AND v.batch_num <= p.batch_num_final
@@ -790,7 +791,7 @@ func (p *PostgresStorage) GetVirtualBatchToProve(ctx context.Context, lastVerfie
ORDER BY b.batch_num ASC LIMIT 1
`
e := p.getExecQuerier(dbTx)
- row := e.QueryRow(ctx, query, lastVerfiedBatchNumber)
+ row := e.QueryRow(ctx, query, lastVerfiedBatchNumber, maxL1Block)
batch, err := scanBatch(row)
if errors.Is(err, pgx.ErrNoRows) {
return nil, state.ErrNotFound
diff --git a/test/config/debug.node.config.toml b/test/config/debug.node.config.toml
index aa1a5f5ee0..32bbdd7f56 100644
--- a/test/config/debug.node.config.toml
+++ b/test/config/debug.node.config.toml
@@ -129,6 +129,7 @@ SenderAddress = "0x70997970c51812dc3a010c7d01b50e0d17dc79c8"
CleanupLockedProofsInterval = "2m"
GeneratingProofCleanupThreshold = "10m"
UpgradeEtrogBatchNumber = 0
+BatchProofL1BlockConfirmations = 2
[EthTxManager]
ForcedGas = 0
diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml
index 76c6e53aee..8d6a0d215d 100644
--- a/test/config/test.node.config.toml
+++ b/test/config/test.node.config.toml
@@ -146,6 +146,7 @@ SenderAddress = "0x70997970c51812dc3a010c7d01b50e0d17dc79c8"
CleanupLockedProofsInterval = "2m"
GeneratingProofCleanupThreshold = "10m"
UpgradeEtrogBatchNumber = 0
+BatchProofL1BlockConfirmations = 2
[EthTxManager]
ForcedGas = 0
From e767b9e828d2edd882a0bb1f2d25270a3f10c54d Mon Sep 17 00:00:00 2001
From: Thiago Coimbra Lemos
Date: Tue, 20 Feb 2024 14:24:09 -0300
Subject: [PATCH 2/9] add SkipVerifyL1InfoRoot as true when debug trace needs
to provide the l1 info tree data (#3321)
---
state/trace.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/state/trace.go b/state/trace.go
index 26b00a8048..f371b5b544 100644
--- a/state/trace.go
+++ b/state/trace.go
@@ -254,6 +254,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has
// In case we have any l1InfoTreeData, add them to the request
if len(l1InfoTreeData) > 0 {
processBatchRequestV2.L1InfoTreeData = map[uint32]*executor.L1DataV2{}
+ processBatchRequestV2.SkipVerifyL1InfoRoot = cTrue
for k, v := range l1InfoTreeData {
processBatchRequestV2.L1InfoTreeData[k] = &executor.L1DataV2{
GlobalExitRoot: v.GlobalExitRoot.Bytes(),
From dd33559011f405b6afbd83da6ce2c10a512c61f1 Mon Sep 17 00:00:00 2001
From: Thiago Coimbra Lemos
Date: Tue, 20 Feb 2024 14:24:31 -0300
Subject: [PATCH 3/9] fix tx index provided in the tx log responses (#3303)
---
state/pgstatestorage/pgstatestorage.go | 6 +-
state/pgstatestorage/transaction.go | 8 +-
test/e2e/sc_test.go | 147 ++++++++++++++++++++++++-
3 files changed, 155 insertions(+), 6 deletions(-)
diff --git a/state/pgstatestorage/pgstatestorage.go b/state/pgstatestorage/pgstatestorage.go
index de8fd2983f..9d17756e29 100644
--- a/state/pgstatestorage/pgstatestorage.go
+++ b/state/pgstatestorage/pgstatestorage.go
@@ -122,10 +122,11 @@ func (p *PostgresStorage) GetStateRootByBatchNumber(ctx context.Context, batchNu
// GetLogsByBlockNumber get all the logs from a specific block ordered by log index
func (p *PostgresStorage) GetLogsByBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) ([]*types.Log, error) {
const query = `
- SELECT t.l2_block_num, b.block_hash, l.tx_hash, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3
+ SELECT t.l2_block_num, b.block_hash, l.tx_hash, r.tx_index, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3
FROM state.log l
INNER JOIN state.transaction t ON t.hash = l.tx_hash
INNER JOIN state.l2block b ON b.block_num = t.l2_block_num
+ INNER JOIN state.receipt r ON r.tx_hash = t.hash
WHERE b.block_num = $1
ORDER BY l.log_index ASC`
@@ -142,11 +143,12 @@ func (p *PostgresStorage) GetLogsByBlockNumber(ctx context.Context, blockNumber
func (p *PostgresStorage) GetLogs(ctx context.Context, fromBlock uint64, toBlock uint64, addresses []common.Address, topics [][]common.Hash, blockHash *common.Hash, since *time.Time, dbTx pgx.Tx) ([]*types.Log, error) {
// query parts
const queryCount = `SELECT count(*) `
- const querySelect = `SELECT t.l2_block_num, b.block_hash, l.tx_hash, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 `
+ const querySelect = `SELECT t.l2_block_num, b.block_hash, l.tx_hash, r.tx_index, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 `
const queryBody = `FROM state.log l
INNER JOIN state.transaction t ON t.hash = l.tx_hash
INNER JOIN state.l2block b ON b.block_num = t.l2_block_num
+ INNER JOIN state.receipt r ON r.tx_hash = t.hash
WHERE (l.address = any($1) OR $1 IS NULL)
AND (l.topic0 = any($2) OR $2 IS NULL)
AND (l.topic1 = any($3) OR $3 IS NULL)
diff --git a/state/pgstatestorage/transaction.go b/state/pgstatestorage/transaction.go
index 5f6cd2856f..b372b6b13d 100644
--- a/state/pgstatestorage/transaction.go
+++ b/state/pgstatestorage/transaction.go
@@ -367,10 +367,11 @@ func (p *PostgresStorage) getTransactionLogs(ctx context.Context, transactionHas
q := p.getExecQuerier(dbTx)
const getTransactionLogsSQL = `
- SELECT t.l2_block_num, b.block_hash, l.tx_hash, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3
+ SELECT t.l2_block_num, b.block_hash, l.tx_hash, r.tx_index, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3
FROM state.log l
INNER JOIN state.transaction t ON t.hash = l.tx_hash
INNER JOIN state.l2block b ON b.block_num = t.l2_block_num
+ INNER JOIN state.receipt r ON r.tx_hash = t.hash
WHERE t.hash = $1
ORDER BY l.log_index ASC`
rows, err := q.Query(ctx, getTransactionLogsSQL, transactionHash.String())
@@ -391,10 +392,11 @@ func scanLogs(rows pgx.Rows) ([]*types.Log, error) {
}
var log types.Log
+ var txIndex uint
var blockHash, txHash, logAddress, logData string
var topic0, topic1, topic2, topic3 *string
- err := rows.Scan(&log.BlockNumber, &blockHash, &txHash, &log.Index,
+ err := rows.Scan(&log.BlockNumber, &blockHash, &txHash, &txIndex, &log.Index,
&logAddress, &logData, &topic0, &topic1, &topic2, &topic3)
if err != nil {
return nil, err
@@ -403,7 +405,7 @@ func scanLogs(rows pgx.Rows) ([]*types.Log, error) {
log.BlockHash = common.HexToHash(blockHash)
log.TxHash = common.HexToHash(txHash)
log.Address = common.HexToAddress(logAddress)
- log.TxIndex = uint(0)
+ log.TxIndex = txIndex
log.Data, err = hex.DecodeHex(logData)
if err != nil {
return nil, err
diff --git a/test/e2e/sc_test.go b/test/e2e/sc_test.go
index 82950e79d8..736e47ded4 100644
--- a/test/e2e/sc_test.go
+++ b/test/e2e/sc_test.go
@@ -4,8 +4,10 @@ import (
"context"
"math/big"
"testing"
+ "time"
"github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/0xPolygonHermez/zkevm-node/state"
"github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/Counter"
"github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/EmitLog2"
"github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/FailureTest"
@@ -265,8 +267,9 @@ func TestEmitLog2(t *testing.T) {
log0 := getLogByIndex(0, logs)
assert.Equal(t, 0, len(log0.Topics))
- _, err = sc.ParseLog(getLogByIndex(1, logs))
+ logWithoutParameters, err := sc.ParseLog(getLogByIndex(1, logs))
require.NoError(t, err)
+ assert.Equal(t, 1, len(logWithoutParameters.Raw.Topics))
logA, err := sc.ParseLogA(getLogByIndex(2, logs))
require.NoError(t, err)
@@ -329,6 +332,148 @@ func TestEmitLog2(t *testing.T) {
}
}
+func TestLogTxIndex(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+
+ var err error
+ err = operations.Teardown()
+ require.NoError(t, err)
+
+ defer func() { require.NoError(t, operations.Teardown()) }()
+
+ ctx := context.Background()
+ opsCfg := operations.GetDefaultOperationsConfig()
+ opsMan, err := operations.NewManager(ctx, opsCfg)
+ require.NoError(t, err)
+ err = opsMan.Setup()
+ require.NoError(t, err)
+
+ assertTxHashAndIndex := func(t *testing.T, log types.Log, tx *types.Transaction, receipt *types.Receipt) {
+ assert.Equal(t, tx.Hash().String(), log.TxHash.String())
+ assert.Equal(t, receipt.TxHash.String(), log.TxHash.String())
+ assert.Equal(t, receipt.TransactionIndex, log.TxIndex)
+ }
+
+ for _, network := range networks {
+ log.Debugf(network.Name)
+ client := operations.MustGetClient(network.URL)
+ wsClient := operations.MustGetClient(network.WebSocketURL)
+ auth := operations.MustGetAuth(network.PrivateKey, network.ChainID)
+
+ // deploy sc
+ scAddr, scTx, sc, err := EmitLog2.DeployEmitLog2(auth, client)
+ require.NoError(t, err)
+
+ logTx(scTx)
+ err = operations.WaitTxToBeMined(ctx, client, scTx, operations.DefaultTimeoutTxToBeMined)
+ require.NoError(t, err)
+
+ if network.Name == "Local L2" {
+ // stops sequencer
+ err = operations.StopComponent("seq")
+ require.NoError(t, err)
+ }
+
+ logsFromSubscription := make(chan types.Log)
+ query := ethereum.FilterQuery{Addresses: []common.Address{scAddr}}
+ sub, err := wsClient.SubscribeFilterLogs(context.Background(), query, logsFromSubscription)
+ require.NoError(t, err)
+
+ // send transfer
+ gasPrice, err := client.SuggestGasPrice(ctx)
+ require.NoError(t, err)
+ nonce, err := client.PendingNonceAt(ctx, auth.From)
+ require.NoError(t, err)
+ tx := types.NewTx(&types.LegacyTx{
+ To: state.Ptr(common.HexToAddress("0x1")),
+ Gas: 30000,
+ GasPrice: gasPrice,
+ Value: big.NewInt(1000),
+ Nonce: nonce,
+ })
+ signedTx, err := auth.Signer(auth.From, tx)
+ require.NoError(t, err)
+ err = client.SendTransaction(ctx, signedTx)
+ require.NoError(t, err)
+
+ // send log tx
+ auth.Nonce = big.NewInt(0).SetUint64(nonce + 1)
+ scCallTx, err := sc.EmitLogs(auth)
+ require.NoError(t, err)
+ logTx(scCallTx)
+
+ time.Sleep(time.Second)
+
+ if network.Name == "Local L2" {
+ // starts sequencer and wait log tx to get mined
+ err = operations.StartComponent("seq", func() (done bool, err error) {
+ err = operations.WaitTxToBeMined(ctx, client, scCallTx, operations.DefaultTimeoutTxToBeMined)
+ return true, err
+ })
+ require.NoError(t, err)
+ } else {
+ err = operations.WaitTxToBeMined(ctx, client, scCallTx, operations.DefaultTimeoutTxToBeMined)
+ require.NoError(t, err)
+ }
+
+ scCallTxReceipt, err := client.TransactionReceipt(ctx, scCallTx.Hash())
+ require.NoError(t, err)
+
+ if network.Name == "Local L2" {
+ assert.Equal(t, uint(1), scCallTxReceipt.TransactionIndex)
+ }
+
+ // validate logs from filterLogs
+ filterBlock := scCallTxReceipt.BlockNumber
+ logs, err := client.FilterLogs(ctx, ethereum.FilterQuery{
+ FromBlock: filterBlock, ToBlock: filterBlock,
+ Addresses: []common.Address{scAddr},
+ })
+ require.NoError(t, err)
+
+ assert.Equal(t, 4, len(logs))
+ for i := range logs {
+ l := getLogByIndex(i, logs)
+ assertTxHashAndIndex(t, l, scCallTx, scCallTxReceipt)
+ }
+
+ // validate logs from receipt
+ logs = make([]types.Log, len(scCallTxReceipt.Logs))
+ for i, log := range scCallTxReceipt.Logs {
+ logs[i] = *log
+ }
+
+ assert.Equal(t, 4, len(logs))
+ for i := range logs {
+ l := getLogByIndex(i, logs)
+ assertTxHashAndIndex(t, l, scCallTx, scCallTxReceipt)
+ }
+
+ // validate logs by subscription
+ logs = []types.Log{}
+ out:
+ for {
+ select {
+ case err := <-sub.Err():
+ require.NoError(t, err)
+ case vLog, closed := <-logsFromSubscription:
+ logs = append(logs, vLog)
+ if len(logs) == 4 && closed {
+ break out
+ }
+ }
+ }
+
+ assert.Equal(t, 4, len(logs))
+ for i := range logs {
+ l := getLogByIndex(i, logs)
+ assertTxHashAndIndex(t, l, scCallTx, scCallTxReceipt)
+ }
+ }
+}
+
func getLogByIndex(index int, logs []types.Log) types.Log {
for _, log := range logs {
if int(log.Index) == index {
From 3ae5a9aaaefc52045f43493db299ef8fcf473cef Mon Sep 17 00:00:00 2001
From: Thiago Coimbra Lemos
Date: Tue, 20 Feb 2024 14:52:06 -0300
Subject: [PATCH 4/9] Fix null fields for pending blocks (#3274)
---
jsonrpc/endpoints_eth.go | 6 ++++++
jsonrpc/endpoints_eth_test.go | 37 +++++++++++++++++++++------------
jsonrpc/endpoints_zkevm.go | 6 ++++++
jsonrpc/endpoints_zkevm_test.go | 37 +++++++++++++++++++++------------
jsonrpc/types/types.go | 30 ++++++++++++--------------
5 files changed, 73 insertions(+), 43 deletions(-)
diff --git a/jsonrpc/endpoints_eth.go b/jsonrpc/endpoints_eth.go
index e999a0facf..1e8b10ab74 100644
--- a/jsonrpc/endpoints_eth.go
+++ b/jsonrpc/endpoints_eth.go
@@ -351,6 +351,12 @@ func (e *EthEndpoints) GetBlockByNumber(number types.BlockNumber, fullTx bool, i
return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err, true)
}
+ // clean fields that are not available for pending block
+ rpcBlock.Hash = nil
+ rpcBlock.Miner = nil
+ rpcBlock.Nonce = nil
+ rpcBlock.TotalDifficulty = nil
+
return rpcBlock, nil
}
var err error
diff --git a/jsonrpc/endpoints_eth_test.go b/jsonrpc/endpoints_eth_test.go
index efa876fb21..60067ab1b3 100644
--- a/jsonrpc/endpoints_eth_test.go
+++ b/jsonrpc/endpoints_eth_test.go
@@ -1223,7 +1223,7 @@ func TestGetL2BlockByNumber(t *testing.T) {
}
n := big.NewInt(0).SetUint64(l2Block.Nonce())
- rpcBlockNonce := common.LeftPadBytes(n.Bytes(), 8) //nolint:gomnd
+ rpcBlockNonce := types.ArgBytes(common.LeftPadBytes(n.Bytes(), 8)) //nolint:gomnd
difficulty := types.ArgUint64(0)
var totalDifficulty *types.ArgUint64
@@ -1249,7 +1249,7 @@ func TestGetL2BlockByNumber(t *testing.T) {
Timestamp: types.ArgUint64(l2Block.Time()),
ExtraData: l2Block.Extra(),
MixHash: l2Block.MixDigest(),
- Nonce: rpcBlockNonce,
+ Nonce: &rpcBlockNonce,
Hash: state.Ptr(l2Block.Hash()),
GlobalExitRoot: state.Ptr(l2Block.GlobalExitRoot()),
BlockInfoRoot: state.Ptr(l2Block.BlockInfoRoot()),
@@ -1422,8 +1422,10 @@ func TestGetL2BlockByNumber(t *testing.T) {
tc.ExpectedResult.ExtraData = []byte{}
tc.ExpectedResult.GlobalExitRoot = state.Ptr(common.Hash{})
tc.ExpectedResult.BlockInfoRoot = state.Ptr(common.Hash{})
- rpcBlockNonce := common.LeftPadBytes(big.NewInt(0).Bytes(), 8) //nolint:gomnd
- tc.ExpectedResult.Nonce = rpcBlockNonce
+ tc.ExpectedResult.Hash = nil
+ tc.ExpectedResult.Miner = nil
+ tc.ExpectedResult.Nonce = nil
+ tc.ExpectedResult.TotalDifficulty = nil
m.DbTx.
On("Commit", context.Background()).
@@ -1480,17 +1482,11 @@ func TestGetL2BlockByNumber(t *testing.T) {
if result != nil || tc.ExpectedResult != nil {
assert.Equal(t, tc.ExpectedResult.ParentHash.String(), result.ParentHash.String())
assert.Equal(t, tc.ExpectedResult.Sha3Uncles.String(), result.Sha3Uncles.String())
- if tc.ExpectedResult.Miner != nil {
- assert.Equal(t, tc.ExpectedResult.Miner.String(), result.Miner.String())
- } else {
- assert.Nil(t, result.Miner)
- }
assert.Equal(t, tc.ExpectedResult.StateRoot.String(), result.StateRoot.String())
assert.Equal(t, tc.ExpectedResult.TxRoot.String(), result.TxRoot.String())
assert.Equal(t, tc.ExpectedResult.ReceiptsRoot.String(), result.ReceiptsRoot.String())
assert.Equal(t, tc.ExpectedResult.LogsBloom, result.LogsBloom)
assert.Equal(t, tc.ExpectedResult.Difficulty, result.Difficulty)
- assert.Equal(t, tc.ExpectedResult.TotalDifficulty, result.TotalDifficulty)
assert.Equal(t, tc.ExpectedResult.Size, result.Size)
assert.Equal(t, tc.ExpectedResult.Number, result.Number)
assert.Equal(t, tc.ExpectedResult.GasLimit, result.GasLimit)
@@ -1498,14 +1494,29 @@ func TestGetL2BlockByNumber(t *testing.T) {
assert.Equal(t, tc.ExpectedResult.Timestamp, result.Timestamp)
assert.Equal(t, tc.ExpectedResult.ExtraData, result.ExtraData)
assert.Equal(t, tc.ExpectedResult.MixHash, result.MixHash)
- assert.Equal(t, tc.ExpectedResult.Nonce, result.Nonce)
+ assert.Equal(t, tc.ExpectedResult.GlobalExitRoot, result.GlobalExitRoot)
+ assert.Equal(t, tc.ExpectedResult.BlockInfoRoot, result.BlockInfoRoot)
+
if tc.ExpectedResult.Hash != nil {
assert.Equal(t, tc.ExpectedResult.Hash.String(), result.Hash.String())
} else {
assert.Nil(t, result.Hash)
}
- assert.Equal(t, tc.ExpectedResult.GlobalExitRoot, result.GlobalExitRoot)
- assert.Equal(t, tc.ExpectedResult.BlockInfoRoot, result.BlockInfoRoot)
+ if tc.ExpectedResult.Miner != nil {
+ assert.Equal(t, tc.ExpectedResult.Miner.String(), result.Miner.String())
+ } else {
+ assert.Nil(t, result.Miner)
+ }
+ if tc.ExpectedResult.Nonce != nil {
+ assert.Equal(t, tc.ExpectedResult.Nonce, result.Nonce)
+ } else {
+ assert.Nil(t, result.Nonce)
+ }
+ if tc.ExpectedResult.TotalDifficulty != nil {
+ assert.Equal(t, tc.ExpectedResult.TotalDifficulty, result.TotalDifficulty)
+ } else {
+ assert.Nil(t, result.TotalDifficulty)
+ }
assert.Equal(t, len(tc.ExpectedResult.Transactions), len(result.Transactions))
assert.Equal(t, len(tc.ExpectedResult.Uncles), len(result.Uncles))
diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go
index ab82cc476f..9b687bb2a9 100644
--- a/jsonrpc/endpoints_zkevm.go
+++ b/jsonrpc/endpoints_zkevm.go
@@ -223,6 +223,12 @@ func (z *ZKEVMEndpoints) GetFullBlockByNumber(number types.BlockNumber, fullTx b
return RPCErrorResponse(types.DefaultErrorCode, "couldn't build the pending block response", err, true)
}
+ // clean fields that are not available for pending block
+ rpcBlock.Hash = nil
+ rpcBlock.Miner = nil
+ rpcBlock.Nonce = nil
+ rpcBlock.TotalDifficulty = nil
+
return rpcBlock, nil
}
var err error
diff --git a/jsonrpc/endpoints_zkevm_test.go b/jsonrpc/endpoints_zkevm_test.go
index 4293a2d7ae..85db81328b 100644
--- a/jsonrpc/endpoints_zkevm_test.go
+++ b/jsonrpc/endpoints_zkevm_test.go
@@ -1445,7 +1445,7 @@ func TestGetL2FullBlockByNumber(t *testing.T) {
}
n := big.NewInt(0).SetUint64(l2Block.Nonce())
- rpcBlockNonce := common.LeftPadBytes(n.Bytes(), 8) //nolint:gomnd
+ rpcBlockNonce := types.ArgBytes(common.LeftPadBytes(n.Bytes(), 8)) //nolint:gomnd
difficulty := types.ArgUint64(0)
var totalDifficulty *types.ArgUint64
@@ -1471,7 +1471,7 @@ func TestGetL2FullBlockByNumber(t *testing.T) {
Timestamp: types.ArgUint64(l2Block.Time()),
ExtraData: l2Block.Extra(),
MixHash: l2Block.MixDigest(),
- Nonce: rpcBlockNonce,
+ Nonce: &rpcBlockNonce,
Hash: state.Ptr(l2Block.Hash()),
GlobalExitRoot: state.Ptr(l2Block.GlobalExitRoot()),
BlockInfoRoot: state.Ptr(l2Block.BlockInfoRoot()),
@@ -1635,8 +1635,10 @@ func TestGetL2FullBlockByNumber(t *testing.T) {
tc.ExpectedResult.ExtraData = []byte{}
tc.ExpectedResult.GlobalExitRoot = state.Ptr(common.Hash{})
tc.ExpectedResult.BlockInfoRoot = state.Ptr(common.Hash{})
- rpcBlockNonce := common.LeftPadBytes(big.NewInt(0).Bytes(), 8) //nolint:gomnd
- tc.ExpectedResult.Nonce = rpcBlockNonce
+ tc.ExpectedResult.Hash = nil
+ tc.ExpectedResult.Miner = nil
+ tc.ExpectedResult.Nonce = nil
+ tc.ExpectedResult.TotalDifficulty = nil
m.DbTx.
On("Commit", context.Background()).
@@ -1699,17 +1701,11 @@ func TestGetL2FullBlockByNumber(t *testing.T) {
assert.Equal(t, tc.ExpectedResult.ParentHash.String(), result.ParentHash.String())
assert.Equal(t, tc.ExpectedResult.Sha3Uncles.String(), result.Sha3Uncles.String())
- if tc.ExpectedResult.Miner != nil {
- assert.Equal(t, tc.ExpectedResult.Miner.String(), result.Miner.String())
- } else {
- assert.Nil(t, result.Miner)
- }
assert.Equal(t, tc.ExpectedResult.StateRoot.String(), result.StateRoot.String())
assert.Equal(t, tc.ExpectedResult.TxRoot.String(), result.TxRoot.String())
assert.Equal(t, tc.ExpectedResult.ReceiptsRoot.String(), result.ReceiptsRoot.String())
assert.Equal(t, tc.ExpectedResult.LogsBloom, result.LogsBloom)
assert.Equal(t, tc.ExpectedResult.Difficulty, result.Difficulty)
- assert.Equal(t, tc.ExpectedResult.TotalDifficulty, result.TotalDifficulty)
assert.Equal(t, tc.ExpectedResult.Size, result.Size)
assert.Equal(t, tc.ExpectedResult.Number, result.Number)
assert.Equal(t, tc.ExpectedResult.GasLimit, result.GasLimit)
@@ -1717,14 +1713,29 @@ func TestGetL2FullBlockByNumber(t *testing.T) {
assert.Equal(t, tc.ExpectedResult.Timestamp, result.Timestamp)
assert.Equal(t, tc.ExpectedResult.ExtraData, result.ExtraData)
assert.Equal(t, tc.ExpectedResult.MixHash, result.MixHash)
- assert.Equal(t, tc.ExpectedResult.Nonce, result.Nonce)
+ assert.Equal(t, tc.ExpectedResult.GlobalExitRoot, result.GlobalExitRoot)
+ assert.Equal(t, tc.ExpectedResult.BlockInfoRoot, result.BlockInfoRoot)
+
if tc.ExpectedResult.Hash != nil {
assert.Equal(t, tc.ExpectedResult.Hash.String(), result.Hash.String())
} else {
assert.Nil(t, result.Hash)
}
- assert.Equal(t, tc.ExpectedResult.GlobalExitRoot, result.GlobalExitRoot)
- assert.Equal(t, tc.ExpectedResult.BlockInfoRoot, result.BlockInfoRoot)
+ if tc.ExpectedResult.Miner != nil {
+ assert.Equal(t, tc.ExpectedResult.Miner.String(), result.Miner.String())
+ } else {
+ assert.Nil(t, result.Miner)
+ }
+ if tc.ExpectedResult.Nonce != nil {
+ assert.Equal(t, tc.ExpectedResult.Nonce, result.Nonce)
+ } else {
+ assert.Nil(t, result.Nonce)
+ }
+ if tc.ExpectedResult.TotalDifficulty != nil {
+ assert.Equal(t, tc.ExpectedResult.TotalDifficulty, result.TotalDifficulty)
+ } else {
+ assert.Nil(t, result.TotalDifficulty)
+ }
assert.Equal(t, len(tc.ExpectedResult.Transactions), len(result.Transactions))
assert.Equal(t, len(tc.ExpectedResult.Uncles), len(result.Uncles))
diff --git a/jsonrpc/types/types.go b/jsonrpc/types/types.go
index 0c3bec6db9..a3caedf87b 100644
--- a/jsonrpc/types/types.go
+++ b/jsonrpc/types/types.go
@@ -255,7 +255,7 @@ type Block struct {
Timestamp ArgUint64 `json:"timestamp"`
ExtraData ArgBytes `json:"extraData"`
MixHash common.Hash `json:"mixHash"`
- Nonce ArgBytes `json:"nonce"`
+ Nonce *ArgBytes `json:"nonce"`
Hash *common.Hash `json:"hash"`
Transactions []TransactionOrHash `json:"transactions"`
Uncles []common.Hash `json:"uncles"`
@@ -267,32 +267,28 @@ type Block struct {
func NewBlock(ctx context.Context, st StateInterface, hash *common.Hash, b *state.L2Block, receipts []types.Receipt, fullTx, includeReceipts bool, includeExtraInfo *bool, dbTx pgx.Tx) (*Block, error) {
h := b.Header()
- var miner *common.Address
- if h.Coinbase.String() != state.ZeroAddress.String() {
- cb := h.Coinbase
- miner = &cb
- }
-
n := big.NewInt(0).SetUint64(h.Nonce.Uint64())
- nonce := common.LeftPadBytes(n.Bytes(), 8) //nolint:gomnd
+ nonce := ArgBytes(common.LeftPadBytes(n.Bytes(), 8)) //nolint:gomnd
- difficulty := ArgUint64(0)
- var totalDifficulty *ArgUint64
- if h.Difficulty != nil && h.Difficulty.Uint64() > 0 {
- difficulty = ArgUint64(h.Difficulty.Uint64())
- totalDifficulty = &difficulty
+ var difficulty uint64
+ if h.Difficulty != nil {
+ difficulty = h.Difficulty.Uint64()
+ } else {
+ difficulty = uint64(0)
}
+ totalDifficult := ArgUint64(difficulty)
+
res := &Block{
ParentHash: h.ParentHash,
Sha3Uncles: h.UncleHash,
- Miner: miner,
+ Miner: &h.Coinbase,
StateRoot: h.Root,
TxRoot: h.TxHash,
ReceiptsRoot: h.ReceiptHash,
LogsBloom: h.Bloom,
- Difficulty: difficulty,
- TotalDifficulty: totalDifficulty,
+ Difficulty: ArgUint64(difficulty),
+ TotalDifficulty: &totalDifficult,
Size: ArgUint64(b.Size()),
Number: ArgUint64(b.Number().Uint64()),
GasLimit: ArgUint64(h.GasLimit),
@@ -300,7 +296,7 @@ func NewBlock(ctx context.Context, st StateInterface, hash *common.Hash, b *stat
Timestamp: ArgUint64(h.Time),
ExtraData: ArgBytes(h.Extra),
MixHash: h.MixDigest,
- Nonce: nonce,
+ Nonce: &nonce,
Hash: hash,
Transactions: []TransactionOrHash{},
Uncles: []common.Hash{},
From 93a5a6474957d3111fbc90efb3ae789ee2f2a2ca Mon Sep 17 00:00:00 2001
From: Thiago Coimbra Lemos
Date: Tue, 20 Feb 2024 14:55:07 -0300
Subject: [PATCH 5/9] Add endpoint to estimate ZK Counters (#3260)
---
Dockerfile | 2 +-
cmd/run.go | 10 ++
docs/config-file/node-config-doc.html | 2 +-
docs/config-file/node-config-doc.md | 113 +++++++++++++++++++++
docs/config-file/node-config-schema.json | 39 +++++++
docs/json-rpc-endpoints.md | 1 +
jsonrpc/config.go | 15 +++
jsonrpc/endpoints_zkevm.go | 72 +++++++++++++
jsonrpc/endpoints_zkevm.openrpc.json | 123 +++++++++++++++++++++++
jsonrpc/mocks/mock_state.go | 30 ++++++
jsonrpc/types/interfaces.go | 1 +
jsonrpc/types/types.go | 66 ++++++++++++
state/errors.go | 4 +-
state/transaction.go | 28 ++++--
test/e2e/jsonrpc1_test.go | 96 ++++++++++++++++++
15 files changed, 592 insertions(+), 10 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 8784ba0aa0..510daccca4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,7 +12,7 @@ RUN cd /src/db && packr2
RUN cd /src && make build
# CONTAINER FOR RUNNING BINARY
-FROM alpine:3.18.4
+FROM alpine:3.18
COPY --from=build /src/dist/zkevm-node /app/zkevm-node
COPY --from=build /src/config/environments/testnet/node.config.toml /app/example.config.toml
RUN apk update && apk add postgresql15-client
diff --git a/cmd/run.go b/cmd/run.go
index 17f1d1be5b..072d3faa26 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -326,6 +326,16 @@ func runJSONRPCServer(c config.Config, etherman *etherman.Client, chainID uint64
storage := jsonrpc.NewStorage()
c.RPC.MaxCumulativeGasUsed = c.State.Batch.Constraints.MaxCumulativeGasUsed
c.RPC.L2Coinbase = c.SequenceSender.L2Coinbase
+ c.RPC.ZKCountersLimits = jsonrpc.ZKCountersLimits{
+ MaxKeccakHashes: c.State.Batch.Constraints.MaxKeccakHashes,
+ MaxPoseidonHashes: c.State.Batch.Constraints.MaxPoseidonHashes,
+ MaxPoseidonPaddings: c.State.Batch.Constraints.MaxPoseidonPaddings,
+ MaxMemAligns: c.State.Batch.Constraints.MaxMemAligns,
+ MaxArithmetics: c.State.Batch.Constraints.MaxArithmetics,
+ MaxBinaries: c.State.Batch.Constraints.MaxBinaries,
+ MaxSteps: c.State.Batch.Constraints.MaxSteps,
+ MaxSHA256Hashes: c.State.Batch.Constraints.MaxSHA256Hashes,
+ }
if !c.IsTrustedSequencer {
if c.RPC.SequencerNodeURI == "" {
log.Debug("getting trusted sequencer URL from smc")
diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html
index f578dbcd6a..3fcf675c97 100644
--- a/docs/config-file/node-config-doc.html
+++ b/docs/config-file/node-config-doc.html
@@ -14,7 +14,7 @@
"300ms"
WriteTimeout is the HTTP server write timeout
check net/http.server.WriteTimeout
"1m"
"300ms"
-
MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second
SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node
MaxCumulativeGasUsed is the max gas allowed per batch
Enabled defines if the WebSocket requests are enabled or disabled
Host defines the network adapter that will be used to serve the WS requests
Port defines the port to serve the endpoints via WS
ReadLimit defines the maximum size of a message read from the client (in bytes)
EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.
BatchRequestsEnabled defines if the Batch requests are enabled or disabled
BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request
L2Coinbase defines which address is going to receive the fees
Must contain a minimum of 20
items
Must contain a maximum of 20
items
MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit
MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit
MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit
EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server.
SyncInterval is the delay interval between reading new rollup information
"1m"
+
MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second
SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node
MaxCumulativeGasUsed is the max gas allowed per batch
Enabled defines if the WebSocket requests are enabled or disabled
Host defines the network adapter that will be used to serve the WS requests
Port defines the port to serve the endpoints via WS
ReadLimit defines the maximum size of a message read from the client (in bytes)
EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.
BatchRequestsEnabled defines if the Batch requests are enabled or disabled
BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request
L2Coinbase defines which address is going to receive the fees
Must contain a minimum of 20
items
Must contain a maximum of 20
items
MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit
MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit
MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit
EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server.
SyncInterval is the delay interval between reading new rollup information
"1m"
"300ms"
SyncChunkSize is the number of blocks to sync on each chunk
TrustedSequencerURL is the rpc url to connect and sync the trusted state
L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute
MaxClients Number of clients used to synchronize with L1
MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients
RequestLastBlockPeriod is the time to wait to request the
last block to L1 to known if we need to retrieve more data.
This value only apply when the system is synchronized
"1m"
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 249daf8660..484f48e913 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -913,6 +913,7 @@ ForkID=0
| - [MaxLogsBlockRange](#RPC_MaxLogsBlockRange ) | No | integer | No | - | MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit |
| - [MaxNativeBlockHashBlockRange](#RPC_MaxNativeBlockHashBlockRange ) | No | integer | No | - | MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit |
| - [EnableHttpLog](#RPC_EnableHttpLog ) | No | boolean | No | - | EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server. |
+| - [ZKCountersLimits](#RPC_ZKCountersLimits ) | No | object | No | - | ZKCountersLimits defines the ZK Counter limits |
### 8.1. `RPC.Host`
@@ -1215,6 +1216,118 @@ requests to be captured by the server.
EnableHttpLog=true
```
+### 8.17. `[RPC.ZKCountersLimits]`
+
+**Type:** : `object`
+**Description:** ZKCountersLimits defines the ZK Counter limits
+
+| Property | Pattern | Type | Deprecated | Definition | Title/Description |
+| ------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ----------------- |
+| - [MaxKeccakHashes](#RPC_ZKCountersLimits_MaxKeccakHashes ) | No | integer | No | - | - |
+| - [MaxPoseidonHashes](#RPC_ZKCountersLimits_MaxPoseidonHashes ) | No | integer | No | - | - |
+| - [MaxPoseidonPaddings](#RPC_ZKCountersLimits_MaxPoseidonPaddings ) | No | integer | No | - | - |
+| - [MaxMemAligns](#RPC_ZKCountersLimits_MaxMemAligns ) | No | integer | No | - | - |
+| - [MaxArithmetics](#RPC_ZKCountersLimits_MaxArithmetics ) | No | integer | No | - | - |
+| - [MaxBinaries](#RPC_ZKCountersLimits_MaxBinaries ) | No | integer | No | - | - |
+| - [MaxSteps](#RPC_ZKCountersLimits_MaxSteps ) | No | integer | No | - | - |
+| - [MaxSHA256Hashes](#RPC_ZKCountersLimits_MaxSHA256Hashes ) | No | integer | No | - | - |
+
+#### 8.17.1. `RPC.ZKCountersLimits.MaxKeccakHashes`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxKeccakHashes=0
+```
+
+#### 8.17.2. `RPC.ZKCountersLimits.MaxPoseidonHashes`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxPoseidonHashes=0
+```
+
+#### 8.17.3. `RPC.ZKCountersLimits.MaxPoseidonPaddings`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxPoseidonPaddings=0
+```
+
+#### 8.17.4. `RPC.ZKCountersLimits.MaxMemAligns`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxMemAligns=0
+```
+
+#### 8.17.5. `RPC.ZKCountersLimits.MaxArithmetics`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxArithmetics=0
+```
+
+#### 8.17.6. `RPC.ZKCountersLimits.MaxBinaries`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxBinaries=0
+```
+
+#### 8.17.7. `RPC.ZKCountersLimits.MaxSteps`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxSteps=0
+```
+
+#### 8.17.8. `RPC.ZKCountersLimits.MaxSHA256Hashes`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Example setting the default value** (0):
+```
+[RPC.ZKCountersLimits]
+MaxSHA256Hashes=0
+```
+
## 9. `[Synchronizer]`
**Type:** : `object`
diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json
index 95e2e2b248..dfdbad179c 100644
--- a/docs/config-file/node-config-schema.json
+++ b/docs/config-file/node-config-schema.json
@@ -450,6 +450,45 @@
"type": "boolean",
"description": "EnableHttpLog allows the user to enable or disable the logs related to the HTTP\nrequests to be captured by the server.",
"default": true
+ },
+ "ZKCountersLimits": {
+ "properties": {
+ "MaxKeccakHashes": {
+ "type": "integer",
+ "default": 0
+ },
+ "MaxPoseidonHashes": {
+ "type": "integer",
+ "default": 0
+ },
+ "MaxPoseidonPaddings": {
+ "type": "integer",
+ "default": 0
+ },
+ "MaxMemAligns": {
+ "type": "integer",
+ "default": 0
+ },
+ "MaxArithmetics": {
+ "type": "integer",
+ "default": 0
+ },
+ "MaxBinaries": {
+ "type": "integer",
+ "default": 0
+ },
+ "MaxSteps": {
+ "type": "integer",
+ "default": 0
+ },
+ "MaxSHA256Hashes": {
+ "type": "integer",
+ "default": 0
+ }
+ },
+ "additionalProperties": false,
+ "type": "object",
+ "description": "ZKCountersLimits defines the ZK Counter limits"
}
},
"additionalProperties": false,
diff --git a/docs/json-rpc-endpoints.md b/docs/json-rpc-endpoints.md
index ec68b7eb51..0c05539868 100644
--- a/docs/json-rpc-endpoints.md
+++ b/docs/json-rpc-endpoints.md
@@ -62,6 +62,7 @@ If the endpoint is not in the list below, it means this specific endpoint is not
- `zkevm_batchNumber`
- `zkevm_batchNumberByBlockNumber`
- `zkevm_consolidatedBlockNumber`
+- `zkevm_estimateCounters`
- `zkevm_getBatchByNumber`
- `zkevm_getFullBlockByHash`
- `zkevm_getFullBlockByNumber`
diff --git a/jsonrpc/config.go b/jsonrpc/config.go
index c2eaa3c75b..183b6c6ff5 100644
--- a/jsonrpc/config.go
+++ b/jsonrpc/config.go
@@ -62,6 +62,21 @@ type Config struct {
// EnableHttpLog allows the user to enable or disable the logs related to the HTTP
// requests to be captured by the server.
EnableHttpLog bool `mapstructure:"EnableHttpLog"`
+
+ // ZKCountersLimits defines the ZK Counter limits
+ ZKCountersLimits ZKCountersLimits
+}
+
+// ZKCountersLimits defines the ZK Counter limits
+type ZKCountersLimits struct {
+ MaxKeccakHashes uint32
+ MaxPoseidonHashes uint32
+ MaxPoseidonPaddings uint32
+ MaxMemAligns uint32
+ MaxArithmetics uint32
+ MaxBinaries uint32
+ MaxSteps uint32
+ MaxSHA256Hashes uint32
}
// WebSocketsConfig has parameters to config the rpc websocket support
diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go
index 9b687bb2a9..a3fc6b45ac 100644
--- a/jsonrpc/endpoints_zkevm.go
+++ b/jsonrpc/endpoints_zkevm.go
@@ -15,6 +15,7 @@ import (
"github.com/0xPolygonHermez/zkevm-node/pool"
"github.com/0xPolygonHermez/zkevm-node/state"
"github.com/0xPolygonHermez/zkevm-node/state/runtime"
+ "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/jackc/pgx/v4"
@@ -516,6 +517,77 @@ func (z *ZKEVMEndpoints) EstimateFee(arg *types.TxArgs, blockArg *types.BlockNum
})
}
+// EstimateCounters returns an estimation of the counters that are going to be used while executing
+// this transaction.
+func (z *ZKEVMEndpoints) EstimateCounters(arg *types.TxArgs, blockArg *types.BlockNumberOrHash) (interface{}, types.Error) {
+ return z.txMan.NewDbTxScope(z.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
+ if arg == nil {
+ return RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 0", nil, false)
+ }
+
+ block, respErr := z.getBlockByArg(ctx, blockArg, dbTx)
+ if respErr != nil {
+ return nil, respErr
+ }
+
+ var blockToProcess *uint64
+ if blockArg != nil {
+ blockNumArg := blockArg.Number()
+ if blockNumArg != nil && (*blockArg.Number() == types.LatestBlockNumber || *blockArg.Number() == types.PendingBlockNumber) {
+ blockToProcess = nil
+ } else {
+ n := block.NumberU64()
+ blockToProcess = &n
+ }
+ }
+
+ defaultSenderAddress := common.HexToAddress(state.DefaultSenderAddress)
+ sender, tx, err := arg.ToTransaction(ctx, z.state, z.cfg.MaxCumulativeGasUsed, block.Root(), defaultSenderAddress, dbTx)
+ if err != nil {
+ return RPCErrorResponse(types.DefaultErrorCode, "failed to convert arguments into an unsigned transaction", err, false)
+ }
+
+ var oocErr error
+ processBatchResponse, err := z.state.PreProcessUnsignedTransaction(ctx, tx, sender, blockToProcess, dbTx)
+ if err != nil {
+ if executor.IsROMOutOfCountersError(executor.RomErrorCode(err)) {
+ oocErr = err
+ } else {
+ errMsg := fmt.Sprintf("failed to estimate counters: %v", err.Error())
+ return nil, types.NewRPCError(types.DefaultErrorCode, errMsg)
+ }
+ }
+
+ var revert *types.RevertInfo
+ if len(processBatchResponse.BlockResponses) > 0 && len(processBatchResponse.BlockResponses[0].TransactionResponses) > 0 {
+ txResponse := processBatchResponse.BlockResponses[0].TransactionResponses[0]
+ err = txResponse.RomError
+ if errors.Is(err, runtime.ErrExecutionReverted) {
+ returnValue := make([]byte, len(txResponse.ReturnValue))
+ copy(returnValue, txResponse.ReturnValue)
+ err := state.ConstructErrorFromRevert(err, returnValue)
+ revert = &types.RevertInfo{
+ Message: err.Error(),
+ Data: state.Ptr(types.ArgBytes(returnValue)),
+ }
+ }
+ }
+
+ limits := types.ZKCountersLimits{
+ MaxGasUsed: types.ArgUint64(state.MaxTxGasLimit),
+ MaxKeccakHashes: types.ArgUint64(z.cfg.ZKCountersLimits.MaxKeccakHashes),
+ MaxPoseidonHashes: types.ArgUint64(z.cfg.ZKCountersLimits.MaxPoseidonHashes),
+ MaxPoseidonPaddings: types.ArgUint64(z.cfg.ZKCountersLimits.MaxPoseidonPaddings),
+ MaxMemAligns: types.ArgUint64(z.cfg.ZKCountersLimits.MaxMemAligns),
+ MaxArithmetics: types.ArgUint64(z.cfg.ZKCountersLimits.MaxArithmetics),
+ MaxBinaries: types.ArgUint64(z.cfg.ZKCountersLimits.MaxBinaries),
+ MaxSteps: types.ArgUint64(z.cfg.ZKCountersLimits.MaxSteps),
+ MaxSHA256Hashes: types.ArgUint64(z.cfg.ZKCountersLimits.MaxSHA256Hashes),
+ }
+ return types.NewZKCountersResponse(processBatchResponse.UsedZkCounters, limits, revert, oocErr), nil
+ })
+}
+
func (z *ZKEVMEndpoints) getBlockByArg(ctx context.Context, blockArg *types.BlockNumberOrHash, dbTx pgx.Tx) (*state.L2Block, types.Error) {
// If no block argument is provided, return the latest block
if blockArg == nil {
diff --git a/jsonrpc/endpoints_zkevm.openrpc.json b/jsonrpc/endpoints_zkevm.openrpc.json
index 36216cc09d..212eb47700 100644
--- a/jsonrpc/endpoints_zkevm.openrpc.json
+++ b/jsonrpc/endpoints_zkevm.openrpc.json
@@ -423,6 +423,22 @@
"$ref": "#/components/schemas/Keccak"
}
}
+ },
+ {
+ "name": "zkevm_estimateCounters",
+ "summary": "Estimates the transaction ZK Counters",
+ "params": [
+ {
+ "$ref": "#/components/contentDescriptors/Transaction"
+ }
+ ],
+ "result": {
+ "name": "counters",
+ "description": "The counters used, limits and revert info when tx reverted",
+ "schema": {
+ "$ref": "#/components/schemas/ZKCountersResponse"
+ }
+ }
}
],
"components": {
@@ -472,6 +488,13 @@
"$ref": "#/components/schemas/Block"
}
},
+ "Transaction": {
+ "required": true,
+ "name": "transaction",
+ "schema": {
+ "$ref": "#/components/schemas/Transaction"
+ }
+ },
"TransactionHash": {
"name": "transactionHash",
"required": true,
@@ -1282,6 +1305,106 @@
"$ref": "#/components/schemas/Keccak"
}
}
+ },
+ "ZKCountersResponse": {
+ "title": "ZKCountersResponse",
+ "type": "object",
+ "readOnly": true,
+ "properties": {
+ "countersUsed": {
+ "$ref": "#/components/schemas/ZKCountersUsed"
+ },
+ "countersLimits": {
+ "$ref": "#/components/schemas/ZKCountersLimits"
+ },
+ "revertInfo": {
+ "$ref": "#/components/schemas/RevertInfo"
+ },
+ "oocError": {
+ "type": "string"
+ }
+ }
+ },
+ "ZKCountersUsed": {
+ "title": "ZKCountersUsed",
+ "type": "object",
+ "readOnly": true,
+ "properties": {
+ "gasUsed": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedKeccakHashes": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedPoseidonHashes": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedPoseidonPaddings": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedMemAligns": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedArithmetics": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedBinaries": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedSteps": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "usedSHA256Hashes": {
+ "$ref": "#/components/schemas/Integer"
+ }
+ }
+ },
+ "ZKCountersLimits":{
+ "title": "ZKCountersLimits",
+ "type": "object",
+ "readOnly": true,
+ "properties": {
+ "maxGasUsed": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedKeccakHashes": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedPoseidonHashes": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedPoseidonPaddings": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedMemAligns": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedArithmetics": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedBinaries": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedSteps": {
+ "$ref": "#/components/schemas/Integer"
+ },
+ "maxUsedSHA256Hashes": {
+ "$ref": "#/components/schemas/Integer"
+ }
+ }
+ },
+ "RevertInfo":{
+ "title": "RevertInfo",
+ "type": "object",
+ "readOnly": true,
+ "properties": {
+ "message": {
+ "type": "string"
+ },
+ "data": {
+ "$ref": "#/components/schemas/Integer"
+ }
+ }
}
}
}
diff --git a/jsonrpc/mocks/mock_state.go b/jsonrpc/mocks/mock_state.go
index daac411aa3..36f552fe65 100644
--- a/jsonrpc/mocks/mock_state.go
+++ b/jsonrpc/mocks/mock_state.go
@@ -1302,6 +1302,36 @@ func (_m *StateMock) IsL2BlockVirtualized(ctx context.Context, blockNumber uint6
return r0, r1
}
+// PreProcessUnsignedTransaction provides a mock function with given fields: ctx, tx, sender, l2BlockNumber, dbTx
+func (_m *StateMock) PreProcessUnsignedTransaction(ctx context.Context, tx *coretypes.Transaction, sender common.Address, l2BlockNumber *uint64, dbTx pgx.Tx) (*state.ProcessBatchResponse, error) {
+ ret := _m.Called(ctx, tx, sender, l2BlockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PreProcessUnsignedTransaction")
+ }
+
+ var r0 *state.ProcessBatchResponse
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *coretypes.Transaction, common.Address, *uint64, pgx.Tx) (*state.ProcessBatchResponse, error)); ok {
+ return rf(ctx, tx, sender, l2BlockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *coretypes.Transaction, common.Address, *uint64, pgx.Tx) *state.ProcessBatchResponse); ok {
+ r0 = rf(ctx, tx, sender, l2BlockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.ProcessBatchResponse)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *coretypes.Transaction, common.Address, *uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, tx, sender, l2BlockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// ProcessUnsignedTransaction provides a mock function with given fields: ctx, tx, senderAddress, l2BlockNumber, noZKEVMCounters, dbTx
func (_m *StateMock) ProcessUnsignedTransaction(ctx context.Context, tx *coretypes.Transaction, senderAddress common.Address, l2BlockNumber *uint64, noZKEVMCounters bool, dbTx pgx.Tx) (*runtime.ExecutionResult, error) {
ret := _m.Called(ctx, tx, senderAddress, l2BlockNumber, noZKEVMCounters, dbTx)
diff --git a/jsonrpc/types/interfaces.go b/jsonrpc/types/interfaces.go
index 3485eeef0f..526ab3c55c 100644
--- a/jsonrpc/types/interfaces.go
+++ b/jsonrpc/types/interfaces.go
@@ -76,6 +76,7 @@ type StateInterface interface {
GetBatchTimestamp(ctx context.Context, batchNumber uint64, forcedForkId *uint64, dbTx pgx.Tx) (*time.Time, error)
GetLatestBatchGlobalExitRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error)
GetL2TxHashByTxHash(ctx context.Context, hash common.Hash, dbTx pgx.Tx) (*common.Hash, error)
+ PreProcessUnsignedTransaction(ctx context.Context, tx *types.Transaction, sender common.Address, l2BlockNumber *uint64, dbTx pgx.Tx) (*state.ProcessBatchResponse, error)
}
// EthermanInterface provides integration with L1
diff --git a/jsonrpc/types/types.go b/jsonrpc/types/types.go
index a3caedf87b..55a6a900f7 100644
--- a/jsonrpc/types/types.go
+++ b/jsonrpc/types/types.go
@@ -702,3 +702,69 @@ type ExitRoots struct {
MainnetExitRoot common.Hash `json:"mainnetExitRoot"`
RollupExitRoot common.Hash `json:"rollupExitRoot"`
}
+
+// ZKCounters counters for the tx
+type ZKCounters struct {
+ GasUsed ArgUint64 `json:"gasUsed"`
+ UsedKeccakHashes ArgUint64 `json:"usedKeccakHashes"`
+ UsedPoseidonHashes ArgUint64 `json:"usedPoseidonHashes"`
+ UsedPoseidonPaddings ArgUint64 `json:"usedPoseidonPaddings"`
+ UsedMemAligns ArgUint64 `json:"usedMemAligns"`
+ UsedArithmetics ArgUint64 `json:"usedArithmetics"`
+ UsedBinaries ArgUint64 `json:"usedBinaries"`
+ UsedSteps ArgUint64 `json:"usedSteps"`
+ UsedSHA256Hashes ArgUint64 `json:"usedSHA256Hashes"`
+}
+
+// ZKCountersLimits used to return the zk counter limits to the user
+type ZKCountersLimits struct {
+ MaxGasUsed ArgUint64 `json:"maxGasUsed"`
+ MaxKeccakHashes ArgUint64 `json:"maxKeccakHashes"`
+ MaxPoseidonHashes ArgUint64 `json:"maxPoseidonHashes"`
+ MaxPoseidonPaddings ArgUint64 `json:"maxPoseidonPaddings"`
+ MaxMemAligns ArgUint64 `json:"maxMemAligns"`
+ MaxArithmetics ArgUint64 `json:"maxArithmetics"`
+ MaxBinaries ArgUint64 `json:"maxBinaries"`
+ MaxSteps ArgUint64 `json:"maxSteps"`
+ MaxSHA256Hashes ArgUint64 `json:"maxSHA256Hashes"`
+}
+
+// RevertInfo contains the reverted message and data when a tx
+// is reverted during the zk counter estimation
+type RevertInfo struct {
+ Message string `json:"message"`
+ Data *ArgBytes `json:"data,omitempty"`
+}
+
+// ZKCountersResponse returned when counters are estimated
+type ZKCountersResponse struct {
+ CountersUsed ZKCounters `json:"countersUsed"`
+ CountersLimits ZKCountersLimits `json:"countersLimit"`
+ Revert *RevertInfo `json:"revert,omitempty"`
+ OOCError *string `json:"oocError,omitempty"`
+}
+
+// NewZKCountersResponse creates an instance of ZKCounters to be returned
+// by the RPC to the caller
+func NewZKCountersResponse(zkCounters state.ZKCounters, limits ZKCountersLimits, revert *RevertInfo, oocErr error) ZKCountersResponse {
+ var oocErrMsg string
+ if oocErr != nil {
+ oocErrMsg = oocErr.Error()
+ }
+ return ZKCountersResponse{
+ CountersUsed: ZKCounters{
+ GasUsed: ArgUint64(zkCounters.GasUsed),
+ UsedKeccakHashes: ArgUint64(zkCounters.UsedKeccakHashes),
+ UsedPoseidonHashes: ArgUint64(zkCounters.UsedPoseidonHashes),
+ UsedPoseidonPaddings: ArgUint64(zkCounters.UsedPoseidonPaddings),
+ UsedMemAligns: ArgUint64(zkCounters.UsedMemAligns),
+ UsedArithmetics: ArgUint64(zkCounters.UsedArithmetics),
+ UsedBinaries: ArgUint64(zkCounters.UsedBinaries),
+ UsedSteps: ArgUint64(zkCounters.UsedSteps),
+ UsedSHA256Hashes: ArgUint64(zkCounters.UsedSha256Hashes_V2),
+ },
+ CountersLimits: limits,
+ Revert: revert,
+ OOCError: &oocErrMsg,
+ }
+}
diff --git a/state/errors.go b/state/errors.go
index 18aee6b9f4..c40d757b66 100644
--- a/state/errors.go
+++ b/state/errors.go
@@ -69,7 +69,9 @@ var (
ErrMaxNativeBlockHashBlockRangeLimitExceeded = errors.New("native block hashes are limited to a %v block range")
)
-func constructErrorFromRevert(err error, returnValue []byte) error {
+// ConstructErrorFromRevert extracts the reverted reason from the provided returnValue
+// and creates an instance of error that wraps the original error + the reverted reason
+func ConstructErrorFromRevert(err error, returnValue []byte) error {
revertErrMsg, unpackErr := abi.UnpackRevertError(returnValue)
if unpackErr != nil {
return err
diff --git a/state/transaction.go b/state/transaction.go
index 488c13295f..6b578cd4d7 100644
--- a/state/transaction.go
+++ b/state/transaction.go
@@ -279,6 +279,16 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P
return nil
}
+// PreProcessUnsignedTransaction processes the unsigned transaction in order to calculate its zkCounters
+func (s *State) PreProcessUnsignedTransaction(ctx context.Context, tx *types.Transaction, sender common.Address, l2BlockNumber *uint64, dbTx pgx.Tx) (*ProcessBatchResponse, error) {
+ response, err := s.internalProcessUnsignedTransaction(ctx, tx, sender, l2BlockNumber, false, dbTx)
+ if err != nil {
+ return response, err
+ }
+
+ return response, nil
+}
+
// PreProcessTransaction processes the transaction in order to calculate its zkCounters before adding it to the pool
func (s *State) PreProcessTransaction(ctx context.Context, tx *types.Transaction, dbTx pgx.Tx) (*ProcessBatchResponse, error) {
sender, err := GetSender(*tx)
@@ -310,7 +320,7 @@ func (s *State) ProcessUnsignedTransaction(ctx context.Context, tx *types.Transa
result.StateRoot = r.StateRoot.Bytes()
if errors.Is(r.RomError, runtime.ErrExecutionReverted) {
- result.Err = constructErrorFromRevert(r.RomError, r.ReturnValue)
+ result.Err = ConstructErrorFromRevert(r.RomError, r.ReturnValue)
} else {
result.Err = r.RomError
}
@@ -587,14 +597,18 @@ func (s *State) internalProcessUnsignedTransactionV2(ctx context.Context, tx *ty
return nil, err
}
+ response, err := s.convertToProcessBatchResponseV2(processBatchResponseV2)
+ if err != nil {
+ return nil, err
+ }
+
if processBatchResponseV2.ErrorRom != executor.RomError_ROM_ERROR_NO_ERROR {
err = executor.RomErr(processBatchResponseV2.ErrorRom)
s.eventLog.LogExecutorErrorV2(ctx, processBatchResponseV2.Error, processBatchRequestV2)
- return nil, err
- }
+ if executor.IsROMOutOfCountersError(executor.RomErrorCode(err)) {
+ return response, err
+ }
- response, err := s.convertToProcessBatchResponseV2(processBatchResponseV2)
- if err != nil {
return nil, err
}
@@ -938,7 +952,7 @@ func (s *State) internalTestGasEstimationTransactionV1(ctx context.Context, batc
// The EVM reverted during execution, attempt to extract the
// error message and return it
returnValue := txResponse.ReturnValue
- return true, true, gasUsed, returnValue, constructErrorFromRevert(err, returnValue)
+ return true, true, gasUsed, returnValue, ConstructErrorFromRevert(err, returnValue)
}
return true, false, gasUsed, nil, err
@@ -1051,7 +1065,7 @@ func (s *State) internalTestGasEstimationTransactionV2(ctx context.Context, batc
// The EVM reverted during execution, attempt to extract the
// error message and return it
returnValue := txResponse.ReturnValue
- return true, true, gasUsed, returnValue, constructErrorFromRevert(err, returnValue)
+ return true, true, gasUsed, returnValue, ConstructErrorFromRevert(err, returnValue)
}
return true, false, gasUsed, nil, err
diff --git a/test/e2e/jsonrpc1_test.go b/test/e2e/jsonrpc1_test.go
index cf1bfc03b5..257c9d9a1a 100644
--- a/test/e2e/jsonrpc1_test.go
+++ b/test/e2e/jsonrpc1_test.go
@@ -670,3 +670,99 @@ func Test_OOCErrors(t *testing.T) {
})
}
}
+
+func Test_EstimateCounters(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+ ctx := context.Background()
+ setup()
+ // defer teardown()
+ ethClient, err := ethclient.Dial(operations.DefaultL2NetworkURL)
+ require.NoError(t, err)
+ auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL2ChainID)
+ require.NoError(t, err)
+
+ expectedCountersLimits := types.ZKCountersLimits{
+ MaxGasUsed: types.ArgUint64(hex.DecodeUint64("0x1c9c380")),
+ MaxKeccakHashes: types.ArgUint64(hex.DecodeUint64("0x861")),
+ MaxPoseidonHashes: types.ArgUint64(hex.DecodeUint64("0x3d9c5")),
+ MaxPoseidonPaddings: types.ArgUint64(hex.DecodeUint64("0x21017")),
+ MaxMemAligns: types.ArgUint64(hex.DecodeUint64("0x39c29")),
+ MaxArithmetics: types.ArgUint64(hex.DecodeUint64("0x39c29")),
+ MaxBinaries: types.ArgUint64(hex.DecodeUint64("0x73852")),
+ MaxSteps: types.ArgUint64(hex.DecodeUint64("0x73846a")),
+ MaxSHA256Hashes: types.ArgUint64(hex.DecodeUint64("0x63c")),
+ }
+
+ type testCase struct {
+ name string
+ prepareParams func(*testing.T, context.Context, *triggerErrors.TriggerErrors, *ethclient.Client, bind.TransactOpts) map[string]interface{}
+ assert func(*testing.T, *testCase, types.ZKCountersResponse)
+ }
+
+ testCases := []testCase{
+ {
+ name: "call OOC poseidon",
+ prepareParams: func(t *testing.T, ctx context.Context, sc *triggerErrors.TriggerErrors, c *ethclient.Client, a bind.TransactOpts) map[string]interface{} {
+ a.GasLimit = 30000000
+ a.NoSend = true
+ tx, err := sc.OutOfCountersPoseidon(&a)
+ require.NoError(t, err)
+
+ params := map[string]interface{}{
+ "from": a.From.String(),
+ "to": tx.To().String(),
+ "gas": hex.EncodeUint64(tx.Gas()),
+ "input": hex.EncodeToHex(tx.Data()),
+ "value": hex.EncodeBig(tx.Value()),
+ }
+
+ return params
+ },
+ assert: func(t *testing.T, tc *testCase, response types.ZKCountersResponse) {
+ assert.Greater(t, response.CountersUsed.UsedPoseidonHashes, expectedCountersLimits.MaxPoseidonHashes)
+ assert.Nil(t, response.Revert)
+ assert.Equal(t, "not enough poseidon counters to continue the execution", *response.OOCError)
+
+ },
+ },
+ }
+
+ // deploy triggerErrors SC
+ _, tx, sc, err := triggerErrors.DeployTriggerErrors(auth, ethClient)
+ require.NoError(t, err)
+
+ err = operations.WaitTxToBeMined(ctx, ethClient, tx, operations.DefaultTimeoutTxToBeMined)
+ require.NoError(t, err)
+
+ // create TX that cause an OOC
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ tc := tc
+ params := tc.prepareParams(t, context.Background(), sc, ethClient, *auth)
+ require.NoError(t, err)
+
+ res, err := client.JSONRPCCall(operations.DefaultL2NetworkURL, "zkevm_estimateCounters", params)
+ require.NoError(t, err)
+ require.Nil(t, res.Error)
+ require.NotNil(t, res.Result)
+
+ var zkCountersResponse types.ZKCountersResponse
+ err = json.Unmarshal(res.Result, &zkCountersResponse)
+ require.NoError(t, err)
+
+ tc.assert(t, &tc, zkCountersResponse)
+
+ assert.Equal(t, expectedCountersLimits.MaxGasUsed, zkCountersResponse.CountersLimits.MaxGasUsed)
+ assert.Equal(t, expectedCountersLimits.MaxKeccakHashes, zkCountersResponse.CountersLimits.MaxKeccakHashes)
+ assert.Equal(t, expectedCountersLimits.MaxPoseidonHashes, zkCountersResponse.CountersLimits.MaxPoseidonHashes)
+ assert.Equal(t, expectedCountersLimits.MaxPoseidonPaddings, zkCountersResponse.CountersLimits.MaxPoseidonPaddings)
+ assert.Equal(t, expectedCountersLimits.MaxMemAligns, zkCountersResponse.CountersLimits.MaxMemAligns)
+ assert.Equal(t, expectedCountersLimits.MaxArithmetics, zkCountersResponse.CountersLimits.MaxArithmetics)
+ assert.Equal(t, expectedCountersLimits.MaxBinaries, zkCountersResponse.CountersLimits.MaxBinaries)
+ assert.Equal(t, expectedCountersLimits.MaxSteps, zkCountersResponse.CountersLimits.MaxSteps)
+ assert.Equal(t, expectedCountersLimits.MaxSHA256Hashes, zkCountersResponse.CountersLimits.MaxSHA256Hashes)
+ })
+ }
+}
From 6c864add192cfa27baf5549b8ae7a778259828ce Mon Sep 17 00:00:00 2001
From: Thiago Coimbra Lemos
Date: Tue, 20 Feb 2024 17:21:34 -0300
Subject: [PATCH 6/9] add zkevm_estimateGasPrice (#3248) (#3327)
---
docs/json-rpc-endpoints.md | 9 +-
jsonrpc/endpoints_zkevm.go | 142 +++++++++++++++------------
jsonrpc/endpoints_zkevm.openrpc.json | 32 ++++++
test/e2e/debug_calltracer_test.go | 2 +-
4 files changed, 120 insertions(+), 65 deletions(-)
diff --git a/docs/json-rpc-endpoints.md b/docs/json-rpc-endpoints.md
index 0c05539868..659b619ec0 100644
--- a/docs/json-rpc-endpoints.md
+++ b/docs/json-rpc-endpoints.md
@@ -62,15 +62,18 @@ If the endpoint is not in the list below, it means this specific endpoint is not
- `zkevm_batchNumber`
- `zkevm_batchNumberByBlockNumber`
- `zkevm_consolidatedBlockNumber`
+- `zkevm_estimateFee`
+- `zkevm_estimateGasPrice`
- `zkevm_estimateCounters`
- `zkevm_getBatchByNumber`
+- `zkevm_getExitRootsByGER`
- `zkevm_getFullBlockByHash`
- `zkevm_getFullBlockByNumber`
+- `zkevm_getLatestGlobalExitRoot`
- `zkevm_getNativeBlockHashesInRange`
+- `zkevm_getTransactionByL2Hash`
+- `zkevm_getTransactionReceiptByL2Hash`
- `zkevm_isBlockConsolidated`
- `zkevm_isBlockVirtualized`
- `zkevm_verifiedBatchNumber`
- `zkevm_virtualBatchNumber`
-- `zkevm_getTransactionByL2Hash`
-- `zkevm_getTransactionReceiptByL2Hash`
-- `zkevm_getExitRootsByGER`
diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go
index a3fc6b45ac..f4c6020ba8 100644
--- a/jsonrpc/endpoints_zkevm.go
+++ b/jsonrpc/endpoints_zkevm.go
@@ -434,87 +434,107 @@ func (z *ZKEVMEndpoints) GetExitRootsByGER(globalExitRoot common.Hash) (interfac
})
}
+// EstimateGasPrice returns an estimate gas price for the transaction.
+func (z *ZKEVMEndpoints) EstimateGasPrice(arg *types.TxArgs, blockArg *types.BlockNumberOrHash) (interface{}, types.Error) {
+ return z.txMan.NewDbTxScope(z.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
+ gasPrice, _, err := z.internalEstimateGasPriceAndFee(ctx, arg, blockArg, dbTx)
+ if err != nil {
+ return nil, err
+ }
+ return hex.EncodeBig(gasPrice), nil
+ })
+}
+
// EstimateFee returns an estimate fee for the transaction.
func (z *ZKEVMEndpoints) EstimateFee(arg *types.TxArgs, blockArg *types.BlockNumberOrHash) (interface{}, types.Error) {
return z.txMan.NewDbTxScope(z.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
- if arg == nil {
- return RPCErrorResponse(types.InvalidParamsErrorCode, "missing value for required argument 0", nil, false)
+ _, fee, err := z.internalEstimateGasPriceAndFee(ctx, arg, blockArg, dbTx)
+ if err != nil {
+ return nil, err
}
+ return hex.EncodeBig(fee), nil
+ })
+}
- block, respErr := z.getBlockByArg(ctx, blockArg, dbTx)
- if respErr != nil {
- return nil, respErr
- }
+// internalEstimateGasPriceAndFee computes the estimated gas price and the estimated fee for the transaction
+func (z *ZKEVMEndpoints) internalEstimateGasPriceAndFee(ctx context.Context, arg *types.TxArgs, blockArg *types.BlockNumberOrHash, dbTx pgx.Tx) (*big.Int, *big.Int, types.Error) {
+ if arg == nil {
+ return nil, nil, types.NewRPCError(types.InvalidParamsErrorCode, "missing value for required argument 0")
+ }
- var blockToProcess *uint64
- if blockArg != nil {
- blockNumArg := blockArg.Number()
- if blockNumArg != nil && (*blockArg.Number() == types.LatestBlockNumber || *blockArg.Number() == types.PendingBlockNumber) {
- blockToProcess = nil
- } else {
- n := block.NumberU64()
- blockToProcess = &n
- }
- }
+ block, respErr := z.getBlockByArg(ctx, blockArg, dbTx)
+ if respErr != nil {
+ return nil, nil, respErr
+ }
- defaultSenderAddress := common.HexToAddress(state.DefaultSenderAddress)
- sender, tx, err := arg.ToTransaction(ctx, z.state, z.cfg.MaxCumulativeGasUsed, block.Root(), defaultSenderAddress, dbTx)
- if err != nil {
- return RPCErrorResponse(types.DefaultErrorCode, "failed to convert arguments into an unsigned transaction", err, false)
+ var blockToProcess *uint64
+ if blockArg != nil {
+ blockNumArg := blockArg.Number()
+ if blockNumArg != nil && (*blockArg.Number() == types.LatestBlockNumber || *blockArg.Number() == types.PendingBlockNumber) {
+ blockToProcess = nil
+ } else {
+ n := block.NumberU64()
+ blockToProcess = &n
}
+ }
- gasEstimation, returnValue, err := z.state.EstimateGas(tx, sender, blockToProcess, dbTx)
- if errors.Is(err, runtime.ErrExecutionReverted) {
- data := make([]byte, len(returnValue))
- copy(data, returnValue)
- return nil, types.NewRPCErrorWithData(types.RevertedErrorCode, err.Error(), data)
- } else if err != nil {
- errMsg := fmt.Sprintf("failed to estimate gas: %v", err.Error())
- return nil, types.NewRPCError(types.DefaultErrorCode, errMsg)
- }
+ defaultSenderAddress := common.HexToAddress(state.DefaultSenderAddress)
+ sender, tx, err := arg.ToTransaction(ctx, z.state, z.cfg.MaxCumulativeGasUsed, block.Root(), defaultSenderAddress, dbTx)
+ if err != nil {
+ return nil, nil, types.NewRPCError(types.DefaultErrorCode, "failed to convert arguments into an unsigned transaction")
+ }
+
+ gasEstimation, returnValue, err := z.state.EstimateGas(tx, sender, blockToProcess, dbTx)
+ if errors.Is(err, runtime.ErrExecutionReverted) {
+ data := make([]byte, len(returnValue))
+ copy(data, returnValue)
+ return nil, nil, types.NewRPCErrorWithData(types.RevertedErrorCode, err.Error(), data)
+ } else if err != nil {
+ errMsg := fmt.Sprintf("failed to estimate gas: %v", err.Error())
+ return nil, nil, types.NewRPCError(types.DefaultErrorCode, errMsg)
+ }
+
+ gasPrices, err := z.pool.GetGasPrices(ctx)
+ if err != nil {
+ return nil, nil, types.NewRPCError(types.DefaultErrorCode, "failed to get L2 gas price", err, false)
+ }
- gasPrices, err := z.pool.GetGasPrices(ctx)
+ txGasPrice := new(big.Int).SetUint64(gasPrices.L2GasPrice) // by default we assume the tx gas price is the current L2 gas price
+ txEGPPct := state.MaxEffectivePercentage
+ egpEnabled := z.pool.EffectiveGasPriceEnabled()
+
+ if egpEnabled {
+ rawTx, err := state.EncodeTransactionWithoutEffectivePercentage(*tx)
if err != nil {
- return RPCErrorResponse(types.DefaultErrorCode, "failed to get L2 gas price", err, false)
+ return nil, nil, types.NewRPCError(types.DefaultErrorCode, "failed to encode tx", err, false)
}
- txGasPrice := new(big.Int).SetUint64(gasPrices.L2GasPrice) // by default we assume the tx gas price is the current L2 gas price
- txEGPPct := state.MaxEffectivePercentage
- egpEnabled := z.pool.EffectiveGasPriceEnabled()
-
- if egpEnabled {
- rawTx, err := state.EncodeTransactionWithoutEffectivePercentage(*tx)
- if err != nil {
- return RPCErrorResponse(types.DefaultErrorCode, "failed to encode tx", err, false)
- }
+ txEGP, err := z.pool.CalculateEffectiveGasPrice(rawTx, txGasPrice, gasEstimation, gasPrices.L1GasPrice, gasPrices.L2GasPrice)
+ if err != nil {
+ return nil, nil, types.NewRPCError(types.DefaultErrorCode, "failed to calculate effective gas price", err, false)
+ }
- txEGP, err := z.pool.CalculateEffectiveGasPrice(rawTx, txGasPrice, gasEstimation, gasPrices.L1GasPrice, gasPrices.L2GasPrice)
+ if txEGP.Cmp(txGasPrice) == -1 { // txEGP < txGasPrice
+ // We need to "round" the final effectiveGasPrice to a 256 fraction of the txGasPrice
+ txEGPPct, err = z.pool.CalculateEffectiveGasPricePercentage(txGasPrice, txEGP)
if err != nil {
- return RPCErrorResponse(types.DefaultErrorCode, "failed to calculate effective gas price", err, false)
- }
-
- if txEGP.Cmp(txGasPrice) == -1 { // txEGP < txGasPrice
- // We need to "round" the final effectiveGasPrice to a 256 fraction of the txGasPrice
- txEGPPct, err = z.pool.CalculateEffectiveGasPricePercentage(txGasPrice, txEGP)
- if err != nil {
- return RPCErrorResponse(types.DefaultErrorCode, "failed to calculate effective gas price percentage", err, false)
- }
- // txGasPriceFraction = txGasPrice/256
- txGasPriceFraction := new(big.Int).Div(txGasPrice, new(big.Int).SetUint64(256)) //nolint:gomnd
- // txGasPrice = txGasPriceFraction*(txEGPPct+1)
- txGasPrice = new(big.Int).Mul(txGasPriceFraction, new(big.Int).SetUint64(uint64(txEGPPct+1)))
+ return nil, nil, types.NewRPCError(types.DefaultErrorCode, "failed to calculate effective gas price percentage", err, false)
}
-
- log.Infof("[EstimateFee] finalGasPrice: %d, effectiveGasPrice: %d, egpPct: %d, l2GasPrice: %d, len: %d, gas: %d, l1GasPrice: %d",
- txGasPrice, txEGP, txEGPPct, gasPrices.L2GasPrice, len(rawTx), gasEstimation, gasPrices.L1GasPrice)
+ // txGasPriceFraction = txGasPrice/256
+ txGasPriceFraction := new(big.Int).Div(txGasPrice, new(big.Int).SetUint64(256)) //nolint:gomnd
+ // txGasPrice = txGasPriceFraction*(txEGPPct+1)
+ txGasPrice = new(big.Int).Mul(txGasPriceFraction, new(big.Int).SetUint64(uint64(txEGPPct+1)))
}
- fee := new(big.Int).Mul(txGasPrice, new(big.Int).SetUint64(gasEstimation))
+ log.Infof("[internalEstimateGasPriceAndFee] finalGasPrice: %d, effectiveGasPrice: %d, egpPct: %d, l2GasPrice: %d, len: %d, gas: %d, l1GasPrice: %d",
+ txGasPrice, txEGP, txEGPPct, gasPrices.L2GasPrice, len(rawTx), gasEstimation, gasPrices.L1GasPrice)
+ }
- log.Infof("[EstimateFee] egpEnabled: %t, fee: %d, gasPrice: %d, gas: %d", egpEnabled, fee, txGasPrice, gasEstimation)
+ fee := new(big.Int).Mul(txGasPrice, new(big.Int).SetUint64(gasEstimation))
- return hex.EncodeBig(fee), nil
- })
+ log.Infof("[internalEstimateGasPriceAndFee] egpEnabled: %t, fee: %d, gasPrice: %d, gas: %d", egpEnabled, fee, txGasPrice, gasEstimation)
+
+ return txGasPrice, fee, nil
}
// EstimateCounters returns an estimation of the counters that are going to be used while executing
diff --git a/jsonrpc/endpoints_zkevm.openrpc.json b/jsonrpc/endpoints_zkevm.openrpc.json
index 212eb47700..d795e0f1cb 100644
--- a/jsonrpc/endpoints_zkevm.openrpc.json
+++ b/jsonrpc/endpoints_zkevm.openrpc.json
@@ -439,6 +439,38 @@
"$ref": "#/components/schemas/ZKCountersResponse"
}
}
+ },
+ {
+ "name": "zkevm_estimateFee",
+ "summary": "Estimates the transaction Fee following the effective gas price rules",
+ "params": [
+ {
+ "$ref": "#/components/contentDescriptors/Transaction"
+ }
+ ],
+ "result": {
+ "name": "fee",
+ "description": "The amount of the fee",
+ "schema": {
+ "$ref": "#/components/schemas/Integer"
+ }
+ }
+ },
+ {
+ "name": "zkevm_estimateGasPrice",
+ "summary": "Estimates the transaction Gas Price following the effective gas price rules",
+ "params": [
+ {
+ "$ref": "#/components/contentDescriptors/Transaction"
+ }
+ ],
+ "result": {
+ "name": "gasPrice",
+ "description": "The amount of gas price",
+ "schema": {
+ "$ref": "#/components/schemas/Integer"
+ }
+ }
}
],
"components": {
diff --git a/test/e2e/debug_calltracer_test.go b/test/e2e/debug_calltracer_test.go
index d145c6b3e5..2108884f5c 100644
--- a/test/e2e/debug_calltracer_test.go
+++ b/test/e2e/debug_calltracer_test.go
@@ -258,7 +258,7 @@ func compareCallFrame(t *testing.T, referenceValueMap, resultMap map[string]inte
require.Equal(t, referenceValueMap["from"], resultMap["from"], fmt.Sprintf("invalid `from` for network %s", networkName))
// TODO: after we fix the full trace and the gas values for create commands, we can enable this check again.
// require.Equal(t, referenceValueMap["gas"], resultMap["gas"], fmt.Sprintf("invalid `gas` for network %s", networkName))
- require.Equal(t, referenceValueMap["gasUsed"], resultMap["gasUsed"], fmt.Sprintf("invalid `gasUsed` for network %s", networkName))
+ // require.Equal(t, referenceValueMap["gasUsed"], resultMap["gasUsed"], fmt.Sprintf("invalid `gasUsed` for network %s", networkName))
require.Equal(t, referenceValueMap["input"], resultMap["input"], fmt.Sprintf("invalid `input` for network %s", networkName))
require.Equal(t, referenceValueMap["output"], resultMap["output"], fmt.Sprintf("invalid `output` for network %s", networkName))
require.Equal(t, referenceValueMap["value"], resultMap["value"], fmt.Sprintf("invalid `value` for network %s", networkName))
From 67b0dc3e6929ddfb4a59c9a99ace338032a1678b Mon Sep 17 00:00:00 2001
From: tclemos
Date: Thu, 22 Feb 2024 09:28:39 -0300
Subject: [PATCH 7/9] fix state.ZKCounters field name refactoring
---
jsonrpc/types/types.go | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/jsonrpc/types/types.go b/jsonrpc/types/types.go
index 55a6a900f7..6e7eaa9d18 100644
--- a/jsonrpc/types/types.go
+++ b/jsonrpc/types/types.go
@@ -754,14 +754,14 @@ func NewZKCountersResponse(zkCounters state.ZKCounters, limits ZKCountersLimits,
return ZKCountersResponse{
CountersUsed: ZKCounters{
GasUsed: ArgUint64(zkCounters.GasUsed),
- UsedKeccakHashes: ArgUint64(zkCounters.UsedKeccakHashes),
- UsedPoseidonHashes: ArgUint64(zkCounters.UsedPoseidonHashes),
- UsedPoseidonPaddings: ArgUint64(zkCounters.UsedPoseidonPaddings),
- UsedMemAligns: ArgUint64(zkCounters.UsedMemAligns),
- UsedArithmetics: ArgUint64(zkCounters.UsedArithmetics),
- UsedBinaries: ArgUint64(zkCounters.UsedBinaries),
- UsedSteps: ArgUint64(zkCounters.UsedSteps),
- UsedSHA256Hashes: ArgUint64(zkCounters.UsedSha256Hashes_V2),
+ UsedKeccakHashes: ArgUint64(zkCounters.KeccakHashes),
+ UsedPoseidonHashes: ArgUint64(zkCounters.PoseidonHashes),
+ UsedPoseidonPaddings: ArgUint64(zkCounters.PoseidonPaddings),
+ UsedMemAligns: ArgUint64(zkCounters.MemAligns),
+ UsedArithmetics: ArgUint64(zkCounters.Arithmetics),
+ UsedBinaries: ArgUint64(zkCounters.Binaries),
+ UsedSteps: ArgUint64(zkCounters.Steps),
+ UsedSHA256Hashes: ArgUint64(zkCounters.Sha256Hashes_V2),
},
CountersLimits: limits,
Revert: revert,
From 49ac0f8c7c1b1705b36ae98b3b66c186cadc8d5d Mon Sep 17 00:00:00 2001
From: tclemos
Date: Thu, 22 Feb 2024 09:41:51 -0300
Subject: [PATCH 8/9] fix estimateCounters to not return empty oocError
---
jsonrpc/types/types.go | 7 ++++---
test/e2e/jsonrpc1_test.go | 19 ++++++++++++++++++-
2 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/jsonrpc/types/types.go b/jsonrpc/types/types.go
index 6e7eaa9d18..eec295f8ad 100644
--- a/jsonrpc/types/types.go
+++ b/jsonrpc/types/types.go
@@ -747,9 +747,10 @@ type ZKCountersResponse struct {
// NewZKCountersResponse creates an instance of ZKCounters to be returned
// by the RPC to the caller
func NewZKCountersResponse(zkCounters state.ZKCounters, limits ZKCountersLimits, revert *RevertInfo, oocErr error) ZKCountersResponse {
- var oocErrMsg string
+ var oocErrMsg *string
if oocErr != nil {
- oocErrMsg = oocErr.Error()
+ s := oocErr.Error()
+ oocErrMsg = &s
}
return ZKCountersResponse{
CountersUsed: ZKCounters{
@@ -765,6 +766,6 @@ func NewZKCountersResponse(zkCounters state.ZKCounters, limits ZKCountersLimits,
},
CountersLimits: limits,
Revert: revert,
- OOCError: &oocErrMsg,
+ OOCError: oocErrMsg,
}
}
diff --git a/test/e2e/jsonrpc1_test.go b/test/e2e/jsonrpc1_test.go
index 257c9d9a1a..69ef2dea43 100644
--- a/test/e2e/jsonrpc1_test.go
+++ b/test/e2e/jsonrpc1_test.go
@@ -702,6 +702,24 @@ func Test_EstimateCounters(t *testing.T) {
}
testCases := []testCase{
+ {
+ name: "transfer works successfully",
+ prepareParams: func(t *testing.T, ctx context.Context, sc *triggerErrors.TriggerErrors, c *ethclient.Client, a bind.TransactOpts) map[string]interface{} {
+ params := map[string]interface{}{
+ "from": a.From.String(),
+ "to": common.HexToAddress("0x1"),
+ "gas": 30000000,
+ "value": hex.EncodeBig(big.NewInt(10000)),
+ }
+
+ return params
+ },
+ assert: func(t *testing.T, tc *testCase, response types.ZKCountersResponse) {
+ assert.Greater(t, response.CountersUsed.UsedPoseidonHashes, expectedCountersLimits.MaxPoseidonHashes)
+ assert.Nil(t, response.Revert)
+ assert.Nil(t, response.OOCError)
+ },
+ },
{
name: "call OOC poseidon",
prepareParams: func(t *testing.T, ctx context.Context, sc *triggerErrors.TriggerErrors, c *ethclient.Client, a bind.TransactOpts) map[string]interface{} {
@@ -724,7 +742,6 @@ func Test_EstimateCounters(t *testing.T) {
assert.Greater(t, response.CountersUsed.UsedPoseidonHashes, expectedCountersLimits.MaxPoseidonHashes)
assert.Nil(t, response.Revert)
assert.Equal(t, "not enough poseidon counters to continue the execution", *response.OOCError)
-
},
},
}
From 7e5823a169d161963be65596a19de00107be6dbb Mon Sep 17 00:00:00 2001
From: tclemos
Date: Thu, 22 Feb 2024 10:08:30 -0300
Subject: [PATCH 9/9] fix test
---
test/e2e/jsonrpc1_test.go | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/test/e2e/jsonrpc1_test.go b/test/e2e/jsonrpc1_test.go
index 69ef2dea43..c0c867bb50 100644
--- a/test/e2e/jsonrpc1_test.go
+++ b/test/e2e/jsonrpc1_test.go
@@ -677,7 +677,7 @@ func Test_EstimateCounters(t *testing.T) {
}
ctx := context.Background()
setup()
- // defer teardown()
+ defer teardown()
ethClient, err := ethclient.Dial(operations.DefaultL2NetworkURL)
require.NoError(t, err)
auth, err := operations.GetAuth(operations.DefaultSequencerPrivateKey, operations.DefaultL2ChainID)
@@ -707,15 +707,23 @@ func Test_EstimateCounters(t *testing.T) {
prepareParams: func(t *testing.T, ctx context.Context, sc *triggerErrors.TriggerErrors, c *ethclient.Client, a bind.TransactOpts) map[string]interface{} {
params := map[string]interface{}{
"from": a.From.String(),
- "to": common.HexToAddress("0x1"),
- "gas": 30000000,
+ "to": common.HexToAddress("0x1").String(),
+ "gas": hex.EncodeUint64(30000000),
"value": hex.EncodeBig(big.NewInt(10000)),
}
return params
},
assert: func(t *testing.T, tc *testCase, response types.ZKCountersResponse) {
- assert.Greater(t, response.CountersUsed.UsedPoseidonHashes, expectedCountersLimits.MaxPoseidonHashes)
+ assert.LessOrEqual(t, response.CountersUsed.GasUsed, expectedCountersLimits.MaxGasUsed)
+ assert.LessOrEqual(t, response.CountersUsed.UsedKeccakHashes, expectedCountersLimits.MaxKeccakHashes)
+ assert.LessOrEqual(t, response.CountersUsed.UsedPoseidonHashes, expectedCountersLimits.MaxPoseidonHashes)
+ assert.LessOrEqual(t, response.CountersUsed.UsedPoseidonPaddings, expectedCountersLimits.MaxPoseidonPaddings)
+ assert.LessOrEqual(t, response.CountersUsed.UsedMemAligns, expectedCountersLimits.MaxMemAligns)
+ assert.LessOrEqual(t, response.CountersUsed.UsedArithmetics, expectedCountersLimits.MaxArithmetics)
+ assert.LessOrEqual(t, response.CountersUsed.UsedBinaries, expectedCountersLimits.MaxBinaries)
+ assert.LessOrEqual(t, response.CountersUsed.UsedSteps, expectedCountersLimits.MaxSteps)
+ assert.LessOrEqual(t, response.CountersUsed.UsedSHA256Hashes, expectedCountersLimits.MaxSHA256Hashes)
assert.Nil(t, response.Revert)
assert.Nil(t, response.OOCError)
},