diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index 3f30965a48..2579104e5a 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -570,8 +570,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
 		}
 		if filter.MaxDATxSize != nil && !pool.locals.contains(addr) {
 			for i, tx := range txs {
-				estimate := types.EstimatedL1SizeScaled(tx.RollupCostData())
-				estimate = estimate.Div(estimate, big.NewInt(1e6))
+				estimate := tx.RollupCostData().EstimatedDASize()
 				if estimate.Cmp(filter.MaxDATxSize) > 0 {
 					log.Debug("filtering tx that exceeds max da tx size",
 						"hash", tx.Hash(), "txda", estimate, "dalimit", filter.MaxDATxSize)
@@ -583,8 +582,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
 		if len(txs) > 0 {
 			lazies := make([]*txpool.LazyTransaction, len(txs))
 			for i := 0; i < len(txs); i++ {
-				daBytes := types.EstimatedL1SizeScaled(txs[i].RollupCostData())
-				daBytes = daBytes.Div(daBytes, big.NewInt(1e6))
+				daBytes := txs[i].RollupCostData().EstimatedDASize()
 				lazies[i] = &txpool.LazyTransaction{
 					Pool:      pool,
 					Hash:      txs[i].Hash(),
diff --git a/core/types/rollup_cost.go b/core/types/rollup_cost.go
index 183ff12489..9c12265b63 100644
--- a/core/types/rollup_cost.go
+++ b/core/types/rollup_cost.go
@@ -364,7 +364,7 @@ func NewL1CostFuncFjord(l1BaseFee, l1BlobBaseFee, baseFeeScalar, blobFeeScalar *
 		calldataCostPerByte := new(big.Int).Mul(scaledL1BaseFee, sixteen)
 		blobCostPerByte := new(big.Int).Mul(blobFeeScalar, l1BlobBaseFee)
 		l1FeeScaled := new(big.Int).Add(calldataCostPerByte, blobCostPerByte)
-		estimatedSize := EstimatedL1SizeScaled(costData)
+		estimatedSize := costData.estimatedDASizeScaled()
 		l1CostScaled := new(big.Int).Mul(estimatedSize, l1FeeScaled)
 		l1Cost := new(big.Int).Div(l1CostScaled, fjordDivisor)
 
@@ -375,10 +375,10 @@
 	}
 }
 
-// EstimatedL1Size estimates the number of bytes the transaction will occupy in its L1 batch using
-// the Fjord linear regression model, and returns this value scaled up by 1e6.
-func EstimatedL1SizeScaled(costData RollupCostData) *big.Int {
-	fastLzSize := new(big.Int).SetUint64(costData.FastLzSize)
+// estimatedDASizeScaled estimates the number of bytes the transaction will occupy in its L1 batch
+// using the Fjord linear regression model, and returns this value scaled up by 1e6.
+func (cd RollupCostData) estimatedDASizeScaled() *big.Int {
+	fastLzSize := new(big.Int).SetUint64(cd.FastLzSize)
 	estimatedSize := new(big.Int).Add(L1CostIntercept, new(big.Int).Mul(L1CostFastlzCoef, fastLzSize))
 
 	if estimatedSize.Cmp(MinTransactionSizeScaled) < 0 {
@@ -387,6 +387,13 @@
 	return estimatedSize
 }
 
+// EstimatedDASize estimates the number of bytes the transaction will occupy in its L1 batch
+// using the Fjord linear regression model.
+func (cd RollupCostData) EstimatedDASize() *big.Int {
+	b := cd.estimatedDASizeScaled()
+	return b.Div(b, big.NewInt(1e6))
+}
+
 func extractEcotoneFeeParams(l1FeeParams []byte) (l1BaseFeeScalar, l1BlobBaseFeeScalar *big.Int) {
 	offset := scalarSectionStart
 	l1BaseFeeScalar = new(big.Int).SetBytes(l1FeeParams[offset : offset+4])