diff --git a/Makefile b/Makefile index e51d2d99eb..f0f9385e7b 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,10 @@ ios: @echo "Import \"$(GOBIN)/Geth.framework\" to use the library." test: - $(GOTEST) --timeout 5m -shuffle=on -cover -coverprofile=cover.out $(TESTALL) + $(GOTEST) --timeout 5m -shuffle=on -cover -short -coverprofile=cover.out -covermode=atomic $(TESTALL) + +test-txpool-race: + $(GOTEST) -run=TestPoolMiningDataRaces --timeout 600m -race -v ./core/ test-race: $(GOTEST) --timeout 15m -race -shuffle=on $(TESTALL) @@ -75,7 +78,7 @@ lint: lintci-deps: rm -f ./build/bin/golangci-lint - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.48.0 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.50.1 goimports: goimports -local "$(PACKAGE)" -w . diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go index 6f1c964ada..cf2039b66c 100644 --- a/cmd/evm/internal/t8ntool/transaction.go +++ b/cmd/evm/internal/t8ntool/transaction.go @@ -24,6 +24,8 @@ import ( "os" "strings" + "gopkg.in/urfave/cli.v1" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" @@ -32,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" - "gopkg.in/urfave/cli.v1" ) type result struct { diff --git a/common/debug/debug.go b/common/debug/debug.go index 6a677e495d..056ebe2fa7 100644 --- a/common/debug/debug.go +++ b/common/debug/debug.go @@ -1,6 +1,7 @@ package debug import ( + "fmt" "runtime" ) @@ -26,3 +27,26 @@ func Callers(show int) []string { return callers } + +func CodeLine() (string, string, int) { + pc, filename, line, _ := runtime.Caller(1) + return runtime.FuncForPC(pc).Name(), filename, line +} + +func CodeLineStr() string { + pc, filename, line, _ := runtime.Caller(1) + return fmt.Sprintf("%s:%d - %s", filename, line, runtime.FuncForPC(pc).Name()) +} + +func Stack(all bool) []byte { + buf := make([]byte, 4096) + + for { + n := runtime.Stack(buf, all) + if n < len(buf) { + return buf[:n] + } + + buf = make([]byte, 2*len(buf)) + } +} diff --git a/common/math/big.go b/common/math/big.go index 1af5b4d879..4ccf89e38c 100644 --- a/common/math/big.go +++ b/common/math/big.go @@ -20,6 +20,8 @@ package math import ( "fmt" "math/big" + + "github.com/holiman/uint256" ) // Various big integer limit values. @@ -132,6 +134,7 @@ func MustParseBig256(s string) *big.Int { // BigPow returns a ** b as a big integer. func BigPow(a, b int64) *big.Int { r := big.NewInt(a) + return r.Exp(r, big.NewInt(b), nil) } @@ -140,6 +143,15 @@ func BigMax(x, y *big.Int) *big.Int { if x.Cmp(y) < 0 { return y } + + return x +} + +func BigMaxUint(x, y *uint256.Int) *uint256.Int { + if x.Lt(y) { + return y + } + return x } @@ -148,6 +160,15 @@ func BigMin(x, y *big.Int) *big.Int { if x.Cmp(y) > 0 { return y } + + return x +} + +func BigMinUint256(x, y *uint256.Int) *uint256.Int { + if x.Gt(y) { + return y + } + return x } @@ -227,10 +248,10 @@ func U256Bytes(n *big.Int) []byte { // S256 interprets x as a two's complement number. // x must not exceed 256 bits (the result is undefined if it does) and is not modified. 
// -// S256(0) = 0 -// S256(1) = 1 -// S256(2**255) = -2**255 -// S256(2**256-1) = -1 +// S256(0) = 0 +// S256(1) = 1 +// S256(2**255) = -2**255 +// S256(2**256-1) = -1 func S256(x *big.Int) *big.Int { if x.Cmp(tt255) < 0 { return x diff --git a/common/math/uint.go b/common/math/uint.go new file mode 100644 index 0000000000..96b8261884 --- /dev/null +++ b/common/math/uint.go @@ -0,0 +1,23 @@ +package math + +import ( + "math/big" + + "github.com/holiman/uint256" +) + +var ( + U0 = uint256.NewInt(0) + U1 = uint256.NewInt(1) + U100 = uint256.NewInt(100) +) + +func U256LTE(a, b *uint256.Int) bool { + return a.Lt(b) || a.Eq(b) +} + +func FromBig(v *big.Int) *uint256.Int { + u, _ := uint256.FromBig(v) + + return u +} diff --git a/common/time.go b/common/time.go new file mode 100644 index 0000000000..6c7662e04c --- /dev/null +++ b/common/time.go @@ -0,0 +1,9 @@ +package common + +import "time" + +const TimeMilliseconds = "15:04:05.000" + +func NowMilliseconds() string { + return time.Now().Format(TimeMilliseconds) +} diff --git a/common/tracing/context.go b/common/tracing/context.go index 510e45d775..c3c6342502 100644 --- a/common/tracing/context.go +++ b/common/tracing/context.go @@ -4,6 +4,7 @@ import ( "context" "time" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -51,11 +52,16 @@ func Trace(ctx context.Context, spanName string) (context.Context, trace.Span) { return tr.Start(ctx, spanName) } -func Exec(ctx context.Context, spanName string, opts ...Option) { +func Exec(ctx context.Context, instrumentationName, spanName string, opts ...Option) { var span trace.Span tr := FromContext(ctx) + if tr == nil && len(instrumentationName) != 0 { + tr = otel.GetTracerProvider().Tracer(instrumentationName) + ctx = WithTracer(ctx, tr) + } + if tr != nil { ctx, span = tr.Start(ctx, spanName) } @@ -85,7 +91,7 @@ func ElapsedTime(ctx context.Context, span trace.Span, msg string, fn func(conte fn(ctx, span) if span != nil { - span.SetAttributes(attribute.Int(msg, int(time.Since(now).Milliseconds()))) + span.SetAttributes(attribute.Int(msg, int(time.Since(now).Microseconds()))) } } diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 1b4ddec45d..b6d643eeba 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -821,7 +821,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead if IsSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) { cx := statefull.ChainContext{Chain: chain, Bor: c} - tracing.Exec(finalizeCtx, "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { // check and commit span err = c.checkAndCommitSpan(finalizeCtx, state, header, cx) }) @@ -832,7 +832,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead } if c.HeimdallClient != nil { - tracing.Exec(finalizeCtx, "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) { // commit states stateSyncData, err = c.CommitStates(finalizeCtx, state, header, cx) }) @@ -844,7 +844,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead } } - tracing.Exec(finalizeCtx, "bor.changeContractCodeIfNeeded", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.changeContractCodeIfNeeded", func(ctx context.Context, span 
trace.Span) { err = c.changeContractCodeIfNeeded(headerNumber, state) }) @@ -854,7 +854,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead } // No block rewards in PoA, so the state remains as it is - tracing.Exec(finalizeCtx, "bor.IntermediateRoot", func(ctx context.Context, span trace.Span) { + tracing.Exec(finalizeCtx, "", "bor.IntermediateRoot", func(ctx context.Context, span trace.Span) { header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) }) diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go index 193a5b84e2..00a8ab5b58 100644 --- a/consensus/misc/eip1559.go +++ b/consensus/misc/eip1559.go @@ -20,6 +20,8 @@ import ( "fmt" "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" @@ -92,3 +94,54 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { ) } } + +// CalcBaseFeeUint calculates the basefee of the header. +func CalcBaseFeeUint(config *params.ChainConfig, parent *types.Header) *uint256.Int { + var ( + initialBaseFeeUint = uint256.NewInt(params.InitialBaseFee) + baseFeeChangeDenominatorUint64 = params.BaseFeeChangeDenominator(config.Bor, parent.Number) + baseFeeChangeDenominatorUint = uint256.NewInt(baseFeeChangeDenominatorUint64) + ) + + // If the current block is the first EIP-1559 block, return the InitialBaseFee. + if !config.IsLondon(parent.Number) { + return initialBaseFeeUint.Clone() + } + + var ( + parentGasTarget = parent.GasLimit / params.ElasticityMultiplier + parentGasTargetBig = uint256.NewInt(parentGasTarget) + ) + + // If the parent gasUsed is the same as the target, the baseFee remains unchanged. + if parent.GasUsed == parentGasTarget { + return math.FromBig(parent.BaseFee) + } + + if parent.GasUsed > parentGasTarget { + // If the parent block used more gas than its target, the baseFee should increase. + gasUsedDelta := uint256.NewInt(parent.GasUsed - parentGasTarget) + + parentBaseFee := math.FromBig(parent.BaseFee) + x := gasUsedDelta.Mul(parentBaseFee, gasUsedDelta) + y := x.Div(x, parentGasTargetBig) + baseFeeDelta := math.BigMaxUint( + x.Div(y, baseFeeChangeDenominatorUint), + math.U1, + ) + + return x.Add(parentBaseFee, baseFeeDelta) + } + + // Otherwise if the parent block used less gas than its target, the baseFee should decrease. + gasUsedDelta := uint256.NewInt(parentGasTarget - parent.GasUsed) + parentBaseFee := math.FromBig(parent.BaseFee) + x := gasUsedDelta.Mul(parentBaseFee, gasUsedDelta) + y := x.Div(x, parentGasTargetBig) + baseFeeDelta := x.Div(y, baseFeeChangeDenominatorUint) + + return math.BigMaxUint( + x.Sub(parentBaseFee, baseFeeDelta), + math.U0.Clone(), + ) +} diff --git a/core/tx_journal.go b/core/tx_journal.go index d282126a08..980bdb9864 100644 --- a/core/tx_journal.go +++ b/core/tx_journal.go @@ -61,11 +61,13 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { if _, err := os.Stat(journal.path); os.IsNotExist(err) { return nil } + // Open the journal for loading any past transactions input, err := os.Open(journal.path) if err != nil { return err } + defer input.Close() // Temporarily discard any journal additions (don't double add on load) @@ -80,29 +82,35 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { // appropriate progress counters. Then use this method to load all the // journaled transactions in small-ish batches.
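The batching scheme described above, in isolation: a minimal sketch of the decode-buffer-flush loop over an RLP journal, assuming (as the reworked addTxs later in this patch guarantees) that the add callback returns one error per rejected transaction. The names loadJournal and flush are illustrative, not part of the pool; the batch size is a parameter here, while the journal itself uses a fixed threshold.

package txjournal

import (
	"io"
	"os"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// loadJournal streams RLP-encoded transactions from path and hands them to
// add in batches of batchSize, so a huge journal never sits in memory at once.
func loadJournal(path string, batchSize int, add func(types.Transactions) []error) (total, dropped int, err error) {
	input, err := os.Open(path)
	if err != nil {
		return 0, 0, err
	}
	defer input.Close()

	stream := rlp.NewStream(input, 0)

	var batch types.Transactions

	flush := func() {
		dropped += len(add(batch)) // assumes add reports only the rejected transactions
		batch = batch[:0]
	}

	for {
		tx := new(types.Transaction)
		if decErr := stream.Decode(tx); decErr != nil {
			if decErr != io.EOF {
				err = decErr
			}
			if len(batch) > 0 {
				flush() // drain the partial batch before returning
			}
			return total, dropped, err
		}

		total++

		if batch = append(batch, tx); len(batch) >= batchSize {
			flush()
		}
	}
}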
loadBatch := func(txs types.Transactions) { + errs := add(txs) + + dropped += len(errs) + + for _, err := range errs { - for _, err := range add(txs) { - if err != nil { - log.Debug("Failed to add journaled transaction", "err", err) - dropped++ - } + log.Debug("Failed to add journaled transaction", "err", err) } } var ( failure error batch types.Transactions ) + for { // Parse the next transaction and terminate on error tx := new(types.Transaction) + if err = stream.Decode(tx); err != nil { if err != io.EOF { failure = err } + if batch.Len() > 0 { loadBatch(batch) } + break } + // New transaction parsed, queue up for later, import if threshold is reached total++ @@ -111,6 +119,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { batch = batch[:0] } } + log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped) return failure diff --git a/core/tx_list.go b/core/tx_list.go index f141a03bbd..e763777e33 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -19,13 +19,15 @@ package core import ( "container/heap" "math" - "math/big" "sort" "sync" "sync/atomic" "time" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" ) @@ -54,36 +56,67 @@ func (h *nonceHeap) Pop() interface{} { type txSortedMap struct { items map[uint64]*types.Transaction // Hash map storing the transaction data index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) - cache types.Transactions // Cache of the transactions already sorted + m sync.RWMutex + + cache types.Transactions // Cache of the transactions already sorted + isEmpty bool + cacheMu sync.RWMutex } // newTxSortedMap creates a new nonce-sorted transaction map. func newTxSortedMap() *txSortedMap { return &txSortedMap{ - items: make(map[uint64]*types.Transaction), - index: new(nonceHeap), + items: make(map[uint64]*types.Transaction), + index: new(nonceHeap), + isEmpty: true, } } // Get retrieves the current transactions associated with the given nonce. func (m *txSortedMap) Get(nonce uint64) *types.Transaction { + m.m.RLock() + defer m.m.RUnlock() + return m.items[nonce] } +func (m *txSortedMap) Has(nonce uint64) bool { + if m == nil { + return false + } + + m.m.RLock() + defer m.m.RUnlock() + + return m.items[nonce] != nil +} + // Put inserts a new transaction into the map, also updating the map's nonce // index. If a transaction already exists with the same nonce, it's overwritten. func (m *txSortedMap) Put(tx *types.Transaction) { + m.m.Lock() + defer m.m.Unlock() + nonce := tx.Nonce() if m.items[nonce] == nil { heap.Push(m.index, nonce) } - m.items[nonce], m.cache = tx, nil + + m.items[nonce] = tx + + m.cacheMu.Lock() + m.isEmpty = true + m.cache = nil + m.cacheMu.Unlock() } // Forward removes all transactions from the map with a nonce lower than the // provided threshold. Every removed transaction is returned for any post-removal // maintenance.
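Forward below pops the heap while the minimum nonce sits under the threshold. The same loop on a bare min-heap of nonces, mirroring the pool's nonceHeap implementation of heap.Interface, standard library only:

package main

import (
	"container/heap"
	"fmt"
)

// nonceHeap mirrors the pool's min-heap of account nonces.
type nonceHeap []uint64

func (h nonceHeap) Len() int            { return len(h) }
func (h nonceHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h nonceHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *nonceHeap) Push(x interface{}) { *h = append(*h, x.(uint64)) }
func (h *nonceHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &nonceHeap{5, 1, 3, 2, 8}
	heap.Init(h)

	// Forward(threshold): keep popping while the smallest nonce is below it.
	threshold := uint64(4)
	for h.Len() > 0 && (*h)[0] < threshold {
		fmt.Println("removed nonce", heap.Pop(h)) // 1, 2, 3
	}
	fmt.Println("remaining min", (*h)[0]) // 5
}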
func (m *txSortedMap) Forward(threshold uint64) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + var removed types.Transactions // Pop off heap items until the threshold is reached @@ -92,10 +125,15 @@ func (m *txSortedMap) Forward(threshold uint64) types.Transactions { removed = append(removed, m.items[nonce]) delete(m.items, nonce) } + // If we had a cached order, shift the front + m.cacheMu.Lock() if m.cache != nil { + hitCacheCounter.Inc(1) m.cache = m.cache[len(removed):] } + m.cacheMu.Unlock() + return removed } @@ -105,6 +143,9 @@ func (m *txSortedMap) Forward(threshold uint64) types.Transactions { // If you want to do several consecutive filterings, it's therefore better to first // do a .filter(func1) followed by .Filter(func2) or reheap() func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + removed := m.filter(filter) // If transactions were removed, the heap and cache are ruined if len(removed) > 0 { @@ -115,11 +156,19 @@ func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transac func (m *txSortedMap) reheap() { *m.index = make([]uint64, 0, len(m.items)) + for nonce := range m.items { *m.index = append(*m.index, nonce) } + heap.Init(m.index) + + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) } // filter is identical to Filter, but **does not** regenerate the heap. This method @@ -135,7 +184,12 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac } } if len(removed) > 0 { + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) } return removed } @@ -143,45 +197,66 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. func (m *txSortedMap) Cap(threshold int) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + // Short circuit if the number of items is under the limit if len(m.items) <= threshold { return nil } + // Otherwise gather and drop the highest nonce'd transactions var drops types.Transactions sort.Sort(*m.index) + for size := len(m.items); size > threshold; size-- { drops = append(drops, m.items[(*m.index)[size-1]]) delete(m.items, (*m.index)[size-1]) } + *m.index = (*m.index)[:threshold] heap.Init(m.index) // If we had a cache, shift the back + m.cacheMu.Lock() if m.cache != nil { m.cache = m.cache[:len(m.cache)-len(drops)] } + m.cacheMu.Unlock() + return drops } // Remove deletes a transaction from the maintained map, returning whether the // transaction was found. func (m *txSortedMap) Remove(nonce uint64) bool { + m.m.Lock() + defer m.m.Unlock() + // Short circuit if no transaction is present _, ok := m.items[nonce] if !ok { return false } + // Otherwise delete the transaction and fix the heap index for i := 0; i < m.index.Len(); i++ { if (*m.index)[i] == nonce { heap.Remove(m.index, i) + break } } + delete(m.items, nonce) + + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) return true } @@ -194,55 +269,125 @@ func (m *txSortedMap) Remove(nonce uint64) bool { // prevent getting into and invalid state. This is not something that should ever // happen but better to be self correcting than failing! 
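Ready below drains a gapless run of nonces starting at (or below) the requested nonce. The same rule in a minimal map-based sketch; ready and lowestNonce are hypothetical helpers, not the pool type itself:

package main

import "fmt"

// ready drains consecutive nonces from txs, beginning at min(start, lowest).
func ready(txs map[uint64]string, start uint64) []string {
	lowest, ok := lowestNonce(txs)
	if !ok || lowest > start {
		return nil
	}

	var out []string
	for next := lowest; ; next++ {
		tx, ok := txs[next]
		if !ok {
			break // the first gap ends the executable run
		}
		out = append(out, tx)
		delete(txs, next)
	}
	return out
}

func lowestNonce(txs map[uint64]string) (uint64, bool) {
	first, found := uint64(0), false
	for n := range txs {
		if !found || n < first {
			first, found = n, true
		}
	}
	return first, found
}

func main() {
	txs := map[uint64]string{3: "a", 4: "b", 5: "c", 7: "d"}
	fmt.Println(ready(txs, 3)) // [a b c]; nonce 7 stays queued behind the gap at 6
}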
func (m *txSortedMap) Ready(start uint64) types.Transactions { + m.m.Lock() + defer m.m.Unlock() + // Short circuit if no transactions are available if m.index.Len() == 0 || (*m.index)[0] > start { return nil } + // Otherwise start accumulating incremental transactions var ready types.Transactions + for next := (*m.index)[0]; m.index.Len() > 0 && (*m.index)[0] == next; next++ { ready = append(ready, m.items[next]) delete(m.items, next) heap.Pop(m.index) } + + m.cacheMu.Lock() m.cache = nil + m.isEmpty = true + m.cacheMu.Unlock() + + resetCacheGauge.Inc(1) return ready } // Len returns the length of the transaction map. func (m *txSortedMap) Len() int { + m.m.RLock() + defer m.m.RUnlock() + return len(m.items) } func (m *txSortedMap) flatten() types.Transactions { // If the sorting was not cached yet, create and cache it - if m.cache == nil { - m.cache = make(types.Transactions, 0, len(m.items)) + m.cacheMu.Lock() + defer m.cacheMu.Unlock() + + if m.isEmpty { + m.isEmpty = false // to simulate sync.Once + + m.cacheMu.Unlock() + + m.m.RLock() + + cache := make(types.Transactions, 0, len(m.items)) + for _, tx := range m.items { - m.cache = append(m.cache, tx) + cache = append(cache, tx) } - sort.Sort(types.TxByNonce(m.cache)) + + m.m.RUnlock() + + // exclude sorting from locks + sort.Sort(types.TxByNonce(cache)) + + m.cacheMu.Lock() + m.cache = cache + + reinitCacheGauge.Inc(1) + missCacheCounter.Inc(1) + } else { + hitCacheCounter.Inc(1) } + return m.cache } +func (m *txSortedMap) lastElement() *types.Transaction { + // If the sorting was not cached yet, create and cache it + m.cacheMu.Lock() + defer m.cacheMu.Unlock() + + cache := m.cache + + if m.isEmpty { + m.isEmpty = false // to simulate sync.Once + + m.cacheMu.Unlock() + + cache = make(types.Transactions, 0, len(m.items)) + + m.m.RLock() + + for _, tx := range m.items { + cache = append(cache, tx) + } + + m.m.RUnlock() + + // exclude sorting from locks + sort.Sort(types.TxByNonce(cache)) + + m.cacheMu.Lock() + m.cache = cache + + reinitCacheGauge.Inc(1) + missCacheCounter.Inc(1) + } else { + hitCacheCounter.Inc(1) + } + + return cache[len(cache)-1] +} + // Flatten creates a nonce-sorted slice of transactions based on the loosely // sorted internal representation. The result of the sorting is cached in case // it's requested again before any modifications are made to the contents. 
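flatten and lastElement above rebuild the sorted snapshot only when isEmpty marks it stale; the patch splits the data lock (m) from the cache lock (cacheMu) so the sort itself runs outside both. A simplified single-mutex rendition of that cache-or-rebuild pattern, with assumed names:

package main

import (
	"fmt"
	"sort"
	"sync"
)

// sortedCache keeps a lazily rebuilt, sorted snapshot of a changing set.
type sortedCache struct {
	mu    sync.Mutex
	items map[uint64]string
	cache []uint64 // sorted nonces; nil means "stale, rebuild on demand"
}

func (c *sortedCache) put(nonce uint64, tx string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[nonce] = tx
	c.cache = nil // any write invalidates the snapshot
}

func (c *sortedCache) flatten() []uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.cache == nil { // miss: rebuild once, then serve until invalidated
		for n := range c.items {
			c.cache = append(c.cache, n)
		}
		sort.Slice(c.cache, func(i, j int) bool { return c.cache[i] < c.cache[j] })
	}
	return c.cache
}

func main() {
	c := &sortedCache{items: map[uint64]string{}}
	c.put(2, "b")
	c.put(1, "a")
	fmt.Println(c.flatten()) // [1 2]
	fmt.Println(c.flatten()) // served from the cache, no re-sort
}

Note that Flatten below now returns the cached slice itself rather than a defensive copy, so callers must treat the result as read-only.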
func (m *txSortedMap) Flatten() types.Transactions { // Copy the cache to prevent accidental modifications - cache := m.flatten() - txs := make(types.Transactions, len(cache)) - copy(txs, cache) - return txs + return m.flatten() } // LastElement returns the last element of a flattened list, thus, the // transaction with the highest nonce func (m *txSortedMap) LastElement() *types.Transaction { - cache := m.flatten() - return cache[len(cache)-1] + return m.lastElement() } // txList is a "list" of transactions belonging to an account, sorted by account @@ -253,17 +398,16 @@ type txList struct { strict bool // Whether nonces are strictly continuous or not txs *txSortedMap // Heap indexed sorted hash map of the transactions - costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance) - gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) + costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) + gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) } // newTxList create a new transaction list for maintaining nonce-indexable fast, // gapped, sortable transaction lists. func newTxList(strict bool) *txList { return &txList{ - strict: strict, - txs: newTxSortedMap(), - costcap: new(big.Int), + strict: strict, + txs: newTxSortedMap(), } } @@ -285,31 +429,36 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran if old.GasFeeCapCmp(tx) >= 0 || old.GasTipCapCmp(tx) >= 0 { return false, nil } + // thresholdFeeCap = oldFC * (100 + priceBump) / 100 - a := big.NewInt(100 + int64(priceBump)) - aFeeCap := new(big.Int).Mul(a, old.GasFeeCap()) - aTip := a.Mul(a, old.GasTipCap()) + a := uint256.NewInt(100 + priceBump) + aFeeCap := uint256.NewInt(0).Mul(a, old.GasFeeCapUint()) + aTip := a.Mul(a, old.GasTipCapUint()) // thresholdTip = oldTip * (100 + priceBump) / 100 - b := big.NewInt(100) + b := cmath.U100 thresholdFeeCap := aFeeCap.Div(aFeeCap, b) thresholdTip := aTip.Div(aTip, b) // We have to ensure that both the new fee cap and tip are higher than the // old ones as well as checking the percentage threshold to ensure that // this is accurate for low (Wei-level) gas price replacements. - if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 { + if tx.GasFeeCapUIntLt(thresholdFeeCap) || tx.GasTipCapUIntLt(thresholdTip) { return false, nil } } + // Otherwise overwrite the old transaction with the current one l.txs.Put(tx) - if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 { + + if cost := tx.CostUint(); l.costcap == nil || l.costcap.Lt(cost) { l.costcap = cost } + if gas := tx.Gas(); l.gascap < gas { l.gascap = gas } + return true, old } @@ -329,17 +478,20 @@ func (l *txList) Forward(threshold uint64) types.Transactions { // a point in calculating all the costs or if the balance covers all. If the threshold // is lower than the costgas cap, the caps will be reset to a new high after removing // the newly invalidated transactions. 
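Before Filter below: the replacement rule in Add above is threshold = old * (100 + priceBump) / 100, applied to the fee cap and the tip separately, and both thresholds must be met. A worked uint256 sketch with assumed numbers (bumpThreshold is illustrative):

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// bumpThreshold computes old * (100 + priceBump) / 100 without touching old.
func bumpThreshold(old *uint256.Int, priceBump uint64) *uint256.Int {
	a := uint256.NewInt(100 + priceBump)
	a.Mul(a, old)
	return a.Div(a, uint256.NewInt(100))
}

func main() {
	oldFeeCap := uint256.NewInt(30_000_000_000) // 30 gwei
	threshold := bumpThreshold(oldFeeCap, 10)   // 10% bump -> 33 gwei

	replacement := uint256.NewInt(32_000_000_000)
	fmt.Println(replacement.Lt(threshold)) // true: 32 gwei is rejected as underpriced
}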
-func (l *txList) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) { +func (l *txList) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactions, types.Transactions) { // If all transactions are below the threshold, short circuit - if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit { + if cmath.U256LTE(l.costcap, costLimit) && l.gascap <= gasLimit { return nil, nil } - l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds + + l.costcap = costLimit.Clone() // Lower the caps to the thresholds l.gascap = gasLimit // Filter out all the transactions above the account's funds + cost := uint256.NewInt(0) removed := l.txs.Filter(func(tx *types.Transaction) bool { - return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit) > 0 + cost.SetFromBig(tx.Cost()) + return tx.Gas() > gasLimit || cost.Gt(costLimit) }) if len(removed) == 0 { @@ -416,13 +568,18 @@ func (l *txList) LastElement() *types.Transaction { return l.txs.LastElement() } +func (l *txList) Has(nonce uint64) bool { + return l != nil && l.txs.items[nonce] != nil +} + // priceHeap is a heap.Interface implementation over transactions for retrieving // price-sorted transactions to discard when the pool fills up. If baseFee is set // then the heap is sorted based on the effective tip based on the given base fee. // If baseFee is nil then the sorting is based on gasFeeCap. type priceHeap struct { - baseFee *big.Int // heap should always be re-sorted after baseFee is changed - list []*types.Transaction + baseFee *uint256.Int // heap should always be re-sorted after baseFee is changed + list []*types.Transaction + baseFeeMu sync.RWMutex } func (h *priceHeap) Len() int { return len(h.list) } @@ -440,16 +597,24 @@ func (h *priceHeap) Less(i, j int) bool { } func (h *priceHeap) cmp(a, b *types.Transaction) int { + h.baseFeeMu.RLock() + if h.baseFee != nil { // Compare effective tips if baseFee is specified - if c := a.EffectiveGasTipCmp(b, h.baseFee); c != 0 { + if c := a.EffectiveGasTipTxUintCmp(b, h.baseFee); c != 0 { + h.baseFeeMu.RUnlock() + return c } } + + h.baseFeeMu.RUnlock() + // Compare fee caps if baseFee is not specified or effective tips are equal if c := a.GasFeeCapCmp(b); c != 0 { return c } + // Compare tips if effective tips and fee caps are equal return a.GasTipCapCmp(b) } @@ -629,7 +794,10 @@ func (l *txPricedList) Reheap() { // SetBaseFee updates the base fee and triggers a re-heap. Note that Removed is not // necessary to call right before SetBaseFee when processing a new block. 
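The ordering that makes the Reheap in SetBaseFee below necessary is cmp above: with a base fee set, transactions compare by effective tip, i.e. min(gasTipCap, gasFeeCap - baseFee). A standalone sketch of that arithmetic; the helper name is illustrative, the real comparison lives on types.Transaction:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// effectiveTip returns min(tipCap, feeCap-baseFee); zero if feeCap < baseFee.
func effectiveTip(feeCap, tipCap, baseFee *uint256.Int) *uint256.Int {
	if feeCap.Lt(baseFee) {
		return uint256.NewInt(0) // cannot even cover the base fee
	}
	headroom := new(uint256.Int).Sub(feeCap, baseFee)
	if tipCap.Lt(headroom) {
		return tipCap.Clone()
	}
	return headroom
}

func main() {
	baseFee := uint256.NewInt(15_000_000_000) // 15 gwei
	a := effectiveTip(uint256.NewInt(20_000_000_000), uint256.NewInt(2_000_000_000), baseFee)
	b := effectiveTip(uint256.NewInt(16_000_000_000), uint256.NewInt(2_000_000_000), baseFee)
	fmt.Println(a.Cmp(b)) // 1: tx A pays its full 2 gwei tip, B only 1 gwei of headroom
}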
-func (l *txPricedList) SetBaseFee(baseFee *big.Int) { +func (l *txPricedList) SetBaseFee(baseFee *uint256.Int) { + l.urgent.baseFeeMu.Lock() l.urgent.baseFee = baseFee + l.urgent.baseFeeMu.Unlock() + l.Reheap() } diff --git a/core/tx_list_test.go b/core/tx_list_test.go index ef49cae1dd..80b8c1ef32 100644 --- a/core/tx_list_test.go +++ b/core/tx_list_test.go @@ -17,10 +17,11 @@ package core import ( - "math/big" "math/rand" "testing" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" ) @@ -59,11 +60,15 @@ func BenchmarkTxListAdd(b *testing.B) { for i := 0; i < len(txs); i++ { txs[i] = transaction(uint64(i), 0, key) } + // Insert the transactions in a random order - priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit)) + priceLimit := uint256.NewInt(DefaultTxPoolConfig.PriceLimit) b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { list := newTxList(true) + for _, v := range rand.Perm(len(txs)) { list.Add(txs[v], DefaultTxPoolConfig.PriceBump) list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump) diff --git a/core/tx_pool.go b/core/tx_pool.go index 7648668688..e98fd2e0ae 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -17,6 +17,7 @@ package core import ( + "context" "errors" "math" "math/big" @@ -25,8 +26,12 @@ import ( "sync/atomic" "time" + "github.com/holiman/uint256" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/common/tracing" "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -126,6 +131,11 @@ var ( localGauge = metrics.NewRegisteredGauge("txpool/local", nil) slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) + resetCacheGauge = metrics.NewRegisteredGauge("txpool/resetcache", nil) + reinitCacheGauge = metrics.NewRegisteredGauge("txpool/reinittcache", nil) + hitCacheCounter = metrics.NewRegisteredCounter("txpool/cachehit", nil) + missCacheCounter = metrics.NewRegisteredCounter("txpool/cachemiss", nil) + reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) ) @@ -231,14 +241,17 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig { // current state) and future transactions. Transactions move between those // two states over time as they are received and processed. type TxPool struct { - config TxPoolConfig - chainconfig *params.ChainConfig - chain blockChain - gasPrice *big.Int - txFeed event.Feed - scope event.SubscriptionScope - signer types.Signer - mu sync.RWMutex + config TxPoolConfig + chainconfig *params.ChainConfig + chain blockChain + gasPrice *big.Int + gasPriceUint *uint256.Int + gasPriceMu sync.RWMutex + + txFeed event.Feed + scope event.SubscriptionScope + signer types.Signer + mu sync.RWMutex istanbul bool // Fork indicator whether we are in the istanbul stage. eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. 
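The struct above now carries the minimum gas price in two representations, the legacy *big.Int and a uint256 mirror, behind their own gasPriceMu, independent of the pool-wide mu. Reduced to a sketch with assumed names, not the pool's actual API:

package main

import (
	"fmt"
	"math/big"
	"sync"

	"github.com/holiman/uint256"
)

// gasPriceGuard pairs the legacy *big.Int price with its uint256 mirror so
// hot paths can compare without converting, under a lock independent of the
// pool's main mutex.
type gasPriceGuard struct {
	mu     sync.RWMutex
	price  *big.Int
	priceU *uint256.Int
}

func (g *gasPriceGuard) set(p *big.Int) {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.price = new(big.Int).Set(p)
	if g.priceU == nil {
		g.priceU, _ = uint256.FromBig(p)
	} else {
		g.priceU.SetFromBig(p)
	}
}

func (g *gasPriceGuard) getUint() *uint256.Int {
	g.mu.RLock()
	defer g.mu.RUnlock()
	return g.priceU.Clone() // hand out a copy, never the guarded value
}

func main() {
	var g gasPriceGuard
	g.set(big.NewInt(1_000_000_000))
	fmt.Println(g.getUint()) // 1000000000
}

GasPrice, GasPriceUint256 and SetGasPrice below all go through this dedicated lock.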
@@ -251,11 +264,13 @@ type TxPool struct { locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *txLookup // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + pending map[common.Address]*txList // All currently processable transactions + pendingCount int + pendingMu sync.RWMutex + queue map[common.Address]*txList // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account + all *txLookup // All transactions to allow lookups + priced *txPricedList // All transactions sorted by price chainHeadCh chan ChainHeadEvent chainHeadSub event.Subscription @@ -300,6 +315,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block reorgShutdownCh: make(chan struct{}), initDoneCh: make(chan struct{}), gasPrice: new(big.Int).SetUint64(config.PriceLimit), + gasPriceUint: uint256.NewInt(config.PriceLimit), } pool.locals = newAccountSet(pool.signer) @@ -376,9 +392,7 @@ func (pool *TxPool) loop() { // Handle stats reporting ticks case <-report.C: - pool.mu.RLock() pending, queued := pool.stats() - pool.mu.RUnlock() stales := int(atomic.LoadInt64(&pool.priced.stales)) if pending != prevPending || queued != prevQueued || stales != prevStales { @@ -388,22 +402,45 @@ func (pool *TxPool) loop() { // Handle inactive account transaction eviction case <-evict.C: - pool.mu.Lock() + now := time.Now() + + var ( + list types.Transactions + tx *types.Transaction + toRemove []common.Hash + ) + + pool.mu.RLock() for addr := range pool.queue { // Skip local transactions from the eviction mechanism if pool.locals.contains(addr) { continue } + // Any non-locals old enough should be removed - if time.Since(pool.beats[addr]) > pool.config.Lifetime { - list := pool.queue[addr].Flatten() - for _, tx := range list { - pool.removeTx(tx.Hash(), true) + if now.Sub(pool.beats[addr]) > pool.config.Lifetime { + list = pool.queue[addr].Flatten() + for _, tx = range list { + toRemove = append(toRemove, tx.Hash()) } + queuedEvictionMeter.Mark(int64(len(list))) } } - pool.mu.Unlock() + + pool.mu.RUnlock() + + if len(toRemove) > 0 { + pool.mu.Lock() + + var hash common.Hash + + for _, hash = range toRemove { + pool.removeTx(hash, true) + } + + pool.mu.Unlock() + } // Handle local transaction journal rotation case <-journal.C: @@ -441,27 +478,45 @@ func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscripti // GasPrice returns the current gas price enforced by the transaction pool. func (pool *TxPool) GasPrice() *big.Int { - pool.mu.RLock() - defer pool.mu.RUnlock() + pool.gasPriceMu.RLock() + defer pool.gasPriceMu.RUnlock() return new(big.Int).Set(pool.gasPrice) } +func (pool *TxPool) GasPriceUint256() *uint256.Int { + pool.gasPriceMu.RLock() + defer pool.gasPriceMu.RUnlock() + + return pool.gasPriceUint.Clone() +} + // SetGasPrice updates the minimum price required by the transaction pool for a // new transaction, and drops all transactions below this threshold. 
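The eviction rewrite above follows a collect-then-mutate discipline: scan under the read lock, gather victims, and take the write lock once, only if there is work; SetGasPrice below applies the same idea (price lock first, pool lock only when transactions must be dropped). The pattern in isolation, with assumed types:

package main

import (
	"fmt"
	"sync"
	"time"
)

type store struct {
	mu    sync.RWMutex
	beats map[string]time.Time // last activity per account
	queue map[string][]string  // queued tx hashes per account
}

// evictStale removes accounts idle longer than lifetime. The scan holds only
// the read lock; the write lock is taken once, and only when needed.
func (s *store) evictStale(lifetime time.Duration) int {
	now := time.Now()

	var victims []string

	s.mu.RLock()
	for addr, last := range s.beats {
		if now.Sub(last) > lifetime {
			victims = append(victims, addr)
		}
	}
	s.mu.RUnlock()

	if len(victims) == 0 {
		return 0
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	for _, addr := range victims {
		delete(s.queue, addr)
		delete(s.beats, addr)
	}
	return len(victims)
}

func main() {
	s := &store{
		beats: map[string]time.Time{"a": time.Now().Add(-2 * time.Hour), "b": time.Now()},
		queue: map[string][]string{"a": {"0x1"}, "b": {"0x2"}},
	}
	fmt.Println(s.evictStale(time.Hour)) // 1: only "a" is stale
}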
func (pool *TxPool) SetGasPrice(price *big.Int) { - pool.mu.Lock() - defer pool.mu.Unlock() + pool.gasPriceMu.Lock() + defer pool.gasPriceMu.Unlock() old := pool.gasPrice pool.gasPrice = price + + if pool.gasPriceUint == nil { + pool.gasPriceUint, _ = uint256.FromBig(price) + } else { + pool.gasPriceUint.SetFromBig(price) + } + // if the min miner fee increased, remove transactions below the new threshold if price.Cmp(old) > 0 { + pool.mu.Lock() + defer pool.mu.Unlock() + // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead drop := pool.all.RemotesBelowTip(price) for _, tx := range drop { pool.removeTx(tx.Hash(), false) } + pool.priced.Removed(len(drop)) } @@ -480,9 +535,6 @@ func (pool *TxPool) Nonce(addr common.Address) uint64 { // Stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. func (pool *TxPool) Stats() (int, int) { - pool.mu.RLock() - defer pool.mu.RUnlock() - return pool.stats() } @@ -490,47 +542,69 @@ func (pool *TxPool) Stats() (int, int) { // number of queued (non-executable) transactions. func (pool *TxPool) stats() (int, int) { pending := 0 + + pool.pendingMu.RLock() for _, list := range pool.pending { pending += list.Len() } + pool.pendingMu.RUnlock() + + pool.mu.RLock() + queued := 0 for _, list := range pool.queue { queued += list.Len() } + + pool.mu.RUnlock() + return pending, queued } // Content retrieves the data content of the transaction pool, returning all the // pending as well as queued transactions, grouped by account and sorted by nonce. func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { - pool.mu.Lock() - defer pool.mu.Unlock() - pending := make(map[common.Address]types.Transactions) + + pool.pendingMu.RLock() for addr, list := range pool.pending { pending[addr] = list.Flatten() } + pool.pendingMu.RUnlock() + queued := make(map[common.Address]types.Transactions) + + pool.mu.RLock() + for addr, list := range pool.queue { queued[addr] = list.Flatten() } + + pool.mu.RUnlock() + return pending, queued } // ContentFrom retrieves the data content of the transaction pool, returning the // pending as well as queued transactions of this address, grouped by nonce. func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { - pool.mu.RLock() - defer pool.mu.RUnlock() - var pending types.Transactions + + pool.pendingMu.RLock() if list, ok := pool.pending[addr]; ok { pending = list.Flatten() } + pool.pendingMu.RUnlock() + + pool.mu.RLock() + var queued types.Transactions if list, ok := pool.queue[addr]; ok { queued = list.Flatten() } + + pool.mu.RUnlock() + return pending, queued } @@ -541,35 +615,74 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types. // The enforceTips parameter can be used to do an extra filtering on the pending // transactions and only return those whose **effective** tip is large enough in // the next pending execution environment. 
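The Pending rewrite below is the heaviest user of the common/tracing helpers changed earlier in this diff. A minimal usage sketch of that API, mirroring the call sites in this patch; with the default no-op tracer provider the span is inert, but the callbacks still run:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/tracing"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	ctx := context.Background()

	// With no tracer in ctx and a non-empty instrumentation name, the patched
	// Exec fetches a tracer from the global provider before opening the span.
	tracing.Exec(ctx, "demo-instrumentation", "demo-span", func(ctx context.Context, span trace.Span) {
		// ElapsedTime now records the duration in microseconds under the
		// given attribute key.
		tracing.ElapsedTime(ctx, span, "work-us", func(_ context.Context, _ trace.Span) {
			time.Sleep(10 * time.Millisecond) // the measured section
		})
	})

	fmt.Println("span closed")
}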
-func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { - pool.mu.Lock() - defer pool.mu.Unlock() +// +//nolint:gocognit +func (pool *TxPool) Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions { + pending := make(map[common.Address]types.Transactions, 10) - pending := make(map[common.Address]types.Transactions) - for addr, list := range pool.pending { - txs := list.Flatten() - - // If the miner requests tip enforcement, cap the lists now - if enforceTips && !pool.locals.contains(addr) { - for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { - txs = txs[:i] - break + tracing.Exec(ctx, "TxpoolPending", "txpool.Pending()", func(ctx context.Context, span trace.Span) { + tracing.ElapsedTime(ctx, span, "txpool.Pending.RLock()", func(ctx context.Context, s trace.Span) { + pool.pendingMu.RLock() + }) + + defer pool.pendingMu.RUnlock() + + pendingAccounts := len(pool.pending) + + var pendingTxs int + + tracing.ElapsedTime(ctx, span, "Loop", func(ctx context.Context, s trace.Span) { + gasPriceUint := uint256.NewInt(0) + baseFee := uint256.NewInt(0) + + for addr, list := range pool.pending { + txs := list.Flatten() + + // If the miner requests tip enforcement, cap the lists now + if enforceTips && !pool.locals.contains(addr) { + for i, tx := range txs { + pool.pendingMu.RUnlock() + + pool.gasPriceMu.RLock() + if pool.gasPriceUint != nil { + gasPriceUint.Set(pool.gasPriceUint) + } + + pool.priced.urgent.baseFeeMu.Lock() + if pool.priced.urgent.baseFee != nil { + baseFee.Set(pool.priced.urgent.baseFee) + } + pool.priced.urgent.baseFeeMu.Unlock() + + pool.gasPriceMu.RUnlock() + + pool.pendingMu.RLock() + + if tx.EffectiveGasTipUintLt(gasPriceUint, baseFee) { + txs = txs[:i] + break + } + } + } + + if len(txs) > 0 { + pending[addr] = txs + pendingTxs += len(txs) } } - } - if len(txs) > 0 { - pending[addr] = txs - } - } + + tracing.SetAttributes(span, + attribute.Int("pending-transactions", pendingTxs), + attribute.Int("pending-accounts", pendingAccounts), + ) + }) + }) + return pending } // Locals retrieves the accounts currently considered local by the pool. func (pool *TxPool) Locals() []common.Address { - pool.mu.Lock() - defer pool.mu.Unlock() - return pool.locals.flatten() } @@ -578,14 +691,22 @@ func (pool *TxPool) Locals() []common.Address { // freely modified by calling code. func (pool *TxPool) local() map[common.Address]types.Transactions { txs := make(map[common.Address]types.Transactions) + + pool.locals.m.RLock() + defer pool.locals.m.RUnlock() + for addr := range pool.locals.accounts { + pool.pendingMu.RLock() if pending := pool.pending[addr]; pending != nil { txs[addr] = append(txs[addr], pending.Flatten()...) } + pool.pendingMu.RUnlock() + if queued := pool.queue[addr]; queued != nil { txs[addr] = append(txs[addr], queued.Flatten()...) } } + return txs } @@ -596,60 +717,84 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if !pool.eip2718 && tx.Type() != types.LegacyTxType { return ErrTxTypeNotSupported } + // Reject dynamic fee transactions until EIP-1559 activates. if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { return ErrTxTypeNotSupported } + // Reject transactions over defined size to prevent DOS attacks if uint64(tx.Size()) > txMaxSize { return ErrOversizedData } + // Transactions can't be negative. This may never happen using RLP decoded // transactions but may occur if you create a transaction using the RPC. 
if tx.Value().Sign() < 0 { return ErrNegativeValue } + // Ensure the transaction doesn't exceed the current block limit gas. if pool.currentMaxGas < tx.Gas() { return ErrGasLimit } + // Sanity check for extremely large numbers - if tx.GasFeeCap().BitLen() > 256 { + gasFeeCap := tx.GasFeeCapRef() + if gasFeeCap.BitLen() > 256 { return ErrFeeCapVeryHigh } - if tx.GasTipCap().BitLen() > 256 { + + // do NOT use uint256 here. results vs *big.Int are different + gasTipCap := tx.GasTipCapRef() + if gasTipCap.BitLen() > 256 { return ErrTipVeryHigh } + // Ensure gasFeeCap is greater than or equal to gasTipCap. - if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + gasTipCapU, _ := uint256.FromBig(gasTipCap) + if tx.GasFeeCapUIntLt(gasTipCapU) { return ErrTipAboveFeeCap } + // Make sure the transaction is signed properly. from, err := types.Sender(pool.signer, tx) if err != nil { return ErrInvalidSender } + // Drop non-local transactions under our own minimal accepted gas price or tip - if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { + pool.gasPriceMu.RLock() + + if !local && tx.GasTipCapUIntLt(pool.gasPriceUint) { + pool.gasPriceMu.RUnlock() + return ErrUnderpriced } + + pool.gasPriceMu.RUnlock() + // Ensure the transaction adheres to nonce ordering if pool.currentState.GetNonce(from) > tx.Nonce() { return ErrNonceTooLow } + // Transactor should have enough funds to cover the costs // cost == V + GP * GL if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { return ErrInsufficientFunds } + // Ensure the transaction has more gas than the basic tx fee. intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) if err != nil { return err } + if tx.Gas() < intrGas { return ErrIntrinsicGas } + return nil } @@ -682,7 +827,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it if !isLocal && pool.priced.Underpriced(tx) { - log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) + log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint()) underpricedTxMeter.Mark(1) return false, ErrUnderpriced } @@ -710,26 +855,36 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e pool.changesSinceReorg += len(drop) // Kick out the underpriced remote transactions. 
for _, tx := range drop { - log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) + log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint()) underpricedTxMeter.Mark(1) pool.removeTx(tx.Hash(), false) } } // Try to replace an existing transaction in the pending pool from, _ := types.Sender(pool.signer, tx) // already validated - if list := pool.pending[from]; list != nil && list.Overlaps(tx) { + + pool.pendingMu.RLock() + + list := pool.pending[from] + + if list != nil && list.Overlaps(tx) { // Nonce already pending, check if required price bump is met inserted, old := list.Add(tx, pool.config.PriceBump) + pool.pendingCount++ + pool.pendingMu.RUnlock() + if !inserted { pendingDiscardMeter.Mark(1) return false, ErrReplaceUnderpriced } + // New transaction is better, replace old one if old != nil { pool.all.Remove(old.Hash()) pool.priced.Removed(1) pendingReplaceMeter.Mark(1) } + pool.all.Add(tx, isLocal) pool.priced.Put(tx, isLocal) pool.journalTx(from, tx) @@ -738,8 +893,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e // Successful promotion, bump the heartbeat pool.beats[from] = time.Now() + return old != nil, nil } + + // it is not an unlocking of unlocked because of the return in previous 'if' + pool.pendingMu.RUnlock() + // New transaction isn't replacing a pending one, push into queue replaced, err = pool.enqueueTx(hash, tx, isLocal, true) if err != nil { @@ -829,19 +989,25 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T }() // Try to insert the transaction into the pending queue + pool.pendingMu.Lock() if pool.pending[addr] == nil { pool.pending[addr] = newTxList(true) } list := pool.pending[addr] inserted, old := list.Add(tx, pool.config.PriceBump) + pool.pendingCount++ + pool.pendingMu.Unlock() + if !inserted { // An older transaction was better, discard this pool.all.Remove(hash) pool.priced.Removed(1) pendingDiscardMeter.Mark(1) + return false } + // Otherwise discard any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) @@ -851,11 +1017,13 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T // Nothing was replaced, bump the pending counter pendingGauge.Inc(1) } + // Set the potentially new pending nonce and notify any subsystems of the new tx pool.pendingNonces.set(addr, tx.Nonce()+1) // Successful promotion, bump the heartbeat pool.beats[addr] = time.Now() + return true } @@ -871,8 +1039,7 @@ func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { // AddLocal enqueues a single local transaction into the pool if it is valid. This is // a convenience wrapper aroundd AddLocals. func (pool *TxPool) AddLocal(tx *types.Transaction) error { - errs := pool.AddLocals([]*types.Transaction{tx}) - return errs[0] + return pool.addTx(tx, !pool.config.NoLocals, true) } // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the @@ -889,108 +1056,216 @@ func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { return pool.addTxs(txs, false, true) } +func (pool *TxPool) AddRemoteSync(txs *types.Transaction) error { + return pool.addTx(txs, false, true) +} + // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 
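addTx further below front-loads the cheap rejections, known hash and invalid signature, before the pool-wide mutex is taken; pool.all carries its own internal lock, so the duplicate check never contends with inserts. The shape of that fast path, reduced to a sketch with assumed types:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errAlreadyKnown = errors.New("already known")

// lookup stands in for pool.all: a lookup set with its own lock.
type lookup struct {
	mu sync.RWMutex
	m  map[string]struct{}
}

func (l *lookup) has(h string) bool {
	l.mu.RLock()
	defer l.mu.RUnlock()
	_, ok := l.m[h]
	return ok
}

func (l *lookup) add(h string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.m[h] = struct{}{}
}

type pool struct {
	mu  sync.Mutex // the expensive pool-wide lock
	all *lookup
}

// addOne mirrors the addTx fast path: reject duplicates before pool.mu.
func (p *pool) addOne(hash string) error {
	if p.all.has(hash) {
		return errAlreadyKnown
	}

	p.mu.Lock()
	defer p.mu.Unlock()
	p.all.add(hash)
	return nil
}

func main() {
	p := &pool{all: &lookup{m: map[string]struct{}{}}}
	fmt.Println(p.addOne("0xabc")) // <nil>
	fmt.Println(p.addOne("0xabc")) // already known
}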
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { - errs := pool.AddRemotesSync([]*types.Transaction{tx}) - return errs[0] + return pool.AddRemoteSync(tx) } // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience // wrapper around AddRemotes. -// -// Deprecated: use AddRemotes func (pool *TxPool) AddRemote(tx *types.Transaction) error { - errs := pool.AddRemotes([]*types.Transaction{tx}) - return errs[0] + return pool.addTx(tx, false, false) } // addTxs attempts to queue a batch of transactions if they are valid. func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { // Filter out known ones without obtaining the pool lock or recovering signatures var ( - errs = make([]error, len(txs)) + errs []error news = make([]*types.Transaction, 0, len(txs)) + err error + + hash common.Hash ) - for i, tx := range txs { + + for _, tx := range txs { // If the transaction is known, pre-set the error slot - if pool.all.Get(tx.Hash()) != nil { - errs[i] = ErrAlreadyKnown + hash = tx.Hash() + + if pool.all.Get(hash) != nil { + errs = append(errs, ErrAlreadyKnown) knownTxMeter.Mark(1) + continue } + // Exclude transactions with invalid signatures as soon as // possible and cache senders in transactions before // obtaining lock - _, err := types.Sender(pool.signer, tx) + _, err = types.Sender(pool.signer, tx) if err != nil { - errs[i] = ErrInvalidSender + errs = append(errs, ErrInvalidSender) invalidTxMeter.Mark(1) + continue } + // Accumulate all unknown transactions for deeper processing news = append(news, tx) } + if len(news) == 0 { return errs } // Process all the new transaction and merge any errors into the original slice pool.mu.Lock() - newErrs, dirtyAddrs := pool.addTxsLocked(news, local) + errs, dirtyAddrs := pool.addTxsLocked(news, local) pool.mu.Unlock() - var nilSlot = 0 - for _, err := range newErrs { - for errs[nilSlot] != nil { - nilSlot++ + // Reorg the pool internals if needed and return + done := pool.requestPromoteExecutables(dirtyAddrs) + if sync { + <-done + } + + return errs +} + +// addTx attempts to queue a single transaction if it is valid. +func (pool *TxPool) addTx(tx *types.Transaction, local, sync bool) error { + // Filter out known ones without obtaining the pool lock or recovering signatures + var ( + err error + hash common.Hash + ) + + func() { + // If the transaction is known, pre-set the error slot + hash = tx.Hash() + + if pool.all.Get(hash) != nil { + err = ErrAlreadyKnown + + knownTxMeter.Mark(1) + + return + } + + // Exclude transactions with invalid signatures as soon as + // possible and cache senders in transactions before + // obtaining lock + _, err = types.Sender(pool.signer, tx) + if err != nil { + invalidTxMeter.Mark(1) + + return } - errs[nilSlot] = err - nilSlot++ + }() + + if err != nil { + return err } + + var dirtyAddrs *accountSet + + // Process all the new transaction and merge any errors into the original slice + pool.mu.Lock() + err, dirtyAddrs = pool.addTxLocked(tx, local) + pool.mu.Unlock() + // Reorg the pool internals if needed and return done := pool.requestPromoteExecutables(dirtyAddrs) if sync { <-done } - return errs + + return err } // addTxsLocked attempts to queue a batch of transactions if they are valid. // The transaction pool lock must be held.
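addTxsLocked and addTxLocked below follow the pool's Locked-suffix convention: the caller already owns pool.mu for the duration of the call. Distilled to a sketch:

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu sync.Mutex
	n  int
}

// Add takes the lock and defers to the locked variant, mirroring the
// addTxs / addTxsLocked split above.
func (c *counter) Add(delta int) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.addLocked(delta)
}

// addLocked assumes c.mu is held by the caller; it must not lock again.
func (c *counter) addLocked(delta int) int {
	c.n += delta
	return c.n
}

func main() {
	c := new(counter)
	fmt.Println(c.Add(2), c.Add(3)) // 2 5
}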
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { dirty := newAccountSet(pool.signer) - errs := make([]error, len(txs)) - for i, tx := range txs { - replaced, err := pool.add(tx, local) - errs[i] = err + + var ( + replaced bool + errs []error + ) + + for _, tx := range txs { + var err error + + replaced, err = pool.add(tx, local) if err == nil && !replaced { dirty.addTx(tx) } + + if err != nil { + errs = append(errs, err) + } } + validTxMeter.Mark(int64(len(dirty.accounts))) + return errs, dirty } +func (pool *TxPool) addTxLocked(tx *types.Transaction, local bool) (error, *accountSet) { + dirty := newAccountSet(pool.signer) + + var ( + replaced bool + err error + ) + + replaced, err = pool.add(tx, local) + if err == nil && !replaced { + dirty.addTx(tx) + } + + validTxMeter.Mark(int64(len(dirty.accounts))) + + return err, dirty +} + // Status returns the status (unknown/pending/queued) of a batch of transactions // identified by their hashes. func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { status := make([]TxStatus, len(hashes)) + + var ( + txList *txList + isPending bool + ) + for i, hash := range hashes { tx := pool.Get(hash) if tx == nil { continue } + from, _ := types.Sender(pool.signer, tx) // already validated - pool.mu.RLock() - if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { + + pool.pendingMu.RLock() + + if txList = pool.pending[from]; txList != nil && txList.txs.Has(tx.Nonce()) { status[i] = TxStatusPending - } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { - status[i] = TxStatusQueued + isPending = true + } else { + isPending = false } + + pool.pendingMu.RUnlock() + + if !isPending { + pool.mu.RLock() + + if txList := pool.queue[from]; txList != nil && txList.txs.Has(tx.Nonce()) { + status[i] = TxStatusQueued + } + + pool.mu.RUnlock() + } + // implicit else: the tx may have been included into a block between // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct - pool.mu.RUnlock() } + return status } @@ -1013,6 +1288,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { if tx == nil { return } + addr, _ := types.Sender(pool.signer, tx) // already validated during insertion // Remove it from the list of known transactions @@ -1020,34 +1296,52 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { if outofbound { pool.priced.Removed(1) } + if pool.locals.contains(addr) { localGauge.Dec(1) } + // Remove the transaction from the pending lists and reset the account nonce + pool.pendingMu.Lock() + if pending := pool.pending[addr]; pending != nil { if removed, invalids := pending.Remove(tx); removed { + pool.pendingCount-- + // If no more pending transactions are left, remove the list if pending.Empty() { delete(pool.pending, addr) } + + pool.pendingMu.Unlock() + // Postpone any invalidated transactions for _, tx := range invalids { // Internal shuffle shouldn't touch the lookup set. 
pool.enqueueTx(tx.Hash(), tx, false, false) } + // Update the account nonce if needed pool.pendingNonces.setIfLower(addr, tx.Nonce()) + // Reduce the pending counter pendingGauge.Dec(int64(1 + len(invalids))) + return } + + pool.pendingMu.TryLock() } + + pool.pendingMu.Unlock() + // Transaction is in the future queue if future := pool.queue[addr]; future != nil { if removed, _ := future.Remove(tx); removed { // Reduce the queued counter queuedGauge.Dec(1) } + if future.Empty() { delete(pool.queue, addr) delete(pool.beats, addr) @@ -1103,8 +1397,10 @@ func (pool *TxPool) scheduleReorgLoop() { for { // Launch next background reorg if needed if curDone == nil && launchNextRun { + ctx := context.Background() + // Run the background reorg and announcements - go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) + go pool.runReorg(ctx, nextDone, reset, dirtyAccounts, queuedEvents) // Prepare everything for the next round of reorg curDone, nextDone = nextDone, make(chan struct{}) @@ -1159,86 +1455,175 @@ func (pool *TxPool) scheduleReorgLoop() { } // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. -func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { - defer func(t0 time.Time) { - reorgDurationTimer.Update(time.Since(t0)) - }(time.Now()) - defer close(done) - - var promoteAddrs []common.Address - if dirtyAccounts != nil && reset == nil { - // Only dirty accounts need to be promoted, unless we're resetting. - // For resets, all addresses in the tx queue will be promoted and - // the flatten operation can be avoided. - promoteAddrs = dirtyAccounts.flatten() - } - pool.mu.Lock() - if reset != nil { - // Reset from the old head to the new, rescheduling any reorged transactions - pool.reset(reset.oldHead, reset.newHead) - - // Nonces were reset, discard any events that became stale - for addr := range events { - events[addr].Forward(pool.pendingNonces.get(addr)) - if events[addr].Len() == 0 { - delete(events, addr) +// +//nolint:gocognit +func (pool *TxPool) runReorg(ctx context.Context, done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { + tracing.Exec(ctx, "TxPoolReorg", "txpool-reorg", func(ctx context.Context, span trace.Span) { + defer func(t0 time.Time) { + reorgDurationTimer.Update(time.Since(t0)) + }(time.Now()) + + defer close(done) + + var promoteAddrs []common.Address + + tracing.ElapsedTime(ctx, span, "01 dirty accounts flattening", func(_ context.Context, innerSpan trace.Span) { + if dirtyAccounts != nil && reset == nil { + // Only dirty accounts need to be promoted, unless we're resetting. + // For resets, all addresses in the tx queue will be promoted and + // the flatten operation can be avoided. 
+ promoteAddrs = dirtyAccounts.flatten() } + + tracing.SetAttributes( + innerSpan, + attribute.Int("promoteAddresses-flatten", len(promoteAddrs)), + ) + }) + + tracing.ElapsedTime(ctx, span, "02 obtaining pool.WMutex", func(_ context.Context, _ trace.Span) { + pool.mu.Lock() + }) + + if reset != nil { + tracing.ElapsedTime(ctx, span, "03 reset-head reorg", func(_ context.Context, innerSpan trace.Span) { + + // Reset from the old head to the new, rescheduling any reorged transactions + tracing.ElapsedTime(ctx, innerSpan, "04 reset-head-itself reorg", func(_ context.Context, innerSpan trace.Span) { + pool.reset(reset.oldHead, reset.newHead) + }) + + tracing.SetAttributes( + innerSpan, + attribute.Int("events-reset-head", len(events)), + ) + + // Nonces were reset, discard any events that became stale + for addr := range events { + events[addr].Forward(pool.pendingNonces.get(addr)) + + if events[addr].Len() == 0 { + delete(events, addr) + } + } + + // Reset needs promote for all addresses + promoteAddrs = make([]common.Address, 0, len(pool.queue)) + for addr := range pool.queue { + promoteAddrs = append(promoteAddrs, addr) + } + + tracing.SetAttributes( + innerSpan, + attribute.Int("promoteAddresses-reset-head", len(promoteAddrs)), + ) + }) } - // Reset needs promote for all addresses - promoteAddrs = make([]common.Address, 0, len(pool.queue)) - for addr := range pool.queue { - promoteAddrs = append(promoteAddrs, addr) + + // Check for pending transactions for every account that sent new ones + var promoted []*types.Transaction + + tracing.ElapsedTime(ctx, span, "05 promoteExecutables", func(_ context.Context, _ trace.Span) { + promoted = pool.promoteExecutables(promoteAddrs) + }) + + tracing.SetAttributes( + span, + attribute.Int("count.promoteAddresses-reset-head", len(promoteAddrs)), + attribute.Int("count.all", pool.all.Count()), + attribute.Int("count.pending", len(pool.pending)), + attribute.Int("count.queue", len(pool.queue)), + ) + + // If a new block appeared, validate the pool of pending transactions. This will + // remove any transaction that has been included in the block or was invalidated + // because of another transaction (e.g. higher gas price). 
+ + if reset != nil { + tracing.ElapsedTime(ctx, span, "new block", func(_ context.Context, innerSpan trace.Span) { + + tracing.ElapsedTime(ctx, innerSpan, "06 demoteUnexecutables", func(_ context.Context, _ trace.Span) { + pool.demoteUnexecutables() + }) + + var nonces map[common.Address]uint64 + + tracing.ElapsedTime(ctx, innerSpan, "07 set_base_fee", func(_ context.Context, _ trace.Span) { + if reset.newHead != nil { + if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { + // london fork enabled, reset given the base fee + pendingBaseFee := misc.CalcBaseFeeUint(pool.chainconfig, reset.newHead) + pool.priced.SetBaseFee(pendingBaseFee) + } else { + // london fork not enabled, reheap to "reset" the priced list + pool.priced.Reheap() + } + } + + // Update all accounts to the latest known pending nonce + nonces = make(map[common.Address]uint64, len(pool.pending)) + }) + + tracing.ElapsedTime(ctx, innerSpan, "08 obtaining pendingMu.RMutex", func(_ context.Context, _ trace.Span) { + pool.pendingMu.RLock() + }) + + var highestPending *types.Transaction + + tracing.ElapsedTime(ctx, innerSpan, "09 fill nonces", func(_ context.Context, innerSpan trace.Span) { + for addr, list := range pool.pending { + highestPending = list.LastElement() + nonces[addr] = highestPending.Nonce() + 1 + } + }) + + pool.pendingMu.RUnlock() + + tracing.ElapsedTime(ctx, innerSpan, "10 reset nonces", func(_ context.Context, _ trace.Span) { + pool.pendingNonces.setAll(nonces) + }) + }) } - } - // Check for pending transactions for every account that sent new ones - promoted := pool.promoteExecutables(promoteAddrs) - - // If a new block appeared, validate the pool of pending transactions. This will - // remove any transaction that has been included in the block or was invalidated - // because of another transaction (e.g. higher gas price). - if reset != nil { - pool.demoteUnexecutables() - if reset.newHead != nil { - if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { - // london fork enabled, reset given the base fee - pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead) - pool.priced.SetBaseFee(pendingBaseFee) - } else { - // london fork not enabled, reheap to "reset" the priced list - pool.priced.Reheap() + + // Ensure pool.queue and pool.pending sizes stay within the configured limits. + tracing.ElapsedTime(ctx, span, "11 truncatePending", func(_ context.Context, _ trace.Span) { + pool.truncatePending() + }) + + tracing.ElapsedTime(ctx, span, "12 truncateQueue", func(_ context.Context, _ trace.Span) { + pool.truncateQueue() + }) + + dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) + pool.changesSinceReorg = 0 // Reset change counter + + pool.mu.Unlock() + + // Notify subsystems for newly added transactions + tracing.ElapsedTime(ctx, span, "13 notify about new transactions", func(_ context.Context, _ trace.Span) { + for _, tx := range promoted { + addr, _ := types.Sender(pool.signer, tx) + + if _, ok := events[addr]; !ok { + events[addr] = newTxSortedMap() + } + + events[addr].Put(tx) } - } - // Update all accounts to the latest known pending nonce - nonces := make(map[common.Address]uint64, len(pool.pending)) - for addr, list := range pool.pending { - highestPending := list.LastElement() - nonces[addr] = highestPending.Nonce() + 1 - } - pool.pendingNonces.setAll(nonces) - } - // Ensure pool.queue and pool.pending sizes stay within the configured limits. 
- pool.truncatePending() - pool.truncateQueue() + }) - dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) - pool.changesSinceReorg = 0 // Reset change counter - pool.mu.Unlock() + if len(events) > 0 { + tracing.ElapsedTime(ctx, span, "14 txFeed", func(_ context.Context, _ trace.Span) { + var txs []*types.Transaction - // Notify subsystems for newly added transactions - for _, tx := range promoted { - addr, _ := types.Sender(pool.signer, tx) - if _, ok := events[addr]; !ok { - events[addr] = newTxSortedMap() - } - events[addr].Put(tx) - } - if len(events) > 0 { - var txs []*types.Transaction - for _, set := range events { - txs = append(txs, set.Flatten()...) + for _, set := range events { + txs = append(txs, set.Flatten()...) + } + + pool.txFeed.Send(NewTxsEvent{txs}) + }) } - pool.txFeed.Send(NewTxsEvent{txs}) - } + }) } // reset retrieves the current state of the blockchain and ensures the content @@ -1337,64 +1722,100 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { // invalidated transactions (low nonce, low balance) are deleted. func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction { // Track the promoted transactions to broadcast them at once - var promoted []*types.Transaction + var ( + promoted []*types.Transaction + promotedLen int + forwards types.Transactions + forwardsLen int + caps types.Transactions + capsLen int + drops types.Transactions + dropsLen int + list *txList + hash common.Hash + readies types.Transactions + readiesLen int + ) + + balance := uint256.NewInt(0) // Iterate over all accounts and promote any executable transactions for _, addr := range accounts { - list := pool.queue[addr] + list = pool.queue[addr] if list == nil { continue // Just in case someone calls with a non existing account } + // Drop all transactions that are deemed too old (low nonce) - forwards := list.Forward(pool.currentState.GetNonce(addr)) + forwards = list.Forward(pool.currentState.GetNonce(addr)) + forwardsLen = len(forwards) + for _, tx := range forwards { - hash := tx.Hash() + hash = tx.Hash() pool.all.Remove(hash) } - log.Trace("Removed old queued transactions", "count", len(forwards)) + + log.Trace("Removed old queued transactions", "count", forwardsLen) + // Drop all transactions that are too costly (low balance or out of gas) - drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) + balance.SetFromBig(pool.currentState.GetBalance(addr)) + + drops, _ = list.Filter(balance, pool.currentMaxGas) + dropsLen = len(drops) + for _, tx := range drops { - hash := tx.Hash() + hash = tx.Hash() pool.all.Remove(hash) } - log.Trace("Removed unpayable queued transactions", "count", len(drops)) - queuedNofundsMeter.Mark(int64(len(drops))) + + log.Trace("Removed unpayable queued transactions", "count", dropsLen) + queuedNofundsMeter.Mark(int64(dropsLen)) // Gather all executable transactions and promote them - readies := list.Ready(pool.pendingNonces.get(addr)) + readies = list.Ready(pool.pendingNonces.get(addr)) + readiesLen = len(readies) + for _, tx := range readies { - hash := tx.Hash() + hash = tx.Hash() if pool.promoteTx(addr, hash, tx) { promoted = append(promoted, tx) } } - log.Trace("Promoted queued transactions", "count", len(promoted)) - queuedGauge.Dec(int64(len(readies))) + + log.Trace("Promoted queued transactions", "count", promotedLen) + queuedGauge.Dec(int64(readiesLen)) // Drop all transactions over the allowed limit - var caps types.Transactions if !pool.locals.contains(addr) { caps = 
list.Cap(int(pool.config.AccountQueue)) + capsLen = len(caps) + for _, tx := range caps { - hash := tx.Hash() + hash = tx.Hash() pool.all.Remove(hash) + log.Trace("Removed cap-exceeding queued transaction", "hash", hash) } - queuedRateLimitMeter.Mark(int64(len(caps))) + + queuedRateLimitMeter.Mark(int64(capsLen)) } + // Mark all the items dropped as removed - pool.priced.Removed(len(forwards) + len(drops) + len(caps)) - queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) + pool.priced.Removed(forwardsLen + dropsLen + capsLen) + + queuedGauge.Dec(int64(forwardsLen + dropsLen + capsLen)) + if pool.locals.contains(addr) { - localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) + localGauge.Dec(int64(forwardsLen + dropsLen + capsLen)) } + // Delete the entire queue entry if it became empty. if list.Empty() { delete(pool.queue, addr) delete(pool.beats, addr) } } + return promoted } @@ -1402,86 +1823,162 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans // pending limit. The algorithm tries to reduce transaction counts by an approximately // equal number for all for accounts with many pending transactions. func (pool *TxPool) truncatePending() { - pending := uint64(0) - for _, list := range pool.pending { - pending += uint64(list.Len()) - } + pending := uint64(pool.pendingCount) if pending <= pool.config.GlobalSlots { return } pendingBeforeCap := pending + + var listLen int + + type pair struct { + address common.Address + value int64 + } + // Assemble a spam order to penalize large transactors first - spammers := prque.New(nil) + spammers := make([]pair, 0, 8) + count := 0 + + var ok bool + + pool.pendingMu.RLock() for addr, list := range pool.pending { // Only evict transactions from high rollers - if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, int64(list.Len())) + listLen = len(list.txs.items) + + pool.pendingMu.RUnlock() + + pool.locals.m.RLock() + + if uint64(listLen) > pool.config.AccountSlots { + if _, ok = pool.locals.accounts[addr]; ok { + pool.locals.m.RUnlock() + + pool.pendingMu.RLock() + + continue + } + + count++ + + spammers = append(spammers, pair{addr, int64(listLen)}) } + + pool.locals.m.RUnlock() + + pool.pendingMu.RLock() } + + pool.pendingMu.RUnlock() + // Gradually drop transactions from offenders - offenders := []common.Address{} - for pending > pool.config.GlobalSlots && !spammers.Empty() { + offenders := make([]common.Address, 0, len(spammers)) + sort.Slice(spammers, func(i, j int) bool { + return spammers[i].value < spammers[j].value + }) + + var ( + offender common.Address + caps types.Transactions + capsLen int + list *txList + hash common.Hash + ) + + // todo: metrics: spammers, offenders, total loops + for len(spammers) != 0 && pending > pool.config.GlobalSlots { // Retrieve the next offender if not local address - offender, _ := spammers.Pop() - offenders = append(offenders, offender.(common.Address)) + offender, spammers = spammers[len(spammers)-1].address, spammers[:len(spammers)-1] + offenders = append(offenders, offender) + + var threshold int // Equalize balances until all the same or below threshold if len(offenders) > 1 { // Calculate the equalization threshold for all current offenders - threshold := pool.pending[offender.(common.Address)].Len() + pool.pendingMu.RLock() + threshold = len(pool.pending[offender].txs.items) // Iteratively reduce all offenders until below limit or threshold reached for pending > pool.config.GlobalSlots && 
pool.pending[offenders[len(offenders)-2]].Len() > threshold { for i := 0; i < len(offenders)-1; i++ { - list := pool.pending[offenders[i]] + list = pool.pending[offenders[i]] + + caps = list.Cap(len(list.txs.items) - 1) + capsLen = len(caps) + + pool.pendingMu.RUnlock() - caps := list.Cap(list.Len() - 1) for _, tx := range caps { // Drop the transaction from the global pools too - hash := tx.Hash() + hash = tx.Hash() pool.all.Remove(hash) // Update the account nonce to the dropped transaction pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } - pool.priced.Removed(len(caps)) - pendingGauge.Dec(int64(len(caps))) + + pool.priced.Removed(capsLen) + + pendingGauge.Dec(int64(capsLen)) if pool.locals.contains(offenders[i]) { - localGauge.Dec(int64(len(caps))) + localGauge.Dec(int64(capsLen)) } + pending-- + + pool.pendingMu.RLock() } } + + pool.pendingMu.RUnlock() } } // If still above threshold, reduce to limit or min allowance if pending > pool.config.GlobalSlots && len(offenders) > 0 { + + pool.pendingMu.RLock() + for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { for _, addr := range offenders { - list := pool.pending[addr] + list = pool.pending[addr] + + caps = list.Cap(len(list.txs.items) - 1) + capsLen = len(caps) + + pool.pendingMu.RUnlock() - caps := list.Cap(list.Len() - 1) for _, tx := range caps { // Drop the transaction from the global pools too - hash := tx.Hash() + hash = tx.Hash() pool.all.Remove(hash) // Update the account nonce to the dropped transaction pool.pendingNonces.setIfLower(addr, tx.Nonce()) log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } - pool.priced.Removed(len(caps)) - pendingGauge.Dec(int64(len(caps))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(caps))) + + pool.priced.Removed(capsLen) + + pendingGauge.Dec(int64(capsLen)) + + if _, ok = pool.locals.accounts[addr]; ok { + localGauge.Dec(int64(capsLen)) } + pending-- + + pool.pendingMu.RLock() } } + + pool.pendingMu.RUnlock() } + pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) } @@ -1504,27 +2001,52 @@ func (pool *TxPool) truncateQueue() { } sort.Sort(addresses) + var ( + tx *types.Transaction + txs types.Transactions + list *txList + addr addressByHeartbeat + size uint64 + ) + // Drop transactions until the total is below the limit or only locals remain for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { - addr := addresses[len(addresses)-1] - list := pool.queue[addr.address] + addr = addresses[len(addresses)-1] + list = pool.queue[addr.address] addresses = addresses[:len(addresses)-1] + var ( + listFlatten types.Transactions + isSet bool + ) + // Drop all transactions if they are less than the overflow - if size := uint64(list.Len()); size <= drop { - for _, tx := range list.Flatten() { + if size = uint64(list.Len()); size <= drop { + listFlatten = list.Flatten() + isSet = true + + for _, tx = range listFlatten { pool.removeTx(tx.Hash(), true) } + drop -= size queuedRateLimitMeter.Mark(int64(size)) + continue } + // Otherwise drop only last few transactions - txs := list.Flatten() + if !isSet { + listFlatten = list.Flatten() + } + + txs = listFlatten for i := len(txs) - 1; i >= 0 && drop > 0; i-- { pool.removeTx(txs[i].Hash(), true) + drop-- + queuedRateLimitMeter.Mark(1) } } @@ -1538,56 +2060,98 @@ func (pool *TxPool) truncateQueue() { // is always explicitly triggered by 
SetBaseFee and it would be unnecessary and wasteful // to trigger a re-heap is this function func (pool *TxPool) demoteUnexecutables() { + balance := uint256.NewInt(0) + + var ( + olds types.Transactions + oldsLen int + hash common.Hash + drops types.Transactions + dropsLen int + invalids types.Transactions + invalidsLen int + gapped types.Transactions + gappedLen int + ) + // Iterate over all accounts and demote any non-executable transactions + pool.pendingMu.RLock() + for addr, list := range pool.pending { nonce := pool.currentState.GetNonce(addr) // Drop all transactions that are deemed too old (low nonce) - olds := list.Forward(nonce) + olds = list.Forward(nonce) + oldsLen = len(olds) + for _, tx := range olds { - hash := tx.Hash() + hash = tx.Hash() pool.all.Remove(hash) log.Trace("Removed old pending transaction", "hash", hash) } + // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later - drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) + balance.SetFromBig(pool.currentState.GetBalance(addr)) + drops, invalids = list.Filter(balance, pool.currentMaxGas) + dropsLen = len(drops) + invalidsLen = len(invalids) + for _, tx := range drops { - hash := tx.Hash() + hash = tx.Hash() + log.Trace("Removed unpayable pending transaction", "hash", hash) + pool.all.Remove(hash) } - pendingNofundsMeter.Mark(int64(len(drops))) + + pendingNofundsMeter.Mark(int64(dropsLen)) for _, tx := range invalids { - hash := tx.Hash() + hash = tx.Hash() + log.Trace("Demoting pending transaction", "hash", hash) // Internal shuffle shouldn't touch the lookup set. pool.enqueueTx(hash, tx, false, false) } - pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) + + pendingGauge.Dec(int64(oldsLen + dropsLen + invalidsLen)) + if pool.locals.contains(addr) { - localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) + localGauge.Dec(int64(oldsLen + dropsLen + invalidsLen)) } // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { - gapped := list.Cap(0) + gapped = list.Cap(0) + gappedLen = len(gapped) + for _, tx := range gapped { - hash := tx.Hash() + hash = tx.Hash() log.Error("Demoting invalidated transaction", "hash", hash) // Internal shuffle shouldn't touch the lookup set. pool.enqueueTx(hash, tx, false, false) } - pendingGauge.Dec(int64(len(gapped))) + + pendingGauge.Dec(int64(gappedLen)) // This might happen in a reorg, so log it to the metering - blockReorgInvalidatedTx.Mark(int64(len(gapped))) + blockReorgInvalidatedTx.Mark(int64(gappedLen)) } + // Delete the entire pending entry if it became empty. if list.Empty() { + pool.pendingMu.RUnlock() + pool.pendingMu.Lock() + + pool.pendingCount -= pool.pending[addr].Len() delete(pool.pending, addr) + + pool.pendingMu.Unlock() + pool.pendingMu.RLock() } } + + pool.pendingMu.RUnlock() } // addressByHeartbeat is an account address tagged with its last activity timestamp. @@ -1605,9 +2169,10 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // accountSet is simply a set of addresses to check for existence, and a signer // capable of deriving addresses from transactions. 
type accountSet struct { - accounts map[common.Address]struct{} - signer types.Signer - cache *[]common.Address + accounts map[common.Address]struct{} + accountsFlatted []common.Address + signer types.Signer + m sync.RWMutex } // newAccountSet creates a new address set with an associated signer for sender @@ -1625,17 +2190,26 @@ func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { // contains checks if a given address is contained within the set. func (as *accountSet) contains(addr common.Address) bool { + as.m.RLock() + defer as.m.RUnlock() + _, exist := as.accounts[addr] return exist } func (as *accountSet) empty() bool { + as.m.RLock() + defer as.m.RUnlock() + return len(as.accounts) == 0 } // containsTx checks if the sender of a given tx is within the set. If the sender // cannot be derived, this method returns false. func (as *accountSet) containsTx(tx *types.Transaction) bool { + as.m.RLock() + defer as.m.RUnlock() + if addr, err := types.Sender(as.signer, tx); err == nil { return as.contains(addr) } @@ -1644,8 +2218,14 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool { // add inserts a new address into the set to track. func (as *accountSet) add(addr common.Address) { + as.m.Lock() + defer as.m.Unlock() + + if _, ok := as.accounts[addr]; !ok { + as.accountsFlatted = append(as.accountsFlatted, addr) + } + as.accounts[addr] = struct{}{} - as.cache = nil } // addTx adds the sender of tx into the set. @@ -1658,22 +2238,25 @@ func (as *accountSet) addTx(tx *types.Transaction) { // flatten returns the list of addresses within this set, also caching it for later // reuse. The returned slice should not be changed! func (as *accountSet) flatten() []common.Address { - if as.cache == nil { - accounts := make([]common.Address, 0, len(as.accounts)) - for account := range as.accounts { - accounts = append(accounts, account) - } - as.cache = &accounts - } - return *as.cache + as.m.RLock() + defer as.m.RUnlock() + + return as.accountsFlatted } // merge adds all addresses from the 'other' set into 'as'. 
func (as *accountSet) merge(other *accountSet) { + var ok bool + + as.m.Lock() + defer as.m.Unlock() + for addr := range other.accounts { + if _, ok = as.accounts[addr]; !ok { + as.accountsFlatted = append(as.accountsFlatted, addr) + } as.accounts[addr] = struct{}{} } - as.cache = nil } // txLookup is used internally by TxPool to track transactions while allowing @@ -1829,7 +2412,10 @@ func (t *txLookup) RemoteToLocals(locals *accountSet) int { var migrated int for hash, tx := range t.remotes { if locals.containsTx(tx) { + locals.m.Lock() t.locals[hash] = tx + locals.m.Unlock() + delete(t.remotes, hash) migrated += 1 } diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 664ca6c9d4..63f712bb9c 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -21,6 +21,7 @@ import ( "crypto/ecdsa" "errors" "fmt" + "io" "io/ioutil" "math/big" "math/rand" @@ -32,11 +33,15 @@ import ( "testing" "time" + "github.com/holiman/uint256" + "go.uber.org/goleak" "gonum.org/v1/gonum/floats" "gonum.org/v1/gonum/stat" "pgregory.net/rapid" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/debug" + "github.com/ethereum/go-ethereum/common/leak" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -98,7 +103,7 @@ func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Tr } func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key) + tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{0x01}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key) return tx } @@ -153,12 +158,17 @@ func validateTxPoolInternals(pool *TxPool) error { if total := pool.all.Count(); total != pending+queued { return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued) } + pool.priced.Reheap() priced, remote := pool.priced.urgent.Len()+pool.priced.floating.Len(), pool.all.RemoteCount() if priced != remote { return fmt.Errorf("total priced transaction count %d != %d", priced, remote) } + // Ensure the next nonce to assign is the correct one + pool.pendingMu.RLock() + defer pool.pendingMu.RUnlock() + for addr, txs := range pool.pending { // Find the last transaction var last uint64 @@ -167,10 +177,12 @@ func validateTxPoolInternals(pool *TxPool) error { last = nonce } } + if nonce := pool.pendingNonces.get(addr); nonce != last+1 { return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1) } } + return nil } @@ -325,10 +337,18 @@ func TestInvalidTransactions(t *testing.T) { } tx = transaction(1, 100000, key) + + pool.gasPriceMu.Lock() + pool.gasPrice = big.NewInt(1000) - if err := pool.AddRemote(tx); err != ErrUnderpriced { + pool.gasPriceUint = uint256.NewInt(1000) + + pool.gasPriceMu.Unlock() + + if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { t.Error("expected", ErrUnderpriced, "got", err) } + if err := pool.AddLocal(tx); err != nil { t.Error("expected", nil, "got", err) } @@ -347,9 +367,12 @@ func TestTransactionQueue(t *testing.T) { pool.enqueueTx(tx.Hash(), tx, false, true) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) + + pool.pendingMu.RLock() if len(pool.pending) != 1 { t.Error("expected valid txs to be 1 is", len(pool.pending)) } + pool.pendingMu.RUnlock() tx = 
transaction(1, 100, key) from, _ = deriveSender(tx) @@ -357,9 +380,13 @@ func TestTransactionQueue(t *testing.T) { pool.enqueueTx(tx.Hash(), tx, false, true) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) + + pool.pendingMu.RLock() if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { t.Error("expected transaction to be in tx pool") } + pool.pendingMu.RUnlock() + if len(pool.queue) > 0 { t.Error("expected transaction queue to be empty. is", len(pool.queue)) } @@ -383,9 +410,13 @@ func TestTransactionQueue2(t *testing.T) { pool.enqueueTx(tx3.Hash(), tx3, false, true) pool.promoteExecutables([]common.Address{from}) + + pool.pendingMu.RLock() if len(pool.pending) != 1 { t.Error("expected pending length to be 1, got", len(pool.pending)) } + pool.pendingMu.RUnlock() + if pool.queue[from].Len() != 2 { t.Error("expected len(queue) == 2, got", pool.queue[from].Len()) } @@ -399,8 +430,10 @@ func TestTransactionNegativeValue(t *testing.T) { tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) from, _ := deriveSender(tx) + testAddBalance(pool, from, big.NewInt(1)) - if err := pool.AddRemote(tx); err != ErrNegativeValue { + + if err := pool.AddRemote(tx); !errors.Is(err, ErrNegativeValue) { t.Error("expected", ErrNegativeValue, "got", err) } } @@ -413,7 +446,7 @@ func TestTransactionTipAboveFeeCap(t *testing.T) { tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) - if err := pool.AddRemote(tx); err != ErrTipAboveFeeCap { + if err := pool.AddRemote(tx); !errors.Is(err, ErrTipAboveFeeCap) { t.Error("expected", ErrTipAboveFeeCap, "got", err) } } @@ -428,12 +461,12 @@ func TestTransactionVeryHighValues(t *testing.T) { veryBigNumber.Lsh(veryBigNumber, 300) tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key) - if err := pool.AddRemote(tx); err != ErrTipVeryHigh { + if err := pool.AddRemote(tx); !errors.Is(err, ErrTipVeryHigh) { t.Error("expected", ErrTipVeryHigh, "got", err) } tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key) - if err := pool.AddRemote(tx2); err != ErrFeeCapVeryHigh { + if err := pool.AddRemote(tx2); !errors.Is(err, ErrFeeCapVeryHigh) { t.Error("expected", ErrFeeCapVeryHigh, "got", err) } } @@ -495,23 +528,32 @@ func TestTransactionDoubleNonce(t *testing.T) { if replace, err := pool.add(tx2, false); err != nil || !replace { t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace) } + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) + + pool.pendingMu.RLock() if pool.pending[addr].Len() != 1 { t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) } if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) } + pool.pendingMu.RUnlock() // Add the third transaction and ensure it's not saved (smaller price) pool.add(tx3, false) + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) + + pool.pendingMu.RLock() if pool.pending[addr].Len() != 1 { t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) } if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) } + pool.pendingMu.RUnlock() + // Ensure the total transaction count is correct if pool.all.Count() != 1 { t.Error("expected 1 total transactions, got", pool.all.Count()) @@ -530,9 +572,13 @@ func TestTransactionMissingNonce(t *testing.T) { 
if _, err := pool.add(tx, false); err != nil { t.Error("didn't expect error", err) } + + pool.pendingMu.RLock() if len(pool.pending) != 0 { t.Error("expected 0 pending transactions, got", len(pool.pending)) } + pool.pendingMu.RUnlock() + if pool.queue[addr].Len() != 1 { t.Error("expected 1 queued transaction, got", pool.queue[addr].Len()) } @@ -603,19 +649,27 @@ func TestTransactionDropping(t *testing.T) { pool.enqueueTx(tx12.Hash(), tx12, false, true) // Check that pre and post validations leave the pool as is + pool.pendingMu.RLock() if pool.pending[account].Len() != 3 { t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) } + pool.pendingMu.RUnlock() + if pool.queue[account].Len() != 3 { t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) } if pool.all.Count() != 6 { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) } + <-pool.requestReset(nil, nil) + + pool.pendingMu.RLock() if pool.pending[account].Len() != 3 { t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) } + pool.pendingMu.RUnlock() + if pool.queue[account].Len() != 3 { t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) } @@ -626,6 +680,7 @@ func TestTransactionDropping(t *testing.T) { testAddBalance(pool, account, big.NewInt(-650)) <-pool.requestReset(nil, nil) + pool.pendingMu.RLock() if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { t.Errorf("funded pending transaction missing: %v", tx0) } @@ -635,6 +690,8 @@ func TestTransactionDropping(t *testing.T) { if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok { t.Errorf("out-of-fund pending transaction present: %v", tx1) } + pool.pendingMu.RUnlock() + if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { t.Errorf("funded queued transaction missing: %v", tx10) } @@ -651,12 +708,15 @@ func TestTransactionDropping(t *testing.T) { atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100) <-pool.requestReset(nil, nil) + pool.pendingMu.RLock() if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { t.Errorf("funded pending transaction missing: %v", tx0) } if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok { t.Errorf("over-gased pending transaction present: %v", tx1) } + pool.pendingMu.RUnlock() + if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { t.Errorf("funded queued transaction missing: %v", tx10) } @@ -711,19 +771,27 @@ func TestTransactionPostponing(t *testing.T) { } } // Check that pre and post validations leave the pool as is + pool.pendingMu.RLock() if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) } + pool.pendingMu.RUnlock() + if len(pool.queue) != 0 { t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) } if pool.all.Count() != len(txs) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) } + <-pool.requestReset(nil, nil) + + pool.pendingMu.RLock() if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) } + pool.pendingMu.RUnlock() + if len(pool.queue) != 0 { t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) } @@ -738,12 +806,17 @@ func TestTransactionPostponing(t *testing.T) { // The first account's first 
transaction remains valid, check that subsequent // ones are either filtered out, or queued up for later. + pool.pendingMu.RLock() if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok { t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0]) } + pool.pendingMu.RUnlock() + if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok { t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0]) } + + pool.pendingMu.RLock() for i, tx := range txs[1:100] { if i%2 == 1 { if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { @@ -761,11 +834,16 @@ func TestTransactionPostponing(t *testing.T) { } } } + pool.pendingMu.RUnlock() + // The second account's first transaction got invalid, check that all transactions // are either filtered out, or queued up for later. + pool.pendingMu.RLock() if pool.pending[accs[1]] != nil { t.Errorf("invalidated account still has pending transactions") } + pool.pendingMu.RUnlock() + for i, tx := range txs[100:] { if i%2 == 1 { if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok { @@ -854,9 +932,13 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } + + pool.pendingMu.RLock() if len(pool.pending) != 0 { t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) } + pool.pendingMu.RUnlock() + if i <= testTxPoolConfig.AccountQueue { if pool.queue[account].Len() != int(i) { t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) @@ -935,6 +1017,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { for i := uint64(0); i < 3*config.GlobalQueue; i++ { txs = append(txs, transaction(i+1, 100000, local)) } + pool.AddLocals(txs) // If locals are disabled, the previous eviction algorithm should apply here too @@ -1112,6 +1195,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1140,9 +1224,13 @@ func TestTransactionPendingLimiting(t *testing.T) { if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } + + pool.pendingMu.RLock() if pool.pending[account].Len() != int(i)+1 { t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1) } + pool.pendingMu.RUnlock() + if len(pool.queue) != 0 { t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) } @@ -1195,9 +1283,13 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { pool.AddRemotesSync(txs) pending := 0 + + pool.pendingMu.RLock() for _, list := range pool.pending { pending += list.Len() } + pool.pendingMu.RUnlock() + if pending > int(config.GlobalSlots) { t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots) } @@ -1330,11 +1422,14 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { // Import the batch and verify that limits have been enforced pool.AddRemotesSync(txs) + pool.pendingMu.RLock() for addr, list := range pool.pending { if list.Len() != int(config.AccountSlots) { t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots) } } + 
pool.pendingMu.RUnlock() + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1391,15 +1486,19 @@ func TestTransactionPoolRepricing(t *testing.T) { if pending != 7 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) } + if queued != 3 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) } + if err := validateEvents(events, 7); err != nil { t.Fatalf("original event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // Reprice the pool and check that underpriced transactions get dropped pool.SetGasPrice(big.NewInt(2)) @@ -1407,58 +1506,76 @@ func TestTransactionPoolRepricing(t *testing.T) { if pending != 2 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) } + if queued != 5 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) } + if err := validateEvents(events, 0); err != nil { t.Fatalf("reprice event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // Check that we can't add the old transactions back - if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); err != ErrUnderpriced { + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { + + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); err != ErrUnderpriced { + + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } + if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // However we can add local underpriced transactions tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3]) + if err := pool.AddLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } + if pending, _ = pool.Stats(); pending != 3 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) } + if err := validateEvents(events, 1); err != nil { t.Fatalf("post-reprice local event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // And we can fill gaps with properly priced transactions if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } + if err := 
validateEvents(events, 5); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1487,6 +1604,7 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) } + // Generate and queue a batch of transactions, both pending and queued txs := types.Transactions{} @@ -1512,15 +1630,19 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { if pending != 7 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) } + if queued != 3 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) } + if err := validateEvents(events, 7); err != nil { t.Fatalf("original event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // Reprice the pool and check that underpriced transactions get dropped pool.SetGasPrice(big.NewInt(2)) @@ -1528,64 +1650,87 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { if pending != 2 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) } + if queued != 5 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) } + if err := validateEvents(events, 0); err != nil { t.Fatalf("reprice event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // Check that we can't add the old transactions back tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { + + if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } + tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { + + if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } + tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } + if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // However we can add local underpriced transactions tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3]) + if err := pool.AddLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } + if pending, _ = pool.Stats(); pending != 3 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) } + if err := validateEvents(events, 1); err != nil { t.Fatalf("post-reprice local event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // And we can fill gaps with properly priced transactions tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } + tx = 
dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1]) + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } + tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2]) + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } + if err := validateEvents(events, 5); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1719,7 +1864,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding an underpriced transaction on block limit fails - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } // Ensure that adding high priced transactions drops cheap ones, but not own @@ -1891,7 +2036,7 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { // Ensure that adding an underpriced transaction fails tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1 + if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } @@ -1991,7 +2136,7 @@ func TestDualHeapEviction(t *testing.T) { add(false) for baseFee = 0; baseFee <= 1000; baseFee += 100 { - pool.priced.SetBaseFee(big.NewInt(int64(baseFee))) + pool.priced.SetBaseFee(uint256.NewInt(uint64(baseFee))) add(true) check(highCap, "fee cap") add(false) @@ -2020,49 +2165,65 @@ func TestTransactionDeduplication(t *testing.T) { // Create a batch of transactions and add a few of them txs := make([]*types.Transaction, 16) + for i := 0; i < len(txs); i++ { txs[i] = pricedTransaction(uint64(i), 100000, big.NewInt(1), key) } + var firsts []*types.Transaction + for i := 0; i < len(txs); i += 2 { firsts = append(firsts, txs[i]) } + errs := pool.AddRemotesSync(firsts) - if len(errs) != len(firsts) { - t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) + + if len(errs) != 0 { + t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), 0) } + for i, err := range errs { if err != nil { t.Errorf("add %d failed: %v", i, err) } } + pending, queued := pool.Stats() + if pending != 1 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) } + if queued != len(txs)/2-1 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) } + // Try to add all of them now and ensure previous ones error out as knowns errs = pool.AddRemotesSync(txs) - if len(errs) != len(txs) { - t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) + if len(errs) != 0 { + t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), 0) } + for i, err := range errs { if i%2 == 0 && err == nil { t.Errorf("add %d succeeded, should have failed as known", i) } + if i%2 == 1 && err != nil { t.Errorf("add %d failed: %v", i, err) } } + pending, queued = pool.Stats() + if pending != len(txs) { t.Fatalf("pending transactions mismatched: have %d, 
want %d", pending, len(txs)) } + if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -2096,12 +2257,15 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap pending transaction: %v", err) } + if err := validateEvents(events, 2); err != nil { t.Fatalf("cheap replacement event firing failed: %v", err) } @@ -2109,12 +2273,15 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper pending transaction: %v", err) } + if err := validateEvents(events, 2); err != nil { t.Fatalf("proper replacement event firing failed: %v", err) } @@ -2123,9 +2290,11 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap queued transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap queued transaction: %v", err) } @@ -2133,9 +2302,11 @@ func TestTransactionReplacement(t *testing.T) { if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper queued transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper queued transaction: %v", err) } @@ -2143,6 
+2314,7 @@ func TestTransactionReplacement(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("queued replacement event firing failed: %v", err) } + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -2197,7 +2369,7 @@ func TestTransactionReplacementDynamicFee(t *testing.T) { } // 2. Don't bump tip or feecap => discard tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 3. Bump both more than min => accept @@ -2220,22 +2392,22 @@ func TestTransactionReplacementDynamicFee(t *testing.T) { } // 6. Bump tip max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 7. Bump fee cap max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 8. Bump tip min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 9. Bump fee cap min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) { t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 10. Check events match expected (3 new executable txs during pending, 0 during queue) @@ -2465,6 +2637,7 @@ func benchmarkPendingDemotion(b *testing.B, size int) { } // Benchmark the speed of pool validation b.ResetTimer() + b.ReportAllocs() for i := 0; i < b.N; i++ { pool.demoteUnexecutables() } @@ -2496,15 +2669,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) { } // Benchmarks the speed of batched transaction insertion. 
-func BenchmarkPoolBatchInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, false) } -func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, false) } -func BenchmarkPoolBatchInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, false) } - -func BenchmarkPoolBatchLocalInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, true) } -func BenchmarkPoolBatchLocalInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, true) } -func BenchmarkPoolBatchLocalInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, true) } - -func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) { +func BenchmarkPoolBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool pool, key := setupTxPool() defer pool.Stop() @@ -2512,21 +2677,153 @@ func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) { account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) - batches := make([]types.Transactions, b.N) - for i := 0; i < b.N; i++ { - batches[i] = make(types.Transactions, size) - for j := 0; j < size; j++ { - batches[i][j] = transaction(uint64(size*i+j), 100000, key) - } + const format = "size %d, is local %t" + + cases := []struct { + name string + size int + isLocal bool + }{ + {size: 100, isLocal: false}, + {size: 1000, isLocal: false}, + {size: 10000, isLocal: false}, + + {size: 100, isLocal: true}, + {size: 1000, isLocal: true}, + {size: 10000, isLocal: true}, } + + for i := range cases { + cases[i].name = fmt.Sprintf(format, cases[i].size, cases[i].isLocal) + } + // Benchmark importing the transactions into the queue - b.ResetTimer() - for _, batch := range batches { - if local { - pool.AddLocals(batch) - } else { - pool.AddRemotes(batch) - } + + for _, testCase := range cases { + singleCase := testCase + + b.Run(singleCase.name, func(b *testing.B) { + batches := make([]types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + batches[i] = make(types.Transactions, singleCase.size) + + for j := 0; j < singleCase.size; j++ { + batches[i][j] = transaction(uint64(singleCase.size*i+j), 100000, key) + } + } + + b.ResetTimer() + b.ReportAllocs() + + for _, batch := range batches { + if testCase.isLocal { + pool.AddLocals(batch) + } else { + pool.AddRemotes(batch) + } + } + }) + } +} + +func BenchmarkPoolMining(b *testing.B) { + const format = "size %d" + + cases := []struct { + name string + size int + }{ + {size: 1}, + {size: 5}, + {size: 10}, + {size: 20}, + } + + for i := range cases { + cases[i].name = fmt.Sprintf(format, cases[i].size) + } + + const blockGasLimit = 30_000_000 + + // Benchmark importing the transactions into the queue + + for _, testCase := range cases { + singleCase := testCase + + b.Run(singleCase.name, func(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + localKeyPub := localKey.PublicKey + account := crypto.PubkeyToAddress(localKeyPub) + + const balanceStr = "1_000_000_000" + balance, ok := big.NewInt(0).SetString(balanceStr, 0) + if !ok { + b.Fatal("incorrect initial balance", balanceStr) + } + + testAddBalance(pool, account, balance) + + signer := types.NewEIP155Signer(big.NewInt(1)) + baseFee := uint256.NewInt(1) + + const batchesSize = 100 + + batches := make([]types.Transactions, batchesSize) + + for i := 
0; i < batchesSize; i++ { + batches[i] = make(types.Transactions, singleCase.size) + + for j := 0; j < singleCase.size; j++ { + batches[i][j] = transaction(uint64(singleCase.size*i+j), 100_000, localKey) + } + + for _, batch := range batches { + pool.AddRemotes(batch) + } + } + + var promoted int + + for range pendingAddedCh { + promoted++ + + if promoted >= batchesSize*singleCase.size/2 { + break + } + } + + var total int + + b.ResetTimer() + b.ReportAllocs() + + pendingDurations := make([]time.Duration, b.N) + + var added int + + for i := 0; i < b.N; i++ { + added, pendingDurations[i], _ = mining(b, pool, signer, baseFee, blockGasLimit, i) + total += added + } + + b.StopTimer() + + pendingDurationsFloat := make([]float64, len(pendingDurations)) + + for i, v := range pendingDurations { + pendingDurationsFloat[i] = float64(v.Nanoseconds()) + } + + mean, stddev := stat.MeanStdDev(pendingDurationsFloat, nil) + b.Logf("[%s] pending mean %v, stdev %v, %v-%v", + common.NowMilliseconds(), time.Duration(mean), time.Duration(stddev), time.Duration(floats.Min(pendingDurationsFloat)), time.Duration(floats.Max(pendingDurationsFloat))) + }) } } @@ -2566,79 +2863,372 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { } // Benchmarks the speed of batch transaction insertion in case of multiple accounts. -func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) { +func BenchmarkPoolAccountMultiBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool pool, _ := setupTxPool() defer pool.Stop() - b.ReportAllocs() + batches := make(types.Transactions, b.N) + for i := 0; i < b.N; i++ { key, _ := crypto.GenerateKey() account := crypto.PubkeyToAddress(key.PublicKey) + pool.currentState.AddBalance(account, big.NewInt(1000000)) + tx := transaction(uint64(0), 100000, key) + batches[i] = tx } + // Benchmark importing the transactions into the queue + b.ReportAllocs() b.ResetTimer() + for _, tx := range batches { pool.AddRemotesSync([]*types.Transaction{tx}) } } -type acc struct { - nonce uint64 - key *ecdsa.PrivateKey - account common.Address -} - -type testTx struct { - tx *types.Transaction - idx int - isLocal bool -} +func BenchmarkPoolAccountMultiBatchInsertRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() -const localIdx = 0 + batches := make(types.Transactions, b.N) -func getTransactionGen(t *rapid.T, keys []*acc, nonces []uint64, localKey *acc, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64) *testTx { - idx := rapid.IntRange(0, len(keys)-1).Draw(t, "accIdx").(int) + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) - var ( - isLocal bool - key *ecdsa.PrivateKey - ) + pool.currentState.AddBalance(account, big.NewInt(1000000)) - if idx == localIdx { - isLocal = true - key = localKey.key - } else { - key = keys[idx].key + batches[i] = tx } - nonces[idx]++ + done := make(chan struct{}) - gasPriceUint := rapid.Uint64Range(gasPriceMin, gasPriceMax).Draw(t, "gasPrice").(uint64) - gasPrice := big.NewInt(0).SetUint64(gasPriceUint) - gasLimit := rapid.Uint64Range(gasLimitMin, gasLimitMax).Draw(t, "gasLimit").(uint64) + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() - return &testTx{ - tx: pricedTransaction(nonces[idx]-1, gasLimit, gasPrice, key), - idx: idx, - isLocal: isLocal, - } -} + var pending map[common.Address]types.Transactions -type transactionBatches 
struct { - txs []*testTx - totalTxs int -} + loop: + for { + select { + case <-t.C: + pending = pool.Pending(context.Background(), true) + case <-done: + break loop + } + } -func transactionsGen(keys []*acc, nonces []uint64, localKey *acc, minTxs int, maxTxs int, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64, caseParams *strings.Builder) func(t *rapid.T) *transactionBatches { - return func(t *rapid.T) *transactionBatches { - totalTxs := rapid.IntRange(minTxs, maxTxs).Draw(t, "totalTxs").(int) - txs := make([]*testTx, totalTxs) + fmt.Fprint(io.Discard, pending) + }() - gasValues := make([]float64, totalTxs) + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + pool.AddRemotesSync([]*types.Transaction{tx}) + } + + close(done) +} + +func BenchmarkPoolAccountMultiBatchInsertNoLockRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + _ = localKey + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + batches[i] = tx + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var pending map[common.Address]types.Transactions + + for range t.C { + pending = pool.Pending(context.Background(), true) + + if len(pending) >= b.N/2 { + close(done) + + return + } + } + }() + + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + pool.AddRemotes([]*types.Transaction{tx}) + } + + <-done +} + +func BenchmarkPoolAccountsBatchInsert(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + tx := transaction(uint64(0), 100000, key) + + batches[i] = tx + } + + // Benchmark importing the transactions into the queue + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + _ = pool.AddRemoteSync(tx) + } +} + +func BenchmarkPoolAccountsBatchInsertRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + batches[i] = tx + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var pending map[common.Address]types.Transactions + + loop: + for { + select { + case <-t.C: + pending = pool.Pending(context.Background(), true) + case <-done: + break loop + } + } + + fmt.Fprint(io.Discard, pending) + }() + + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + _ = pool.AddRemoteSync(tx) + } + + close(done) +} + +func BenchmarkPoolAccountsBatchInsertNoLockRace(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan 
struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + _ = localKey + + batches := make(types.Transactions, b.N) + + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1000000)) + + batches[i] = tx + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var pending map[common.Address]types.Transactions + + for range t.C { + pending = pool.Pending(context.Background(), true) + + if len(pending) >= b.N/2 { + close(done) + + return + } + } + }() + + b.ReportAllocs() + b.ResetTimer() + + for _, tx := range batches { + _ = pool.AddRemote(tx) + } + + <-done +} + +func TestPoolMultiAccountBatchInsertRace(t *testing.T) { + t.Parallel() + + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() + + const n = 5000 + + batches := make(types.Transactions, n) + batchesSecond := make(types.Transactions, n) + + for i := 0; i < n; i++ { + batches[i] = newTxs(pool) + batchesSecond[i] = newTxs(pool) + } + + done := make(chan struct{}) + + go func() { + t := time.NewTicker(time.Microsecond) + defer t.Stop() + + var ( + pending map[common.Address]types.Transactions + total int + ) + + for range t.C { + pending = pool.Pending(context.Background(), true) + total = len(pending) + + _ = pool.Locals() + + if total >= n { + close(done) + + return + } + } + }() + + for _, tx := range batches { + pool.AddRemotesSync([]*types.Transaction{tx}) + } + + for _, tx := range batchesSecond { + pool.AddRemotes([]*types.Transaction{tx}) + } + + <-done +} + +func newTxs(pool *TxPool) *types.Transaction { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + tx := transaction(uint64(0), 100000, key) + + pool.currentState.AddBalance(account, big.NewInt(1_000_000_000)) + + return tx +} + +type acc struct { + nonce uint64 + key *ecdsa.PrivateKey + account common.Address +} + +type testTx struct { + tx *types.Transaction + idx int + isLocal bool +} + +const localIdx = 0 + +func getTransactionGen(t *rapid.T, keys []*acc, nonces []uint64, localKey *acc, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64) *testTx { + idx := rapid.IntRange(0, len(keys)-1).Draw(t, "accIdx").(int) + + var ( + isLocal bool + key *ecdsa.PrivateKey + ) + + if idx == localIdx { + isLocal = true + key = localKey.key + } else { + key = keys[idx].key + } + + nonces[idx]++ + + gasPriceUint := rapid.Uint64Range(gasPriceMin, gasPriceMax).Draw(t, "gasPrice").(uint64) + gasPrice := big.NewInt(0).SetUint64(gasPriceUint) + gasLimit := rapid.Uint64Range(gasLimitMin, gasLimitMax).Draw(t, "gasLimit").(uint64) + + return &testTx{ + tx: pricedTransaction(nonces[idx]-1, gasLimit, gasPrice, key), + idx: idx, + isLocal: isLocal, + } +} + +type transactionBatches struct { + txs []*testTx + totalTxs int +} + +func transactionsGen(keys []*acc, nonces []uint64, localKey *acc, minTxs int, maxTxs int, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64, caseParams *strings.Builder) func(t *rapid.T) *transactionBatches { + return func(t *rapid.T) *transactionBatches { + totalTxs := rapid.IntRange(minTxs, maxTxs).Draw(t, "totalTxs").(int) + txs := make([]*testTx, totalTxs) + + gasValues := make([]float64, totalTxs) fmt.Fprintf(caseParams, " totalTxs = %d;", 
totalTxs) @@ -2878,20 +3468,20 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { wg.Wait() var ( - addIntoTxPool func(tx []*types.Transaction) []error + addIntoTxPool func(tx *types.Transaction) error totalInBatch int ) for _, tx := range txs.txs { - addIntoTxPool = pool.AddRemotesSync + addIntoTxPool = pool.AddRemoteSync if tx.isLocal { - addIntoTxPool = pool.AddLocals + addIntoTxPool = pool.AddLocal } - err := addIntoTxPool([]*types.Transaction{tx.tx}) - if len(err) != 0 && err[0] != nil { - rt.Log("on adding a transaction to the tx pool", err[0], tx.tx.Gas(), tx.tx.GasPrice(), pool.GasPrice(), getBalance(pool, keys[tx.idx].account)) + err := addIntoTxPool(tx.tx) + if err != nil { + rt.Log("on adding a transaction to the tx pool", err, tx.tx.Gas(), tx.tx.GasPrice(), pool.GasPrice(), getBalance(pool, keys[tx.idx].account)) } } @@ -2930,7 +3520,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { // check if txPool got stuck if currentTxPoolStats == lastTxPoolStats { - stuckBlocks++ //todo: переписать + stuckBlocks++ //todo: need something better than that } else { stuckBlocks = 0 lastTxPoolStats = currentTxPoolStats } @@ -2938,7 +3528,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { // copy-paste start := time.Now() - pending := pool.Pending(true) + pending := pool.Pending(context.Background(), true) locals := pool.Locals() // from fillTransactions @@ -2956,7 +3546,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) { // check for nonce gaps var lastNonce, currentNonce int - pending = pool.Pending(true) + pending = pool.Pending(context.Background(), true) for txAcc, pendingTxs := range pending { lastNonce = int(pool.Nonce(txAcc)) - len(pendingTxs) - 1 @@ -3026,7 +3616,7 @@ func fillTransactions(ctx context.Context, pool *TxPool, locals []common.Address signer := types.NewLondonSigner(big.NewInt(1)) // fake baseFee - baseFee := big.NewInt(1) + baseFee := uint256.NewInt(1) blockGasLimit := gasLimit @@ -3083,7 +3673,10 @@ func commitTransactions(pool *TxPool, txs *types.TransactionsByPriceAndNonce, bl if tx.Gas() <= blockGasLimit { blockGasLimit -= tx.Gas() + + pool.mu.Lock() pool.removeTx(tx.Hash(), false) + pool.mu.Unlock() txCount++ } else { @@ -3098,3 +3691,885 @@ func MakeWithPromoteTxCh(ch chan struct{}) func(*TxPool) { pool.promoteTxCh = ch } } + +//nolint:thelper +func mining(tb testing.TB, pool *TxPool, signer types.Signer, baseFee *uint256.Int, blockGasLimit uint64, totalBlocks int) (int, time.Duration, time.Duration) { + var ( + localTxsCount int + remoteTxsCount int + localTxs = make(map[common.Address]types.Transactions) + remoteTxs map[common.Address]types.Transactions + total int + ) + + start := time.Now() + + pending := pool.Pending(context.Background(), true) + + pendingDuration := time.Since(start) + + remoteTxs = pending + + locals := pool.Locals() + + pendingLen, queuedLen := pool.Stats() + + for _, account := range locals { + if txs := remoteTxs[account]; len(txs) > 0 { + delete(remoteTxs, account) + + localTxs[account] = txs + } + } + + localTxsCount = len(localTxs) + remoteTxsCount = len(remoteTxs) + + var txLocalCount int + + if localTxsCount > 0 { + txs := types.NewTransactionsByPriceAndNonce(signer, localTxs, baseFee) + + blockGasLimit, txLocalCount = commitTransactions(pool, txs, blockGasLimit) + + total += txLocalCount + } + + var txRemoteCount int + + if remoteTxsCount > 0 { + txs := types.NewTransactionsByPriceAndNonce(signer, remoteTxs, baseFee) + + _, txRemoteCount = commitTransactions(pool, 
txs, blockGasLimit) + + total += txRemoteCount + } + + miningDuration := time.Since(start) + + tb.Logf("[%s] mining block. block %d. total %d: pending %d(added %d), local %d(added %d), queued %d, localTxsCount %d, remoteTxsCount %d, pending %v, mining %v", + common.NowMilliseconds(), totalBlocks, total, pendingLen, txRemoteCount, localTxsCount, txLocalCount, queuedLen, localTxsCount, remoteTxsCount, pendingDuration, miningDuration) + + return total, pendingDuration, miningDuration +} + +//nolint:paralleltest +func TestPoolMiningDataRaces(t *testing.T) { + if testing.Short() { + t.Skip("only for data race testing") + } + + const format = "size %d, txs ticker %v, api ticker %v" + + cases := []struct { + name string + size int + txsTickerDuration time.Duration + apiTickerDuration time.Duration + }{ + { + size: 1, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 1, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 1, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 1, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 5, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 5, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 5, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 5, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 10, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 10, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 10, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 10, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 20, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 20, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 20, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 20, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + + { + size: 30, + txsTickerDuration: 200 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 30, + txsTickerDuration: 400 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 30, + txsTickerDuration: 600 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + { + size: 30, + txsTickerDuration: 800 * time.Millisecond, + apiTickerDuration: 10 * time.Millisecond, + }, + } + + for i := range cases { + cases[i].name = fmt.Sprintf(format, cases[i].size, cases[i].txsTickerDuration, cases[i].apiTickerDuration) + } + + //nolint:paralleltest + for _, testCase := range cases { + singleCase := testCase + + t.Run(singleCase.name, func(t *testing.T) { + defer goleak.VerifyNone(t, leak.IgnoreList()...) 
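Editor's note on the case table above: the twenty literal cases are the full cross-product of five batch sizes ({1, 5, 10, 20, 30}) and four txs-ticker durations ({200, 400, 600, 800} ms), with the API ticker fixed at 10 ms. A hypothetical generator producing the same matrix (caseSpec and buildCases are illustrative names, not part of this patch) could look like:

```go
package main

import (
	"fmt"
	"time"
)

// caseSpec mirrors the anonymous case struct used by TestPoolMiningDataRaces.
type caseSpec struct {
	name              string
	size              int
	txsTickerDuration time.Duration
	apiTickerDuration time.Duration
}

// buildCases enumerates the same 5x4 matrix as the literal table above.
func buildCases() []caseSpec {
	const format = "size %d, txs ticker %v, api ticker %v"

	sizes := []int{1, 5, 10, 20, 30}
	txsTickersMs := []time.Duration{200, 400, 600, 800}

	cases := make([]caseSpec, 0, len(sizes)*len(txsTickersMs))

	for _, size := range sizes {
		for _, ms := range txsTickersMs {
			c := caseSpec{
				size:              size,
				txsTickerDuration: ms * time.Millisecond,
				apiTickerDuration: 10 * time.Millisecond,
			}
			c.name = fmt.Sprintf(format, c.size, c.txsTickerDuration, c.apiTickerDuration)
			cases = append(cases, c)
		}
	}

	return cases
}

func main() {
	for _, c := range buildCases() {
		fmt.Println(c.name)
	}
}
```

Either form works; the literal table has the advantage that each case stays greppable by name when a single t.Run hangs.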
+ + const ( + blocks = 300 + blockGasLimit = 40_000_000 + blockPeriod = time.Second + threads = 10 + batchesSize = 10_000 + timeoutDuration = 10 * blockPeriod + + balanceStr = "1_000_000_000_000" + ) + + apiWithMining(t, balanceStr, batchesSize, singleCase, timeoutDuration, threads, blockPeriod, blocks, blockGasLimit) + }) + } +} + +//nolint:gocognit,thelper +func apiWithMining(tb testing.TB, balanceStr string, batchesSize int, singleCase struct { + name string + size int + txsTickerDuration time.Duration + apiTickerDuration time.Duration +}, timeoutDuration time.Duration, threads int, blockPeriod time.Duration, blocks int, blockGasLimit uint64) { + done := make(chan struct{}) + + var wg sync.WaitGroup + + defer func() { + close(done) + + tb.Logf("[%s] finishing apiWithMining", common.NowMilliseconds()) + + wg.Wait() + + tb.Logf("[%s] apiWithMining finished", common.NowMilliseconds()) + }() + + // Generate a batch of transactions to enqueue into the pool + pendingAddedCh := make(chan struct{}, 1024) + + pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh)) + defer pool.Stop() + + localKeyPub := localKey.PublicKey + account := crypto.PubkeyToAddress(localKeyPub) + + balance, ok := big.NewInt(0).SetString(balanceStr, 0) + if !ok { + tb.Fatal("incorrect initial balance", balanceStr) + } + + testAddBalance(pool, account, balance) + + signer := types.NewEIP155Signer(big.NewInt(1)) + baseFee := uint256.NewInt(1) + + batchesLocal := make([]types.Transactions, batchesSize) + batchesRemote := make([]types.Transactions, batchesSize) + batchesRemotes := make([]types.Transactions, batchesSize) + batchesRemoteSync := make([]types.Transactions, batchesSize) + batchesRemotesSync := make([]types.Transactions, batchesSize) + + for i := 0; i < batchesSize; i++ { + batchesLocal[i] = make(types.Transactions, singleCase.size) + + for j := 0; j < singleCase.size; j++ { + batchesLocal[i][j] = pricedTransaction(uint64(singleCase.size*i+j), 100_000, big.NewInt(int64(i+1)), localKey) + } + + batchesRemote[i] = make(types.Transactions, singleCase.size) + + remoteKey, _ := crypto.GenerateKey() + remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey) + testAddBalance(pool, remoteAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemote[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remoteKey) + } + + batchesRemotes[i] = make(types.Transactions, singleCase.size) + + remotesKey, _ := crypto.GenerateKey() + remotesAddr := crypto.PubkeyToAddress(remotesKey.PublicKey) + testAddBalance(pool, remotesAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemotes[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remotesKey) + } + + batchesRemoteSync[i] = make(types.Transactions, singleCase.size) + + remoteSyncKey, _ := crypto.GenerateKey() + remoteSyncAddr := crypto.PubkeyToAddress(remoteSyncKey.PublicKey) + testAddBalance(pool, remoteSyncAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemoteSync[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remoteSyncKey) + } + + batchesRemotesSync[i] = make(types.Transactions, singleCase.size) + + remotesSyncKey, _ := crypto.GenerateKey() + remotesSyncAddr := crypto.PubkeyToAddress(remotesSyncKey.PublicKey) + testAddBalance(pool, remotesSyncAddr, balance) + + for j := 0; j < singleCase.size; j++ { + batchesRemotesSync[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), 
remotesSyncKey) + } + } + + tb.Logf("[%s] starting goroutines", common.NowMilliseconds()) + + txsTickerDuration := singleCase.txsTickerDuration + apiTickerDuration := singleCase.apiTickerDuration + + // locals + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddLocal(s)", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddLocal(s)", common.NowMilliseconds()) + }() + + tb.Logf("[%s] starting AddLocal(s)", common.NowMilliseconds()) + + for _, batch := range batchesLocal { + batch := batch + + select { + case <-done: + return + default: + } + + if rand.Int()%2 == 0 { + runWithTimeout(tb, func(_ chan struct{}) { + errs := pool.AddLocals(batch) + if len(errs) != 0 { + tb.Logf("[%s] AddLocals error, %v", common.NowMilliseconds(), errs) + } + }, done, "AddLocals", timeoutDuration, 0, 0) + } else { + for _, tx := range batch { + tx := tx + + runWithTimeout(tb, func(_ chan struct{}) { + err := pool.AddLocal(tx) + if err != nil { + tb.Logf("[%s] AddLocal error %s", common.NowMilliseconds(), err) + } + }, done, "AddLocal", timeoutDuration, 0, 0) + + time.Sleep(txsTickerDuration) + } + } + + time.Sleep(txsTickerDuration) + } + }() + + // remotes + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemotes", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemotes", common.NowMilliseconds()) + }() + + addTransactionsBatches(tb, batchesRemotes, getFnForBatches(pool.AddRemotes), done, timeoutDuration, txsTickerDuration, "AddRemotes", 0) + }() + + // remote + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemote", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemote", common.NowMilliseconds()) + }() + + addTransactions(tb, batchesRemote, pool.AddRemote, done, timeoutDuration, txsTickerDuration, "AddRemote", 0) + }() + + // sync + // remotes + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemotesSync", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemotesSync", common.NowMilliseconds()) + }() + + addTransactionsBatches(tb, batchesRemotesSync, getFnForBatches(pool.AddRemotesSync), done, timeoutDuration, txsTickerDuration, "AddRemotesSync", 0) + }() + + // remote + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping AddRemoteSync", common.NowMilliseconds()) + + wg.Done() + + tb.Logf("[%s] stopped AddRemoteSync", common.NowMilliseconds()) + }() + + addTransactions(tb, batchesRemoteSync, pool.AddRemoteSync, done, timeoutDuration, txsTickerDuration, "AddRemoteSync", 0) + }() + + // tx pool API + for i := 0; i < threads; i++ { + i := i + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Pending-no-tips, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Pending-no-tips, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p := pool.Pending(context.Background(), false) + fmt.Fprint(io.Discard, p) + }, done, "Pending-no-tips", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Pending-with-tips, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Pending-with-tips, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p := pool.Pending(context.Background(), true) + fmt.Fprint(io.Discard, p) + }, done, "Pending-with-tips", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer 
func() { + tb.Logf("[%s] stopping Locals, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Locals, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + l := pool.Locals() + fmt.Fprint(io.Discard, l) + }, done, "Locals", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Content, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Content, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p, q := pool.Content() + fmt.Fprint(io.Discard, p, q) + }, done, "Content", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping GasPriceUint256, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped GasPriceUint256, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.GasPriceUint256() + fmt.Fprint(io.Discard, res) + }, done, "GasPriceUint256", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping GasPrice, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped GasPrice, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.GasPrice() + fmt.Fprint(io.Discard, res) + }, done, "GasPrice", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping SetGasPrice, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped SetGasPrice, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + pool.SetGasPrice(pool.GasPrice()) + }, done, "SetGasPrice", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping ContentFrom, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped ContentFrom, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p, q := pool.ContentFrom(account) + fmt.Fprint(io.Discard, p, q) + }, done, "ContentFrom", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Has, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Has, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.Has(batchesRemotes[0][0].Hash()) + fmt.Fprint(io.Discard, res) + }, done, "Has", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Get, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Get, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + tx := pool.Get(batchesRemotes[0][0].Hash()) + fmt.Fprint(io.Discard, tx == nil) + }, done, "Get", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Nonce, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Nonce, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + res := pool.Nonce(account) + fmt.Fprint(io.Discard, res) + }, done, "Nonce", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + 
tb.Logf("[%s] stopping Stats, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Stats, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + p, q := pool.Stats() + fmt.Fprint(io.Discard, p, q) + }, done, "Stats", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping Status, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped Status, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(_ chan struct{}) { + st := pool.Status([]common.Hash{batchesRemotes[1][0].Hash()}) + fmt.Fprint(io.Discard, st) + }, done, "Status", apiTickerDuration, timeoutDuration, i) + }() + + wg.Add(1) + + go func() { + defer func() { + tb.Logf("[%s] stopping SubscribeNewTxsEvent, thread %d", common.NowMilliseconds(), i) + + wg.Done() + + tb.Logf("[%s] stopped SubscribeNewTxsEvent, thread %d", common.NowMilliseconds(), i) + }() + + runWithTicker(tb, func(c chan struct{}) { + ch := make(chan NewTxsEvent, 10) + sub := pool.SubscribeNewTxsEvent(ch) + + if sub == nil { + return + } + + defer sub.Unsubscribe() + + select { + case <-done: + return + case <-c: + case res := <-ch: + fmt.Fprint(io.Discard, res) + } + + }, done, "SubscribeNewTxsEvent", apiTickerDuration, timeoutDuration, i) + }() + } + + // wait for the start + tb.Logf("[%s] before the first propagated transaction", common.NowMilliseconds()) + <-pendingAddedCh + tb.Logf("[%s] after the first propagated transaction", common.NowMilliseconds()) + + var ( + totalTxs int + totalBlocks int + ) + + pendingDurations := make([]time.Duration, 0, blocks) + + var ( + added int + pendingDuration time.Duration + miningDuration time.Duration + diff time.Duration + ) + + for { + added, pendingDuration, miningDuration = mining(tb, pool, signer, baseFee, blockGasLimit, totalBlocks) + + totalTxs += added + + pendingDurations = append(pendingDurations, pendingDuration) + + totalBlocks++ + + if totalBlocks > blocks { + fmt.Fprint(io.Discard, totalTxs) + break + } + + diff = blockPeriod - miningDuration + if diff > 0 { + time.Sleep(diff) + } + } + + pendingDurationsFloat := make([]float64, len(pendingDurations)) + + for i, v := range pendingDurations { + pendingDurationsFloat[i] = float64(v.Nanoseconds()) + } + + mean, stddev := stat.MeanStdDev(pendingDurationsFloat, nil) + tb.Logf("[%s] pending mean %v, stddev %v, %v-%v", + common.NowMilliseconds(), time.Duration(mean), time.Duration(stddev), time.Duration(floats.Min(pendingDurationsFloat)), time.Duration(floats.Max(pendingDurationsFloat))) +} + +func addTransactionsBatches(tb testing.TB, batches []types.Transactions, fn func(types.Transactions) error, done chan struct{}, timeoutDuration time.Duration, tickerDuration time.Duration, name string, thread int) { + tb.Helper() + + tb.Logf("[%s] starting %s", common.NowMilliseconds(), name) + + defer func() { + tb.Logf("[%s] stop %s", common.NowMilliseconds(), name) + }() + + for _, batch := range batches { + batch := batch + + select { + case <-done: + return + default: + } + + runWithTimeout(tb, func(_ chan struct{}) { + err := fn(batch) + if err != nil { + tb.Logf("[%s] %s error: %s", common.NowMilliseconds(), name, err) + } + }, done, name, timeoutDuration, 0, thread) + + time.Sleep(tickerDuration) + } +} + +func addTransactions(tb testing.TB, batches []types.Transactions, fn func(*types.Transaction) error, done chan struct{}, timeoutDuration time.Duration, tickerDuration time.Duration, name string, 
thread int) { + tb.Helper() + + tb.Logf("[%s] starting %s", common.NowMilliseconds(), name) + + defer func() { + tb.Logf("[%s] stop %s", common.NowMilliseconds(), name) + }() + + for _, batch := range batches { + for _, tx := range batch { + tx := tx + + select { + case <-done: + return + default: + } + + runWithTimeout(tb, func(_ chan struct{}) { + err := fn(tx) + if err != nil { + tb.Logf("[%s] %s error: %s", common.NowMilliseconds(), name, err) + } + }, done, name, timeoutDuration, 0, thread) + + time.Sleep(tickerDuration) + } + + time.Sleep(tickerDuration) + } +} + +func getFnForBatches(fn func([]*types.Transaction) []error) func(types.Transactions) error { + return func(batch types.Transactions) error { + errs := fn(batch) + if len(errs) != 0 { + return errs[0] + } + + return nil + } +} + +//nolint:unparam +func runWithTicker(tb testing.TB, fn func(c chan struct{}), done chan struct{}, name string, tickerDuration, timeoutDuration time.Duration, thread int) { + tb.Helper() + + select { + case <-done: + tb.Logf("[%s] Short path. finishing outer runWithTicker for %q, thread %d", common.NowMilliseconds(), name, thread) + + return + default: + } + + defer func() { + tb.Logf("[%s] finishing outer runWithTicker for %q, thread %d", common.NowMilliseconds(), name, thread) + }() + + localTicker := time.NewTicker(tickerDuration) + defer localTicker.Stop() + + n := 0 + + for range localTicker.C { + select { + case <-done: + return + default: + } + + runWithTimeout(tb, fn, done, name, timeoutDuration, n, thread) + + n++ + } +} + +func runWithTimeout(tb testing.TB, fn func(chan struct{}), outerDone chan struct{}, name string, timeoutDuration time.Duration, n, thread int) { + tb.Helper() + + select { + case <-outerDone: + tb.Logf("[%s] Short path. exiting inner runWithTimeout by outer exit event for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n) + + return + default: + } + + timeout := time.NewTimer(timeoutDuration) + defer timeout.Stop() + + doneCh := make(chan struct{}) + + isError := new(int32) + *isError = 0 + + go func() { + defer close(doneCh) + + select { + case <-outerDone: + return + default: + fn(doneCh) + } + }() + + const isDebug = false + + var stack string + + select { + case <-outerDone: + tb.Logf("[%s] exiting inner runWithTimeout by outer exit event for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n) + case <-doneCh: + // only for debug + //tb.Logf("[%s] exiting inner runWithTimeout by successful call for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n) + case <-timeout.C: + atomic.StoreInt32(isError, 1) + + if isDebug { + stack = string(debug.Stack(true)) + } + + tb.Errorf("[%s] %s timed out, thread %d, iteration %d. Stack %s", common.NowMilliseconds(), name, thread, n, stack) + } +} diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go index 8ad5e739e9..509f86b622 100644 --- a/core/types/access_list_tx.go +++ b/core/types/access_list_tx.go @@ -19,6 +19,8 @@ package types import ( "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" ) @@ -44,15 +46,16 @@ func (al AccessList) StorageKeys() int { // AccessListTx is the data of EIP-2930 access list transactions. 
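Editor's note: the core/types files patched below all follow one recipe: every exported *big.Int fee field gains an unexported *uint256.Int twin that is converted once and then reused on later reads. A standalone sketch of that lazy-conversion shape (u256View and its parameters are illustrative names only, not the patch's API):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/holiman/uint256"
)

// u256View is an illustrative stand-in for the gasPriceU256/gasTipCapU256/
// gasFeeCapU256 accessors added below: convert the *big.Int once, then serve
// the cached *uint256.Int on every later call.
func u256View(fee *big.Int, cache **uint256.Int) *uint256.Int {
	if *cache != nil {
		return *cache // cache hit: no conversion, no allocation
	}

	u, _ := uint256.FromBig(fee) // overflow flag ignored, as in the patch
	*cache = u

	return u
}

func main() {
	price := big.NewInt(30_000_000_000) // 30 gwei

	var cached *uint256.Int

	fmt.Println(u256View(price, &cached).Uint64()) // converts on first use
	fmt.Println(u256View(price, &cached).Uint64()) // served from the cache
}
```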
type AccessListTx struct { - ChainID *big.Int // destination chain ID - Nonce uint64 // nonce of sender account - GasPrice *big.Int // wei per gas - Gas uint64 // gas limit - To *common.Address `rlp:"nil"` // nil means contract creation - Value *big.Int // wei amount - Data []byte // contract invocation input data - AccessList AccessList // EIP-2930 access list - V, R, S *big.Int // signature values + ChainID *big.Int // destination chain ID + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + gasPriceUint256 *uint256.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + AccessList AccessList // EIP-2930 access list + V, R, S *big.Int // signature values } // copy creates a deep copy of the transaction data and initializes all fields. @@ -80,6 +83,12 @@ func (tx *AccessListTx) copy() TxData { } if tx.GasPrice != nil { cpy.GasPrice.Set(tx.GasPrice) + + if cpy.gasPriceUint256 != nil { + cpy.gasPriceUint256.Set(tx.gasPriceUint256) + } else { + cpy.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + } } if tx.V != nil { cpy.V.Set(tx.V) @@ -100,11 +109,39 @@ func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } func (tx *AccessListTx) data() []byte { return tx.Data } func (tx *AccessListTx) gas() uint64 { return tx.Gas } func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) value() *big.Int { return tx.Value } -func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } -func (tx *AccessListTx) to() *common.Address { return tx.To } +func (tx *AccessListTx) gasPriceU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} + +func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasTipCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasFeeCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *AccessListTx) value() *big.Int { return tx.Value } +func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } +func (tx *AccessListTx) to() *common.Address { return tx.To } func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index 53f246ea1f..532544d54e 100644 --- a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -19,19 +19,23 @@ package types import ( "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" ) type DynamicFeeTx struct { - ChainID *big.Int - Nonce uint64 - GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas - GasFeeCap *big.Int // a.k.a. maxFeePerGas - Gas uint64 - To *common.Address `rlp:"nil"` // nil means contract creation - Value *big.Int - Data []byte - AccessList AccessList + ChainID *big.Int + Nonce uint64 + GasTipCap *big.Int // a.k.a. 
maxPriorityFeePerGas + gasTipCapUint256 *uint256.Int // a.k.a. maxPriorityFeePerGas + GasFeeCap *big.Int // a.k.a. maxFeePerGas + gasFeeCapUint256 *uint256.Int // a.k.a. maxFeePerGas + Gas uint64 + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int + Data []byte + AccessList AccessList // Signature values V *big.Int `json:"v" gencodec:"required"` @@ -65,9 +69,21 @@ func (tx *DynamicFeeTx) copy() TxData { } if tx.GasTipCap != nil { cpy.GasTipCap.Set(tx.GasTipCap) + + if cpy.gasTipCapUint256 != nil { + cpy.gasTipCapUint256.Set(tx.gasTipCapUint256) + } else { + cpy.gasTipCapUint256, _ = uint256.FromBig(tx.GasTipCap) + } } if tx.GasFeeCap != nil { cpy.GasFeeCap.Set(tx.GasFeeCap) + + if cpy.gasFeeCapUint256 != nil { + cpy.gasFeeCapUint256.Set(tx.gasFeeCapUint256) + } else { + cpy.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap) + } } if tx.V != nil { cpy.V.Set(tx.V) @@ -88,11 +104,38 @@ func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } func (tx *DynamicFeeTx) data() []byte { return tx.Data } func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } -func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } -func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } -func (tx *DynamicFeeTx) to() *common.Address { return tx.To } +func (tx *DynamicFeeTx) gasFeeCapU256() *uint256.Int { + if tx.gasFeeCapUint256 != nil { + return tx.gasFeeCapUint256 + } + + tx.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap) + + return tx.gasFeeCapUint256 +} +func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } +func (tx *DynamicFeeTx) gasTipCapU256() *uint256.Int { + if tx.gasTipCapUint256 != nil { + return tx.gasTipCapUint256 + } + + tx.gasTipCapUint256, _ = uint256.FromBig(tx.GasTipCap) + + return tx.gasTipCapUint256 +} +func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) gasPriceU256() *uint256.Int { + if tx.gasFeeCapUint256 != nil { + return tx.gasFeeCapUint256 + } + + tx.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap) + + return tx.gasFeeCapUint256 +} +func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } +func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } +func (tx *DynamicFeeTx) to() *common.Address { return tx.To } func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index cb86bed772..72fcd34fa5 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -19,18 +19,21 @@ package types import ( "math/big" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" ) // LegacyTx is the transaction data of regular Ethereum transactions. 
type LegacyTx struct { - Nonce uint64 // nonce of sender account - GasPrice *big.Int // wei per gas - Gas uint64 // gas limit - To *common.Address `rlp:"nil"` // nil means contract creation - Value *big.Int // wei amount - Data []byte // contract invocation input data - V, R, S *big.Int // signature values + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + gasPriceUint256 *uint256.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + V, R, S *big.Int // signature values } // NewTransaction creates an unsigned legacy transaction. @@ -77,6 +80,12 @@ func (tx *LegacyTx) copy() TxData { } if tx.GasPrice != nil { cpy.GasPrice.Set(tx.GasPrice) + + if cpy.gasPriceUint256 != nil { + cpy.gasPriceUint256.Set(tx.gasPriceUint256) + } else { + cpy.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + } } if tx.V != nil { cpy.V.Set(tx.V) @@ -97,11 +106,38 @@ func (tx *LegacyTx) accessList() AccessList { return nil } func (tx *LegacyTx) data() []byte { return tx.Data } func (tx *LegacyTx) gas() uint64 { return tx.Gas } func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) value() *big.Int { return tx.Value } -func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } -func (tx *LegacyTx) to() *common.Address { return tx.To } +func (tx *LegacyTx) gasPriceU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasTipCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasFeeCapU256() *uint256.Int { + if tx.gasPriceUint256 != nil { + return tx.gasPriceUint256 + } + + tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice) + + return tx.gasPriceUint256 +} +func (tx *LegacyTx) value() *big.Int { return tx.Value } +func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } +func (tx *LegacyTx) to() *common.Address { return tx.To } func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/core/types/transaction.go b/core/types/transaction.go index e0e52f25bc..9b89f12517 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -25,6 +25,8 @@ import ( "sync/atomic" "time" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" @@ -53,9 +55,9 @@ type Transaction struct { time time.Time // Time first seen locally (spam avoidance) // caches - hash atomic.Value - size atomic.Value - from atomic.Value + hash atomic.Pointer[common.Hash] + size atomic.Pointer[common.StorageSize] + from atomic.Pointer[sigCache] } // NewTx creates a new transaction. 
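Editor's note: the caches hunk above replaces atomic.Value with atomic.Pointer[T] (Go 1.19+), trading interface boxing and a per-read type assertion for a plain pointer load. A minimal, self-contained sketch of the same read-through cache pattern (hashCache and the fake hashing work are illustrative, not the patch's code):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// hashCache shows the atomic.Pointer caching pattern adopted for the
// transaction hash/size/from caches: compute once, publish a pointer,
// and dereference on later reads without a type assertion.
type hashCache struct {
	hash atomic.Pointer[[32]byte]
}

func (c *hashCache) Hash() [32]byte {
	if h := c.hash.Load(); h != nil {
		return *h // cache hit: a single pointer load
	}

	var h [32]byte
	h[0] = 0x42 // stand-in for the real hashing work

	c.hash.Store(&h)

	return h
}

func main() {
	var c hashCache
	fmt.Println(c.Hash() == c.Hash()) // true; the second call is served from the cache
}
```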
@@ -77,8 +79,11 @@ type TxData interface { data() []byte gas() uint64 gasPrice() *big.Int + gasPriceU256() *uint256.Int gasTipCap() *big.Int + gasTipCapU256() *uint256.Int gasFeeCap() *big.Int + gasFeeCapU256() *uint256.Int value() *big.Int nonce() uint64 to() *common.Address @@ -194,7 +199,8 @@ func (tx *Transaction) setDecoded(inner TxData, size int) { tx.inner = inner tx.time = time.Now() if size > 0 { - tx.size.Store(common.StorageSize(size)) + v := float64(size) + tx.size.Store((*common.StorageSize)(&v)) } } @@ -265,16 +271,23 @@ func (tx *Transaction) AccessList() AccessList { return tx.inner.accessList() } func (tx *Transaction) Gas() uint64 { return tx.inner.gas() } // GasPrice returns the gas price of the transaction. -func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) } +func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) } +func (tx *Transaction) GasPriceRef() *big.Int { return tx.inner.gasPrice() } +func (tx *Transaction) GasPriceUint() *uint256.Int { return tx.inner.gasPriceU256() } // GasTipCap returns the gasTipCap per gas of the transaction. -func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) } +func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) } +func (tx *Transaction) GasTipCapRef() *big.Int { return tx.inner.gasTipCap() } +func (tx *Transaction) GasTipCapUint() *uint256.Int { return tx.inner.gasTipCapU256() } // GasFeeCap returns the fee cap per gas of the transaction. -func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) } +func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) } +func (tx *Transaction) GasFeeCapRef() *big.Int { return tx.inner.gasFeeCap() } +func (tx *Transaction) GasFeeCapUint() *uint256.Int { return tx.inner.gasFeeCapU256() } // Value returns the ether amount of the transaction. -func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) } +func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) } +func (tx *Transaction) ValueRef() *big.Int { return tx.inner.value() } // Nonce returns the sender account nonce of the transaction. func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() } @@ -287,9 +300,19 @@ func (tx *Transaction) To() *common.Address { // Cost returns gas * gasPrice + value. func (tx *Transaction) Cost() *big.Int { - total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) - total.Add(total, tx.Value()) - return total + gasPrice, _ := uint256.FromBig(tx.GasPriceRef()) + gasPrice.Mul(gasPrice, uint256.NewInt(tx.Gas())) + value, _ := uint256.FromBig(tx.ValueRef()) + + return gasPrice.Add(gasPrice, value).ToBig() +} + +func (tx *Transaction) CostUint() *uint256.Int { + gasPrice, _ := uint256.FromBig(tx.GasPriceRef()) + gasPrice.Mul(gasPrice, uint256.NewInt(tx.Gas())) + value, _ := uint256.FromBig(tx.ValueRef()) + + return gasPrice.Add(gasPrice, value) } // RawSignatureValues returns the V, R, S signature values of the transaction. @@ -303,11 +326,18 @@ func (tx *Transaction) GasFeeCapCmp(other *Transaction) int { return tx.inner.gasFeeCap().Cmp(other.inner.gasFeeCap()) } -// GasFeeCapIntCmp compares the fee cap of the transaction against the given fee cap. 
func (tx *Transaction) GasFeeCapIntCmp(other *big.Int) int { return tx.inner.gasFeeCap().Cmp(other) } +func (tx *Transaction) GasFeeCapUIntCmp(other *uint256.Int) int { + return tx.inner.gasFeeCapU256().Cmp(other) +} + +func (tx *Transaction) GasFeeCapUIntLt(other *uint256.Int) bool { + return tx.inner.gasFeeCapU256().Lt(other) +} + // GasTipCapCmp compares the gasTipCap of two transactions. func (tx *Transaction) GasTipCapCmp(other *Transaction) int { return tx.inner.gasTipCap().Cmp(other.inner.gasTipCap()) @@ -318,6 +348,14 @@ func (tx *Transaction) GasTipCapIntCmp(other *big.Int) int { return tx.inner.gasTipCap().Cmp(other) } +func (tx *Transaction) GasTipCapUIntCmp(other *uint256.Int) int { + return tx.inner.gasTipCapU256().Cmp(other) +} + +func (tx *Transaction) GasTipCapUIntLt(other *uint256.Int) bool { + return tx.inner.gasTipCapU256().Lt(other) +} + // EffectiveGasTip returns the effective miner gasTipCap for the given base fee. // Note: if the effective gasTipCap is negative, this method returns both error // the actual negative value, _and_ ErrGasFeeCapTooLow @@ -356,10 +394,73 @@ func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) i return tx.EffectiveGasTipValue(baseFee).Cmp(other) } +func (tx *Transaction) EffectiveGasTipUintCmp(other *uint256.Int, baseFee *uint256.Int) int { + if baseFee == nil { + return tx.GasTipCapUIntCmp(other) + } + + return tx.EffectiveGasTipValueUint(baseFee).Cmp(other) +} + +func (tx *Transaction) EffectiveGasTipUintLt(other *uint256.Int, baseFee *uint256.Int) bool { + if baseFee == nil { + return tx.GasTipCapUIntLt(other) + } + + return tx.EffectiveGasTipValueUint(baseFee).Lt(other) +} + +func (tx *Transaction) EffectiveGasTipTxUintCmp(other *Transaction, baseFee *uint256.Int) int { + if baseFee == nil { + return tx.inner.gasTipCapU256().Cmp(other.inner.gasTipCapU256()) + } + + return tx.EffectiveGasTipValueUint(baseFee).Cmp(other.EffectiveGasTipValueUint(baseFee)) +} + +func (tx *Transaction) EffectiveGasTipValueUint(baseFee *uint256.Int) *uint256.Int { + effectiveTip, _ := tx.EffectiveGasTipUnit(baseFee) + return effectiveTip +} + +func (tx *Transaction) EffectiveGasTipUnit(baseFee *uint256.Int) (*uint256.Int, error) { + if baseFee == nil { + return tx.GasTipCapUint(), nil + } + + var err error + + gasFeeCap := tx.GasFeeCapUint().Clone() + + if gasFeeCap.Lt(baseFee) { + err = ErrGasFeeCapTooLow + } + + gasTipCapUint := tx.GasTipCapUint() + + // The effective tip is min(gasTipCap, gasFeeCap-baseFee), clamped at gasFeeCap. + if gasFeeCap.Lt(gasTipCapUint) { + return gasFeeCap, err + } + + gasFeeCap.Sub(gasFeeCap, baseFee) + + if gasFeeCap.Gt(gasTipCapUint) || gasFeeCap.Eq(gasTipCapUint) { + return gasTipCapUint, err + } + + return gasFeeCap, err +} + // Hash returns the transaction hash. func (tx *Transaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { - return hash.(common.Hash) + return *hash } var h common.Hash @@ -368,7 +469,9 @@ func (tx *Transaction) Hash() common.Hash { } else { h = prefixedRlpHash(tx.Type(), tx.inner) } - tx.hash.Store(h) + + tx.hash.Store(&h) + return h } @@ -376,11 +479,14 @@ func (tx *Transaction) Hash() common.Hash { // encoding and returning it, or returning a previously cached value. 
func (tx *Transaction) Size() common.StorageSize { if size := tx.size.Load(); size != nil { - return size.(common.StorageSize) + return *size } + c := writeCounter(0) + rlp.Encode(&c, &tx.inner) - tx.size.Store(common.StorageSize(c)) + tx.size.Store((*common.StorageSize)(&c)) + return common.StorageSize(c) } @@ -444,14 +550,14 @@ func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap type TxWithMinerFee struct { tx *Transaction - minerFee *big.Int + minerFee *uint256.Int } // NewTxWithMinerFee creates a wrapped transaction, calculating the effective // miner gasTipCap if a base fee is provided. // Returns error in case of a negative effective miner gasTipCap. -func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) { - minerFee, err := tx.EffectiveGasTip(baseFee) +func NewTxWithMinerFee(tx *Transaction, baseFee *uint256.Int) (*TxWithMinerFee, error) { + minerFee, err := tx.EffectiveGasTipUnit(baseFee) if err != nil { return nil, err } @@ -496,7 +602,7 @@ type TransactionsByPriceAndNonce struct { txs map[common.Address]Transactions // Per account nonce-sorted list of transactions heads TxByPriceAndTime // Next transaction for each unique account (price heap) signer Signer // Signer for the set of transactions - baseFee *big.Int // Current base fee + baseFee *uint256.Int // Current base fee } // NewTransactionsByPriceAndNonce creates a transaction set that can retrieve @@ -504,6 +610,7 @@ type TransactionsByPriceAndNonce struct { // // Note, the input map is reowned so the caller should not interact any more with // if after providing it to the constructor. +/* func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce { // Initialize a price and received time based heap with the head transactions heads := make(TxByPriceAndTime, 0, len(txs)) @@ -524,6 +631,39 @@ func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transa } heap.Init(&heads) + // Assemble and return the transaction set + return &TransactionsByPriceAndNonce{ + txs: txs, + heads: heads, + signer: signer, + baseFee: baseFee, + } +}*/ + +func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *uint256.Int) *TransactionsByPriceAndNonce { + // Initialize a price and received time based heap with the head transactions + heads := make(TxByPriceAndTime, 0, len(txs)) + + for from, accTxs := range txs { + if len(accTxs) == 0 { + continue + } + + acc, _ := Sender(signer, accTxs[0]) + wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) + + // Remove transaction if sender doesn't match from, or if wrapping fails. + if acc != from || err != nil { + delete(txs, from) + continue + } + + heads = append(heads, wrapped) + txs[from] = accTxs[1:] + } + + heap.Init(&heads) + // Assemble and return the transaction set return &TransactionsByPriceAndNonce{ txs: txs, diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 1d0d2a4c75..959aba637a 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -130,12 +130,11 @@ func MustSignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) *Transaction // not match the signer used in the current call. 
func Sender(signer Signer, tx *Transaction) (common.Address, error) { if sc := tx.from.Load(); sc != nil { - sigCache := sc.(sigCache) // If the signer used to derive from in a previous // call is not the same as used current, invalidate // the cache. - if sigCache.signer.Equal(signer) { - return sigCache.from, nil + if sc.signer.Equal(signer) { + return sc.from, nil } } @@ -143,7 +142,9 @@ func Sender(signer Signer, tx *Transaction) (common.Address, error) { if err != nil { return common.Address{}, err } - tx.from.Store(sigCache{signer: signer, from: addr}) + + tx.from.Store(&sigCache{signer: signer, from: addr}) + return addr, nil } @@ -461,10 +462,10 @@ func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v * func (fs FrontierSigner) Hash(tx *Transaction) common.Hash { return rlpHash([]interface{}{ tx.Nonce(), - tx.GasPrice(), + tx.GasPriceRef(), tx.Gas(), tx.To(), - tx.Value(), + tx.ValueRef(), tx.Data(), }) } diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index a4755675cd..255a7b76b4 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -27,7 +27,10 @@ import ( "testing" "time" + "github.com/holiman/uint256" + "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) @@ -272,14 +275,22 @@ func TestTransactionPriceNonceSort1559(t *testing.T) { // Tests that transactions can be correctly sorted according to their price in // decreasing order, but at the same time with increasing nonces when issued by // the same account. -func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { +// +//nolint:gocognit,thelper +func testTransactionPriceNonceSort(t *testing.T, baseFeeBig *big.Int) { // Generate a batch of accounts to start with keys := make([]*ecdsa.PrivateKey, 25) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() } + signer := LatestSignerForChainID(common.Big1) + var baseFee *uint256.Int + if baseFeeBig != nil { + baseFee = cmath.FromBig(baseFeeBig) + } + // Generate a batch of transactions with overlapping values, but shifted nonces groups := map[common.Address]Transactions{} expectedCount := 0 @@ -308,7 +319,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))), Data: nil, }) - if count == 25 && int64(gasFeeCap) < baseFee.Int64() { + if count == 25 && uint64(gasFeeCap) < baseFee.Uint64() { count = i } } @@ -341,12 +352,25 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce()) } } + // If the next tx has different from account, the price must be lower than the current one if i+1 < len(txs) { next := txs[i+1] fromNext, _ := Sender(signer, next) - tip, err := txi.EffectiveGasTip(baseFee) - nextTip, nextErr := next.EffectiveGasTip(baseFee) + tip, err := txi.EffectiveGasTipUnit(baseFee) + nextTip, nextErr := next.EffectiveGasTipUnit(baseFee) + + tipBig, _ := txi.EffectiveGasTip(baseFeeBig) + nextTipBig, _ := next.EffectiveGasTip(baseFeeBig) + + if tip.Cmp(cmath.FromBig(tipBig)) != 0 { + t.Fatalf("EffectiveGasTip incorrect. uint256 %q, big.Int %q, baseFee %q, baseFeeBig %q", tip.String(), tipBig.String(), baseFee.String(), baseFeeBig.String()) + } + + if nextTip.Cmp(cmath.FromBig(nextTipBig)) != 0 { + t.Fatalf("EffectiveGasTip next incorrect. 
uint256 %q, big.Int %q, baseFee %q, baseFeeBig %q", nextTip.String(), nextTipBig.String(), baseFee.String(), baseFeeBig.String()) + } + if err != nil || nextErr != nil { t.Errorf("error calculating effective tip") } diff --git a/eth/api_backend.go b/eth/api_backend.go index c33f3cf6f2..2c93e60d87 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -236,11 +236,18 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri } func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { - return b.eth.txPool.AddLocal(signedTx) + err := b.eth.txPool.AddLocal(signedTx) + if err != nil { + if unwrapped := errors.Unwrap(err); unwrapped != nil { + return unwrapped + } + } + + return err } func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { - pending := b.eth.txPool.Pending(false) + pending := b.eth.txPool.Pending(context.Background(), false) var txs types.Transactions for _, batch := range pending { txs = append(txs, batch...) diff --git a/eth/bor_checkpoint_verifier.go b/eth/bor_checkpoint_verifier.go index 61e8c382e1..ad81eb6116 100644 --- a/eth/bor_checkpoint_verifier.go +++ b/eth/bor_checkpoint_verifier.go @@ -26,6 +26,7 @@ func newCheckpointVerifier(verifyFn func(ctx context.Context, handler *ethHandle ) // check if we have the checkpoint blocks + //nolint:contextcheck head := handler.ethAPI.BlockNumber() if head < hexutil.Uint64(endBlock) { log.Debug("Head block behind checkpoint block", "head", head, "checkpoint end block", endBlock) diff --git a/eth/handler.go b/eth/handler.go index 8e6d89f9ef..48bdf8eb15 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -17,6 +17,7 @@ package eth import ( + "context" "errors" "math" "math/big" @@ -69,7 +70,7 @@ type txPool interface { // Pending should return pending transactions. // The slice should be modifiable by the caller. - Pending(enforceTips bool) map[common.Address]types.Transactions + Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions // SubscribeNewTxsEvent should return an event subscription of // NewTxsEvent and send events to the given channel. diff --git a/eth/handler_test.go b/eth/handler_test.go index c6d7811d10..7a14619159 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -17,6 +17,7 @@ package eth import ( + "context" "math/big" "sort" "sync" @@ -92,7 +93,7 @@ func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error { } // Pending returns all the transactions known to the pool -func (p *testTxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { +func (p *testTxPool) Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions { p.lock.RLock() defer p.lock.RUnlock() diff --git a/eth/sync.go b/eth/sync.go index aa79b6181c..377acff95c 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -17,6 +17,7 @@ package eth import ( + "context" "errors" "math/big" "sync/atomic" @@ -44,20 +45,24 @@ func (h *handler) syncTransactions(p *eth.Peer) { // // TODO(karalabe): Figure out if we could get away with random order somehow var txs types.Transactions - pending := h.txpool.Pending(false) + + pending := h.txpool.Pending(context.Background(), false) for _, batch := range pending { txs = append(txs, batch...) } + if len(txs) == 0 { return } // The eth/65 protocol introduces proper transaction announcements, so instead // of dripping transactions across multiple peers, just send the entire list as // an announcement and let the remote side decide what they need (likely nothing). 
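Editor's note: the eth/api_backend.go, eth/handler.go, and eth/sync.go hunks above all chase one signature change: txPool.Pending now takes a context, so every caller passes one explicitly. A compilable sketch of a consumer draining the new-style snapshot, mirroring syncTransactions below (pendingSource and emptyPool are illustrative stand-ins, not types from the patch):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// pendingSource mirrors the updated txPool interface: Pending threads a
// context through, so read-heavy callers can be traced or cancelled.
type pendingSource interface {
	Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions
}

// flatten drains a Pending snapshot into one slice, as syncTransactions does.
func flatten(p pendingSource) types.Transactions {
	var txs types.Transactions
	for _, batch := range p.Pending(context.Background(), false) {
		txs = append(txs, batch...)
	}

	return txs
}

type emptyPool struct{}

func (emptyPool) Pending(context.Context, bool) map[common.Address]types.Transactions {
	return nil
}

func main() {
	fmt.Println(len(flatten(emptyPool{}))) // 0 for the empty stand-in pool
}
```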
+ hashes := make([]common.Hash, len(txs)) for i, tx := range txs { hashes[i] = tx.Hash() } + p.AsyncSendPooledTransactionHashes(hashes) } diff --git a/internal/cli/server/pprof/pprof.go b/internal/cli/server/pprof/pprof.go index 44034f3bb8..69056bd0fb 100644 --- a/internal/cli/server/pprof/pprof.go +++ b/internal/cli/server/pprof/pprof.go @@ -61,6 +61,28 @@ func CPUProfile(ctx context.Context, sec int) ([]byte, map[string]string, error) }, nil } +// CPUProfileWithChannel generates a CPU profile, stopping when done is signalled or after 30 seconds +func CPUProfileWithChannel(done chan bool) ([]byte, map[string]string, error) { + var buf bytes.Buffer + if err := pprof.StartCPUProfile(&buf); err != nil { + return nil, nil, err + } + + select { + case <-done: + case <-time.After(30 * time.Second): + } + + pprof.StopCPUProfile() + + return buf.Bytes(), + map[string]string{ + "X-Content-Type-Options": "nosniff", + "Content-Type": "application/octet-stream", + "Content-Disposition": `attachment; filename="profile"`, + }, nil +} + // Trace runs a trace profile for a given duration func Trace(ctx context.Context, sec int) ([]byte, map[string]string, error) { if sec <= 0 { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 082dfea66f..c1584e5867 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "math/big" + "runtime" "strings" "time" @@ -2229,6 +2230,21 @@ func (api *PrivateDebugAPI) PurgeCheckpointWhitelist() { api.b.PurgeCheckpointWhitelist() } +// GetTraceStack returns the current trace stack +func (api *PrivateDebugAPI) GetTraceStack() string { + buf := make([]byte, 1024) + + for { + n := runtime.Stack(buf, true) + + if n < len(buf) { + return string(buf[:n]) + } + + buf = make([]byte, 2*len(buf)) + } +} + // PublicNetAPI offers network related RPC methods type PublicNetAPI struct { net *p2p.Server diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index dcdd5baf23..64ceb5c42e 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -484,6 +484,11 @@ web3._extend({ call: 'debug_purgeCheckpointWhitelist', params: 0, }), + new web3._extend.Method({ + name: 'getTraceStack', + call: 'debug_getTraceStack', + params: 0, + }), ], properties: [] }); diff --git a/les/handler_test.go b/les/handler_test.go index 3ceabdf8ec..af3324b042 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -617,7 +617,7 @@ func testTransactionStatus(t *testing.T, protocol int) { sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()}) } if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil { - t.Errorf("transaction status mismatch") + t.Error("transaction status mismatch", err) } } signer := types.HomesteadSigner{} diff --git a/les/server_requests.go b/les/server_requests.go index 3595a6ab38..b31c11c9d0 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -507,25 +507,39 @@ func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) { if err := msg.Decode(&r); err != nil { return nil, 0, 0, err } + amount := uint64(len(r.Txs)) + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { stats := make([]light.TxStatus, len(r.Txs)) + + var ( + err error + addFn func(transaction *types.Transaction) error + ) + for i, tx := range r.Txs { if i != 0 && !waitOrStop() { return nil } + hash := tx.Hash() stats[i] = txStatus(backend, hash) + if stats[i].Status == core.TxStatusUnknown { - addFn := backend.TxPool().AddRemotes + addFn = 
diff --git a/les/handler_test.go b/les/handler_test.go
index 3ceabdf8ec..af3324b042 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -617,7 +617,7 @@ func testTransactionStatus(t *testing.T, protocol int) {
 			sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
 		}
 		if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
-			t.Errorf("transaction status mismatch")
+			t.Error("transaction status mismatch", err)
 		}
 	}
 	signer := types.HomesteadSigner{}
diff --git a/les/server_requests.go b/les/server_requests.go
index 3595a6ab38..b31c11c9d0 100644
--- a/les/server_requests.go
+++ b/les/server_requests.go
@@ -507,25 +507,39 @@ func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) {
 	if err := msg.Decode(&r); err != nil {
 		return nil, 0, 0, err
 	}
+
 	amount := uint64(len(r.Txs))
+
 	return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
 		stats := make([]light.TxStatus, len(r.Txs))
+
+		var (
+			err   error
+			addFn func(transaction *types.Transaction) error
+		)
+
 		for i, tx := range r.Txs {
 			if i != 0 && !waitOrStop() {
 				return nil
 			}
+
 			hash := tx.Hash()
 			stats[i] = txStatus(backend, hash)
+
 			if stats[i].Status == core.TxStatusUnknown {
-				addFn := backend.TxPool().AddRemotes
+				addFn = backend.TxPool().AddRemote
+
 				// Add txs synchronously for testing purpose
 				if backend.AddTxsSync() {
-					addFn = backend.TxPool().AddRemotesSync
+					addFn = backend.TxPool().AddRemoteSync
 				}
-				if errs := addFn([]*types.Transaction{tx}); errs[0] != nil {
-					stats[i].Error = errs[0].Error()
+
+				if err = addFn(tx); err != nil {
+					stats[i].Error = err.Error()
+
 					continue
 				}
+
 				stats[i] = txStatus(backend, hash)
 			}
 		}
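handleSendTx now feeds the pool one transaction at a time, so the per-transaction error comes back directly instead of through errs[0] of a one-element batch. A toy sketch of the call-shape change (the interface here is a stand-in for what backend.TxPool() provides):

package lesdemo

import "github.com/ethereum/go-ethereum/core/types"

type txPool interface {
	AddRemotes(txs []*types.Transaction) []error // old: one error per tx in the batch
	AddRemote(tx *types.Transaction) error       // new: the error for this one tx
}

func addOne(pool txPool, tx *types.Transaction) error {
	// Before: errs := pool.AddRemotes([]*types.Transaction{tx}); use errs[0].
	// After: the returned error maps directly to the transaction submitted.
	return pool.AddRemote(tx)
}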
diff --git a/miner/worker.go b/miner/worker.go
index 797e7ea980..0137a74008 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -17,10 +17,15 @@ package miner
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
 	"math/big"
+	"os"
+	"runtime"
+	"runtime/pprof"
+	ptrace "runtime/trace"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -31,6 +36,7 @@ import (
 	"go.opentelemetry.io/otel/trace"
 
 	"github.com/ethereum/go-ethereum/common"
+	cmath "github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/common/tracing"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/consensus/misc"
@@ -39,6 +45,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -83,6 +90,12 @@ const (
 	staleThreshold = 7
 )
 
+// metrics counters to track total and empty blocks sealed by a miner
+var (
+	sealedBlocksCounter      = metrics.NewRegisteredCounter("worker/sealedBlocks", nil)
+	sealedEmptyBlocksCounter = metrics.NewRegisteredCounter("worker/sealedEmptyBlocks", nil)
+)
+
 // environment is the worker's current environment and holds all
 // information of the sealing block generation.
 type environment struct {
@@ -257,6 +270,8 @@ type worker struct {
 	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
 	fullTaskHook func()                             // Method to call before pushing the full sealing task.
 	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
+
+	profileCount *int32 // Global count for profiling
 }
 
 //nolint:staticcheck
@@ -285,6 +300,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
 		resubmitIntervalCh: make(chan time.Duration),
 		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
 	}
+	worker.profileCount = new(int32)
 	// Subscribe NewTxsEvent for tx pool
 	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
 	// Subscribe events for blockchain
@@ -560,9 +576,11 @@ func (w *worker) mainLoop(ctx context.Context) {
 	for {
 		select {
 		case req := <-w.newWorkCh:
+			//nolint:contextcheck
 			w.commitWork(req.ctx, req.interrupt, req.noempty, req.timestamp)
 
 		case req := <-w.getWorkCh:
+			//nolint:contextcheck
 			block, err := w.generateWork(req.ctx, req.params)
 			if err != nil {
 				req.err = err
@@ -622,13 +640,17 @@ func (w *worker) mainLoop(ctx context.Context) {
 			if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
 				continue
 			}
+
 			txs := make(map[common.Address]types.Transactions)
+
 			for _, tx := range ev.Txs {
 				acc, _ := types.Sender(w.current.signer, tx)
 				txs[acc] = append(txs[acc], tx)
 			}
-			txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
+
+			txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, cmath.FromBig(w.current.header.BaseFee))
 			tcount := w.current.tcount
+
 			w.commitTransactions(w.current, txset, nil)
 
 			// Only update the snapshot if any new transactions were added
@@ -758,7 +780,7 @@ func (w *worker) resultLoop() {
 				err     error
 			)
 
-			tracing.Exec(task.ctx, "resultLoop", func(ctx context.Context, span trace.Span) {
+			tracing.Exec(task.ctx, "", "resultLoop", func(ctx context.Context, span trace.Span) {
 				for i, taskReceipt := range task.receipts {
 					receipt := new(types.Receipt)
 					receipts[i] = receipt
@@ -808,6 +830,12 @@ func (w *worker) resultLoop() {
 			// Broadcast the block and announce chain insertion event
 			w.mux.Post(core.NewMinedBlockEvent{Block: block})
 
+			sealedBlocksCounter.Inc(1)
+
+			if block.Transactions().Len() == 0 {
+				sealedEmptyBlocksCounter.Inc(1)
+			}
+
 			// Insert the block into the set of pending ones to resultLoop for confirmations
 			w.unconfirmed.Insert(block.NumberU64(), block.Hash())
 		case <-w.exitCh:
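The two counters land in the default metrics registry, so they can be read back anywhere in the process, e.g. from a debug endpoint. A sketch using only the go-ethereum metrics package (the function name is illustrative; the counters are no-ops unless metrics are enabled):

package minerdemo

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

// sealedBlockStats fetches the same counter instances the worker increments;
// GetOrRegisterCounter returns the already-registered counter if one exists.
func sealedBlockStats() string {
	sealed := metrics.GetOrRegisterCounter("worker/sealedBlocks", nil)
	empty := metrics.GetOrRegisterCounter("worker/sealedEmptyBlocks", nil)

	return fmt.Sprintf("sealed=%d empty=%d", sealed.Count(), empty.Count())
}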
@@ -965,7 +993,10 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
 		// Start executing the transaction
 		env.state.Prepare(tx.Hash(), env.tcount)
 
+		start := time.Now()
+
 		logs, err := w.commitTransaction(env, tx)
+
 		switch {
 		case errors.Is(err, core.ErrGasLimitReached):
 			// Pop the current out-of-gas transaction without shifting in the next from the account
@@ -987,6 +1018,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
 			coalescedLogs = append(coalescedLogs, logs...)
 			env.tcount++
 			txs.Shift()
+			log.Info("Committed new tx", "tx hash", tx.Hash(), "from", from, "to", tx.To(), "nonce", tx.Nonce(), "gas", tx.Gas(), "gasPrice", tx.GasPrice(), "value", tx.Value(), "time spent", time.Since(start))
 
 		case errors.Is(err, core.ErrTxTypeNotSupported):
 			// Pop the unsupported transaction without shifting in the next from the account
@@ -1077,7 +1109,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 	}
 	// Set baseFee and GasLimit if we are on an EIP-1559 chain
 	if w.chainConfig.IsLondon(header.Number) {
-		header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header())
+		header.BaseFee = misc.CalcBaseFeeUint(w.chainConfig, parent.Header()).ToBig()
 		if !w.chainConfig.IsLondon(parent.Number()) {
 			parentGasLimit := parent.GasLimit() * params.ElasticityMultiplier
 			header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
@@ -1117,9 +1149,75 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 	return env, nil
 }
 
+func startProfiler(profile string, filepath string, number uint64) (func() error, error) {
+	var (
+		buf bytes.Buffer
+		err error
+	)
+
+	closeFn := func() {}
+
+	switch profile {
+	case "cpu":
+		err = pprof.StartCPUProfile(&buf)
+
+		if err == nil {
+			closeFn = func() {
+				pprof.StopCPUProfile()
+			}
+		}
+	case "trace":
+		err = ptrace.Start(&buf)
+
+		if err == nil {
+			closeFn = func() {
+				ptrace.Stop()
+			}
+		}
+	case "heap":
+		runtime.GC()
+
+		err = pprof.WriteHeapProfile(&buf)
+	default:
+		log.Info("Incorrect profile name", "profile", profile)
+	}
+
+	if err != nil {
+		return func() error {
+			closeFn()
+			return nil
+		}, err
+	}
+
+	closeFnNew := func() error {
+		var err error
+
+		closeFn()
+
+		if buf.Len() == 0 {
+			return nil
+		}
+
+		f, err := os.Create(filepath + "/" + profile + "-" + fmt.Sprint(number) + ".prof")
+		if err != nil {
+			return err
+		}
+
+		defer f.Close()
+
+		_, err = f.Write(buf.Bytes())
+
+		return err
+	}
+
+	return closeFnNew, nil
+}
+
 // fillTransactions retrieves the pending transactions from the txpool and fills them
 // into the given sealing block. The transaction selection and ordering strategy can
 // be customized with the plugin in the future.
+//
+//nolint:gocognit
 func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *environment) {
 	ctx, span := tracing.StartSpan(ctx, "fillTransactions")
 	defer tracing.EndSpan(span)
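startProfiler separates starting the capture from flushing it: the returned closure stops the profiler and, for non-empty buffers, writes <profile>-<number>.prof into the given directory. A sketch of the intended call pattern, assuming a caller in the same miner package (the wrapper itself is hypothetical):

package miner

import (
	"os"

	"github.com/ethereum/go-ethereum/log"
)

// profileOnce wraps one unit of work with a CPU profile for a block number.
func profileOnce(number uint64, work func()) {
	closeFn, err := startProfiler("cpu", os.TempDir(), number)
	if err != nil {
		log.Error("could not start profile", "number", number, "err", err)
		return
	}

	work() // the code path being profiled

	if err := closeFn(); err != nil {
		log.Error("could not write profile", "number", number, "err", err)
	}
}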
@@ -1134,10 +1232,76 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
 		remoteTxs map[common.Address]types.Transactions
 	)
 
-	tracing.Exec(ctx, "worker.SplittingTransactions", func(ctx context.Context, span trace.Span) {
-		pending := w.eth.TxPool().Pending(true)
+	// TODO: move to config or RPC
+	const profiling = false
+
+	if profiling {
+		doneCh := make(chan struct{})
+
+		defer func() {
+			close(doneCh)
+		}()
+
+		go func(number uint64) {
+			closeFn := func() error {
+				return nil
+			}
+
+			for {
+				select {
+				case <-time.After(150 * time.Millisecond):
+					// Check if we've not crossed limit
+					if attempt := atomic.AddInt32(w.profileCount, 1); attempt >= 10 {
+						log.Info("Completed profiling", "attempt", attempt)
+
+						return
+					}
+
+					log.Info("Starting profiling in fill transactions", "number", number)
+
+					dir, err := os.MkdirTemp("", fmt.Sprintf("bor-traces-%s-", time.Now().UTC().Format("2006-01-02-150405Z")))
+					if err != nil {
+						log.Error("Error in profiling", "path", dir, "number", number, "err", err)
+						return
+					}
+
+					// grab the cpu profile
+					closeFnInternal, err := startProfiler("cpu", dir, number)
+					if err != nil {
+						log.Error("Error in profiling", "path", dir, "number", number, "err", err)
+						return
+					}
+
+					closeFn = func() error {
+						err := closeFnInternal()
+
+						log.Info("Completed profiling", "path", dir, "number", number, "error", err)
+
+						return nil
+					}
+
+				case <-doneCh:
+					err := closeFn()
+
+					if err != nil {
+						log.Info("closing fillTransactions", "number", number, "error", err)
+					}
+
+					return
+				}
+			}
+		}(env.header.Number.Uint64())
+	}
+
+	tracing.Exec(ctx, "", "worker.SplittingTransactions", func(ctx context.Context, span trace.Span) {
+		prePendingTime := time.Now()
+
+		pending := w.eth.TxPool().Pending(ctx, true)
 		remoteTxs = pending
 
+		postPendingTime := time.Now()
+
 		for _, account := range w.eth.TxPool().Locals() {
 			if txs := remoteTxs[account]; len(txs) > 0 {
 				delete(remoteTxs, account)
@@ -1145,6 +1309,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
 			}
 		}
 
+		postLocalsTime := time.Now()
+
 		localTxsCount = len(localTxs)
 		remoteTxsCount = len(remoteTxs)
@@ -1152,6 +1318,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
 		tracing.SetAttributes(
 			span,
 			attribute.Int("len of local txs", localTxsCount),
 			attribute.Int("len of remote txs", remoteTxsCount),
+			attribute.String("time taken by Pending()", postPendingTime.Sub(prePendingTime).String()),
+			attribute.String("time taken by Locals()", postLocalsTime.Sub(postPendingTime).String()),
 		)
 	})
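All tracing.Exec call sites gain a leading instrumentation-name argument; the worker passes "" everywhere, which keeps the old behavior of using whatever tracer the context already carries. A sketch of a call that names a tracer instead, so a span can still be recorded when the incoming context has none (the span name and package are illustrative):

package tracedemo

import (
	"context"

	"github.com/ethereum/go-ethereum/common/tracing"
	"go.opentelemetry.io/otel/trace"
)

func tracedWork(ctx context.Context) {
	// A non-empty instrumentation name lets Exec obtain a tracer from the
	// global provider when ctx does not carry one.
	tracing.Exec(ctx, "miner-demo", "demo.work", func(ctx context.Context, span trace.Span) {
		// work to be traced
		_ = span
	})
}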
@@ -1164,8 +1332,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
 	if localTxsCount > 0 {
 		var txs *types.TransactionsByPriceAndNonce
 
-		tracing.Exec(ctx, "worker.LocalTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
-			txs = types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
+		tracing.Exec(ctx, "", "worker.LocalTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
+			txs = types.NewTransactionsByPriceAndNonce(env.signer, localTxs, cmath.FromBig(env.header.BaseFee))
 
 			tracing.SetAttributes(
 				span,
@@ -1173,7 +1341,7 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
 			)
 		})
 
-		tracing.Exec(ctx, "worker.LocalCommitTransactions", func(ctx context.Context, span trace.Span) {
+		tracing.Exec(ctx, "", "worker.LocalCommitTransactions", func(ctx context.Context, span trace.Span) {
 			committed = w.commitTransactions(env, txs, interrupt)
 		})
 
@@ -1187,8 +1355,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
 	if remoteTxsCount > 0 {
 		var txs *types.TransactionsByPriceAndNonce
 
-		tracing.Exec(ctx, "worker.RemoteTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
-			txs = types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
+		tracing.Exec(ctx, "", "worker.RemoteTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
+			txs = types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, cmath.FromBig(env.header.BaseFee))
 
 			tracing.SetAttributes(
 				span,
@@ -1196,7 +1364,7 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
 			)
 		})
 
-		tracing.Exec(ctx, "worker.RemoteCommitTransactions", func(ctx context.Context, span trace.Span) {
+		tracing.Exec(ctx, "", "worker.RemoteCommitTransactions", func(ctx context.Context, span trace.Span) {
 			committed = w.commitTransactions(env, txs, interrupt)
 		})
 
@@ -1237,7 +1405,7 @@ func (w *worker) commitWork(ctx context.Context, interrupt *int32, noempty bool,
 		err error
 	)
 
-	tracing.Exec(ctx, "worker.prepareWork", func(ctx context.Context, span trace.Span) {
+	tracing.Exec(ctx, "", "worker.prepareWork", func(ctx context.Context, span trace.Span) {
 		// Set the coinbase if the worker is running or it's required
 		var coinbase common.Address
 		if w.isRunning() {
diff --git a/tests/init_test.go b/tests/init_test.go
index 1c6841e030..5e32f20abf 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -141,9 +141,6 @@ func (tm *testMatcher) findSkip(name string) (reason string, skipload bool) {
 	isWin32 := runtime.GOARCH == "386" && runtime.GOOS == "windows"
 	for _, re := range tm.slowpat {
 		if re.MatchString(name) {
-			if testing.Short() {
-				return "skipped in -short mode", false
-			}
 			if isWin32 {
 				return "skipped on 32bit windows", false
 			}
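With the findSkip branch gone, slow-pattern tests are no longer exempted wholesale under -short (which the Makefile's test target now passes); any test that still wants to opt out does so explicitly with the standard idiom:

package tests

import "testing"

// TestSlowExample is illustrative: the conventional per-test guard that
// replaces the blanket skip removed from findSkip above.
func TestSlowExample(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow test in -short mode")
	}
	// ... the expensive part ...
}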