diff --git a/.golangci.yml b/.golangci.yml index da5553ae7aa..b7e9c207b68 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,19 @@ run: deadline: 10m +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - unused + - varcheck + linters-settings: govet: disable: diff --git a/Makefile b/Makefile index 67a5e662460..cf0c569fd8b 100644 --- a/Makefile +++ b/Makefile @@ -111,12 +111,11 @@ lint: lintci lintci: mdbx @echo "--> Running linter for code" - @./build/bin/golangci-lint run \ - --build-tags="mdbx" + @./build/bin/golangci-lint run --build-tags="mdbx" --config ./.golangci.yml lintci-deps: rm -f ./build/bin/golangci-lint - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./build/bin v1.39.0 + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./build/bin v1.38.0 clean: env GO111MODULE=on go clean -cache diff --git a/cmd/hack/db/lmdb.go b/cmd/hack/db/lmdb.go index 57c6678dcec..c350042d20c 100644 --- a/cmd/hack/db/lmdb.go +++ b/cmd/hack/db/lmdb.go @@ -42,7 +42,10 @@ func nothing(kv ethdb.RwKV, _ ethdb.RwTx) (bool, error) { // Generates a database with single table and two key-value pair in "t" DBI, and returns the file name func generate2(tx ethdb.RwTx, entries int) error { - c := tx.RwCursor("t") + c, err := tx.RwCursor("t") + if err != nil { + return err + } defer c.Close() for i := 0; i < entries; i++ { k := fmt.Sprintf("%05d", i) @@ -66,7 +69,10 @@ func generate3(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { // Generates a database with one table, containing 1 short and 1 long (more than one page) values func generate4(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { - c := tx.RwCursor("t") + c, err := tx.RwCursor("t") + if err != nil { + return false, err + } defer c.Close() if err := c.Append([]byte("k1"), []byte("very_short_value")); err != nil { return false, err @@ -79,7 +85,10 @@ func generate4(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { // Generates a database with one table, containing some DupSort values func generate5(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { - c := tx.RwCursorDupSort("t") + c, err := tx.RwCursorDupSort("t") + if err != nil { + return false, err + } defer c.Close() if err := c.AppendDup([]byte("key1"), []byte("value11")); err != nil { return false, err @@ -104,7 +113,10 @@ func generate5(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { // Generate a database with one table, containing lots of dupsort values func generate6(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { - c := tx.RwCursorDupSort("t") + c, err := tx.RwCursorDupSort("t") + if err != nil { + return false, err + } defer c.Close() for i := 0; i < 1000; i++ { v := fmt.Sprintf("dupval_%05d", i) @@ -132,9 +144,15 @@ func dropT(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { } func generate7(_ ethdb.RwKV, tx ethdb.RwTx) (bool, error) { - c1 := tx.RwCursor("t1") + c1, err := tx.RwCursor("t1") + if err != nil { + return false, err + } defer c1.Close() - c2 := tx.RwCursor("t2") + c2, err := tx.RwCursor("t2") + if err != nil { + return false, err + } defer c2.Close() for i := 0; i < 1000; i++ { k := fmt.Sprintf("%05d", i) @@ -177,7 +195,10 @@ func generate9(tx ethdb.RwTx, entries int) error { var cs []ethdb.RwCursor for i := 0; i < 100; i++ { k := fmt.Sprintf("table_%05d", i) - c := tx.RwCursor(k) + c, err := tx.RwCursor(k) + if err != nil { + return err + } defer c.Close() cs = append(cs, c) } @@ -217,7 +238,10 @@ func dropGradually(kv ethdb.RwKV, 
tx ethdb.RwTx) (bool, error) { } func change1(tx ethdb.RwTx) (bool, error) { - c := tx.RwCursor("t") + c, err := tx.RwCursor("t") + if err != nil { + return false, err + } defer c.Close() for i := 0; i < 1000; i++ { k := fmt.Sprintf("%05d", i) @@ -229,7 +253,10 @@ func change1(tx ethdb.RwTx) (bool, error) { } func change2(tx ethdb.RwTx) (bool, error) { - c := tx.RwCursor("t") + c, err := tx.RwCursor("t") + if err != nil { + return false, err + } defer c.Close() for i := 0; i < 1000; i++ { k := fmt.Sprintf("%05d", i) @@ -241,7 +268,10 @@ func change2(tx ethdb.RwTx) (bool, error) { } func change3(tx ethdb.RwTx) (bool, error) { - c := tx.RwCursor("t") + c, err := tx.RwCursor("t") + if err != nil { + return false, err + } defer c.Close() for i := 0; i < 1000; i++ { k := fmt.Sprintf("%05d", i) @@ -254,15 +284,19 @@ func change3(tx ethdb.RwTx) (bool, error) { func launchReader(kv ethdb.RwKV, tx ethdb.Tx, expectVal string, startCh chan struct{}, errorCh chan error) (bool, error) { tx.Rollback() - tx1, err := kv.Begin(context.Background()) - if err != nil { - return false, err + tx1, err1 := kv.Begin(context.Background()) + if err1 != nil { + return false, err1 } // Wait for the signal to start reading go func() { defer tx1.Rollback() <-startCh - c := tx1.Cursor("t") + c, err := tx1.Cursor("t") + if err != nil { + errorCh <- err + return + } defer c.Close() for i := 0; i < 1000; i++ { k := fmt.Sprintf("%05d", i) diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 11c1a140aa1..659cb2ceb02 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -311,7 +311,10 @@ func accountSavings(db ethdb.RwKV) (int, int) { emptyRoots := 0 emptyCodes := 0 tool.Check(db.View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.HashedAccountsBucket) + c, err := tx.Cursor(dbutils.HashedAccountsBucket) + if err != nil { + return err + } for k, v, err := c.First(); k != nil; k, v, err = c.Next() { if err != nil { return err @@ -601,7 +604,10 @@ func dbSlice(chaindata string, bucket string, prefix []byte) { db := ethdb.MustOpen(chaindata) defer db.Close() if err := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(bucket) + c, err := tx.Cursor(bucket) + if err != nil { + return err + } for k, v, err := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v, err = c.Next() { if err != nil { return err @@ -773,7 +779,10 @@ func readAccount(chaindata string, account common.Address) error { } fmt.Printf("CodeHash:%x\nIncarnation:%d\n", a.CodeHash, a.Incarnation) if err := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(dbutils.PlainStateBucket) + if err != nil { + return err + } for k, v, e := c.Seek(account.Bytes()); k != nil && e == nil; k, v, e = c.Next() { if e != nil { return e @@ -835,10 +844,16 @@ func repairCurrent() { defer currentDb.Close() tool.Check(historyDb.ClearBuckets(dbutils.HashedStorageBucket)) tool.Check(historyDb.RwKV().Update(context.Background(), func(tx ethdb.RwTx) error { - newB := tx.RwCursor(dbutils.HashedStorageBucket) + newB, err := tx.RwCursor(dbutils.HashedStorageBucket) + if err != nil { + return err + } count := 0 if err := currentDb.RwKV().View(context.Background(), func(ctx ethdb.Tx) error { - c := ctx.Cursor(dbutils.HashedStorageBucket) + c, err := ctx.Cursor(dbutils.HashedStorageBucket) + if err != nil { + return err + } for k, v, err := c.First(); k != nil; k, v, err = c.Next() { if err != nil { return err @@ -861,7 +876,10 @@ func dumpStorage() { db 
:= ethdb.MustOpen(node.DefaultDataDir() + "/geth/chaindata") defer db.Close() if err := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.StorageHistoryBucket) + c, err := tx.Cursor(dbutils.StorageHistoryBucket) + if err != nil { + return err + } return ethdb.ForEach(c, func(k, v []byte) (bool, error) { fmt.Printf("%x %x\n", k, v) return true, nil @@ -880,7 +898,10 @@ func printBucket(chaindata string) { fb := bufio.NewWriter(f) defer fb.Flush() if err := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.StorageHistoryBucket) + c, err := tx.Cursor(dbutils.StorageHistoryBucket) + if err != nil { + return err + } for k, v, err := c.First(); k != nil; k, v, err = c.Next() { if err != nil { return err @@ -1189,7 +1210,10 @@ func changeSetStats(chaindata string, block1, block2 uint64) error { stAccounts := 0 stStorage := 0 if err := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(dbutils.PlainStateBucket) + if err != nil { + return err + } k, _, e := c.First() for ; k != nil && e == nil; k, _, e = c.Next() { if len(k) > 28 { @@ -1278,7 +1302,10 @@ func supply(chaindata string) error { supply := uint256.NewInt() var a accounts.Account if err := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.PlainStateBucket) + c, err := tx.Cursor(dbutils.PlainStateBucket) + if err != nil { + return err + } for k, v, err := c.First(); k != nil; k, v, err = c.Next() { if err != nil { return err @@ -1308,7 +1335,10 @@ func extractCode(chaindata string) error { defer db.Close() var contractCount int if err1 := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.CodeBucket) + c, err := tx.Cursor(dbutils.CodeBucket) + if err != nil { + return err + } // This is a mapping of CodeHash => Byte code for k, v, err := c.First(); k != nil; k, v, err = c.Next() { if err != nil { @@ -1334,7 +1364,10 @@ func iterateOverCode(chaindata string) error { var codeHashTotalLength int var codeTotalLength int // Total length of all byte code (just to illustrate iterating) if err1 := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.PlainContractCodeBucket) + c, err := tx.Cursor(dbutils.PlainContractCodeBucket) + if err != nil { + return err + } // This is a mapping of contractAddress + incarnation => CodeHash for k, v, err := c.First(); k != nil; k, v, err = c.Next() { if err != nil { @@ -1343,7 +1376,10 @@ func iterateOverCode(chaindata string) error { contractKeyTotalLength += len(k) contractValTotalLength += len(v) } - c = tx.Cursor(dbutils.CodeBucket) + c, err = tx.Cursor(dbutils.CodeBucket) + if err != nil { + return err + } // This is a mapping of CodeHash => Byte code for k, v, err := c.First(); k != nil; k, v, err = c.Next() { if err != nil { @@ -1379,7 +1415,11 @@ func mint(chaindata string, block uint64) error { blockEncoded := dbutils.EncodeBlockNumber(block) canonical := make(map[common.Hash]struct{}) if err1 := db.RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.HeaderCanonicalBucket) + c, err := tx.Cursor(dbutils.HeaderCanonicalBucket) + if err != nil { + return err + } + // This is a mapping of contractAddress + incarnation => CodeHash for k, v, err := c.Seek(blockEncoded); k != nil; k, v, err = c.Next() { if err != nil { @@ -1392,7 +1432,10 @@ func mint(chaindata string, block uint64) error { } } log.Info("Read 
canonical hashes", "count", len(canonical)) - c = tx.Cursor(dbutils.BlockBodyPrefix) + c, err = tx.Cursor(dbutils.BlockBodyPrefix) + if err != nil { + return err + } var prevBlock uint64 var burntGas uint64 for k, _, err := c.Seek(blockEncoded); k != nil; k, _, err = c.Next() { @@ -1547,7 +1590,10 @@ func extractBodies(chaindata string, block uint64) error { return err } defer tx.Rollback() - c := tx.(ethdb.HasTx).Tx().Cursor(dbutils.BlockBodyPrefix) + c, err := tx.(ethdb.HasTx).Tx().Cursor(dbutils.BlockBodyPrefix) + if err != nil { + return err + } defer c.Close() blockEncoded := dbutils.EncodeBlockNumber(block) for k, _, err := c.Seek(blockEncoded); k != nil; k, _, err = c.Next() { diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go index afd372f313f..f63a25d69fb 100644 --- a/cmd/integration/commands/refetence_db.go +++ b/cmd/integration/commands/refetence_db.go @@ -210,12 +210,18 @@ func compareBucketBetweenDatabases(ctx context.Context, chaindata string, refere func compareBuckets(ctx context.Context, tx ethdb.Tx, b string, refTx ethdb.Tx, refB string) error { count := 0 - c := tx.Cursor(b) + c, err := tx.Cursor(b) + if err != nil { + return err + } k, v, e := c.First() if e != nil { return e } - refC := refTx.Cursor(refB) + refC, err := refTx.Cursor(refB) + if err != nil { + return err + } refK, refV, revErr := refC.First() if revErr != nil { return revErr @@ -328,7 +334,10 @@ MainLoop: panic("bucket not parse") } - c := dstTx.RwCursor(bucket) + c, err := dstTx.RwCursor(bucket) + if err != nil { + return err + } var prevK []byte for { @@ -435,8 +444,14 @@ func kv2kv(ctx context.Context, src, dst ethdb.RwKV) error { continue } - c := dstTx.RwCursor(name) - srcC := srcTx.Cursor(name) + c, err := dstTx.RwCursor(name) + if err != nil { + return err + } + srcC, err := srcTx.Cursor(name) + if err != nil { + return err + } var prevK []byte casted, isDupsort := c.(ethdb.RwCursorDupSort) @@ -474,7 +489,10 @@ func kv2kv(ctx context.Context, src, dst ethdb.RwKV) error { if err != nil { return err } - c = dstTx.RwCursor(name) + c, err = dstTx.RwCursor(name) + if err != nil { + return err + } casted, isDupsort = c.(ethdb.RwCursorDupSort) default: } diff --git a/cmd/pics/state.go b/cmd/pics/state.go index af7f5d5a871..fb5127c5aab 100644 --- a/cmd/pics/state.go +++ b/cmd/pics/state.go @@ -139,7 +139,10 @@ func stateDatabaseComparison(first ethdb.RwKV, second ethdb.RwKV, number int) er return first.View(context.Background(), func(firstTx ethdb.Tx) error { for bucketName := range bucketLabels { bucketName := bucketName - c := readTx.Cursor(bucketName) + c, err := readTx.Cursor(bucketName) + if err != nil { + return err + } if err2 := ethdb.ForEach(c, func(k, v []byte) (bool, error) { if firstV, _ := firstTx.GetOne(bucketName, k); firstV != nil && bytes.Equal(v, firstV) { // Skip the record that is the same as in the first Db diff --git a/cmd/rpctest/rpctest/account_range_verify.go b/cmd/rpctest/rpctest/account_range_verify.go index 443b5cae398..ab17cd55b11 100644 --- a/cmd/rpctest/rpctest/account_range_verify.go +++ b/cmd/rpctest/rpctest/account_range_verify.go @@ -117,8 +117,16 @@ func CompareAccountRange(tgURL, gethURL, tmpDataDir, gethDataDir string, blockFr if err != nil { log.Fatal(err) } - tgCursor := tgTx.Cursor(dbutils.AccountsHistoryBucket) - gethCursor := gethTx.Cursor(dbutils.AccountsHistoryBucket) + tgCursor, err := tgTx.Cursor(dbutils.AccountsHistoryBucket) + if err != nil { + log.Fatal(err) + } + defer tgCursor.Close() + gethCursor, err := 
gethTx.Cursor(dbutils.AccountsHistoryBucket) + if err != nil { + log.Fatal(err) + } + defer gethCursor.Close() tgKey, tgVal, err1 := tgCursor.Next() if err1 != nil { diff --git a/cmd/state/verify/verify_headers_snapshot.go b/cmd/state/verify/verify_headers_snapshot.go index 5af1adcdd38..5980b039d4d 100644 --- a/cmd/state/verify/verify_headers_snapshot.go +++ b/cmd/state/verify/verify_headers_snapshot.go @@ -21,7 +21,10 @@ func HeadersSnapshot(snapshotPath string) error { }).MustOpen() var prevHeader *types.Header err := snKV.View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.HeadersBucket) + c, err := tx.Cursor(dbutils.HeadersBucket) + if err != nil { + return err + } k, v, innerErr := c.First() for { if len(k) == 0 && len(v) == 0 { diff --git a/common/changeset/changeset.go b/common/changeset/changeset.go index 9eaecabd877..3c9a8ffdc64 100644 --- a/common/changeset/changeset.go +++ b/common/changeset/changeset.go @@ -148,7 +148,10 @@ func Truncate(tx ethdb.RwTx, from uint64) error { keyStart := dbutils.EncodeBlockNumber(from) { - c := tx.RwCursorDupSort(dbutils.PlainAccountChangeSetBucket) + c, err := tx.RwCursorDupSort(dbutils.PlainAccountChangeSetBucket) + if err != nil { + return err + } defer c.Close() for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() { if err != nil { @@ -161,7 +164,10 @@ func Truncate(tx ethdb.RwTx, from uint64) error { } } { - c := tx.RwCursorDupSort(dbutils.PlainStorageChangeSetBucket) + c, err := tx.RwCursorDupSort(dbutils.PlainStorageChangeSetBucket) + if err != nil { + return err + } defer c.Close() for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() { if err != nil { diff --git a/common/changeset/storage_changeset_test.go b/common/changeset/storage_changeset_test.go index 64651ac96fb..0582c982da5 100644 --- a/common/changeset/storage_changeset_test.go +++ b/common/changeset/storage_changeset_test.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/turbo-geth/common/dbutils" "github.com/ledgerwatch/turbo-geth/ethdb" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -218,15 +219,16 @@ func TestEncodingStorageNewWithoutNotDefaultIncarnationFindPlain(t *testing.T) { db := ethdb.NewMemDatabase() defer db.Close() tx, err := db.RwKV().BeginRw(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer tx.Rollback() - cs := m.WalkerAdapter(tx.CursorDupSort(bkt)).(StorageChangeSetPlain) + c, err := tx.CursorDupSort(bkt) + require.NoError(t, err) + cs := m.WalkerAdapter(c).(StorageChangeSetPlain) clear := func() { - c := tx.RwCursor(bkt) + c, err := tx.RwCursor(bkt) + require.NoError(t, err) defer c.Close() for k, _, err := c.First(); k != nil; k, _, err = c.First() { if err != nil { @@ -248,15 +250,16 @@ func TestEncodingStorageNewWithoutNotDefaultIncarnationFindWithoutIncarnationPla db := ethdb.NewMemDatabase() defer db.Close() tx, err := db.RwKV().BeginRw(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer tx.Rollback() - cs := m.WalkerAdapter(tx.CursorDupSort(bkt)).(StorageChangeSetPlain) + c, err := tx.CursorDupSort(bkt) + require.NoError(t, err) + cs := m.WalkerAdapter(c).(StorageChangeSetPlain) clear := func() { - c := tx.RwCursor(bkt) + c, err := tx.RwCursor(bkt) + require.NoError(t, err) defer c.Close() for k, _, err := c.First(); k != nil; k, _, err = c.First() { if err != nil { @@ -308,9 +311,10 @@ func doTestFind( } } - c := tx.RwCursor(bucket) + c, err := tx.RwCursor(bucket) + 
require.NoError(t, err) - err := encodeFunc(1, ch, func(k, v []byte) error { + err = encodeFunc(1, ch, func(k, v []byte) error { if err2 := c.Put(common.CopyBytes(k), common.CopyBytes(v)); err2 != nil { return err2 } @@ -412,12 +416,12 @@ func TestMultipleIncarnationsOfTheSameContract(t *testing.T) { db := ethdb.NewMemDatabase() defer db.Close() tx, err := db.RwKV().BeginRw(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer tx.Rollback() - cs := m.WalkerAdapter(tx.CursorDupSort(bkt)).(StorageChangeSetPlain) + c1, err := tx.CursorDupSort(bkt) + require.NoError(t, err) + cs := m.WalkerAdapter(c1).(StorageChangeSetPlain) contractA := common.HexToAddress("0x6f0e0cdac6c716a00bd8db4d0eee4f2bfccf8e6a") contractB := common.HexToAddress("0xc5acb79c258108f288288bc26f7820d06f45f08c") @@ -439,7 +443,8 @@ func TestMultipleIncarnationsOfTheSameContract(t *testing.T) { val5 := common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000000") val6 := common.FromHex("0xec89478783348038046b42cc126a3c4e351977b5f4cf5e3c4f4d8385adbf8046") - c := tx.RwCursorDupSort(bkt) + c, err := tx.RwCursorDupSort(bkt) + require.NoError(t, err) ch := NewStorageChangeSetPlain() assert.NoError(t, ch.Add(dbutils.PlainGenerateCompositeStorageKey(contractA.Bytes(), 2, key1.Bytes()), val1)) diff --git a/core/chain_makers.go b/core/chain_makers.go index eab025c942e..f66fe442aeb 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -259,7 +259,10 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse }); err != nil { return nil, nil, fmt.Errorf("clear HashedState bucket: %w", err) } - c := tx.(ethdb.HasTx).Tx().Cursor(dbutils.PlainStateBucket) + c, err := tx.(ethdb.HasTx).Tx().Cursor(dbutils.PlainStateBucket) + if err != nil { + return nil, nil, err + } h := common.NewHasher() defer common.ReturnHasherToPool(h) for k, v, err := c.First(); k != nil; k, v, err = c.Next() { diff --git a/core/state/history.go b/core/state/history.go index 36b21bb4877..4a312b6e95d 100644 --- a/core/state/history.go +++ b/core/state/history.go @@ -49,7 +49,10 @@ func FindByHistory(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]b hBucket = dbutils.AccountsHistoryBucket } - ch := tx.Cursor(hBucket) + ch, err := tx.Cursor(hBucket) + if err != nil { + return nil, err + } defer ch.Close() k, v, seekErr := ch.Seek(dbutils.IndexChunkKey(key, timestamp)) if seekErr != nil { @@ -79,9 +82,11 @@ func FindByHistory(tx ethdb.Tx, storage bool, key []byte, timestamp uint64) ([]b var data []byte if ok { csBucket := dbutils.ChangeSetByIndexBucket(storage) - c := tx.CursorDupSort(csBucket) + c, err := tx.CursorDupSort(csBucket) + if err != nil { + return nil, err + } defer c.Close() - var err error if storage { data, err = changeset.Mapper[csBucket].WalkerAdapter(c).(changeset.StorageChangeSetPlain).FindWithIncarnation(changeSetBlock, key) } else { @@ -134,7 +139,10 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st copy(startkeyNoInc[common.AddressLength:], startLocation.Bytes()) //for storage - mCursor := tx.Cursor(dbutils.PlainStateBucket) + mCursor, err := tx.Cursor(dbutils.PlainStateBucket) + if err != nil { + return err + } defer mCursor.Close() mainCursor := ethdb.NewSplitCursor( mCursor, @@ -146,7 +154,10 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st ) //for historic data - shCursor := tx.Cursor(dbutils.StorageHistoryBucket) + shCursor, err := tx.Cursor(dbutils.StorageHistoryBucket) + 
if err != nil { + return err + } defer shCursor.Close() var hCursor = ethdb.NewSplitCursor( shCursor, @@ -156,7 +167,10 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st common.AddressLength, /* part2start */ common.AddressLength+common.HashLength, /* part3start */ ) - csCursor := tx.CursorDupSort(dbutils.PlainStorageChangeSetBucket) + csCursor, err := tx.CursorDupSort(dbutils.PlainStorageChangeSetBucket) + if err != nil { + return err + } defer csCursor.Close() addr, loc, _, v, err1 := mainCursor.Seek() @@ -173,7 +187,6 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st return err2 } } - var err error goOn := true for goOn { cmp, br := common.KeyCmp(addr, hAddr) @@ -243,9 +256,15 @@ func WalkAsOfStorage(tx ethdb.Tx, address common.Address, incarnation uint64, st } func WalkAsOfAccounts(tx ethdb.Tx, startAddress common.Address, timestamp uint64, walker func(k []byte, v []byte) (bool, error)) error { - mainCursor := tx.Cursor(dbutils.PlainStateBucket) + mainCursor, err := tx.Cursor(dbutils.PlainStateBucket) + if err != nil { + return err + } defer mainCursor.Close() - ahCursor := tx.Cursor(dbutils.AccountsHistoryBucket) + ahCursor, err := tx.Cursor(dbutils.AccountsHistoryBucket) + if err != nil { + return err + } defer ahCursor.Close() var hCursor = ethdb.NewSplitCursor( ahCursor, @@ -255,7 +274,10 @@ func WalkAsOfAccounts(tx ethdb.Tx, startAddress common.Address, timestamp uint64 common.AddressLength, /* part2start */ common.AddressLength+8, /* part3start */ ) - csCursor := tx.CursorDupSort(dbutils.PlainAccountChangeSetBucket) + csCursor, err := tx.CursorDupSort(dbutils.PlainAccountChangeSetBucket) + if err != nil { + return err + } defer csCursor.Close() k, v, err1 := mainCursor.Seek(startAddress.Bytes()) @@ -280,7 +302,6 @@ func WalkAsOfAccounts(tx ethdb.Tx, startAddress common.Address, timestamp uint64 } goOn := true - var err error for goOn { //exit or next conditions cmp, br := common.KeyCmp(k, hK) diff --git a/core/state/intra_block_state_test.go b/core/state/intra_block_state_test.go index 0d61b64360f..21ec9b0721b 100644 --- a/core/state/intra_block_state_test.go +++ b/core/state/intra_block_state_test.go @@ -33,153 +33,10 @@ import ( "gopkg.in/check.v1" "github.com/ledgerwatch/turbo-geth/common" - "github.com/ledgerwatch/turbo-geth/common/dbutils" "github.com/ledgerwatch/turbo-geth/core/types" "github.com/ledgerwatch/turbo-geth/ethdb" ) -// Tests that updating a state trie does not leak any database writes prior to -// actually committing the state. 
-func TestUpdateLeaks(t *testing.T) { - // Create an empty state database - db := ethdb.NewMemDatabase() - defer db.Close() - w := NewDbStateWriter(db, 0) - state := New(NewDbStateReader(db)) - - // Update it with some accounts - for i := byte(0); i < 255; i++ { - addr := common.BytesToAddress([]byte{i}) - state.AddBalance(addr, uint256.NewInt().SetUint64(uint64(11*i))) - state.SetNonce(addr, uint64(42*i)) - if i%2 == 0 { - val := uint256.NewInt().SetBytes([]byte{i, i, i, i}) - state.SetState(addr, &common.Hash{i, i, i}, *val) - } - if i%3 == 0 { - state.SetCode(addr, []byte{i, i, i, i, i}) - } - _ = state.FinalizeTx(context.Background(), w) - } - - // Ensure that no data was leaked into the database - keys, err := db.Keys() - if err != nil { - t.Fatal(err) - } - for i := 0; i < len(keys); i += 2 { - if string(keys[i]) == dbutils.PreimagePrefix { - continue - } - value, _ := db.Get(string(keys[i]), keys[i+1]) - t.Errorf("State leaked into database: %x:%x -> %x", keys[i], keys[i+1], value) - } -} - -// Tests that no intermediate state of an object is stored into the database, -// only the one right before the commit. -func TestIntermediateLeaks(t *testing.T) { - t.Skip("switch to TG state readers/writers") - // Create two state databases, one transitioning to the final state, the other final from the beginning - transDb := ethdb.NewMemDatabase() - defer transDb.Close() - finalDb := ethdb.NewMemDatabase() - defer finalDb.Close() - transTds := NewTrieDbState(common.Hash{}, transDb, 0) - transState := New(transTds) - transTds.StartNewBuffer() - finalTds := NewTrieDbState(common.Hash{}, finalDb, 0) - finalState := New(finalTds) - finalTds.StartNewBuffer() - - modify := func(state *IntraBlockState, addr common.Address, i, tweak byte) { - state.SetBalance(addr, uint256.NewInt().SetUint64(uint64(11*i+tweak))) - state.SetNonce(addr, uint64(42*i+tweak)) - if i%2 == 0 { - val := uint256.NewInt() - state.SetState(addr, &common.Hash{i, i, i, 0}, *val) - val.SetBytes([]byte{i, i, i, i, tweak}) - state.SetState(addr, &common.Hash{i, i, i, tweak}, *val) - } - if i%3 == 0 { - state.SetCode(addr, []byte{i, i, i, i, i, tweak}) - } - } - - // Modify the transient state. - for i := byte(0); i < 255; i++ { - modify(transState, common.Address{i}, i, 0) - } - - // Write modifications to trie. - if err := transState.FinalizeTx(context.Background(), transTds.TrieStateWriter()); err != nil { - t.Fatal("error while finalizing state", err) - } - - transTds.StartNewBuffer() - - // Overwrite all the data with new values in the transient database. - for i := byte(0); i < 255; i++ { - modify(transState, common.Address{i}, i, 99) - modify(finalState, common.Address{i}, i, 99) - } - - // Commit and cross check the databases. 
- - if err := transState.FinalizeTx(context.Background(), transTds.TrieStateWriter()); err != nil { - t.Fatal("error while finalizing state", err) - } - - if _, err := transTds.ComputeTrieRoots(); err != nil { - t.Fatal("error while ComputeTrieRoots", err) - } - - transTds.SetBlockNr(1) - - if err := transState.CommitBlock(context.Background(), transTds.DbStateWriter()); err != nil { - t.Fatal("failed to commit transition state", err) - } - - if err := finalState.FinalizeTx(context.Background(), finalTds.TrieStateWriter()); err != nil { - t.Fatal("error while finalizing state", err) - } - - if _, err := finalTds.ComputeTrieRoots(); err != nil { - t.Fatal("error while ComputeTrieRoots", err) - } - - finalTds.SetBlockNr(1) - if err := finalState.CommitBlock(context.Background(), finalTds.DbStateWriter()); err != nil { - t.Fatalf("failed to commit final state: %v", err) - } - finalKeys, err2 := finalDb.Keys() - if err2 != nil { - t.Fatal(err2) - } - for i := 0; i < len(finalKeys); i += 2 { - if string(finalKeys[i]) == dbutils.PreimagePrefix { - continue - } - if _, err := transDb.Get(string(finalKeys[i]), finalKeys[i+1]); err != nil { - val, _ := finalDb.Get(string(finalKeys[i]), finalKeys[i+1]) - t.Errorf("entry missing from the transition database: %x:%x -> %x", finalKeys[i], finalKeys[i+1], val) - } - } - transKeys, err := transDb.Keys() - if err != nil { - t.Fatal(err) - } - for i := 0; i < len(transKeys); i += 2 { - if string(transKeys[i]) == dbutils.PreimagePrefix { - continue - } - if _, err := finalDb.Get(string(transKeys[i]), transKeys[i+1]); err != nil { - val, _ := transDb.Get(string(transKeys[i]), transKeys[i+1]) - t.Errorf("entry missing in the transition database: %x:%x -> %x", transKeys[i], transKeys[i+1], val) - } - } -} - func TestSnapshotRandom(t *testing.T) { config := &quick.Config{MaxCount: 1000} err := quick.Check((*snapshotTest).run, config) diff --git a/eth/integrity/trie.go b/eth/integrity/trie.go index ba8f9ba9033..439820df5e4 100644 --- a/eth/integrity/trie.go +++ b/eth/integrity/trie.go @@ -30,7 +30,18 @@ func Trie(tx ethdb.Tx, slowChecks bool, quit <-chan struct{}) { buf2 := make([]byte, 256) { - c, trieAcc2, accC := tx.Cursor(dbutils.TrieOfAccountsBucket), tx.Cursor(dbutils.TrieOfAccountsBucket), tx.Cursor(dbutils.HashedAccountsBucket) + c, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + if err != nil { + panic(err) + } + trieAcc2, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + if err != nil { + panic(err) + } + accC, err := tx.Cursor(dbutils.HashedAccountsBucket) + if err != nil { + panic(err) + } defer c.Close() defer trieAcc2.Close() defer accC.Close() @@ -128,7 +139,18 @@ func Trie(tx ethdb.Tx, slowChecks bool, quit <-chan struct{}) { } } { - c, trieStorage, storageC := tx.Cursor(dbutils.TrieOfStorageBucket), tx.Cursor(dbutils.TrieOfStorageBucket), tx.Cursor(dbutils.HashedStorageBucket) + c, err := tx.Cursor(dbutils.TrieOfStorageBucket) + if err != nil { + panic(err) + } + trieStorage, err := tx.Cursor(dbutils.TrieOfStorageBucket) + if err != nil { + panic(err) + } + storageC, err := tx.Cursor(dbutils.HashedStorageBucket) + if err != nil { + panic(err) + } defer c.Close() defer trieStorage.Close() defer storageC.Close() diff --git a/eth/stagedsync/stage_log_index.go b/eth/stagedsync/stage_log_index.go index b7d250fcd29..1aa1d001081 100644 --- a/eth/stagedsync/stage_log_index.go +++ b/eth/stagedsync/stage_log_index.go @@ -80,7 +80,10 @@ func promoteLogIndex(logPrefix string, db ethdb.Database, start uint64, bufLimit tx := db.(ethdb.HasTx).Tx() topics := 
map[string]*roaring.Bitmap{} addresses := map[string]*roaring.Bitmap{} - logs := tx.Cursor(dbutils.Log) + logs, err := tx.Cursor(dbutils.Log) + if err != nil { + return err + } defer logs.Close() checkFlushEvery := time.NewTicker(flushEvery) defer checkFlushEvery.Stop() diff --git a/ethdb/database_test.go b/ethdb/database_test.go index 91201a635a4..7ffdbf70204 100644 --- a/ethdb/database_test.go +++ b/ethdb/database_test.go @@ -156,7 +156,7 @@ func testNoPanicAfterDbClosed(db Database, t *testing.T) { }) }() time.Sleep(time.Millisecond) // wait to check that db.Close doesn't panic, but wait when read tx finished - err = writeTx.RwCursor(dbutils.Buckets[0]).Put([]byte{1}, []byte{1}) + err = writeTx.Put(dbutils.Buckets[0], []byte{1}, []byte{1}) require.NoError(t, err) err = writeTx.Commit(context.Background()) require.NoError(t, err) diff --git a/ethdb/kv_abstract.go b/ethdb/kv_abstract.go index 98c5155b2d6..3583a9930b5 100644 --- a/ethdb/kv_abstract.go +++ b/ethdb/kv_abstract.go @@ -108,8 +108,8 @@ type Tx interface { // // Cursor, also provides a grain of magic - it can use a declarative configuration - and automatically break // long keys into DupSort key/values. See docs for `bucket.go:BucketConfigItem` - Cursor(bucket string) Cursor - CursorDupSort(bucket string) CursorDupSort // CursorDupSort - can be used if bucket has lmdb.DupSort flag + Cursor(bucket string) (Cursor, error) + CursorDupSort(bucket string) (CursorDupSort, error) // CursorDupSort - can be used if bucket has lmdb.DupSort flag GetOne(bucket string, key []byte) (val []byte, err error) HasOne(bucket string, key []byte) (bool, error) @@ -132,10 +132,13 @@ type Tx interface { type RwTx interface { Tx - RwCursor(bucket string) RwCursor - RwCursorDupSort(bucket string) RwCursorDupSort + RwCursor(bucket string) (RwCursor, error) + RwCursorDupSort(bucket string) (RwCursorDupSort, error) IncrementSequence(bucket string, amount uint64) (uint64, error) + + Put(bucket string, k, v []byte) error + Delete(bucket string, k, v []byte) error } // BucketMigrator used for buckets migration, don't use it in usual app code diff --git a/ethdb/kv_abstract_test.go b/ethdb/kv_abstract_test.go index 90230e21ac7..ef0e23504a6 100644 --- a/ethdb/kv_abstract_test.go +++ b/ethdb/kv_abstract_test.go @@ -91,8 +91,10 @@ func TestManagedTx(t *testing.T) { require.NoError(t, err) defer tx.Rollback() - c := tx.RwCursor(bucket1) - c1 := tx.RwCursor(bucket2) + c, err := tx.RwCursor(bucket1) + require.NoError(t, err) + c1, err := tx.RwCursor(bucket2) + require.NoError(t, err) require.NoError(t, c.Append([]byte{0}, []byte{1})) require.NoError(t, c1.Append([]byte{0}, []byte{1})) require.NoError(t, c.Append([]byte{0, 0, 0, 0, 0, 1}, []byte{1})) // prefixes of len=FromLen for DupSort test (other keys must be ", common.Bytes2Hex(v)) @@ -1147,11 +1135,9 @@ func TestSnapshot2WritablePrevAndDeleteKey(t *testing.T) { MustOpen() tx, err := kv.BeginRw(context.Background()) - if err != nil { - t.Fatal(err) - } - - c := tx.Cursor(dbutils.PlainStateBucket) + require.NoError(t, err) + c, err := tx.Cursor(dbutils.PlainStateBucket) + require.NoError(t, err) //get first correct k&v k, v, err := c.First() @@ -1163,15 +1149,11 @@ func TestSnapshot2WritablePrevAndDeleteKey(t *testing.T) { for i := 1; i < len(data); i++ { k, v, err = c.Next() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) checkKV(t, k, v, data[i].K, data[i].V) k, v, err = c.Current() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) checkKV(t, k, v, data[i].K, data[i].V) } @@ -1215,12 
+1197,11 @@ func TestSnapshot2WritableTxNextAndPrevWithDeleteAndPutKeys(t *testing.T) { MustOpen() tx, err := kv.BeginRw(context.Background()) - if err != nil { - t.Fatal(err) - } - - c := tx.Cursor(dbutils.PlainStateBucket) - deleteCursor := tx.RwCursor(dbutils.PlainStateBucket) + require.NoError(t, err) + c, err := tx.Cursor(dbutils.PlainStateBucket) + require.NoError(t, err) + deleteCursor, err := tx.RwCursor(dbutils.PlainStateBucket) + require.NoError(t, err) //get first correct k&v k, v, err := c.First() @@ -1294,7 +1275,10 @@ func printBucket(kv RwKV, bucket string) { fmt.Println("-Print bucket", bucket) }() err := kv.View(context.Background(), func(tx Tx) error { - c := tx.Cursor(bucket) + c, err := tx.Cursor(bucket) + if err != nil { + return err + } k, v, err := c.First() if err != nil { panic(fmt.Errorf("first err: %w", err)) diff --git a/ethdb/kv_util.go b/ethdb/kv_util.go index d90449c63ea..7345612edaa 100644 --- a/ethdb/kv_util.go +++ b/ethdb/kv_util.go @@ -73,7 +73,10 @@ func MultiPut(tx RwTx, tuples ...[]byte) error { for ; bucketEnd < len(tuples) && bytes.Equal(tuples[bucketEnd], tuples[bucketStart]); bucketEnd += 3 { } bucketName := string(tuples[bucketStart]) - c := tx.RwCursor(bucketName) + c, err := tx.RwCursor(bucketName) + if err != nil { + return err + } // move cursor to a first element in batch // if it's nil, it means all keys in batch gonna be inserted after end of bucket (batch is sorted and has no duplicates here) diff --git a/ethdb/mutation.go b/ethdb/mutation.go index ca3a4560c78..00f88b56efd 100644 --- a/ethdb/mutation.go +++ b/ethdb/mutation.go @@ -233,7 +233,12 @@ func (m *mutation) doCommit(tx RwTx) error { if c != nil { c.Close() } - c = tx.RwCursor(mi.table) + var err error + c, err = tx.RwCursor(mi.table) + if err != nil { + innerErr = err + return false + } prevTable = mi.table firstKey, _, err := c.Seek(mi.key) if err != nil { diff --git a/ethdb/object_db.go b/ethdb/object_db.go index 8e0d7b88bd9..4c7a2bf2103 100644 --- a/ethdb/object_db.go +++ b/ethdb/object_db.go @@ -96,7 +96,7 @@ func Open(path string, readOnly bool) (*ObjectDatabase, error) { // Put inserts or updates a single entry. func (db *ObjectDatabase) Put(bucket string, key []byte, value []byte) error { err := db.kv.Update(context.Background(), func(tx RwTx) error { - return tx.RwCursor(bucket).Put(key, value) + return tx.Put(bucket, key, value) }) return err } @@ -104,7 +104,11 @@ func (db *ObjectDatabase) Put(bucket string, key []byte, value []byte) error { // Append appends a single entry to the end of the bucket. func (db *ObjectDatabase) Append(bucket string, key []byte, value []byte) error { err := db.kv.Update(context.Background(), func(tx RwTx) error { - return tx.RwCursor(bucket).Append(key, value) + c, err := tx.RwCursor(bucket) + if err != nil { + return err + } + return c.Append(key, value) }) return err } @@ -112,7 +116,11 @@ func (db *ObjectDatabase) Append(bucket string, key []byte, value []byte) error // AppendDup appends a single entry to the end of the bucket. 
func (db *ObjectDatabase) AppendDup(bucket string, key []byte, value []byte) error { err := db.kv.Update(context.Background(), func(tx RwTx) error { - return tx.RwCursorDupSort(bucket).AppendDup(key, value) + c, err := tx.RwCursorDupSort(bucket) + if err != nil { + return err + } + return c.AppendDup(key, value) }) return err } @@ -189,7 +197,11 @@ func (db *ObjectDatabase) Get(bucket string, key []byte) ([]byte, error) { func (db *ObjectDatabase) Last(bucket string) ([]byte, []byte, error) { var key, value []byte if err := db.kv.View(context.Background(), func(tx Tx) error { - k, v, err := tx.Cursor(bucket).Last() + c, err := tx.Cursor(bucket) + if err != nil { + return err + } + k, v, err := c.Last() if err != nil { return err } @@ -205,7 +217,11 @@ func (db *ObjectDatabase) Last(bucket string) ([]byte, []byte, error) { func (db *ObjectDatabase) Walk(bucket string, startkey []byte, fixedbits int, walker func(k, v []byte) (bool, error)) error { err := db.kv.View(context.Background(), func(tx Tx) error { - return Walk(tx.Cursor(bucket), startkey, fixedbits, walker) + c, err := tx.Cursor(bucket) + if err != nil { + return err + } + return Walk(c, startkey, fixedbits, walker) }) return err } @@ -214,7 +230,7 @@ func (db *ObjectDatabase) Walk(bucket string, startkey []byte, fixedbits int, wa func (db *ObjectDatabase) Delete(bucket string, k, v []byte) error { // Execute the actual operation err := db.kv.Update(context.Background(), func(tx RwTx) error { - return tx.RwCursor(bucket).Delete(k, v) + return tx.Delete(bucket, k, v) }) return err } @@ -284,12 +300,19 @@ func (db *ObjectDatabase) Keys() ([][]byte, error) { for _, name := range dbutils.Buckets { var nameCopy = make([]byte, len(name)) copy(nameCopy, name) - return ForEach(tx.Cursor(name), func(k, _ []byte) (bool, error) { + c, err := tx.Cursor(name) + if err != nil { + return err + } + err = ForEach(c, func(k, _ []byte) (bool, error) { var kCopy = make([]byte, len(k)) copy(kCopy, k) keys = append(append(keys, nameCopy), kCopy) return true, nil }) + if err != nil { + return err + } } return nil }) @@ -321,9 +344,15 @@ func (db *ObjectDatabase) MemCopy() *ObjectDatabase { for _, name := range dbutils.Buckets { name := name if err := mem.kv.Update(context.Background(), func(writeTx RwTx) error { - newBucketToWrite := writeTx.RwCursor(name) + newBucketToWrite, err := writeTx.RwCursor(name) + if err != nil { + return err + } defer newBucketToWrite.Close() - readC := readTx.Cursor(name) + readC, err := readTx.Cursor(name) + if err != nil { + return err + } defer readC.Close() return ForEach(readC, func(k, v []byte) (bool, error) { if err := newBucketToWrite.Put(common.CopyBytes(k), common.CopyBytes(v)); err != nil { diff --git a/ethdb/remote/remotedbserver/server.go b/ethdb/remote/remotedbserver/server.go index 6c05cb39011..01c5e44c059 100644 --- a/ethdb/remote/remotedbserver/server.go +++ b/ethdb/remote/remotedbserver/server.go @@ -143,7 +143,11 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { } for _, c := range cursors { // restore all cursors position - c.c = tx.Cursor(c.bucket) + var err error + c.c, err = tx.Cursor(c.bucket) + if err != nil { + return err + } switch casted := c.c.(type) { case ethdb.CursorDupSort: v, err := casted.SeekBothRange(c.k, c.v) @@ -176,9 +180,14 @@ func (s *KvServer) Tx(stream remote.KV_TxServer) error { switch in.Op { case remote.Op_OPEN: CursorID++ + var err error + c, err = tx.Cursor(in.BucketName) + if err != nil { + return err + } cursors[CursorID] = &CursorInfo{ bucket: in.BucketName, - c: 
tx.Cursor(in.BucketName), + c: c, } if err := stream.Send(&remote.Pair{CursorID: CursorID}); err != nil { return fmt.Errorf("server-side error: %w", err) diff --git a/ethdb/tx_db.go b/ethdb/tx_db.go index dfa1448b2b7..2cda6ed4394 100644 --- a/ethdb/tx_db.go +++ b/ethdb/tx_db.go @@ -22,7 +22,10 @@ func NewRoTxDb(tx Tx) *roTxDb { } func (m *roTxDb) Get(bucket string, key []byte) ([]byte, error) { - c := m.tx.Cursor(bucket) + c, err := m.tx.Cursor(bucket) + if err != nil { + return nil, err + } defer c.Close() _, v, err := c.SeekExact(key) if err != nil { @@ -35,7 +38,10 @@ func (m *roTxDb) Get(bucket string, key []byte) ([]byte, error) { } func (m *roTxDb) Has(bucket string, key []byte) (bool, error) { - c := m.tx.Cursor(bucket) + c, err := m.tx.Cursor(bucket) + if err != nil { + return false, err + } defer c.Close() _, v, err := c.SeekExact(key) @@ -43,7 +49,10 @@ func (m *roTxDb) Has(bucket string, key []byte) (bool, error) { } func (m *roTxDb) Walk(bucket string, startkey []byte, fixedbits int, walker func([]byte, []byte) (bool, error)) error { - c := m.tx.Cursor(bucket) + c, err := m.tx.Cursor(bucket) + if err != nil { + return err + } defer c.Close() return Walk(c, startkey, fixedbits, walker) } @@ -110,13 +119,17 @@ func (m *TxDb) BeginRO(ctx context.Context) (GetterTx, error) { return batch, nil } -func (m *TxDb) cursor(bucket string) Cursor { +func (m *TxDb) cursor(bucket string) (Cursor, error) { c, ok := m.cursors[bucket] if !ok { - c = m.tx.Cursor(bucket) + var err error + c, err = m.tx.Cursor(bucket) + if err != nil { + return nil, err + } m.cursors[bucket] = c } - return c + return c, nil } func (m *TxDb) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { @@ -129,22 +142,38 @@ func (m *TxDb) ReadSequence(bucket string) (res uint64, err error) { func (m *TxDb) Put(bucket string, key []byte, value []byte) error { m.len += uint64(len(key) + len(value)) - return m.cursor(bucket).(RwCursor).Put(key, value) + c, err := m.cursor(bucket) + if err != nil { + return err + } + return c.(RwCursor).Put(key, value) } func (m *TxDb) Append(bucket string, key []byte, value []byte) error { m.len += uint64(len(key) + len(value)) - return m.cursor(bucket).(RwCursor).Append(key, value) + c, err := m.cursor(bucket) + if err != nil { + return err + } + return c.(RwCursor).Append(key, value) } func (m *TxDb) AppendDup(bucket string, key []byte, value []byte) error { m.len += uint64(len(key) + len(value)) - return m.cursor(bucket).(RwCursorDupSort).AppendDup(key, value) + c, err := m.cursor(bucket) + if err != nil { + return err + } + return c.(RwCursorDupSort).AppendDup(key, value) } func (m *TxDb) Delete(bucket string, k, v []byte) error { m.len += uint64(len(k)) - return m.cursor(bucket).(RwCursor).Delete(k, v) + c, err := m.cursor(bucket) + if err != nil { + return err + } + return c.(RwCursor).Delete(k, v) } func (m *TxDb) NewBatch() DbWithPendingMutations { @@ -178,15 +207,19 @@ func (m *TxDb) RwKV() RwKV { // Last can only be called from the transaction thread func (m *TxDb) Last(bucket string) ([]byte, []byte, error) { - return m.cursor(bucket).Last() + c, err := m.cursor(bucket) + if err != nil { + return []byte{}, nil, err + } + return c.Last() } func (m *TxDb) Get(bucket string, key []byte) ([]byte, error) { - //if metrics.Enabled { - // defer dbGetTimer.UpdateSince(time.Now()) - //} - - _, v, err := m.cursor(bucket).SeekExact(key) + c, err := m.cursor(bucket) + if err != nil { + return nil, err + } + _, v, err := c.SeekExact(key) if err != nil { return nil, err } @@ 
-230,7 +263,11 @@ func (m *TxDb) Walk(bucket string, startkey []byte, fixedbits int, walker func([ if ok { delete(m.cursors, bucket) } else { - c = m.tx.Cursor(bucket) + var err error + c, err = m.tx.Cursor(bucket) + if err != nil { + return err + } } defer func() { // put cursor back to pool if can if _, ok = m.cursors[bucket]; ok { diff --git a/migrations/dupsort_state_test.go b/migrations/dupsort_state_test.go index d1f6aa70a99..b3780b48410 100644 --- a/migrations/dupsort_state_test.go +++ b/migrations/dupsort_state_test.go @@ -48,10 +48,10 @@ func TestDupSortHashState(t *testing.T) { require.NoError(err) defer tx.Rollback() - c := tx.(ethdb.HasTx).Tx().CursorDupSort(dbutils.HashedStorageBucket) - // test low-level data layout + c, err := tx.(ethdb.HasTx).Tx().CursorDupSort(dbutils.HashedStorageBucket) require.NoError(err) + // test low-level data layout keyLen := common.HashLength + common.IncarnationLength v, err = c.SeekBothRange([]byte(storageKey)[:keyLen], []byte(storageKey)[keyLen:]) require.NoError(err) @@ -102,7 +102,8 @@ func TestDupSortPlainState(t *testing.T) { require.NoError(err) defer tx.Rollback() - c := tx.(ethdb.HasTx).Tx().CursorDupSort(dbutils.PlainStateBucket) + c, err := tx.(ethdb.HasTx).Tx().CursorDupSort(dbutils.PlainStateBucket) + require.NoError(err) _, v, err = c.SeekExact([]byte(accKey)) require.NoError(err) require.Equal([]byte{1}, v) diff --git a/migrations/header_prefix_test.go b/migrations/header_prefix_test.go index 27deebf069a..0a8ea0c0a0b 100644 --- a/migrations/header_prefix_test.go +++ b/migrations/header_prefix_test.go @@ -24,7 +24,10 @@ func TestHeaderPrefix(t *testing.T) { if err != nil { return err } - c := tx.RwCursor(dbutils.HeaderPrefixOld) + c, err := tx.RwCursor(dbutils.HeaderPrefixOld) + if err != nil { + return err + } for i := uint64(0); i < 10; i++ { //header err = c.Put(dbutils.HeaderKey(i, common.Hash{uint8(i)}), []byte("header "+strconv.Itoa(int(i)))) diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index 1826c7181cc..0382957e87b 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -116,7 +116,10 @@ func newPersistentDB(path string) (*DB, error) { var blob []byte if err := kv.Update(context.Background(), func(tx ethdb.RwTx) error { - c := tx.RwCursor(dbutils.InodesBucket) + c, err := tx.RwCursor(dbutils.InodesBucket) + if err != nil { + return err + } _, v, errGet := c.SeekExact([]byte(dbVersionKey)) if errGet != nil { return errGet @@ -231,7 +234,7 @@ func (db *DB) storeInt64(key []byte, n int64) error { blob := make([]byte, binary.MaxVarintLen64) blob = blob[:binary.PutVarint(blob, n)] return db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.RwCursor(dbutils.InodesBucket).Put(common.CopyBytes(key), blob) + return tx.Put(dbutils.InodesBucket, common.CopyBytes(key), blob) }) } @@ -258,7 +261,7 @@ func (db *DB) storeUint64(key []byte, n uint64) error { blob := make([]byte, binary.MaxVarintLen64) blob = blob[:binary.PutUvarint(blob, n)] return db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.RwCursor(dbutils.InodesBucket).Put(common.CopyBytes(key), blob) + return tx.Put(dbutils.InodesBucket, common.CopyBytes(key), blob) }) } @@ -304,7 +307,7 @@ func (db *DB) UpdateNode(node *Node) error { return err } if err := db.kv.Update(context.Background(), func(tx ethdb.RwTx) error { - return tx.RwCursor(dbutils.InodesBucket).Put(nodeKey(node.ID()), blob) + return tx.Put(dbutils.InodesBucket, nodeKey(node.ID()), blob) }); err != nil { return err } @@ -332,7 +335,10 @@ func (db *DB) 
DeleteNode(id ID) { func deleteRange(db ethdb.RwKV, prefix []byte) { if err := db.Update(context.Background(), func(tx ethdb.RwTx) error { - c := tx.RwCursor(dbutils.InodesBucket) + c, err := tx.RwCursor(dbutils.InodesBucket) + if err != nil { + return err + } for k, _, err := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, _, err = c.Next() { if err != nil { return err @@ -384,7 +390,10 @@ func (db *DB) expireNodes() { ) var toDelete [][]byte if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.InodesBucket) + c, err := tx.Cursor(dbutils.InodesBucket) + if err != nil { + return err + } p := []byte(dbNodePrefix) var prevId ID var empty = true @@ -514,7 +523,10 @@ func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node { ) if err := db.kv.View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.InodesBucket) + c, err := tx.Cursor(dbutils.InodesBucket) + if err != nil { + return err + } seek: for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ { // Seek to a random entry. The first byte is incremented by a diff --git a/turbo/snapshotsync/postprocessing_test.go b/turbo/snapshotsync/postprocessing_test.go index dc9f20afb2e..b72527fb63e 100644 --- a/turbo/snapshotsync/postprocessing_test.go +++ b/turbo/snapshotsync/postprocessing_test.go @@ -25,12 +25,15 @@ func TestHeadersGenerateIndex(t *testing.T) { if innerErr != nil { panic(innerErr) } - innerErr = tx.RwCursor(dbutils.HeadersBucket).Put(dbutils.HeaderKey(header.Number.Uint64(), header.Hash()), headerBytes) + innerErr = tx.Put(dbutils.HeadersBucket, dbutils.HeaderKey(header.Number.Uint64(), header.Hash()), headerBytes) if innerErr != nil { panic(innerErr) } } - c := tx.RwCursor(dbutils.HeadersSnapshotInfoBucket) + c, err := tx.RwCursor(dbutils.HeadersSnapshotInfoBucket) + if err != nil { + return err + } innerErr := c.Put([]byte(dbutils.SnapshotHeadersHeadHash), headers[len(headers)-1].Hash().Bytes()) if innerErr != nil { return innerErr diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 59a56310903..56114189dc9 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -436,7 +436,10 @@ func (hd *HeaderDownload) SetPreverifiedHashes(preverifiedHashes map[common.Hash func (hd *HeaderDownload) RecoverFromDb(db ethdb.Database) error { err := db.(ethdb.HasRwKV).RwKV().View(context.Background(), func(tx ethdb.Tx) error { - c := tx.Cursor(dbutils.HeadersBucket) + c, err := tx.Cursor(dbutils.HeadersBucket) + if err != nil { + return err + } // Take hd.persistedLinkLimit headers (with the highest heights) as links for k, v, err := c.Last(); k != nil && hd.persistedLinkQueue.Len() < hd.persistedLinkLimit; k, v, err = c.Prev() { if err != nil { diff --git a/turbo/trie/flatdb_sub_trie_loader.go b/turbo/trie/flatdb_sub_trie_loader.go index b801a21ee53..557298ee425 100644 --- a/turbo/trie/flatdb_sub_trie_loader.go +++ b/turbo/trie/flatdb_sub_trie_loader.go @@ -603,7 +603,10 @@ func (fstl *FlatDbSubTrieLoader) LoadSubTries() (SubTries, error) { defer fstl.tx.Rollback() } tx := fstl.tx - c := tx.Cursor(dbutils.CurrentStateBucketOld2) + c, err := tx.Cursor(dbutils.CurrentStateBucketOld2) + if err != nil { + return SubTries{}, err + } var filter = func(k []byte) (bool, error) { if fstl.rl.Retain(k) { @@ -621,7 +624,11 @@ func (fstl *FlatDbSubTrieLoader) LoadSubTries() (SubTries, error) { return true, nil } - ih := NewIHCursor2(NewFilterCursor2(filter, 
tx.CursorDupSort(dbutils.IntermediateTrieHashBucketOld2))) + c1, err := tx.CursorDupSort(dbutils.IntermediateTrieHashBucketOld2) + if err != nil { + return SubTries{}, err + } + ih := NewIHCursor2(NewFilterCursor2(filter, c1)) if err := fstl.iteration(c, ih, true /* first */); err != nil { return SubTries{}, err } diff --git a/turbo/trie/structural_branch_test.go b/turbo/trie/structural_branch_test.go index 1c25c006f07..3abc51683dd 100644 --- a/turbo/trie/structural_branch_test.go +++ b/turbo/trie/structural_branch_test.go @@ -63,7 +63,8 @@ func TestIHCursor(t *testing.T) { integrity.Trie(tx, false, nil) - cursor := tx.Cursor(dbutils.TrieOfAccountsBucket) + cursor, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + require.NoError(err) rl := trie.NewRetainList(0) rl.AddHex(common.FromHex("01")) rl.AddHex(common.FromHex("0101")) diff --git a/turbo/trie/trie_root.go b/turbo/trie/trie_root.go index 866a656cae4..78f8b330124 100644 --- a/turbo/trie/trie_root.go +++ b/turbo/trie/trie_root.go @@ -203,11 +203,21 @@ func (l *FlatDBTrieLoader) CalcTrieRoot(db ethdb.Database, prefix []byte, quit < tx = txDB.(ethdb.HasTx).Tx() } - accC := tx.Cursor(dbutils.HashedAccountsBucket) + accC, err := tx.Cursor(dbutils.HashedAccountsBucket) + if err != nil { + return EmptyRoot, err + } defer accC.Close() accs := NewStateCursor(accC, quit) - trieAccC, trieStorageC := tx.Cursor(dbutils.TrieOfAccountsBucket), tx.CursorDupSort(dbutils.TrieOfStorageBucket) + trieAccC, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + if err != nil { + return EmptyRoot, err + } defer trieAccC.Close() + trieStorageC, err := tx.CursorDupSort(dbutils.TrieOfStorageBucket) + if err != nil { + return EmptyRoot, err + } defer trieStorageC.Close() var canUse = func(prefix []byte) (bool, []byte) { @@ -217,7 +227,10 @@ func (l *FlatDBTrieLoader) CalcTrieRoot(db ethdb.Database, prefix []byte, quit < accTrie := AccTrie(canUse, l.hc, trieAccC, quit) storageTrie := StorageTrie(canUse, l.shc, trieStorageC, quit) - ss := tx.CursorDupSort(dbutils.HashedStorageBucket) + ss, err := tx.CursorDupSort(dbutils.HashedStorageBucket) + if err != nil { + return EmptyRoot, err + } defer ss.Close() logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() @@ -1821,11 +1834,25 @@ func (l *FlatDBTrieLoader) CalcSubTrieRootOnCache(db ethdb.Database, prefix []by tx = txDB.(ethdb.HasTx).Tx() } - accsC, stC := tx.Cursor(dbutils.HashedAccountsBucket), tx.Cursor(dbutils.HashedStorageBucket) + accsC, err := tx.Cursor(dbutils.HashedAccountsBucket) + if err != nil { + return EmptyRoot, err + } defer accsC.Close() + stC, err := tx.Cursor(dbutils.HashedStorageBucket) + if err != nil { + return EmptyRoot, err + } defer stC.Close() - trieAccC, trieStorageC := tx.Cursor(dbutils.TrieOfAccountsBucket), tx.Cursor(dbutils.TrieOfStorageBucket) + trieAccC, err := tx.Cursor(dbutils.TrieOfAccountsBucket) + if err != nil { + return EmptyRoot, err + } defer trieAccC.Close() + trieStorageC, err := tx.Cursor(dbutils.TrieOfStorageBucket) + if err != nil { + return EmptyRoot, err + } defer trieStorageC.Close() var canUse = func(prefix []byte) (bool, []byte) { retain, nextCreated := l.rd.RetainWithMarker(prefix) @@ -1833,7 +1860,10 @@ func (l *FlatDBTrieLoader) CalcSubTrieRootOnCache(db ethdb.Database, prefix []by } trieStorage := StorageTrie(canUse, l.shc, trieStorageC, quit) - ss := tx.CursorDupSort(dbutils.HashedStorageBucket) + ss, err := tx.CursorDupSort(dbutils.HashedStorageBucket) + if err != nil { + return EmptyRoot, err + } defer ss.Close() _ = trieStorageC _ = stC