Remove ancients and some old skipped tests (erigontech#1668)
vorot93 authored and sam committed Apr 5, 2021
1 parent 30faf17 commit 03c15d2
Showing 4 changed files with 0 additions and 355 deletions.
11 changes: 0 additions & 11 deletions ethdb/mutation.go
@@ -349,17 +349,6 @@ func (m *mutation) SetRwKV(kv RwKV) {
m.db.(HasRwKV).SetRwKV(kv)
}

// [TURBO-GETH] Freezer support (not implemented yet)
// Ancients returns an error as we don't have a backing chain freezer.
func (m *mutation) Ancients() (uint64, error) {
return 0, errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (m *mutation) TruncateAncients(items uint64) error {
return errNotSupported
}

func NewRWDecorator(db Database) *RWCounterDecorator {
return &RWCounterDecorator{
db,
11 changes: 0 additions & 11 deletions ethdb/object_db.go
@@ -396,17 +396,6 @@ func (db *ObjectDatabase) Begin(ctx context.Context, flags TxFlags) (DbWithPendi
return batch, nil
}

// [TURBO-GETH] Freezer support (not implemented yet)
// Ancients returns an error as we don't have a backing chain freezer.
func (db *ObjectDatabase) Ancients() (uint64, error) {
return 0, errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (db *ObjectDatabase) TruncateAncients(items uint64) error {
return errNotSupported
}

// Type expecting a sequence of triplets: dbi, key, value, ....
// It sorts entries by dbi name, then by key within each dbi cluster
type MultiPutTuples [][]byte
11 changes: 0 additions & 11 deletions ethdb/tx_db.go
@@ -334,17 +334,6 @@ func (m *TxDb) panicOnEmptyDB() {
}
}

// [TURBO-GETH] Freezer support (not implemented yet)
// Ancients returns an error as we don't have a backing chain freezer.
func (m *TxDb) Ancients() (uint64, error) {
return 0, errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (m *TxDb) TruncateAncients(items uint64) error {
return errNotSupported
}

func (m *TxDb) BucketExists(name string) (bool, error) {
exists := false
migrator, ok := m.tx.(BucketMigrator)
322 changes: 0 additions & 322 deletions turbo/stages/blockchain_test.go
@@ -555,328 +555,6 @@ func testBadHashes(t *testing.T, full bool) {
}
}

// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }

func testReorgBadHashes(t *testing.T, full bool) {
t.Skip("Broken by removal BadHashes check at the creation of blockchain")
// Create a pristine chain and database
db, _, err := newCanonical(ethash.NewFaker(), 0, full)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
defer db.Close()

// Create a chain, import and ban afterwards
headers := makeHeaderChain(rawdb.ReadCurrentHeader(db), 4, ethash.NewFaker(), db, 10)
blocks := makeBlockChain(rawdb.ReadCurrentBlock(db), 4, ethash.NewFaker(), db, 10)

if full {
_, err = stagedsync.InsertBlocksInStages(db, ethdb.DefaultStorageMode, params.AllEthashProtocolChanges, &vm.Config{}, ethash.NewFaker(), blocks, true /* checkRoot */)
if err != nil {
t.Errorf("failed to import blocks: %v", err)
}
if rawdb.ReadCurrentBlock(db).Hash() != blocks[3].Hash() {
t.Errorf("last block hash mismatch: have: %x, want %x", rawdb.ReadCurrentBlock(db).Hash(), blocks[3].Header().Hash())
}
core.BadHashes[blocks[3].Header().Hash()] = true
defer func() { delete(core.BadHashes, blocks[3].Header().Hash()) }()
} else {
_, _, _, err = stagedsync.InsertHeadersInStages(db, params.AllEthashProtocolChanges, ethash.NewFaker(), headers)
if err != nil {
t.Errorf("failed to import headers: %v", err)
}
if rawdb.ReadCurrentHeader(db).Hash() != headers[3].Hash() {
t.Errorf("last header hash mismatch: have: %x, want %x", rawdb.ReadCurrentHeader(db).Hash(), headers[3].Hash())
}
core.BadHashes[headers[3].Hash()] = true
defer func() { delete(core.BadHashes, headers[3].Hash()) }()
}

}

// Tests that fast importing a block chain produces the same chain data as the
// classical full block processing.
func TestFastVsFullChains(t *testing.T) {
t.Skip("should be restored. skipped for turbo-geth")

// Configure and generate a sample block chain
gendb := ethdb.NewMemDatabase()
defer gendb.Close()
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(1000000000)
gspec = &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{address: {Balance: funds}},
}
genesis = gspec.MustCommit(gendb)
signer = types.LatestSigner(gspec.Config)
)
blocks, receipts, err1 := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1024, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{0x00})

// If the block number is a multiple of 3, send a few bonus transactions to the miner
if i%3 == 2 {
for j := 0; j < i%4+1; j++ {
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, uint256.NewInt().SetUint64(1000), params.TxGas, nil, nil), signer, key)
if err != nil {
panic(err)
}
block.AddTx(tx)
}
}
// If the block number is a multiple of 5, add a few bonus uncles to the block
if i%5 == 4 { // i is zero-based, so block number i+1 is a multiple of 5
block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
}
}, false /* intermediateHashes */)
if err1 != nil {
t.Fatalf("generate chain: %v", err1)
}
// Import the chain as an archive node for the comparison baseline
archiveDb := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
cacheConfig := &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
NoHistory: false,
Pruning: false,
}
txCacher := core.NewTxSenderCacher(1)
archive, _ := core.NewBlockChain(archiveDb, cacheConfig, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, txCacher)
defer archive.Stop()

if n, err := archive.InsertChain(context.Background(), blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
// Fast import the chain as a non-archive node to test
fastDb := ethdb.NewMemDatabase()
defer fastDb.Close()
gspec.MustCommit(fastDb)
txCacherFast := core.NewTxSenderCacher(1)
fast, _ := core.NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, txCacherFast)
defer fast.Stop()

headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
// Freezer-style fast import of the chain.
frdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.RemoveAll(frdir) // the freezer dir is non-empty, so plain os.Remove would silently fail
ancientDb, err := ethdb.NewDatabaseWithFreezer(ethdb.NewMemDatabase(), frdir, "")
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
txCacherAncient := core.NewTxSenderCacher(1)
ancient, _ := core.NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, txCacherAncient)
defer ancient.Stop()

if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
// Iterate over all chain data components, and cross-reference them
for i := 0; i < len(blocks); i++ {
num, hash := blocks[i].NumberU64(), blocks[i].Hash()

if ftd, atd := fast.GetTdByHash(hash), archive.GetTdByHash(hash); ftd.Cmp(atd) != 0 {
t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
}
if antd, artd := ancient.GetTdByHash(hash), archive.GetTdByHash(hash); antd.Cmp(artd) != 0 {
t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
}
if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader)
}
if anheader, arheader := ancient.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); anheader.Hash() != arheader.Hash() {
t.Errorf("block #%d [%x]: header mismatch: ancientdb %v, archivedb %v", num, hash, anheader, arheader)
}
if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() {
t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock)
} else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(arblock.Transactions()) || types.DeriveSha(anblock.Transactions()) != types.DeriveSha(arblock.Transactions()) {
t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions())
} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles())
}
if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash)), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash)), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash)); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) || types.DeriveSha(anreceipts) != types.DeriveSha(areceipts) {
t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
}
}
// Check that the canonical chains are the same between the databases
for i := 0; i < len(blocks)+1; i++ {
fhash, err := rawdb.ReadCanonicalHash(fastDb, uint64(i))
if err != nil {
panic(err)
}
ahash, err := rawdb.ReadCanonicalHash(archiveDb, uint64(i))
if err != nil {
panic(err)
}
if fhash != ahash {
t.Errorf("block #%d: canonical hash mismatch: fastdb %v, archivedb %v", i, fhash, ahash)
}
anhash, err := rawdb.ReadCanonicalHash(ancientDb, uint64(i))
if err != nil {
panic(err)
}
arhash, err := rawdb.ReadCanonicalHash(archiveDb, uint64(i))
if err != nil {
panic(err)
}
if anhash != arhash {
t.Errorf("block #%d: canonical hash mismatch: ancientdb %v, archivedb %v", i, anhash, arhash)
}
}
}

// Tests that various import methods move the chain head pointers to the correct
// positions.
func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Skip("should be restored. skipped for turbo-geth")
// Configure and generate a sample block chain
gendb := ethdb.NewMemDatabase()
defer gendb.Close()
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(1000000000)
gspec = &core.Genesis{Config: params.TestChainConfig, Alloc: core.GenesisAlloc{address: {Balance: funds}}}
genesis = gspec.MustCommit(gendb)
)
height := uint64(1024)
blocks, receipts, err := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil, false /* intermediateHashes */)
if err != nil {
t.Fatalf("generate chain: %v", err)
}

// makeDb creates a db instance for testing.
makeDb := func() (*ethdb.ObjectDatabase, func()) {
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
db, err := ethdb.NewDatabaseWithFreezer(ethdb.NewMemDatabase(), dir, "")
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(db)
return db, func() { os.RemoveAll(dir) }
}
// Configure a subchain to roll back
remove := blocks[height/2].NumberU64()

// Create a small assertion method to check the three heads
assert := func(t *testing.T, kind string, chain *core.BlockChain, header uint64, fast uint64, block uint64) {
t.Helper()

if num := chain.CurrentBlock().NumberU64(); num != block {
t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
}
if num := chain.CurrentFastBlock().NumberU64(); num != fast {
t.Errorf("%s head fast-block mismatch: have #%v, want #%v", kind, num, fast)
}
if num := chain.CurrentHeader().Number.Uint64(); num != header {
t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
}
}
// Import the chain as an archive node and ensure all pointers are updated
archiveDb := ethdb.NewMemDatabase()
defer archiveDb.Close()

gspec.MustCommit(archiveDb)

cacheConfig := &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
NoHistory: false,
}
txCacherArchive := core.NewTxSenderCacher(1)
archive, _ := core.NewBlockChain(archiveDb, cacheConfig, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, txCacherArchive)
if n, err := archive.InsertChain(context.Background(), blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
defer archive.Stop()

assert(t, "archive", archive, height, height, height)
archive.SetHead(remove - 1) //nolint:errcheck
assert(t, "archive", archive, height/2, height/2, height/2)

// Import the chain as a non-archive node and ensure all pointers are updated
fastDb, delfn := makeDb()
defer delfn()
txCacherFast := core.NewTxSenderCacher(1)
fast, _ := core.NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, txCacherFast)
defer fast.Stop()

headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "fast", fast, height, height, 0)
fast.SetHead(remove - 1) //nolint:errcheck
assert(t, "fast", fast, height/2, height/2, 0)

// Import the chain as an ancient-first node and ensure all pointers are updated
ancientDb, delfn := makeDb()
defer delfn()
txCacherAncient := core.NewTxSenderCacher(1)
ancient, _ := core.NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, txCacherAncient)
defer ancient.Stop()

if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "ancient", ancient, height, height, 0)
ancient.SetHead(remove - 1) //nolint:errcheck
assert(t, "ancient", ancient, 0, 0, 0)

if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
}
// Import the chain as a light node and ensure all pointers are updated
lightDb, delfn := makeDb()
defer delfn()
txCacherLight := core.NewTxSenderCacher(1)
light, _ := core.NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, txCacherLight)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
defer light.Stop()

assert(t, "light", light, height, 0, 0)
light.SetHead(remove - 1) //nolint:errcheck
assert(t, "light", light, height/2, 0, 0)
}

// Tests that chain reorganisations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
db, db2 := ethdb.NewMemDatabase(), ethdb.NewMemDatabase()