From f4387ed8e9e7b377719c744a78b63ed2d2a511ca Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Fri, 4 Aug 2023 13:35:10 +0200 Subject: [PATCH] itest: add re-org test cases --- itest/assertions.go | 97 ++++++++- itest/re-org_test.go | 391 +++++++++++++++++++++++++++++++++++++ itest/test_list_on_test.go | 12 ++ 3 files changed, 493 insertions(+), 7 deletions(-) create mode 100644 itest/re-org_test.go diff --git a/itest/assertions.go b/itest/assertions.go index 73317c979..25e823b09 100644 --- a/itest/assertions.go +++ b/itest/assertions.go @@ -68,13 +68,13 @@ func assetAnchorCheck(txid, blockHash chainhash.Hash) assetCheck { if a.ChainAnchor.AnchorTxid != txid.String() { return fmt.Errorf("unexpected asset anchor TXID, got "+ - "%x wanted %x", a.ChainAnchor.AnchorTxid, + "%v wanted %x", a.ChainAnchor.AnchorTxid, txid[:]) } if a.ChainAnchor.AnchorBlockHash != blockHash.String() { return fmt.Errorf("unexpected asset anchor block "+ - "hash, got %x wanted %x", + "hash, got %v wanted %x", a.ChainAnchor.AnchorBlockHash, blockHash[:]) } @@ -201,6 +201,39 @@ func commitmentKey(t *testing.T, rpcAsset *taprpc.Asset) [32]byte { return asset.AssetCommitmentKey(assetID, scriptKey, groupKey == nil) } +// waitForProofUpdate polls until the proof for the given asset has been +// updated, which is detected by checking the block height of the last proof. +func waitForProofUpdate(t *testing.T, tapd *tapdHarness, a *taprpc.Asset, + blockHeight int32) { + + t.Helper() + + ctxb := context.Background() + ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout*2) + defer cancel() + + require.Eventually(t, func() bool { + // Export the proof, then decode it. 
+ exportResp, err := tapd.ExportProof( + ctxt, &taprpc.ExportProofRequest{ + AssetId: a.AssetGenesis.AssetId, + ScriptKey: a.ScriptKey, + }, + ) + require.NoError(t, err) + + f := &proof.File{} + require.NoError( + t, f.Decode(bytes.NewReader(exportResp.RawProof)), + ) + lastProof, err := f.LastProof() + require.NoError(t, err) + + // Check the block height of the proof. + return lastProof.BlockHeight == uint32(blockHeight) + }, defaultWaitTimeout, 200*time.Millisecond) +} + // assertAssetProofs makes sure the proofs for the given asset can be retrieved // from the given daemon and can be fully validated. func assertAssetProofs(t *testing.T, tapd *tapdHarness, @@ -233,6 +266,34 @@ func assertAssetProofs(t *testing.T, tapd *tapdHarness, return exportResp.RawProof } +// assertAssetProofsInvalid makes sure the proofs for the given asset can be +// retrieved from the given daemon but fail to validate. +func assertAssetProofsInvalid(t *testing.T, tapd *tapdHarness, + a *taprpc.Asset) { + + t.Helper() + + ctxb := context.Background() + ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout) + defer cancel() + + exportResp, err := tapd.ExportProof(ctxt, &taprpc.ExportProofRequest{ + AssetId: a.AssetGenesis.AssetId, + ScriptKey: a.ScriptKey, + }) + require.NoError(t, err) + + f := &proof.File{} + require.NoError(t, f.Decode(bytes.NewReader(exportResp.RawProof))) + + // Also make sure that the RPC can verify the proof as well. + verifyResp, err := tapd.VerifyProof(ctxt, &taprpc.ProofFile{ + RawProof: exportResp.RawProof, + }) + require.NoError(t, err) + require.False(t, verifyResp.Valid) +} + // verifyProofBlob parses the given proof blob into a file, verifies it and // returns the resulting last asset snapshot together with the parsed file. 
func verifyProofBlob(t *testing.T, tapd *tapdHarness, a *taprpc.Asset, @@ -287,7 +348,7 @@ func verifyProofBlob(t *testing.T, tapd *tapdHarness, a *taprpc.Asset, expectedHash := hash if heightHash != expectedHash { return fmt.Errorf("block hash and block height "+ - "mismatch; (height: %x, hashAtHeight: %s, "+ + "mismatch; (height: %d, hashAtHeight: %s, "+ "expectedHash: %s)", height, heightHash, expectedHash) } @@ -421,9 +482,10 @@ func assertAddrEventByStatus(t *testing.T, tapd *tapdHarness, // with the node. func confirmAndAssertOutboundTransfer(t *harnessTest, sender *tapdHarness, sendResp *taprpc.SendAssetResponse, assetID []byte, - expectedAmounts []uint64, currentTransferIdx, numTransfers int) { + expectedAmounts []uint64, currentTransferIdx, + numTransfers int) *wire.MsgBlock { - confirmAndAssetOutboundTransferWithOutputs( + return confirmAndAssetOutboundTransferWithOutputs( t, sender, sendResp, assetID, expectedAmounts, currentTransferIdx, numTransfers, 2, ) @@ -435,7 +497,7 @@ func confirmAndAssertOutboundTransfer(t *harnessTest, sender *tapdHarness, func confirmAndAssetOutboundTransferWithOutputs(t *harnessTest, sender *tapdHarness, sendResp *taprpc.SendAssetResponse, assetID []byte, expectedAmounts []uint64, currentTransferIdx, - numTransfers, numOutputs int) { + numTransfers, numOutputs int) *wire.MsgBlock { ctxb := context.Background() @@ -459,7 +521,7 @@ func confirmAndAssetOutboundTransferWithOutputs(t *harnessTest, t.Logf("Got response from sending assets: %v", sendRespJSON) // Mine a block to force the send event to complete (confirm on-chain). - _ = mineBlocks(t, t.lndHarness, 1, 1) + newBlock := mineBlocks(t, t.lndHarness, 1, 1)[0] // Confirm that we can externally view the transfer. 
require.Eventually(t.t, func() bool { @@ -494,6 +556,8 @@ func confirmAndAssetOutboundTransferWithOutputs(t *harnessTest, transferRespJSON, err := formatProtoJSON(transferResp) require.NoError(t.t, err) t.Logf("Got response from list transfers: %v", transferRespJSON) + + return newBlock } // assertNonInteractiveRecvComplete makes sure the given receiver has the @@ -780,6 +844,25 @@ func assertListAssets(t *harnessTest, ctx context.Context, tapd *tapdHarness, } } +// assertUniverseRootEquality checks that the universe roots returned by two +// daemons are either equal or not, depending on the expectedEquality parameter. +func assertUniverseRootEquality(t *testing.T, a, b *tapdHarness, + expectedEquality bool) { + + ctxb := context.Background() + ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout) + defer cancel() + + rootRequest := &unirpc.AssetRootRequest{} + universeRootsAlice, err := a.AssetRoots(ctxt, rootRequest) + require.NoError(t, err) + universeRootsBob, err := b.AssetRoots(ctxt, rootRequest) + require.NoError(t, err) + require.Equal(t, expectedEquality, assertUniverseRootsEqual( + universeRootsAlice, universeRootsBob, + )) +} + func assertUniverseRoot(t *testing.T, tapd *tapdHarness, sum int, assetID []byte, groupKey []byte) error { diff --git a/itest/re-org_test.go b/itest/re-org_test.go new file mode 100644 index 000000000..1661f9ef6 --- /dev/null +++ b/itest/re-org_test.go @@ -0,0 +1,391 @@ +package itest + +import ( + "context" + "testing" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/lightninglabs/taproot-assets/taprpc" + "github.com/lightninglabs/taproot-assets/taprpc/mintrpc" + unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/stretchr/testify/require" +) + +// testReOrgMint tests that when a re-org occurs, minted asset proofs are +// updated accordingly. 
+func testReOrgMint(t *harnessTest) {
+ // First, we'll mint a few assets but don't confirm the batch TX.
+ mintRequests := []*mintrpc.MintAssetRequest{
+ issuableAssets[0], issuableAssets[1],
+ }
+ mintTXID := mintAssetUnconfirmed(t, t.tapd, mintRequests)
+
+ ctxb := context.Background()
+ ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
+ defer cancel()
+
+ // Before we mine a block to confirm the mint TX, we create a temporary
+ // miner.
+ tempMiner := t.lndHarness.Miner.SpawnTempMiner()
+ miner := t.lndHarness.Miner
+
+ // And now we mine a block to confirm the assets.
+ initialBlock := mineBlocks(t, t.lndHarness, 1, 1)[0]
+ initialBlockHash := initialBlock.BlockHash()
+ waitForBatchState(
+ t, ctxt, t.tapd, defaultWaitTimeout,
+ mintrpc.BatchState_BATCH_STATE_FINALIZED,
+ )
+
+ // Make sure the original mint TX was mined in the first block.
+ miner.AssertTxInBlock(initialBlock, &mintTXID)
+ t.Logf("Mint TX %v mined in block %v", mintTXID, initialBlockHash)
+
+ assetList := assertAssetsMinted(
+ t, t.tapd, mintRequests, mintTXID, initialBlockHash,
+ )
+
+ // Now that we have the asset created, we'll make a new node that'll
+ // serve as the node which'll receive the assets. The existing tapd
+ // node will be used to synchronize universe state.
+ secondTapd := setupTapdHarness(
+ t.t, t, t.lndHarness.Bob, t.universeServer,
+ func(params *tapdHarnessParams) {
+ params.startupSyncNode = t.tapd
+ params.startupSyncNumAssets = len(assetList)
+ },
+ )
+ defer func() {
+ require.NoError(t.t, secondTapd.stop(!*noDelete))
+ }()
+
+ // We now generate the re-org.
+ generateReOrg(t.t, t.lndHarness, tempMiner, 3, 2)
+
+ // This should have caused a reorg, and Alice should sync to the longer
+ // chain, where the mint transaction is not confirmed.
+ _, tempMinerHeight, err := tempMiner.Client.GetBestBlock() + require.NoError(t.t, err, "unable to get current block height") + t.lndHarness.WaitForNodeBlockHeight(t.lndHarness.Alice, tempMinerHeight) + + // At this point, the asset proofs should be invalid, since the mint TX + // was re-organized out. + for idx := range assetList { + a := assetList[idx] + assertAssetProofsInvalid(t.t, t.tapd, a) + } + + // Cleanup by mining the minting tx again. + newBlock := t.lndHarness.MineBlocksAndAssertNumTxes(1, 1)[0] + newBlockHash := newBlock.BlockHash() + _, newBlockHeight := t.lndHarness.Miner.GetBestBlock() + t.lndHarness.Miner.AssertTxInBlock(newBlock, &mintTXID) + t.Logf("Mint TX %v re-mined in block %v", mintTXID, newBlockHash) + + // Let's wait until we see that the proof for the first asset was + // updated to the new block height. + waitForProofUpdate(t.t, t.tapd, assetList[0], newBlockHeight) + + // We now try to validate the issuance proof of the two assets we + // minted again. The re-org watcher should have updated the proofs and + // pushed them to the proof store. They should be valid now. + for idx := range assetList { + a := assetList[idx] + assertAssetProofs(t.t, t.tapd, a) + } + + // Let's now bury the proofs under sufficient blocks to allow the re-org + // watcher to stop watching the TX. + t.lndHarness.MineBlocks(8) + + // The second tapd instance should now have a different universe state + // since we only updated the issuance proofs in the first tapd instance. + assertUniverseRootEquality(t.t, t.tapd, secondTapd, false) + + // A universe sync should now bring both nodes back into sync. 
+ ctxt, cancel = context.WithTimeout(ctxb, defaultWaitTimeout)
+ defer cancel()
+ syncDiff, err := secondTapd.SyncUniverse(ctxt, &unirpc.SyncRequest{
+ UniverseHost: t.tapd.rpcHost(),
+ SyncMode: unirpc.UniverseSyncMode_SYNC_ISSUANCE_ONLY,
+ })
+ require.NoError(t.t, err)
+ require.Len(t.t, syncDiff.SyncedUniverses, len(assetList))
+
+ assertUniverseRootEquality(t.t, t.tapd, secondTapd, true)
+}
+
+// testReOrgSend tests that when a re-org occurs, sent asset proofs are updated
+// accordingly.
+func testReOrgSend(t *harnessTest) {
+ // First, we'll mint a few assets and confirm the batch TX.
+ mintRequests := []*mintrpc.MintAssetRequest{
+ issuableAssets[0], issuableAssets[1],
+ }
+ assetList := mintAssetsConfirmBatch(t, t.tapd, mintRequests)
+
+ ctxb := context.Background()
+ ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
+ defer cancel()
+
+ // Now that we have the asset created, we'll make a new node that'll
+ // serve as the node which'll receive the assets. The existing tapd
+ // node will be used to synchronize universe state.
+ secondTapd := setupTapdHarness(
+ t.t, t, t.lndHarness.Bob, t.universeServer,
+ func(params *tapdHarnessParams) {
+ params.startupSyncNode = t.tapd
+ params.startupSyncNumAssets = len(assetList)
+ },
+ )
+ defer func() {
+ require.NoError(t.t, secondTapd.stop(!*noDelete))
+ }()
+
+ // Before we mine a block to confirm the send TX, we create a temporary
+ // miner.
+ tempMiner := t.lndHarness.Miner.SpawnTempMiner()
+ miner := t.lndHarness.Miner
+
+ // Now to the second part of the test: We'll send an asset to Bob, and
+ // then re-org the chain again.
+ sendAsset := assetList[0]
+ sendAssetGen := sendAsset.AssetGenesis
+ sendAmount := uint64(500)
+ bobAddr, err := secondTapd.NewAddr(ctxt, &taprpc.NewAddrRequest{
+ AssetId: sendAssetGen.AssetId,
+ Amt: sendAmount,
+ })
+ require.NoError(t.t, err)
+ assertAddrCreated(t.t, secondTapd, sendAsset, bobAddr)
+ sendResp := sendAssetsToAddr(t, t.tapd, bobAddr)
+ initialBlock := confirmAndAssertOutboundTransfer(
+ t, t.tapd, sendResp, sendAssetGen.AssetId,
+ []uint64{sendAsset.Amount - sendAmount, sendAmount}, 0, 1,
+ )
+ _ = sendProof(
+ t, t.tapd, secondTapd, bobAddr.ScriptKey, sendAssetGen,
+ )
+ assertNonInteractiveRecvComplete(t, secondTapd, 1)
+ initialBlockHash := initialBlock.BlockHash()
+
+ // Make sure the original send TX was mined in the first block.
+ sendTXID, err := chainhash.NewHash(sendResp.Transfer.AnchorTxHash)
+ require.NoError(t.t, err)
+ miner.AssertTxInBlock(initialBlock, sendTXID)
+ t.Logf("Send TX %v mined in block %v", sendTXID, initialBlockHash)
+
+ // We now generate the re-org.
+ generateReOrg(t.t, t.lndHarness, tempMiner, 3, 2)
+
+ // This should have caused a reorg, and Alice should sync to the longer
+ // chain, where the send transaction is not confirmed.
+ _, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
+ require.NoError(t.t, err, "unable to get current block height")
+ t.lndHarness.WaitForNodeBlockHeight(t.lndHarness.Alice, tempMinerHeight)
+
+ // At this point, all the asset proofs should be invalid, since the send
+ // TX was re-organized out, and it also contained passive assets.
+ listAssetRequest := &taprpc.ListAssetRequest{}
+ aliceAssets, err := t.tapd.ListAssets(ctxb, listAssetRequest)
+ require.NoError(t.t, err)
+ bobAssets, err := secondTapd.ListAssets(ctxb, listAssetRequest)
+ require.NoError(t.t, err)
+
+ for idx := range aliceAssets.Assets {
+ a := aliceAssets.Assets[idx]
+ assertAssetProofsInvalid(t.t, t.tapd, a)
+ }
+ for idx := range bobAssets.Assets {
+ a := bobAssets.Assets[idx]
+ assertAssetProofsInvalid(t.t, secondTapd, a)
+ }
+
+ // Cleanup by mining the send tx again.
+ newBlock := t.lndHarness.MineBlocksAndAssertNumTxes(1, 1)[0]
+ newBlockHash := newBlock.BlockHash()
+ _, newBlockHeight := t.lndHarness.Miner.GetBestBlock()
+ t.lndHarness.Miner.AssertTxInBlock(newBlock, sendTXID)
+ t.Logf("Send TX %v re-mined in block %v", sendTXID, newBlockHash)
+
+ // Let's wait until we see that the proof for the first asset was
+ // updated to the new block height.
+ waitForProofUpdate(t.t, t.tapd, aliceAssets.Assets[0], newBlockHeight)
+ waitForProofUpdate(t.t, secondTapd, bobAssets.Assets[0], newBlockHeight)
+
+ // We now try to validate the send proofs of the delivered, change and
+ // passive assets. The re-org watcher should have updated the proofs and
+ // pushed them to the proof store. They should be valid now.
+ for idx := range aliceAssets.Assets {
+ a := aliceAssets.Assets[idx]
+ assertAssetProofs(t.t, t.tapd, a)
+ }
+ for idx := range bobAssets.Assets {
+ a := bobAssets.Assets[idx]
+ assertAssetProofs(t.t, secondTapd, a)
+ }
+
+ // Let's now bury the proofs under sufficient blocks to allow the re-org
+ // watcher to stop watching the TX.
+ t.lndHarness.MineBlocks(8)
+}
+
+// testReOrgMintAndSend tests that when a re-org occurs, minted and directly
+// sent asset proofs are updated accordingly.
+func testReOrgMintAndSend(t *harnessTest) {
+ // Before we do anything, we spawn a miner. This is where the fork in
+ // the chain starts.
+ tempMiner := t.lndHarness.Miner.SpawnTempMiner() + miner := t.lndHarness.Miner + + // Then, we'll mint a few assets and confirm the batch TX. + mintRequests := []*mintrpc.MintAssetRequest{ + issuableAssets[0], issuableAssets[1], + } + assetList := mintAssetsConfirmBatch(t, t.tapd, mintRequests) + + ctxb := context.Background() + ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout) + defer cancel() + + // Now that we have the asset created, we'll make a new node that'll + // serve as the node which'll receive the assets. The existing tapd + // node will be used to synchronize universe state. + secondTapd := setupTapdHarness( + t.t, t, t.lndHarness.Bob, t.universeServer, + func(params *tapdHarnessParams) { + params.startupSyncNode = t.tapd + params.startupSyncNumAssets = len(assetList) + }, + ) + defer func() { + require.NoError(t.t, secondTapd.stop(!*noDelete)) + }() + + // We'll send an asset to Bob, and then re-org the chain, which should + // cause both the minting TX and the send TX to be un-confirmed. + sendAsset := assetList[0] + sendAssetGen := sendAsset.AssetGenesis + sendAmount := uint64(500) + bobAddr, err := secondTapd.NewAddr(ctxt, &taprpc.NewAddrRequest{ + AssetId: sendAssetGen.AssetId, + Amt: sendAmount, + }) + require.NoError(t.t, err) + assertAddrCreated(t.t, secondTapd, sendAsset, bobAddr) + sendResp := sendAssetsToAddr(t, t.tapd, bobAddr) + initialBlock := confirmAndAssertOutboundTransfer( + t, t.tapd, sendResp, sendAssetGen.AssetId, + []uint64{sendAsset.Amount - sendAmount, sendAmount}, 0, 1, + ) + _ = sendProof( + t, t.tapd, secondTapd, bobAddr.ScriptKey, sendAssetGen, + ) + assertNonInteractiveRecvComplete(t, secondTapd, 1) + initialBlockHash := initialBlock.BlockHash() + + // Make sure the original send TX was mined in the first block. 
+ sendTXID, err := chainhash.NewHash(sendResp.Transfer.AnchorTxHash)
+ require.NoError(t.t, err)
+ miner.AssertTxInBlock(initialBlock, sendTXID)
+ t.Logf("Send TX %v mined in block %v", sendTXID, initialBlockHash)
+
+ // We now generate the re-org.
+ generateReOrg(t.t, t.lndHarness, tempMiner, 4, 2)
+
+ // This should have caused a reorg, and Alice should sync to the longer
+ // chain, where the send transaction is not confirmed.
+ _, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
+ require.NoError(t.t, err, "unable to get current block height")
+ t.lndHarness.WaitForNodeBlockHeight(t.lndHarness.Alice, tempMinerHeight)
+
+ // At this point, all the asset proofs should be invalid, since the send
+ // TX was re-organized out, and it also contained passive assets.
+ listAssetRequest := &taprpc.ListAssetRequest{}
+ aliceAssets, err := t.tapd.ListAssets(ctxb, listAssetRequest)
+ require.NoError(t.t, err)
+ bobAssets, err := secondTapd.ListAssets(ctxb, listAssetRequest)
+ require.NoError(t.t, err)
+
+ for idx := range aliceAssets.Assets {
+ a := aliceAssets.Assets[idx]
+ assertAssetProofsInvalid(t.t, t.tapd, a)
+ }
+ for idx := range bobAssets.Assets {
+ a := bobAssets.Assets[idx]
+ assertAssetProofsInvalid(t.t, secondTapd, a)
+ }
+
+ // We now also stop Bob to make sure he can still detect the re-org and
+ // update the proofs once it comes back up.
+ t.t.Logf("Stopping Bob's daemon")
+ require.NoError(t.t, secondTapd.stop(false))
+
+ // Cleanup by mining the mint and send txs again.
+ newBlock := t.lndHarness.MineBlocksAndAssertNumTxes(1, 2)[0]
+ newBlockHash := newBlock.BlockHash()
+ _, newBlockHeight := t.lndHarness.Miner.GetBestBlock()
+ t.lndHarness.Miner.AssertTxInBlock(newBlock, sendTXID)
+ t.Logf("Send TX %v re-mined in block %v", sendTXID, newBlockHash)
+
+ // We now restart Bob's daemon, expecting it to pick up the re-org.
+ t.t.Logf("Re-starting Bob's daemon so as to complete transfer") + require.NoError(t.t, secondTapd.start(false)) + + // Let's wait until we see that the proof for the mint, first and sent + // assets were updated to the new block height. + waitForProofUpdate(t.t, t.tapd, assetList[0], newBlockHeight) + waitForProofUpdate(t.t, t.tapd, aliceAssets.Assets[0], newBlockHeight) + waitForProofUpdate(t.t, secondTapd, bobAssets.Assets[0], newBlockHeight) + + // We now try to validate the send proofs of the delivered, change and + // passive assets. The re-org watcher should have updated the proofs and + // pushed them to the proof store. They should be valid now. + for idx := range aliceAssets.Assets { + a := aliceAssets.Assets[idx] + assertAssetProofs(t.t, t.tapd, a) + } + for idx := range bobAssets.Assets { + a := bobAssets.Assets[idx] + assertAssetProofs(t.t, secondTapd, a) + } + + // Let's now bury the proofs under sufficient blocks to allow the re-org + // watcher to stop watching the TX. + t.lndHarness.MineBlocks(8) +} + +// generateReOrg generates a re-org by mining a longer chain with a temporary +// miner, and then connecting the temporary miner to the original miner. +// Depending on when exactly the temporary miner was spawned, the expectedDelta +// might differ from the depth, if the "main" miner already has more blocks. +func generateReOrg(t *testing.T, lnd *lntest.HarnessTest, + tempMiner *lntest.HarnessMiner, depth uint32, expectedDelta int32) { + + // Now we generate a longer chain with the temp miner. + _, err := tempMiner.Client.Generate(depth) + require.NoError(t, err, "unable to generate blocks") + + // Ensure the chain lengths are what we expect, with the temp miner + // being 2 blocks ahead. + lnd.Miner.AssertMinerBlockHeightDelta(tempMiner, expectedDelta) + + // Now we disconnect lnd's chain backend from the original miner, and + // connect the two miners together. 
Since the temporary miner knows + // about a longer chain, both miners should sync to that chain. + lnd.DisconnectMiner() + + // Connecting to the temporary miner should now cause our original + // chain to be re-orged out. + lnd.Miner.ConnectMiner(tempMiner) + + // Once again they should be on the same chain. + lnd.Miner.AssertMinerBlockHeightDelta(tempMiner, 0) + + // Now we disconnect the two miners, and connect our original miner to + // our chain backend once again. + lnd.Miner.DisconnectMiner(tempMiner) + + lnd.ConnectMiner() +} diff --git a/itest/test_list_on_test.go b/itest/test_list_on_test.go index b7dddf53a..2c7af4e04 100644 --- a/itest/test_list_on_test.go +++ b/itest/test_list_on_test.go @@ -132,6 +132,18 @@ var testCases = []*testCase{ name: "get info", test: testGetInfo, }, + { + name: "re-org mint", + test: testReOrgMint, + }, + { + name: "re-org send", + test: testReOrgSend, + }, + { + name: "re-org mint and send", + test: testReOrgMintAndSend, + }, } var optionalTestCases = []*testCase{