From 3848c01cb6cdfe7ba0eb36edb0ecc652c78eb4cf Mon Sep 17 00:00:00 2001
From: Gregorio Juliana
Date: Thu, 21 Nov 2024 23:44:29 +0100
Subject: [PATCH] feat: reset pxe indexes (#10093)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes: https://github.com/AztecProtocol/aztec-packages/issues/9369

Complete index db nuke. As discussed via Slack, more granularity might not be
necessary. Maybe per contract?

---------

Co-authored-by: Nicolás Venturo
---
 .../pxe/src/database/kv_pxe_database.ts       | 11 +++
 yarn-project/pxe/src/database/pxe_database.ts |  8 +++
 .../pxe/src/pxe_service/pxe_service.ts        |  4 ++
 .../simulator_oracle/simulator_oracle.test.ts | 71 +++++++++++++++++--
 .../pxe/src/synchronizer/synchronizer.test.ts |  2 +
 .../pxe/src/synchronizer/synchronizer.ts      |  3 +
 6 files changed, 92 insertions(+), 7 deletions(-)

diff --git a/yarn-project/pxe/src/database/kv_pxe_database.ts b/yarn-project/pxe/src/database/kv_pxe_database.ts
index bbcc83869c0..090718e5448 100644
--- a/yarn-project/pxe/src/database/kv_pxe_database.ts
+++ b/yarn-project/pxe/src/database/kv_pxe_database.ts
@@ -702,4 +702,15 @@ export class KVPxeDatabase implements PxeDatabase {
   #getTaggingSecretsIndexes(appTaggingSecrets: Fr[], storageMap: AztecMap<string, number>): Promise<number[]> {
     return this.db.transaction(() => appTaggingSecrets.map(secret => storageMap.get(`${secret.toString()}`) ?? 0));
   }
+
+  async resetNoteSyncData(): Promise<void> {
+    await this.db.transaction(() => {
+      for (const recipient of this.#taggingSecretIndexesForRecipients.keys()) {
+        void this.#taggingSecretIndexesForRecipients.delete(recipient);
+      }
+      for (const sender of this.#taggingSecretIndexesForSenders.keys()) {
+        void this.#taggingSecretIndexesForSenders.delete(sender);
+      }
+    });
+  }
 }
diff --git a/yarn-project/pxe/src/database/pxe_database.ts b/yarn-project/pxe/src/database/pxe_database.ts
index f93d32240d3..1e7293c33ae 100644
--- a/yarn-project/pxe/src/database/pxe_database.ts
+++ b/yarn-project/pxe/src/database/pxe_database.ts
@@ -226,4 +226,12 @@ export interface PxeDatabase extends ContractArtifactDatabase, ContractInstanceD
    * @param blockNumber - All nullifiers strictly after this block are removed.
    */
   unnullifyNotesAfter(blockNumber: number): Promise<void>;
+
+  /**
+   * Resets the indexes used to sync notes to 0 for every sender and recipient, causing the next sync process to
+   * start from scratch, taking longer than usual.
+   * This can help fix desynchronization issues, including finding logs that had previously been overlooked, and
+   * is also required to deal with chain reorgs.
+   */
+  resetNoteSyncData(): Promise<void>;
 }
diff --git a/yarn-project/pxe/src/pxe_service/pxe_service.ts b/yarn-project/pxe/src/pxe_service/pxe_service.ts
index bfbfd0cf689..3e7aa453ea3 100644
--- a/yarn-project/pxe/src/pxe_service/pxe_service.ts
+++ b/yarn-project/pxe/src/pxe_service/pxe_service.ts
@@ -974,4 +974,8 @@ export class PXEService implements PXE {

     return decodedEvents;
   }
+
+  async resetNoteSyncData() {
+    return await this.db.resetNoteSyncData();
+  }
 }
diff --git a/yarn-project/pxe/src/simulator_oracle/simulator_oracle.test.ts b/yarn-project/pxe/src/simulator_oracle/simulator_oracle.test.ts
index 9e160d98e82..6f7a1204e94 100644
--- a/yarn-project/pxe/src/simulator_oracle/simulator_oracle.test.ts
+++ b/yarn-project/pxe/src/simulator_oracle/simulator_oracle.test.ts
@@ -38,7 +38,7 @@ import { type PxeDatabase } from '../database/index.js';
 import { KVPxeDatabase } from '../database/kv_pxe_database.js';
 import { type OutgoingNoteDao } from '../database/outgoing_note_dao.js';
 import { ContractDataOracle } from '../index.js';
-import { type SimulatorOracle } from './index.js';
+import { SimulatorOracle } from './index.js';

 const TXS_PER_BLOCK = 4;
 const NUM_NOTE_HASHES_PER_BLOCK = TXS_PER_BLOCK * MAX_NOTE_HASHES_PER_TX;
@@ -130,8 +130,7 @@ describe('Simulator oracle', () => {
     contractDataOracle = new ContractDataOracle(database);
     jest.spyOn(contractDataOracle, 'getDebugContractName').mockImplementation(() => Promise.resolve('TestContract'));
     keyStore = new KeyStore(db);
-    const simulatorOracleModule = await import('../simulator_oracle/index.js');
-    simulatorOracle = new simulatorOracleModule.SimulatorOracle(contractDataOracle, database, keyStore, aztecNode);
+    simulatorOracle = new SimulatorOracle(contractDataOracle, database, keyStore, aztecNode);
     // Set up contract address
     contractAddress = AztecAddress.random();
     // Set up recipient account
@@ -291,7 +290,7 @@ describe('Simulator oracle', () => {
       expect(indexes).toEqual([6, 6, 6, 6, 6, 7, 7, 7, 7, 7]);

       // We should have called the node 17 times:
-      // 5 times with no results (sender offset) + 2 times with logs (slide the window) + 10 times with no results (window size)
+      // 5 times with no results (sender offset) + 2 times with logs (sliding the window) + 10 times with no results (window size)
       expect(aztecNode.getLogsByTags.mock.calls.length).toBe(5 + 2 + SENDER_OFFSET_WINDOW_SIZE);
     });

@@ -306,19 +305,29 @@ describe('Simulator oracle', () => {
       const ivsk = await keyStore.getMasterIncomingViewingSecretKey(recipient.address);
       const secrets = senders.map(sender => {
         const firstSenderSharedSecret = computeTaggingSecret(recipient, ivsk, sender.completeAddress.address);
         return poseidon2Hash([firstSenderSharedSecret.x, firstSenderSharedSecret.y, contractAddress]);
       });

+      // Increase our indexes to 2
       await database.setTaggingSecretsIndexesAsRecipient(secrets.map(secret => new IndexedTaggingSecret(secret, 2)));

       const syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);

-      // Even if our index as recipient is higher than what the recipient sent, we should be able to find the logs
+      // Even if our index as recipient is higher than what the sender sent, we should be able to find the logs
+      // since the window starts at Math.max(0, 2 - window_size) = 0
       expect(syncedLogs.get(recipient.address.toString())).toHaveLength(NUM_SENDERS + 1 + NUM_SENDERS / 2);
+      // First sender should have 2 logs, but keep index 2 since they were built using the same tag
+      // Next 4 senders should also have index 2 = offset + 1
+      // Last 5 senders should have index 3 = offset + 2
+      const indexes = await database.getTaggingSecretsIndexesAsRecipient(secrets);
+
+      expect(indexes).toHaveLength(NUM_SENDERS);
+      expect(indexes).toEqual([2, 2, 2, 2, 2, 3, 3, 3, 3, 3]);
+
       // We should have called the node 13 times:
       // 1 time without logs + 2 times with logs (sliding the window) + 10 times with no results (window size)
       expect(aztecNode.getLogsByTags.mock.calls.length).toBe(3 + SENDER_OFFSET_WINDOW_SIZE);
     });

-    it("should sync not tagged logs for which indexes are not updated if they're outside the window", async () => {
+    it("should not sync tagged logs for which indexes are not updated if they're outside the window", async () => {
       const senderOffset = 0;
       generateMockLogs(senderOffset);
@@ -335,14 +344,62 @@ describe('Simulator oracle', () => {
       const syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);

-      // Only half of the logs should be synced since we start from index 1 = offset + 1, the other half should be skipped
+      // Only half of the logs should be synced since we start from index 1 = (11 - window_size), the other half should be skipped
       expect(syncedLogs.get(recipient.address.toString())).toHaveLength(NUM_SENDERS / 2);
+      // Indexes should remain where we set them (window_size + 1)
+      const indexes = await database.getTaggingSecretsIndexesAsRecipient(secrets);
+
+      expect(indexes).toHaveLength(NUM_SENDERS);
+      expect(indexes).toEqual([11, 11, 11, 11, 11, 11, 11, 11, 11, 11]);
+
       // We should have called the node SENDER_OFFSET_WINDOW_SIZE + 1 (with logs) + SENDER_OFFSET_WINDOW_SIZE:
       // Once for index 1 (NUM_SENDERS/2 logs) + 2 times the sliding window (no logs each time)
       expect(aztecNode.getLogsByTags.mock.calls.length).toBe(1 + 2 * SENDER_OFFSET_WINDOW_SIZE);
     });

+    it('should sync tagged logs from scratch after a DB wipe', async () => {
+      const senderOffset = 0;
+      generateMockLogs(senderOffset);
+
+      // Recompute the secrets (as recipient) to update indexes
+      const ivsk = await keyStore.getMasterIncomingViewingSecretKey(recipient.address);
+      const secrets = senders.map(sender => {
+        const firstSenderSharedSecret = computeTaggingSecret(recipient, ivsk, sender.completeAddress.address);
+        return poseidon2Hash([firstSenderSharedSecret.x, firstSenderSharedSecret.y, contractAddress]);
+      });
+
+      await database.setTaggingSecretsIndexesAsRecipient(
+        secrets.map(secret => new IndexedTaggingSecret(secret, SENDER_OFFSET_WINDOW_SIZE + 2)),
+      );
+
+      let syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);
+
+      // No logs should be synced since we start from index 2 = 12 - window_size
+      expect(syncedLogs.get(recipient.address.toString())).toHaveLength(0);
+      // We should have called the node 21 times (window size + current_index + window size)
+      expect(aztecNode.getLogsByTags.mock.calls.length).toBe(2 * SENDER_OFFSET_WINDOW_SIZE + 1);
+
+      aztecNode.getLogsByTags.mockClear();
+
+      // Wipe the database
+      await database.resetNoteSyncData();
+
+      syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);
+
+      // First sender should have 2 logs, but keep index 1 since they were built using the same tag
+      // Next 4 senders should also have index 1 = offset + 1
+      // Last 5 senders should have index 2 = offset + 2
+      const indexes = await database.getTaggingSecretsIndexesAsRecipient(secrets);
+
+      expect(indexes).toHaveLength(NUM_SENDERS);
+      expect(indexes).toEqual([1, 1, 1, 1, 1, 2, 2, 2, 2, 2]);
+
+      // We should have called the node 12 times:
+      // 2 times with logs (sliding the window) + 10 times with no results (window size)
+      expect(aztecNode.getLogsByTags.mock.calls.length).toBe(2 + SENDER_OFFSET_WINDOW_SIZE);
+    });
+
     it('should not sync tagged logs with a blockNumber > maxBlockNumber', async () => {
       const senderOffset = 0;
       generateMockLogs(senderOffset);
diff --git a/yarn-project/pxe/src/synchronizer/synchronizer.test.ts b/yarn-project/pxe/src/synchronizer/synchronizer.test.ts
index c504e82d279..e78c0dbb4ab 100644
--- a/yarn-project/pxe/src/synchronizer/synchronizer.test.ts
+++ b/yarn-project/pxe/src/synchronizer/synchronizer.test.ts
@@ -44,6 +44,7 @@
   it('removes notes from db on a reorg', async () => {
     const removeNotesAfter = jest.spyOn(database, 'removeNotesAfter').mockImplementation(() => Promise.resolve());
     const unnullifyNotesAfter = jest.spyOn(database, 'unnullifyNotesAfter').mockImplementation(() => Promise.resolve());
+    const resetNoteSyncData = jest.spyOn(database, 'resetNoteSyncData').mockImplementation(() => Promise.resolve());
     aztecNode.getBlockHeader.mockImplementation(blockNumber =>
       Promise.resolve(L2Block.random(blockNumber as number).header),
     );
@@ -53,5 +54,6 @@

     expect(removeNotesAfter).toHaveBeenCalledWith(3);
     expect(unnullifyNotesAfter).toHaveBeenCalledWith(3);
+    expect(resetNoteSyncData).toHaveBeenCalled();
   });
 });
diff --git a/yarn-project/pxe/src/synchronizer/synchronizer.ts b/yarn-project/pxe/src/synchronizer/synchronizer.ts
index 63fdc36843e..d527a38b535 100644
--- a/yarn-project/pxe/src/synchronizer/synchronizer.ts
+++ b/yarn-project/pxe/src/synchronizer/synchronizer.ts
@@ -56,6 +56,9 @@ export class Synchronizer implements L2BlockStreamEventHandler {
         // We first unnullify and then remove so that unnullified notes that were created after the block number end up deleted.
         await this.db.unnullifyNotesAfter(event.blockNumber);
         await this.db.removeNotesAfter(event.blockNumber);
+        // Remove all note tagging indexes to force a full resync. This is suboptimal, but unless we track the
+        // block number in which each index is used it's all we can do.
+        await this.db.resetNoteSyncData();
         // Update the header to the last block.
         await this.db.setHeader(await this.node.getBlockHeader(event.blockNumber));
         break;
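As a rough usage sketch, and not code taken from this patch: the block below restates the reorg-handling order the synchronizer now follows. The names `NoteSyncDatabase` and `handleChainPruned` are illustrative stand-ins for the real `PxeDatabase` interface and the `Synchronizer` chain-pruned handler touched in the diff above.

// Sketch only: NoteSyncDatabase and handleChainPruned are stand-ins, not the
// actual PxeDatabase / Synchronizer.handleBlockStreamEvent from the diff.
interface NoteSyncDatabase {
  unnullifyNotesAfter(blockNumber: number): Promise<void>;
  removeNotesAfter(blockNumber: number): Promise<void>;
  resetNoteSyncData(): Promise<void>;
  setHeader(header: unknown): Promise<void>;
}

async function handleChainPruned(db: NoteSyncDatabase, blockNumber: number, header: unknown): Promise<void> {
  // Unnullify first so notes nullified after the pruned block become live again,
  // then drop notes created after it (including any that were just unnullified).
  await db.unnullifyNotesAfter(blockNumber);
  await db.removeNotesAfter(blockNumber);
  // Reset every sender/recipient tagging index to 0 so the next sync starts from
  // scratch; without per-block index tracking, a full resync is the safe option.
  await db.resetNoteSyncData();
  // Finally, roll the synced header back to the pruned block.
  await db.setHeader(header);
}

Resetting every tagging index trades a slower next sync for correctness, which matches the "suboptimal, but ... all we can do" note in the synchronizer comment.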