From bad0524f066c1951e2ef8023c81739edd9cb8993 Mon Sep 17 00:00:00 2001
From: Charlie Lye
Date: Mon, 13 Mar 2023 16:41:14 +0000
Subject: [PATCH 1/2] Performance fixes. (#26)

* Performance fixes.

* TxPoolProfile should only ever be computed once, until it expires.

* Fix.

---
 .../client_proofs/proof_data/proof_data.ts |  4 +-
 yarn-project/falafel/src/app.ts            |  2 +-
 yarn-project/falafel/src/world_state.ts    | 99 ++++++++++---------
 3 files changed, 56 insertions(+), 49 deletions(-)

diff --git a/yarn-project/barretenberg.js/src/client_proofs/proof_data/proof_data.ts b/yarn-project/barretenberg.js/src/client_proofs/proof_data/proof_data.ts
index 0a02e93e6..8112bd007 100644
--- a/yarn-project/barretenberg.js/src/client_proofs/proof_data/proof_data.ts
+++ b/yarn-project/barretenberg.js/src/client_proofs/proof_data/proof_data.ts
@@ -81,7 +81,7 @@ export class ProofData {
   public readonly backwardLink: Buffer;
   public readonly allowChain: Buffer;
 
-  constructor(public rawProofData: Buffer) {
+  constructor(public rawProofData: Buffer, txId?: Buffer) {
     this.proofId = rawProofData.readUInt32BE(ProofDataOffsets.PROOF_ID);
     this.noteCommitment1 = rawProofData.slice(
       ProofDataOffsets.NOTE_COMMITMENT_1,
@@ -109,6 +109,8 @@ export class ProofData {
     this.defiRoot = rawProofData.slice(ProofDataOffsets.DEFI_ROOT, ProofDataOffsets.DEFI_ROOT + 32);
     this.backwardLink = rawProofData.slice(ProofDataOffsets.BACKWARD_LINK, ProofDataOffsets.BACKWARD_LINK + 32);
     this.allowChain = rawProofData.slice(ProofDataOffsets.ALLOW_CHAIN, ProofDataOffsets.ALLOW_CHAIN + 32);
+
+    this.txId_ = txId;
   }
 
   get allowChainFromNote1() {
diff --git a/yarn-project/falafel/src/app.ts b/yarn-project/falafel/src/app.ts
index 4e00e1f21..c2e0f6106 100644
--- a/yarn-project/falafel/src/app.ts
+++ b/yarn-project/falafel/src/app.ts
@@ -348,7 +348,7 @@ export function appFactory(server: Server, prefix: string, metrics: Metrics, ser
 
   router.get('/get-pending-txs', recordMetric, async (ctx: Koa.Context) => {
     const txs = await server.getUnsettledTxs();
-    ctx.body = txs.map(tx => new ProofData(tx.proofData)).map(toPendingTxJson);
+    ctx.body = txs.map(tx => new ProofData(tx.proofData, tx.id)).map(toPendingTxJson);
     ctx.status = 200;
   });
 
diff --git a/yarn-project/falafel/src/world_state.ts b/yarn-project/falafel/src/world_state.ts
index f91dcdcb3..570a136b0 100644
--- a/yarn-project/falafel/src/world_state.ts
+++ b/yarn-project/falafel/src/world_state.ts
@@ -71,8 +71,7 @@ type TxPoolProfile = {
 export class WorldState {
   private serialQueue = new SerialQueue();
   private pipeline?: RollupPipeline;
-  private txPoolProfile!: TxPoolProfile;
-  private txPoolProfileValidUntil!: Date;
+  private txPoolProfile?: Promise<TxPoolProfile>;
   private initialSubtreeRootsCache: Buffer[] = [];
   private bridgeStatsQueryHandler: BridgeStatsQueryHandler;
 
@@ -88,13 +87,6 @@ export class WorldState {
     private expireTxPoolAfter = 60 * 1000,
     private log = createLogger('WorldState'),
   ) {
-    this.txPoolProfile = {
-      numTxs: 0,
-      numTxsInNextRollup: 0,
-      pendingTxCount: 0,
-      pendingSecondClassTxCount: 0,
-      pendingBridgeStats: [],
-    };
     this.bridgeStatsQueryHandler = new BridgeStatsQueryHandler(rollupDb, txFeeResolver);
   }
 
@@ -136,45 +128,10 @@ export class WorldState {
   }
 
   public async getTxPoolProfile() {
-    // getPendingTxs from rollup db
-    // remove the tranasctions that we know are in the next rollup currently being built
-    if (!this.txPoolProfileValidUntil || new Date().getTime() > this.txPoolProfileValidUntil.getTime()) {
-      const pendingTxs = await this.rollupDb.getPendingTxs();
-      const processedTransactions = this.pipeline?.getProcessedTxs() || [];
-      const pendingTransactionsNotInRollup = pendingTxs.filter(elem =>
-        processedTransactions.every(tx => !tx.id.equals(elem.id)),
-      );
-
-      const pendingBridgeStats: Map = new Map();
-      for (const tx of pendingTransactionsNotInRollup) {
-        const proof = new ProofData(tx.proofData);
-        if (proof.proofId !== ProofId.DEFI_DEPOSIT) {
-          continue;
-        }
-
-        const defiProof = new DefiDepositProofData(proof);
-        const rollupTx = createDefiRollupTx(tx, defiProof);
-        const bridgeCallData = rollupTx.bridgeCallData!;
-        const bridgeProfile = pendingBridgeStats.get(bridgeCallData) || {
-          bridgeCallData,
-          gasAccrued: 0,
-        };
-        bridgeProfile.gasAccrued += this.txFeeResolver.getSingleBridgeTxGas(bridgeCallData) + rollupTx.excessGas;
-
-        pendingBridgeStats.set(bridgeCallData, bridgeProfile);
-      }
-
-      this.txPoolProfile = {
-        numTxs: await this.rollupDb.getUnsettledTxCount(),
-        numTxsInNextRollup: processedTransactions.length,
-        pendingBridgeStats: [...pendingBridgeStats.values()],
-        pendingTxCount: pendingTransactionsNotInRollup.length,
-        pendingSecondClassTxCount: await this.rollupDb.getPendingSecondClassTxCount(),
-      };
-      this.txPoolProfileValidUntil = new Date(Date.now() + this.expireTxPoolAfter);
+    if (!this.txPoolProfile) {
+      this.txPoolProfile = this.computeTxPoolProfile();
     }
-
-    return this.txPoolProfile;
+    return await this.txPoolProfile;
   }
 
   public async queryBridgeStats(query: BridgePublishQuery) {
@@ -821,4 +778,52 @@ export class WorldState {
 
     await this.worldStateDb.commit();
   }
+
+  /**
+   * getPendingTxs from rollup db
+   * remove the transactions that we know are in the next rollup currently being built
+   */
+  private async computeTxPoolProfile() {
+    const pendingTxs = await this.rollupDb.getPendingTxs();
+    const processedTransactions = this.pipeline?.getProcessedTxs() || [];
+
+    const txsBeingProcessed = new Set(processedTransactions.map(tx => tx.id.toString('hex')));
+
+    const pendingTransactionsNotBeingProcessed = pendingTxs.filter(
+      elem => !txsBeingProcessed.has(elem.id.toString('hex')),
+    );
+
+    const pendingBridgeStats: Map = new Map();
+    for (const tx of pendingTransactionsNotBeingProcessed) {
+      const proof = new ProofData(tx.proofData);
+      if (proof.proofId !== ProofId.DEFI_DEPOSIT) {
+        continue;
+      }
+
+      const defiProof = new DefiDepositProofData(proof);
+      const rollupTx = createDefiRollupTx(tx, defiProof);
+      const bridgeCallData = rollupTx.bridgeCallData!;
+      const bridgeProfile = pendingBridgeStats.get(bridgeCallData) || {
+        bridgeCallData,
+        gasAccrued: 0,
+      };
+      bridgeProfile.gasAccrued += this.txFeeResolver.getSingleBridgeTxGas(bridgeCallData) + rollupTx.excessGas;
+
+      pendingBridgeStats.set(bridgeCallData, bridgeProfile);
+    }
+
+    setTimeout(() => {
+      this.txPoolProfile = undefined;
+    }, this.expireTxPoolAfter);
+
+    const result: TxPoolProfile = {
+      numTxs: await this.rollupDb.getUnsettledTxCount(),
+      numTxsInNextRollup: processedTransactions.length,
+      pendingBridgeStats: [...pendingBridgeStats.values()],
+      pendingTxCount: pendingTransactionsNotBeingProcessed.length,
+      pendingSecondClassTxCount: await this.rollupDb.getPendingSecondClassTxCount(),
+    };
+
+    return result;
+  }
 }
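The patch above replaces the Date-based validity check with a cached promise: the first caller of getTxPoolProfile() starts computeTxPoolProfile(), every concurrent caller awaits the same in-flight promise, and a setTimeout armed near the end of the computation clears the cache so a later caller recomputes it. Below is a minimal, self-contained sketch of that pattern; the CachedValue class, the compute callback and the usage values are illustrative only and are not part of the codebase.

class CachedValue<T> {
  private cached?: Promise<T>;

  constructor(private compute: () => Promise<T>, private ttlMs: number) {}

  public async get(): Promise<T> {
    if (!this.cached) {
      // First caller (or first caller after expiry) kicks off the computation;
      // concurrent callers fall through and await the same in-flight promise.
      this.cached = this.compute().then(value => {
        // Arm the expiry once the computation has produced a value, roughly mirroring
        // the patch, which arms the reset timer near the end of computeTxPoolProfile().
        setTimeout(() => (this.cached = undefined), this.ttlMs);
        return value;
      });
    }
    return await this.cached;
  }
}

// Illustrative usage: many concurrent requests share one expensive computation per expiry window.
const txPoolProfileCache = new CachedValue(async () => ({ numTxs: 0, pendingTxCount: 0 }), 60 * 1000);
void txPoolProfileCache.get();

One trade-off of this shape (in the sketch, and as far as I can tell in the patch as well): if the computation rejects before the expiry timer is armed, the rejected promise stays cached until something else resets it.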
From 96bcd9dde2a836a90ae9c6c050f0edf053404097 Mon Sep 17 00:00:00 2001
From: PhilWindle <60546371+PhilWindle@users.noreply.github.com>
Date: Mon, 13 Mar 2023 17:20:13 +0000
Subject: [PATCH 2/2] Pw/block download fix (#27)

* Fixed the block download calculation

* Comment fix

* More fixes

---
 .../sdk/src/core_sdk/block_downloader.ts | 22 ++++++++++++-------
 1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/yarn-project/sdk/src/core_sdk/block_downloader.ts b/yarn-project/sdk/src/core_sdk/block_downloader.ts
index 015dcb108..b7fe24e72 100644
--- a/yarn-project/sdk/src/core_sdk/block_downloader.ts
+++ b/yarn-project/sdk/src/core_sdk/block_downloader.ts
@@ -11,10 +11,17 @@ export class BlockDownloader {
   private interruptableSleep = new InterruptableSleep();
   private semaphore: Semaphore;
   private queue = new MemoryFifo();
+  private genesisTake: number;
   private debug = createDebugLogger('bb:block_downloader');
 
-  constructor(private rollupProvider: RollupProvider, maxQueueSize: number, private initialTreeSize: number) {
+  constructor(private rollupProvider: RollupProvider, maxQueueSize: number, initialTreeSize: number) {
     this.semaphore = new Semaphore(maxQueueSize);
+    // Choosing 55 as an initial chunk to insert if starting from 0, is an aztec-connect optimisation.
+    // The aztec-connect genesis data consists of 73 rollups.
+    // Initially inserting 55 brings us to 128, after which we work with chunks of 128 rollups.
+    // If not synching from zero, the chunk size is whatever takes us up to the next 128 alignment.
+    // This allows for optimal subtree insertions in the client side merkle tree for better sync performance.
+    this.genesisTake = 128 - (initialTreeSize % 128);
   }
 
   public start(from = 0) {
@@ -31,13 +38,12 @@ export class BlockDownloader {
     const fn = async () => {
       while (this.running) {
         try {
-          // Choosing 55 as an initial chunk to insert if starting from 0, is an aztec-connect optimisation.
-          // The aztec-connect genesis data consists of 73 rollups.
-          // Initially inserting 55 brings us to 128, after which we work with chunks of 128 rollups.
-          // If not synching from zero, the chunk size is whatever takes us up to the next 128 alignment.
-          // This allows for optimal subtree insertions in the client side merkle tree for better sync performance.
-          const initialTake = 128 - ((this.from === 0 ? this.initialTreeSize : this.from) % 128);
-          const blocks = await this.rollupProvider.getBlocks(this.from, this.from === from ? initialTake : 128);
+          // If requesting from block 0, then take the fixed number of blocks to take us to 128 (genesisTake).
+          // Otherwise, take blocks as required to get us to a 128 aligned boundary starting from block (128 - initialTreeSize).
+          // e.g. we are trying to get to blocks 183, 311, 439 etc....
+          const takeValue =
+            this.from < this.genesisTake ? this.genesisTake - this.from : 128 - ((this.from - this.genesisTake) % 128);
+          const blocks = await this.rollupProvider.getBlocks(this.from, takeValue);
           if (!blocks.length) {
             await this.interruptableSleep.sleep(10000);
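For reference, the chunk-size arithmetic the second patch moves into the constructor can be checked in isolation. The sketch below is standalone and only borrows the numbers quoted in the patch comments (73 genesis rollups, 128-rollup chunks); the takeValue function and the ROLLUPS_PER_CHUNK constant are illustrative, not part of the SDK.

const ROLLUPS_PER_CHUNK = 128;

function takeValue(from: number, initialTreeSize: number): number {
  // Fixed size of the very first request, landing the cursor on the first 128-aligned boundary.
  const genesisTake = ROLLUPS_PER_CHUNK - (initialTreeSize % ROLLUPS_PER_CHUNK);
  return from < genesisTake
    ? genesisTake - from // still before the first boundary: request just enough to reach it
    : ROLLUPS_PER_CHUNK - ((from - genesisTake) % ROLLUPS_PER_CHUNK); // request up to the next boundary
}

// With the 73 genesis rollups from the patch comment, genesisTake is 55,
// so the boundaries sit at blocks 55, 183, 311, 439, ...
console.log(takeValue(0, 73)); // 55  -> the next request starts at block 55
console.log(takeValue(55, 73)); // 128 -> the next request starts at block 183
console.log(takeValue(183, 73)); // 128 -> the next request starts at block 311
console.log(takeValue(200, 73)); // 111 -> a restart from block 200 realigns to the 311 boundary

These boundaries match the ones named in the patch comment (183, 311, 439), which is what keeps each getBlocks response insertable as whole aligned subtrees in the client-side merkle tree, per the comment carried over from the original code.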