diff --git a/packages/block/src/from-beacon-payload.ts b/packages/block/src/from-beacon-payload.ts index f0cea6b412..59baac22e5 100644 --- a/packages/block/src/from-beacon-payload.ts +++ b/packages/block/src/from-beacon-payload.ts @@ -48,7 +48,7 @@ export function executionPayloadFromBeaconPayload(payload: BeaconPayloadJson): E gasLimit: bigIntToHex(BigInt(payload.gas_limit)), gasUsed: bigIntToHex(BigInt(payload.gas_used)), timestamp: bigIntToHex(BigInt(payload.timestamp)), - extraData: bigIntToHex(BigInt(payload.extra_data)), + extraData: payload.extra_data, baseFeePerGas: bigIntToHex(BigInt(payload.base_fee_per_gas)), blockHash: payload.block_hash, transactions: payload.transactions, diff --git a/packages/client/bin/cli.ts b/packages/client/bin/cli.ts index 04aca628ab..52946ab6cb 100755 --- a/packages/client/bin/cli.ts +++ b/packages/client/bin/cli.ts @@ -317,13 +317,8 @@ const args: ClientOpts = yargs(hideBin(process.argv)) boolean: true, default: true, }) - .option('disableBeaconSync', { - describe: - 'Disables beacon (optimistic) sync if the CL provides blocks at the head of the chain', - boolean: true, - }) - .option('forceSnapSync', { - describe: 'Force a snap sync run (for testing and development purposes)', + .option('snap', { + describe: 'Enable snap state sync (for testing and development purposes)', boolean: true, }) .option('prefixStorageTrieKeys', { @@ -881,9 +876,8 @@ async function run() { port: args.port, saveReceipts: args.saveReceipts, syncmode: args.sync, - disableBeaconSync: args.disableBeaconSync, - forceSnapSync: args.forceSnapSync, prefixStorageTrieKeys: args.prefixStorageTrieKeys, + enableSnapSync: args.snap, useStringValueTrieDB: args.useStringValueTrieDB, txLookupLimit: args.txLookupLimit, pruneEngineCache: args.pruneEngineCache, diff --git a/packages/client/src/config.ts b/packages/client/src/config.ts index 41d9c42b05..3c68ea60de 100644 --- a/packages/client/src/config.ts +++ b/packages/client/src/config.ts @@ -43,22 +43,11 @@ export interface ConfigOptions { syncmode?: SyncMode /** - * Whether to disable beacon (optimistic) sync if CL provides - * blocks at the head of chain. + * Whether to enable and run snapSync, currently experimental * * Default: false */ - disableBeaconSync?: boolean - - /** - * Whether to test and run snapSync. When fully ready, this needs to - * be replaced by a more sophisticated condition based on how far back we are - * from the head, and how to run it in conjunction with the beacon sync - * blocks at the head of chain. 
- * - * Default: false - */ - forceSnapSync?: boolean + enableSnapSync?: boolean /** * A temporary option to offer backward compatibility with already-synced databases that are @@ -335,6 +324,8 @@ export interface ConfigOptions { */ maxInvalidBlocksErrorCache?: number pruneEngineCache?: boolean + snapAvailabilityDepth?: bigint + snapTransitionSafeDepth?: bigint } export class Config { @@ -368,7 +359,7 @@ export class Config { public static readonly MAX_RANGE_BYTES = 50000 // This should get like 100 accounts in this range - public static readonly MAX_ACCOUNT_RANGE = (BIGINT_2 ** BIGINT_256 - BIGINT_1) / BigInt(1_000_000) + public static readonly MAX_ACCOUNT_RANGE = (BIGINT_2 ** BIGINT_256 - BIGINT_1) / BigInt(1_000) // Larger ranges used for storage slots since assumption is slots should be much sparser than accounts public static readonly MAX_STORAGE_RANGE = (BIGINT_2 ** BIGINT_256 - BIGINT_1) / BigInt(10) @@ -381,6 +372,10 @@ export class Config { public static readonly ENGINE_NEWPAYLOAD_MAX_EXECUTE = 2 // currently ethereumjs can execute 200 txs in 12 second window so keeping 1/2 target for blocking response public static readonly ENGINE_NEWPAYLOAD_MAX_TXS_EXECUTE = 100 + public static readonly SNAP_AVAILABILITY_DEPTH = BigInt(128) + // distance from head at which we can safely transition from a synced snapstate to vmexecution + // randomly kept it at 5 for fast testing purposes but ideally should be >=32 slots + public static readonly SNAP_TRANSITION_SAFE_DEPTH = BigInt(5) public readonly logger: Logger public readonly syncmode: SyncMode @@ -427,15 +422,16 @@ export class Config { public readonly engineParentLookupMaxDepth: number public readonly engineNewpayloadMaxExecute: number public readonly engineNewpayloadMaxTxsExecute: number + public readonly snapAvailabilityDepth: bigint + public readonly snapTransitionSafeDepth: bigint - public readonly disableBeaconSync: boolean - public readonly forceSnapSync: boolean - // Just a development only flag, will/should be removed - public readonly disableSnapSync: boolean = false public readonly prefixStorageTrieKeys: boolean + // Defaulting to false as experimental as of now + public readonly enableSnapSync: boolean public readonly useStringValueTrieDB: boolean public synchronized: boolean + public lastsyncronized?: boolean /** lastSyncDate in ms */ public lastSyncDate: number /** Best known block height */ @@ -464,7 +460,7 @@ export class Config { this.txLookupLimit = options.txLookupLimit ?? 2350000 this.maxPerRequest = options.maxPerRequest ?? Config.MAXPERREQUEST_DEFAULT this.maxFetcherJobs = options.maxFetcherJobs ?? Config.MAXFETCHERJOBS_DEFAULT - this.maxFetcherRequests = options.maxPerRequest ?? Config.MAXFETCHERREQUESTS_DEFAULT + this.maxFetcherRequests = options.maxFetcherRequests ?? Config.MAXFETCHERREQUESTS_DEFAULT this.minPeers = options.minPeers ?? Config.MINPEERS_DEFAULT this.maxPeers = options.maxPeers ?? Config.MAXPEERS_DEFAULT this.dnsAddr = options.dnsAddr ?? Config.DNSADDR_DEFAULT @@ -510,10 +506,12 @@ export class Config { options.engineNewpayloadMaxExecute ?? Config.ENGINE_NEWPAYLOAD_MAX_EXECUTE this.engineNewpayloadMaxTxsExecute = options.engineNewpayloadMaxTxsExecute ?? Config.ENGINE_NEWPAYLOAD_MAX_TXS_EXECUTE + this.snapAvailabilityDepth = options.snapAvailabilityDepth ?? Config.SNAP_AVAILABILITY_DEPTH + this.snapTransitionSafeDepth = + options.snapTransitionSafeDepth ?? Config.SNAP_TRANSITION_SAFE_DEPTH - this.disableBeaconSync = options.disableBeaconSync ?? false - this.forceSnapSync = options.forceSnapSync ?? 
false this.prefixStorageTrieKeys = options.prefixStorageTrieKeys ?? true + this.enableSnapSync = options.enableSnapSync ?? false this.useStringValueTrieDB = options.useStringValueTrieDB ?? false // Start it off as synchronized if this is configured to mine or as single node @@ -597,13 +595,16 @@ export class Config { } } - this.logger.debug( - `Client synchronized=${this.synchronized}${ - latest !== null && latest !== undefined ? ' height=' + latest.number : '' - } syncTargetHeight=${this.syncTargetHeight} lastSyncDate=${ - (Date.now() - this.lastSyncDate) / 1000 - } secs ago` - ) + if (this.synchronized !== this.lastsyncronized) { + this.logger.debug( + `Client synchronized=${this.synchronized}${ + latest !== null && latest !== undefined ? ' height=' + latest.number : '' + } syncTargetHeight=${this.syncTargetHeight} lastSyncDate=${ + (Date.now() - this.lastSyncDate) / 1000 + } secs ago` + ) + this.lastsyncronized = this.synchronized + } } /** @@ -680,7 +681,7 @@ export class Config { */ getDnsDiscovery(option: boolean | undefined): boolean { if (option !== undefined) return option - const dnsNets = ['holesky', 'sepolia'] + const dnsNets = ['goerli', 'sepolia', 'holesky'] return dnsNets.includes(this.chainCommon.chainName()) } } diff --git a/packages/client/src/execution/vmexecution.ts b/packages/client/src/execution/vmexecution.ts index 2395caf0ba..99ac115e17 100644 --- a/packages/client/src/execution/vmexecution.ts +++ b/packages/client/src/execution/vmexecution.ts @@ -233,41 +233,26 @@ export class VMExecution extends Execution { async setHead( blocks: Block[], { finalizedBlock, safeBlock }: { finalizedBlock?: Block; safeBlock?: Block } = {} - ): Promise { - return this.runWithLock(async () => { - const vmHeadBlock = blocks[blocks.length - 1] - const chainPointers: [string, Block][] = [ - ['vmHeadBlock', vmHeadBlock], - // if safeBlock is not provided, the current safeBlock of chain should be used - // which is genesisBlock if it has never been set for e.g. - ['safeBlock', safeBlock ?? this.chain.blocks.safe ?? this.chain.genesis], - ['finalizedBlock', finalizedBlock ?? this.chain.blocks.finalized ?? 
this.chain.genesis], - ] - - let isSortedDesc = true - let lastBlock = vmHeadBlock - for (const [blockName, block] of chainPointers) { - if (block === null) { - continue - } - if (!(await this.vm.stateManager.hasStateRoot(block.header.stateRoot))) { - // If we set blockchain iterator to somewhere where we don't have stateroot - // execution run will always fail - throw Error( - `${blockName}'s stateRoot not found number=${block.header.number} root=${short( - block.header.stateRoot - )}` - ) - } - isSortedDesc = isSortedDesc && lastBlock.header.number >= block.header.number - lastBlock = block - } + ): Promise { + if (!this.started || this.config.shutdown) return false - if (isSortedDesc === false) { + return this.runWithLock(async () => { + const vmHeadBlock = blocks[blocks.length - 1] + const chainPointers: [string, Block][] = [['vmHeadBlock', vmHeadBlock]] + + // instead of checking for the previous roots of safe,finalized, we will contend + // ourselves with just vmHead because in snap sync we might not have the safe + // finalized blocks executed + if (!(await this.vm.stateManager.hasStateRoot(vmHeadBlock.header.stateRoot))) { + // If we set blockchain iterator to somewhere where we don't have stateroot + // execution run will always fail throw Error( - `headBlock=${chainPointers[0][1].header.number} should be >= safeBlock=${chainPointers[1][1]?.header.number} should be >= finalizedBlock=${chainPointers[2][1]?.header.number}` + `vmHeadBlock's stateRoot not found number=${vmHeadBlock.header.number} root=${short( + vmHeadBlock.header.stateRoot + )}` ) } + // skip emitting the chain update event as we will manually do it await this.chain.putBlocks(blocks, true, true) for (const block of blocks) { @@ -296,6 +281,7 @@ export class VMExecution extends Execution { await this.chain.blockchain.setIteratorHead('finalized', finalizedBlock.hash()) } await this.chain.update(true) + return true }) } @@ -361,7 +347,7 @@ export class VMExecution extends Execution { // determine starting state for block run // if we are just starting or if a chain reorg has happened if (headBlock === undefined || reorg) { - const headBlock = await blockchain.getBlock(block.header.parentHash) + headBlock = await blockchain.getBlock(block.header.parentHash) parentState = headBlock.header.stateRoot if (reorg) { @@ -508,21 +494,34 @@ export class VMExecution extends Execution { // to parent's parent and so on... // // There can also be a better way to backstep vm to but lets naively step back - let backStepTo, backStepToHash + let backStepTo, + backStepToHash, + backStepToRoot, + hasParentStateRoot = false if (headBlock !== undefined) { + hasParentStateRoot = await this.vm.stateManager.hasStateRoot( + headBlock.header.stateRoot + ) backStepTo = headBlock.header.number ?? BIGINT_0 - BIGINT_1 backStepToHash = headBlock.header.parentHash + backStepToRoot = headBlock.header.stateRoot } - this.config.logger.warn( - `${errorMsg}, backStepping vmHead to number=${backStepTo} hash=${short( - backStepToHash ?? 'na' - )}:\n${error}` - ) - - // backStepToHash should not be undefined but if its the above warn log will show us to debug - // but still handle here so that we don't send the client into a tizzy - if (backStepToHash !== undefined) { + + if (hasParentStateRoot === true && backStepToHash !== undefined) { + this.config.logger.warn( + `${errorMsg}, backStepping vmHead to number=${backStepTo} hash=${short( + backStepToHash ?? 'na' + )} hasParentStateRoot=${short(backStepToRoot ?? 
'na')}:\n${error}` + ) await this.vm.blockchain.setIteratorHead('vm', backStepToHash) + } else { + this.config.logger.error( + `${errorMsg}, couldn't back step to vmHead number=${backStepTo} hash=${short( + backStepToHash ?? 'na' + )} hasParentStateRoot=${hasParentStateRoot} backStepToRoot=${short( + backStepToRoot ?? 'na' + )}:\n${error}` + ) } } else { this.config.logger.warn(`${errorMsg}:\n${error}`) diff --git a/packages/client/src/rpc/modules/engine.ts b/packages/client/src/rpc/modules/engine.ts index 08c1516bec..7a229e074b 100644 --- a/packages/client/src/rpc/modules/engine.ts +++ b/packages/client/src/rpc/modules/engine.ts @@ -509,8 +509,9 @@ export class Engine { this.lastAnnouncementStatus = this.skeleton.logSyncStatus('[ EL ]', { forceShowInfo, lastStatus: this.lastAnnouncementStatus, - executing: this.execution.started && this.execution.running, + vmexecution: { started: this.execution.started, running: this.execution.running }, fetching: fetcher !== undefined && fetcher !== null && fetcher.syncErrored === undefined, + snapsync: this.service.snapsync?.fetcherDoneFlags, peers: (this.service.beaconSync as any)?.pool.size, }) } @@ -1067,7 +1068,7 @@ export class Engine { // It is possible that newPayload didn't start beacon sync as the payload it was asked to // evaluate didn't require syncing beacon. This can happen if the EL<>CL starts and CL // starts from a bit behind like how lodestar does - if (!this.service.beaconSync && !this.config.disableBeaconSync) { + if (!this.service.beaconSync) { await this.service.switchToBeaconSync() } @@ -1081,9 +1082,6 @@ export class Engine { return response } - /* - * Process head block - */ let headBlock: Block | undefined try { const head = toBytes(headBlockHash) @@ -1121,9 +1119,12 @@ export class Engine { ) // call skeleton sethead with force head change and reset beacon sync if reorg - const reorged = await this.skeleton.setHead(headBlock, true) + const { reorged, safeBlock, finalizedBlock } = await this.skeleton.forkchoiceUpdate(headBlock, { + safeBlockHash: safe, + finalizedBlockHash: finalized, + }) + if (reorged) await this.service.beaconSync?.reorged(headBlock) - await this.skeleton.blockingFillWithCutoff(this.chain.config.engineNewpayloadMaxExecute) // Only validate this as terminal block if this block's difficulty is non-zero, // else this is a PoS block but its hardfork could be indeterminable if the skeleton @@ -1147,6 +1148,9 @@ export class Engine { (this.executedBlocks.get(headBlockHash.slice(2)) ?? (await validExecutedChainBlock(headBlock, this.chain))) !== null if (!isHeadExecuted) { + // Trigger the statebuild here since we have finalized and safeblock available + void this.service.buildHeadState() + // execution has not yet caught up, so lets just return sync const payloadStatus = { status: Status.SYNCING, @@ -1157,51 +1161,7 @@ export class Engine { return response } - /* - * Process safe and finalized block since headBlock has been found to be executed - * Allowed to have zero value while transition block is finalizing - */ - let safeBlock, finalizedBlock - - if (!equalsBytes(safe, zeroBlockHash)) { - if (equalsBytes(safe, headBlock.hash())) { - safeBlock = headBlock - } else { - try { - // Right now only check if the block is available, canonicality check is done - // in setHead after chain.putBlocks so as to reflect latest canonical chain - safeBlock = - (await this.skeleton.getBlockByHash(safe, true)) ?? 
(await this.chain.getBlock(safe)) - } catch (_error: any) { - throw { - code: INVALID_PARAMS, - message: 'safe block not available', - } - } - } - } else { - safeBlock = undefined - } - - if (!equalsBytes(finalized, zeroBlockHash)) { - try { - // Right now only check if the block is available, canonicality check is done - // in setHead after chain.putBlocks so as to reflect latest canonical chain - finalizedBlock = - (await this.skeleton.getBlockByHash(finalized, true)) ?? - (await this.chain.getBlock(finalized)) - } catch (error: any) { - throw { - message: 'finalized block not available', - code: INVALID_PARAMS, - } - } - } else { - finalizedBlock = undefined - } - const vmHeadHash = (await this.chain.blockchain.getIteratorHead()).hash() - if (!equalsBytes(vmHeadHash, headBlock.hash())) { let parentBlocks: Block[] = [] if (this.chain.headers.latest && this.chain.headers.latest.number < headBlock.header.number) { @@ -1224,7 +1184,17 @@ export class Engine { const blocks = [...parentBlocks, headBlock] try { - await this.execution.setHead(blocks, { safeBlock, finalizedBlock }) + const completed = await this.execution.setHead(blocks, { safeBlock, finalizedBlock }) + if (!completed) { + const latestValidHash = await validHash(headBlock.hash(), this.chain, this.chainCache) + const payloadStatus = { + status: Status.SYNCING, + latestValidHash, + validationError: null, + } + const response = { payloadStatus, payloadId: null } + return response + } } catch (error) { throw { message: (error as Error).message, @@ -1251,12 +1221,6 @@ export class Engine { } } - if ( - this.config.syncTargetHeight === undefined || - this.config.syncTargetHeight < headBlock.header.number - ) { - this.config.syncTargetHeight = headBlock.header.number - } this.config.updateSynchronizedState(headBlock.header) if (this.chain.config.synchronized) { this.service.txPool.checkRunState() diff --git a/packages/client/src/service/fullethereumservice.ts b/packages/client/src/service/fullethereumservice.ts index 529e3ef3eb..c5e8e24742 100644 --- a/packages/client/src/service/fullethereumservice.ts +++ b/packages/client/src/service/fullethereumservice.ts @@ -31,13 +31,20 @@ interface FullEthereumServiceOptions extends ServiceOptions { * @memberof module:service */ export class FullEthereumService extends Service { - public synchronizer?: BeaconSynchronizer | FullSynchronizer | SnapSynchronizer + /* synchronizer for syncing the chain */ + public synchronizer?: BeaconSynchronizer | FullSynchronizer public lightserv: boolean public miner: Miner | undefined - public execution: VMExecution public txPool: TxPool public skeleton?: Skeleton + // objects dealing with state + public snapsync?: SnapSynchronizer + public execution: VMExecution + + /** building head state via snapsync or vmexecution */ + private building = false + /** * Create new ETH service */ @@ -48,65 +55,65 @@ export class FullEthereumService extends Service { this.config.logger.info('Full sync mode') + const { metaDB } = options + if (metaDB !== undefined) { + this.skeleton = new Skeleton({ + config: this.config, + chain: this.chain, + metaDB, + }) + } + this.execution = new VMExecution({ config: options.config, stateDB: options.stateDB, - metaDB: options.metaDB, + metaDB, chain: this.chain, }) + this.snapsync = this.config.enableSnapSync + ? 
new SnapSynchronizer({ + config: this.config, + pool: this.pool, + chain: this.chain, + interval: this.interval, + skeleton: this.skeleton, + execution: this.execution, + }) + : undefined + this.txPool = new TxPool({ config: this.config, service: this, }) - const metaDB = (this.execution as any).metaDB - if (metaDB !== undefined) { - this.skeleton = new Skeleton({ - config: this.config, - chain: this.chain, - metaDB, - }) - } - - // This flag is just to run and test snap sync, when fully ready, this needs to - // be replaced by a more sophisticated condition based on how far back we are - // from the head, and how to run it in conjunction with the beacon sync - if (this.config.forceSnapSync) { - this.synchronizer = new SnapSynchronizer({ - config: this.config, - pool: this.pool, - chain: this.chain, - interval: this.interval, - }) - } else { + if (this.config.syncmode === SyncMode.Full) { if (this.config.chainCommon.gteHardfork(Hardfork.Paris) === true) { - if (!this.config.disableBeaconSync) { - // skip opening the beacon synchronizer before everything else (chain, execution etc) - // as it resets and messes up the entire chain - void this.switchToBeaconSync(true) - } + // skip opening the beacon synchronizer before everything else (chain, execution etc) + // as it resets and messes up the entire chain + // + // also with skipOpen this call is a sync call as no async operation is executed + // as good as creating the synchronizer here + void this.switchToBeaconSync(true) this.config.logger.info(`Post-merge 🐼 client mode: run with CL client.`) } else { - if (this.config.syncmode === SyncMode.Full) { - this.synchronizer = new FullSynchronizer({ + this.synchronizer = new FullSynchronizer({ + config: this.config, + pool: this.pool, + chain: this.chain, + txPool: this.txPool, + execution: this.execution, + interval: this.interval, + }) + + if (this.config.mine) { + this.miner = new Miner({ config: this.config, - pool: this.pool, - chain: this.chain, - txPool: this.txPool, - execution: this.execution, - interval: this.interval, + service: this, }) } } } - - if (this.config.mine) { - this.miner = new Miner({ - config: this.config, - service: this, - }) - } } /** @@ -130,16 +137,18 @@ export class FullEthereumService extends Service { this.config.superMsg(`Transitioning to beacon sync`) } - this.synchronizer = new BeaconSynchronizer({ - config: this.config, - pool: this.pool, - chain: this.chain, - interval: this.interval, - execution: this.execution, - skeleton: this.skeleton!, - }) - if (!skipOpen) { - await this.synchronizer.open() + if (this.config.syncmode !== SyncMode.None && this.beaconSync === undefined) { + this.synchronizer = new BeaconSynchronizer({ + config: this.config, + pool: this.pool, + chain: this.chain, + interval: this.interval, + execution: this.execution, + skeleton: this.skeleton!, + }) + if (!skipOpen) { + await this.synchronizer.open() + } } } @@ -178,7 +187,15 @@ export class FullEthereumService extends Service { // but after chain is opened, which skeleton.open() does internally await this.skeleton?.open() await super.open() - await this.execution.open() + + // open snapsync instead of execution if instantiated + // it will open execution when done (or if doesn't need to snap sync) + if (this.snapsync !== undefined) { + await this.snapsync.open() + } else { + await this.execution.open() + } + this.txPool.open() if (this.config.mine) { // Start the TxPool immediately if mining @@ -196,10 +213,52 @@ export class FullEthereumService extends Service { } await super.start() 
this.miner?.start() - await this.execution.start() + if (this.snapsync === undefined) { + await this.execution.start() + } + void this.buildHeadState() return true } + /** + * if the vm head is not recent enough, trigger building a recent state by snapsync or by running + * vm execution + */ + async buildHeadState(): Promise { + if (this.building) return + this.building = true + + try { + if (this.execution.started && this.synchronizer !== undefined) { + await this.synchronizer.runExecution() + } else if (this.snapsync !== undefined) { + if (this.config.synchronized === true || this.skeleton?.synchronized === true) { + const syncResult = await this.snapsync.checkAndSync() + if (syncResult !== null) { + const transition = await this.skeleton?.setVmHead(syncResult) + if (transition === true) { + this.config.superMsg('Snapsync completed, transitioning to VMExecution') + await this.execution.open() + await this.execution.start() + } + } + } else { + this.config.logger.debug( + `skipping snapsync since cl (skeleton) synchronized=${this.skeleton?.synchronized}` + ) + } + } else { + this.config.logger.warn( + 'skipping building head state as neither execution is started nor snapsync is available' + ) + } + } catch (error) { + this.config.logger.error(`Error building headstate error=${error}`) + } finally { + this.building = false + } + } + /** * Stop service */ @@ -210,7 +269,11 @@ export class FullEthereumService extends Service { this.txPool.stop() this.miner?.stop() await this.synchronizer?.stop() + + await this.snapsync?.stop() + // independently close execution even if it might have been opened by snapsync await this.execution.stop() + await super.stop() return true } diff --git a/packages/client/src/service/skeleton.ts b/packages/client/src/service/skeleton.ts index dad5f53680..9531932c2c 100644 --- a/packages/client/src/service/skeleton.ts +++ b/packages/client/src/service/skeleton.ts @@ -3,11 +3,14 @@ import { RLP } from '@ethereumjs/rlp' import { BIGINT_0, BIGINT_1, + BIGINT_100, + BIGINT_2EXP256, Lock, bigIntToBytes, bytesToBigInt, bytesToInt, equalsBytes, + formatBigDecimal, intToBytes, utf8ToBytes, zeros, @@ -16,15 +19,19 @@ import { import { short, timeDuration } from '../util' import { DBKey, MetaDBManager } from '../util/metaDBManager' +import type { SnapFetcherDoneFlags } from '../sync/fetcher/types' import type { MetaDBManagerOptions } from '../util/metaDBManager' +import type { BlockHeader } from '@ethereumjs/block' import type { Hardfork } from '@ethereumjs/common' -// Thanks to go-ethereum for the skeleton design +const INVALID_PARAMS = -32602 type SkeletonStatus = { progress: SkeletonProgress linked: boolean canonicalHeadReset: boolean + safe: bigint + finalized: bigint } /** @@ -107,9 +114,26 @@ export class Skeleton extends MetaDBManager { private STATUS_LOG_INTERVAL = 8000 /** How often to log sync status (in ms) */ + /** + * safeBlock as indicated by engine api, set + */ + public safeBlock?: Block + public finalizedBlock?: Block + + // to track if we have cl FCUs close to the clockhead + synchronized = false + private lastsyncronized = false + private lastSyncDate = 0 + constructor(opts: MetaDBManagerOptions) { super(opts) - this.status = { progress: { subchains: [] }, linked: false, canonicalHeadReset: false } + this.status = { + progress: { subchains: [] }, + linked: false, + canonicalHeadReset: false, + safe: BIGINT_0, + finalized: BIGINT_0, + } this.started = 0 } @@ -153,7 +177,13 @@ export class Skeleton extends MetaDBManager { throw Error(`skeleton reset called 
before being opened`) } await this.runWithLock(async () => { - this.status = { progress: { subchains: [] }, linked: false, canonicalHeadReset: false } + // retain safe,finalized from the progress as that is not bound to change + this.status = { + ...this.status, + progress: { subchains: [] }, + linked: false, + canonicalHeadReset: false, + } await this.writeSyncStatus() }) } @@ -246,16 +276,15 @@ export class Skeleton extends MetaDBManager { ) } // genesis announcement + this.status.linked = true + this.status.canonicalHeadReset = false return false } - let [lastchain] = this.status.progress.subchains + const [lastchain] = this.status.progress.subchains + // subchains should have already been inited if (lastchain === undefined) { - this.config.logger.debug( - `Skeleton empty, comparing against genesis tail=0 head=0 newHead=${number}` - ) - // set the lastchain to genesis for comparison in following conditions - lastchain = { head: BIGINT_0, tail: BIGINT_0, next: zeroBlockHash } + throw Error(`No subchain to processNewHead`) } if (lastchain.tail > number) { @@ -289,7 +318,7 @@ export class Skeleton extends MetaDBManager { // which we will let it get addressed post this if else block if (force) { this.config.logger.debug( - `Skeleton differing announcement tail=${lastchain.tail} head=${ + `Skeleton head reorg tail=${lastchain.tail} head=${ lastchain.head } number=${number} expected=${short( mayBeDupBlock?.hash() ?? zeroBlockHash @@ -297,7 +326,7 @@ export class Skeleton extends MetaDBManager { ) } else { this.config.logger.debug( - `Skeleton stale announcement tail=${lastchain.tail} head=${lastchain.head} number=${number}` + `Skeleton differing announcement tail=${lastchain.tail} head=${lastchain.head} number=${number}` ) } return true @@ -358,6 +387,13 @@ export class Skeleton extends MetaDBManager { * @returns True if the head (will) cause a reorg in the canonical skeleton subchain */ async setHead(head: Block, force = true, init = false, reorgthrow = false): Promise { + if ( + this.config.syncTargetHeight === undefined || + this.config.syncTargetHeight < head.header.number + ) { + this.config.syncTargetHeight = head.header.number + } + return this.runWithLock(async () => { if (this.started === 0) { throw Error(`skeleton setHead called before being opened`) @@ -372,7 +408,25 @@ export class Skeleton extends MetaDBManager { )} force=${force}` ) - const subchain0Head = this.status.progress.subchains[0]?.head ?? BIGINT_0 + let [lastchain] = this.status.progress.subchains + if (lastchain === undefined) { + // init the subchains even if this is not a forced head + lastchain = { + head: this.chain.blocks.height, + tail: this.chain.blocks.height, + next: this.chain.blocks.latest?.header.parentHash ?? 
zeroBlockHash, + } + this.status.linked = true + this.status.canonicalHeadReset = false + this.config.logger.debug( + `Initing empty skeleton with current chain head tail=${lastchain.tail} head=${ + lastchain.head + } next=${short(lastchain.next)}` + ) + this.status.progress.subchains.push(lastchain) + } + + const subchain0Head = lastchain.head const reorg = await this.processNewHead(head, force) // see if just the head needs to be updated or a new subchain needs to be created @@ -389,11 +443,47 @@ export class Skeleton extends MetaDBManager { !equalsBytes(parent.hash(), head.header.parentHash) || parent.header.number < subchain.tail ) { + // truncate subchain 0 before inserting a new chain so that this chain can be merged into new + // one without issues if the opportunity arrises + if ( + subchain !== undefined && + this.status.linked && + this.status.canonicalHeadReset === false && + this.chain.blocks.height >= subchain.tail + ) { + const trucateTailToNumber = this.chain.blocks.height + BIGINT_1 + const trucateTailTo = + trucateTailToNumber <= subchain.head + ? await this.getBlock(trucateTailToNumber, true) + : undefined + if (trucateTailTo !== undefined) { + subchain.tail = trucateTailTo.header.number + subchain.next = trucateTailTo.header.parentHash + this.config.logger.info( + `Truncated subchain0 with head=${subchain.head} to a new tail=${ + subchain.tail + } next=${short(subchain.next)} before overlaying a new subchain` + ) + } else { + // clear out this subchain + this.config.logger.info( + `Dropping subchain0 with head=${subchain.head} before overlaying a new subchain as trucateTailToNumber=${trucateTailToNumber} block not available ` + ) + this.status.progress.subchains.splice(0, 1) + } + } + const s = { head: head.header.number, tail: head.header.number, next: head.header.parentHash, } + + // reset subchains if it only had been set with genesis to only track non-trivial subchains + if (subchain0Head === BIGINT_0) { + this.status.progress.subchains = [] + } + this.status.progress.subchains.unshift(s) const msgs = [ `Created new subchain tail=${s.tail} head=${s.head} next=${short(s.next)}`, @@ -403,17 +493,53 @@ export class Skeleton extends MetaDBManager { // Reset the filling of canonical head from tail only on tail reorg and exit any ongoing fill this.status.canonicalHeadReset = s.tail > BIGINT_0 } else { - // Only the head differed, tail is preserved + // we are here because valid canonical parent is either in skeleton or chain and new head + // > tail and hence doesn't reorg the current tail subchain.head = head.header.number + // if this was a linked chain with no reset marked and chain height >= tail we need to + // truncate the tail + if ( + this.status.linked && + !this.status.canonicalHeadReset && + this.chain.blocks.height >= subchain.tail + ) { + let trucateTailTo + const trucateTailToNumber = this.chain.blocks.height + BIGINT_1 + if (trucateTailToNumber < head.header.number) { + trucateTailTo = await this.getBlock(trucateTailToNumber, true) + } + + if (trucateTailTo === undefined) { + subchain.tail = head.header.number + subchain.next = head.header.parentHash + // reset canonical head, don't change linked status because parent was + // found in canonical chain + this.status.canonicalHeadReset = true + this.config.logger.info( + `Truncated subchain tail for chain reorg to the subchain head=${ + subchain.tail + } next=${short(subchain.next)} linked=${this.status.linked} canonicalHeadReset=${ + this.status.canonicalHeadReset + }` + ) + } else { + subchain.tail = 
trucateTailTo.header.number + subchain.next = trucateTailTo.header.parentHash + // just reset tail and no need to modify linked status + this.config.logger.info( + `Truncated subchain with head=${subchain.head} to a new tail=${ + subchain.tail + } next=${short(subchain.next)} linked=${this.status.linked} canonicalHeadReset=${ + this.status.canonicalHeadReset + }` + ) + } + } } } - // Put block if this is forced i.e. fcU update or if this is forward announcement i.e. new blocks - // after the current head. putting this block on annoucement i.e force=false on <=current head changes the - // skeleton canonical relationship. for > current head, this is treated more like optimistic cache - if (force || head.header.number > subchain0Head) { - await this.putBlock(head) - } + // only add to unfinalized cache if this is announcement and before canonical head + await this.putBlock(head, !force && head.header.number <= subchain0Head) if (init) { await this.trySubChainsMerge() @@ -445,6 +571,248 @@ export class Skeleton extends MetaDBManager { }) } + /** + * Updates if the skeleton/cl seems synced to the head + * copied over from config, could be DRY-ied + * @param option latest to update the sync state with + */ + updateSynchronizedState(latest?: BlockHeader | null) { + // If no syncTargetHeight has been discovered from peer or fcU sync state can't be + // determined + const subchain0 = this.status.progress.subchains[0] + if ((this.config.syncTargetHeight ?? BIGINT_0) === BIGINT_0 || subchain0 === undefined) { + return + } + + if (latest !== null && latest !== undefined) { + const height = subchain0.head + if (height >= (this.config.syncTargetHeight ?? BIGINT_0)) { + this.config.syncTargetHeight = height + this.lastSyncDate = + typeof latest.timestamp === 'bigint' && latest.timestamp > 0n + ? Number(latest.timestamp) * 1000 + : Date.now() + + const diff = Date.now() - this.lastSyncDate + // update synchronized + if (diff < this.config.syncedStateRemovalPeriod) { + if (!this.synchronized) { + this.synchronized = true + // Log to console the sync status + this.config.superMsg( + `Synchronized cl (skeleton) at height=${height} hash=${short(latest.hash())} 🎉` + ) + } + } + } + } else { + if (this.synchronized) { + const diff = Date.now() - this.lastSyncDate + if (diff >= this.config.syncedStateRemovalPeriod) { + this.synchronized = false + this.config.logger.info( + `Cl (skeleton) sync status reset (no chain updates for ${Math.round( + diff / 1000 + )} seconds).` + ) + } + } + } + + if (this.synchronized !== this.lastsyncronized) { + this.config.logger.debug( + `Cl (skeleton) synchronized=${this.synchronized}${ + latest !== null && latest !== undefined ? ' height=' + latest.number : '' + } syncTargetHeight=${this.config.syncTargetHeight} lastSyncDate=${ + (Date.now() - this.lastSyncDate) / 1000 + } secs ago` + ) + this.lastsyncronized = this.synchronized + } + } + + async forkchoiceUpdate( + headBlock: Block, + { + safeBlockHash, + finalizedBlockHash, + }: { safeBlockHash?: Uint8Array; finalizedBlockHash?: Uint8Array } = {} + ): Promise<{ reorged: boolean; safeBlock?: Block; finalizedBlock?: Block }> { + // setHead locks independently and between setHead unlocking and locking below there should + // be no injected code as each of the async ops take the lock. 
so once setHead takes the + // lock, all of them should be executed serially + const prevLinked = this.status.linked + const reorged = await this.setHead(headBlock, true) + if (reorged && prevLinked && !this.status.linked) { + await this.blockingTailBackfillWithCutoff(this.chain.config.engineNewpayloadMaxExecute).catch( + (e) => { + this.config.logger.debug(`blockingTailBackfillWithCutoff exited with error=${e}`) + } + ) + } + + const subchain0 = this.status.progress.subchains[0] + if (subchain0 === undefined) { + throw Error(`subchain0 should have been set as a result of skeleton setHead`) + } + + // set/update safe and finalized and see if they can backfill the tail in which case should + // update tail of subchain0 + // also important to do putBlocks before running validations + let safeBlock: Block | undefined + if (safeBlockHash !== undefined) { + if (equalsBytes(safeBlockHash, zeroBlockHash)) { + safeBlock = this.chain.genesis + } else if (equalsBytes(safeBlockHash, this.safeBlock?.hash() ?? zeroBlockHash)) { + safeBlock = this.safeBlock + } else if (equalsBytes(safeBlockHash, headBlock.hash())) { + safeBlock = headBlock + } else { + safeBlock = await this.getBlockByHash(safeBlockHash) + } + if (safeBlock !== undefined) { + if (safeBlock.header.number > headBlock.header.number) { + throw { + code: INVALID_PARAMS, + message: `Invalid safe block=${safeBlock.header.number} > headBlock=${headBlock.header.number}`, + } + } + if (!this.status.linked && safeBlock.header.number === subchain0.tail - BIGINT_1) { + await this.putBlocks([safeBlock]) + } + } + } else { + safeBlock = this.safeBlock ?? this.chain.genesis + } + + let finalizedBlock: Block | undefined + if (finalizedBlockHash !== undefined) { + if (equalsBytes(finalizedBlockHash, zeroBlockHash)) { + finalizedBlock = this.chain.genesis + } else if (equalsBytes(finalizedBlockHash, this.finalizedBlock?.hash() ?? zeroBlockHash)) { + finalizedBlock = this.finalizedBlock + } else if (equalsBytes(finalizedBlockHash, headBlock.hash())) { + finalizedBlock = headBlock + } else { + finalizedBlock = await this.getBlockByHash(finalizedBlockHash) + } + if (finalizedBlock !== undefined) { + if ( + finalizedBlock.header.number > headBlock.header.number || + (safeBlock !== undefined && finalizedBlock.header.number > safeBlock.header.number) + ) { + throw { + code: INVALID_PARAMS, + message: `Invalid finalized block=${finalizedBlock.header.number} > headBlock=${headBlock.header.number} or safeBlock=${safeBlock?.header.number}`, + } + } + if (!this.status.linked && finalizedBlock.header.number === subchain0.tail - BIGINT_1) { + await this.putBlocks([finalizedBlock]) + } + } + } else { + finalizedBlock = this.finalizedBlock ?? this.chain.genesis + } + + await this.runWithLock(async () => { + let shouldBeFinalizedNumber = this.finalizedBlock?.header.number ?? 
BIGINT_0 + if (finalizedBlock !== undefined && finalizedBlock.header.number > shouldBeFinalizedNumber) { + shouldBeFinalizedNumber = finalizedBlock.header.number + } + + let shouldBeSafeNumber = shouldBeFinalizedNumber + if (this.safeBlock !== undefined && this.safeBlock.header.number > shouldBeSafeNumber) { + shouldBeSafeNumber = this.safeBlock.header.number + } + if (safeBlock !== undefined && safeBlock.header.number > shouldBeSafeNumber) { + shouldBeSafeNumber = safeBlock.header.number + } + + // check for canonicality and availability of the safe and finalized now + if (this.status.linked || shouldBeSafeNumber >= subchain0.tail) { + if (safeBlock === undefined) { + throw { + code: INVALID_PARAMS, + message: `safe block not available in canonical chain`, + } + } else { + const canonicalBlock = await this.getBlock(safeBlock.header.number, true) + if ( + canonicalBlock === undefined || + !equalsBytes(safeBlock.hash(), canonicalBlock.hash()) + ) { + throw { + code: INVALID_PARAMS, + message: `safe block not canonical in chain`, + } + } + } + } + + if (this.status.linked || shouldBeFinalizedNumber >= subchain0.tail) { + if (finalizedBlock === undefined) { + throw { + code: INVALID_PARAMS, + message: `finalized block not available in canonical chain`, + } + } else { + const canonicalBlock = await this.getBlock(finalizedBlock.header.number, true) + if ( + canonicalBlock === undefined || + !equalsBytes(finalizedBlock.hash(), canonicalBlock.hash()) + ) { + throw { + code: INVALID_PARAMS, + message: `finalized block not canonical in chain`, + } + } + } + } + + this.updateSynchronizedState(headBlock?.header) + this.safeBlock = safeBlock ?? this.safeBlock + + if ( + (finalizedBlock?.header.number ?? BIGINT_0) > + (this.finalizedBlock?.header.number ?? BIGINT_0) + ) { + void this.pruneFinalizedNonCanonicalBlocks() + } + + this.finalizedBlock = finalizedBlock ?? 
this.finalizedBlock + }) + + await this.blockingFillWithCutoff(this.chain.config.engineNewpayloadMaxExecute) + + return { reorged, safeBlock: this.safeBlock, finalizedBlock: this.finalizedBlock } + } + + async setVmHead(snapStatus: { syncedHash: Uint8Array; syncedHeight: bigint }): Promise { + const { syncedHash, syncedHeight } = snapStatus + return this.runWithLock(async () => { + // check if the synced state's block is canonical and <= current safe and chain has synced till + const syncedBlock = await this.getBlock( + syncedHeight + // need to debug why this flag causes to return undefined when chain gets synced + //, true + ) + if ( + syncedBlock !== undefined && + syncedBlock.header.number <= this.chain.blocks.height && + ((this.safeBlock !== undefined && + syncedBlock.header.number <= this.safeBlock.header.number) || + syncedBlock.header.number <= + this.chain.blocks.height - this.config.snapTransitionSafeDepth) + ) { + await this.chain.blockchain.setIteratorHead('vm', syncedHash) + await this.chain.update(false) + return true + } else { + return false + } + }) + } + /** * Setup the skeleton to init sync with head * @params head - The block with which we want to init the skeleton head @@ -464,6 +832,16 @@ export class Skeleton extends MetaDBManager { return this.status.progress.subchains[0] } + async headHash(): Promise { + const subchain = this.bounds() + if (subchain !== undefined) { + const headBlock = await this.getBlock(subchain.head) + if (headBlock) { + return headBlock.hash() + } + } + } + private async trySubChainsMerge(): Promise { let merged = false let edited = false @@ -546,7 +924,7 @@ export class Skeleton extends MetaDBManager { * Writes skeleton blocks to the db by number * @returns number of blocks saved */ - async putBlocks(blocks: Block[]): Promise { + async putBlocks(blocks: Block[], skipForwardFill: boolean = false): Promise { return this.runWithLock(async () => { // if no subchain or linked chain throw error as this will exit the fetcher if (this.status.progress.subchains.length === 0) { @@ -611,26 +989,31 @@ export class Skeleton extends MetaDBManager { throw Error(`Blocks don't extend canonical subchain`) } merged = await this.trySubChainsMerge() + // If tail is updated normally or because of merge, we should now fill from // the tail to modify the canonical if (tailUpdated || merged) { this.status.canonicalHeadReset = true + if (this.status.progress.subchains[0].tail - BIGINT_1 <= this.chain.blocks.height) { + this.status.linked = await this.checkLinked() + } } + // If its merged, we need to break as the new tail could be quite ahead // so we need to clear out and run the reverse block fetcher again - if (merged) break + if (merged || this.status.linked) break } await this.writeSyncStatus() - if (!this.status.linked) { - this.status.linked = await this.checkLinked() - } - // If the sync is finished, start filling the canonical chain. if (this.status.linked) { - this.config.superMsg('Backfilling subchain completed. 
Start filling canonical chain.') - void this.fillCanonicalChain() + this.config.superMsg( + `Backfilling subchain completed, filling canonical chain=${!skipForwardFill}` + ) + if (!skipForwardFill) { + void this.fillCanonicalChain() + } } if (merged) throw errSyncMerged @@ -686,14 +1069,67 @@ export class Skeleton extends MetaDBManager { // if subchain0Head is not too ahead, then fill blocking as it gives better sync // log experience else just trigger if ( - !this.status.canonicalHeadReset && - subchain0.head - BigInt(cutoffLen) < this.chain.blocks.height + subchain0.head - BigInt(cutoffLen) < + (this.status.canonicalHeadReset ? subchain0.tail : this.chain.blocks.height) ) { + this.config.logger.debug('Attempting blocking fill') await fillPromise } } } + async getUnfinalizedParentsForBackfill(maxItems: number): Promise { + const blocks = [] + const subchain0 = this.status.progress.subchains[0] + if (!this.status.linked && subchain0 !== undefined) { + let next = subchain0.next + for (let i = 0; i < maxItems; i++) { + const tailBlock = await this.getBlockByHash(next) + + if (tailBlock === undefined) { + break + } else { + blocks.push(tailBlock) + next = tailBlock.header.parentHash + } + } + } + + return blocks + } + + /** + * lookup and try backfill if skeleton already has blocks previously filled + */ + async tryTailBackfill(): Promise { + let blocks: Block[] + do { + blocks = await this.getUnfinalizedParentsForBackfill(this.chain.config.maxPerRequest) + if (blocks.length > 0) { + await this.putBlocks(blocks) + } + } while (blocks.length > 0) + } + + /** + * + */ + async blockingTailBackfillWithCutoff(maxItems: number): Promise { + const blocks = await this.getUnfinalizedParentsForBackfill(maxItems) + if (blocks.length > 0) { + // also skip the fill since a blocking fill might be attempted by forkchoiceUpdate + await this.putBlocks(blocks, true) + + // if chain isn't linked and blocks requested were full then start a non blocking + // fill + if (!this.status.linked && blocks.length === maxItems) { + void this.tryTailBackfill().catch((e) => { + this.chain.config.logger.debug(`tryTailBackfill exited with error ${e}`) + }) + } + } + } + /** * Inserts skeleton blocks into canonical chain and runs execution. 
*/ @@ -837,7 +1273,15 @@ export class Skeleton extends MetaDBManager { // however delete it in a lock as the parent lookup of a reorged block in skeleton is used // to determine if the tail is to be reset or not await this.runWithLock(async () => { - await this.deleteBlock(block) + // there could be a race between new subchain creation and deletion of the block with the + // tail of subchain so making sure we are in happy condition to go for deletion + if ( + this.status.linked && + !this.status.canonicalHeadReset && + this.chain.blocks.height >= block.header.number + ) { + await this.deleteBlock(block) + } }) if (this.fillLogIndex >= this.config.numBlocksPerIteration) { this.config.logger.debug( @@ -871,35 +1315,50 @@ export class Skeleton extends MetaDBManager { /** * Writes a skeleton block to the db by number */ - private async putBlock(block: Block): Promise { + private async putBlock(block: Block, onlyUnfinalized: boolean = false): Promise { // Serialize the block with its hardfork so that its easy to load the block latter const rlp = this.serialize({ hardfork: block.common.hardfork(), blockRLP: block.serialize() }) - await this.put(DBKey.SkeletonBlock, bigIntToBytes(block.header.number), rlp) - await this.put( - DBKey.SkeletonBlockHashToNumber, - block.hash(), - bigIntToBytes(block.header.number) - ) + await this.put(DBKey.SkeletonUnfinalizedBlockByHash, block.hash(), rlp) + + if (!onlyUnfinalized) { + await this.put(DBKey.SkeletonBlock, bigIntToBytes(block.header.number), rlp) + // this is duplication of the unfinalized blocks but for now an easy reference + // will be pruned on finalization changes. this could be simplified and deduped + // but will anyway will move into blockchain class and db on upcoming skeleton refactor + await this.put( + DBKey.SkeletonBlockHashToNumber, + block.hash(), + bigIntToBytes(block.header.number) + ) + } + return true } + skeletonBlockRlpToBlock(skeletonBlockRlp: Uint8Array): Block { + const { hardfork, blockRLP } = this.deserialize(skeletonBlockRlp) + const common = this.config.chainCommon.copy() + common.setHardfork(hardfork) + + const block = Block.fromRLPSerializedBlock(blockRLP, { + common, + }) + return block + } + /** * Gets a block from the skeleton or canonical db by number. */ - async getBlock(number: bigint, onlySkeleton = false): Promise { + async getBlock(number: bigint, onlyCanonical = false): Promise { try { - const rlp = await this.get(DBKey.SkeletonBlock, bigIntToBytes(number)) - const { hardfork, blockRLP } = this.deserialize(rlp!) 
- const common = this.config.chainCommon.copy() - common.setHardfork(hardfork) - - const block = Block.fromRLPSerializedBlock(blockRLP, { - common, - }) - return block + const skeletonBlockRlp = await this.get(DBKey.SkeletonBlock, bigIntToBytes(number)) + if (skeletonBlockRlp === null) { + throw Error(`SkeletonBlock rlp lookup failed for ${number} onlyCanonical=${onlyCanonical}`) + } + return this.skeletonBlockRlpToBlock(skeletonBlockRlp) } catch (error: any) { // If skeleton is linked, it probably has deleted the block and put it into the chain - if (onlySkeleton || !this.status.linked) return undefined + if (onlyCanonical && !this.status.linked) return undefined // As a fallback, try to get the block from the canonical chain in case it is available there try { return await this.chain.getBlock(number) @@ -912,23 +1371,52 @@ export class Skeleton extends MetaDBManager { /** * Gets a skeleton block from the db by hash */ - async getBlockByHash(hash: Uint8Array, onlySkeleton?: boolean): Promise { + async getBlockByHash( + hash: Uint8Array, + onlyCanonical: boolean = false + ): Promise { const number = await this.get(DBKey.SkeletonBlockHashToNumber, hash) if (number) { - const block = await this.getBlock(bytesToBigInt(number), onlySkeleton) + const block = await this.getBlock(bytesToBigInt(number), onlyCanonical) if (block !== undefined && equalsBytes(block.hash(), hash)) { return block } } - if (onlySkeleton === true) { + if (onlyCanonical === true && !this.status.linked) { return undefined + } + + let block = onlyCanonical === false ? await this.getUnfinalizedBlock(hash) : undefined + if (block === undefined && (onlyCanonical === false || this.status.linked)) { + block = await this.chain.getBlock(hash).catch((_e) => undefined) + } + + if (onlyCanonical === false) { + return block } else { - try { - return await this.chain.getBlock(hash) - } catch (e) { - return undefined + if (this.status.linked && block !== undefined) { + const canBlock = await this.chain.getBlock(block.header.number).catch((_e) => undefined) + if (canBlock !== undefined && equalsBytes(canBlock.hash(), block.hash())) { + // block is canonical + return block + } + } + + // no canonical block found or the block was not canonical + return undefined + } + } + + async getUnfinalizedBlock(hash: Uint8Array): Promise { + try { + const skeletonBlockRlp = await this.get(DBKey.SkeletonUnfinalizedBlockByHash, hash) + if (skeletonBlockRlp === null) { + throw Error(`SkeletonUnfinalizedBlockByHash rlp lookup failed for hash=${short(hash)}`) } + return this.skeletonBlockRlpToBlock(skeletonBlockRlp) + } catch (_e) { + return undefined } } @@ -939,25 +1427,38 @@ export class Skeleton extends MetaDBManager { try { await this.delete(DBKey.SkeletonBlock, bigIntToBytes(block.header.number)) await this.delete(DBKey.SkeletonBlockHashToNumber, block.hash()) + await this.delete(DBKey.SkeletonUnfinalizedBlockByHash, block.hash()) return true } catch (error: any) { return false } } + /** + * + * TODO: complete the impl of pruning of blocks which got finalized and were non + * canonical. 
canonical blocks anyway get deleted in deleteBlock + */ + + async pruneFinalizedNonCanonicalBlocks(): Promise { + return + } + logSyncStatus( logPrefix: string, { forceShowInfo, lastStatus, - executing, + vmexecution, fetching, + snapsync, peers, }: { forceShowInfo?: boolean lastStatus?: string - executing?: boolean + vmexecution?: { running: boolean; started: boolean } fetching?: boolean + snapsync?: SnapFetcherDoneFlags peers?: number | string } = {} ): string { @@ -988,7 +1489,7 @@ export class Skeleton extends MetaDBManager { const status = isValid ? 'VALID' : isSynced - ? executing === true + ? vmexecution?.running === true ? `EXECUTING` : `SYNCED` : `SYNCING` @@ -1048,8 +1549,23 @@ export class Skeleton extends MetaDBManager { extraStatus = ` (${scenario} vm=${vmHead?.header.number} cl=el=${this.chain.blocks.height})` break case 'SYNCED': - scenario = - Date.now() - this.syncedchain > STALE_WINDOW ? 'execution stalled?' : 'awaiting execution' + if (vmexecution?.started === true) { + scenario = + Date.now() - this.syncedchain > STALE_WINDOW + ? 'execution stalled?' + : 'awaiting execution' + } else if (snapsync !== undefined) { + // stall detection yet to be added + if (snapsync.done) { + scenario = `snapsync-to-vm-transition=${ + (snapsync.snapTargetHeight ?? BIGINT_0) + this.config.snapTransitionSafeDepth + }` + } else { + scenario = `snapsync target=${snapsync.snapTargetHeight}` + } + } else { + scenario = 'execution none' + } extraStatus = ` (${scenario} vm=${vmHead?.header.number} cl=el=${this.chain.blocks.height} )` break case 'SYNCING': @@ -1107,14 +1623,71 @@ export class Skeleton extends MetaDBManager { } } - let logInfo + let vmlogInfo + let snapLogInfo let subchainLog = '' if (isValid) { - logInfo = `vm=cl=${chainHead}` + vmlogInfo = `vm=cl=${chainHead}` } else { - logInfo = `vm=${vmHead?.header.number} hash=${short( - vmHead?.hash() ?? 'na' - )} executing=${executing}` + vmlogInfo = `vm=${vmHead?.header.number} hash=${short(vmHead?.hash() ?? 'na')} started=${ + vmexecution?.started + }` + + if (vmexecution?.started === true) { + vmlogInfo = `${vmlogInfo} executing=${vmexecution?.running}` + } else { + if (snapsync === undefined) { + snapLogInfo = `snapsync=false` + } else { + const { snapTargetHeight, snapTargetRoot, snapTargetHash } = snapsync + if (snapsync.done === true) { + snapLogInfo = `snapsync=synced height=${snapTargetHeight} hash=${short( + snapTargetHash ?? 'na' + )} root=${short(snapTargetRoot ?? 'na')}` + } else if (snapsync.syncing) { + const accountsDone = formatBigDecimal( + snapsync.accountFetcher.first * BIGINT_100, + BIGINT_2EXP256, + BIGINT_100 + ) + const storageReqsDone = formatBigDecimal( + snapsync.storageFetcher.first * BIGINT_100, + snapsync.storageFetcher.count, + BIGINT_100 + ) + const codeReqsDone = formatBigDecimal( + snapsync.byteCodeFetcher.first * BIGINT_100, + snapsync.byteCodeFetcher.count, + BIGINT_100 + ) + + const snapprogress = `accounts=${accountsDone}% storage=${storageReqsDone}% of ${snapsync.storageFetcher.count} codes=${codeReqsDone}% of ${snapsync.byteCodeFetcher.count}` + + let stage = 'snapsync=??' + stage = `snapsync=accounts` + // move the stage along + if (snapsync.accountFetcher.done === true) { + stage = `snapsync=storage&codes` + } + if (snapsync.storageFetcher.done === true && snapsync.byteCodeFetcher.done === true) { + stage = `snapsync=trienodes` + } + if (snapsync.trieNodeFetcher.done === true) { + stage = `finished` + } + + snapLogInfo = `${stage} ${snapprogress} (hash=${short( + snapTargetHash ?? 
'na' + )} root=${short(snapTargetRoot ?? 'na')})` + } else { + if (this.synchronized) { + snapLogInfo = `snapsync=??` + } else { + snapLogInfo = `snapsync awaiting cl synchronization` + } + } + } + } // if not synced add subchain info if (!isSynced) { @@ -1130,9 +1703,7 @@ export class Skeleton extends MetaDBManager { } reorgsHead=${ this.status.canonicalHeadReset && (subchain0?.tail ?? BIGINT_0) <= this.chain.blocks.height - }` - } else { - logInfo = `${logInfo} cl=${chainHead}` + } synchronized=${this.synchronized}` } } peers = peers !== undefined ? `${peers}` : 'na' @@ -1140,11 +1711,18 @@ export class Skeleton extends MetaDBManager { // if valid then the status info is short and sweet this.config.logger.info('') if (isValid) { - this.config.logger.info(`${logPrefix} ${status}${extraStatus} ${logInfo} peers=${peers}`) + this.config.logger.info(`${logPrefix} ${status}${extraStatus} ${vmlogInfo} peers=${peers}`) } else { // else break into two - this.config.logger.info(`${logPrefix} ${status}${extraStatus} peers=${peers}`) - this.config.logger.info(`${logPrefix} ${logInfo}`) + this.config.logger.info( + `${logPrefix} ${status}${extraStatus} synchronized=${this.config.synchronized} peers=${peers}` + ) + if (snapLogInfo !== undefined && snapLogInfo !== '') { + this.config.logger.info(`${logPrefix} ${snapLogInfo}`) + } + if (vmlogInfo !== undefined && vmlogInfo !== '') { + this.config.logger.info(`${logPrefix} ${vmlogInfo}`) + } if (!isSynced) { this.config.logger.info(`${logPrefix} ${subchainLog}`) } @@ -1179,6 +1757,11 @@ export class Skeleton extends MetaDBManager { if (!rawStatus) return const status = this.statusRLPtoObject(rawStatus) this.status = status + + const { safe, finalized } = this.status + this.safeBlock = await this.getBlock(safe, true) + this.finalizedBlock = await this.getBlock(finalized, true) + return status } @@ -1197,6 +1780,9 @@ export class Skeleton extends MetaDBManager { intToBytes(this.status.linked ? 1 : 0), // canonocalHeadReset intToBytes(this.status.canonicalHeadReset ? 
1 : 0), + // safe and finalized + bigIntToBytes(this.status.safe), + bigIntToBytes(this.status.finalized), ]) } @@ -1208,10 +1794,15 @@ export class Skeleton extends MetaDBManager { progress: { subchains: [] }, linked: false, canonicalHeadReset: true, + safe: BIGINT_0, + finalized: BIGINT_0, } const rawStatus = RLP.decode(serializedStatus) as unknown as [ SkeletonSubchainRLP[], Uint8Array, + Uint8Array, + // safe and finalized + Uint8Array, Uint8Array ] const subchains: SkeletonSubchain[] = rawStatus[0].map((raw) => ({ @@ -1222,6 +1813,8 @@ export class Skeleton extends MetaDBManager { status.progress.subchains = subchains status.linked = bytesToInt(rawStatus[1]) === 1 status.canonicalHeadReset = bytesToInt(rawStatus[2]) === 1 + status.safe = bytesToBigInt(rawStatus[3]) + status.finalized = bytesToBigInt(rawStatus[4]) return status } } diff --git a/packages/client/src/sync/beaconsync.ts b/packages/client/src/sync/beaconsync.ts index cf572d9015..6f45a22953 100644 --- a/packages/client/src/sync/beaconsync.ts +++ b/packages/client/src/sync/beaconsync.ts @@ -307,7 +307,7 @@ export class BeaconSynchronizer extends Synchronizer { this.chain.blocks.height > this.skeleton.bounds().head - BigInt(50) ) if (!shouldRunOnlyBatched || this.chain.blocks.height % BigInt(50) === BIGINT_0) { - void this.execution.run(true, shouldRunOnlyBatched) + await this.execution.run(true, shouldRunOnlyBatched) } } diff --git a/packages/client/src/sync/fetcher/accountfetcher.ts b/packages/client/src/sync/fetcher/accountfetcher.ts index 28489ae669..a8a8d4c8fd 100644 --- a/packages/client/src/sync/fetcher/accountfetcher.ts +++ b/packages/client/src/sync/fetcher/accountfetcher.ts @@ -1,9 +1,10 @@ +import { DefaultStateManager } from '@ethereumjs/statemanager' import { Trie } from '@ethereumjs/trie' import { BIGINT_0, BIGINT_1, - BIGINT_2, - BIGINT_256, + BIGINT_100, + BIGINT_2EXP256, KECCAK256_NULL, KECCAK256_RLP, accountBodyToRLP, @@ -12,6 +13,7 @@ import { bytesToHex, compareBytes, equalsBytes, + formatBigDecimal, setLengthLeft, } from '@ethereumjs/util' import debugDefault from 'debug' @@ -23,13 +25,13 @@ import { ByteCodeFetcher } from './bytecodefetcher' import { Fetcher } from './fetcher' import { StorageFetcher } from './storagefetcher' import { TrieNodeFetcher } from './trienodefetcher' +import { getInitFecherDoneFlags } from './types' import type { Peer } from '../../net/peer' import type { AccountData } from '../../net/protocol/snapprotocol' -import type { EventBusType } from '../../types' import type { FetcherOptions } from './fetcher' import type { StorageRequest } from './storagefetcher' -import type { Job } from './types' +import type { Job, SnapFetcherDoneFlags } from './types' import type { Debugger } from 'debug' const { debug: createDebugLogger } = debugDefault @@ -43,14 +45,17 @@ export interface AccountFetcherOptions extends FetcherOptions { /** Root hash of the account trie to serve */ root: Uint8Array - /** The origin to start account fetcher from */ + /** The origin to start account fetcher from (including), by default starts from 0 (0x0000...) */ first: bigint - - /** Range to eventually fetch */ + /** The range to eventually, by default should be set at BIGINT_2 ** BigInt(256) + BIGINT_1 - first */ count?: bigint /** Destroy fetcher once all tasks are done */ destroyWhenDone?: boolean + + stateManager?: DefaultStateManager + + fetcherDoneFlags?: SnapFetcherDoneFlags } // root comes from block? 
@@ -61,104 +66,40 @@ export type JobTask = { count: bigint } -export type FetcherDoneFlags = { - storageFetcherDone: boolean - accountFetcherDone: boolean - byteCodeFetcherDone: boolean - trieNodeFetcherDone: boolean - eventBus?: EventBusType | undefined - stateRoot?: Uint8Array | undefined - stateTrie?: Trie | undefined -} - -export function snapFetchersCompleted( - fetcherDoneFlags: FetcherDoneFlags, - fetcherType: Object, - root?: Uint8Array, - trie?: Trie, - eventBus?: EventBusType -) { - switch (fetcherType) { - // eslint-disable-next-line @typescript-eslint/no-use-before-define - case AccountFetcher: - fetcherDoneFlags.accountFetcherDone = true - fetcherDoneFlags.stateRoot = root - fetcherDoneFlags.stateTrie = trie - fetcherDoneFlags.eventBus = eventBus - break - case StorageFetcher: - fetcherDoneFlags.storageFetcherDone = true - break - case ByteCodeFetcher: - fetcherDoneFlags.byteCodeFetcherDone = true - break - case TrieNodeFetcher: - fetcherDoneFlags.trieNodeFetcherDone = true - break - } - if ( - fetcherDoneFlags.accountFetcherDone && - fetcherDoneFlags.storageFetcherDone && - fetcherDoneFlags.byteCodeFetcherDone && - fetcherDoneFlags.trieNodeFetcherDone - ) { - fetcherDoneFlags.eventBus!.emit( - Event.SYNC_SNAPSYNC_COMPLETE, - fetcherDoneFlags.stateRoot!, - fetcherDoneFlags.stateTrie! - ) - } -} - export class AccountFetcher extends Fetcher { protected debug: Debugger + stateManager: DefaultStateManager + accountTrie: Trie - /** - * The stateRoot for the fetcher which sorts of pin it to a snapshot. - * This might eventually be removed as the snapshots are moving and not static - */ root: Uint8Array + highestKnownHash: Uint8Array | undefined /** The origin to start account fetcher from (including), by default starts from 0 (0x0000...) */ first: bigint - /** The range to eventually, by default should be set at BIGINT_2 ** BigInt(256) + BIGINT_1 - first */ count: bigint storageFetcher: StorageFetcher - byteCodeFetcher: ByteCodeFetcher - trieNodeFetcher: TrieNodeFetcher - - accountTrie: Trie - - accountToStorageTrie: Map - - highestKnownHash: Uint8Array | undefined - - /** Contains known bytecodes */ - codeTrie: Trie - - fetcherDoneFlags: FetcherDoneFlags = { - storageFetcherDone: false, - accountFetcherDone: false, - byteCodeFetcherDone: false, - trieNodeFetcherDone: false, - } + private readonly fetcherDoneFlags: SnapFetcherDoneFlags /** * Create new block fetcher */ constructor(options: AccountFetcherOptions) { super(options) + this.fetcherDoneFlags = options.fetcherDoneFlags ?? getInitFecherDoneFlags() + this.root = options.root this.first = options.first - this.count = options.count ?? BIGINT_2 ** BIGINT_256 - this.first - this.codeTrie = new Trie({ useKeyHashing: true }) - this.accountTrie = new Trie({ useKeyHashing: true }) - this.accountToStorageTrie = new Map() + this.count = options.count ?? BIGINT_2EXP256 - this.first + + this.stateManager = options.stateManager ?? 
new DefaultStateManager() + this.accountTrie = this.stateManager['_getAccountTrie']() + this.debug = createDebugLogger('client:AccountFetcher') + this.storageFetcher = new StorageFetcher({ config: this.config, pool: this.pool, @@ -166,42 +107,25 @@ export class AccountFetcher extends Fetcher storageRequests: [], first: BIGINT_1, destroyWhenDone: false, - accountToStorageTrie: this.accountToStorageTrie, + stateManager: this.stateManager, + fetcherDoneFlags: this.fetcherDoneFlags, }) - this.storageFetcher.fetch().then( - () => snapFetchersCompleted(this.fetcherDoneFlags, StorageFetcher), - () => { - throw Error('Snap fetcher failed to exit') - } - ) this.byteCodeFetcher = new ByteCodeFetcher({ config: this.config, pool: this.pool, hashes: [], destroyWhenDone: false, - trie: this.codeTrie, + stateManager: this.stateManager, + fetcherDoneFlags: this.fetcherDoneFlags, }) - this.byteCodeFetcher.fetch().then( - () => snapFetchersCompleted(this.fetcherDoneFlags, ByteCodeFetcher), - () => { - throw Error('Snap fetcher failed to exit') - } - ) this.trieNodeFetcher = new TrieNodeFetcher({ config: this.config, pool: this.pool, root: this.root, - accountTrie: this.accountTrie, - codeTrie: this.codeTrie, - accountToStorageTrie: this.accountToStorageTrie, + stateManager: this.stateManager, destroyWhenDone: false, + fetcherDoneFlags: this.fetcherDoneFlags, }) - this.trieNodeFetcher.fetch().then( - () => snapFetchersCompleted(this.fetcherDoneFlags, TrieNodeFetcher), - () => { - throw Error('Snap fetcher failed to exit') - } - ) const syncRange = { task: { first: this.first, count: this.count } } as Job< JobTask, @@ -218,6 +142,160 @@ export class AccountFetcher extends Fetcher ) } + async blockingFetch(): Promise { + this.fetcherDoneFlags.syncing = true + + try { + // in next iterations we might make this dynamic depending on how far off we are from the + // vmhead + const accountFetch = !this.fetcherDoneFlags.accountFetcher.done ? super.blockingFetch() : null + // wait for all accounts to fetch else storage and code fetcher's doesn't get us full data + this.config.superMsg(`Snapsync: running accountFetch=${accountFetch !== null}`) + + // if account fetcher is working, storage fetchers might need to work + if (accountFetch !== null) { + this.fetcherDoneFlags.storageFetcher.done = false + this.fetcherDoneFlags.byteCodeFetcher.done = false + } + // trienodes need to be tried on each fetch call + this.fetcherDoneFlags.trieNodeFetcher.done = false + + await accountFetch + if (this.fetcherDoneFlags.accountFetcher.done !== true) { + throw Error('accountFetcher finished without completing the sync') + } + + const storageFetch = !this.fetcherDoneFlags.storageFetcher.done + ? this.storageFetcher.blockingFetch().then( + () => this.snapFetchersCompleted(StorageFetcher), + () => { + throw Error('Snap fetcher failed to exit') + } + ) + : null + const codeFetch = !this.fetcherDoneFlags.byteCodeFetcher.done + ? 
this.byteCodeFetcher.blockingFetch().then( + () => this.snapFetchersCompleted(ByteCodeFetcher), + () => { + throw Error('Snap fetcher failed to exit') + } + ) + : null + + this.config.superMsg( + `Snapsync: running storageFetch=${storageFetch !== null} codeFetch=${codeFetch !== null}` + ) + + this.storageFetcher.setDestroyWhenDone() + this.byteCodeFetcher.setDestroyWhenDone() + await Promise.all([storageFetch, codeFetch]) + + if ( + this.fetcherDoneFlags.storageFetcher.done !== true || + this.fetcherDoneFlags.byteCodeFetcher.done !== true + ) { + throw Error( + `storageFetch or codeFetch didn't complete storageFetcherDone=${this.fetcherDoneFlags.storageFetcher.done} byteCodeFetcherDone=${this.fetcherDoneFlags.byteCodeFetcher.done}` + ) + } + + // always do trienode fetch as this should only sync diffs else return + // but currently it doesn't seem to be returning, so for static state + // ignore this if previously built + const trieNodeFetch = this.trieNodeFetcher.fetch().then( + () => { + this.snapFetchersCompleted(TrieNodeFetcher) + }, + () => { + throw Error('Snap fetcher failed to exit') + } + ) + this.config.superMsg(`Snapsync: running trieNodeFetch=${trieNodeFetch !== null}`) + this.trieNodeFetcher.setDestroyWhenDone() + await trieNodeFetch + + return true + } catch (error) { + this.config.logger.error(`Error while fetching snapsync: ${error}`) + return false + } finally { + this.fetcherDoneFlags.syncing = false + this.fetcherDoneFlags.accountFetcher.started = false + } + } + + snapFetchersCompleted(fetcherType: Object, root?: Uint8Array): void { + const fetcherDoneFlags = this.fetcherDoneFlags + + switch (fetcherType) { + // eslint-disable-next-line @typescript-eslint/no-use-before-define + case AccountFetcher: + if (fetcherDoneFlags.accountFetcher.first !== BIGINT_2EXP256) { + const fetcherProgress = formatBigDecimal( + fetcherDoneFlags.accountFetcher.first * BIGINT_100, + BIGINT_2EXP256, + BIGINT_100 + ) + this.config.logger.warn( + `accountFetcher completed with pending range done=${fetcherProgress}%` + ) + } + + fetcherDoneFlags.accountFetcher.done = true + fetcherDoneFlags.accountFetcher.first = BIGINT_2EXP256 + fetcherDoneFlags.stateRoot = root + break + case StorageFetcher: + fetcherDoneFlags.storageFetcher.done = true + + if (fetcherDoneFlags.storageFetcher.first !== fetcherDoneFlags.storageFetcher.count) { + const reqsDone = formatBigDecimal( + fetcherDoneFlags.storageFetcher.first * BIGINT_100, + fetcherDoneFlags.storageFetcher.count, + BIGINT_100 + ) + this.config.logger.warn( + `storageFetcher completed with pending tasks done=${reqsDone}% of ${fetcherDoneFlags.storageFetcher.count} queued=${this.storageFetcher.storageRequests.length}` + ) + } + + break + case ByteCodeFetcher: + fetcherDoneFlags.byteCodeFetcher.done = true + + if (fetcherDoneFlags.byteCodeFetcher.first !== fetcherDoneFlags.byteCodeFetcher.count) { + const reqsDone = formatBigDecimal( + fetcherDoneFlags.byteCodeFetcher.first * BIGINT_100, + fetcherDoneFlags.byteCodeFetcher.count, + BIGINT_100 + ) + this.config.logger.warn( + `byteCodeFetcher completed with pending tasks done=${reqsDone}% of ${fetcherDoneFlags.byteCodeFetcher.count}` + ) + } + break + case TrieNodeFetcher: + fetcherDoneFlags.trieNodeFetcher.done = true + break + } + + const { accountFetcher, storageFetcher, byteCodeFetcher, trieNodeFetcher } = fetcherDoneFlags + this.fetcherDoneFlags.done = + accountFetcher.done && storageFetcher.done && byteCodeFetcher.done && trieNodeFetcher.done + + this.config.superMsg( + `snapFetchersCompletion 
root=${short(this.root)} accountsRoot=${short( + fetcherDoneFlags.stateRoot ?? 'na' + )} done=${this.fetcherDoneFlags.done} accountsDone=${accountFetcher.done} storageDone=${ + storageFetcher.done + } byteCodesDone=${byteCodeFetcher.done} trieNodesDone=${trieNodeFetcher.done}` + ) + + if (this.fetcherDoneFlags.done) { + this.config.events.emit(Event.SYNC_SNAPSYNC_COMPLETE, this.root, this.stateManager) + } + } + private async verifyRangeProof( stateRoot: Uint8Array, origin: Uint8Array, @@ -311,7 +389,7 @@ export class AccountFetcher extends Fetcher if ( rangeResult.accounts.length === 0 || - equalsBytes(limit, bigIntToBytes(BIGINT_2 ** BIGINT_256)) === true + equalsBytes(limit, bigIntToBytes(BIGINT_2EXP256)) === true ) { // TODO have to check proof of nonexistence -- as a shortcut for now, we can mark as completed if a proof is present if (rangeResult.proof.length > 0) { @@ -393,20 +471,10 @@ export class AccountFetcher extends Fetcher this.debug('Final range received with no elements remaining to the right') await this.accountTrie.persistRoot() - snapFetchersCompleted( - this.fetcherDoneFlags, - AccountFetcher, - this.accountTrie.root(), - this.accountTrie, - this.config.events - ) - - // TODO It's possible that we should never destroy these fetchers since they will be needed to continually heal tries - this.byteCodeFetcher.setDestroyWhenDone() - this.trieNodeFetcher.setDestroyWhenDone() - + this.snapFetchersCompleted(AccountFetcher, this.accountTrie.root()) return } + const storageFetchRequests = new Set() const byteCodeFetchRequests = new Set() for (const account of result) { @@ -420,7 +488,7 @@ export class AccountFetcher extends Fetcher accountHash: account.hash, storageRoot, first: BIGINT_0, - count: BIGINT_2 ** BIGINT_256 - BIGINT_1, + count: BIGINT_2EXP256 - BIGINT_1, }) } // build record of accounts that need bytecode to be fetched @@ -429,6 +497,13 @@ export class AccountFetcher extends Fetcher byteCodeFetchRequests.add(codeHash) } } + + // update what has been synced for accountfetcher + const lastFetched = result[result.length - 1] + if (lastFetched !== undefined && lastFetched !== null) { + this.fetcherDoneFlags.accountFetcher.first = bytesToBigInt(lastFetched.hash) + } + if (storageFetchRequests.size > 0) this.storageFetcher.enqueueByStorageRequestList( Array.from(storageFetchRequests) as StorageRequest[] @@ -481,6 +556,10 @@ export class AccountFetcher extends Fetcher return tasks } + updateStateRoot(stateRoot: Uint8Array) { + this.root = stateRoot + } + nextTasks(): void { if ( this.in.length === 0 && diff --git a/packages/client/src/sync/fetcher/bytecodefetcher.ts b/packages/client/src/sync/fetcher/bytecodefetcher.ts index b1ecc7a8ff..51da04251a 100644 --- a/packages/client/src/sync/fetcher/bytecodefetcher.ts +++ b/packages/client/src/sync/fetcher/bytecodefetcher.ts @@ -1,5 +1,4 @@ -import { CODEHASH_PREFIX } from '@ethereumjs/statemanager' -import { Trie } from '@ethereumjs/trie' +import { CODEHASH_PREFIX, DefaultStateManager } from '@ethereumjs/statemanager' import { BIGINT_0, bytesToHex, @@ -11,11 +10,12 @@ import debugDefault from 'debug' import { keccak256 } from 'ethereum-cryptography/keccak' import { Fetcher } from './fetcher' +import { getInitFecherDoneFlags } from './types' import type { Peer } from '../../net/peer' import type { FetcherOptions } from './fetcher' -import type { Job } from './types' -import type { BatchDBOp } from '@ethereumjs/util' +import type { Job, SnapFetcherDoneFlags } from './types' +import type { BatchDBOp, DB } from '@ethereumjs/util' 
import type { Debugger } from 'debug' const { debug: createDebugLogger } = debugDefault @@ -27,7 +27,8 @@ type ByteCodeDataResponse = Uint8Array[] & { completed?: boolean } */ export interface ByteCodeFetcherOptions extends FetcherOptions { hashes: Uint8Array[] - trie: Trie + stateManager?: DefaultStateManager + fetcherDoneFlags?: SnapFetcherDoneFlags /** Destroy fetcher once all tasks are done */ destroyWhenDone?: boolean @@ -40,18 +41,23 @@ export type JobTask = { export class ByteCodeFetcher extends Fetcher { protected debug: Debugger + stateManager: DefaultStateManager + fetcherDoneFlags: SnapFetcherDoneFlags + codeDB: DB hashes: Uint8Array[] - trie: Trie - /** * Create new block fetcher */ constructor(options: ByteCodeFetcherOptions) { super(options) this.hashes = options.hashes ?? [] - this.trie = options.trie ?? new Trie({ useKeyHashing: true }) + this.stateManager = options.stateManager ?? new DefaultStateManager() + this.fetcherDoneFlags = options.fetcherDoneFlags ?? getInitFecherDoneFlags() + this.fetcherDoneFlags.byteCodeFetcher.count = BigInt(this.hashes.length) + this.codeDB = this.stateManager['_getCodeDB']() + this.debug = createDebugLogger('client:ByteCodeFetcher') if (this.hashes.length > 0) { const fullJob = { task: { hashes: this.hashes } } as Job @@ -171,7 +177,12 @@ }) storeCount += 1 } - await this.trie.batch(ops as BatchDBOp[]) + await this.codeDB.batch(ops as BatchDBOp[]) + this.fetcherDoneFlags.byteCodeFetcher.first += BigInt(codeHashToByteCode.size) + // first can start exceeding count, possibly because of missed hashes, so resort to this + // method of tracking the count + this.fetcherDoneFlags.byteCodeFetcher.count = + this.fetcherDoneFlags.byteCodeFetcher.first + BigInt(this.hashes.length) this.debug(`Stored ${storeCount} bytecode in code trie`) } @@ -189,6 +200,10 @@ */ enqueueByByteCodeRequestList(byteCodeRequestList: Uint8Array[]) { this.hashes.push(...byteCodeRequestList) + // first can start exceeding count, possibly because of missed hashes, so resort to this + // method of tracking the count + this.fetcherDoneFlags.byteCodeFetcher.count = + this.fetcherDoneFlags.byteCodeFetcher.first + BigInt(this.hashes.length) this.debug( `Number of bytecode fetch requests added to fetcher queue: ${byteCodeRequestList.length}` ) diff --git a/packages/client/src/sync/fetcher/fetcher.ts b/packages/client/src/sync/fetcher/fetcher.ts index 9335afb3ad..a64b149afa 100644 --- a/packages/client/src/sync/fetcher/fetcher.ts +++ b/packages/client/src/sync/fetcher/fetcher.ts @@ -48,6 +48,7 @@ export interface FetcherOptions { */ export abstract class Fetcher extends Readable { public config: Config + public fetchPromise: Promise | null = null protected debug: Debugger protected pool: PeerPool @@ -480,28 +481,49 @@ export abstract class Fetcher extends Readable /** * Run the fetcher. Returns a promise that resolves once all tasks are completed.
*/ + async _fetch() { + try { + this.write() + this.running = true + this.nextTasks() + + while (this.running) { + if (this.next() === false) { + if (this.finished === this.total && this.destroyWhenDone) { + this.push(null) + } + await this.wait() + } + } + this.running = false + if (this.destroyWhenDone) { + this.destroy() + this.writer = null + } + if (this.syncErrored) throw this.syncErrored + return true + } finally { + this.fetchPromise = null + } + } + + /** + * Wraps the internal fetcher to track its promise + */ async fetch() { if (this.running) { return false } - this.write() - this.running = true - this.nextTasks() - while (this.running) { - if (this.next() === false) { - if (this.finished === this.total && this.destroyWhenDone) { - this.push(null) - } - await this.wait() - } + if (this.fetchPromise === null) { + this.fetchPromise = this._fetch() } - this.running = false - if (this.destroyWhenDone) { - this.destroy() - this.writer = null - } - if (this.syncErrored) throw this.syncErrored + return this.fetchPromise + } + + async blockingFetch(): Promise { + const blockingPromise = this.fetchPromise ?? this.fetch() + return blockingPromise } /** diff --git a/packages/client/src/sync/fetcher/storagefetcher.ts b/packages/client/src/sync/fetcher/storagefetcher.ts index 163ed3252e..37df950bb2 100644 --- a/packages/client/src/sync/fetcher/storagefetcher.ts +++ b/packages/client/src/sync/fetcher/storagefetcher.ts @@ -1,3 +1,4 @@ +import { DefaultStateManager } from '@ethereumjs/statemanager' import { Trie } from '@ethereumjs/trie' import { BIGINT_0, @@ -8,7 +9,6 @@ import { bigIntToHex, bytesToBigInt, bytesToHex, - bytesToUnprefixedHex, compareBytes, setLengthLeft, } from '@ethereumjs/util' @@ -17,11 +17,12 @@ import debugDefault from 'debug' import { short } from '../../util' import { Fetcher } from './fetcher' +import { getInitFecherDoneFlags } from './types' import type { Peer } from '../../net/peer' import type { StorageData } from '../../net/protocol/snapprotocol' import type { FetcherOptions } from './fetcher' -import type { Job } from './types' +import type { Job, SnapFetcherDoneFlags } from './types' import type { Debugger } from 'debug' const { debug: createDebugLogger } = debugDefault @@ -56,7 +57,9 @@ export interface StorageFetcherOptions extends FetcherOptions { /** Destroy fetcher once all tasks are done */ destroyWhenDone?: boolean - accountToStorageTrie?: Map + stateManager: DefaultStateManager + + fetcherDoneFlags: SnapFetcherDoneFlags } export type JobTask = { @@ -65,23 +68,18 @@ export type JobTask = { export class StorageFetcher extends Fetcher { protected debug: Debugger + root: Uint8Array + stateManager: DefaultStateManager + fetcherDoneFlags: SnapFetcherDoneFlags private _proofTrie: Trie - /** - * The stateRoot for the fetcher which sorts of pin it to a snapshot. 
- * This might eventually be removed as the snapshots are moving and not static - */ - root: Uint8Array - /** The accounts to fetch storage data for */ storageRequests: StorageRequest[] /** Fragmented requests to fetch remaining slot data for */ fragmentedRequests: StorageRequest[] - accountToStorageTrie: Map - accountToHighestKnownHash: Map /** @@ -91,9 +89,13 @@ export class StorageFetcher extends Fetcher() this.debug = createDebugLogger('client:StorageFetcher') if (this.storageRequests.length > 0) { @@ -190,6 +192,10 @@ export class StorageFetcher extends Fetcher[] = [] result[0].map((slotArray, i) => { const accountHash = result.requests[i].accountHash - const storageTrie = - this.accountToStorageTrie.get(bytesToUnprefixedHex(accountHash)) ?? - new Trie({ useKeyHashing: true }) + const storageTrie = this.stateManager['_getStorageTrie'](accountHash) for (const slot of slotArray as any) { slotCount++ // what we have is hashed account and not its pre-image, so we skipKeyTransform storagePromises.push(storageTrie.put(slot.hash, slot.body, true)) } - this.accountToStorageTrie.set(bytesToUnprefixedHex(accountHash), storageTrie) }) await Promise.all(storagePromises) + this.fetcherDoneFlags.storageFetcher.first += BigInt(result[0].length) + this.fetcherDoneFlags.storageFetcher.count = + this.fetcherDoneFlags.storageFetcher.first + BigInt(this.storageRequests.length) this.debug(`Stored ${slotCount} slot(s)`) } catch (err) { this.debug(err) + throw err } } @@ -450,6 +456,8 @@ export class StorageFetcher extends Fetcher + stateManager?: DefaultStateManager /** Destroy fetcher once all tasks are done */ destroyWhenDone?: boolean + + fetcherDoneFlags?: SnapFetcherDoneFlags } export type JobTask = { @@ -60,6 +69,11 @@ export class TrieNodeFetcher extends Fetcher protected debug: Debugger root: Uint8Array + stateManager: DefaultStateManager + fetcherDoneFlags: SnapFetcherDoneFlags + accountTrie: Trie + codeDB: DB + /** * Holds all paths and nodes that need to be requested * @@ -75,9 +89,7 @@ export class TrieNodeFetcher extends Fetcher // Holds active requests to remove after storing requestedNodeToPath: Map fetchedAccountNodes: Map // key is node hash - accountTrie: Trie - codeTrie: Trie - accountToStorageTrie: Map + nodeCount: number /** @@ -86,12 +98,15 @@ export class TrieNodeFetcher extends Fetcher constructor(options: TrieNodeFetcherOptions) { super(options) this.root = options.root + this.fetcherDoneFlags = options.fetcherDoneFlags ?? getInitFecherDoneFlags() this.pathToNodeRequestData = new OrderedMap() this.requestedNodeToPath = new Map() this.fetchedAccountNodes = new Map() - this.accountTrie = options.accountTrie ?? new Trie({ useKeyHashing: true }) - this.codeTrie = options.codeTrie ?? new Trie({ useKeyHashing: true }) - this.accountToStorageTrie = options.accountToStorageTrie ?? new Map() + + this.stateManager = options.stateManager ?? new DefaultStateManager() + this.accountTrie = this.stateManager['_getAccountTrie']() + this.codeDB = this.stateManager['_getCodeDB']() + this.nodeCount = 0 this.debug = createDebugLogger('client:TrieNodeFetcher') @@ -257,8 +272,10 @@ export class TrieNodeFetcher extends Fetcher for (const childNode of childNodes) { try { if (storagePath !== undefined) { - // look up node in storage trie - const storageTrie = this.accountToStorageTrie.get(accountPath) + // look up node in storage trie, accountPath is hashed key/applied key + // TODO PR: optimized out the conversion from string to bytes? 
+ const accountHash = unprefixedHexToBytes(accountPath) + const storageTrie = this.stateManager['_getStorageTrie'](accountHash) await storageTrie!.lookupNode(childNode.nodeHash as Uint8Array) } else { // look up node in account trie diff --git a/packages/client/src/sync/fetcher/types.ts b/packages/client/src/sync/fetcher/types.ts index 90fbcd704a..6ab5ae189b 100644 --- a/packages/client/src/sync/fetcher/types.ts +++ b/packages/client/src/sync/fetcher/types.ts @@ -9,3 +9,67 @@ export type Job = { state: 'idle' | 'expired' | 'active' peer: Peer | null } + +export type SnapFetcherDoneFlags = { + snapTargetHeight?: bigint + snapTargetRoot?: Uint8Array + snapTargetHash?: Uint8Array + + done: boolean + syncing: boolean + accountFetcher: { + started: boolean + first: bigint + done: boolean + } + storageFetcher: { + started: boolean + first: bigint + count: bigint + done: boolean + } + byteCodeFetcher: { + started: boolean + first: bigint + count: bigint + done: boolean + } + trieNodeFetcher: { + started: boolean + first: bigint + count: bigint + done: boolean + } + stateRoot?: Uint8Array +} + +export function getInitFecherDoneFlags(): SnapFetcherDoneFlags { + return { + done: false, + syncing: false, + accountFetcher: { + started: false, + // entire account range + first: BigInt(0), + done: false, + }, + storageFetcher: { + started: false, + first: BigInt(0), + count: BigInt(0), + done: false, + }, + byteCodeFetcher: { + started: false, + first: BigInt(0), + count: BigInt(0), + done: false, + }, + trieNodeFetcher: { + started: false, + first: BigInt(0), + count: BigInt(0), + done: false, + }, + } +} diff --git a/packages/client/src/sync/fullsync.ts b/packages/client/src/sync/fullsync.ts index 7f2b64c816..a82e631709 100644 --- a/packages/client/src/sync/fullsync.ts +++ b/packages/client/src/sync/fullsync.ts @@ -65,6 +65,22 @@ export class FullSynchronizer extends Synchronizer { this._fetcher = fetcher } + async sync() { + const syncWithFetcher = super.sync() + const syncEvent: Promise = new Promise((resolve) => { + // This event listener listens for other instances of the fetcher that might be syncing from a different peer + // and reach the head of the chain before the current fetcher. + this.config.events.once(Event.SYNC_SYNCHRONIZED, (height?: number) => { + this.resolveSync(height) + resolve(true) + }) + }) + + // This "race" ensures that either the current fetcher (or any other fetcher that happens to be syncing) + // resolve this current call to `sync` so we don't have orphan processes running in the background + return Promise.race([syncWithFetcher, syncEvent]) + } + /** * Open synchronizer. 
Must be called before sync() is called */ diff --git a/packages/client/src/sync/snapsync.ts b/packages/client/src/sync/snapsync.ts index fb50a068d7..0ab6c362ad 100644 --- a/packages/client/src/sync/snapsync.ts +++ b/packages/client/src/sync/snapsync.ts @@ -1,25 +1,38 @@ -import { DefaultStateManager } from '@ethereumjs/statemanager' -import { BIGINT_0, bytesToHex } from '@ethereumjs/util' +import { BIGINT_0, bytesToHex, equalsBytes } from '@ethereumjs/util' import { Event } from '../types' +import { short } from '../util' import { AccountFetcher } from './fetcher' +import { getInitFecherDoneFlags } from './fetcher/types' import { Synchronizer } from './sync' +import type { VMExecution } from '../execution' import type { Peer } from '../net/peer/peer' +import type { Skeleton } from '../service/skeleton' +import type { SnapFetcherDoneFlags } from './fetcher/types' import type { SynchronizerOptions } from './sync' +import type { DefaultStateManager } from '@ethereumjs/statemanager' -interface SnapSynchronizerOptions extends SynchronizerOptions {} +interface SnapSynchronizerOptions extends SynchronizerOptions { + /** Skeleton chain */ + skeleton?: Skeleton + + /** VM Execution */ + execution: VMExecution +} export class SnapSynchronizer extends Synchronizer { public running = false - - stateManager: DefaultStateManager + skeleton?: Skeleton + private execution: VMExecution + readonly fetcherDoneFlags: SnapFetcherDoneFlags = getInitFecherDoneFlags() constructor(options: SnapSynchronizerOptions) { super(options) - this.stateManager = new DefaultStateManager() + this.skeleton = options.skeleton + this.execution = options.execution } /** @@ -47,6 +60,10 @@ export class SnapSynchronizer extends Synchronizer { await super.open() await this.chain.open() await this.pool.open() + + this.config.logger.info( + `Opened SnapSynchronizer syncTargetHeight=${this.config.syncTargetHeight ?? 'NA'}` + ) } /** @@ -70,7 +87,11 @@ export class SnapSynchronizer extends Synchronizer { const latest = await this.latest(peer) if (latest) { const { number } = latest - if ((!best && number >= this.chain.blocks.height) || (best && best[1] < number)) { + if ( + (!best && + number + this.config.snapAvailabilityDepth / BigInt(4) >= this.chain.blocks.height) || + (best && best[1] < number) + ) { best = [peer, number] } } @@ -82,8 +103,11 @@ export class SnapSynchronizer extends Synchronizer { * Get latest header of peer */ async latest(peer: Peer) { + // TODO: refine the way to query latest to fetch for the peer + const blockHash = peer.eth!.status.bestHash + // const blockHash = this.skeleton?.headHash() ?? peer.eth!.status.bestHash const result = await peer.eth?.getBlockHeaders({ - block: peer.eth!.status.bestHash, + block: blockHash, max: 1, }) return result ? 
result[1][0] : undefined @@ -110,15 +134,84 @@ export class SnapSynchronizer extends Synchronizer { clearTimeout(timeout) } + async checkAndSync(): Promise<{ + syncedHash: Uint8Array + syncedRoot: Uint8Array + syncedHeight: bigint + } | null> { + const fetchingAlreadyDone = this.fetcherDoneFlags.done + if (!fetchingAlreadyDone) { + await this.sync() + } + + if (!this.fetcherDoneFlags.done) { + throw Error( + `snap sync fetchers didn't sync complete state accountFetcherDone=${this.fetcherDoneFlags.accountFetcher.done} storageFetcherDone=${this.fetcherDoneFlags.storageFetcher.done} byteCodeFetcherDone=${this.fetcherDoneFlags.byteCodeFetcher.done} trieNodeFetcherDone=${this.fetcherDoneFlags.trieNodeFetcher.done}` + ) + } + + const { snapTargetHeight, snapTargetRoot, snapTargetHash } = this.fetcherDoneFlags + if ( + snapTargetHeight === undefined || + snapTargetRoot === undefined || + snapTargetHash === undefined + ) { + throw Error( + `Invalid synced data by snapsync snapTargetHeight=${snapTargetHeight} snapTargetRoot=${short( + snapTargetRoot ?? 'na' + )} snapTargetHash=${short(snapTargetHash ?? 'na')}` + ) + } + + // getStateRoot also flushes the data + const syncedRoot = await this.execution.vm.stateManager.getStateRoot() + if (!equalsBytes(syncedRoot, snapTargetRoot)) { + throw Error( + `Invalid snap syncedRoot=${short(syncedRoot)} targetRoot=${short( + snapTargetRoot + )} for target height=${snapTargetHeight} hash=${short(snapTargetHash)}` + ) + // TODO: figure out what needs to be reinited + // this.fetcherDoneFlags.accountFetcher.done = false; + // this.fetcherDoneFlags.storageFetcher.done = false; + // this.fetcherDoneFlags.byteCodeFetcher.done = false; + // this.fetcherDoneFlags.trieNodeFetcher.done = false + } + + const snapDoneMsg = `snapsync complete!!! height=${snapTargetHeight} root=${short( + snapTargetRoot + )} hash=${short(snapTargetHash)}` + if (fetchingAlreadyDone) { + this.config.logger.debug(snapDoneMsg) + } else { + this.config.superMsg(snapDoneMsg) + } + + return { + syncedHash: snapTargetHash, + syncedRoot: snapTargetRoot, + syncedHeight: snapTargetHeight, + } + } + /** * Called from `sync()` to sync blocks and state from peer starting from current height. * @param peer remote peer to sync with * @returns a boolean if the setup was successful */ async syncWithPeer(peer?: Peer): Promise { + // if skeleton is passed we have to wait for skeleton to be updated + if (this.skeleton?.synchronized !== true || this.fetcherDoneFlags.done) { + this.config.logger.info(`SnapSynchronizer - early return ${peer?.id}`) + return false + } + const latest = peer ? await this.latest(peer) : undefined - if (!latest) return false + if (!latest) { + return false + } + this.config.logger.info(`SnapSynchronizer - syncWithPeer ${peer?.id}`) const stateRoot = latest.stateRoot const height = latest.number // eslint-disable-next-line eqeqeq @@ -127,14 +220,37 @@ export class SnapSynchronizer extends Synchronizer { this.config.logger.info(`New sync target height=${height} hash=${bytesToHex(latest.hash())}`) } - this.fetcher = new AccountFetcher({ - config: this.config, - pool: this.pool, - root: stateRoot, - // This needs to be determined from the current state of the MPT dump - first: BIGINT_0, - }) + if ( + (this.fetcher === null || this.fetcher.syncErrored !== undefined) && + this.config.syncTargetHeight <= latest.number + this.config.snapAvailabilityDepth + ) { + if ((this.fetcherDoneFlags.snapTargetHeight ?? 
BIGINT_0) < latest.number) { + this.fetcherDoneFlags.snapTargetHeight = latest.number + this.fetcherDoneFlags.snapTargetRoot = latest.stateRoot + this.fetcherDoneFlags.snapTargetHash = latest.hash() + } + this.config.logger.info( + `syncWithPeer new AccountFetcher peer=${peer?.id} snapTargetHeight=${ + this.fetcherDoneFlags.snapTargetHeight + } snapTargetRoot=${short(this.fetcherDoneFlags.snapTargetRoot!)} ${ + this.fetcher === null + ? '' + : 'previous fetcher errored=' + this.fetcher.syncErrored?.message + }` + ) + this.fetcher = new AccountFetcher({ + config: this.config, + pool: this.pool, + stateManager: this.execution.vm.stateManager as DefaultStateManager, + root: stateRoot, + // This needs to be determined from the current state of the MPT dump + first: BigInt(0), + fetcherDoneFlags: this.fetcherDoneFlags, + }) + } else { + return false + } return true } diff --git a/packages/client/src/sync/sync.ts b/packages/client/src/sync/sync.ts index f43e23e150..3ee493986f 100644 --- a/packages/client/src/sync/sync.ts +++ b/packages/client/src/sync/sync.ts @@ -150,7 +150,7 @@ export abstract class Synchronizer { async syncWithFetcher() { try { if (this._fetcher) { - await this._fetcher.fetch() + await this._fetcher.blockingFetch() } this.config.logger.debug(`Fetcher finished fetching...`) return this.resolveSync() @@ -178,18 +178,13 @@ export abstract class Synchronizer { } if (!(await this.syncWithPeer(peer))) return false - const syncEvent: Promise = new Promise((resolve) => { - // This event listener listens for other instances of the fetcher that might be syncing from a different peer - // and reach the head of the chain before the current fetcher. - this.config.events.once(Event.SYNC_SYNCHRONIZED, (height?: number) => { - this.resolveSync(height) - resolve(true) - }) - }) - // This "race" ensures that either the current fetcher (or any other fetcher that happens to be syncing) - // resolve this current call to `sync` so we don't have orphan processes running in the background - return Promise.race([this.syncWithFetcher(), syncEvent]) + // syncWithFetcher should auto resolve when sync completes even if from any other independent + // fetcher. We shouldn't be auto resolving the fetchers on sync events because SYNC events are + // not precision based but we need precision to resolve the fetchers + // + // TODO: check this for the forward fetcher that it resolves on being close/on head or post merge + return this.syncWithFetcher() } /** diff --git a/packages/client/src/types.ts b/packages/client/src/types.ts index 6a8414279a..9778bae90d 100644 --- a/packages/client/src/types.ts +++ b/packages/client/src/types.ts @@ -4,7 +4,7 @@ import type { SyncMode } from '.' 
import type { Peer } from './net/peer' import type { Server } from './net/server' import type { Block, BlockHeader } from '@ethereumjs/block' -import type { Trie } from '@ethereumjs/trie' +import type { DefaultStateManager } from '@ethereumjs/statemanager' import type { Address } from '@ethereumjs/util' import type { Multiaddr } from 'multiaddr' @@ -40,7 +40,7 @@ export interface EventParams { [Event.SYNC_FETCHED_BLOCKS]: [blocks: Block[]] [Event.SYNC_FETCHED_HEADERS]: [headers: BlockHeader[]] [Event.SYNC_SYNCHRONIZED]: [chainHeight: bigint] - [Event.SYNC_SNAPSYNC_COMPLETE]: [stateRoot: Uint8Array, accountTrie: Trie] + [Event.SYNC_SNAPSYNC_COMPLETE]: [stateRoot: Uint8Array, stateManager: DefaultStateManager] [Event.SYNC_ERROR]: [syncError: Error] [Event.SYNC_FETCHER_ERROR]: [fetchError: Error, task: any, peer: Peer | null | undefined] [Event.PEER_CONNECTED]: [connectedPeer: Peer] @@ -152,9 +152,8 @@ export interface ClientOpts { dev?: boolean | string minerCoinbase?: Address saveReceipts?: boolean - disableBeaconSync?: boolean - forceSnapSync?: boolean prefixStorageTrieKeys?: boolean + snap?: boolean useStringValueTrieDB?: boolean txLookupLimit?: number startBlock?: number diff --git a/packages/client/src/util/metaDBManager.ts b/packages/client/src/util/metaDBManager.ts index dc067a6547..ede8a4820c 100644 --- a/packages/client/src/util/metaDBManager.ts +++ b/packages/client/src/util/metaDBManager.ts @@ -19,6 +19,7 @@ export enum DBKey { SkeletonBlock, SkeletonBlockHashToNumber, SkeletonStatus, + SkeletonUnfinalizedBlockByHash, } export interface MetaDBManagerOptions { diff --git a/packages/client/test/cli/cli.spec.ts b/packages/client/test/cli/cli.spec.ts index 1b88b06e4b..231645572d 100644 --- a/packages/client/test/cli/cli.spec.ts +++ b/packages/client/test/cli/cli.spec.ts @@ -477,7 +477,7 @@ describe('[CLI]', () => { }, 30000) // test experimental feature options it('should start client when passed options for experimental features', async () => { - const cliArgs = ['--mine=true', '--forceSnapSync=true', '--dev=poa', '--port=30393'] + const cliArgs = ['--mine=true', '--snap=true', '--dev=poa', '--port=30393'] const onData = async ( message: string, child: ChildProcessWithoutNullStreams, @@ -600,7 +600,6 @@ describe('[CLI]', () => { '--port=30301', '--dev=poa', '--isSingleNode=true', - '--disableBeaconSync=true', '--sync="none"', '--lightServe=true', '--mergeForkIdPostMerge=false', diff --git a/packages/client/test/rpc/engine/forkchoiceUpdatedV1.spec.ts b/packages/client/test/rpc/engine/forkchoiceUpdatedV1.spec.ts index c9c7ded54d..d63f3d7a11 100644 --- a/packages/client/test/rpc/engine/forkchoiceUpdatedV1.spec.ts +++ b/packages/client/test/rpc/engine/forkchoiceUpdatedV1.spec.ts @@ -194,8 +194,13 @@ describe(method, () => { ) await chain.putBlocks([newBlock]) + const newBlockHashHex = bytesToHex(newBlock.hash()) const req = params(method, [ - { ...validForkChoiceState, headBlockHash: bytesToHex(newBlock.hash()) }, + { + safeBlockHash: newBlockHashHex, + finalizedBlockHash: newBlockHashHex, + headBlockHash: newBlockHashHex, + }, null, ]) const expectRes = (res: any) => { @@ -262,7 +267,7 @@ describe(method, () => { finalizedBlockHash: '0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4b', }, ]) - const expectRes = checkError(INVALID_PARAMS, 'finalized block not available') + const expectRes = checkError(INVALID_PARAMS, 'finalized block not available in canonical chain') await baseRequest(server, req, 200, expectRes) }) @@ -366,7 +371,7 @@ describe(method, () => { const 
expectRes = (res: any) => { assert.equal(res.body.error.code, -32602) - assert.ok(res.body.error.message.includes('safeBlock')) + assert.ok(res.body.error.message.includes('safe')) assert.ok(res.body.error.message.includes('canonical')) } await baseRequest(server, req, 200, expectRes) @@ -409,7 +414,7 @@ describe(method, () => { const expectRes = (res: any) => { assert.equal(res.body.error.code, -32602) - assert.ok(res.body.error.message.includes('finalizedBlock')) + assert.ok(res.body.error.message.includes('finalized')) assert.ok(res.body.error.message.includes('canonical')) } await baseRequest(server, req, 200, expectRes) diff --git a/packages/client/test/rpc/engine/newPayloadV1.spec.ts b/packages/client/test/rpc/engine/newPayloadV1.spec.ts index 824572f950..0d32776b69 100644 --- a/packages/client/test/rpc/engine/newPayloadV1.spec.ts +++ b/packages/client/test/rpc/engine/newPayloadV1.spec.ts @@ -292,7 +292,7 @@ describe(method, () => { // newpayload shouldn't execute block but just return either SYNCING or ACCEPTED let expectRes = (res: any) => { - assert.equal(res.body.result.status, 'ACCEPTED') + assert.equal(res.body.result.status, 'SYNCING') } let req = params(method, [blockDataWithValidTransaction]) await baseRequest(server, req, 200, expectRes, false, false) diff --git a/packages/client/test/rpc/helpers.ts b/packages/client/test/rpc/helpers.ts index 6991befa4e..5b7f183af8 100644 --- a/packages/client/test/rpc/helpers.ts +++ b/packages/client/test/rpc/helpers.ts @@ -169,6 +169,7 @@ export function createClient(clientOpts: Partial = {}) { switchToBeaconSync: () => { return undefined }, + buildHeadState: () => {}, }, ], servers, diff --git a/packages/client/test/service/fullethereumservice.spec.ts b/packages/client/test/service/fullethereumservice.spec.ts index 9566b46027..a692c889e8 100644 --- a/packages/client/test/service/fullethereumservice.spec.ts +++ b/packages/client/test/service/fullethereumservice.spec.ts @@ -370,7 +370,7 @@ describe('[FullEthereumService]', async () => { const chain = await Chain.create({ config }) let service = new FullEthereumService({ config, chain }) assert.ok(service.beaconSync, 'beacon sync should be available') - const configDisableBeaconSync = new Config({ common, disableBeaconSync: true }) + const configDisableBeaconSync = new Config({ common, syncmode: 'none' }) service = new FullEthereumService({ config: configDisableBeaconSync, chain }) assert.notOk(service.beaconSync, 'beacon sync should not be available') }) diff --git a/packages/client/test/sim/beaconsync.spec.ts b/packages/client/test/sim/beaconsync.spec.ts index fdbcbcb8fb..2ccd0dd144 100644 --- a/packages/client/test/sim/beaconsync.spec.ts +++ b/packages/client/test/sim/beaconsync.spec.ts @@ -163,13 +163,14 @@ describe('simple mainnet test run', async () => { void ejsClient.services[0].synchronizer?.sync() const syncResponse = await Promise.race([beaconSyncPromise, syncTimeout]) assert.equal( - syncResponse.syncState, - 'SYNCED', + ['SYNCED', 'VALID'].includes(syncResponse.syncState), + true, 'beaconSyncRelayer should have synced client' ) await ejsClient.stop() assert.ok(true, 'completed beacon sync') } catch (e) { + console.log() assert.fail('could not complete beacon sync in 8 minutes') } } else { diff --git a/packages/client/test/sim/snapsync.md b/packages/client/test/sim/snapsync.md index dd38e39e00..ce71305420 100644 --- a/packages/client/test/sim/snapsync.md +++ b/packages/client/test/sim/snapsync.md @@ -16,14 +16,13 @@ Note: All commands should be run from the `client` package 
directory root (so so 1. Start external geth client: ```bash -NETWORK=mainnet NETWORKID=1337903 ELCLIENT=geth DATADIR=/usr/app/ethereumjs/packages/client/data test -/sim/single-run.sh +NETWORK=mainnet NETWORKID=1337903 ELCLIENT=geth EXTRA_CL_PARAMS="--params.CAPELLA_FORK_EPOCH 0" DATADIR=/usr/app/ethereumjs/packages/client/data test/sim/single-run.sh ``` 2. (optional) Add some txs/state to geth ```bash -EXTERNAL_RUN=true ADD_EOA_STATE=true DATADIR=/usr/app/ethereumjs/packages/client/data npx vitest test/sim/snapsync.spec.ts +rm -rf ./datadir; EXTERNAL_RUN=true ADD_EOA_STATE=true DATADIR=/usr/app/ethereumjs/packages/client/data npx vitest run test/sim/snapsync.spec.ts ``` 3. Run snap sync: @@ -43,7 +42,7 @@ EXTERNAL_RUN=true SNAP_SYNC=true DEBUG_SNAP=client:* DATADIR=/usr/app/ethereumjs Combine 2 & 3 in single step: ```bash -EXTERNAL_RUN=true ADD_EOA_STATE=true SNAP_SYNC=true DATADIR=/usr/app/ethereumjs/packages/client/data npx vitest test/sim/snapsync.spec.ts +rm -rf ./datadir; EXTERNAL_RUN=true SNAP_SYNC=true DEBUG_SNAP=client:* DATADIR=/usr/app/ethereumjs/packages/client/data npx vitest test/sim/snapsync.spec.ts ``` Combine 1, 2, 3 in single step diff --git a/packages/client/test/sim/snapsync.spec.ts b/packages/client/test/sim/snapsync.spec.ts index 880ded921b..54580a6c50 100644 --- a/packages/client/test/sim/snapsync.spec.ts +++ b/packages/client/test/sim/snapsync.spec.ts @@ -1,5 +1,4 @@ import { Common } from '@ethereumjs/common' -import { DefaultStateManager } from '@ethereumjs/statemanager' import { Address, bytesToHex, @@ -27,7 +26,7 @@ import { import type { EthereumClient } from '../../src/client' import type { RlpxServer } from '../../src/net/server' -import type { Trie } from '@ethereumjs/trie' +import type { DefaultStateManager } from '@ethereumjs/statemanager' const client = Client.http({ port: 8545 }) @@ -44,7 +43,7 @@ let senderBalance = BigInt(customGenesisState[sender][0]) let ejsClient: EthereumClient | null = null let beaconSyncRelayer: any = null let snapCompleted: Promise | undefined = undefined -let syncedTrie: Trie | undefined = undefined +let stateManager: DefaultStateManager | undefined = undefined // This account doesn't exist in the genesis so starting balance is zero const EOATransferToAccount = '0x3dA33B9A0894b908DdBb00d96399e506515A1009' @@ -168,7 +167,7 @@ describe('simple mainnet test run', async () => { if (ejsClient !== null && snapCompleted !== undefined && beaconSyncRelayer !== null) { // node should be in syncing or valid state, when snap sync fully implemented should just // check for VALID - const beaconSyncPromise = beaconSyncRelayer.start({ waitForStates: ['SYNCING', 'VALID'] }) + const beaconSyncPromise = beaconSyncRelayer.start({ waitForStates: ['VALID'] }) // wait on the sync promise to complete if it has been called independently const snapSyncTimeout = new Promise((_resolve, reject) => setTimeout(reject, 8 * 60_000)) let syncedSnapRoot: Uint8Array | undefined = undefined @@ -177,15 +176,14 @@ describe('simple mainnet test run', async () => { // call sync if not has been called yet void ejsClient.services[0].synchronizer?.sync() await Promise.race([ - snapCompleted.then(([root, trie]) => { + snapCompleted.then(([root, syncedStateManager]) => { syncedSnapRoot = root - syncedTrie = trie + stateManager = syncedStateManager }), snapSyncTimeout, ]) await Promise.race([beaconSyncPromise, snapSyncTimeout]) - await ejsClient.stop() assert.ok(true, 'completed snap sync') } catch (e) { assert.fail('could not complete snap sync in 8 minutes') @@ -202,7 
+200,7 @@ describe('simple mainnet test run', async () => { 10 * 60_000 ) - it.skipIf(syncedTrie !== undefined)('should match entire state', async () => { + it.skipIf(stateManager !== undefined)('should match entire state', async () => { // update customGenesisState to reflect latest changes and match entire customGenesisState if (process.env.ADD_EOA_STATE !== undefined) { customGenesisState[EOATransferToAccount] = [ @@ -214,8 +212,6 @@ describe('simple mainnet test run', async () => { customGenesisState[sender][0] = `0x${senderBalance.toString(16)}` } - const stateManager = new DefaultStateManager({ trie: syncedTrie }) - for (const addressString of Object.keys(customGenesisState)) { const address = Address.fromString(addressString) const account = await stateManager.getAccount(address) @@ -230,6 +226,7 @@ describe('simple mainnet test run', async () => { it('network cleanup', async () => { try { beaconSyncRelayer?.close() + await ejsClient?.stop() await teardownCallBack() assert.ok(true, 'network cleaned') } catch (e) { @@ -258,7 +255,8 @@ async function createSnapClient( discDns: false, discV4: false, port: 30304, - forceSnapSync: true, + enableSnapSync: true, + // syncmode: 'none', // Keep the single job sync range high as the state is not big maxAccountRange: (BigInt(2) ** BigInt(256) - BigInt(1)) / BigInt(10), maxFetcherJobs: 10, @@ -267,8 +265,10 @@ async function createSnapClient( config.events.once(Event.PEER_CONNECTED, (peer: any) => resolve(peer)) }) const snapSyncCompletedPromise = new Promise((resolve) => { - config.events.once(Event.SYNC_SNAPSYNC_COMPLETE, (stateRoot: Uint8Array, trie: Trie) => - resolve([stateRoot, trie]) + config.events.once( + Event.SYNC_SNAPSYNC_COMPLETE, + (stateRoot: Uint8Array, stateManager: DefaultStateManager) => + resolve([stateRoot, stateManager]) ) }) diff --git a/packages/client/test/sync/fetcher/accountfetcher.spec.ts b/packages/client/test/sync/fetcher/accountfetcher.spec.ts index de795bc50c..8d7ea3ed33 100644 --- a/packages/client/test/sync/fetcher/accountfetcher.spec.ts +++ b/packages/client/test/sync/fetcher/accountfetcher.spec.ts @@ -6,6 +6,8 @@ import { assert, describe, it } from 'vitest' import { Chain } from '../../../src/blockchain' import { Config } from '../../../src/config' import { SnapProtocol } from '../../../src/net/protocol' +import { ByteCodeFetcher } from '../../../src/sync/fetcher/bytecodefetcher' +import { StorageFetcher } from '../../../src/sync/fetcher/storagefetcher' import { TrieNodeFetcher } from '../../../src/sync/fetcher/trienodefetcher' import { Event } from '../../../src/types' import { wait } from '../../integration/util' @@ -21,9 +23,7 @@ describe('[AccountFetcher]', async () => { PeerPool.prototype.idle = td.func() PeerPool.prototype.ban = td.func() - const { AccountFetcher, snapFetchersCompleted } = await import( - '../../../src/sync/fetcher/accountfetcher' - ) + const { AccountFetcher } = await import('../../../src/sync/fetcher/accountfetcher') it('should start/stop', async () => { const config = new Config({ maxPerRequest: 5 }) @@ -285,21 +285,16 @@ describe('[AccountFetcher]', async () => { } catch (e) { assert.fail(`fetcher failed to store results, Error: ${(e as Error).message}`) } - const fetcherDoneFlags = fetcher.fetcherDoneFlags const snapCompleted = new Promise((resolve) => { config.events.once(Event.SYNC_SNAPSYNC_COMPLETE, (stateRoot: any) => resolve(stateRoot)) }) // test snapfetcher complete, since the storage fetcher is already empty it should anyway lead // call to snapFetchersCompleted with 
storageFetcher - snapFetchersCompleted(fetcherDoneFlags, TrieNodeFetcher) - snapFetchersCompleted( - fetcherDoneFlags, - AccountFetcher, - fetcher.accountTrie.root(), - fetcher.accountTrie, - config.events - ) + fetcher.snapFetchersCompleted(TrieNodeFetcher) + fetcher.snapFetchersCompleted(StorageFetcher) + fetcher.snapFetchersCompleted(ByteCodeFetcher) + fetcher.snapFetchersCompleted(AccountFetcher) const snapSyncTimeout = new Promise((_resolve, reject) => setTimeout(reject, 10000)) try { await Promise.race([snapCompleted, snapSyncTimeout]) diff --git a/packages/client/test/sync/fetcher/storagefetcher.spec.ts b/packages/client/test/sync/fetcher/storagefetcher.spec.ts index 78889291ae..73d2141467 100644 --- a/packages/client/test/sync/fetcher/storagefetcher.spec.ts +++ b/packages/client/test/sync/fetcher/storagefetcher.spec.ts @@ -415,7 +415,12 @@ describe('[StorageFetcher]', async () => { // send end of range input to store ;(fetcher as any)['destroyWhenDone'] = false await fetcher.store([Object.create(null)] as any) - assert.ok(fetcher['destroyWhenDone'] === true, 'should have marked fetcher to close') + assert.ok( + fetcher['destroyWhenDone'] === false, + 'should still be open to enqueue and process new requests' + ) + fetcher.setDestroyWhenDone() + assert.ok(fetcher['destroyWhenDone'] === true, 'should mark to close on finished') }) it('should find a fetchable peer', async () => { diff --git a/packages/client/test/sync/skeleton.spec.ts b/packages/client/test/sync/skeleton.spec.ts index 9e7a17a0aa..bbaf9de7bd 100644 --- a/packages/client/test/sync/skeleton.spec.ts +++ b/packages/client/test/sync/skeleton.spec.ts @@ -86,6 +86,13 @@ describe('[Skeleton] / initSync', async () => { head: block50, newState: [{ head: BigInt(50), tail: BigInt(50) }], }, + // Completely empty database with only the trivial genesis subchain + { + name: 'Completely empty database with only the genesis set', + oldState: [{ head: BigInt(0), tail: BigInt(0) }], + head: block50, + newState: [{ head: BigInt(50), tail: BigInt(50) }], + }, // Empty database with only the genesis set with a leftover empty sync // progress. This is a synthetic case, just for the sake of covering things. 
{ @@ -453,9 +460,15 @@ describe('[Skeleton] / setHead', async () => { assert.equal( (skeleton as any).status.progress.subchains.length, - 0, - 'no subchain should have been created' + 1, + 'trivial subchain0 should have been created' + ) + assert.equal( + (skeleton as any).status.progress.subchains[0]!.head, + BigInt(0), + 'trivial subchain0 should have been created' ) + try { await skeleton.putBlocks([block1]) assert.fail('should have not allowed putBlocks since no subchain set') @@ -468,9 +481,15 @@ describe('[Skeleton] / setHead', async () => { assert.equal(reorg, false, 'should not reorg on valid first block') assert.equal( (skeleton as any).status.progress.subchains.length, - 0, - 'no subchain should have been created' + 1, + 'trivial subchain should have been created' + ) + assert.equal( + (skeleton as any).status.progress.subchains[0]!.head, + BigInt(0), + 'trivial subchain0 should have been created' ) + reorg = await skeleton.setHead(block1, true) assert.equal(reorg, false, 'should not reorg on valid first block') assert.equal( @@ -579,6 +598,9 @@ describe('[Skeleton] / setHead', async () => { BigInt(5), 'canonical height should change when setHead is set with force=true' ) + + // unlink the skeleton for the below check to check all blocks cleared + skeleton.status.linked = false for (const block of [block1, block2, block3, block4, block5]) { assert.equal( (await skeleton.getBlock(block.header.number, true))?.hash(), @@ -596,7 +618,7 @@ describe('[Skeleton] / setHead', async () => { }) it('should fill the canonical chain after being linked to a canonical block past genesis', async () => { - const config = new Config({ common }) + const config = new Config({ common, engineNewpayloadMaxExecute: 10 }) const chain = await Chain.create({ config }) ;(chain.blockchain as any)._validateBlocks = false @@ -644,8 +666,9 @@ describe('[Skeleton] / setHead', async () => { BigInt(4), 'canonical height should not change when setHead with force=false' ) - await skeleton.setHead(block5, true) - await skeleton.blockingFillWithCutoff(10) + + // test sethead and blockingFillWithCutoff true via forkchoice update + await skeleton.forkchoiceUpdate(block5) await wait(200) assert.equal( @@ -653,6 +676,10 @@ describe('[Skeleton] / setHead', async () => { BigInt(5), 'canonical height should change when setHead with force=true' ) + + // unlink the skeleton for the below check to check all blocks cleared + const prevLinked = skeleton.status.linked + skeleton.status.linked = false for (const block of [block3, block4, block5]) { assert.equal( (await skeleton.getBlock(block.header.number, true))?.hash(), @@ -667,6 +694,112 @@ describe('[Skeleton] / setHead', async () => { )} should be cleaned up after filling canonical chain` ) } + // restore linkedStatus + skeleton.status.linked = prevLinked + + const block41 = Block.fromBlockData( + { header: { number: 4, parentHash: block3.hash(), difficulty: 101 } }, + { common, setHardfork: true } + ) + const block51 = Block.fromBlockData( + { header: { number: 5, parentHash: block41.hash(), difficulty: 100 } }, + { common, setHardfork: true } + ) + const block61 = Block.fromBlockData( + { header: { number: 6, parentHash: block51.hash(), difficulty: 100 } }, + { common, setHardfork: true } + ) + + await skeleton.setHead(block41, false) + await skeleton.setHead(block51, false) + + // should link the chains including the 41, 51 block backfilled from the unfinalized + await skeleton.forkchoiceUpdate(block61) + assert.equal(skeleton.status.progress.subchains[0]?.head, BigInt(6), 
'head should be correct') + assert.equal( + skeleton.status.progress.subchains[0]?.tail, + BigInt(4), + 'tail should be backfilled' + ) + assert.equal(skeleton.status.linked, true, 'should be linked') + assert.equal(chain.blocks.height, BigInt(6), 'all blocks should be in chain') + + const block71 = Block.fromBlockData( + { header: { number: 7, parentHash: block61.hash(), difficulty: 100 } }, + { common, setHardfork: true } + ) + const block81 = Block.fromBlockData( + { header: { number: 8, parentHash: block71.hash(), difficulty: 100 } }, + { common, setHardfork: true } + ) + const block91 = Block.fromBlockData( + { header: { number: 9, parentHash: block81.hash(), difficulty: 100 } }, + { common, setHardfork: true } + ) + + // lets jump ahead and add the block 81 and 71 with annoucements and trigger tryTailBackfill + await skeleton.forkchoiceUpdate(block91) + assert.equal(skeleton.status.progress.subchains.length, 1, '1 subchain with older dropped') + assert.equal(skeleton.status.progress.subchains[0]?.head, BigInt(9), 'head should be correct') + assert.equal( + skeleton.status.progress.subchains[0]?.tail, + BigInt(9), + 'new subchain should be created' + ) + await skeleton.setHead(block81, false) + await skeleton.setHead(block71, false) + await skeleton.tryTailBackfill() + + assert.equal(skeleton.status.progress.subchains[0]?.head, BigInt(9), 'head should be correct') + assert.equal( + skeleton.status.progress.subchains[0]?.tail, + BigInt(7), + 'tail should be backfilled' + ) + assert.equal(skeleton.status.linked, true, 'should be linked') + // async wait needed here so the async fillCanonicalChain can fill the chain + await wait(50) + assert.equal(chain.blocks.height, BigInt(9), 'all blocks should be in chain') + assert.equal( + equalsBytes(chain.blocks.latest!.hash(), block91.hash()), + true, + 'correct head hash' + ) + + // do a very common reorg that happens in a network: reorged head block + const block92 = Block.fromBlockData( + { header: { number: 9, parentHash: block81.hash(), difficulty: 101 } }, + { common, setHardfork: true } + ) + const block102 = Block.fromBlockData( + { header: { number: 10, parentHash: block92.hash(), difficulty: 100 } }, + { common, setHardfork: true } + ) + + await skeleton.forkchoiceUpdate(block92) + assert.equal( + skeleton.status.progress.subchains[0]?.head, + BigInt(9), + 'head number should be same' + ) + assert.equal( + skeleton.status.progress.subchains[0]?.tail, + BigInt(9), + 'tail should be truncated to head' + ) + assert.equal( + equalsBytes(chain.blocks.latest!.hash(), block92.hash()), + true, + 'correct reorged head hash' + ) + + // should be able to build on top of the next block + await skeleton.forkchoiceUpdate(block102) + assert.equal( + equalsBytes(chain.blocks.latest!.hash(), block102.hash()), + true, + 'continue reorged chain' + ) }) it('should abort filling the canonical chain if the terminal block is invalid', async () => { diff --git a/packages/statemanager/src/stateManager.ts b/packages/statemanager/src/stateManager.ts index 383c55acf7..0b373ce743 100644 --- a/packages/statemanager/src/stateManager.ts +++ b/packages/statemanager/src/stateManager.ts @@ -406,19 +406,28 @@ export class DefaultStateManager implements EVMStateManagerInterface { * cache or does a lookup. 
diff --git a/packages/statemanager/src/stateManager.ts b/packages/statemanager/src/stateManager.ts
index 383c55acf7..0b373ce743 100644
--- a/packages/statemanager/src/stateManager.ts
+++ b/packages/statemanager/src/stateManager.ts
@@ -406,19 +406,28 @@ export class DefaultStateManager implements EVMStateManagerInterface {
    * cache or does a lookup.
    * @private
    */
-  protected _getStorageTrie(address: Address, account: Account): Trie {
-    // from storage cache
-    const addressHex = bytesToUnprefixedHex(address.bytes)
-    const storageTrie = this._storageTries[addressHex]
+  // TODO PR: have a better interface for hashed address pull?
+  protected _getStorageTrie(addressOrHash: Address | Uint8Array, account?: Account): Trie {
+    // use hashed key for lookup from storage cache
+    const addressHex = bytesToUnprefixedHex(
+      addressOrHash instanceof Address ? keccak256(addressOrHash.bytes) : addressOrHash
+    )
+    let storageTrie = this._storageTries[addressHex]
     if (storageTrie === undefined) {
       const keyPrefix = this._prefixStorageTrieKeys
-        ? keccak256(address.bytes).slice(0, 7)
+        ? (addressOrHash instanceof Address ? keccak256(addressOrHash.bytes) : addressOrHash).slice(
+            0,
+            7
+          )
         : undefined
-      const storageTrie = this._trie.shallowCopy(false, { keyPrefix })
-      storageTrie.root(account.storageRoot)
+      storageTrie = this._trie.shallowCopy(false, { keyPrefix })
+      if (account !== undefined) {
+        storageTrie.root(account.storageRoot)
+      } else {
+        storageTrie.root(storageTrie.EMPTY_TRIE_ROOT)
+      }
       storageTrie.flushCheckpoints()
       this._storageTries[addressHex] = storageTrie
-      return storageTrie
     }
     return storageTrie
   }
diff --git a/packages/statemanager/test/proofStateManager.spec.ts b/packages/statemanager/test/proofStateManager.spec.ts
index 6fc1689a07..64a2a1c095 100644
--- a/packages/statemanager/test/proofStateManager.spec.ts
+++ b/packages/statemanager/test/proofStateManager.spec.ts
@@ -169,7 +169,7 @@ describe('ProofStateManager', () => {
       }
     }
     storageTrie.root(hexToBytes(storageRoot))
-    const addressHex = bytesToUnprefixedHex(address.bytes)
+    const addressHex = bytesToUnprefixedHex(keccak256(address.bytes))
     stateManager['_storageTries'][addressHex] = storageTrie
     trie.root(stateRoot!)
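Note: with the `_getStorageTrie` change above, the `_storageTries` cache is keyed by the keccak256 hash of the address rather than the raw address bytes (which is why the proof test now hashes the address before seeding the cache), and callers that only hold the hashed account key can pass the 32-byte hash directly; when no account is supplied, the trie is rooted at the empty trie root. A minimal sketch of the two call shapes, via a hypothetical subclass since the helper is `protected` (names below are illustrative only):

```ts
import { Account, Address } from '@ethereumjs/util'
import { DefaultStateManager } from '@ethereumjs/statemanager'

// Hypothetical subclass, for illustration only: it simply forwards to the protected helper.
class InspectableStateManager extends DefaultStateManager {
  // classic path: an Address plus its Account, trie rooted at account.storageRoot
  storageTrieFor(address: Address, account: Account) {
    return this._getStorageTrie(address, account)
  }

  // snap-sync style path: the caller already has keccak256(address) and no account yet,
  // so the trie starts out at the empty trie root
  storageTrieForHashedKey(accountHash: Uint8Array) {
    return this._getStorageTrie(accountHash)
  }
}
```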
diff --git a/packages/util/src/constants.ts b/packages/util/src/constants.ts
index 148fa85f35..d1c8110465 100644
--- a/packages/util/src/constants.ts
+++ b/packages/util/src/constants.ts
@@ -97,9 +97,11 @@ export const BIGINT_255 = BigInt(255)
 export const BIGINT_256 = BigInt(256)

 export const BIGINT_96 = BigInt(96)
+export const BIGINT_100 = BigInt(100)
 export const BIGINT_160 = BigInt(160)
 export const BIGINT_224 = BigInt(224)

 export const BIGINT_2EXP96 = BigInt(79228162514264337593543950336)
 export const BIGINT_2EXP160 = BigInt(1461501637330902918203684832716283019655932542976)
 export const BIGINT_2EXP224 = BigInt(26959946667150639794667015087019630673637144422540572481103610249216)
+export const BIGINT_2EXP256 = BIGINT_2 ** BIGINT_256
diff --git a/packages/util/src/units.ts b/packages/util/src/units.ts
index 4ed6f1a6c8..dfbca2d2de 100644
--- a/packages/util/src/units.ts
+++ b/packages/util/src/units.ts
@@ -1,2 +1,20 @@
+import { BIGINT_0, BIGINT_1 } from './constants.js'
 /** Easy conversion from Gwei to wei */
 export const GWEI_TO_WEI = BigInt(1000000000)
+
+export function formatBigDecimal(
+  numerator: bigint,
+  denominator: bigint,
+  maxDecimalFactor: bigint
+): string {
+  if (denominator === BIGINT_0) {
+    denominator = BIGINT_1
+  }
+
+  const full = numerator / denominator
+  const fraction = ((numerator - full * denominator) * maxDecimalFactor) / denominator
+
+  // zeros to be added post decimal are number of zeros in maxDecimalFactor - number of digits in fraction
+  const zerosPostDecimal = String(maxDecimalFactor).length - 1 - String(fraction).length
+  return `${full}.${'0'.repeat(zerosPostDecimal)}${fraction}`
+}
diff --git a/packages/util/test/units.spec.ts b/packages/util/test/units.spec.ts
new file mode 100644
index 0000000000..be78ec3927
--- /dev/null
+++ b/packages/util/test/units.spec.ts
@@ -0,0 +1,22 @@
+import { assert, describe, it } from 'vitest'
+
+import { formatBigDecimal } from '../src/index.js'
+
+describe('formatBigDecimal', function () {
+  const testCases: [bigint, bigint, bigint, string][] = [
+    [BigInt('103797739275696858'), BigInt('1000000000000000000'), BigInt('100000'), '0.10379'],
+    [BigInt('103797739275696858'), BigInt('1000000000000000000'), BigInt('1000'), '0.103'],
+    [BigInt('10379773927569685'), BigInt('1000000000000000000'), BigInt('1000'), '0.010'],
+    [BigInt('1037977392756968'), BigInt('1000000000000000000'), BigInt('1000'), '0.001'],
+    [BigInt('1037977392756968'), BigInt('1000000000000000000'), BigInt('100000'), '0.00103'],
+    [BigInt('58200000000000000'), BigInt('1000000000000000000'), BigInt('100000'), '0.05820'],
+    [BigInt('111103797739275696858'), BigInt('1000000000000000000'), BigInt('100000'), '111.10379'],
+    [BigInt('111103797739275696858'), BigInt('1000000000000000000'), BigInt('1000'), '111.103'],
+    [BigInt('1037977392756'), BigInt('1000000000000000000'), BigInt('100000'), '0.00000'],
+  ]
+  for (const [numerator, denominator, decimalFactor, expectedString] of testCases) {
+    it(`format ${numerator} / ${denominator} correctly to ${expectedString}`, () => {
+      assert.equal(formatBigDecimal(numerator, denominator, decimalFactor), expectedString)
+    })
+  }
+})
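Note: `formatBigDecimal` is a bigint-only fixed-point formatter: `maxDecimalFactor` (a power of ten) sets how many decimal digits are kept, and the zero-padding term restores the leading zeros that `String(fraction)` drops. A quick trace of one of the test vectors above (assuming the helper is re-exported from the package index, as the new spec file does):

```ts
import { formatBigDecimal } from '@ethereumjs/util'

// Trace for formatBigDecimal(1037977392756968n, 10n ** 18n, 100000n):
//   full     = 1037977392756968n / 10n ** 18n              = 0n
//   fraction = (1037977392756968n * 100000n) / 10n ** 18n  = 103n
//   padding  = '100000'.length - 1 - '103'.length          = 2 leading zeros
//   result   = '0' + '.' + '00' + '103'                    = '0.00103'
console.log(
  formatBigDecimal(BigInt('1037977392756968'), BigInt('1000000000000000000'), BigInt('100000'))
) // -> '0.00103'
```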