diff --git a/Cargo.lock b/Cargo.lock
index b671f7ebf6..b54a342ae0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1087,8 +1087,7 @@ dependencies = [
 [[package]]
 name = "incrementalmerkletree"
 version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "361c467824d4d9d4f284be4b2608800839419dccc4d4608f28345237fe354623"
+source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146"
 dependencies = [
  "either",
  "proptest",
@@ -2246,8 +2245,7 @@ dependencies = [
 [[package]]
 name = "shardtree"
 version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf20c7a2747d9083092e3a3eeb9a7ed75577ae364896bebbc5e0bdcd4e97735"
+source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146"
 dependencies = [
  "assert_matches",
  "bitflags 2.4.1",
diff --git a/Cargo.toml b/Cargo.toml
index d90c6d3e04..28886d4f49 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,3 +120,7 @@ zip32 = "0.1"
 lto = true
 panic = 'abort'
 codegen-units = 1
+
+[patch.crates-io]
+incrementalmerkletree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" }
+shardtree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" }
diff --git a/zcash_client_backend/src/data_api.rs b/zcash_client_backend/src/data_api.rs
index 5a72ea9309..8cd61d55bb 100644
--- a/zcash_client_backend/src/data_api.rs
+++ b/zcash_client_backend/src/data_api.rs
@@ -67,7 +67,10 @@ use incrementalmerkletree::{frontier::Frontier, Retention};
 use secrecy::SecretVec;
 use shardtree::{error::ShardTreeError, store::ShardStore, ShardTree};
 
-use self::{chain::CommitmentTreeRoot, scanning::ScanRange};
+use self::{
+    chain::{ChainState, CommitmentTreeRoot},
+    scanning::ScanRange,
+};
 use crate::{
     address::UnifiedAddress,
     decrypt::DecryptedOutput,
@@ -1260,8 +1263,11 @@ pub trait WalletWrite: WalletRead {
     /// pertaining to this wallet.
     ///
     /// `blocks` must be sequential, in order of increasing block height
-    fn put_blocks(&mut self, blocks: Vec<ScannedBlock<Self::AccountId>>)
-        -> Result<(), Self::Error>;
+    fn put_blocks(
+        &mut self,
+        from_state: ChainState,
+        blocks: Vec<ScannedBlock<Self::AccountId>>,
+    ) -> Result<(), Self::Error>;
 
     /// Updates the wallet's view of the blockchain.
     ///
@@ -1400,9 +1406,11 @@ pub mod testing {
     };
 
     use super::{
-        chain::CommitmentTreeRoot, scanning::ScanRange, AccountBirthday, BlockMetadata,
-        DecryptedTransaction, InputSource, NullifierQuery, ScannedBlock, SentTransaction,
-        WalletCommitmentTrees, WalletRead, WalletSummary, WalletWrite, SAPLING_SHARD_HEIGHT,
+        chain::{ChainState, CommitmentTreeRoot},
+        scanning::ScanRange,
+        AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery,
+        ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary,
+        WalletWrite, SAPLING_SHARD_HEIGHT,
     };
 
     #[cfg(feature = "transparent-inputs")]
@@ -1633,6 +1641,7 @@ pub mod testing {
         #[allow(clippy::type_complexity)]
         fn put_blocks(
             &mut self,
+            _from_state: ChainState,
             _blocks: Vec<ScannedBlock<Self::AccountId>>,
         ) -> Result<(), Self::Error> {
             Ok(())
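
For context on the trait change above: a `WalletWrite` implementation now receives the chain state that immediately precedes the first scanned block. A minimal sketch of a call site under the new signature (the helper name `store_scanned` and its bindings are hypothetical, for illustration only):

```rust
use zcash_client_backend::data_api::{chain::ChainState, ScannedBlock, WalletWrite};

// Persist a batch of scanned blocks, threading through the note commitment
// tree state as of the block just before the first element of `scanned`.
fn store_scanned<DbT: WalletWrite>(
    db: &mut DbT,
    from_state: ChainState,
    scanned: Vec<ScannedBlock<DbT::AccountId>>,
) -> Result<(), DbT::Error> {
    // Per the trait docs, `scanned` must be sequential and must start at
    // from_state.block_height() + 1.
    db.put_blocks(from_state, scanned)
}
```
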
diff --git a/zcash_client_backend/src/data_api/chain.rs b/zcash_client_backend/src/data_api/chain.rs
index 5cd911c522..d547166dd1 100644
--- a/zcash_client_backend/src/data_api/chain.rs
+++ b/zcash_client_backend/src/data_api/chain.rs
@@ -145,6 +145,7 @@
 use std::ops::Range;
 
+use incrementalmerkletree::frontier::Frontier;
 use subtle::ConditionallySelectable;
 use zcash_primitives::consensus::{self, BlockHeight};
@@ -278,6 +279,58 @@ impl ScanSummary {
     }
 }
 
+/// The final note commitment tree state for each shielded pool, as of a particular block height.
+#[derive(Debug)]
+pub struct ChainState {
+    block_height: BlockHeight,
+    final_sapling_tree: Frontier<sapling::Node, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>,
+    #[cfg(feature = "orchard")]
+    final_orchard_tree:
+        Frontier<orchard::tree::MerkleHashOrchard, { orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 }>,
+}
+
+impl ChainState {
+    /// Construct a new [`ChainState`] from its constituent parts.
+    pub fn new(
+        block_height: BlockHeight,
+        final_sapling_tree: Frontier<sapling::Node, { sapling::NOTE_COMMITMENT_TREE_DEPTH }>,
+        #[cfg(feature = "orchard")] final_orchard_tree: Frontier<
+            orchard::tree::MerkleHashOrchard,
+            { orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 },
+        >,
+    ) -> Self {
+        Self {
+            block_height,
+            final_sapling_tree,
+            #[cfg(feature = "orchard")]
+            final_orchard_tree,
+        }
+    }
+
+    /// Returns the block height to which this chain state applies.
+    pub fn block_height(&self) -> BlockHeight {
+        self.block_height
+    }
+
+    /// Returns the frontier of the Sapling note commitment tree as of the end of the block at
+    /// [`Self::block_height`].
+    pub fn final_sapling_tree(
+        &self,
+    ) -> &Frontier<sapling::Node, { sapling::NOTE_COMMITMENT_TREE_DEPTH }> {
+        &self.final_sapling_tree
+    }
+
+    /// Returns the frontier of the Orchard note commitment tree as of the end of the block at
+    /// [`Self::block_height`].
+    #[cfg(feature = "orchard")]
+    pub fn final_orchard_tree(
+        &self,
+    ) -> &Frontier<orchard::tree::MerkleHashOrchard, { orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 }>
+    {
+        &self.final_orchard_tree
+    }
+}
+
 /// Scans at most `limit` blocks from the provided block source in order to find transactions
 /// received by the accounts tracked in the provided wallet database.
 ///
@@ -290,7 +343,7 @@ pub fn scan_cached_blocks<ParamsT, BlockSourceT, DbT>(
     params: &ParamsT,
     block_source: &BlockSourceT,
     data_db: &mut DbT,
-    from_height: BlockHeight,
+    from_state: ChainState,
     limit: usize,
 ) -> Result<ScanSummary, Error<DbT::Error, BlockSourceT::Error>>
 where
@@ -299,6 +352,7 @@ where
     DbT: WalletWrite,
     <DbT as WalletRead>::AccountId: ConditionallySelectable + Default + Send + 'static,
 {
+    let from_height = from_state.block_height + 1;
    // Fetch the UnifiedFullViewingKeys we are tracking
     let account_ufvks = data_db
         .get_unified_full_viewing_keys()
@@ -392,7 +446,9 @@ where
         },
     )?;
 
-    data_db.put_blocks(scanned_blocks).map_err(Error::Wallet)?;
+    data_db
+        .put_blocks(from_state, scanned_blocks)
+        .map_err(Error::Wallet)?;
 
     Ok(scan_summary)
 }
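
Likewise, a sketch of driving `scan_cached_blocks` under the new signature. This assumes the `orchard` feature is disabled, in which case `ChainState::new` takes only the height and the Sapling frontier; `params`, `block_source`, `db_data`, and `prior_height` are hypothetical bindings from the surrounding wallet code:

```rust
use incrementalmerkletree::frontier::Frontier;
use zcash_client_backend::data_api::chain::{scan_cached_blocks, ChainState};

// Resume scanning at prior_height + 1, seeding the wallet's note commitment
// tree with the frontier as of `prior_height`. An empty frontier is only
// correct when the tree was actually empty at that height (e.g. a wallet
// birthday at or before Sapling activation).
let from_state = ChainState::new(prior_height, Frontier::empty());
scan_cached_blocks(&params, &block_source, &mut db_data, from_state, 100)?;
```
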
diff --git a/zcash_client_sqlite/src/lib.rs b/zcash_client_sqlite/src/lib.rs
index 992ac9533d..081932ee21 100644
--- a/zcash_client_sqlite/src/lib.rs
+++ b/zcash_client_sqlite/src/lib.rs
@@ -32,16 +32,25 @@
 // Catch documentation errors caused by code changes.
 #![deny(rustdoc::broken_intra_doc_links)]
 
-use incrementalmerkletree::Position;
+use incrementalmerkletree::{frontier::Frontier, Position, Retention};
 use maybe_rayon::{
     prelude::{IndexedParallelIterator, ParallelIterator},
     slice::ParallelSliceMut,
 };
 use rusqlite::{self, Connection};
 use secrecy::{ExposeSecret, SecretVec};
-use shardtree::{error::ShardTreeError, ShardTree};
+use shardtree::{
+    error::ShardTreeError,
+    store::{Checkpoint, ShardStore},
+    ShardTree,
+};
 use std::{
-    borrow::Borrow, collections::HashMap, convert::AsRef, fmt, num::NonZeroU32, ops::Range,
+    borrow::Borrow,
+    collections::{BTreeMap, HashMap},
+    convert::AsRef,
+    fmt,
+    num::NonZeroU32,
+    ops::Range,
     path::Path,
 };
 use subtle::ConditionallySelectable;
@@ -58,7 +67,7 @@ use zcash_client_backend::{
     address::UnifiedAddress,
     data_api::{
         self,
-        chain::{BlockSource, CommitmentTreeRoot},
+        chain::{BlockSource, ChainState, CommitmentTreeRoot},
         scanning::{ScanPriority, ScanRange},
         AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery,
         ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary,
@@ -92,7 +101,6 @@ use {
 
 pub mod chain;
 pub mod error;
-
 pub mod wallet;
 
 use wallet::{
     commitment_tree::{self, put_shard_roots},
@@ -535,6 +543,7 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletWrite for WalletDb<C, P>
     #[allow(clippy::type_complexity)]
     fn put_blocks(
         &mut self,
+        from_state: ChainState,
         blocks: Vec<ScannedBlock<Self::AccountId>>,
     ) -> Result<(), Self::Error> {
         struct BlockPositions {
@@ -695,62 +704,170 @@ impl<C: Borrow<rusqlite::Connection>, P: consensus::Parameters> WalletWrite for WalletDb<C, P> {
             // Create subtrees from the note commitments in parallel.
             const CHUNK_SIZE: usize = 1024;
-            {
-                let sapling_subtrees = sapling_commitments
-                    .par_chunks_mut(CHUNK_SIZE)
-                    .enumerate()
-                    .filter_map(|(i, chunk)| {
-                        let start =
-                            start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64;
-                        let end = start + chunk.len() as u64;
-
-                        shardtree::LocatedTree::from_iter(
-                            start..end,
-                            SAPLING_SHARD_HEIGHT.into(),
-                            chunk.iter_mut().map(|n| n.take().expect("always Some")),
-                        )
+            let sapling_subtrees = sapling_commitments
+                .par_chunks_mut(CHUNK_SIZE)
+                .enumerate()
+                .filter_map(|(i, chunk)| {
+                    let start =
+                        start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64;
+                    let end = start + chunk.len() as u64;
+
+                    shardtree::LocatedTree::from_iter(
+                        start..end,
+                        SAPLING_SHARD_HEIGHT.into(),
+                        chunk.iter_mut().map(|n| n.take().expect("always Some")),
+                    )
+                })
+                .map(|res| (res.subtree, res.checkpoints))
+                .collect::<Vec<_>>();
+
+            #[cfg(feature = "orchard")]
+            let orchard_subtrees = orchard_commitments
+                .par_chunks_mut(CHUNK_SIZE)
+                .enumerate()
+                .filter_map(|(i, chunk)| {
+                    let start =
+                        start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64;
+                    let end = start + chunk.len() as u64;
+
+                    shardtree::LocatedTree::from_iter(
+                        start..end,
+                        ORCHARD_SHARD_HEIGHT.into(),
+                        chunk.iter_mut().map(|n| n.take().expect("always Some")),
+                    )
+                })
+                .map(|res| (res.subtree, res.checkpoints))
+                .collect::<Vec<_>>();
+
+            // Collect the complete set of Sapling checkpoints
+            let sapling_checkpoint_positions: BTreeMap<BlockHeight, Position> =
+                sapling_subtrees
+                    .iter()
+                    .flat_map(|(_, checkpoints)| checkpoints.iter())
+                    .map(|(k, v)| (*k, *v))
+                    .collect();
+
+            #[cfg(feature = "orchard")]
+            let orchard_checkpoint_positions: BTreeMap<BlockHeight, Position> =
+                orchard_subtrees
+                    .iter()
+                    .flat_map(|(_, checkpoints)| checkpoints.iter())
+                    .map(|(k, v)| (*k, *v))
+                    .collect();
+
+            fn copy_checkpoints<H, const DEPTH: u8>(
+                // The set of checkpoints to copy from
+                from_checkpoint_positions: &BTreeMap<BlockHeight, Position>,
+                // The set of checkpoints to copy into
+                to_checkpoint_positions: &BTreeMap<BlockHeight, Position>,
+                // The block height to use when there is no preceding checkpoint in
+                // to_checkpoint_positions
+                state_height: BlockHeight,
+                // The frontier whose position will be used when there is no preceding
+                // checkpoint in to_checkpoint_positions.
+                state_final_tree: &Frontier<H, DEPTH>,
+            ) -> Vec<(BlockHeight, Checkpoint)> {
+                from_checkpoint_positions
+                    .keys()
+                    .flat_map(|from_checkpoint_height| {
+                        to_checkpoint_positions
+                            .range::<BlockHeight, _>(..=*from_checkpoint_height)
+                            .last()
+                            .map_or_else(
+                                || {
+                                    Some((
+                                        state_height,
+                                        state_final_tree.value().map_or_else(
+                                            || Checkpoint::tree_empty(),
+                                            |t| Checkpoint::at_position(t.position()),
+                                        ),
+                                    ))
+                                },
+                                |(to_prev_height, position)| {
+                                    if *to_prev_height < *from_checkpoint_height {
+                                        Some((
+                                            *from_checkpoint_height,
+                                            Checkpoint::at_position(*position),
+                                        ))
+                                    } else {
+                                        // The checkpoint already exists, so we don't need to
+                                        // do anything.
+                                        None
+                                    }
+                                },
+                            )
+                            .into_iter()
                     })
-                    .map(|res| (res.subtree, res.checkpoints))
-                    .collect::<Vec<_>>();
+                    .collect::<Vec<_>>()
+            }
 
-                // Update the Sapling note commitment tree with all newly read note commitments
-                let mut sapling_subtrees = sapling_subtrees.into_iter();
-                wdb.with_sapling_tree_mut::<_, _, Self::Error>(move |sapling_tree| {
-                    for (tree, checkpoints) in &mut sapling_subtrees {
+            #[cfg(feature = "orchard")]
+            let missing_sapling_checkpoints = copy_checkpoints(
+                &orchard_checkpoint_positions,
+                &sapling_checkpoint_positions,
+                from_state.block_height(),
+                from_state.final_sapling_tree(),
+            );
+            #[cfg(feature = "orchard")]
+            let missing_orchard_checkpoints = copy_checkpoints(
+                &sapling_checkpoint_positions,
+                &orchard_checkpoint_positions,
+                from_state.block_height(),
+                from_state.final_orchard_tree(),
+            );
+
+            // Update the Sapling note commitment tree with all newly read note commitments
+            {
+                let mut sapling_subtrees_iter = sapling_subtrees.into_iter();
+                wdb.with_sapling_tree_mut::<_, _, Self::Error>(|sapling_tree| {
+                    sapling_tree.insert_frontier(
+                        from_state.final_sapling_tree().clone(),
+                        Retention::Checkpoint {
+                            id: from_state.block_height(),
+                            is_marked: false,
+                        },
+                    )?;
+
+                    for (tree, checkpoints) in &mut sapling_subtrees_iter {
                         sapling_tree.insert_tree(tree, checkpoints)?;
                     }
+                    // Ensure we have a Sapling checkpoint for each checkpointed Orchard block height
+                    #[cfg(feature = "orchard")]
+                    for (height, checkpoint) in &missing_sapling_checkpoints {
+                        sapling_tree
+                            .store_mut()
+                            .add_checkpoint(*height, checkpoint.clone())
+                            .map_err(ShardTreeError::Storage)?;
+                    }
+
                     Ok(())
                 })?;
             }
 
-            // Create subtrees from the note commitments in parallel.
+            // Update the Orchard note commitment tree with all newly read note commitments
             #[cfg(feature = "orchard")]
             {
-                let orchard_subtrees = orchard_commitments
-                    .par_chunks_mut(CHUNK_SIZE)
-                    .enumerate()
-                    .filter_map(|(i, chunk)| {
-                        let start =
-                            start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64;
-                        let end = start + chunk.len() as u64;
-
-                        shardtree::LocatedTree::from_iter(
-                            start..end,
-                            ORCHARD_SHARD_HEIGHT.into(),
-                            chunk.iter_mut().map(|n| n.take().expect("always Some")),
-                        )
-                    })
-                    .map(|res| (res.subtree, res.checkpoints))
-                    .collect::<Vec<_>>();
-
-                // Update the Orchard note commitment tree with all newly read note commitments
                 let mut orchard_subtrees = orchard_subtrees.into_iter();
-                wdb.with_orchard_tree_mut::<_, _, Self::Error>(move |orchard_tree| {
+                wdb.with_orchard_tree_mut::<_, _, Self::Error>(|orchard_tree| {
+                    orchard_tree.insert_frontier(
+                        from_state.final_orchard_tree().clone(),
+                        Retention::Checkpoint {
+                            id: from_state.block_height(),
+                            is_marked: false,
+                        },
+                    )?;
+
                     for (tree, checkpoints) in &mut orchard_subtrees {
                         orchard_tree.insert_tree(tree, checkpoints)?;
                     }
+                    for (height, checkpoint) in &missing_orchard_checkpoints {
+                        orchard_tree
+                            .store_mut()
+                            .add_checkpoint(*height, checkpoint.clone())
+                            .map_err(ShardTreeError::Storage)?;
+                    }
+
                     Ok(())
                 })?;
             }
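
The cross-pool checkpoint alignment above is easier to follow outside diff form. The rule `copy_checkpoints` encodes is: for each checkpointed height in the source pool, if the target pool has no checkpoint at or below that height, fall back to a checkpoint of the chain-state frontier; if the target's nearest checkpoint sits strictly below, reuse that checkpoint's position at the missing height; otherwise the checkpoint already exists. A standalone sketch of just this bookkeeping, with plain `u64`s standing in for `BlockHeight` and `Position` (hypothetical, for illustration only):

```rust
use std::collections::BTreeMap;

// Compute (height, position) pairs to add to the target pool's checkpoint set;
// `None` stands in for a checkpoint of the empty tree.
fn missing_checkpoints(
    from: &BTreeMap<u64, u64>,   // source pool: checkpoint height -> tree position
    to: &BTreeMap<u64, u64>,     // target pool: checkpoint height -> tree position
    state_height: u64,           // ChainState::block_height()
    state_position: Option<u64>, // chain-state frontier position, if the tree is non-empty
) -> Vec<(u64, Option<u64>)> {
    from.keys()
        .filter_map(|&h| match to.range(..=h).last() {
            // No target checkpoint at or below `h`: checkpoint the chain state itself.
            None => Some((state_height, state_position)),
            // Nearest target checkpoint is strictly below `h`: the target tree has not
            // grown since then, so its position is still valid at height `h`.
            Some((&prev, &pos)) if prev < h => Some((h, Some(pos))),
            // A target checkpoint already exists at `h`; nothing to do.
            _ => None,
        })
        .collect()
}
```

For example, with source checkpoints at heights 10 and 20 and a single target checkpoint at height 15 with position 7, this yields `(state_height, state_position)` for height 10 and `(20, Some(7))` for height 20, mirroring the two `map_or_else` branches in the diff.
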