diff --git a/src/app.rs b/src/app.rs
index 3713145a..bae70e08 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -1,22 +1,21 @@
 use std::collections::HashSet;
 use std::sync::{Arc, OnceLock};
-use std::time::Instant;

 use chrono::{Duration, Utc};
 use ruint::Uint;
-use semaphore::poseidon_tree::LazyPoseidonTree;
 use semaphore::protocol::verify_proof;
 use sqlx::{Postgres, Transaction};
 use tracing::{info, instrument, warn};

 use crate::config::Config;
-use crate::contracts::{IdentityManager, SharedIdentityManager};
+use crate::contracts::IdentityManager;
 use crate::database::query::DatabaseQuery as _;
 use crate::database::Database;
 use crate::ethereum::Ethereum;
+use crate::identity::validator::IdentityValidator;
+use crate::identity_tree::initializer::TreeInitializer;
 use crate::identity_tree::{
-    CanonicalTreeBuilder, Hash, InclusionProof, ProcessedStatus, RootItem, TreeState, TreeUpdate,
-    TreeVersionReadOps, TreeWithNextVersion,
+    Hash, InclusionProof, ProcessedStatus, RootItem, TreeState, TreeVersionReadOps,
 };
 use crate::prover::map::initialize_prover_maps;
 use crate::prover::{ProverConfig, ProverType};
@@ -26,14 +25,14 @@ use crate::server::data::{
 };
 use crate::server::error::Error as ServerError;
 use crate::utils::retry_tx;
-use crate::utils::tree_updates::dedup_tree_updates;

 pub struct App {
-    pub database: Arc<Database>,
-    pub identity_manager: SharedIdentityManager,
-    tree_state: OnceLock<TreeState>,
-    pub snark_scalar_field: Hash,
-    pub config: Config,
+    pub database: Arc<Database>,
+    pub identity_manager: Arc<IdentityManager>,
+    tree_state: OnceLock<TreeState>,
+    pub config: Config,
+
+    pub identity_validator: IdentityValidator,
 }

 impl App {
@@ -72,20 +71,14 @@ impl App {
             .await?,
         );

-        // TODO Export the reduced-ness check that this is enabling from the
-        // `semaphore-rs` library when we bump the version.
-        let snark_scalar_field = Hash::from_str_radix(
-            "21888242871839275222246405745257275088548364400416034343698204186575808495617",
-            10,
-        )
-        .expect("This should just parse.");
+        let identity_validator = Default::default();

         let app = Arc::new(Self {
             database,
             identity_manager,
             tree_state: OnceLock::new(),
-            snark_scalar_field,
             config,
+            identity_validator,
         });

         Ok(app)
@@ -94,47 +87,13 @@ impl App {
     /// Initializes the tree state. This should only ever be called once.
     /// Attempts to call this method more than once will result in a panic.
    pub async fn init_tree(self: Arc<Self>) -> anyhow::Result<()> {
-        // Await for all pending transactions
-        self.identity_manager.await_clean_slate().await?;
-
-        // Prefetch latest root & mark it as mined
-        let root_hash = self.identity_manager.latest_root().await?;
-        let root_hash = root_hash.into();
-
-        let initial_root_hash = LazyPoseidonTree::new(
-            self.identity_manager.tree_depth(),
-            self.identity_manager.initial_leaf_value(),
+        let tree_state = TreeInitializer::new(
+            self.database.clone(),
+            self.identity_manager.clone(),
+            self.config.tree.clone(),
         )
-        .root();
-
-        // We don't store the initial root in the database, so we have to skip this step
-        // if the contract root hash is equal to initial root hash
-        if root_hash != initial_root_hash {
-            // Note that we don't have a way of queuing a root here for finalization.
-            // so it's going to stay as "processed" until the next root is mined.
-            self.database.mark_root_as_processed_tx(&root_hash).await?;
-            self.database.delete_batches_after_root(&root_hash).await?;
-        } else {
-            // Db is either empty or we're restarting with a new contract/chain
-            // so we should mark everything as pending
-            self.database.mark_all_as_pending().await?;
-            self.database.delete_all_batches().await?;
-        }
-
-        let timer = Instant::now();
-        let mut tree_state = self.restore_or_initialize_tree(initial_root_hash).await?;
-        info!("Tree state initialization took: {:?}", timer.elapsed());
-
-        let tree_root = tree_state.get_processed_tree().get_root();
-
-        if tree_root != initial_root_hash {
-            warn!(
-                "Cached tree root is different from the contract root. Purging cache and \
-                 reinitializing."
-            );
-
-            tree_state = self.restore_or_initialize_tree(initial_root_hash).await?;
-        }
+        .run()
+        .await?;

         self.tree_state.set(tree_state).map_err(|_| {
             anyhow::anyhow!(
@@ -145,146 +104,6 @@ impl App {
         Ok::<(), anyhow::Error>(())
     }

-    #[instrument(skip(self))]
-    async fn restore_or_initialize_tree(
-        &self,
-        initial_root_hash: Hash,
-    ) -> anyhow::Result<TreeState> {
-        let mut mined_items = self
-            .database
-            .get_commitments_by_status(ProcessedStatus::Mined)
-            .await?;
-
-        mined_items.sort_by_key(|item| item.leaf_index);
-
-        if !self.config.tree.force_cache_purge {
-            info!("Attempting to restore tree from cache");
-            if let Some(tree_state) = self
-                .get_cached_tree_state(mined_items.clone(), initial_root_hash)
-                .await?
-            {
-                info!("tree restored from cache");
-                return Ok(tree_state);
-            }
-        }
-
-        info!("Initializing tree from the database");
-        let tree_state = self.initialize_tree(mined_items).await?;
-
-        info!("tree initialization successful");
-
-        Ok(tree_state)
-    }
-
-    pub fn get_leftover_leaves_and_update_index(
-        index: &mut Option<usize>,
-        dense_prefix_depth: usize,
-        mined_items: &[TreeUpdate],
-    ) -> Vec<ruint::Uint<256, 4>> {
-        let leftover_items = if mined_items.is_empty() {
-            vec![]
-        } else {
-            let max_leaf = mined_items.last().map(|item| item.leaf_index).unwrap();
-            // if the last index is greater then dense_prefix_depth, 1 << dense_prefix_depth
-            // should be the last index in restored tree
-            let last_index = std::cmp::min(max_leaf, (1 << dense_prefix_depth) - 1);
-            *index = Some(last_index);
-
-            if max_leaf - last_index == 0 {
-                return vec![];
-            }
-
-            let mut leaves = Vec::with_capacity(max_leaf - last_index);
-
-            let leftover = &mined_items[(last_index + 1)..];
-
-            for item in leftover {
-                leaves.push(item.element);
-            }
-
-            leaves
-        };
-
-        leftover_items
-    }
-
-    async fn get_cached_tree_state(
-        &self,
-        mined_items: Vec<TreeUpdate>,
-        initial_root_hash: Hash,
-    ) -> anyhow::Result<Option<TreeState>> {
-        let mined_items = dedup_tree_updates(mined_items);
-
-        let mut last_mined_index_in_dense: Option<usize> = None;
-        let leftover_items = Self::get_leftover_leaves_and_update_index(
-            &mut last_mined_index_in_dense,
-            self.config.tree.dense_tree_prefix_depth,
-            &mined_items,
-        );
-
-        let Some(mined_builder) = CanonicalTreeBuilder::restore(
-            self.identity_manager.tree_depth(),
-            self.config.tree.dense_tree_prefix_depth,
-            &self.identity_manager.initial_leaf_value(),
-            last_mined_index_in_dense,
-            &leftover_items,
-            self.config.tree.tree_gc_threshold,
-            &self.config.tree.cache_file,
-        ) else {
-            return Ok(None);
-        };
-
-        let (mined, mut processed_builder) = mined_builder.seal();
-
-        match self
-            .database
-            .get_latest_root_by_status(ProcessedStatus::Mined)
-            .await?
-        {
-            Some(root) => {
-                if !mined.get_root().eq(&root) {
-                    return Ok(None);
-                }
-            }
-            None => {
-                if !mined.get_root().eq(&initial_root_hash) {
-                    return Ok(None);
-                }
-            }
-        }
-
-        let processed_items = self
-            .database
-            .get_commitments_by_status(ProcessedStatus::Processed)
-            .await?;
-
-        for processed_item in processed_items {
-            processed_builder.update(&processed_item);
-        }
-
-        let (processed, batching_builder) = processed_builder.seal_and_continue();
-        let (batching, mut latest_builder) = batching_builder.seal_and_continue();
-
-        let pending_items = self
-            .database
-            .get_commitments_by_status(ProcessedStatus::Pending)
-            .await?;
-        for update in pending_items {
-            latest_builder.update(&update);
-        }
-        let latest = latest_builder.seal();
-
-        let batch = self.database.get_latest_batch().await?;
-        if let Some(batch) = batch {
-            if batching.get_root() != batch.next_root {
-                batching.apply_updates_up_to(batch.next_root);
-            }
-            assert_eq!(batching.get_root(), batch.next_root);
-        }
-
-        Ok(Some(TreeState::new(mined, processed, batching, latest)))
-    }
-
     pub fn tree_state(&self) -> anyhow::Result<&TreeState> {
         Ok(self
             .tree_state
@@ -292,93 +111,6 @@ impl App {
             .ok_or(ServerError::TreeStateUninitialized)?)
     }

-    #[instrument(skip_all)]
-    async fn initialize_tree(&self, mined_items: Vec<TreeUpdate>) -> anyhow::Result<TreeState> {
-        // Flatten the updates for initial leaves
-        tracing::info!("Deduplicating mined items");
-
-        let mined_items = dedup_tree_updates(mined_items);
-        let initial_leaf_value = self.identity_manager.initial_leaf_value();
-
-        let initial_leaves = if mined_items.is_empty() {
-            vec![]
-        } else {
-            let max_leaf = mined_items.last().map(|item| item.leaf_index).unwrap();
-            let mut leaves = vec![initial_leaf_value; max_leaf + 1];
-
-            for item in mined_items {
-                leaves[item.leaf_index] = item.element;
-            }
-
-            leaves
-        };
-
-        tracing::info!("Creating mined tree");
-        let tree_depth = self.identity_manager.tree_depth();
-        let dense_tree_prefix_depth = self.config.tree.dense_tree_prefix_depth;
-        let tree_gc_threshold = self.config.tree.tree_gc_threshold;
-        let cache_file = self.config.tree.cache_file.clone();
-
-        let mined_builder = tokio::task::spawn_blocking(move || {
-            CanonicalTreeBuilder::new(
-                tree_depth,
-                dense_tree_prefix_depth,
-                tree_gc_threshold,
-                initial_leaf_value,
-                &initial_leaves,
-                &cache_file,
-            )
-        })
-        .await?;
-
-        let (mined, mut processed_builder) = mined_builder.seal();
-
-        let processed_items = self
-            .database
-            .get_commitments_by_status(ProcessedStatus::Processed)
-            .await?;
-
-        tracing::info!("Updating processed tree");
-        let processed_builder = tokio::task::spawn_blocking(move || {
-            for processed_item in processed_items {
-                processed_builder.update(&processed_item);
-            }
-
-            processed_builder
-        })
-        .await?;
-
-        let (processed, batching_builder) = processed_builder.seal_and_continue();
-        let (batching, mut latest_builder) = batching_builder.seal_and_continue();
-
-        let pending_items = self
-            .database
-            .get_commitments_by_status(ProcessedStatus::Pending)
-            .await?;
-
-        tracing::info!("Updating latest tree");
-        let latest_builder = tokio::task::spawn_blocking(move || {
-            for update in pending_items {
-                latest_builder.update(&update);
-            }
-
-            latest_builder
-        })
-        .await?;
-
-        let latest = latest_builder.seal();
-
-        let batch = self.database.get_latest_batch().await?;
-        if let Some(batch) = batch {
-            if batching.get_root() != batch.next_root {
-                batching.apply_updates_up_to(batch.next_root);
-            }
-            assert_eq!(batching.get_root(), batch.next_root);
-        }
-
-        Ok(TreeState::new(mined, processed, batching, latest))
-    }
-
     /// Queues an insert into the merkle tree.
     ///
     /// # Errors
@@ -401,7 +133,7 @@
             return Err(ServerError::NoProversOnIdInsert);
         }

-        if !self.identity_is_reduced(commitment) {
+        if !self.identity_validator.identity_is_reduced(commitment) {
             warn!(
                 ?commitment,
                 "The provided commitment is not an element of the field."
@@ -516,7 +248,7 @@
             return Err(ServerError::NoProversOnIdInsert);
         }

-        if !self.identity_is_reduced(*new_commitment) {
+        if !self.identity_validator.identity_is_reduced(*new_commitment) {
             warn!(
                 ?new_commitment,
                 "The new identity commitment is not reduced."
@@ -563,10 +295,6 @@
         env_provers
     }

-    fn identity_is_reduced(&self, commitment: Hash) -> bool {
-        commitment.lt(&self.snark_scalar_field)
-    }
-
     /// # Errors
     ///
     /// Will return `Err` if the provided batch size already exists.
@@ -753,99 +481,3 @@
             }
         }
     }
 }
-
-#[cfg(test)]
-mod test {
-    use ethers::prelude::rand;
-    use ethers::types::U256;
-    use ruint::Uint;
-
-    use super::App;
-    use crate::identity_tree::TreeUpdate;
-
-    pub fn generate_test_identities_with_index(identity_count: usize) -> Vec<TreeUpdate> {
-        let mut identities = vec![];
-
-        for i in 1..=identity_count {
-            let bytes: [u8; 32] = U256::from(rand::random::<u64>()).into();
-            let identity = Uint::<256, 4>::from_le_bytes(bytes);
-
-            identities.push(TreeUpdate {
-                leaf_index: i,
-                element: identity,
-            });
-        }
-
-        identities
-    }
-
-    #[tokio::test]
-    async fn test_index_logic_for_cached_tree() -> anyhow::Result<()> {
-        // supports 8 identities (2^3)
-        let dense_prefix_depth: usize = 3;
-
-        let less_identities_count = 2usize.pow(dense_prefix_depth.try_into().unwrap()) - 2;
-        let more_identities_count = 2usize.pow(dense_prefix_depth.try_into().unwrap()) + 2;
-
-        // test if empty case is handled correctly (it means no last mined index as no
-        // indecies at all)
-        let identities: Vec<TreeUpdate> = vec![];
-
-        let mut last_mined_index_in_dense: Option<usize> = None;
-        let leaves = App::get_leftover_leaves_and_update_index(
-            &mut last_mined_index_in_dense,
-            dense_prefix_depth,
-            &identities,
-        );
-
-        // check if the index is correct
-        assert_eq!(last_mined_index_in_dense, None);
-
-        // since there are no identities at all the leaves should be 0
-        assert_eq!(leaves.len(), 0);
-
-        // first test with less then dense prefix
-        let identities = generate_test_identities_with_index(less_identities_count);
-
-        last_mined_index_in_dense = None;
-
-        let leaves = App::get_leftover_leaves_and_update_index(
-            &mut last_mined_index_in_dense,
-            dense_prefix_depth,
-            &identities,
-        );
-
-        // check if the index is correct
-        assert_eq!(last_mined_index_in_dense, Some(identities.len()));
-        // since there are less identities then dense prefix, the leavs should be empty
-        // vector
-        assert!(leaves.is_empty());
-
-        // lets try now with more identities then dense prefix supports
-
-        // this should generate 2^dense_prefix + 2
-        let identities = generate_test_identities_with_index(more_identities_count);
-
-        last_mined_index_in_dense = None;
-        let leaves = App::get_leftover_leaves_and_update_index(
-            &mut last_mined_index_in_dense,
-            dense_prefix_depth,
-            &identities,
-        );
-
-        // check if the index is correct
-        assert_eq!(
-            last_mined_index_in_dense,
-            Some((1 << dense_prefix_depth) - 1)
-        );
-
-        // since there are more identities then dense prefix, the leavs should be 2
-        assert_eq!(leaves.len(), 2);
-
-        // additional check for correctness
-        assert_eq!(leaves[0], identities[8].element);
-        assert_eq!(leaves[1], identities[9].element);
-
-        Ok(())
-    }
-}
diff --git a/src/contracts/mod.rs b/src/contracts/mod.rs
index 5a7ad913..7f76f9c2 100644
--- a/src/contracts/mod.rs
+++ b/src/contracts/mod.rs
@@ -2,8 +2,6 @@
 pub mod abi;
 pub mod scanner;

-use std::sync::Arc;
-
 use anyhow::{anyhow, Context};
 use ethers::providers::Middleware;
 use ethers::types::{H256, U256};
@@ -470,6 +468,3 @@ impl IdentityManager {
         self.deletion_prover_map.read().await.len() > 0
     }
 }
-
-/// A type for an identity manager object that can be sent across threads.
-pub type SharedIdentityManager = Arc<IdentityManager>;
diff --git a/src/identity/mod.rs b/src/identity/mod.rs
new file mode 100644
index 00000000..fa199f24
--- /dev/null
+++ b/src/identity/mod.rs
@@ -0,0 +1 @@
+pub mod validator;
diff --git a/src/identity/validator.rs b/src/identity/validator.rs
new file mode 100644
index 00000000..bd6c9cf4
--- /dev/null
+++ b/src/identity/validator.rs
@@ -0,0 +1,32 @@
+use ruint::uint;
+use semaphore::Field;
+
+use crate::identity_tree::Hash;
+
+// See
+pub const MODULUS: Field =
+    uint!(21888242871839275222246405745257275088548364400416034343698204186575808495617_U256);
+
+pub struct IdentityValidator {
+    snark_scalar_field: Hash,
+}
+
+// TODO Export the reduced-ness check that this is enabling from the
+// `semaphore-rs` library when we bump the version.
+impl IdentityValidator {
+    pub fn new() -> Self {
+        Self {
+            snark_scalar_field: Hash::from(MODULUS),
+        }
+    }
+
+    pub fn identity_is_reduced(&self, commitment: Hash) -> bool {
+        commitment.lt(&self.snark_scalar_field)
+    }
+}
+
+impl Default for IdentityValidator {
+    fn default() -> Self {
+        Self::new()
+    }
+}
diff --git a/src/identity_tree/initializer.rs b/src/identity_tree/initializer.rs
new file mode 100644
index 00000000..a78ceed2
--- /dev/null
+++ b/src/identity_tree/initializer.rs
@@ -0,0 +1,406 @@
+use std::sync::Arc;
+use std::time::Instant;
+
+use semaphore::poseidon_tree::LazyPoseidonTree;
+use tracing::{info, instrument, warn};
+
+use crate::config::TreeConfig;
+use crate::contracts::IdentityManager;
+use crate::database::query::DatabaseQuery;
+use crate::database::Database;
+use crate::identity_tree::{
+    CanonicalTreeBuilder, Hash, ProcessedStatus, TreeState, TreeUpdate, TreeVersionReadOps,
+    TreeWithNextVersion,
+};
+use crate::utils::tree_updates::dedup_tree_updates;
+
+pub struct TreeInitializer {
+    pub database: Arc<Database>,
+    pub identity_manager: Arc<IdentityManager>,
+    pub config: TreeConfig,
+}
+
+impl TreeInitializer {
+    pub fn new(
+        database: Arc<Database>,
+        identity_manager: Arc<IdentityManager>,
+        config: TreeConfig,
+    ) -> Self {
+        Self {
+            database,
+            identity_manager,
+            config,
+        }
+    }
+
+    /// Initializes the tree state. This should only ever be called once.
+    /// Attempts to call this method more than once will result in a panic.
+    pub async fn run(self) -> anyhow::Result<TreeState> {
+        // Await all pending transactions
+        self.identity_manager.await_clean_slate().await?;
+
+        // Prefetch latest root & mark it as mined
+        let root_hash = self.identity_manager.latest_root().await?;
+        let root_hash = root_hash.into();
+
+        let initial_root_hash = LazyPoseidonTree::new(
+            self.identity_manager.tree_depth(),
+            self.identity_manager.initial_leaf_value(),
+        )
+        .root();
+
+        // We don't store the initial root in the database, so we have to skip this step
+        // if the contract root hash is equal to initial root hash
+        if root_hash != initial_root_hash {
+            // Note that we don't have a way of queuing a root here for finalization,
+            // so it's going to stay as "processed" until the next root is mined.
+            self.database.mark_root_as_processed_tx(&root_hash).await?;
+            self.database.delete_batches_after_root(&root_hash).await?;
+        } else {
+            // Db is either empty or we're restarting with a new contract/chain
+            // so we should mark everything as pending
+            self.database.mark_all_as_pending().await?;
+            self.database.delete_all_batches().await?;
+        }
+
+        let timer = Instant::now();
+        let mut tree_state = self.restore_or_initialize_tree(initial_root_hash).await?;
+        info!("Tree state initialization took: {:?}", timer.elapsed());
+
+        let tree_root = tree_state.get_processed_tree().get_root();
+
+        if tree_root != initial_root_hash {
+            warn!(
+                "Cached tree root is different from the contract root. Purging cache and \
+                 reinitializing."
+            );
+
+            tree_state = self.restore_or_initialize_tree(initial_root_hash).await?;
+        }
+
+        Ok(tree_state)
+    }
+
+    #[instrument(skip(self))]
+    async fn restore_or_initialize_tree(
+        &self,
+        initial_root_hash: Hash,
+    ) -> anyhow::Result<TreeState> {
+        let mut mined_items = self
+            .database
+            .get_commitments_by_status(ProcessedStatus::Mined)
+            .await?;
+
+        mined_items.sort_by_key(|item| item.leaf_index);
+
+        if !self.config.force_cache_purge {
+            info!("Attempting to restore tree from cache");
+            if let Some(tree_state) = self
+                .get_cached_tree_state(mined_items.clone(), initial_root_hash)
+                .await?
+            {
+                info!("tree restored from cache");
+                return Ok(tree_state);
+            }
+        }
+
+        info!("Initializing tree from the database");
+        let tree_state = self.initialize_tree(mined_items).await?;
+
+        info!("tree initialization successful");
+
+        Ok(tree_state)
+    }
+
+    pub fn get_leftover_leaves_and_update_index(
+        index: &mut Option<usize>,
+        dense_prefix_depth: usize,
+        mined_items: &[TreeUpdate],
+    ) -> Vec<ruint::Uint<256, 4>> {
+        let leftover_items = if mined_items.is_empty() {
+            vec![]
+        } else {
+            let max_leaf = mined_items.last().map(|item| item.leaf_index).unwrap();
+            // if the last index is greater than dense_prefix_depth, 1 << dense_prefix_depth
+            // should be the last index in the restored tree
+            let last_index = std::cmp::min(max_leaf, (1 << dense_prefix_depth) - 1);
+            *index = Some(last_index);
+
+            if max_leaf - last_index == 0 {
+                return vec![];
+            }
+
+            let mut leaves = Vec::with_capacity(max_leaf - last_index);
+
+            let leftover = &mined_items[(last_index + 1)..];
+
+            for item in leftover {
+                leaves.push(item.element);
+            }
+
+            leaves
+        };
+
+        leftover_items
+    }
+
+    async fn get_cached_tree_state(
+        &self,
+        mined_items: Vec<TreeUpdate>,
+        initial_root_hash: Hash,
+    ) -> anyhow::Result<Option<TreeState>> {
+        let mined_items = dedup_tree_updates(mined_items);
+
+        let mut last_mined_index_in_dense: Option<usize> = None;
+        let leftover_items = Self::get_leftover_leaves_and_update_index(
+            &mut last_mined_index_in_dense,
+            self.config.dense_tree_prefix_depth,
+            &mined_items,
+        );
+
+        let Some(mined_builder) = CanonicalTreeBuilder::restore(
+            self.identity_manager.tree_depth(),
+            self.config.dense_tree_prefix_depth,
+            &self.identity_manager.initial_leaf_value(),
+            last_mined_index_in_dense,
+            &leftover_items,
+            self.config.tree_gc_threshold,
+            &self.config.cache_file,
+        ) else {
+            return Ok(None);
+        };
+
+        let (mined, mut processed_builder) = mined_builder.seal();
+
+        match self
+            .database
+            .get_latest_root_by_status(ProcessedStatus::Mined)
+            .await?
+        {
+            Some(root) => {
+                if !mined.get_root().eq(&root) {
+                    return Ok(None);
+                }
+            }
+            None => {
+                if !mined.get_root().eq(&initial_root_hash) {
+                    return Ok(None);
+                }
+            }
+        }
+
+        let processed_items = self
+            .database
+            .get_commitments_by_status(ProcessedStatus::Processed)
+            .await?;
+
+        for processed_item in processed_items {
+            processed_builder.update(&processed_item);
+        }
+
+        let (processed, batching_builder) = processed_builder.seal_and_continue();
+        let (batching, mut latest_builder) = batching_builder.seal_and_continue();
+
+        let pending_items = self
+            .database
+            .get_commitments_by_status(ProcessedStatus::Pending)
+            .await?;
+        for update in pending_items {
+            latest_builder.update(&update);
+        }
+        let latest = latest_builder.seal();
+
+        let batch = self.database.get_latest_batch().await?;
+        if let Some(batch) = batch {
+            if batching.get_root() != batch.next_root {
+                batching.apply_updates_up_to(batch.next_root);
+            }
+            assert_eq!(batching.get_root(), batch.next_root);
+        }
+
+        Ok(Some(TreeState::new(mined, processed, batching, latest)))
+    }
+
+    #[instrument(skip_all)]
+    async fn initialize_tree(&self, mined_items: Vec<TreeUpdate>) -> anyhow::Result<TreeState> {
+        // Flatten the updates for initial leaves
+        info!("Deduplicating mined items");
+
+        let mined_items = dedup_tree_updates(mined_items);
+        let initial_leaf_value = self.identity_manager.initial_leaf_value();
+
+        let initial_leaves = if mined_items.is_empty() {
+            vec![]
+        } else {
+            let max_leaf = mined_items.last().map(|item| item.leaf_index).unwrap();
+            let mut leaves = vec![initial_leaf_value; max_leaf + 1];
+
+            for item in mined_items {
+                leaves[item.leaf_index] = item.element;
+            }
+
+            leaves
+        };
+
+        info!("Creating mined tree");
+        let tree_depth = self.identity_manager.tree_depth();
+        let dense_tree_prefix_depth = self.config.dense_tree_prefix_depth;
+        let tree_gc_threshold = self.config.tree_gc_threshold;
+        let cache_file = self.config.cache_file.clone();
+
+        let mined_builder = tokio::task::spawn_blocking(move || {
+            CanonicalTreeBuilder::new(
+                tree_depth,
+                dense_tree_prefix_depth,
+                tree_gc_threshold,
+                initial_leaf_value,
+                &initial_leaves,
+                &cache_file,
+            )
+        })
+        .await?;
+
+        let (mined, mut processed_builder) = mined_builder.seal();
+
+        let processed_items = self
+            .database
+            .get_commitments_by_status(ProcessedStatus::Processed)
+            .await?;
+
+        info!("Updating processed tree");
+        let processed_builder = tokio::task::spawn_blocking(move || {
+            for processed_item in processed_items {
+                processed_builder.update(&processed_item);
+            }
+
+            processed_builder
+        })
+        .await?;
+
+        let (processed, batching_builder) = processed_builder.seal_and_continue();
+        let (batching, mut latest_builder) = batching_builder.seal_and_continue();
+
+        let pending_items = self
+            .database
+            .get_commitments_by_status(ProcessedStatus::Pending)
+            .await?;
+
+        info!("Updating latest tree");
+        let latest_builder = tokio::task::spawn_blocking(move || {
+            for update in pending_items {
+                latest_builder.update(&update);
+            }
+
+            latest_builder
+        })
+        .await?;
+
+        let latest = latest_builder.seal();
+
+        let batch = self.database.get_latest_batch().await?;
+        if let Some(batch) = batch {
+            if batching.get_root() != batch.next_root {
+                batching.apply_updates_up_to(batch.next_root);
+            }
+            assert_eq!(batching.get_root(), batch.next_root);
+        }
+
+        Ok(TreeState::new(mined, processed, batching, latest))
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use ethers::prelude::rand;
+    use ethers::types::U256;
+    use ruint::Uint;
+
+    use crate::identity_tree::initializer::TreeInitializer;
+    use crate::identity_tree::TreeUpdate;
+
+    pub fn generate_test_identities_with_index(identity_count: usize) -> Vec<TreeUpdate> {
+        let mut identities = vec![];
+
+        for i in 1..=identity_count {
+            let bytes: [u8; 32] = U256::from(rand::random::<u64>()).into();
+            let identity = Uint::<256, 4>::from_le_bytes(bytes);
+
+            identities.push(TreeUpdate {
+                leaf_index: i,
+                element: identity,
+            });
+        }
+
+        identities
+    }
+
+    #[tokio::test]
+    async fn test_index_logic_for_cached_tree() -> anyhow::Result<()> {
+        // supports 8 identities (2^3)
+        let dense_prefix_depth: usize = 3;
+
+        let less_identities_count = 2usize.pow(dense_prefix_depth.try_into().unwrap()) - 2;
+        let more_identities_count = 2usize.pow(dense_prefix_depth.try_into().unwrap()) + 2;
+
+        // test if the empty case is handled correctly (no last mined index because
+        // there are no indices at all)
+        let identities: Vec<TreeUpdate> = vec![];
+
+        let mut last_mined_index_in_dense: Option<usize> = None;
+        let leaves = TreeInitializer::get_leftover_leaves_and_update_index(
+            &mut last_mined_index_in_dense,
+            dense_prefix_depth,
+            &identities,
+        );
+
+        // check if the index is correct
+        assert_eq!(last_mined_index_in_dense, None);
+
+        // since there are no identities at all, the leaves vector should be empty
+        assert_eq!(leaves.len(), 0);
+
+        // first, test with fewer identities than the dense prefix supports
+        let identities = generate_test_identities_with_index(less_identities_count);
+
+        last_mined_index_in_dense = None;
+
+        let leaves = TreeInitializer::get_leftover_leaves_and_update_index(
+            &mut last_mined_index_in_dense,
+            dense_prefix_depth,
+            &identities,
+        );
+
+        // check if the index is correct
+        assert_eq!(last_mined_index_in_dense, Some(identities.len()));
+        // since there are fewer identities than the dense prefix holds, the leaves
+        // should be an empty vector
+        assert!(leaves.is_empty());
+
+        // now let's try with more identities than the dense prefix supports
+
+        // this should generate 2^dense_prefix + 2
+        let identities = generate_test_identities_with_index(more_identities_count);
+
+        last_mined_index_in_dense = None;
+        let leaves = TreeInitializer::get_leftover_leaves_and_update_index(
+            &mut last_mined_index_in_dense,
+            dense_prefix_depth,
+            &identities,
+        );
+
+        // check if the index is correct
+        assert_eq!(
+            last_mined_index_in_dense,
+            Some((1 << dense_prefix_depth) - 1)
+        );
+
+        // since there are more identities than the dense prefix holds, there should be 2 leaves
+        assert_eq!(leaves.len(), 2);
+
+        // additional check for correctness
+        assert_eq!(leaves[0], identities[8].element);
+        assert_eq!(leaves[1], identities[9].element);
+
+        Ok(())
+    }
+}
diff --git a/src/identity_tree.rs b/src/identity_tree/mod.rs
similarity index 99%
rename from src/identity_tree.rs
rename to src/identity_tree/mod.rs
index 78d86cba..2b32dcef 100644
--- a/src/identity_tree.rs
+++ b/src/identity_tree/mod.rs
@@ -10,6 +10,7 @@ use serde::{Deserialize, Serialize};
 use sqlx::prelude::FromRow;
 use tracing::{info, warn};

+pub mod initializer;
 mod status;

 pub type PoseidonTree<Version> = LazyMerkleTree<PoseidonHash, Version>;
diff --git a/src/lib.rs b/src/lib.rs
index 0ca28a98..5243ee6f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,6 +7,8 @@ pub mod config;
 mod contracts;
 mod database;
 mod ethereum;
+
+mod identity;
 pub mod identity_tree;
 pub mod prover;
 pub mod server;
diff --git a/src/prover.rs b/src/prover/mod.rs
similarity index 100%
rename from src/prover.rs
rename to src/prover/mod.rs
diff --git a/src/server.rs b/src/server/mod.rs
similarity index 100%
rename from src/server.rs
rename to src/server/mod.rs
diff --git a/src/task_monitor.rs b/src/task_monitor/mod.rs
similarity index 100%
rename from src/task_monitor.rs
rename to src/task_monitor/mod.rs
diff --git a/src/utils.rs b/src/utils/mod.rs
similarity index 100%
rename from src/utils.rs
rename to src/utils/mod.rs
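
Reviewer note (not part of the diff above): a minimal, hypothetical sketch of how the extracted IdentityValidator is consumed from inside the crate, using only the API introduced in src/identity/validator.rs; the helper name and the rejection branch are placeholders for whatever the call site already has (e.g. App::insert_identity returning a ServerError).

use crate::identity::validator::IdentityValidator;
use crate::identity_tree::Hash;

// Returns true only when the commitment is a reduced element of the SNARK scalar field,
// mirroring the checks that insert_identity and update_identity now delegate to.
fn commitment_is_acceptable(commitment: Hash) -> bool {
    // App::new stores one validator (via Default::default()) and reuses it;
    // constructing it inline here behaves the same way.
    let identity_validator = IdentityValidator::new();
    identity_validator.identity_is_reduced(commitment)
}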