diff --git a/Readme.md b/Readme.md index 2f2d4139..3b6c1e38 100644 --- a/Readme.md +++ b/Readme.md @@ -31,14 +31,16 @@ Sequencer has 6 API routes. 2. `/inclusionProof` - Takes the identity commitment hash, and checks for any errors that might have occurred in the insert identity steps. Then the leaf index corresponding to the provided identity hash is fetched from the database, and we check if the identity is indeed in the tree. The inclusion proof is then returned to the API caller. -3. `/verifySemaphoreProof` - This call takes root, signal hash, nullifier hash, external nullifier hash and a proof. +3. `/deleteIdentity` - Takes an identity commitment hash, ensures that it exists and hasn't been deleted yet. This identity is then scheduled for deletion. +4. `/recoverIdentity` - Takes two identity commitment hashes. The first must already exist and is scheduled for deletion; the second is inserted as a replacement once the first identity has been deleted and a configurable amount of time has passed. +5. `/verifySemaphoreProof` - This call takes root, signal hash, nullifier hash, external nullifier hash and a proof. The proving key is fetched based on the depth index, and verification key as well. The list of prime fields is created based on request input mentioned before, and then we proceed to verify the proof. Sequencer uses groth16 zk-SNARK implementation. The API call returns the proof as response. -4. `/addBatchSize` - Adds a prover with specific batch size to a list of provers. -5. `/removeBatchSize` - Removes the prover based on batch size. -6. `/listBatchSizes` - Lists all provers that are added to the Sequencer. +6. `/addBatchSize` - Adds a prover with specific batch size to a list of provers. +7. `/removeBatchSize` - Removes the prover based on batch size. +8. `/listBatchSizes` - Lists all provers that are added to the Sequencer.
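To make the route list above concrete, here is a minimal client-side sketch of polling `/inclusionProof`. The base URL and the JSON field name `identityCommitment` are assumptions made for illustration only (this change does not document the request schema); the mapping from identity status to HTTP code (202 for new/pending, 200 for processed/mined, 400 for failed) mirrors the `ToResponseCode` implementation updated below.

```rust
use serde_json::json;

// Hypothetical client for the sequencer's /inclusionProof route.
// The URL and the JSON field name below are assumptions for illustration only.
async fn poll_inclusion_proof(commitment: &str) -> anyhow::Result<()> {
    let client = reqwest::Client::new();
    let resp = client
        .post("http://localhost:8080/inclusionProof") // assumed sequencer address
        .json(&json!({ "identityCommitment": commitment })) // assumed field name
        .send()
        .await?;

    match resp.status().as_u16() {
        // 202 Accepted: the identity is known, but its root is still "new" or "pending"
        202 => println!("identity queued; proof not final yet"),
        // 200 OK: the root is "processed" or "mined"; the body carries the proof
        200 => {
            let body: serde_json::Value = resp.json().await?;
            println!("root: {}, proof: {}", body["root"], body["proof"]);
        }
        // 400 Bad Request: the identity failed to be inserted; the body has an error message
        400 => eprintln!("insertion failed: {}", resp.text().await?),
        other => eprintln!("unexpected HTTP status: {other}"),
    }

    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Hypothetical commitment hash; in practice this is the value sent to /insertIdentity.
    poll_inclusion_proof("0x0123...").await
}
```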
diff --git a/src/app.rs b/src/app.rs index 82507b1f..62f14b16 100644 --- a/src/app.rs +++ b/src/app.rs @@ -16,8 +16,8 @@ use crate::contracts::{IdentityManager, SharedIdentityManager}; use crate::database::{self, Database}; use crate::ethereum::{self, Ethereum}; use crate::identity_tree::{ - CanonicalTreeBuilder, Hash, InclusionProof, RootItem, Status, TreeState, TreeUpdate, - TreeVersionReadOps, + CanonicalTreeBuilder, Hash, InclusionProof, ProcessedStatus, RootItem, Status, TreeState, + TreeUpdate, TreeVersionReadOps, UnprocessedStatus, }; use crate::prover::map::initialize_prover_maps; use crate::prover::{self, ProverConfiguration, ProverType, Provers}; @@ -34,8 +34,8 @@ pub struct InclusionProofResponse(InclusionProof); impl InclusionProofResponse { #[must_use] pub fn hide_processed_status(mut self) -> Self { - self.0.status = if self.0.status == Status::Processed { - Status::Pending + self.0.status = if self.0.status == Status::Processed(ProcessedStatus::Processed) { + Status::Processed(ProcessedStatus::Pending) } else { self.0.status }; @@ -53,9 +53,12 @@ impl From for InclusionProofResponse { impl ToResponseCode for InclusionProofResponse { fn to_response_code(&self) -> StatusCode { match self.0.status { - Status::Failed => StatusCode::BAD_REQUEST, - Status::New | Status::Pending => StatusCode::ACCEPTED, - Status::Mined | Status::Processed => StatusCode::OK, + Status::Unprocessed(UnprocessedStatus::Failed) => StatusCode::BAD_REQUEST, + Status::Unprocessed(UnprocessedStatus::New) + | Status::Processed(ProcessedStatus::Pending) => StatusCode::ACCEPTED, + Status::Processed(ProcessedStatus::Mined | ProcessedStatus::Processed) => { + StatusCode::OK + } } } } @@ -83,8 +86,8 @@ pub struct VerifySemaphoreProofResponse(RootItem); impl VerifySemaphoreProofResponse { #[must_use] pub fn hide_processed_status(mut self) -> Self { - self.0.status = if self.0.status == Status::Processed { - Status::Pending + self.0.status = if self.0.status == ProcessedStatus::Processed { + ProcessedStatus::Pending } else { self.0.status }; @@ -251,7 +254,9 @@ impl App { initial_root_hash: Hash, mmap_file_path: String, ) -> AnyhowResult { - let mut mined_items = database.get_commitments_by_status(Status::Mined).await?; + let mut mined_items = database + .get_commitments_by_status(ProcessedStatus::Mined) + .await?; mined_items.sort_by_key(|item| item.leaf_index); if let Some(tree_state) = Self::get_cached_tree_state( @@ -347,7 +352,10 @@ impl App { let (mined, mut processed_builder) = mined_builder.seal(); - match database.get_latest_root_by_status(Status::Mined).await? { + match database + .get_latest_root_by_status(ProcessedStatus::Mined) + .await? 
+ { Some(root) => { if !mined.get_root().eq(&root) { return Ok(None); @@ -361,7 +369,7 @@ impl App { } let processed_items = database - .get_commitments_by_status(Status::Processed) + .get_commitments_by_status(ProcessedStatus::Processed) .await?; for processed_item in processed_items { @@ -370,7 +378,9 @@ impl App { let (processed, batching_builder) = processed_builder.seal_and_continue(); let (batching, mut latest_builder) = batching_builder.seal_and_continue(); - let pending_items = database.get_commitments_by_status(Status::Pending).await?; + let pending_items = database + .get_commitments_by_status(ProcessedStatus::Pending) + .await?; for update in pending_items { latest_builder.update(&update); } @@ -415,7 +425,7 @@ impl App { let (mined, mut processed_builder) = mined_builder.seal(); let processed_items = database - .get_commitments_by_status(Status::Processed) + .get_commitments_by_status(ProcessedStatus::Processed) .await?; for processed_item in processed_items { @@ -425,7 +435,9 @@ impl App { let (processed, batching_builder) = processed_builder.seal_and_continue(); let (batching, mut latest_builder) = batching_builder.seal_and_continue(); - let pending_items = database.get_commitments_by_status(Status::Pending).await?; + let pending_items = database + .get_commitments_by_status(ProcessedStatus::Pending) + .await?; for update in pending_items { latest_builder.update(&update); } @@ -535,12 +547,14 @@ impl App { Ok(()) } - /// Queues a deletion from the merkle tree. + /// Queues a recovery of an identity. + /// + /// i.e. deletion and reinsertion after a set period of time. /// /// # Errors /// - /// Will return `Err` if identity is already queued, not in the tree, or the - /// queue malfunctions. + /// Will return `Err` if identity is already queued for deletion, not in the + /// tree, or the queue malfunctions. #[instrument(level = "debug", skip(self))] pub async fn recover_identity( &self, @@ -670,9 +684,9 @@ impl App { .await? 
{ return Ok(InclusionProofResponse(InclusionProof { - status, - root: None, - proof: None, + status: status.into(), + root: None, + proof: None, message: Some(error_message), })); } @@ -743,17 +757,22 @@ impl App { match root_state.status { // Pending status implies the batching or latest tree - Status::Pending if latest_root == root || batching_root == root => return Ok(()), + ProcessedStatus::Pending if latest_root == root || batching_root == root => { + return Ok(()) + } // Processed status is hidden - this should never happen - Status::Processed if processed_root == root => return Ok(()), + ProcessedStatus::Processed if processed_root == root => return Ok(()), // Processed status is hidden so it could be either processed or mined - Status::Mined if processed_root == root || mined_root == root => return Ok(()), + ProcessedStatus::Mined if processed_root == root || mined_root == root => return Ok(()), _ => (), } let now = chrono::Utc::now(); - let root_age = if matches!(root_state.status, Status::Pending | Status::Processed) { + let root_age = if matches!( + root_state.status, + ProcessedStatus::Pending | ProcessedStatus::Processed + ) { now - root_state.pending_valid_as_of } else { let mined_at = root_state diff --git a/src/database/mod.rs b/src/database/mod.rs index 3dfc9d49..98a27748 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -16,7 +16,9 @@ use thiserror::Error; use tracing::{error, info, instrument, warn}; use self::types::{DeletionEntry, LatestDeletionEntry, RecoveryEntry}; -use crate::identity_tree::{Hash, RootItem, Status, TreeItem, TreeUpdate}; +use crate::identity_tree::{ + Hash, ProcessedStatus, RootItem, TreeItem, TreeUpdate, UnprocessedStatus, +}; pub mod types; use crate::prover::{ProverConfiguration, ProverType, Provers}; @@ -149,7 +151,7 @@ impl Database { .bind(leaf_index as i64) .bind(identity) .bind(root) - .bind(<&str>::from(Status::Pending)); + .bind(<&str>::from(ProcessedStatus::Pending)); tx.execute(insert_pending_identity_query).await?; @@ -181,9 +183,9 @@ impl Database { /// Also marks following roots as pending #[instrument(skip(self), level = "debug")] pub async fn mark_root_as_processed(&self, root: &Hash) -> Result<(), Error> { - let mined_status = Status::Mined; - let processed_status = Status::Processed; - let pending_status = Status::Pending; + let mined_status = ProcessedStatus::Mined; + let processed_status = ProcessedStatus::Processed; + let pending_status = ProcessedStatus::Pending; let mut tx = self.pool.begin().await?; @@ -230,7 +232,7 @@ impl Database { /// finalized #[instrument(skip(self), level = "debug")] pub async fn mark_root_as_mined(&self, root: &Hash) -> Result<(), Error> { - let mined_status = Status::Mined; + let mined_status = ProcessedStatus::Mined; let mut tx = self.pool.begin().await?; @@ -308,7 +310,7 @@ impl Database { pub async fn get_commitments_by_status( &self, - status: Status, + status: ProcessedStatus, ) -> Result, Error> { let query = sqlx::query( r#" @@ -331,10 +333,13 @@ impl Database { .collect::>()) } - pub async fn get_latest_root_by_status(&self, status: Status) -> Result, Error> { + pub async fn get_latest_root_by_status( + &self, + status: ProcessedStatus, + ) -> Result, Error> { let query = sqlx::query( r#" - SELECT root FROM identities WHERE status = $1 ORDER BY id DESC LIMIT 1 + SELECT root FROM identities WHERE status = $1 ORDER BY id DESC LIMIT 1 "#, ) .bind(<&str>::from(status)); @@ -411,7 +416,7 @@ impl Database { WHERE status = $1 "#, ) - .bind(<&str>::from(Status::Pending)); + 
.bind(<&str>::from(ProcessedStatus::Pending)); let result = self.pool.fetch_one(query).await?; Ok(result.get::(0) as i32) } @@ -522,7 +527,7 @@ impl Database { "#, ) .bind(identity) - .bind(<&str>::from(Status::New)) + .bind(<&str>::from(UnprocessedStatus::New)) .bind(eligibility_timestamp); self.pool.execute(query).await?; @@ -686,7 +691,7 @@ impl Database { pub async fn get_eligible_unprocessed_commitments( &self, - status: Status, + status: UnprocessedStatus, ) -> Result, Error> { let query = sqlx::query( r#" @@ -716,7 +721,7 @@ impl Database { pub async fn get_unprocessed_commit_status( &self, commitment: &Hash, - ) -> Result, Error> { + ) -> Result, Error> { let query = sqlx::query( r#" SELECT status, error_message FROM unprocessed_identities WHERE commitment = $1 @@ -760,7 +765,7 @@ impl Database { "#, ) .bind(message) - .bind(<&str>::from(Status::Failed)) + .bind(<&str>::from(UnprocessedStatus::Failed)) .bind(commitment); self.pool.execute(query).await?; @@ -820,7 +825,7 @@ mod test { use semaphore::Field; use super::{Database, Options}; - use crate::identity_tree::{Hash, Status}; + use crate::identity_tree::{Hash, ProcessedStatus, UnprocessedStatus}; use crate::prover::{ProverConfiguration, ProverType}; use crate::secret::SecretUrl; @@ -881,7 +886,7 @@ mod test { async fn assert_roots_are( db: &Database, roots: impl IntoIterator, - expected_state: Status, + expected_state: ProcessedStatus, ) -> anyhow::Result<()> { for root in roots { let root = db @@ -915,10 +920,10 @@ mod test { .get_unprocessed_commit_status(&commit_hash) .await? .expect("expected commitment status"); - assert_eq!(commit.0, Status::New); + assert_eq!(commit.0, UnprocessedStatus::New); let identity_count = db - .get_eligible_unprocessed_commitments(Status::New) + .get_eligible_unprocessed_commitments(UnprocessedStatus::New) .await? 
.len(); @@ -1091,7 +1096,9 @@ mod test { db.insert_new_identity(commitment_1, eligibility_timestamp_1) .await?; - let unprocessed_commitments = db.get_eligible_unprocessed_commitments(Status::New).await?; + let unprocessed_commitments = db + .get_eligible_unprocessed_commitments(UnprocessedStatus::New) + .await?; assert_eq!(unprocessed_commitments.len(), 1); assert_eq!(unprocessed_commitments[0].commitment, commitment_0); @@ -1122,7 +1129,9 @@ mod test { db.insert_new_identity(commitment_1, eligibility_timestamp_1) .await?; - let unprocessed_commitments = db.get_eligible_unprocessed_commitments(Status::New).await?; + let unprocessed_commitments = db + .get_eligible_unprocessed_commitments(UnprocessedStatus::New) + .await?; // Assert unprocessed commitments against expected values assert_eq!(unprocessed_commitments.len(), 1); @@ -1163,10 +1172,14 @@ mod test { db.insert_new_identity(commit_hash, eligibility_timestamp) .await?; - let commitments = db.get_eligible_unprocessed_commitments(Status::New).await?; + let commitments = db + .get_eligible_unprocessed_commitments(UnprocessedStatus::New) + .await?; assert_eq!(commitments.len(), 1); - let eligible_commitments = db.get_eligible_unprocessed_commitments(Status::New).await?; + let eligible_commitments = db + .get_eligible_unprocessed_commitments(UnprocessedStatus::New) + .await?; assert_eq!(eligible_commitments.len(), 1); // Set eligibility to Utc::now() + 7 days and check db entries @@ -1179,7 +1192,9 @@ mod test { db.insert_new_identity(commit_hash, eligibility_timestamp) .await?; - let eligible_commitments = db.get_eligible_unprocessed_commitments(Status::New).await?; + let eligible_commitments = db + .get_eligible_unprocessed_commitments(UnprocessedStatus::New) + .await?; assert_eq!(eligible_commitments.len(), 1); Ok(()) @@ -1275,7 +1290,7 @@ mod test { .await? .context("Fetching root state")?; - assert_eq!(root.status, Status::Processed); + assert_eq!(root.status, ProcessedStatus::Processed); } for root in roots.iter().skip(3).take(2) { @@ -1284,7 +1299,7 @@ mod test { .await? .context("Fetching root state")?; - assert_eq!(root.status, Status::Pending); + assert_eq!(root.status, ProcessedStatus::Pending); } let pending_identities = db.count_pending_identities().await?; @@ -1317,7 +1332,7 @@ mod test { .await? .context("Fetching root state")?; - assert_eq!(root.status, Status::Mined); + assert_eq!(root.status, ProcessedStatus::Mined); } for root in roots.iter().skip(3).take(2) { @@ -1326,7 +1341,7 @@ mod test { .await? 
.context("Fetching root state")?; - assert_eq!(root.status, Status::Pending); + assert_eq!(root.status, ProcessedStatus::Pending); } let pending_identities = db.count_pending_identities().await?; @@ -1356,27 +1371,27 @@ mod test { println!("Marking roots up to 2nd as processed"); db.mark_root_as_processed(&roots[2]).await?; - assert_roots_are(&db, &roots[..3], Status::Processed).await?; - assert_roots_are(&db, &roots[3..], Status::Pending).await?; + assert_roots_are(&db, &roots[..3], ProcessedStatus::Processed).await?; + assert_roots_are(&db, &roots[3..], ProcessedStatus::Pending).await?; println!("Marking roots up to 1st as mined"); db.mark_root_as_mined(&roots[1]).await?; - assert_roots_are(&db, &roots[..2], Status::Mined).await?; - assert_roots_are(&db, &[roots[2]], Status::Processed).await?; - assert_roots_are(&db, &roots[3..], Status::Pending).await?; + assert_roots_are(&db, &roots[..2], ProcessedStatus::Mined).await?; + assert_roots_are(&db, &[roots[2]], ProcessedStatus::Processed).await?; + assert_roots_are(&db, &roots[3..], ProcessedStatus::Pending).await?; println!("Marking roots up to 4th as processed"); db.mark_root_as_processed(&roots[4]).await?; - assert_roots_are(&db, &roots[..2], Status::Mined).await?; - assert_roots_are(&db, &roots[2..5], Status::Processed).await?; - assert_roots_are(&db, &roots[5..], Status::Pending).await?; + assert_roots_are(&db, &roots[..2], ProcessedStatus::Mined).await?; + assert_roots_are(&db, &roots[2..5], ProcessedStatus::Processed).await?; + assert_roots_are(&db, &roots[5..], ProcessedStatus::Pending).await?; println!("Marking all roots as mined"); db.mark_root_as_mined(&roots[num_identities - 1]).await?; - assert_roots_are(&db, &roots, Status::Mined).await?; + assert_roots_are(&db, &roots, ProcessedStatus::Mined).await?; Ok(()) } @@ -1406,7 +1421,7 @@ mod test { .await? .context("Fetching root state")?; - assert_eq!(root.status, Status::Processed); + assert_eq!(root.status, ProcessedStatus::Processed); } for root in roots.iter().skip(2).take(3) { @@ -1415,7 +1430,7 @@ mod test { .await? 
.context("Fetching root state")?; - assert_eq!(root.status, Status::Pending); + assert_eq!(root.status, ProcessedStatus::Pending); } let pending_identities = db.count_pending_identities().await?; @@ -1497,8 +1512,12 @@ mod test { db.mark_root_as_processed(&roots[2]).await?; - let mined_tree_updates = db.get_commitments_by_status(Status::Processed).await?; - let pending_tree_updates = db.get_commitments_by_status(Status::Pending).await?; + let mined_tree_updates = db + .get_commitments_by_status(ProcessedStatus::Processed) + .await?; + let pending_tree_updates = db + .get_commitments_by_status(ProcessedStatus::Pending) + .await?; assert_eq!(mined_tree_updates.len(), 3); for i in 0..3 { @@ -1535,7 +1554,9 @@ mod test { db.insert_pending_identity(3, &Hash::ZERO, &zero_roots[3]) .await?; - let pending_tree_updates = db.get_commitments_by_status(Status::Pending).await?; + let pending_tree_updates = db + .get_commitments_by_status(ProcessedStatus::Pending) + .await?; assert_eq!(pending_tree_updates.len(), 7); // 1st identity @@ -1578,7 +1599,9 @@ mod test { .context("Inserting identity")?; } - let pending_tree_updates = db.get_commitments_by_status(Status::Pending).await?; + let pending_tree_updates = db + .get_commitments_by_status(ProcessedStatus::Pending) + .await?; assert_eq!(pending_tree_updates.len(), 5); for i in 0..5 { assert_eq!(pending_tree_updates[i].element, identities[i]); @@ -1607,7 +1630,7 @@ mod test { // Basic scenario, latest pending root let root_item = db.get_root_state(&roots[0]).await?.unwrap(); assert_eq!(roots[0], root_item.root); - assert!(matches!(root_item.status, Status::Pending)); + assert!(matches!(root_item.status, ProcessedStatus::Pending)); assert!(root_item.mined_valid_as_of.is_none()); // Inserting a new pending root sets invalidation time for the @@ -1646,17 +1669,17 @@ mod test { tokio::time::sleep(Duration::from_secs(2)).await; // sleep enough for the database time resolution let root_item_2 = db.get_root_state(&roots[2]).await?.unwrap(); - assert!(matches!(root_item_2.status, Status::Pending)); + assert!(matches!(root_item_2.status, ProcessedStatus::Pending)); assert!(root_item_2.mined_valid_as_of.is_none()); let root_item_1 = db.get_root_state(&roots[1]).await?.unwrap(); - assert_eq!(root_item_1.status, Status::Pending); + assert_eq!(root_item_1.status, ProcessedStatus::Pending); assert!(root_item_1.mined_valid_as_of.is_none()); assert!(root_item_1.pending_valid_as_of < root_2_mined_at); let root_item_0 = db.get_root_state(&roots[0]).await?.unwrap(); assert!(root_item_0.pending_valid_as_of < root_1_inserted_at); - assert_eq!(root_item_0.status, Status::Processed); + assert_eq!(root_item_0.status, ProcessedStatus::Processed); assert!(root_item_0.mined_valid_as_of.unwrap() < root_2_mined_at); assert!(root_item_0.mined_valid_as_of.unwrap() > root_1_inserted_at); assert!(root_item_0.pending_valid_as_of < root_1_inserted_at); diff --git a/src/database/types.rs b/src/database/types.rs index e3179d3d..2d64d719 100644 --- a/src/database/types.rs +++ b/src/database/types.rs @@ -1,10 +1,10 @@ use chrono::{DateTime, Utc}; -use crate::identity_tree::{Hash, Status}; +use crate::identity_tree::{Hash, UnprocessedStatus}; pub struct UnprocessedCommitment { pub commitment: Hash, - pub status: Status, + pub status: UnprocessedStatus, pub created_at: DateTime, pub processed_at: Option>, pub error_message: Option, diff --git a/src/identity_tree.rs b/src/identity_tree.rs index b94b35d9..6b3e71f4 100644 --- a/src/identity_tree.rs +++ b/src/identity_tree.rs @@ -1,5 +1,4 @@ use 
std::cmp::min; -use std::str::FromStr; use std::sync::{Arc, Mutex, MutexGuard}; use chrono::Utc; @@ -8,12 +7,15 @@ use semaphore::merkle_tree::Hasher; use semaphore::poseidon_tree::{PoseidonHash, Proof}; use semaphore::{lazy_merkle_tree, Field}; use serde::Serialize; -use thiserror::Error; use tracing::{info, warn}; +mod status; + pub type PoseidonTree = LazyMerkleTree; pub type Hash = ::Hash; +pub use self::status::{ProcessedStatus, Status, UnknownStatus, UnprocessedStatus}; + #[derive(Clone, Eq, PartialEq, Hash, Debug)] pub struct TreeUpdate { pub leaf_index: usize, @@ -32,76 +34,15 @@ impl TreeUpdate { #[derive(Debug)] pub struct TreeItem { - pub status: Status, + pub status: ProcessedStatus, pub leaf_index: usize, } -// TODO: Failed and New seem to only be used for "unprocessed" identities -// we should create a separate enum with just those 2 variants - -/// The status pertains to the status of the root. -/// But it can also be used interchangeably with the status of an identity -/// as all identity commitments has an associated root. -#[derive(Clone, Copy, Debug, Serialize, PartialEq, Eq, Hash)] -#[serde(rename_all = "camelCase")] -pub enum Status { - /// An unprocessed identity that failed to be included` - Failed, - /// Root is unprocessed - i.e. not included in sequencer's - /// in-memory tree. - New, - /// Root is included in sequencer's in-memory tree but not yet mined. - /// The - Pending, - /// Root is mined on mainnet but is still waiting for confirmation on - /// relayed chains - /// - /// i.e. the root is included in a mined block on mainnet, - /// but the state has not yet been bridged to Optimism and Polygon - /// - /// NOTE: If the sequencer is not configured with any secondary chains this - /// status should immediately become Finalized - Processed, - /// Root is mined and relayed to secondary chains - Mined, -} - -#[derive(Debug, Error)] -#[error("unknown status")] -pub struct UnknownStatus; - -impl FromStr for Status { - type Err = UnknownStatus; - - fn from_str(s: &str) -> Result { - match s { - "new" => Ok(Self::New), - "failed" => Ok(Self::Failed), - "pending" => Ok(Self::Pending), - "mined" => Ok(Self::Mined), - "processed" => Ok(Self::Processed), - _ => Err(UnknownStatus), - } - } -} - -impl From for &str { - fn from(scope: Status) -> Self { - match scope { - Status::New => "new", - Status::Failed => "failed", - Status::Pending => "pending", - Status::Mined => "mined", - Status::Processed => "processed", - } - } -} - #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct RootItem { pub root: Field, - pub status: Status, + pub status: ProcessedStatus, pub pending_valid_as_of: chrono::DateTime, pub mined_valid_as_of: Option>, } @@ -601,15 +542,13 @@ impl TreeState { #[must_use] pub fn get_proof_for(&self, item: &TreeItem) -> (Field, InclusionProof) { let (leaf, root, proof) = match item.status { - Status::Pending | Status::New | Status::Failed => { - self.latest.get_leaf_and_proof(item.leaf_index) - } - Status::Processed => self.processed.get_leaf_and_proof(item.leaf_index), - Status::Mined => self.mined.get_leaf_and_proof(item.leaf_index), + ProcessedStatus::Pending => self.latest.get_leaf_and_proof(item.leaf_index), + ProcessedStatus::Processed => self.processed.get_leaf_and_proof(item.leaf_index), + ProcessedStatus::Mined => self.mined.get_leaf_and_proof(item.leaf_index), }; let proof = InclusionProof { - status: item.status, + status: item.status.into(), root: Some(root), proof: Some(proof), message: None, diff --git 
a/src/identity_tree/status.rs b/src/identity_tree/status.rs new file mode 100644 index 00000000..92e11125 --- /dev/null +++ b/src/identity_tree/status.rs @@ -0,0 +1,157 @@ +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +/// The status pertains to the status of the root. +/// But it can also be used interchangeably with the status of an identity +/// as all identity commitments have an associated root. +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "camelCase")] +pub enum ProcessedStatus { + /// Root is included in sequencer's in-memory tree, but is not yet + /// mined. + Pending, + + /// Root is mined on mainnet but is still waiting for confirmation on + /// relayed chains + /// + /// i.e. the root is included in a mined block on mainnet, + /// but the state has not yet been bridged to Optimism and Polygon + /// + /// NOTE: If the sequencer is not configured with any secondary chains this + /// status should immediately become Finalized + Processed, + + /// Root is mined and relayed to secondary chains + Mined, +} + +/// Status of identity commitments which have not yet been included in the tree +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "camelCase")] +pub enum UnprocessedStatus { + /// Unprocessed identity failed to be inserted into the tree for some reason + /// + /// Usually accompanied by an appropriate error message + Failed, + + /// Root is unprocessed - i.e. not included in sequencer's + /// in-memory tree. + New, +} + +/// A status type visible on the API level - contains both the processed and +/// unprocessed statuses +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "camelCase")] +#[serde(untagged)] +pub enum Status { + Unprocessed(UnprocessedStatus), + Processed(ProcessedStatus), +} + +#[derive(Debug, Error)] +#[error("unknown status")] +pub struct UnknownStatus; + +impl FromStr for ProcessedStatus { + type Err = UnknownStatus; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "pending" => Ok(Self::Pending), + "mined" => Ok(Self::Mined), + "processed" => Ok(Self::Processed), + _ => Err(UnknownStatus), + } + } +} + +impl From<ProcessedStatus> for &str { + fn from(scope: ProcessedStatus) -> Self { + match scope { + ProcessedStatus::Pending => "pending", + ProcessedStatus::Mined => "mined", + ProcessedStatus::Processed => "processed", + } + } +} + +impl FromStr for Status { + type Err = UnknownStatus; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if let Ok(s) = UnprocessedStatus::from_str(s) { + Ok(Self::Unprocessed(s)) + } else if let Ok(s) = ProcessedStatus::from_str(s) { + Ok(Self::Processed(s)) + } else { + Err(UnknownStatus) + } + } +} + +impl FromStr for UnprocessedStatus { + type Err = UnknownStatus; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "new" => Ok(Self::New), + "failed" => Ok(Self::Failed), + _ => Err(UnknownStatus), + } + } +} + +impl From<UnprocessedStatus> for &str { + fn from(scope: UnprocessedStatus) -> Self { + match scope { + UnprocessedStatus::New => "new", + UnprocessedStatus::Failed => "failed", + } + } +} + +impl From<UnprocessedStatus> for Status { + fn from(status: UnprocessedStatus) -> Self { + Self::Unprocessed(status) + } +} + +impl From<ProcessedStatus> for Status { + fn from(status: ProcessedStatus) -> Self { + Self::Processed(status) + } +} + +#[cfg(test)] +mod tests { + use test_case::test_case; + + use super::*; + + #[test_case(Status::Processed(ProcessedStatus::Pending) => "pending")] + 
#[test_case(Status::Processed(ProcessedStatus::Mined) => "mined")] + #[test_case(Status::Unprocessed(UnprocessedStatus::New) => "new")] + #[test_case(Status::Unprocessed(UnprocessedStatus::Failed) => "failed")] + fn serialize_status(api_status: Status) -> &'static str { + let s = serde_json::to_string(&api_status).unwrap(); + + let s = s.leak() as &'static str; + + // Unwrap from the redundant JSON quotes + s.trim_start_matches("\"").trim_end_matches("\"") + } + + #[test_case("pending" => Status::Processed(ProcessedStatus::Pending))] + #[test_case("mined" => Status::Processed(ProcessedStatus::Mined))] + #[test_case("new" => Status::Unprocessed(UnprocessedStatus::New))] + #[test_case("failed" => Status::Unprocessed(UnprocessedStatus::Failed))] + fn deserialize_status(s: &str) -> Status { + // Wrapped because JSON expected `"something"` and not `something` + let wrapped = format!("\"{s}\""); + + serde_json::from_str(&wrapped).unwrap() + } +} diff --git a/src/task_monitor/tasks/insert_identities.rs b/src/task_monitor/tasks/insert_identities.rs index cd052d79..f0700d13 100644 --- a/src/task_monitor/tasks/insert_identities.rs +++ b/src/task_monitor/tasks/insert_identities.rs @@ -9,7 +9,7 @@ use tracing::instrument; use crate::database::types::UnprocessedCommitment; use crate::database::Database; -use crate::identity_tree::{Hash, Latest, Status, TreeVersion, TreeVersionReadOps}; +use crate::identity_tree::{Hash, Latest, TreeVersion, TreeVersionReadOps, UnprocessedStatus}; pub struct InsertIdentities { database: Arc, @@ -43,7 +43,7 @@ async fn insert_identities_loop( loop { // get commits from database let unprocessed = database - .get_eligible_unprocessed_commitments(Status::New) + .get_eligible_unprocessed_commitments(UnprocessedStatus::New) .await?; if unprocessed.is_empty() { sleep(Duration::from_secs(5)).await; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3d0fdad3..6b9e9fdb 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -288,7 +288,7 @@ pub async fn test_inclusion_status( uri: &str, client: &Client, leaf: &Hash, - expected_status: Status, + expected_status: impl Into, ) { let body = construct_inclusion_proof_body(leaf); info!(?uri, "Contacting"); @@ -320,6 +320,8 @@ pub async fn test_inclusion_status( .as_str() .expect("Failed to get status"); + let expected_status = expected_status.into(); + assert_eq!( expected_status, Status::from_str(status).expect("Could not convert str to Status") diff --git a/tests/recover_identities.rs b/tests/recover_identities.rs index 25e93a19..6a93cf0e 100644 --- a/tests/recover_identities.rs +++ b/tests/recover_identities.rs @@ -1,7 +1,7 @@ mod common; use common::prelude::*; -use signup_sequencer::identity_tree::Status; +use signup_sequencer::identity_tree::{ProcessedStatus, UnprocessedStatus}; use crate::common::{test_inclusion_status, test_recover_identity}; const SUPPORTED_DEPTH: usize = 18; @@ -162,7 +162,7 @@ async fn recover_identities() -> anyhow::Result<()> { &uri, &client, &identities_ref[recovery_leaf_index], - Status::New, + UnprocessedStatus::New, ) .await; } @@ -188,7 +188,7 @@ async fn recover_identities() -> anyhow::Result<()> { &uri, &client, &identities_ref[recovery_leaf_index], - Status::Mined, + ProcessedStatus::Mined, ) .await; }
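As a recap of the status refactor above, the standalone sketch below shows how the new types in `src/identity_tree/status.rs` are meant to compose: strings stored in the `identities` table parse into `ProcessedStatus`, strings in `unprocessed_identities` parse into `UnprocessedStatus`, and both lift into the API-level `Status` through the new `From` impls. It assumes `signup_sequencer` and `serde_json` are available as dependencies and exercises only conversions defined in this PR.

```rust
use std::str::FromStr;

use signup_sequencer::identity_tree::{ProcessedStatus, Status, UnprocessedStatus};

fn main() {
    // Rows in the `identities` table only ever carry a ProcessedStatus string...
    let db_status = ProcessedStatus::from_str("pending").unwrap();
    assert_eq!(db_status, ProcessedStatus::Pending);

    // ...while `unprocessed_identities` rows carry an UnprocessedStatus string.
    let unprocessed = UnprocessedStatus::from_str("new").unwrap();
    assert_eq!(unprocessed, UnprocessedStatus::New);

    // Both sides lift into the API-level Status via the new From impls.
    let api: Status = db_status.into();
    assert_eq!(api, Status::Processed(ProcessedStatus::Pending));
    assert_eq!(
        Status::from(unprocessed),
        Status::Unprocessed(UnprocessedStatus::New)
    );

    // The API type parses either family of strings and serializes untagged,
    // so HTTP responses still expose plain "pending"/"mined"/"new"/"failed".
    assert_eq!(
        Status::from_str("mined").unwrap(),
        Status::Processed(ProcessedStatus::Mined)
    );
    assert_eq!(serde_json::to_string(&api).unwrap(), "\"pending\"");
}
```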