From c5cab664446b53230c2f266b4939f01d7f01a38a Mon Sep 17 00:00:00 2001 From: Dzejkop Date: Thu, 17 Oct 2024 15:45:56 +0200 Subject: [PATCH 1/5] WIP --- Readme.md | 11 +- schemas/database/016_remove_recovery.down.sql | 13 ++ schemas/database/016_remove_recovery.up.sql | 11 ++ schemas/openapi.yaml | 29 --- src/app.rs | 4 + src/database/methods.rs | 110 +---------- src/database/mod.rs | 177 +++--------------- src/identity/processor.rs | 4 +- src/task_monitor/tasks/insert_identities.rs | 3 +- tests/common/mod.rs | 50 ----- 10 files changed, 67 insertions(+), 345 deletions(-) create mode 100644 schemas/database/016_remove_recovery.down.sql create mode 100644 schemas/database/016_remove_recovery.up.sql diff --git a/Readme.md b/Readme.md index 4e0ce338..ec0104c6 100644 --- a/Readme.md +++ b/Readme.md @@ -37,17 +37,14 @@ Sequencer has 6 API routes. indeed in the tree. The inclusion proof is then returned to the API caller. 3. `/deleteIdentity` - Takes an identity commitment hash, ensures that it exists and hasn't been deleted yet. This identity is then scheduled for deletion. -4. `/recoverIdentity` - Takes two identity commitment hashes. The first must exist and will be scheduled for deletion - and the other will be inserted as a replacement after the first identity has been deleted and a set amount of time ( - depends on configuration parameters) has passed. -5. `/verifySemaphoreProof` - This call takes root, signal hash, nullifier hash, external nullifier hash and a proof. +4. `/verifySemaphoreProof` - This call takes root, signal hash, nullifier hash, external nullifier hash and a proof. The proving key is fetched based on the depth index, and verification key as well. The list of prime fields is created based on request input mentioned before, and then we proceed to verify the proof. Sequencer uses groth16 zk-SNARK implementation. The API call returns the proof as a response. -6. `/addBatchSize` - Adds a prover with specific batch size to a list of provers. -7. `/removeBatchSize` - Removes the prover based on batch size. -8. `/listBatchSizes` - Lists all provers that are added to the Sequencer. +5. `/addBatchSize` - Adds a prover with specific batch size to a list of provers. +6. `/removeBatchSize` - Removes the prover based on batch size. +7. `/listBatchSizes` - Lists all provers that are added to the Sequencer. 
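For illustration, a minimal client-side sketch of calling the first of these routes. This example is not part of the patch: the base URL and port are assumed placeholders, the commitment value is a dummy matching the `^[A-F0-9]{64}$` pattern from `schemas/openapi.yaml`, and the camelCase field name follows the `InsertCommitmentRequest` serializer visible later in this series.

```rust
// Hypothetical caller against a locally running sequencer; mirrors the
// construct_insert_identity_body helper in tests/common/mod.rs.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = reqwest::Client::new();
    // Dummy commitment (64 hex chars) — illustrative only.
    let body = serde_json::json!({
        "identityCommitment": "0000000000000000000000000000000000000000000000000000000000000001",
    });

    let response = client
        .post("http://localhost:8080/insertIdentity") // assumed local address
        .json(&body) // requires reqwest's "json" feature
        .send()
        .await?;

    // A success status only means the commitment was queued; inclusion in the
    // tree happens asynchronously, which is why /inclusionProof is a separate route.
    assert!(response.status().is_success());
    Ok(())
}
```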
 ## Getting Started

diff --git a/schemas/database/016_remove_recovery.down.sql b/schemas/database/016_remove_recovery.down.sql
new file mode 100644
index 00000000..68221433
--- /dev/null
+++ b/schemas/database/016_remove_recovery.down.sql
@@ -0,0 +1,13 @@
+CREATE TABLE recoveries (
+    existing_commitment BYTEA NOT NULL UNIQUE,
+    new_commitment      BYTEA NOT NULL UNIQUE
+);
+
+ALTER TABLE unprocessed_identities
+    ADD COLUMN eligibility TIMESTAMPTZ,
+    ADD COLUMN status VARCHAR(50) NOT NULL,
+    ADD COLUMN processed_at TIMESTAMPTZ,
+    ADD COLUMN error_message TEXT;
+
+ALTER TABLE unprocessed_identities
+    DROP CONSTRAINT unique_commitment;
diff --git a/schemas/database/016_remove_recovery.up.sql b/schemas/database/016_remove_recovery.up.sql
new file mode 100644
index 00000000..4d0ea58a
--- /dev/null
+++ b/schemas/database/016_remove_recovery.up.sql
@@ -0,0 +1,11 @@
+DROP TABLE recoveries;
+
+ALTER TABLE unprocessed_identities
+    DROP COLUMN eligibility,
+    DROP COLUMN status,
+    DROP COLUMN processed_at,
+    DROP COLUMN error_message;
+
+ALTER TABLE unprocessed_identities
+    ADD CONSTRAINT unique_commitment UNIQUE (commitment);
+
diff --git a/schemas/openapi.yaml b/schemas/openapi.yaml
index 43a2f951..67b181c4 100644
--- a/schemas/openapi.yaml
+++ b/schemas/openapi.yaml
@@ -62,26 +62,6 @@ paths:
               schema:
                 description: 'Identity could not be queued for deletion'
                 type: 'string'
-  /recoverIdentity:
-    post:
-      summary: 'Queues a recovery request, deleting the previous identity specified and inserting the new one.
-      New insertions must wait a specified time delay before being included in the merkle tree'
-      requestBody:
-        required: true
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/RecoveryRequest'
-      responses:
-        '202':
-          description: 'Identity has been successfully queued for recovery'
-        '400':
-          description: 'Invalid request'
-          content:
-            application/json:
-              schema:
-                description: 'Identity could not be queued for recovery'
-                type: 'string'
   /inclusionProof:
     post:
       summary: 'Get Merkle inclusion proof'
@@ -152,15 +132,6 @@ paths:
 components:
   schemas:
-    RecoveryRequest:
-      type: object
-      properties:
-        previousIdentityCommitment:
-          type: string
-          pattern: '^[A-F0-9]{64}$'
-        newIdentityCommitment:
-          type: string
-          pattern: '^[A-F0-9]{64}$'
     IdentityCommitment:
       type: object
       properties:
diff --git a/src/app.rs b/src/app.rs
index b2cf3a03..02ca8de3 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -164,7 +164,11 @@ impl App {
             return Err(ServerError::DuplicateCommitment);
         }
 
+<<<<<<< HEAD
         tx.insert_new_identity(commitment, Utc::now()).await?;
+=======
+        tx.insert_unprocessed_identity(commitment, Utc::now()).await?;
+>>>>>>> 428daf1 (WIP)
 
         tx.commit().await?;
 
diff --git a/src/database/methods.rs b/src/database/methods.rs
index 96e53410..d4f0d8c1 100644
--- a/src/database/methods.rs
+++ b/src/database/methods.rs
@@ -475,50 +475,23 @@ pub trait DbMethods<'c>: Acquire<'c, Database = Postgres> + Sized {
     }
 
     #[instrument(skip(self), level = "debug")]
-    async fn insert_new_identity(
-        self,
-        identity: Hash,
-        eligibility_timestamp: sqlx::types::chrono::DateTime<Utc>,
-    ) -> Result<Hash, Error> {
+    async fn insert_unprocessed_identity(self, identity: Hash) -> Result<Hash, Error> {
         let mut conn = self.acquire().await?;
 
         sqlx::query(
             r#"
-            INSERT INTO unprocessed_identities (commitment, status, created_at, eligibility)
-            VALUES ($1, $2, CURRENT_TIMESTAMP, $3)
+            INSERT INTO unprocessed_identities (commitment, created_at)
+            VALUES ($1, CURRENT_TIMESTAMP)
+            ON CONFLICT DO NOTHING
             "#,
         )
        .bind(identity)
-        .bind(<&str>::from(UnprocessedStatus::New))
-        .bind(eligibility_timestamp)
         .execute(&mut *conn)
         .await?;
 
         Ok(identity)
     }
 
-    #[instrument(skip(self), level = "debug")]
-    async fn insert_new_recovery(
-        self,
-        existing_commitment: &Hash,
-        new_commitment: &Hash,
-    ) -> Result<(), Error> {
-        let mut conn = self.acquire().await?;
-
-        sqlx::query(
-            r#"
-            INSERT INTO recoveries (existing_commitment, new_commitment)
-            VALUES ($1, $2)
-            "#,
-        )
-        .bind(existing_commitment)
-        .bind(new_commitment)
-        .execute(&mut *conn)
-        .await?;
-
-        Ok(())
-    }
-
     #[instrument(skip(self), level = "debug")]
     async fn get_latest_deletion(self) -> Result<LatestDeletionEntry, Error> {
         let mut conn = self.acquire().await?;
@@ -580,47 +553,6 @@ pub trait DbMethods<'c>: Acquire<'c, Database = Postgres> + Sized {
         Ok(())
     }
 
-    #[cfg(test)]
-    #[instrument(skip(self), level = "debug")]
-    async fn get_all_recoveries(self) -> Result<Vec<RecoveryEntry>, Error> {
-        let mut conn = self.acquire().await?;
-
-        Ok(
-            sqlx::query_as::<_, RecoveryEntry>("SELECT * FROM recoveries")
-                .fetch_all(&mut *conn)
-                .await?,
-        )
-    }
-
-    #[instrument(skip(self, prev_commits), level = "debug")]
-    async fn delete_recoveries<I, T>(self, prev_commits: I) -> Result<Vec<RecoveryEntry>, Error>
-    where
-        I: IntoIterator<Item = T> + Send,
-        T: Into<U256>,
-    {
-        let mut conn = self.acquire().await?;
-
-        // TODO: upstream PgHasArrayType impl to ruint
-        let prev_commits = prev_commits
-            .into_iter()
-            .map(|c| c.into().to_be_bytes())
-            .collect::<Vec<_>>();
-
-        let res = sqlx::query_as::<_, RecoveryEntry>(
-            r#"
-            DELETE
-            FROM recoveries
-            WHERE existing_commitment = ANY($1)
-            RETURNING *
-            "#,
-        )
-        .bind(&prev_commits)
-        .fetch_all(&mut *conn)
-        .await?;
-
-        Ok(res)
-    }
-
     /// Inserts a new deletion into the deletions table
     ///
     /// This method is idempotent and on conflict nothing will happen
@@ -686,20 +618,15 @@ pub trait DbMethods<'c>: Acquire<'c, Database = Postgres> + Sized {
     }
 
     #[instrument(skip(self), level = "debug")]
-    async fn get_eligible_unprocessed_commitments(
-        self,
-        status: UnprocessedStatus,
-    ) -> Result<Vec<Hash>, Error> {
+    async fn get_unprocessed_commitments(self) -> Result<Vec<Hash>, Error> {
         let mut conn = self.acquire().await?;
 
         let result: Vec<(Hash,)> = sqlx::query_as(
             r#"
             SELECT commitment FROM unprocessed_identities
-            WHERE status = $1 AND CURRENT_TIMESTAMP > eligibility
             LIMIT $2
             "#,
         )
-        .bind(<&str>::from(status))
         .bind(MAX_UNPROCESSED_FETCH_COUNT)
         .fetch_all(&mut *conn)
         .await?;
 
         Ok(result.into_iter().map(|(commitment,)| commitment).collect())
     }
 
-    /// Returns the error message from the unprocessed identities table
-    /// if it exists
-    ///
-    /// - The outer option represents the existence of the commitment in the
-    ///   unprocessed_identities table
-    /// - The inner option represents the existence of an error message
-    #[instrument(skip(self), level = "debug")]
-    async fn get_unprocessed_error(
-        self,
-        commitment: &Hash,
-    ) -> Result<Option<Option<String>>, Error> {
-        let mut conn = self.acquire().await?;
-
-        let result: Option<(Option<String>,)> = sqlx::query_as(
-            r#"
-            SELECT error_message
-            FROM unprocessed_identities
-            WHERE commitment = $1
-            "#,
-        )
-        .bind(commitment)
-        .fetch_optional(&mut *conn)
-        .await?;
-
-        Ok(result.map(|(error_message,)| error_message))
-    }
-
     #[instrument(skip(self), level = "debug")]
     async fn remove_unprocessed_identity(self, commitment: &Hash) -> Result<(), Error> {
         let mut conn = self.acquire().await?;
diff --git a/src/database/mod.rs b/src/database/mod.rs
index 0e14359a..aae12d24 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -281,24 +281,11 @@ mod test {
             .expect("cant convert to u256")
u256") .into(); - let eligibility_timestamp = Utc::now(); - - let hash = db - .insert_new_identity(commit_hash, eligibility_timestamp) - .await?; + let hash = db.insert_unprocessed_identity(commit_hash).await?; assert_eq!(commit_hash, hash); - let commit = db - .get_unprocessed_error(&commit_hash) - .await - .expect("expected commitment status"); - assert_eq!(commit, Some(None)); - - let identity_count = db - .get_eligible_unprocessed_commitments(UnprocessedStatus::New) - .await? - .len(); + let identity_count = db.get_unprocessed_commitments().await?.len(); assert_eq!(identity_count, 1); @@ -318,8 +305,7 @@ mod test { let eligibility_timestamp = Utc::now(); for identity in &identities { - db.insert_new_identity(*identity, eligibility_timestamp) - .await?; + db.insert_unprocessed_identity(*identity).await?; } assert_eq!( @@ -390,7 +376,7 @@ mod test { } #[tokio::test] - async fn test_insert_prover_configuration() -> anyhow::Result<()> { + async fn insert_prover_configuration() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; @@ -433,7 +419,7 @@ mod test { } #[tokio::test] - async fn test_insert_provers() -> anyhow::Result<()> { + async fn insert_provers() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let mock_provers = mock_provers(); @@ -447,7 +433,7 @@ mod test { } #[tokio::test] - async fn test_remove_prover() -> anyhow::Result<()> { + async fn remove_prover() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let mock_provers = mock_provers(); @@ -465,27 +451,7 @@ mod test { } #[tokio::test] - async fn test_insert_new_recovery() -> anyhow::Result<()> { - let docker = Cli::default(); - let (db, _db_container) = setup_db(&docker).await?; - - let existing_commitment: Uint<256, 4> = Uint::from(1); - let new_commitment: Uint<256, 4> = Uint::from(2); - - db.insert_new_recovery(&existing_commitment, &new_commitment) - .await?; - - let recoveries = db.get_all_recoveries().await?; - - assert_eq!(recoveries.len(), 1); - assert_eq!(recoveries[0].existing_commitment, existing_commitment); - assert_eq!(recoveries[0].new_commitment, new_commitment); - - Ok(()) - } - - #[tokio::test] - async fn test_insert_new_deletion() -> anyhow::Result<()> { + async fn insert_new_deletion() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let existing_commitment: Uint<256, 4> = Uint::from(1); @@ -501,26 +467,22 @@ mod test { } #[tokio::test] - async fn test_get_eligible_unprocessed_commitments() -> anyhow::Result<()> { + async fn get_eligible_unprocessed_commitments() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let commitment_0: Uint<256, 4> = Uint::from(1); let eligibility_timestamp_0 = Utc::now(); - db.insert_new_identity(commitment_0, eligibility_timestamp_0) - .await?; + db.insert_unprocessed_identity(commitment_0).await?; let commitment_1: Uint<256, 4> = Uint::from(2); let eligibility_timestamp_1 = Utc::now() .checked_add_days(Days::new(7)) .expect("Could not create eligibility timestamp"); - db.insert_new_identity(commitment_1, eligibility_timestamp_1) - .await?; + db.insert_unprocessed_identity(commitment_1).await?; - let unprocessed_commitments = db - .get_eligible_unprocessed_commitments(UnprocessedStatus::New) - .await?; + let unprocessed_commitments = db.get_unprocessed_commitments().await?; 
assert_eq!(unprocessed_commitments.len(), 1); assert_eq!(unprocessed_commitments[0], commitment_0); @@ -529,27 +491,23 @@ mod test { } #[tokio::test] - async fn test_get_unprocessed_commitments() -> anyhow::Result<()> { + async fn get_unprocessed_commitments() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; // Insert new identity with a valid eligibility timestamp let commitment_0: Uint<256, 4> = Uint::from(1); let eligibility_timestamp_0 = Utc::now(); - db.insert_new_identity(commitment_0, eligibility_timestamp_0) - .await?; + db.insert_unprocessed_identity(commitment_0).await?; // Insert new identity with eligibility timestamp in the future let commitment_1: Uint<256, 4> = Uint::from(2); let eligibility_timestamp_1 = Utc::now() .checked_add_days(Days::new(7)) .expect("Could not create eligibility timestamp"); - db.insert_new_identity(commitment_1, eligibility_timestamp_1) - .await?; + db.insert_unprocessed_identity(commitment_1).await?; - let unprocessed_commitments = db - .get_eligible_unprocessed_commitments(UnprocessedStatus::New) - .await?; + let unprocessed_commitments = db.get_unprocessed_commitments().await?; // Assert unprocessed commitments against expected values assert_eq!(unprocessed_commitments.len(), 1); @@ -559,49 +517,7 @@ mod test { } #[tokio::test] - async fn test_update_eligibility_timestamp() -> anyhow::Result<()> { - let docker = Cli::default(); - let (db, _db_container) = setup_db(&docker).await?; - let dec = "1234500000000000000"; - let commit_hash: Hash = U256::from_dec_str(dec) - .expect("cant convert to u256") - .into(); - - // Set eligibility to Utc::now() day and check db entries - let eligibility_timestamp = Utc::now(); - db.insert_new_identity(commit_hash, eligibility_timestamp) - .await?; - - let commitments = db - .get_eligible_unprocessed_commitments(UnprocessedStatus::New) - .await?; - assert_eq!(commitments.len(), 1); - - let eligible_commitments = db - .get_eligible_unprocessed_commitments(UnprocessedStatus::New) - .await?; - assert_eq!(eligible_commitments.len(), 1); - - // Set eligibility to Utc::now() + 7 days and check db entries - let eligibility_timestamp = Utc::now() - .checked_add_days(Days::new(7)) - .expect("Could not create eligibility timestamp"); - - // Insert new identity with an eligibility timestamp in the future - let commit_hash: Hash = Hash::from(1); - db.insert_new_identity(commit_hash, eligibility_timestamp) - .await?; - - let eligible_commitments = db - .get_eligible_unprocessed_commitments(UnprocessedStatus::New) - .await?; - assert_eq!(eligible_commitments.len(), 1); - - Ok(()) - } - - #[tokio::test] - async fn test_insert_deletion() -> anyhow::Result<()> { + async fn insert_deletion() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let identities = mock_identities(3); @@ -617,47 +533,6 @@ mod test { Ok(()) } - #[tokio::test] - async fn test_insert_recovery() -> anyhow::Result<()> { - let docker = Cli::default(); - let (db, _db_container) = setup_db(&docker).await?; - - let old_identities = mock_identities(3); - let new_identities = mock_identities(3); - - for (old, new) in old_identities.into_iter().zip(new_identities) { - db.insert_new_recovery(&old, &new).await?; - } - - let recoveries = db.get_all_recoveries().await?; - assert_eq!(recoveries.len(), 3); - - Ok(()) - } - - #[tokio::test] - async fn test_delete_recoveries() -> anyhow::Result<()> { - let docker = Cli::default(); - let (db, _db_container) = 
setup_db(&docker).await?; - - let old_identities = mock_identities(3); - let new_identities = mock_identities(3); - - for (old, new) in old_identities.clone().into_iter().zip(new_identities) { - db.insert_new_recovery(&old, &new).await?; - } - - let deleted_recoveries = db - .delete_recoveries(old_identities[0..2].iter().cloned()) - .await?; - assert_eq!(deleted_recoveries.len(), 2); - - let remaining = db.get_all_recoveries().await?; - assert_eq!(remaining.len(), 1); - - Ok(()) - } - #[tokio::test] async fn get_last_leaf_index() -> anyhow::Result<()> { let docker = Cli::default(); @@ -1063,7 +938,7 @@ mod test { } #[tokio::test] - async fn test_root_invalidation() -> anyhow::Result<()> { + async fn root_invalidation() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; @@ -1155,7 +1030,7 @@ mod test { // When there's only unprocessed identity let eligibility_timestamp = Utc::now(); - db.insert_new_identity(identities[0], eligibility_timestamp) + db.insert_unprocessed_identity(identities[0], eligibility_timestamp) .await .context("Inserting new identity")?; assert!(db.identity_exists(identities[0]).await?); @@ -1171,7 +1046,7 @@ mod test { } #[tokio::test] - async fn test_remove_deletions() -> anyhow::Result<()> { + async fn remove_deletions() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; @@ -1203,7 +1078,7 @@ mod test { } #[tokio::test] - async fn test_latest_insertion() -> anyhow::Result<()> { + async fn latest_insertion() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; @@ -1231,7 +1106,7 @@ mod test { } #[tokio::test] - async fn test_latest_deletion() -> anyhow::Result<()> { + async fn latest_deletion() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; @@ -1280,7 +1155,7 @@ mod test { } #[tokio::test] - async fn test_insert_batch() -> anyhow::Result<()> { + async fn insert_batch() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let identities: Vec<_> = mock_identities(10) @@ -1308,7 +1183,7 @@ mod test { } #[tokio::test] - async fn test_get_next_batch() -> anyhow::Result<()> { + async fn get_next_batch() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let identities: Vec<_> = mock_identities(10) @@ -1352,7 +1227,7 @@ mod test { } #[tokio::test] - async fn test_get_next_batch_without_transaction() -> anyhow::Result<()> { + async fn get_next_batch_without_transaction() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let identities: Vec<_> = mock_identities(10) @@ -1400,7 +1275,7 @@ mod test { } #[tokio::test] - async fn test_get_batch_head() -> anyhow::Result<()> { + async fn get_batch_head() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let roots = mock_roots(1); @@ -1431,7 +1306,7 @@ mod test { } #[tokio::test] - async fn test_insert_transaction() -> anyhow::Result<()> { + async fn insert_transaction() -> anyhow::Result<()> { let docker = Cli::default(); let (db, _db_container) = setup_db(&docker).await?; let roots = mock_roots(1); diff --git a/src/identity/processor.rs b/src/identity/processor.rs index 0971abe1..2d8821ef 100644 --- a/src/identity/processor.rs +++ b/src/identity/processor.rs @@ -554,7 +554,7 @@ impl OnChainIdentityProcessor 
{ // For each deletion, if there is a corresponding recovery, insert a new // identity with the specified eligibility timestamp for recovery in deleted_recoveries { - tx.insert_new_identity(recovery.new_commitment, eligibility_timestamp) + tx.insert_unprocessed_identity(recovery.new_commitment, eligibility_timestamp) .await?; } @@ -656,7 +656,7 @@ impl OffChainIdentityProcessor { // For each deletion, if there is a corresponding recovery, insert a new // identity with the specified eligibility timestamp for recovery in deleted_recoveries { - tx.insert_new_identity(recovery.new_commitment, eligibility_timestamp) + tx.insert_unprocessed_identity(recovery.new_commitment, eligibility_timestamp) .await?; } diff --git a/src/task_monitor/tasks/insert_identities.rs b/src/task_monitor/tasks/insert_identities.rs index 41916929..9674b84b 100644 --- a/src/task_monitor/tasks/insert_identities.rs +++ b/src/task_monitor/tasks/insert_identities.rs @@ -31,8 +31,9 @@ pub async fn insert_identities( // get commits from database let unprocessed = app .database - .get_eligible_unprocessed_commitments(UnprocessedStatus::New) + .get_unprocessed_commitments(UnprocessedStatus::New) .await?; + if unprocessed.is_empty() { continue; } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 4028af3e..18835722 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -479,56 +479,6 @@ pub async fn test_delete_identity( (ref_tree.proof(leaf_index).unwrap(), ref_tree.root()) } -#[instrument(skip_all)] -pub async fn test_recover_identity( - uri: &str, - client: &Client, - ref_tree: &mut PoseidonTree, - test_leaves: &[Field], - previous_leaf_index: usize, - new_leaf: Field, - new_leaf_index: usize, - expect_failure: bool, -) -> (merkle_tree::Proof, Field) { - let previous_leaf = test_leaves[previous_leaf_index]; - - let body = construct_recover_identity_body(&previous_leaf, &new_leaf); - - let response = client - .post(uri.to_owned() + "/recoverIdentity") - .header("Content-Type", "application/json") - .body(body) - .send() - .await - .expect("Failed to create insert identity"); - - let response_status = response.status(); - - let bytes = response - .bytes() - .await - .expect("Failed to get response bytes"); - - if expect_failure { - assert!(!response_status.is_success()); - } else { - assert!(response_status.is_success()); - assert!(bytes.is_empty()); - } - - // TODO: Note that recovery order is non-deterministic and therefore we cannot - // easily keep the ref_tree in sync with the sequencer's version of the - // tree. In the future, we could consider tracking updates to the tree in a - // different way like listening to event emission. 
-    ref_tree.set(previous_leaf_index, Hash::ZERO);
-    // Continuing on the note above, while the replacement identity is be
-    // inserted as a new identity, it is not deterministic and if there are multiple
-    // recovery requests, it is possible that the sequencer tree is ordered in a
-    // different way than the ref_tree
-    ref_tree.set(new_leaf_index, new_leaf);
-    (ref_tree.proof(new_leaf_index).unwrap(), ref_tree.root())
-}
-
 #[instrument(skip_all)]
 pub async fn test_add_batch_size(
     uri: impl Into<String>,

From 8409c36eece0d7911bed46c38cb5537ed6a9af3a Mon Sep 17 00:00:00 2001
From: Dzejkop
Date: Mon, 21 Oct 2024 20:28:47 +0200
Subject: [PATCH 2/5] chop chop

---
 src/app.rs                                  | 16 +---
 src/config.rs                               | 12 ---
 src/database/methods.rs                     |  4 +-
 src/database/mod.rs                         | 44 ++--------
 src/database/types.rs                       | 10 ---
 src/identity/processor.rs                   | 93 ---------------------
 src/identity_tree/mod.rs                    |  2 +-
 src/identity_tree/status.rs                 | 43 +---------
 src/server/data.rs                          | 10 ---
 src/task_monitor/tasks/insert_identities.rs |  7 +-
 tests/common/mod.rs                         | 15 +---
 11 files changed, 15 insertions(+), 241 deletions(-)

diff --git a/src/app.rs b/src/app.rs
index 02ca8de3..a3e89d55 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -164,11 +164,7 @@ impl App {
             return Err(ServerError::DuplicateCommitment);
         }
 
-<<<<<<< HEAD
-        tx.insert_new_identity(commitment, Utc::now()).await?;
-=======
-        tx.insert_unprocessed_identity(commitment, Utc::now()).await?;
->>>>>>> 428daf1 (WIP)
+        tx.insert_unprocessed_identity(commitment).await?;
 
         tx.commit().await?;
 
@@ -315,16 +311,6 @@ impl App {
             return Err(ServerError::InvalidCommitment);
         }
 
-        if let Some(error_message) = self.database.get_unprocessed_error(commitment).await? {
-            return Ok(InclusionProof {
-                root: None,
-                proof: None,
-                message: error_message
-                    .or_else(|| Some("identity exists but has not yet been processed".to_string())),
-            }
-            .into());
-        }
-
         let item = self
             .database
             .get_identity_leaf_index(commitment)
diff --git a/src/config.rs b/src/config.rs
index acdfc388..8982af9e 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -69,18 +69,6 @@ pub struct AppConfig {
     #[serde(default = "default::min_batch_deletion_size")]
     pub min_batch_deletion_size: usize,
 
-    /// The parameter to control the delay between mining a deletion batch and
-    /// inserting the recovery identities
-    ///
-    /// The sequencer will insert the recovery identities after
-    /// max_epoch_duration_seconds + root_history_expiry) seconds have passed
-    ///
-    /// By default the value is set to 0 so the sequencer will only use
-    /// root_history_expiry
-    #[serde(with = "humantime_serde")]
-    #[serde(default = "default::max_epoch_duration")]
-    pub max_epoch_duration: Duration,
-
     /// The maximum number of windows to scan for finalization logs
     #[serde(default = "default::scanning_window_size")]
     pub scanning_window_size: u64,
diff --git a/src/database/methods.rs b/src/database/methods.rs
index d4f0d8c1..1c556f76 100644
--- a/src/database/methods.rs
+++ b/src/database/methods.rs
@@ -5,13 +5,13 @@ use chrono::{DateTime, Utc};
 use ruint::aliases::U256;
 use sqlx::{Acquire, Executor, Postgres, Row};
 use tracing::instrument;
-use types::{DeletionEntry, RecoveryEntry};
+use types::{DeletionEntry};
 
 use super::types::{LatestDeletionEntry, LatestInsertionEntry};
 use crate::database::types::{BatchEntry, BatchEntryData, BatchType};
 use crate::database::{types, Error};
 use crate::identity_tree::{
-    Hash, ProcessedStatus, RootItem, TreeItem, TreeUpdate, UnprocessedStatus,
+    Hash, ProcessedStatus, RootItem, TreeItem, TreeUpdate,
 };
 use crate::prover::identity::Identity;
 use crate::prover::{ProverConfig, ProverType};
diff --git a/src/database/mod.rs b/src/database/mod.rs
index aae12d24..8ec7d54c 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -194,7 +194,7 @@ mod test {
     use crate::config::DatabaseConfig;
     use crate::database::methods::DbMethods;
     use crate::database::types::BatchType;
-    use crate::identity_tree::{Hash, ProcessedStatus, UnprocessedStatus};
+    use crate::identity_tree::{Hash, ProcessedStatus};
     use crate::prover::identity::Identity;
     use crate::prover::{ProverConfig, ProverType};
     use crate::utils::secret::SecretUrl;
@@ -302,8 +302,6 @@ mod test {
         let identities = mock_identities(10);
         let roots = mock_roots(11);
 
-        let eligibility_timestamp = Utc::now();
-
         for identity in &identities {
             db.insert_unprocessed_identity(*identity).await?;
         }
 
         assert_eq!(
@@ -466,52 +464,25 @@ mod test {
         Ok(())
     }
 
-    #[tokio::test]
-    async fn get_eligible_unprocessed_commitments() -> anyhow::Result<()> {
-        let docker = Cli::default();
-        let (db, _db_container) = setup_db(&docker).await?;
-        let commitment_0: Uint<256, 4> = Uint::from(1);
-        let eligibility_timestamp_0 = Utc::now();
-
-        db.insert_unprocessed_identity(commitment_0).await?;
-
-        let commitment_1: Uint<256, 4> = Uint::from(2);
-        let eligibility_timestamp_1 = Utc::now()
-            .checked_add_days(Days::new(7))
-            .expect("Could not create eligibility timestamp");
-
-        db.insert_unprocessed_identity(commitment_1).await?;
-
-        let unprocessed_commitments = db.get_unprocessed_commitments().await?;
-
-        assert_eq!(unprocessed_commitments.len(), 1);
-        assert_eq!(unprocessed_commitments[0], commitment_0);
-
-        Ok(())
-    }
-
     #[tokio::test]
     async fn get_unprocessed_commitments() -> anyhow::Result<()> {
         let docker = Cli::default();
         let (db, _db_container) = setup_db(&docker).await?;
 
-        // Insert new identity with a valid eligibility timestamp
+        // Insert new identity
         let commitment_0: Uint<256, 4> = Uint::from(1);
-        let eligibility_timestamp_0 = Utc::now();
         db.insert_unprocessed_identity(commitment_0).await?;
 
-        // Insert new identity with eligibility timestamp in the future
+        // Insert new identity
         let commitment_1: Uint<256, 4> = Uint::from(2);
-        let eligibility_timestamp_1 = Utc::now()
-            .checked_add_days(Days::new(7))
-            .expect("Could not create eligibility timestamp");
         db.insert_unprocessed_identity(commitment_1).await?;
 
         let unprocessed_commitments = db.get_unprocessed_commitments().await?;
 
         // Assert unprocessed commitments against expected values
-        assert_eq!(unprocessed_commitments.len(), 1);
+        assert_eq!(unprocessed_commitments.len(), 2);
         assert_eq!(unprocessed_commitments[0], commitment_0);
+        assert_eq!(unprocessed_commitments[1], commitment_1);
 
         Ok(())
     }
@@ -1027,10 +998,7 @@ mod test {
         // When there's no identity
         assert!(!db.identity_exists(identities[0]).await?);
 
-        // When there's only unprocessed identity
-        let eligibility_timestamp = Utc::now();
-
-        db.insert_unprocessed_identity(identities[0], eligibility_timestamp)
+        db.insert_unprocessed_identity(identities[0])
             .await
             .context("Inserting new identity")?;
         assert!(db.identity_exists(identities[0]).await?);
diff --git a/src/database/types.rs b/src/database/types.rs
index cfca25a7..d90beb71 100644
--- a/src/database/types.rs
+++ b/src/database/types.rs
@@ -8,16 +8,6 @@ use sqlx::{Database, Decode, Encode, Postgres, Type};
 use crate::identity_tree::Hash;
 use crate::prover::identity::Identity;
 
-#[derive(FromRow)]
-pub struct RecoveryEntry {
-    // existing commitment is used in tests only, but recoveries in general
-    // are used in production code via the FromRow trait
-    // so removing this field would break the production code
-    #[allow(unused)]
-    pub existing_commitment: Hash,
-    pub new_commitment: Hash,
-}
-
 pub struct LatestInsertionEntry {
     pub timestamp: DateTime<Utc>,
 }
diff --git a/src/identity/processor.rs b/src/identity/processor.rs
index 2d8821ef..545802e3 100644
--- a/src/identity/processor.rs
+++ b/src/identity/processor.rs
@@ -97,7 +97,6 @@ impl IdentityProcessor for OnChainIdentityProcessor {
         self.finalize_mainnet_roots(
             processed_tree,
             &mainnet_logs,
-            self.config.app.max_epoch_duration,
         )
         .await?;
 
@@ -407,7 +406,6 @@ impl OnChainIdentityProcessor {
         &self,
         processed_tree: &TreeVersion<Canonical>,
         logs: &[Log],
-        max_epoch_duration: Duration,
     ) -> Result<(), anyhow::Error> {
         for log in logs {
             let Some(event) = Self::raw_log_to_tree_changed(log) else {
@@ -434,14 +432,6 @@ impl OnChainIdentityProcessor {
             info!(?pre_root, ?post_root, ?kind, "Batch mined");
 
-            if kind == TreeChangeKind::Deletion {
-                // NOTE: We must do this before updating the tree
-                //       because we fetch commitments from the processed tree
-                //       before they are deleted
-                self.update_eligible_recoveries(log, max_epoch_duration)
-                    .await?;
-            }
-
             let updates_count = processed_tree.apply_updates_up_to(post_root.into());
 
             info!(updates_count, ?pre_root, ?post_root, "Mined tree updated");
@@ -509,59 +499,6 @@ impl OnChainIdentityProcessor {
 
         roots
     }
-
-    async fn update_eligible_recoveries(
-        &self,
-        log: &Log,
-        max_epoch_duration: Duration,
-    ) -> anyhow::Result<()> {
-        let tx_hash = log.transaction_hash.context("Missing tx hash")?;
-        let commitments = self
-            .identity_manager
-            .fetch_deletion_indices_from_tx(tx_hash)
-            .await
-            .context("Could not fetch deletion indices from tx")?;
-
-        let commitments = self
-            .database
-            .get_non_zero_commitments_by_leaf_indexes(commitments.iter().copied())
-            .await?;
-        let commitments: Vec<U256> = commitments.into_iter().map(Into::into).collect();
-
-        // Fetch the root history expiry time on chain
-        let root_history_expiry = self.identity_manager.root_history_expiry().await?;
-
-        // Use the root history expiry to calculate the eligibility timestamp for the
-        // new insertion
-        let root_history_expiry_duration =
-            chrono::Duration::seconds(root_history_expiry.as_u64() as i64);
-        let max_epoch_duration = chrono::Duration::from_std(max_epoch_duration)?;
-
-        let delay = root_history_expiry_duration + max_epoch_duration;
-
-        let eligibility_timestamp = Utc::now() + delay;
-
-        let mut tx = self
-            .database
-            .begin_tx(IsolationLevel::ReadCommitted)
-            .await?;
-
-        // Check if any deleted commitments correspond with entries in the
-        // recoveries table and insert the new commitment into the unprocessed
-        // identities table with the proper eligibility timestamp
-        let deleted_recoveries = tx.delete_recoveries(commitments).await?;
-
-        // For each deletion, if there is a corresponding recovery, insert a new
-        // identity with the specified eligibility timestamp
-        for recovery in deleted_recoveries {
-            tx.insert_unprocessed_identity(recovery.new_commitment, eligibility_timestamp)
-                .await?;
-        }
-
-        tx.commit().await?;
-
-        Ok(())
-    }
 }
 
 pub struct OffChainIdentityProcessor {
@@ -588,10 +525,6 @@ impl IdentityProcessor for OffChainIdentityProcessor {
         };
 
         for batch in batches.iter() {
-            if batch.batch_type == BatchType::Deletion {
-                self.update_eligible_recoveries(batch).await?;
-            }
-
             let mut tx = self
                 .database
                 .begin_tx(IsolationLevel::ReadCommitted)
                 .await?;
@@ -638,30 +571,4 @@ impl OffChainIdentityProcessor {
 
         committed_batches.push(batch_entry);
     }
-
-    async fn update_eligible_recoveries(&self, batch: &BatchEntry) -> anyhow::Result<()> {
-        let commitments: Vec<U256> = batch.data.identities.iter().map(|v| v.commitment).collect();
-        let eligibility_timestamp = Utc::now();
-
-        let mut tx = self
-            .database
-            .begin_tx(IsolationLevel::ReadCommitted)
-            .await?;
-
-        // Check if any deleted commitments correspond with entries in the
-        // recoveries table and insert the new commitment into the unprocessed
-        // identities table with the proper eligibility timestamp
-        let deleted_recoveries = tx.delete_recoveries(commitments).await?;
-
-        // For each deletion, if there is a corresponding recovery, insert a new
-        // identity with the specified eligibility timestamp
-        for recovery in deleted_recoveries {
-            tx.insert_unprocessed_identity(recovery.new_commitment, eligibility_timestamp)
-                .await?;
-        }
-
-        tx.commit().await?;
-
-        Ok(())
-    }
 }
diff --git a/src/identity_tree/mod.rs b/src/identity_tree/mod.rs
index 03c6ed00..acca0afd 100644
--- a/src/identity_tree/mod.rs
+++ b/src/identity_tree/mod.rs
@@ -16,7 +16,7 @@ mod status;
 pub type PoseidonTree<Version> = LazyMerkleTree<PoseidonHash, Version>;
 pub type Hash = <PoseidonHash as Hasher>::Hash;
 
-pub use self::status::{ProcessedStatus, Status, UnknownStatus, UnprocessedStatus};
+pub use self::status::{ProcessedStatus, Status, UnknownStatus};
 
 #[derive(Clone, Eq, PartialEq, Hash, Debug, FromRow)]
 pub struct TreeUpdate {
diff --git a/src/identity_tree/status.rs b/src/identity_tree/status.rs
index f08c18fe..f61aa140 100644
--- a/src/identity_tree/status.rs
+++ b/src/identity_tree/status.rs
@@ -27,22 +27,12 @@ pub enum ProcessedStatus {
     Mined,
 }
 
-/// Status of identity commitments which have not yet been included in the tree
-#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
-#[serde(rename_all = "camelCase")]
-pub enum UnprocessedStatus {
-    /// Root is unprocessed - i.e. not included in sequencer's
-    /// in-memory tree.
-    New,
-}
-
 /// A status type visible on the API level - contains both the processed and
 /// unprocessed statuses
 #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
 #[serde(rename_all = "camelCase")]
 #[serde(untagged)]
-pub enum Status {
-    Unprocessed(UnprocessedStatus),
+pub enum Status { // TODO: Remove
     Processed(ProcessedStatus),
 }
 
@@ -85,9 +75,7 @@ impl FromStr for Status {
     type Err = UnknownStatus;
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        if let Ok(s) = UnprocessedStatus::from_str(s) {
-            Ok(Self::Unprocessed(s))
-        } else if let Ok(s) = ProcessedStatus::from_str(s) {
+        if let Ok(s) = ProcessedStatus::from_str(s) {
             Ok(Self::Processed(s))
         } else {
             Err(UnknownStatus)
         }
     }
 }
 
-impl FromStr for UnprocessedStatus {
-    type Err = UnknownStatus;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "new" => Ok(Self::New),
-            _ => Err(UnknownStatus),
-        }
-    }
-}
-
-impl From<UnprocessedStatus> for &str {
-    fn from(scope: UnprocessedStatus) -> Self {
-        match scope {
-            UnprocessedStatus::New => "new",
-        }
-    }
-}
-
-impl From<UnprocessedStatus> for Status {
-    fn from(status: UnprocessedStatus) -> Self {
-        Self::Unprocessed(status)
-    }
-}
-
 impl From<ProcessedStatus> for Status {
     fn from(status: ProcessedStatus) -> Self {
         Self::Processed(status)
     }
 }
@@ -134,7 +97,6 @@ mod tests {
 
     #[test_case(Status::Processed(ProcessedStatus::Pending) => "pending")]
     #[test_case(Status::Processed(ProcessedStatus::Mined) => "mined")]
-    #[test_case(Status::Unprocessed(UnprocessedStatus::New) => "new")]
     fn serialize_status(api_status: Status) -> &'static str {
         let s = serde_json::to_string(&api_status).unwrap();
 
@@ -146,7 +108,6 @@ mod tests {
 
     #[test_case("pending" => Status::Processed(ProcessedStatus::Pending))]
     #[test_case("mined" => Status::Processed(ProcessedStatus::Mined))]
-    #[test_case("new" => Status::Unprocessed(UnprocessedStatus::New))]
     fn deserialize_status(s: &str) -> Status {
         // Wrapped because JSON expected `"something"` and not `something`
         let wrapped = format!("\"{s}\"");
diff --git a/src/server/data.rs b/src/server/data.rs
index 1d4650a2..1c9160a9 100644
--- a/src/server/data.rs
+++ b/src/server/data.rs
@@ -80,16 +80,6 @@ pub struct DeletionRequest {
     pub identity_commitment: Hash,
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-#[serde(deny_unknown_fields)]
-pub struct RecoveryRequest {
-    /// The leaf index of the identity commitment to delete.
-    pub previous_identity_commitment: Hash,
-    /// The new identity commitment to insert.
-    pub new_identity_commitment: Hash,
-}
-
 impl From<InclusionProof> for InclusionProofResponse {
     fn from(value: InclusionProof) -> Self {
         Self(value)
diff --git a/src/task_monitor/tasks/insert_identities.rs b/src/task_monitor/tasks/insert_identities.rs
index 9674b84b..39db52a8 100644
--- a/src/task_monitor/tasks/insert_identities.rs
+++ b/src/task_monitor/tasks/insert_identities.rs
@@ -8,7 +8,7 @@ use tracing::info;
 use crate::app::App;
 use crate::database::methods::DbMethods as _;
 use crate::database::IsolationLevel;
-use crate::identity_tree::{TreeVersionOps, UnprocessedStatus};
+use crate::identity_tree::TreeVersionOps;
 
 // Insertion here differs from delete_identities task. This is because two
 // different flows are created for both tasks. We need to insert identities as
@@ -29,10 +29,7 @@ pub async fn insert_identities(
         info!("Insertion processor woken due to timeout.");
 
         // get commits from database
-        let unprocessed = app
-            .database
-            .get_unprocessed_commitments(UnprocessedStatus::New)
-            .await?;
+        let unprocessed = app.database.get_unprocessed_commitments().await?;
 
         if unprocessed.is_empty() {
             continue;
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 18835722..d79718b2 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -79,7 +79,7 @@ use semaphore::poseidon_tree::Proof;
 use signup_sequencer::identity_tree::{InclusionProof, TreeState, TreeVersionOps};
 use signup_sequencer::server::data::{
     AddBatchSizeRequest, DeletionRequest, InclusionProofRequest, InclusionProofResponse,
-    InsertCommitmentRequest, RecoveryRequest, RemoveBatchSizeRequest, VerifySemaphoreProofRequest,
+    InsertCommitmentRequest, RemoveBatchSizeRequest, VerifySemaphoreProofRequest,
 };
 use signup_sequencer::task_monitor::TaskMonitor;
 use testcontainers::clients::Cli;
@@ -596,19 +596,6 @@ fn construct_delete_identity_body(identity_commitment: &Hash) -> Body {
     )
 }
 
-pub fn construct_recover_identity_body(
-    previous_identity_commitment: &Hash,
-    new_identity_commitment: &Hash,
-) -> Body {
-    Body::from(
-        serde_json::to_string(&RecoveryRequest {
-            previous_identity_commitment: *previous_identity_commitment,
-            new_identity_commitment: *new_identity_commitment,
-        })
-        .expect("Cannot serialize RecoveryRequest"),
-    )
-}
-
 pub fn construct_insert_identity_body(identity_commitment: &Field) -> Body {
     Body::from(
         serde_json::to_string(&InsertCommitmentRequest {

From a17c95849f6f87b8aa742530b1c85b12a86985c4 Mon Sep 17 00:00:00 2001
From: Dzejkop
Date: Mon, 21 Oct 2024 20:29:41 +0200
Subject: [PATCH 3/5] chop chop

---
 src/config.rs               | 8 --------
 tests/common/test_config.rs | 1 -
 2 files changed, 9 deletions(-)

diff --git a/src/config.rs b/src/config.rs
index 8982af9e..1fac496a 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -272,10 +272,6 @@ pub mod default {
         100
     }
 
-    pub fn max_epoch_duration() -> Duration {
-        Duration::from_secs(0)
-    }
-
     pub fn scanning_window_size() -> u64 {
         100
     }
@@ -363,7 +359,6 @@ mod tests {
         batch_insertion_timeout = "3m"
         batch_deletion_timeout = "1h"
         min_batch_deletion_size = 100
-        max_epoch_duration = "0s"
         scanning_window_size = 100
         scanning_chain_head_offset = 0
         time_between_scans = "30s"
@@ -416,7 +411,6 @@ mod tests {
         batch_insertion_timeout = "3m"
         batch_deletion_timeout = "1h"
         min_batch_deletion_size = 100
-        max_epoch_duration = "0s"
         scanning_window_size = 100
         scanning_chain_head_offset = 0
         time_between_scans = "30s"

         SEQ__APP__BATCH_INSERTION_TIMEOUT=3m
         SEQ__APP__BATCH_DELETION_TIMEOUT=1h
         SEQ__APP__MIN_BATCH_DELETION_SIZE=100
-        SEQ__APP__MAX_EPOCH_DURATION=0s
         SEQ__APP__SCANNING_WINDOW_SIZE=100
         SEQ__APP__SCANNING_CHAIN_HEAD_OFFSET=0
         SEQ__APP__TIME_BETWEEN_SCANS=30s

         SEQ__APP__BATCH_INSERTION_TIMEOUT=3m
         SEQ__APP__BATCH_DELETION_TIMEOUT=1h
         SEQ__APP__MIN_BATCH_DELETION_SIZE=100
-        SEQ__APP__MAX_EPOCH_DURATION=0s
         SEQ__APP__SCANNING_WINDOW_SIZE=100
         SEQ__APP__SCANNING_CHAIN_HEAD_OFFSET=0
         SEQ__APP__TIME_BETWEEN_SCANS=30s
diff --git a/tests/common/test_config.rs b/tests/common/test_config.rs
index fb9ac187..e033e0d4 100644
--- a/tests/common/test_config.rs
+++ b/tests/common/test_config.rs
@@ -144,7 +144,6 @@ impl TestConfigBuilder {
             batch_insertion_timeout: self.batch_insertion_timeout,
             batch_deletion_timeout: self.batch_deletion_timeout,
             min_batch_deletion_size: self.min_batch_deletion_size,
-            max_epoch_duration: default::max_epoch_duration(),
             scanning_window_size: default::scanning_window_size(),
             scanning_chain_head_offset: default::scanning_chain_head_offset(),
             time_between_scans: Duration::from_secs(DEFAULT_TIME_BETWEEN_SCANS_SECONDS),

From f6bdbde19b41f16497d72082cb35b051df88f87b Mon Sep 17 00:00:00 2001
From: Dzejkop
Date: Tue, 22 Oct 2024 13:29:19 +0200
Subject: [PATCH 4/5] wip

---
 src/app.rs                  |  2 +-
 src/contracts/mod.rs        | 44 +++----------------------------------
 src/database/methods.rs     | 10 +++-------
 src/database/mod.rs         |  2 +-
 src/identity/processor.rs   | 11 +++-------
 src/identity_tree/status.rs |  6 ++---
 6 files changed, 14 insertions(+), 61 deletions(-)

diff --git a/src/app.rs b/src/app.rs
index a3e89d55..1a9519a3 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -16,7 +16,7 @@ use crate::identity::processor::{
 };
 use crate::identity::validator::IdentityValidator;
 use crate::identity_tree::initializer::TreeInitializer;
-use crate::identity_tree::{Hash, InclusionProof, RootItem, TreeState, TreeVersionOps};
+use crate::identity_tree::{Hash, RootItem, TreeState, TreeVersionOps};
 use crate::prover::map::initialize_prover_maps;
 use crate::prover::repository::ProverRepository;
 use crate::prover::{ProverConfig, ProverType};
diff --git a/src/contracts/mod.rs b/src/contracts/mod.rs
index 35c34bac..714abb97 100644
--- a/src/contracts/mod.rs
+++ b/src/contracts/mod.rs
@@ -2,18 +2,17 @@
 pub mod abi;
 pub mod scanner;
 
-use anyhow::{anyhow, bail, Context};
+use anyhow::{anyhow, bail};
 use ethers::providers::Middleware;
-use ethers::types::{H256, U256};
+use ethers::types::U256;
 use tracing::{error, info, instrument};
 
-use self::abi::{BridgedWorldId, DeleteIdentitiesCall, WorldId};
+use self::abi::{BridgedWorldId, WorldId};
 use crate::config::Config;
 use crate::ethereum::{Ethereum, ReadProvider};
 use crate::identity::processor::TransactionId;
 use crate::prover::identity::Identity;
 use crate::prover::Proof;
-use crate::utils::index_packing::unpack_indices;
 
 /// A structure representing the interface to the batch-based identity manager
 /// contract.
@@ -22,7 +21,6 @@ pub struct IdentityManager {
     ethereum: Ethereum,
     abi: WorldId<ReadProvider>,
     secondary_abis: Vec<BridgedWorldId<ReadProvider>>,
-    tree_depth: usize,
 }
 
 impl IdentityManager {
@@ -84,22 +82,15 @@ impl IdentityManager {
             secondary_abis.push(abi);
         }
 
-        let tree_depth = config.tree.tree_depth;
-
         let identity_manager = Self {
             ethereum,
             abi,
             secondary_abis,
-            tree_depth,
         };
 
         Ok(identity_manager)
     }
 
-    pub async fn root_history_expiry(&self) -> anyhow::Result<U256> {
-        Ok(self.abi.get_root_history_expiry().call().await?)
-    }
-
     #[instrument(level = "debug", skip(self, identity_commitments, proof_data))]
     pub async fn register_identities(
         &self,
@@ -171,35 +162,6 @@ impl IdentityManager {
         Ok(latest_root)
     }
 
-    /// Fetches the identity commitments from a
-    /// `deleteIdentities` transaction by tx hash
-    #[instrument(level = "debug", skip_all)]
-    pub async fn fetch_deletion_indices_from_tx(
-        &self,
-        tx_hash: H256,
-    ) -> anyhow::Result<Vec<usize>> {
-        let provider = self.ethereum.provider();
-
-        let tx = provider
-            .get_transaction(tx_hash)
-            .await?
-            .context("Missing tx")?;
-
-        use ethers::abi::AbiDecode;
-        let delete_identities = DeleteIdentitiesCall::decode(&tx.input)?;
-
-        let packed_deletion_indices: &[u8] = delete_identities.packed_deletion_indices.as_ref();
-        let indices = unpack_indices(packed_deletion_indices);
-
-        let padding_index = 2u32.pow(self.tree_depth as u32);
-
-        Ok(indices
-            .into_iter()
-            .filter(|idx| *idx != padding_index)
-            .map(|x| x as usize)
-            .collect())
-    }
-
     #[instrument(level = "debug", skip_all)]
     pub async fn is_root_mined(&self, root: U256) -> anyhow::Result<bool> {
         let (root_on_mainnet, ..) = self.abi.query_root(root).call().await?;
diff --git a/src/database/methods.rs b/src/database/methods.rs
index 1c556f76..333f008c 100644
--- a/src/database/methods.rs
+++ b/src/database/methods.rs
@@ -2,17 +2,13 @@ use std::collections::HashSet;
 
 use axum::async_trait;
 use chrono::{DateTime, Utc};
-use ruint::aliases::U256;
 use sqlx::{Acquire, Executor, Postgres, Row};
 use tracing::instrument;
-use types::{DeletionEntry};
 
-use super::types::{LatestDeletionEntry, LatestInsertionEntry};
+use super::types::{DeletionEntry, LatestDeletionEntry, LatestInsertionEntry};
 use crate::database::types::{BatchEntry, BatchEntryData, BatchType};
-use crate::database::{types, Error};
-use crate::identity_tree::{
-    Hash, ProcessedStatus, RootItem, TreeItem, TreeUpdate,
-};
+use crate::database::Error;
+use crate::identity_tree::{Hash, ProcessedStatus, RootItem, TreeItem, TreeUpdate};
 use crate::prover::identity::Identity;
 use crate::prover::{ProverConfig, ProverType};
diff --git a/src/database/mod.rs b/src/database/mod.rs
index 8ec7d54c..2b187367 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -182,7 +182,7 @@ mod test {
     use std::time::Duration;
 
     use anyhow::Context;
-    use chrono::{Days, Utc};
+    use chrono::Utc;
     use ethers::types::U256;
     use postgres_docker_utils::DockerContainer;
     use ruint::Uint;
diff --git a/src/identity/processor.rs b/src/identity/processor.rs
index 545802e3..4beb63a2 100644
--- a/src/identity/processor.rs
+++ b/src/identity/processor.rs
@@ -1,10 +1,8 @@
 use std::collections::HashMap;
 use std::sync::{Arc, Mutex};
-use std::time::Duration;
 
-use anyhow::{anyhow, Context};
+use anyhow::anyhow;
 use async_trait::async_trait;
-use chrono::Utc;
 use ethers::abi::RawLog;
 use ethers::addressbook::Address;
 use ethers::contract::EthEvent;
@@ -94,11 +92,8 @@ impl IdentityProcessor for OnChainIdentityProcessor {
     ) -> anyhow::Result<()> {
         let mainnet_logs = self.fetch_mainnet_logs().await?;
 
-        self.finalize_mainnet_roots(
-            processed_tree,
-            &mainnet_logs,
-        )
-        .await?;
+        self.finalize_mainnet_roots(processed_tree, &mainnet_logs)
+            .await?;
 
         let mut roots = Self::extract_roots_from_mainnet_logs(mainnet_logs);
         roots.extend(self.fetch_secondary_logs().await?);
diff --git a/src/identity_tree/status.rs b/src/identity_tree/status.rs
index f61aa140..db2b45d3 100644
--- a/src/identity_tree/status.rs
+++ b/src/identity_tree/status.rs
@@ -27,12 +27,12 @@ pub enum ProcessedStatus {
     Mined,
 }
 
-/// A status type visible on the API level - contains both the processed and
-/// unprocessed statuses
+/// A status type visible on the API level
+// TODO: Remove
 #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
 #[serde(rename_all = "camelCase")]
 #[serde(untagged)]
-pub enum Status { // TODO: Remove
+pub enum Status {
     Processed(ProcessedStatus),
 }

From d8b4fcdae8a113eacea4c242817d3116c0b434aa Mon Sep 17 00:00:00 2001
From: Dzejkop
Date: Tue, 22 Oct 2024 13:54:54 +0200
Subject: [PATCH 5/5] fix

---
 src/database/methods.rs | 2 +-
 src/database/mod.rs     | 6 ------
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/src/database/methods.rs b/src/database/methods.rs
index 333f008c..7b5e2b27 100644
--- a/src/database/methods.rs
+++ b/src/database/methods.rs
@@ -620,7 +620,7 @@ pub trait DbMethods<'c>: Acquire<'c, Database = Postgres> + Sized {
         let result: Vec<(Hash,)> = sqlx::query_as(
             r#"
             SELECT commitment FROM unprocessed_identities
-            LIMIT $2
+            LIMIT $1
             "#,
         )
         .bind(MAX_UNPROCESSED_FETCH_COUNT)
diff --git a/src/database/mod.rs b/src/database/mod.rs
index 2b187367..1907a8cb 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -947,12 +947,6 @@ mod test {
         let root_item_1 = db.get_root_state(&roots[1]).await?.unwrap();
 
         assert!(root_item_0.pending_valid_as_of < root_1_inserted_at);
-        println!("root_1_inserted_at = {root_1_inserted_at:?}");
-        println!(
-            "root_item_1.pending_valid_as_of = {:?}",
-            root_item_1.pending_valid_as_of
-        );
-
         assert_same_time!(root_item_1.pending_valid_as_of, root_1_inserted_at);
 
         // Test mined roots
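The final `[PATCH 5/5] fix` commit is a one-character change, but it is load-bearing: sqlx's Postgres placeholders are positional, and after patch 1 removed the `status` bind this query binds exactly one value, so `LIMIT $2` would fail at execution time. A minimal standalone sketch of the repaired query path — the constant value and the `Vec<u8>` commitment representation are simplifying assumptions for the example, not the crate's actual types:

```rust
use sqlx::PgConnection;

// Illustrative stand-in for the crate's MAX_UNPROCESSED_FETCH_COUNT constant.
const MAX_UNPROCESSED_FETCH_COUNT: i64 = 100;

// Commitments are stored as BYTEA, so they decode to Vec<u8> here instead of
// the crate's ruint-based Hash type.
async fn get_unprocessed_commitments(
    conn: &mut PgConnection,
) -> Result<Vec<Vec<u8>>, sqlx::Error> {
    let rows: Vec<(Vec<u8>,)> = sqlx::query_as(
        r#"
        SELECT commitment FROM unprocessed_identities
        LIMIT $1
        "#,
    )
    .bind(MAX_UNPROCESSED_FETCH_COUNT) // the only bind, hence $1 rather than $2
    .fetch_all(&mut *conn)
    .await?;

    Ok(rows.into_iter().map(|(commitment,)| commitment).collect())
}
```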