From 1616e6cffbd6ad977902a7a930222caddc13da22 Mon Sep 17 00:00:00 2001
From: 0xKitsune <0xKitsune@protonmail.com>
Date: Mon, 25 Sep 2023 17:14:18 -0400
Subject: [PATCH] added logic to trim deletions to max deletion batch size,
 added logic to configure batch size during process identities

---
 src/contracts/mod.rs                         |  6 +++++-
 src/task_monitor/tasks/delete_identities.rs  | 20 +++++++++++++++++---
 src/task_monitor/tasks/process_identities.rs |  4 +++-
 3 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/src/contracts/mod.rs b/src/contracts/mod.rs
index 84737307..c451eade 100644
--- a/src/contracts/mod.rs
+++ b/src/contracts/mod.rs
@@ -146,10 +146,14 @@ impl IdentityManager {
         self.tree_depth
     }
 
-    pub async fn max_batch_size(&self) -> usize {
+    pub async fn max_insertion_batch_size(&self) -> usize {
         self.insertion_prover_map.read().await.max_batch_size()
     }
 
+    pub async fn max_deletion_batch_size(&self) -> usize {
+        self.deletion_prover_map.read().await.max_batch_size()
+    }
+
     #[must_use]
     pub const fn initial_leaf_value(&self) -> Field {
         self.initial_leaf_value
diff --git a/src/task_monitor/tasks/delete_identities.rs b/src/task_monitor/tasks/delete_identities.rs
index 83b8a984..53905eeb 100644
--- a/src/task_monitor/tasks/delete_identities.rs
+++ b/src/task_monitor/tasks/delete_identities.rs
@@ -9,6 +9,7 @@ use tracing::info;
 use crate::database::types::DeletionEntry;
 use crate::database::Database;
 use crate::identity_tree::{Hash, Latest, TreeVersion};
+use crate::prover::{ProverConfiguration, ProverType};
 
 pub struct DeleteIdentities {
     database: Arc<Database>,
@@ -61,9 +62,6 @@ async fn delete_identities(
     loop {
         let deletions = database.get_deletions().await?;
         if deletions.is_empty() {
-            // Sleep for one hour
-            // TODO: should we make this dynamic? This causes an issue with tests so its set
-            // to 1 sec atm
             tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
             continue;
         }
@@ -78,6 +76,22 @@ async fn delete_identities(
         // Dedup deletion entries
         let deletions = deletions.into_iter().collect::<HashSet<DeletionEntry>>();
 
+        // Get the max batch size for deletion provers
+        let max_batch_size = database
+            .get_provers()
+            .await?
+            .iter()
+            .filter(|p| p.prover_type == ProverType::Deletion)
+            .max_by_key(|p| p.batch_size)
+            .map(|p| p.batch_size)
+            .expect("Could not get max batch size");
+
+        // Trim deletions to the max deletion batch size available
+        let deletions = deletions
+            .into_iter()
+            .take(max_batch_size)
+            .collect::<Vec<DeletionEntry>>();
+
         let (leaf_indices, previous_commitments): (Vec<usize>, Vec<Hash>) = deletions
             .iter()
             .map(|d| (d.leaf_index, d.commitment))
diff --git a/src/task_monitor/tasks/process_identities.rs b/src/task_monitor/tasks/process_identities.rs
index f9e57460..85834477 100644
--- a/src/task_monitor/tasks/process_identities.rs
+++ b/src/task_monitor/tasks/process_identities.rs
@@ -72,7 +72,9 @@ async fn process_identities(
     identity_manager.await_clean_slate().await?;
 
     info!("Starting identity processor.");
-    let batch_size = identity_manager.max_batch_size().await;
+    let insertion_batch_size = identity_manager.max_insertion_batch_size().await;
+    let deletion_batch_size = identity_manager.max_deletion_batch_size().await;
+    let batch_size = std::cmp::max(insertion_batch_size, deletion_batch_size);
 
     // We start a timer and force it to perform one initial tick to avoid an
     // immediate trigger.