diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index aebbef0d..e8c168b5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -56,7 +56,7 @@ jobs: - uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} - args: --locked --all-targets + args: --workspace --tests --locked --all-targets - name: Check docs uses: actions-rs/cargo@v1 with: diff --git a/e2e_tests/scenarios/tests/common/docker_compose.rs b/e2e_tests/scenarios/tests/common/docker_compose.rs index 81af9baf..029358d4 100644 --- a/e2e_tests/scenarios/tests/common/docker_compose.rs +++ b/e2e_tests/scenarios/tests/common/docker_compose.rs @@ -162,9 +162,9 @@ pub async fn setup(cwd: &str) -> anyhow::Result { let balancer_port = parse_exposed_port(stdout); res.update_balancer_port(balancer_port); - _ = await_running(&res).await?; + await_running(&res).await?; - return Ok(res); + Ok(res) } fn generate_project_name() -> String { @@ -186,7 +186,7 @@ async fn await_running(docker_compose_guard: &DockerComposeGuard<'_>) -> anyhow: loop { let healthy = check_health(docker_compose_guard.get_local_addr()).await; if healthy.is_ok() && healthy.unwrap() { - success_counter = success_counter + 1; + success_counter += 1; } if success_counter >= min_success_counts { @@ -214,7 +214,7 @@ async fn check_health(local_addr: String) -> anyhow::Result { let response = client.request(healthcheck).await?; - return Ok(response.status() == StatusCode::OK); + Ok(response.status() == StatusCode::OK) } fn run_cmd_to_output( @@ -237,7 +237,7 @@ fn run_cmd_to_output( let output = command .output() - .expect(&format!("Failed to run command: {}", cmd_str)); + .with_context(|| format!("Failed to run command: {}", cmd_str))?; let stdout_utf = String::from_utf8(output.stdout)?; let stderr_utf = String::from_utf8(output.stderr)?; @@ -261,7 +261,7 @@ fn parse_exposed_port(s: String) -> u32 { parts .last() .unwrap() - .split(":") + .split(':') .last() .unwrap() .parse::() diff --git 
a/e2e_tests/scenarios/tests/common/mod.rs b/e2e_tests/scenarios/tests/common/mod.rs index 55219746..9d1eebe2 100644 --- a/e2e_tests/scenarios/tests/common/mod.rs +++ b/e2e_tests/scenarios/tests/common/mod.rs @@ -119,7 +119,7 @@ pub async fn insert_identity_with_retries( ) -> anyhow::Result<()> { let mut last_err = None; for _ in 0..retries_count { - match insert_identity(&client, &uri, &commitment).await { + match insert_identity(client, uri, commitment).await { Ok(_) => return Ok(()), Err(err) => last_err = Some(err), } @@ -138,7 +138,7 @@ pub async fn mined_inclusion_proof_with_retries( ) -> anyhow::Result { let mut last_res = Err(anyhow!("No calls at all")); for _i in 0..retries_count { - last_res = inclusion_proof(&client, &uri, &commitment).await; + last_res = inclusion_proof(client, uri, commitment).await; if let Ok(ref inclusion_proof_json) = last_res { if inclusion_proof_json.0.status == Status::Processed(Mined) { diff --git a/e2e_tests/scenarios/tests/insert_100.rs b/e2e_tests/scenarios/tests/insert_100.rs index bd6861e6..2710fc77 100644 --- a/e2e_tests/scenarios/tests/insert_100.rs +++ b/e2e_tests/scenarios/tests/insert_100.rs @@ -18,11 +18,11 @@ async fn insert_100() -> anyhow::Result<()> { let identities = generate_test_commitments(10); for commitment in identities.iter() { - _ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; + insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; } for commitment in identities.iter() { - _ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; + mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; } Ok(()) diff --git a/e2e_tests/scenarios/tests/insert_delete_insert.rs b/e2e_tests/scenarios/tests/insert_delete_insert.rs index 8708bf76..ef604fd5 100644 --- a/e2e_tests/scenarios/tests/insert_delete_insert.rs +++ b/e2e_tests/scenarios/tests/insert_delete_insert.rs @@ -18,27 +18,26 @@ async fn insert_delete_insert() -> 
anyhow::Result<()> { let identities = generate_test_commitments(10); for commitment in identities.iter() { - _ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; + insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; } for commitment in identities.iter() { - _ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; + mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; } let first_commitment = identities.first().unwrap(); - _ = delete_identity_with_retries(&client, &uri, &first_commitment, 10, 3.0).await?; - _ = bad_request_inclusion_proof_with_retries(&client, &uri, &first_commitment, 60, 10.0) - .await?; + delete_identity_with_retries(&client, &uri, first_commitment, 10, 3.0).await?; + bad_request_inclusion_proof_with_retries(&client, &uri, first_commitment, 60, 10.0).await?; let new_identities = generate_test_commitments(10); for commitment in new_identities.iter() { - _ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; + insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; } for commitment in new_identities.iter() { - _ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; + mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; } Ok(()) diff --git a/e2e_tests/scenarios/tests/insert_restart_insert.rs b/e2e_tests/scenarios/tests/insert_restart_insert.rs index 49c6f426..55948bf6 100644 --- a/e2e_tests/scenarios/tests/insert_restart_insert.rs +++ b/e2e_tests/scenarios/tests/insert_restart_insert.rs @@ -18,23 +18,23 @@ async fn insert_restart_insert() -> anyhow::Result<()> { let identities = generate_test_commitments(10); for commitment in identities.iter() { - _ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; + insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; } for commitment in identities.iter() { - _ = 
mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; + mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; } - _ = docker_compose.restart_sequencer().await?; + docker_compose.restart_sequencer().await?; let identities = generate_test_commitments(10); for commitment in identities.iter() { - _ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; + insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?; } for commitment in identities.iter() { - _ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; + mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?; } Ok(()) diff --git a/src/task_monitor/tasks/delete_identities.rs b/src/task_monitor/tasks/delete_identities.rs index 3693254f..a499553c 100644 --- a/src/task_monitor/tasks/delete_identities.rs +++ b/src/task_monitor/tasks/delete_identities.rs @@ -54,12 +54,18 @@ pub async fn delete_identities( let _guard = pending_insertions_mutex.lock().await; - if deletions.len() == 1 { - let last_insertion_idx = app.tree_state()?.latest_tree().next_leaf() - 1; - - let only_deletion_idx = *leaf_indices.first().unwrap(); - - if only_deletion_idx == last_insertion_idx { + // Check if the deletion batch could potentially create a duplicate root batch + if let Some(last_leaf_index) = app.tree_state()?.latest_tree().next_leaf().checked_sub(1) { + let mut sorted_indices = leaf_indices.clone(); + sorted_indices.sort(); + + let indices_are_continuous = sorted_indices.windows(2).all(|w| w[1] == w[0] + 1); + + if indices_are_continuous && sorted_indices.last().unwrap() == &last_leaf_index { + tracing::warn!( + "Deletion batch could potentially create a duplicate root batch. 
Deletion \ + batch will be postponed" + ); continue; } } diff --git a/tests/immediate_deletion.rs b/tests/immediate_deletion_of_continuous_set_of_identities.rs similarity index 81% rename from tests/immediate_deletion.rs rename to tests/immediate_deletion_of_continuous_set_of_identities.rs index 1b0a6414..d08fa119 100644 --- a/tests/immediate_deletion.rs +++ b/tests/immediate_deletion_of_continuous_set_of_identities.rs @@ -11,8 +11,16 @@ use crate::common::test_delete_identity; const IDLE_TIME: u64 = 7; +/// This test ensures that we're safe against a scenario where we delete a set +/// of identities which were just inserted. +/// +/// Example: +/// Let's say we insert identities at indexes 0, 1, 2, 3, 4 in batches of 0, 1, +/// 2 and 3, 4. We then delete identities at indexes 4 and 3 - this will result +/// in the same batch post root as from the insertion batch 0, 1, 2. This breaks +/// a central assumption that roots are unique. #[tokio::test] -async fn immediate_deletion() -> anyhow::Result<()> { +async fn immediate_deletion_of_continuous_set_of_identities() -> anyhow::Result<()> { // Initialize logging for the test. 
init_tracing_subscriber(); info!("Starting integration test"); @@ -92,8 +100,7 @@ async fn immediate_deletion() -> anyhow::Result<()> { .await; } - // Only delete the last identity from the previous batch - // which would create a duplicate root + // Delete 2 identities from the top - this could create a duplicate root test_delete_identity( &uri, &client, @@ -103,6 +110,15 @@ async fn immediate_deletion() -> anyhow::Result<()> { false, ) .await; + test_delete_identity( + &uri, + &client, + &mut ref_tree, + &identities_ref, + insertion_batch_size - 2, + false, + ) + .await; tokio::time::sleep(Duration::from_secs(IDLE_TIME * 2)).await; @@ -116,7 +132,8 @@ async fn immediate_deletion() -> anyhow::Result<()> { ) .await; - // Delete another identity to trigger a deletion batch + // Delete another identity to trigger a deletion batch - crucially there must be + // gaps in deletions, or a new insertion must happen in between test_delete_identity(&uri, &client, &mut ref_tree, &identities_ref, 0, false).await; tokio::time::sleep(Duration::from_secs(IDLE_TIME * 2)).await;