Dzejkop/better-safeguard #759

Merged
merged 2 commits into from Jul 4, 2024
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -56,7 +56,7 @@ jobs:
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --locked --all-targets
args: --workspace --tests --locked --all-targets
- name: Check docs
uses: actions-rs/cargo@v1
with:
12 changes: 6 additions & 6 deletions e2e_tests/scenarios/tests/common/docker_compose.rs
@@ -162,9 +162,9 @@ pub async fn setup(cwd: &str) -> anyhow::Result<DockerComposeGuard> {
let balancer_port = parse_exposed_port(stdout);
res.update_balancer_port(balancer_port);

_ = await_running(&res).await?;
await_running(&res).await?;

return Ok(res);
Ok(res)
}

fn generate_project_name() -> String {
@@ -186,7 +186,7 @@ async fn await_running(docker_compose_guard: &DockerComposeGuard<'_>) -> anyhow:
loop {
let healthy = check_health(docker_compose_guard.get_local_addr()).await;
if healthy.is_ok() && healthy.unwrap() {
success_counter = success_counter + 1;
success_counter += 1;
}

if success_counter >= min_success_counts {
@@ -214,7 +214,7 @@ async fn check_health(local_addr: String) -> anyhow::Result<bool> {

let response = client.request(healthcheck).await?;

return Ok(response.status() == StatusCode::OK);
Ok(response.status() == StatusCode::OK)
}

fn run_cmd_to_output(
@@ -237,7 +237,7 @@ fn run_cmd_to_output(

let output = command
.output()
.expect(&format!("Failed to run command: {}", cmd_str));
.with_context(|| format!("Failed to run command: {}", cmd_str))?;

let stdout_utf = String::from_utf8(output.stdout)?;
let stderr_utf = String::from_utf8(output.stderr)?;
@@ -261,7 +261,7 @@ fn parse_exposed_port(s: String) -> u32 {
parts
.last()
.unwrap()
.split(":")
.split(':')
.last()
.unwrap()
.parse::<u32>()
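
For context on the `.expect(&format!(..))` to `.with_context(|| ..)?` change above: `with_context` comes from the `anyhow::Context` trait, builds its message lazily, and turns a panic into a propagated error. A minimal standalone sketch of the same pattern, with a simplified signature and an illustrative command rather than the harness's actual invocation:

    use std::process::Command;

    use anyhow::Context;

    fn run_cmd_to_output(cmd_str: &str) -> anyhow::Result<String> {
        let output = Command::new("sh")
            .arg("-c")
            .arg(cmd_str)
            .output()
            // The closure only runs if `output()` fails, so the format! cost is not paid
            // on success, and a failure becomes an Err instead of a panic.
            .with_context(|| format!("Failed to run command: {}", cmd_str))?;

        Ok(String::from_utf8(output.stdout)?)
    }

    fn main() -> anyhow::Result<()> {
        println!("{}", run_cmd_to_output("echo hello")?);
        Ok(())
    }
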
4 changes: 2 additions & 2 deletions e2e_tests/scenarios/tests/common/mod.rs
@@ -119,7 +119,7 @@ pub async fn insert_identity_with_retries(
) -> anyhow::Result<()> {
let mut last_err = None;
for _ in 0..retries_count {
match insert_identity(&client, &uri, &commitment).await {
match insert_identity(client, uri, commitment).await {
Ok(_) => return Ok(()),
Err(err) => last_err = Some(err),
}
@@ -138,7 +138,7 @@ pub async fn mined_inclusion_proof_with_retries(
) -> anyhow::Result<InclusionProofResponse> {
let mut last_res = Err(anyhow!("No calls at all"));
for _i in 0..retries_count {
last_res = inclusion_proof(&client, &uri, &commitment).await;
last_res = inclusion_proof(client, uri, commitment).await;

if let Ok(ref inclusion_proof_json) = last_res {
if inclusion_proof_json.0.status == Status::Processed(Mined) {
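
The `&client` to `client` changes in the two retry helpers above drop a double reference: inside the helpers the parameters are already references, so re-borrowing them produces `&&T`, which clippy's `needless_borrow` lint flags. A small synchronous sketch, with hypothetical types and values, showing the same shape:

    struct Client;

    fn insert_identity(_client: &Client, _uri: &str, _commitment: &str) -> Result<(), String> {
        Ok(())
    }

    fn insert_identity_with_retries(
        client: &Client,
        uri: &str,
        commitment: &str,
        retries_count: usize,
    ) -> Result<(), String> {
        let mut last_err = None;
        for _ in 0..retries_count {
            // `client`, `uri` and `commitment` are already references here, so they are
            // passed through directly instead of as `&client`, `&uri`, `&commitment`.
            match insert_identity(client, uri, commitment) {
                Ok(()) => return Ok(()),
                Err(err) => last_err = Some(err),
            }
        }
        Err(last_err.unwrap_or_else(|| "no attempts made".to_string()))
    }

    fn main() {
        let client = Client;
        insert_identity_with_retries(&client, "http://localhost:8080", "0xdeadbeef", 3)
            .expect("insert should succeed");
    }
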
4 changes: 2 additions & 2 deletions e2e_tests/scenarios/tests/insert_100.rs
@@ -18,11 +18,11 @@ async fn insert_100() -> anyhow::Result<()> {
let identities = generate_test_commitments(10);

for commitment in identities.iter() {
_ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
}

for commitment in identities.iter() {
_ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
}

Ok(())
13 changes: 6 additions & 7 deletions e2e_tests/scenarios/tests/insert_delete_insert.rs
@@ -18,27 +18,26 @@ async fn insert_delete_insert() -> anyhow::Result<()> {
let identities = generate_test_commitments(10);

for commitment in identities.iter() {
_ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
}

for commitment in identities.iter() {
_ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
}

let first_commitment = identities.first().unwrap();

_ = delete_identity_with_retries(&client, &uri, &first_commitment, 10, 3.0).await?;
_ = bad_request_inclusion_proof_with_retries(&client, &uri, &first_commitment, 60, 10.0)
.await?;
delete_identity_with_retries(&client, &uri, first_commitment, 10, 3.0).await?;
bad_request_inclusion_proof_with_retries(&client, &uri, first_commitment, 60, 10.0).await?;

let new_identities = generate_test_commitments(10);

for commitment in new_identities.iter() {
_ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
}

for commitment in new_identities.iter() {
_ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
}

Ok(())
10 changes: 5 additions & 5 deletions e2e_tests/scenarios/tests/insert_restart_insert.rs
@@ -18,23 +18,23 @@ async fn insert_restart_insert() -> anyhow::Result<()> {
let identities = generate_test_commitments(10);

for commitment in identities.iter() {
_ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
}

for commitment in identities.iter() {
_ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
}

_ = docker_compose.restart_sequencer().await?;
docker_compose.restart_sequencer().await?;

let identities = generate_test_commitments(10);

for commitment in identities.iter() {
_ = insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
}

for commitment in identities.iter() {
_ = mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
}

Ok(())
18 changes: 12 additions & 6 deletions src/task_monitor/tasks/delete_identities.rs
@@ -54,12 +54,18 @@ pub async fn delete_identities(

let _guard = pending_insertions_mutex.lock().await;

if deletions.len() == 1 {
let last_insertion_idx = app.tree_state()?.latest_tree().next_leaf() - 1;

let only_deletion_idx = *leaf_indices.first().unwrap();

if only_deletion_idx == last_insertion_idx {
// Check if the deletion batch could potentially create a duplicate root batch
if let Some(last_leaf_index) = app.tree_state()?.latest_tree().next_leaf().checked_sub(1) {
let mut sorted_indices = leaf_indices.clone();
sorted_indices.sort();

let indices_are_continuous = sorted_indices.windows(2).all(|w| w[1] == w[0] + 1);

if indices_are_continuous && sorted_indices.last().unwrap() == &last_leaf_index {
tracing::warn!(
"Deletion batch could potentially create a duplicate root batch. Deletion \
batch will be postponed"
);
continue;
}
}
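
The behavioural core of the new guard in delete_identities.rs above can be read in isolation: a deletion batch is postponed when its leaf indices form a contiguous run ending at the most recently inserted leaf, because processing it would roll the tree back to a root that already exists. A standalone sketch of that condition (the function and parameter names are illustrative, not the sequencer's API):

    fn would_duplicate_root(leaf_indices: &[usize], next_leaf: usize) -> bool {
        // `next_leaf` is the index the next insertion would take; the last occupied
        // leaf is therefore `next_leaf - 1`, and an empty tree has nothing to guard.
        let Some(last_leaf_index) = next_leaf.checked_sub(1) else {
            return false;
        };

        let mut sorted_indices = leaf_indices.to_vec();
        sorted_indices.sort_unstable();

        // Contiguous run of indices, e.g. [3, 4] but not [1, 4].
        let indices_are_continuous = sorted_indices.windows(2).all(|w| w[1] == w[0] + 1);

        indices_are_continuous && sorted_indices.last() == Some(&last_leaf_index)
    }

    fn main() {
        // Leaves 0..=4 are occupied (next_leaf == 5). Deleting {4, 3} undoes the latest
        // insertions exactly, so the batch would reproduce an earlier root: postpone it.
        assert!(would_duplicate_root(&[4, 3], 5));

        // Deleting {1, 2} leaves leaf 4 in place, so the resulting root is new: process it.
        assert!(!would_duplicate_root(&[1, 2], 5));
    }
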
@@ -11,8 +11,16 @@ use crate::common::test_delete_identity;

const IDLE_TIME: u64 = 7;

/// This test ensures that we're safe against a scenario where we delete a set
/// of identities which were just inserted.
///
/// Example:
/// Let's say we insert identities at indexes 0, 1, 2, 3, 4 in batches of 0, 1,
/// 2 and 3, 4. We then delete identities at indexes 4 and 3 - this will result
/// in the same post root as the one after the insertion batch 0, 1, 2. This breaks
/// a central assumption that roots are unique.
#[tokio::test]
async fn immediate_deletion() -> anyhow::Result<()> {
async fn immediate_deletion_of_continuous_set_of_identities() -> anyhow::Result<()> {
// Initialize logging for the test.
init_tracing_subscriber();
info!("Starting integration test");
@@ -92,8 +100,7 @@ async fn immediate_deletion() -> anyhow::Result<()> {
.await;
}

// Only delete the last identity from the previous batch
// which would create a duplicate root
// Delete 2 identities from the top - this could create a duplicate root
test_delete_identity(
&uri,
&client,
Expand All @@ -103,6 +110,15 @@ async fn immediate_deletion() -> anyhow::Result<()> {
false,
)
.await;
test_delete_identity(
&uri,
&client,
&mut ref_tree,
&identities_ref,
insertion_batch_size - 2,
false,
)
.await;

tokio::time::sleep(Duration::from_secs(IDLE_TIME * 2)).await;

Expand All @@ -116,7 +132,8 @@ async fn immediate_deletion() -> anyhow::Result<()> {
)
.await;

// Delete another identity to trigger a deletion batch
// Delete another identity to trigger a deletion batch - crucially there must be
// gaps in deletions, or a new insertion must happen in between
test_delete_identity(&uri, &client, &mut ref_tree, &identities_ref, 0, false).await;

tokio::time::sleep(Duration::from_secs(IDLE_TIME * 2)).await;
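
To make the doc comment's example concrete, here is a toy model: a plain hash over the leaf vector stands in for the Merkle root, and deletion is modelled as resetting a leaf to the empty value. After inserting at 0..=4 in batches {0, 1, 2} and {3, 4}, deleting leaves 4 and 3 reproduces the root observed after the first insertion batch. This is only an illustration; the real tree hashes Merkle nodes, not a flat vector.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Stand-in for a Merkle root: any deterministic digest of the leaves shows the collision.
    fn root(leaves: &[u64]) -> u64 {
        let mut hasher = DefaultHasher::new();
        leaves.hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        const EMPTY: u64 = 0;
        let mut leaves = vec![EMPTY; 5];

        // Insertion batch {0, 1, 2}.
        for i in 0..3 {
            leaves[i] = 100 + i as u64;
        }
        let root_after_first_batch = root(&leaves);

        // Insertion batch {3, 4}.
        for i in 3..5 {
            leaves[i] = 100 + i as u64;
        }

        // Deletion batch {4, 3}: resets the two most recent leaves back to empty.
        leaves[4] = EMPTY;
        leaves[3] = EMPTY;

        // The leaves are back where they were, so the "root" repeats.
        assert_eq!(root(&leaves), root_after_first_batch);
    }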