Remove /identityHistory endpoint.
piohei committed May 29, 2024
1 parent 8f3941f commit e65f6e1
Showing 7 changed files with 10 additions and 691 deletions.
54 changes: 2 additions & 52 deletions src/app.rs
@@ -15,13 +15,12 @@ use crate::database::query::DatabaseQuery as _;
use crate::database::Database;
use crate::ethereum::Ethereum;
use crate::identity_tree::{
-    CanonicalTreeBuilder, Hash, InclusionProof, ProcessedStatus, RootItem, Status, TreeState,
-    TreeUpdate, TreeVersionReadOps, TreeWithNextVersion, UnprocessedStatus,
+    CanonicalTreeBuilder, Hash, InclusionProof, ProcessedStatus, RootItem, TreeState, TreeUpdate,
+    TreeVersionReadOps, TreeWithNextVersion,
};
use crate::prover::map::initialize_prover_maps;
use crate::prover::{ProverConfig, ProverType};
use crate::server::data::{
-    IdentityHistoryEntry, IdentityHistoryEntryKind, IdentityHistoryEntryStatus,
    InclusionProofResponse, ListBatchSizesResponse, VerifySemaphoreProofQuery,
    VerifySemaphoreProofRequest, VerifySemaphoreProofResponse,
};
@@ -540,55 +539,6 @@ impl App {
.await
}

pub async fn identity_history(
&self,
commitment: &Hash,
) -> Result<Vec<IdentityHistoryEntry>, ServerError> {
let entries = self
.database
.get_identity_history_entries(commitment)
.await?;

let mut history = vec![];

for entry in entries {
let mut status = match entry.status {
Status::Processed(ProcessedStatus::Pending) => IdentityHistoryEntryStatus::Pending,
Status::Processed(ProcessedStatus::Processed) => IdentityHistoryEntryStatus::Mined,
Status::Processed(ProcessedStatus::Mined) => IdentityHistoryEntryStatus::Bridged,
Status::Unprocessed(UnprocessedStatus::New) => IdentityHistoryEntryStatus::Buffered,
};

match status {
// A pending identity can be present in the batching tree and therefore status
// should be set to Batched
IdentityHistoryEntryStatus::Pending => {
if let Some(leaf_index) = entry.leaf_index {
if self.tree_state()?.get_batching_tree().get_leaf(leaf_index)
== entry.commitment
{
status = IdentityHistoryEntryStatus::Batched;
}
}
}
IdentityHistoryEntryStatus::Buffered if entry.held_back => {
status = IdentityHistoryEntryStatus::Queued;
}
_ => (),
}

let kind = if entry.commitment == Uint::ZERO {
IdentityHistoryEntryKind::Deletion
} else {
IdentityHistoryEntryKind::Insertion
};

history.push(IdentityHistoryEntry { kind, status });
}

Ok(history)
}

fn merge_env_provers(
prover_urls: &[ProverConfig],
existing_provers: &mut HashSet<ProverConfig>,
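For reference, the removed handler reduces to a small status-mapping rule. The sketch below restates it with simplified stand-in enums rather than the crate's actual Status and IdentityHistoryEntryStatus types: a pending identity already present in the batching tree reports as Batched, and a buffered identity whose eligibility timestamp is still in the future reports as Queued.

// Minimal, self-contained sketch of the mapping the removed
// `identity_history` handler performed; both enums are stand-ins.
#[derive(Debug, PartialEq)]
enum DbStatus { Pending, Processed, Mined, New }

#[derive(Debug, PartialEq)]
enum HistoryStatus { Pending, Batched, Mined, Bridged, Buffered, Queued }

fn map_status(db: DbStatus, in_batching_tree: bool, held_back: bool) -> HistoryStatus {
    match db {
        // A pending identity may already sit in the batching tree.
        DbStatus::Pending if in_batching_tree => HistoryStatus::Batched,
        DbStatus::Pending => HistoryStatus::Pending,
        DbStatus::Processed => HistoryStatus::Mined,
        DbStatus::Mined => HistoryStatus::Bridged,
        // A buffered identity with a future eligibility timestamp is queued.
        DbStatus::New if held_back => HistoryStatus::Queued,
        DbStatus::New => HistoryStatus::Buffered,
    }
}

fn main() {
    assert_eq!(map_status(DbStatus::Pending, true, false), HistoryStatus::Batched);
    assert_eq!(map_status(DbStatus::New, false, true), HistoryStatus::Queued);
}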
141 changes: 1 addition & 140 deletions src/database/mod.rs
@@ -165,7 +165,7 @@ mod test {
use crate::config::DatabaseConfig;
use crate::database::query::DatabaseQuery;
use crate::database::types::BatchType;
-use crate::identity_tree::{Hash, ProcessedStatus, Status, UnprocessedStatus};
+use crate::identity_tree::{Hash, ProcessedStatus, UnprocessedStatus};
use crate::prover::identity::Identity;
use crate::prover::{ProverConfig, ProverType};
use crate::utils::secret::SecretUrl;
@@ -1175,145 +1175,6 @@ mod test {
Ok(())
}

#[tokio::test]
async fn test_history_unprocessed_identities() -> anyhow::Result<()> {
let docker = Cli::default();
let (db, _db_container) = setup_db(&docker).await?;
let identities = mock_identities(2);

let now = Utc::now();

let insertion_timestamp = now - chrono::Duration::seconds(5);
db.insert_new_identity(identities[0], insertion_timestamp)
.await?;

let insertion_timestamp = now + chrono::Duration::seconds(5);
db.insert_new_identity(identities[1], insertion_timestamp)
.await?;

let history = db.get_identity_history_entries(&identities[0]).await?;

assert_eq!(history.len(), 1);
assert_eq!(
history[0].status,
Status::Unprocessed(UnprocessedStatus::New)
);
assert!(!history[0].held_back, "Identity should not be held back");
assert_eq!(history[0].leaf_index, None);

let history = db.get_identity_history_entries(&identities[1]).await?;

assert_eq!(history.len(), 1);
assert_eq!(
history[0].status,
Status::Unprocessed(UnprocessedStatus::New)
);
assert!(history[0].held_back, "Identity should be held back");
assert_eq!(history[0].leaf_index, None);

Ok(())
}

#[tokio::test]
async fn test_history_unprocessed_deletion_identities() -> anyhow::Result<()> {
let docker = Cli::default();
let (db, _db_container) = setup_db(&docker).await?;
let identities = mock_identities(2);
let roots = mock_roots(2);

db.insert_pending_identity(0, &identities[0], &roots[0])
.await?;
db.mark_root_as_mined_tx(&roots[0]).await?;

db.insert_new_deletion(0, &identities[0]).await?;

let history = db.get_identity_history_entries(&identities[0]).await?;

assert_eq!(history.len(), 2);

assert_eq!(history[0].status, Status::Processed(ProcessedStatus::Mined));
assert_eq!(history[0].commitment, identities[0]);
assert_eq!(history[0].leaf_index, Some(0));
assert!(!history[0].held_back, "Identity should not be held back");

assert_eq!(
history[1].status,
Status::Unprocessed(UnprocessedStatus::New)
);
assert_eq!(history[1].commitment, Hash::ZERO);
assert_eq!(history[1].leaf_index, Some(0));
assert!(!history[1].held_back, "Identity should not be held back");

Ok(())
}

#[tokio::test]
async fn test_history_processed_deletion_identities() -> anyhow::Result<()> {
let docker = Cli::default();
let (db, _db_container) = setup_db(&docker).await?;
let identities = mock_identities(2);
let roots = mock_roots(2);

db.insert_pending_identity(0, &identities[0], &roots[0])
.await?;
db.insert_pending_identity(0, &Hash::ZERO, &roots[1])
.await?;

db.mark_root_as_mined_tx(&roots[1]).await?;

let history = db.get_identity_history_entries(&identities[0]).await?;

assert_eq!(history.len(), 2);

assert_eq!(history[0].status, Status::Processed(ProcessedStatus::Mined));
assert_eq!(history[0].commitment, identities[0]);
assert_eq!(history[0].leaf_index, Some(0));
assert!(!history[0].held_back, "Identity should not be held back");

assert_eq!(history[1].status, Status::Processed(ProcessedStatus::Mined));
assert_eq!(history[1].commitment, Hash::ZERO);
assert_eq!(history[1].leaf_index, Some(0));
assert!(!history[1].held_back, "Identity should not be held back");

Ok(())
}

#[tokio::test]
async fn test_history_processed_identity() -> anyhow::Result<()> {
let docker = Cli::default();
let (db, _db_container) = setup_db(&docker).await?;
let identities = mock_identities(2);
let roots = mock_roots(2);

db.insert_pending_identity(0, &identities[0], &roots[0])
.await?;

let history = db.get_identity_history_entries(&identities[0]).await?;

assert_eq!(history.len(), 1);

assert_eq!(
history[0].status,
Status::Processed(ProcessedStatus::Pending)
);
assert_eq!(history[0].commitment, identities[0]);
assert_eq!(history[0].leaf_index, Some(0));
assert!(!history[0].held_back, "Identity should not be held back");

db.mark_root_as_mined_tx(&roots[0]).await?;

let history = db.get_identity_history_entries(&identities[0]).await?;

assert_eq!(history.len(), 1);

assert_eq!(history[0].status, Status::Processed(ProcessedStatus::Mined));
assert_eq!(history[0].commitment, identities[0]);
assert_eq!(history[0].leaf_index, Some(0));
assert!(!history[0].held_back, "Identity should not be held back");

Ok(())
}

#[tokio::test]
async fn can_insert_same_root_multiple_times() -> anyhow::Result<()> {
let docker = Cli::default();
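One convention the deleted tests pin down: a deletion appears in history as an entry whose commitment is the zero hash, recorded at the same leaf index as the original insertion. A stand-in illustration (the crate's Hash is a field-element type, not a byte array as used here):

// Stand-in sketch of the zero-commitment deletion convention the
// deleted tests assert; `Hash` here is a placeholder type.
type Hash = [u8; 32];

const ZERO: Hash = [0u8; 32];

fn entry_kind(commitment: &Hash) -> &'static str {
    if *commitment == ZERO { "deletion" } else { "insertion" }
}

fn main() {
    assert_eq!(entry_kind(&ZERO), "deletion");
    assert_eq!(entry_kind(&[1u8; 32]), "insertion");
}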
93 changes: 2 additions & 91 deletions src/database/transaction.rs
@@ -1,11 +1,9 @@
-use chrono::{DateTime, Utc};
-use sqlx::{Executor, Row};
+use sqlx::Executor;
use tracing::instrument;

use crate::database::query::DatabaseQuery;
-use crate::database::types::CommitmentHistoryEntry;
use crate::database::{Database, Error};
-use crate::identity_tree::{Hash, ProcessedStatus, UnprocessedStatus};
+use crate::identity_tree::{Hash, ProcessedStatus};
use crate::utils::retry_tx;

/// impl block for database transactions
@@ -86,91 +84,4 @@ impl Database {
})
.await
}

pub async fn get_identity_history_entries(
&self,
commitment: &Hash,
) -> Result<Vec<CommitmentHistoryEntry>, Error> {
let unprocessed = sqlx::query(
r#"
SELECT commitment, status, eligibility
FROM unprocessed_identities
WHERE commitment = $1
"#,
)
.bind(commitment);

let rows = self.pool.fetch_all(unprocessed).await?;
let unprocessed_updates = rows
.into_iter()
.map(|row| {
let eligibility_timestamp: DateTime<Utc> = row.get(2);
let held_back = Utc::now() < eligibility_timestamp;

CommitmentHistoryEntry {
leaf_index: None,
commitment: row.get::<Hash, _>(0),
held_back,
status: row
.get::<&str, _>(1)
.parse()
.expect("Failed to parse unprocessed status"),
}
})
.collect::<Vec<CommitmentHistoryEntry>>();

let leaf_index = self.get_identity_leaf_index(commitment).await?;
let Some(leaf_index) = leaf_index else {
return Ok(unprocessed_updates);
};

let identity_deletions = sqlx::query(
r#"
SELECT commitment
FROM deletions
WHERE leaf_index = $1
"#,
)
.bind(leaf_index.leaf_index as i64);

let rows = self.pool.fetch_all(identity_deletions).await?;
let deletions = rows
.into_iter()
.map(|_row| CommitmentHistoryEntry {
leaf_index: Some(leaf_index.leaf_index),
commitment: Hash::ZERO,
held_back: false,
status: UnprocessedStatus::New.into(),
})
.collect::<Vec<CommitmentHistoryEntry>>();

let processed_updates = sqlx::query(
r#"
SELECT commitment, status
FROM identities
WHERE leaf_index = $1
ORDER BY id ASC
"#,
)
.bind(leaf_index.leaf_index as i64);

let rows = self.pool.fetch_all(processed_updates).await?;
let processed_updates: Vec<CommitmentHistoryEntry> = rows
.into_iter()
.map(|row| CommitmentHistoryEntry {
leaf_index: Some(leaf_index.leaf_index),
commitment: row.get::<Hash, _>(0),
held_back: false,
status: row
.get::<&str, _>(1)
.parse()
.expect("Status is unreadable, database is corrupt"),
})
.collect();

Ok([processed_updates, unprocessed_updates, deletions]
.concat()
.into_iter()
.collect())
}
}
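Two details of the removed query are worth keeping in mind. First, the held_back flag was its only derived state: an unprocessed identity counts as held back while its eligibility timestamp lies in the future. A minimal sketch of that rule, assuming the same chrono types as the original:

// An unprocessed identity is "held back" while its eligibility
// timestamp is still in the future (see the deleted query above).
use chrono::{DateTime, Duration, Utc};

fn is_held_back(eligibility: DateTime<Utc>) -> bool {
    Utc::now() < eligibility
}

fn main() {
    assert!(!is_held_back(Utc::now() - Duration::seconds(5))); // already eligible
    assert!(is_held_back(Utc::now() + Duration::seconds(5))); // still held back
}

Second, the merge order was [processed_updates, unprocessed_updates, deletions].concat(), which is why the deleted tests in src/database/mod.rs expect the mined insertion before the zero-commitment deletion entry.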
12 changes: 1 addition & 11 deletions src/database/types.rs
@@ -6,7 +6,7 @@ use sqlx::error::BoxDynError;
use sqlx::prelude::FromRow;
use sqlx::{Decode, Encode, Postgres, Type};

-use crate::identity_tree::{Hash, Status, UnprocessedStatus};
+use crate::identity_tree::{Hash, UnprocessedStatus};
use crate::prover::identity::Identity;

pub struct UnprocessedCommitment {
@@ -34,16 +34,6 @@ pub struct DeletionEntry {
pub commitment: Hash,
}

#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub struct CommitmentHistoryEntry {
pub commitment: Hash,
pub leaf_index: Option<usize>,
// Only applies to buffered entries
// set to true if the eligibility timestamp is in the future
pub held_back: bool,
pub status: Status,
}

#[derive(Debug, Copy, Clone, sqlx::Type, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[sqlx(type_name = "VARCHAR", rename_all = "PascalCase")]
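The Option<usize> on leaf_index encoded tree membership: the removed query left it as None for commitments still in the unprocessed buffer and filled it in once the identity occupied a leaf. A placeholder sketch of those two states:

// Placeholder stand-in for the removed CommitmentHistoryEntry; the
// crate's Hash and Status types are simplified away here.
#[derive(Debug)]
struct Entry {
    leaf_index: Option<usize>, // None until the identity is in the tree
    held_back: bool,
    status: &'static str,
}

fn main() {
    let buffered = Entry { leaf_index: None, held_back: true, status: "new" };
    let mined = Entry { leaf_index: Some(0), held_back: false, status: "mined" };
    println!("{buffered:?} then {mined:?}");
}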
18 changes: 4 additions & 14 deletions src/server.rs
@@ -22,10 +22,10 @@ mod custom_middleware
pub mod data;

use self::data::{
-    AddBatchSizeRequest, DeletionRequest, IdentityHistoryRequest, IdentityHistoryResponse,
-    InclusionProofRequest, InclusionProofResponse, InsertCommitmentRequest, ListBatchSizesResponse,
-    RecoveryRequest, RemoveBatchSizeRequest, ToResponseCode, VerifySemaphoreProofQuery,
-    VerifySemaphoreProofRequest, VerifySemaphoreProofResponse,
+    AddBatchSizeRequest, DeletionRequest, InclusionProofRequest, InclusionProofResponse,
+    InsertCommitmentRequest, ListBatchSizesResponse, RecoveryRequest, RemoveBatchSizeRequest,
+    ToResponseCode, VerifySemaphoreProofQuery, VerifySemaphoreProofRequest,
+    VerifySemaphoreProofResponse,
};

async fn inclusion_proof(
@@ -104,15 +104,6 @@ async fn recover_identity(
Ok(())
}

async fn identity_history(
State(app): State<Arc<App>>,
Json(req): Json<IdentityHistoryRequest>,
) -> Result<Json<IdentityHistoryResponse>, Error> {
let history = app.identity_history(&req.identity_commitment).await?;

Ok(Json(IdentityHistoryResponse { history }))
}

async fn remove_batch_size(
State(app): State<Arc<App>>,
Json(req): Json<RemoveBatchSizeRequest>,
@@ -182,7 +173,6 @@ pub async fn bind_from_listener(
.route("/insertIdentity", post(insert_identity))
.route("/deleteIdentity", post(delete_identity))
.route("/recoverIdentity", post(recover_identity))
.route("/identityHistory", post(identity_history))
// Operate on batch sizes
.route("/addBatchSize", post(add_batch_size))
.route("/removeBatchSize", post(remove_batch_size))
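After this commit, a POST to the old route falls through to axum's default fallback and returns 404. A hypothetical client-side probe, assuming a reqwest-based client on an assumed local address and an assumed camelCase field name (the old IdentityHistoryRequest shape is not shown in this diff):

// Hypothetical probe that the route is gone; the address and the
// request field name are illustrative assumptions.
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let resp = reqwest::Client::new()
        .post("http://localhost:8080/identityHistory") // assumed address
        .json(&json!({ "identityCommitment": "0x1" })) // assumed field name
        .send()
        .await?;
    assert_eq!(resp.status(), reqwest::StatusCode::NOT_FOUND);
    Ok(())
}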