Skip to content

Commit

Permalink
WIP
Browse files Browse the repository at this point in the history
  • Loading branch information
Dzejkop committed Oct 21, 2024
1 parent e5a3b2f commit c5cab66
Show file tree
Hide file tree
Showing 10 changed files with 67 additions and 345 deletions.
11 changes: 4 additions & 7 deletions Readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,17 +37,14 @@ Sequencer has 7 API routes.
indeed in the tree. The inclusion proof is then returned to the API caller.
3. `/deleteIdentity` - Takes an identity commitment hash, ensures that it exists and hasn't been deleted yet. This
identity is then scheduled for deletion.
4. `/recoverIdentity` - Takes two identity commitment hashes. The first must exist and will be scheduled for deletion
and the other will be inserted as a replacement after the first identity has been deleted and a set amount of time (
depends on configuration parameters) has passed.
5. `/verifySemaphoreProof` - This call takes root, signal hash, nullifier hash, external nullifier hash and a proof.
4. `/verifySemaphoreProof` - This call takes root, signal hash, nullifier hash, external nullifier hash and a proof.
The proving key is fetched based on the depth index, and verification key as well.
The list of prime fields is created based on request input mentioned before, and then we proceed to verify the proof.
Sequencer uses groth16 zk-SNARK implementation.
The API call returns the proof as a response.
6. `/addBatchSize` - Adds a prover with specific batch size to a list of provers.
7. `/removeBatchSize` - Removes the prover based on batch size.
8. `/listBatchSizes` - Lists all provers that are added to the Sequencer.
5. `/addBatchSize` - Adds a prover with specific batch size to a list of provers.
6. `/removeBatchSize` - Removes the prover based on batch size.
7. `/listBatchSizes` - Lists all provers that are added to the Sequencer.

## Getting Started

Expand Down
13 changes: 13 additions & 0 deletions schemas/database/016_remove_recovery.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
-- Reverts migration 016: restores the recoveries table and the
-- queue-bookkeeping columns that the matching .up.sql removed from
-- unprocessed_identities.
CREATE TABLE recoveries (
    existing_commitment BYTEA NOT NULL UNIQUE,
    new_commitment BYTEA NOT NULL UNIQUE
);

-- A NOT NULL column added without a DEFAULT fails on any non-empty
-- table, which would make this down migration unrunnable on a live
-- database. Backfill existing rows with the 'new' status
-- (UnprocessedStatus::New -- TODO confirm the exact string), then drop
-- the default so the restored schema matches the original exactly.
ALTER TABLE unprocessed_identities
ADD COLUMN eligibility TIMESTAMPTZ,
ADD COLUMN status VARCHAR(50) NOT NULL DEFAULT 'new',
ADD COLUMN processed_at TIMESTAMPTZ,
ADD COLUMN error_message TEXT;

ALTER TABLE unprocessed_identities
ALTER COLUMN status DROP DEFAULT;

ALTER TABLE unprocessed_identities
DROP CONSTRAINT unique_commitment;
11 changes: 11 additions & 0 deletions schemas/database/016_remove_recovery.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
-- Migration 016: remove identity-recovery support.
DROP TABLE recoveries;

-- Drop the queue-bookkeeping columns; the table now only tracks
-- commitments awaiting insertion into the tree.
ALTER TABLE unprocessed_identities
DROP COLUMN eligibility,
DROP COLUMN status,
DROP COLUMN processed_at,
DROP COLUMN error_message;

-- NOTE(review): this ADD CONSTRAINT fails if duplicate commitments
-- already exist in the table -- confirm the data is deduplicated (or
-- deduplicate here) before running this in production.
ALTER TABLE unprocessed_identities
ADD CONSTRAINT unique_commitment UNIQUE (commitment);

29 changes: 0 additions & 29 deletions schemas/openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -62,26 +62,6 @@ paths:
schema:
description: 'Identity could not be queued for deletion'
type: 'string'
/recoverIdentity:
post:
summary: 'Queues a recovery request, deleting the previous identity specified and inserting the new one.
New insertions must wait a specified time delay before being included in the merkle tree'
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/RecoveryRequest'
responses:
'202':
description: 'Identity has been successfully queued for recovery'
'400':
description: 'Invalid request'
content:
application/json:
schema:
description: 'Identity could not be queued for recovery'
type: 'string'
/inclusionProof:
post:
summary: 'Get Merkle inclusion proof'
Expand Down Expand Up @@ -152,15 +132,6 @@ paths:

components:
schemas:
RecoveryRequest:
type: object
properties:
previousIdentityCommitment:
type: string
pattern: '^[A-F0-9]{64}$'
newIdentityCommitment:
type: string
pattern: '^[A-F0-9]{64}$'
IdentityCommitment:
type: object
properties:
Expand Down
4 changes: 4 additions & 0 deletions src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,11 @@ impl App {
return Err(ServerError::DuplicateCommitment);
}

tx.insert_unprocessed_identity(commitment).await?;

tx.commit().await?;

Expand Down
110 changes: 5 additions & 105 deletions src/database/methods.rs
Original file line number Diff line number Diff line change
Expand Up @@ -475,50 +475,23 @@ pub trait DbMethods<'c>: Acquire<'c, Database = Postgres> + Sized {
}

#[instrument(skip(self), level = "debug")]
async fn insert_new_identity(
self,
identity: Hash,
eligibility_timestamp: sqlx::types::chrono::DateTime<Utc>,
) -> Result<Hash, Error> {
async fn insert_unprocessed_identity(self, identity: Hash) -> Result<Hash, Error> {
let mut conn = self.acquire().await?;

sqlx::query(
r#"
INSERT INTO unprocessed_identities (commitment, status, created_at, eligibility)
VALUES ($1, $2, CURRENT_TIMESTAMP, $3)
INSERT INTO unprocessed_identities (commitment, created_at)
VALUES ($1, CURRENT_TIMESTAMP)
ON CONFLICT DO NOTHING
"#,
)
.bind(identity)
.bind(<&str>::from(UnprocessedStatus::New))
.bind(eligibility_timestamp)
.execute(&mut *conn)
.await?;

Ok(identity)
}

#[instrument(skip(self), level = "debug")]
async fn insert_new_recovery(
self,
existing_commitment: &Hash,
new_commitment: &Hash,
) -> Result<(), Error> {
let mut conn = self.acquire().await?;

sqlx::query(
r#"
INSERT INTO recoveries (existing_commitment, new_commitment)
VALUES ($1, $2)
"#,
)
.bind(existing_commitment)
.bind(new_commitment)
.execute(&mut *conn)
.await?;

Ok(())
}

#[instrument(skip(self), level = "debug")]
async fn get_latest_deletion(self) -> Result<LatestDeletionEntry, Error> {
let mut conn = self.acquire().await?;
Expand Down Expand Up @@ -580,47 +553,6 @@ pub trait DbMethods<'c>: Acquire<'c, Database = Postgres> + Sized {
Ok(())
}

#[cfg(test)]
#[instrument(skip(self), level = "debug")]
async fn get_all_recoveries(self) -> Result<Vec<RecoveryEntry>, Error> {
let mut conn = self.acquire().await?;

Ok(
sqlx::query_as::<_, RecoveryEntry>("SELECT * FROM recoveries")
.fetch_all(&mut *conn)
.await?,
)
}

#[instrument(skip(self, prev_commits), level = "debug")]
async fn delete_recoveries<I, T>(self, prev_commits: I) -> Result<Vec<RecoveryEntry>, Error>
where
I: IntoIterator<Item = T> + Send,
T: Into<U256>,
{
let mut conn = self.acquire().await?;

// TODO: upstream PgHasArrayType impl to ruint
let prev_commits = prev_commits
.into_iter()
.map(|c| c.into().to_be_bytes())
.collect::<Vec<[u8; 32]>>();

let res = sqlx::query_as::<_, RecoveryEntry>(
r#"
DELETE
FROM recoveries
WHERE existing_commitment = ANY($1)
RETURNING *
"#,
)
.bind(&prev_commits)
.fetch_all(&mut *conn)
.await?;

Ok(res)
}

/// Inserts a new deletion into the deletions table
///
/// This method is idempotent and on conflict nothing will happen
Expand Down Expand Up @@ -686,54 +618,22 @@ pub trait DbMethods<'c>: Acquire<'c, Database = Postgres> + Sized {
}

#[instrument(skip(self), level = "debug")]
async fn get_eligible_unprocessed_commitments(
self,
status: UnprocessedStatus,
) -> Result<Vec<Hash>, Error> {
async fn get_unprocessed_commitments(self) -> Result<Vec<Hash>, Error> {
let mut conn = self.acquire().await?;

let result: Vec<(Hash,)> = sqlx::query_as(
r#"
SELECT commitment FROM unprocessed_identities
WHERE status = $1 AND CURRENT_TIMESTAMP > eligibility
LIMIT $2
"#,
)
.bind(<&str>::from(status))
.bind(MAX_UNPROCESSED_FETCH_COUNT)
.fetch_all(&mut *conn)
.await?;

Ok(result.into_iter().map(|(commitment,)| commitment).collect())
}

/// Returns the error message from the unprocessed identities table
/// if it exists
///
/// - The outer option represents the existence of the commitment in the
/// unprocessed_identities table
/// - The inner option represents the existence of an error message
#[instrument(skip(self), level = "debug")]
async fn get_unprocessed_error(
self,
commitment: &Hash,
) -> Result<Option<Option<String>>, Error> {
let mut conn = self.acquire().await?;

let result: Option<(Option<String>,)> = sqlx::query_as(
r#"
SELECT error_message
FROM unprocessed_identities
WHERE commitment = $1
"#,
)
.bind(commitment)
.fetch_optional(&mut *conn)
.await?;

Ok(result.map(|(error_message,)| error_message))
}

#[instrument(skip(self), level = "debug")]
async fn remove_unprocessed_identity(self, commitment: &Hash) -> Result<(), Error> {
let mut conn = self.acquire().await?;
Expand Down
Loading

0 comments on commit c5cab66

Please sign in to comment.