diff --git a/crates/cognitoauth/src/cognito_srp_auth.rs b/crates/cognitoauth/src/cognito_srp_auth.rs
index 6c146260..d2b3129a 100644
--- a/crates/cognitoauth/src/cognito_srp_auth.rs
+++ b/crates/cognitoauth/src/cognito_srp_auth.rs
@@ -13,11 +13,11 @@ use cognito_srp::SrpClient;
 use crate::error::CognitoSrpAuthError;
 
 pub struct CognitoAuthInput {
-    pub client_id: String,
-    pub pool_id: String,
-    pub username: String,
-    pub password: String,
-    pub mfa: Option,
+    pub client_id: String,
+    pub pool_id: String,
+    pub username: String,
+    pub password: String,
+    pub mfa: Option,
     pub client_secret: Option, // not yet supported
 }
diff --git a/crates/micro-oz/src/lib.rs b/crates/micro-oz/src/lib.rs
index da6755cd..a7f3906b 100644
--- a/crates/micro-oz/src/lib.rs
+++ b/crates/micro-oz/src/lib.rs
@@ -27,11 +27,11 @@ pub struct Pinhead {
 }
 
 struct PinheadInner {
-    signer: Arc,
-    is_running: AtomicBool,
-    tx_id_counter: AtomicU64,
+    signer: Arc,
+    is_running: AtomicBool,
+    tx_id_counter: AtomicU64,
     txs_to_execute: mpsc::Sender,
-    txs: Mutex>>>,
+    txs: Mutex>>>,
 }
 
 impl Drop for PinheadInner {
@@ -154,16 +154,16 @@ impl Pinhead {
 
         let tx = RelayerTransactionBase {
             transaction_id: tx_id.clone(),
-            to: tx_request.to.context("Missing to")?,
-            value: tx_request.value,
-            gas_limit: tx_request
+            to: tx_request.to.context("Missing to")?,
+            value: tx_request.value,
+            gas_limit: tx_request
                 .gas_limit
                 .map(|gas_limit| gas_limit.as_u32())
                 .unwrap_or(DEFAULT_GAS_LIMIT),
-            data: tx_request.data,
-            status: Status::Pending,
-            hash: None,
-            valid_until: tx_request
+            data: tx_request.data,
+            status: Status::Pending,
+            hash: None,
+            valid_until: tx_request
                 .valid_until
                 .unwrap_or(Utc::now() + chrono::Duration::hours(24)),
         };
diff --git a/crates/micro-oz/src/server.rs b/crates/micro-oz/src/server.rs
index 76f2c9ef..3d09d497 100644
--- a/crates/micro-oz/src/server.rs
+++ b/crates/micro-oz/src/server.rs
@@ -36,7 +36,7 @@ struct ListTransactionsQuery {
     #[serde(default)]
     status: Option,
     #[serde(default)]
-    limit: Option,
+    limit: Option,
 }
 
 async fn list_transactions(
@@ -72,9 +72,9 @@ async fn query_transaction(
 }
 
 pub struct ServerHandle {
-    pinhead: Pinhead,
-    addr: SocketAddr,
-    shutdown_notify: Arc,
+    pinhead: Pinhead,
+    addr: SocketAddr,
+    shutdown_notify: Arc,
     server_join_handle: JoinHandle>,
 }
diff --git a/crates/oz-api/src/auth.rs b/crates/oz-api/src/auth.rs
index 6538a6b8..23b26683 100644
--- a/crates/oz-api/src/auth.rs
+++ b/crates/oz-api/src/auth.rs
@@ -12,7 +12,7 @@ const POOL_ID: &str = "us-west-2_iLmIggsiy";
 
 #[derive(Clone, Debug)]
 pub struct ExpiringHeaders {
-    pub headers: HeaderMap,
+    pub headers: HeaderMap,
     /// The timestamp at which the headers will expire in seconds
     pub expiration_time: Instant,
 }
@@ -20,7 +20,7 @@ pub struct ExpiringHeaders {
 impl ExpiringHeaders {
     pub fn empty() -> Self {
         Self {
-            headers: HeaderMap::new(),
+            headers: HeaderMap::new(),
             expiration_time: Instant::now(),
         }
     }
@@ -29,11 +29,11 @@ impl ExpiringHeaders {
         let now = Instant::now();
 
         let input = CognitoAuthInput {
-            client_id: CLIENT_ID.to_string(),
-            pool_id: POOL_ID.to_string(),
-            username: api_key.to_string(),
-            password: api_secret.to_string(),
-            mfa: None,
+            client_id: CLIENT_ID.to_string(),
+            pool_id: POOL_ID.to_string(),
+            username: api_key.to_string(),
+            password: api_secret.to_string(),
+            mfa: None,
             client_secret: None,
         };
diff --git a/crates/oz-api/src/data/transactions/mod.rs b/crates/oz-api/src/data/transactions/mod.rs
index 7bcfd99b..dd579ee7 100644
--- a/crates/oz-api/src/data/transactions/mod.rs
+++ b/crates/oz-api/src/data/transactions/mod.rs
@@ -44,13 +44,13 @@ impl fmt::Display for Status {
 #[serde(rename_all = "camelCase")]
 pub struct SendBaseTransactionRequest<'a> {
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub to: Option<&'a NameOrAddress>,
+    pub to: Option<&'a NameOrAddress>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub value: Option<&'a U256>,
+    pub value: Option<&'a U256>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub data: Option<&'a Bytes>,
+    pub data: Option<&'a Bytes>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub gas_limit: Option<&'a U256>,
+    pub gas_limit: Option<&'a U256>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub valid_until: Option>,
 }
@@ -63,16 +63,16 @@ pub struct SendBaseTransactionRequest<'a> {
 pub struct SendBaseTransactionRequestOwned {
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    pub to: Option,
+    pub to: Option,
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    pub value: Option,
+    pub value: Option,
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    pub data: Option,
+    pub data: Option,
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    pub gas_limit: Option,
+    pub gas_limit: Option,
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
     pub valid_until: Option>,
@@ -87,16 +87,16 @@ pub struct SendBaseTransactionRequestOwned {
 pub struct RelayerTransactionBase {
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    pub hash: Option,
+    pub hash: Option,
     pub transaction_id: String,
-    pub to: NameOrAddress,
+    pub to: NameOrAddress,
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    pub value: Option,
-    pub gas_limit: u32,
+    pub value: Option,
+    pub gas_limit: u32,
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    pub data: Option,
-    pub valid_until: DateTime,
-    pub status: Status,
+    pub data: Option,
+    pub valid_until: DateTime,
+    pub status: Status,
 }
diff --git a/crates/oz-api/src/lib.rs b/crates/oz-api/src/lib.rs
index eac8f043..321fe394 100644
--- a/crates/oz-api/src/lib.rs
+++ b/crates/oz-api/src/lib.rs
@@ -16,12 +16,12 @@ pub type Result = std::result::Result;
 #[derive(Debug)]
 pub struct OzApi {
-    client: reqwest::Client,
-    api_url: Url,
+    client: reqwest::Client,
+    api_url: Url,
     expiring_headers: Mutex,
-    api_key: String,
-    api_secret: String,
-    auth_disabled: bool,
+    api_key: String,
+    api_secret: String,
+    auth_disabled: bool,
 }
 
 impl OzApi {
diff --git a/crates/tx-sitter-client/src/data.rs b/crates/tx-sitter-client/src/data.rs
index fee4109c..cfaeddeb 100644
--- a/crates/tx-sitter-client/src/data.rs
+++ b/crates/tx-sitter-client/src/data.rs
@@ -7,17 +7,17 @@ mod decimal_u256;
 #[derive(Debug, Default, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct SendTxRequest {
-    pub to: Address,
+    pub to: Address,
     #[serde(with = "decimal_u256")]
-    pub value: U256,
+    pub value: U256,
     #[serde(default)]
-    pub data: Option,
+    pub data: Option,
     #[serde(with = "decimal_u256")]
     pub gas_limit: U256,
     #[serde(default)]
-    pub priority: TransactionPriority,
+    pub priority: TransactionPriority,
     #[serde(default)]
-    pub tx_id: Option,
+    pub tx_id: Option,
 }
 
 #[derive(Deserialize, Serialize, Debug, Clone, Copy, Default)]
@@ -26,12 +26,12 @@ pub enum TransactionPriority {
     // 5th percentile
     Slowest = 0,
     // 25th percentile
-    Slow = 1,
+    Slow = 1,
     // 50th percentile
     #[default]
     Regular = 2,
     // 75th percentile
-    Fast = 3,
+    Fast = 3,
     // 95th percentile
     Fastest = 4,
 }
@@ -45,21 +45,21 @@ pub struct SendTxResponse {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct GetTxResponse {
-    pub tx_id: String,
-    pub to: Address,
+    pub tx_id: String,
+    pub to: Address,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub data: Option,
+    pub data: Option,
     #[serde(with = "decimal_u256")]
-    pub value: U256,
+    pub value: U256,
     #[serde(with = "decimal_u256")]
     pub gas_limit: U256,
-    pub nonce: u64,
+    pub nonce: u64,
     // Sent tx data
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub tx_hash: Option,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub status: Option,
+    pub status: Option,
 }
 
 #[derive(Debug, Clone, Copy, Serialize, Deserialize, Display, PartialEq, Eq)]
diff --git a/e2e_tests/scenarios/tests/common/api.rs b/e2e_tests/scenarios/tests/common/api.rs
index eee76f2d..3421f8ac 100644
--- a/e2e_tests/scenarios/tests/common/api.rs
+++ b/e2e_tests/scenarios/tests/common/api.rs
@@ -14,7 +14,7 @@ use crate::common::prelude::StatusCode;
 
 pub struct RawResponse {
     pub status_code: StatusCode,
-    pub body: String,
+    pub body: String,
 }
 
 pub async fn insert_identity(
@@ -118,7 +118,7 @@ pub async fn inclusion_proof_raw(
     Ok(RawResponse {
         status_code: response.status(),
-        body: result,
+        body: result,
     })
 }
 
diff --git a/e2e_tests/scenarios/tests/common/chain.rs b/e2e_tests/scenarios/tests/common/chain.rs
index 70d9d559..03392ff5 100644
--- a/e2e_tests/scenarios/tests/common/chain.rs
+++ b/e2e_tests/scenarios/tests/common/chain.rs
@@ -25,7 +25,7 @@ type SpecialisedFactory = ContractFactory;
 pub type SpecialisedContract = Contract;
 
 pub struct Chain {
-    pub private_key: SigningKey,
+    pub private_key: SigningKey,
     pub identity_manager: IWorldIDIdentityManager,
 }
 
diff --git a/src/app.rs b/src/app.rs
index f1d24fcc..5dc577d8 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -29,11 +29,11 @@ use crate::server::data::{
 use crate::server::error::Error as ServerError;
 
 pub struct App {
-    pub database: Arc,
+    pub database: Arc,
     pub identity_processor: Arc,
-    pub prover_repository: Arc,
-    tree_state: OnceLock,
-    pub config: Config,
+    pub prover_repository: Arc,
+    tree_state: OnceLock,
+    pub config: Config,
     pub identity_validator: IdentityValidator,
 }
 
@@ -293,9 +293,9 @@ impl App {
             .iter()
             .cloned()
             .map(|opt| ProverConfig {
-                url: opt.url,
-                batch_size: opt.batch_size,
-                timeout_s: opt.timeout_s,
+                url: opt.url,
+                batch_size: opt.batch_size,
+                timeout_s: opt.timeout_s,
                 prover_type: opt.prover_type,
             })
             .collect();
@@ -375,8 +375,8 @@ impl App {
         if let Some(error_message) = self.database.get_unprocessed_error(commitment).await?
         {
             return Ok(InclusionProof {
-                root: None,
-                proof: None,
+                root: None,
+                proof: None,
                 message: error_message
                     .or_else(|| Some("identity exists but has not yet been processed".to_string())),
             }
diff --git a/src/bin/tool.rs b/src/bin/tool.rs
index 269c5763..fd34c7e9 100644
--- a/src/bin/tool.rs
+++ b/src/bin/tool.rs
@@ -265,7 +265,7 @@ async fn main() -> anyhow::Result<()> {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 struct SerializedIdentity {
     nullifier: Field,
-    trapdoor: Field,
+    trapdoor: Field,
 }
 
 async fn load_identity(path: impl AsRef) -> anyhow::Result {
@@ -275,14 +275,14 @@ async fn load_identity(path: impl AsRef) -> anyhow::Result {
 
     Ok(Identity {
         nullifier: identity.nullifier,
-        trapdoor: identity.trapdoor,
+        trapdoor: identity.trapdoor,
     })
 }
 
 async fn save_identity(path: impl AsRef, identity: &Identity) -> anyhow::Result<()> {
     let identity = SerializedIdentity {
         nullifier: identity.nullifier,
-        trapdoor: identity.trapdoor,
+        trapdoor: identity.trapdoor,
     };
 
     let identity = serde_json::to_string_pretty(&identity)?;
diff --git a/src/config.rs b/src/config.rs
index d7dab681..acdfc388 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -31,18 +31,18 @@ pub fn load_config(config_file_path: Option<&Path>) -> anyhow::Result {
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct Config {
-    pub app: AppConfig,
-    pub tree: TreeConfig,
+    pub app: AppConfig,
+    pub tree: TreeConfig,
     #[serde(default)]
-    pub network: Option,
+    pub network: Option,
     #[serde(default)]
-    pub providers: Option,
+    pub providers: Option,
     #[serde(default)]
-    pub relayer: Option,
-    pub database: DatabaseConfig,
-    pub server: ServerConfig,
+    pub relayer: Option,
+    pub database: DatabaseConfig,
+    pub server: ServerConfig,
     #[serde(default)]
-    pub service: ServiceConfig,
+    pub service: ServiceConfig,
     #[serde(default)]
     pub offchain_mode: OffchainModeConfig,
 }
@@ -235,7 +235,7 @@ pub struct ServiceConfig {
     // Service name - used for logging, metrics and tracing
     #[serde(default = "default::service_name")]
     pub service_name: String,
-    pub datadog: Option,
+    pub datadog: Option,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
diff --git a/src/contracts/mod.rs b/src/contracts/mod.rs
index 5e021733..35c34bac 100644
--- a/src/contracts/mod.rs
+++ b/src/contracts/mod.rs
@@ -19,10 +19,10 @@ use crate::utils::index_packing::unpack_indices;
 /// contract.
 #[derive(Debug)]
 pub struct IdentityManager {
-    ethereum: Ethereum,
-    abi: WorldId,
+    ethereum: Ethereum,
+    abi: WorldId,
     secondary_abis: Vec>,
-    tree_depth: usize,
+    tree_depth: usize,
 }
 
 impl IdentityManager {
diff --git a/src/contracts/scanner.rs b/src/contracts/scanner.rs
index 9d880864..7f6605de 100644
--- a/src/contracts/scanner.rs
+++ b/src/contracts/scanner.rs
@@ -4,7 +4,7 @@ use ethers::types::{Address, BlockNumber, Filter, FilterBlockOption, Log, Topic,
 pub struct BlockScanner {
     read_provider: T,
     current_block: u64,
-    window_size: u64,
+    window_size: u64,
     // How many blocks from the chain head to scan to
     // e.g. if latest block is 20 and offset is set to 3
diff --git a/src/database/mod.rs b/src/database/mod.rs
index f91e896d..ed503b9e 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -17,8 +17,8 @@ use tracing::{error, info, instrument, warn};
 use crate::config::DatabaseConfig;
 use crate::identity_tree::Hash;
 
-pub mod types;
 pub mod methods;
+pub mod types;
 
 // Statically link in migration files
 static MIGRATOR: Migrator = sqlx::migrate!("schemas/database");
@@ -197,8 +197,8 @@ mod test {
         );
 
         let db = Database::new(&DatabaseConfig {
-            database: SecretUrl::from_str(&url)?,
-            migrate: true,
+            database: SecretUrl::from_str(&url)?,
+            migrate: true,
             max_connections: 1,
         })
         .await?;
@@ -305,16 +305,16 @@ mod test {
         let mut provers = HashSet::new();
 
         provers.insert(ProverConfig {
-            batch_size: 100,
-            url: "http://localhost:8080".to_string(),
-            timeout_s: 100,
+            batch_size: 100,
+            url: "http://localhost:8080".to_string(),
+            timeout_s: 100,
             prover_type: ProverType::Insertion,
         });
 
         provers.insert(ProverConfig {
-            batch_size: 100,
-            url: "http://localhost:8080".to_string(),
-            timeout_s: 100,
+            batch_size: 100,
+            url: "http://localhost:8080".to_string(),
+            timeout_s: 100,
             prover_type: ProverType::Deletion,
         });
 
@@ -327,16 +327,16 @@ mod test {
         let (db, _db_container) = setup_db(&docker).await?;
 
         let mock_prover_configuration_0 = ProverConfig {
-            batch_size: 100,
-            url: "http://localhost:8080".to_string(),
-            timeout_s: 100,
+            batch_size: 100,
+            url: "http://localhost:8080".to_string(),
+            timeout_s: 100,
             prover_type: ProverType::Insertion,
         };
 
         let mock_prover_configuration_1 = ProverConfig {
-            batch_size: 100,
-            url: "http://localhost:8081".to_string(),
-            timeout_s: 100,
+            batch_size: 100,
+            url: "http://localhost:8081".to_string(),
+            timeout_s: 100,
             prover_type: ProverType::Deletion,
         };
 
@@ -1252,9 +1252,13 @@ mod test {
         let roots = mock_roots(2);
 
         db.insert_new_batch_head(&roots[0]).await?;
-        db.insert_new_batch(&roots[1], &roots[0], BatchType::Insertion, &identities, &[
-            0,
-        ])
+        db.insert_new_batch(
+            &roots[1],
+            &roots[0],
+            BatchType::Insertion,
+            &identities,
+            &[0],
+        )
         .await?;
 
         Ok(())
diff --git a/src/ethereum/mod.rs b/src/ethereum/mod.rs
index e4447ce6..18738426 100644
--- a/src/ethereum/mod.rs
+++ b/src/ethereum/mod.rs
@@ -19,10 +19,10 @@ mod write_provider;
 #[derive(Clone, Debug)]
 pub struct Ethereum {
-    read_provider: Arc,
+    read_provider: Arc,
     // Mapping of chain id to provider
     secondary_read_providers: HashMap>,
-    write_provider: Arc,
+    write_provider: Arc,
 }
 
 impl Ethereum {
diff --git a/src/ethereum/write_provider/inner.rs b/src/ethereum/write_provider/inner.rs
index 7ac6c64a..451342d5 100644
--- a/src/ethereum/write_provider/inner.rs
+++ b/src/ethereum/write_provider/inner.rs
@@ -19,5 +19,5 @@ pub trait Inner: Send + Sync + 'static {
 
 pub struct TransactionResult {
     pub transaction_id: String,
-    pub hash: Option,
+    pub hash: Option,
 }
diff --git a/src/ethereum/write_provider/mod.rs b/src/ethereum/write_provider/mod.rs
index b80194ca..f9094657 100644
--- a/src/ethereum/write_provider/mod.rs
+++ b/src/ethereum/write_provider/mod.rs
@@ -20,8 +20,8 @@ mod tx_sitter;
 
 pub struct WriteProvider {
     read_provider: ReadProvider,
-    inner: Arc,
-    address: Address,
+    inner: Arc,
+    address: Address,
 }
 
 impl fmt::Debug for WriteProvider {
diff --git a/src/ethereum/write_provider/openzeppelin.rs b/src/ethereum/write_provider/openzeppelin.rs
index 7222b39b..3d98fb94 100644
--- a/src/ethereum/write_provider/openzeppelin.rs
+++ b/src/ethereum/write_provider/openzeppelin.rs
@@ -16,19 +16,21 @@ use crate::ethereum::TxError;
 use crate::identity::processor::TransactionId;
 
 static TX_COUNT: Lazy = Lazy::new(|| {
-    register_int_counter_vec!("eth_tx_count", "The transaction count by bytes4.", &[
-        "bytes4"
-    ])
+    register_int_counter_vec!(
+        "eth_tx_count",
+        "The transaction count by bytes4.",
+        &["bytes4"]
+    )
     .unwrap()
 });
 
 #[derive(Debug)]
 pub struct OzRelay {
-    oz_api: OzApi,
     transaction_validity: chrono::Duration,
-    send_timeout: Duration,
-    mine_timeout: Duration,
-    gas_limit: Option,
+    oz_api: OzApi,
+    send_timeout: Duration,
+    mine_timeout: Duration,
+    gas_limit: Option,
 }
 
 impl OzRelay {
@@ -107,10 +109,10 @@ impl OzRelay {
     ) -> Result {
         let tx: TypedTransaction = tx.into();
         let api_tx = SendBaseTransactionRequest {
-            to: tx.to(),
-            value: tx.value(),
-            gas_limit: tx.gas(),
-            data: tx.data(),
+            to: tx.to(),
+            value: tx.value(),
+            gas_limit: tx.gas(),
+            data: tx.data(),
             valid_until: Some(chrono::Utc::now() + self.transaction_validity),
         };
 
@@ -233,7 +235,7 @@ impl Inner for OzRelay {
         Ok(TransactionResult {
             transaction_id: transaction.transaction_id,
-            hash: transaction.hash,
+            hash: transaction.hash,
         })
     }
 }
diff --git a/src/ethereum/write_provider/tx_sitter.rs b/src/ethereum/write_provider/tx_sitter.rs
index c986e3b7..3d7a9cd0 100644
--- a/src/ethereum/write_provider/tx_sitter.rs
+++ b/src/ethereum/write_provider/tx_sitter.rs
@@ -15,14 +15,14 @@ use crate::identity::processor::TransactionId;
 const MINING_TIMEOUT: Duration = Duration::from_secs(60);
 
 pub struct TxSitter {
-    client: TxSitterClient,
+    client: TxSitterClient,
     gas_limit: Option,
 }
 
 impl TxSitter {
     pub fn new(config: &TxSitterConfig) -> Self {
         Self {
-            client: TxSitterClient::new(&config.tx_sitter_url),
+            client: TxSitterClient::new(&config.tx_sitter_url),
             gas_limit: config.tx_sitter_gas_limit,
         }
     }
@@ -42,7 +42,7 @@ impl TxSitter {
             if tx.status == Some(TxStatus::Mined) || tx.status == Some(TxStatus::Finalized) {
                 return Ok(TransactionResult {
                     transaction_id: tx.tx_id,
-                    hash: Some(
+                    hash: Some(
                         tx.tx_hash
                             .context("Missing hash on a mined tx")
                             .map_err(TxError::Send)?,
                     ),
@@ -70,18 +70,18 @@ impl Inner for TxSitter {
         let tx = self
             .client
             .send_tx(&SendTxRequest {
-                to: *tx
+                to: *tx
                     .to_addr()
                     .context("Tx receiver must be an address")
                     .map_err(TxError::Send)?,
-                value: tx.value().copied().unwrap_or(U256::zero()),
-                data: tx.data().cloned(),
+                value: tx.value().copied().unwrap_or(U256::zero()),
+                data: tx.data().cloned(),
                 gas_limit: *tx
                     .gas()
                     .context("Missing tx gas limit")
                     .map_err(TxError::Send)?,
-                priority: TransactionPriority::Regular,
-                tx_id: None,
+                priority: TransactionPriority::Regular,
+                tx_id: None,
             })
             .await
             .context("Error sending transaction")
diff --git a/src/identity_tree/initializer.rs b/src/identity_tree/initializer.rs
index aeff37e0..3200af82 100644
--- a/src/identity_tree/initializer.rs
+++ b/src/identity_tree/initializer.rs
@@ -15,9 +15,9 @@ use crate::identity_tree::{
 use crate::utils::tree_updates::dedup_tree_updates;
 
 pub struct TreeInitializer {
-    pub database: Arc,
+    pub database: Arc,
     pub identity_processor: Arc,
-    pub config: TreeConfig,
+    pub config: TreeConfig,
 }
 
 impl TreeInitializer {
@@ -285,7 +285,7 @@ mod test {
             identities.push(TreeUpdate {
                 leaf_index: i,
-                element: identity,
+                element: identity,
             });
         }
 
diff --git a/src/identity_tree/mod.rs b/src/identity_tree/mod.rs
index d0b4ce16..cd8bed97 100644
--- a/src/identity_tree/mod.rs
+++ b/src/identity_tree/mod.rs
@@ -22,7 +22,7 @@ pub use self::status::{ProcessedStatus, Status, UnknownStatus, UnprocessedStatus
 pub struct TreeUpdate {
     #[sqlx(try_from = "i64")]
     pub leaf_index: usize,
-    pub element: Hash,
+    pub element: Hash,
 }
 
 impl TreeUpdate {
@@ -37,32 +37,32 @@ impl TreeUpdate {
 
 #[derive(Debug)]
 pub struct TreeItem {
-    pub status: ProcessedStatus,
+    pub status: ProcessedStatus,
     pub leaf_index: usize,
 }
 
 #[derive(Debug, Serialize, Deserialize, FromRow)]
 #[serde(rename_all = "camelCase")]
 pub struct RootItem {
-    pub root: Field,
+    pub root: Field,
     #[sqlx(try_from = "&'a str")]
-    pub status: ProcessedStatus,
+    pub status: ProcessedStatus,
     pub pending_valid_as_of: chrono::DateTime,
-    pub mined_valid_as_of: Option>,
+    pub mined_valid_as_of: Option>,
 }
 
 #[derive(Debug, Serialize, Deserialize, PartialEq)]
 #[serde(rename_all = "camelCase")]
 pub struct InclusionProof {
-    pub root: Option,
-    pub proof: Option,
+    pub root: Option,
+    pub proof: Option,
     pub message: Option,
 }
 
 /// Additional data held by the canonical tree version. It includes data
 /// necessary to control garbage collection.
 pub struct CanonicalTreeMetadata {
-    flatten_threshold: usize,
+    flatten_threshold: usize,
     count_since_last_flatten: usize,
 }
@@ -98,10 +98,10 @@ impl AllowedTreeVersionMarker for lazy_merkle_tree::Derived {
 /// next leaf (only used in the latest tree), a pointer to the next version (if
 /// exists) and the metadata specified by the version marker.
 struct TreeVersionData {
-    tree: PoseidonTree,
+    tree: PoseidonTree,
     next_leaf: usize,
-    next: Option>,
-    metadata: V::Metadata,
+    next: Option>,
+    metadata: V::Metadata,
 }
 
 /// Basic operations that should be available for all tree versions.
@@ -501,8 +501,8 @@ where
 #[derive(Clone)]
 pub struct TreeState {
     processed: TreeVersion,
-    batching: TreeVersion,
-    latest: TreeVersion,
+    batching: TreeVersion,
+    latest: TreeVersion,
 }
 
 impl TreeState {
@@ -551,8 +551,8 @@ impl TreeState {
         let (leaf, root, proof) = self.latest.get_leaf_and_proof(item.leaf_index);
 
         let proof = InclusionProof {
-            root: Some(root),
-            proof: Some(proof),
+            root: Some(root),
+            proof: Some(proof),
             message: None,
         };
 
@@ -598,7 +598,7 @@ impl CanonicalTreeBuilder {
             mmap_file_path
         ).unwrap();
         let metadata = CanonicalTreeMetadata {
-            flatten_threshold: flattening_threshold,
+            flatten_threshold: flattening_threshold,
             count_since_last_flatten: 0,
         };
         let mut builder = Self(TreeVersionData {
@@ -610,7 +610,7 @@ impl CanonicalTreeBuilder {
         for (index, leaf) in leftover_initial_leaves.iter().enumerate() {
             builder.update(&TreeUpdate {
                 leaf_index: index + initial_leaves_in_dense_count,
-                element: *leaf,
+                element: *leaf,
             });
         }
         builder
@@ -640,7 +640,7 @@ impl CanonicalTreeBuilder {
         };
 
         let metadata = CanonicalTreeMetadata {
-            flatten_threshold: flattening_threshold,
+            flatten_threshold: flattening_threshold,
             count_since_last_flatten: 0,
         };
         let next_leaf = last_index.map(|v| v + 1).unwrap_or(0);
@@ -654,7 +654,7 @@ impl CanonicalTreeBuilder {
         for (index, leaf) in leftover_items.iter().enumerate() {
             builder.update(&TreeUpdate {
                 leaf_index: next_leaf + index,
-                element: *leaf,
+                element: *leaf,
             });
         }
 
@@ -680,7 +680,7 @@ impl CanonicalTreeBuilder {
 /// A helper for building successive tree versions. Exposes a type-safe API over
 /// building a sequence of tree versions efficiently.
 pub struct DerivedTreeBuilder {
-    prev: TreeVersion,
+    prev: TreeVersion,
     current: TreeVersionData,
 }
diff --git a/src/prover/map.rs b/src/prover/map.rs
index 4c6eea68..72665e5a 100644
--- a/src/prover/map.rs
+++ b/src/prover/map.rs
@@ -47,9 +47,9 @@ impl ProverMap {
         self.map
             .iter()
             .map(|(k, v)| ProverConfig {
-                url: v.url(),
-                timeout_s: v.timeout_s(),
-                batch_size: *k,
+                url: v.url(),
+                timeout_s: v.timeout_s(),
+                batch_size: *k,
                 prover_type: v.prover_type(),
             })
             .collect()
diff --git a/src/prover/proof.rs b/src/prover/proof.rs
index 69cf1b0b..b517b905 100644
--- a/src/prover/proof.rs
+++ b/src/prover/proof.rs
@@ -10,16 +10,16 @@ use crate::prover::Prover;
 /// The names of the data fields match those from the JSON response exactly.
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub struct Proof {
-    pub ar: [U256; 2],
-    pub bs: [[U256; 2]; 2],
+    pub ar: [U256; 2],
+    pub bs: [[U256; 2]; 2],
     pub krs: [U256; 2],
 }
 
 impl From<[U256; 8]> for Proof {
     fn from(value: [U256; 8]) -> Self {
         Self {
-            ar: [value[0], value[1]],
-            bs: [[value[2], value[3]], [value[4], value[5]]],
+            ar: [value[0], value[1]],
+            bs: [[value[2], value[3]], [value[4], value[5]]],
             krs: [value[6], value[7]],
         }
     }
diff --git a/src/prover/repository.rs b/src/prover/repository.rs
index be39886e..be2301c2 100644
--- a/src/prover/repository.rs
+++ b/src/prover/repository.rs
@@ -6,7 +6,7 @@ use crate::prover::{Prover, ProverConfig, ProverMap, ProverType};
 
 pub struct ProverRepository {
     insertion_prover_map: RwLock,
-    deletion_prover_map: RwLock,
+    deletion_prover_map: RwLock,
 }
 
 impl ProverRepository {
diff --git a/src/server/data.rs b/src/server/data.rs
index 23aa30cd..1d4650a2 100644
--- a/src/server/data.rs
+++ b/src/server/data.rs
@@ -27,13 +27,13 @@ pub struct InsertCommitmentRequest {
 #[serde(deny_unknown_fields)]
 pub struct AddBatchSizeRequest {
     /// The URL of the prover for the provided batch size.
-    pub url: String,
+    pub url: String,
     /// The batch size to add.
-    pub batch_size: usize,
+    pub batch_size: usize,
     /// The timeout for communications with the prover service.
     pub timeout_seconds: u64,
     // TODO: add docs
-    pub prover_type: ProverType,
+    pub prover_type: ProverType,
 }
 
 #[derive(Clone, Serialize, Deserialize)]
@@ -41,7 +41,7 @@ pub struct AddBatchSizeRequest {
 #[serde(deny_unknown_fields)]
 pub struct RemoveBatchSizeRequest {
     /// The batch size to remove from the prover map.
-    pub batch_size: usize,
+    pub batch_size: usize,
     // TODO: add docs
     pub prover_type: ProverType,
 }
@@ -57,11 +57,11 @@ pub struct InclusionProofRequest {
 #[serde(rename_all = "camelCase")]
 #[serde(deny_unknown_fields)]
 pub struct VerifySemaphoreProofRequest {
-    pub root: Field,
-    pub signal_hash: Field,
-    pub nullifier_hash: Field,
+    pub root: Field,
+    pub signal_hash: Field,
+    pub nullifier_hash: Field,
     pub external_nullifier_hash: Field,
-    pub proof: Proof,
+    pub proof: Proof,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -87,7 +87,7 @@ pub struct RecoveryRequest {
     /// The leaf index of the identity commitment to delete.
     pub previous_identity_commitment: Hash,
     /// The new identity commitment to insert.
-    pub new_identity_commitment: Hash,
+    pub new_identity_commitment: Hash,
 }
 
 impl From for InclusionProofResponse {
 }
diff --git a/src/task_monitor/mod.rs b/src/task_monitor/mod.rs
index de0cc358..860e6ecf 100644
--- a/src/task_monitor/mod.rs
+++ b/src/task_monitor/mod.rs
@@ -22,7 +22,7 @@ const INSERT_IDENTITIES_BACKOFF: Duration = Duration::from_secs(5);
 const DELETE_IDENTITIES_BACKOFF: Duration = Duration::from_secs(5);
 
 struct RunningInstance {
-    handles: Vec>,
+    handles: Vec>,
     shutdown_sender: broadcast::Sender<()>,
 }
 
@@ -78,7 +78,7 @@ pub struct TaskMonitor {
     /// extension the instance.
     instance: RwLock>,
     shutdown: Arc,
-    app: Arc,
+    app: Arc,
 }
 
 impl TaskMonitor {
diff --git a/tests/common/chain_mock.rs b/tests/common/chain_mock.rs
index 6ca7c454..399f2b39 100644
--- a/tests/common/chain_mock.rs
+++ b/tests/common/chain_mock.rs
@@ -22,8 +22,8 @@ use super::{abi as ContractAbi, CompiledContract};
 pub type SpecialisedContract = Contract;
 
 pub struct MockChain {
-    pub anvil: AnvilInstance,
-    pub private_key: SigningKey,
+    pub anvil: AnvilInstance,
+    pub private_key: SigningKey,
     pub identity_manager: IWorldIDIdentityManager,
 }
 
diff --git a/tests/common/prover_mock.rs b/tests/common/prover_mock.rs
index d2f0fdcf..ff6534f5 100644
--- a/tests/common/prover_mock.rs
+++ b/tests/common/prover_mock.rs
@@ -21,7 +21,7 @@ use tokio::sync::Mutex;
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct ProverError {
-    pub code: String,
+    pub code: String,
     pub message: String,
 }
 
@@ -39,12 +39,12 @@ impl Display for ProverError {
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct InsertionProofInput {
-    input_hash: U256,
-    start_index: u32,
-    pre_root: U256,
-    post_root: U256,
+    input_hash: U256,
+    start_index: u32,
+    pre_root: U256,
+    post_root: U256,
     identity_commitments: Vec,
-    merkle_proofs: Vec>,
+    merkle_proofs: Vec>,
 }
 
 // TODO: ideally we just import the InsertionProofInput and DeletionProofInput
@@ -55,27 +55,27 @@ struct InsertionProofInput {
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct DeletionProofInput {
-    input_hash: U256,
-    pre_root: U256,
-    post_root: U256,
-    deletion_indices: Vec,
+    input_hash: U256,
+    pre_root: U256,
+    post_root: U256,
+    deletion_indices: Vec,
     identity_commitments: Vec,
-    merkle_proofs: Vec>,
+    merkle_proofs: Vec>,
 }
 
 /// The proof response from the prover.
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub struct Proof {
-    pub ar: [U256; 2],
-    pub bs: [[U256; 2]; 2],
+    pub ar: [U256; 2],
+    pub bs: [[U256; 2]; 2],
     pub krs: [U256; 2],
 }
 
 impl From<[U256; 8]> for Proof {
     fn from(value: [U256; 8]) -> Self {
         Self {
-            ar: [value[0], value[1]],
-            bs: [[value[2], value[3]], [value[4], value[5]]],
+            ar: [value[0], value[1]],
+            bs: [[value[2], value[3]], [value[4], value[5]]],
             krs: [value[6], value[7]],
         }
     }
@@ -101,7 +101,7 @@ impl ProveResponse {
     /// Constructs a failure response from the provided `code` and `message`.
     pub fn failure(code: impl Into, message: impl Into) -> Self {
         Self::ProofFailure(ProverError {
-            code: code.into(),
+            code: code.into(),
             message: message.into(),
         })
     }
@@ -109,16 +109,16 @@ impl ProveResponse {
 
 /// The mock prover service.
 pub struct ProverService {
-    server: Handle,
-    inner: Arc>,
-    address: SocketAddr,
-    batch_size: usize,
+    server: Handle,
+    inner: Arc>,
+    address: SocketAddr,
+    batch_size: usize,
     prover_type: ProverType,
 }
 
 struct Prover {
     is_available: bool,
-    tree_depth: u8,
+    tree_depth: u8,
 }
 
 impl ProverService {
diff --git a/tests/common/test_config.rs b/tests/common/test_config.rs
index ec9331ae..fb9ac187 100644
--- a/tests/common/test_config.rs
+++ b/tests/common/test_config.rs
@@ -20,37 +20,37 @@ pub const DEFAULT_TREE_DENSE_PREFIX_DEPTH: usize = 10;
 pub const DEFAULT_TIME_BETWEEN_SCANS_SECONDS: u64 = 1;
 
 pub struct TestConfigBuilder {
-    tree_depth: usize,
-    dense_tree_prefix_depth: usize,
-    prover_urls: Vec,
-    batch_insertion_timeout: Duration,
-    batch_deletion_timeout: Duration,
-    min_batch_deletion_size: usize,
-    db_url: Option,
-    oz_api_url: Option,
-    oz_address: Option,
-    cache_file: Option,
+    tree_depth: usize,
+    dense_tree_prefix_depth: usize,
+    prover_urls: Vec,
+    batch_insertion_timeout: Duration,
+    batch_deletion_timeout: Duration,
+    min_batch_deletion_size: usize,
+    db_url: Option,
+    oz_api_url: Option,
+    oz_address: Option,
+    cache_file: Option,
     identity_manager_address: Option,
     primary_network_provider: Option,
-    offchain_mode: bool,
+    offchain_mode: bool,
 }
 
 impl TestConfigBuilder {
     pub fn new() -> Self {
         Self {
-            tree_depth: DEFAULT_TREE_DEPTH,
-            dense_tree_prefix_depth: DEFAULT_TREE_DENSE_PREFIX_DEPTH,
-            prover_urls: vec![],
-            batch_insertion_timeout: Duration::from_secs(DEFAULT_BATCH_INSERTION_TIMEOUT_SECONDS),
-            batch_deletion_timeout: Duration::from_secs(DEFAULT_BATCH_DELETION_TIMEOUT_SECONDS),
-            min_batch_deletion_size: 1,
-            db_url: None,
-            oz_api_url: None,
-            oz_address: None,
-            cache_file: None,
+            tree_depth: DEFAULT_TREE_DEPTH,
+            dense_tree_prefix_depth: DEFAULT_TREE_DENSE_PREFIX_DEPTH,
+            prover_urls: vec![],
+            batch_insertion_timeout: Duration::from_secs(DEFAULT_BATCH_INSERTION_TIMEOUT_SECONDS),
+            batch_deletion_timeout: Duration::from_secs(DEFAULT_BATCH_DELETION_TIMEOUT_SECONDS),
+            min_batch_deletion_size: 1,
+            db_url: None,
+            oz_api_url: None,
+            oz_address: None,
+            cache_file: None,
             identity_manager_address: None,
             primary_network_provider: None,
-            offchain_mode: false,
+            offchain_mode: false,
         }
     }
 
@@ -115,10 +115,10 @@ impl TestConfigBuilder {
     pub fn add_prover(mut self, prover: &ProverService) -> Self {
         let prover_config = ProverConfig {
-            url: prover.url().to_string(),
+            url: prover.url().to_string(),
             // TODO: Make this configurable?
-            timeout_s: 30,
-            batch_size: prover.batch_size(),
+            timeout_s: 30,
+            batch_size: prover.batch_size(),
             prover_type: prover.prover_type(),
         };
 
@@ -139,69 +139,69 @@ impl TestConfigBuilder {
         let database = SecretUrl::new(Url::parse(&db_url)?);
 
         let config = Config {
-            app: AppConfig {
-                provers_urls: self.prover_urls.into(),
-                batch_insertion_timeout: self.batch_insertion_timeout,
-                batch_deletion_timeout: self.batch_deletion_timeout,
-                min_batch_deletion_size: self.min_batch_deletion_size,
-                max_epoch_duration: default::max_epoch_duration(),
-                scanning_window_size: default::scanning_window_size(),
+            app: AppConfig {
+                provers_urls: self.prover_urls.into(),
+                batch_insertion_timeout: self.batch_insertion_timeout,
+                batch_deletion_timeout: self.batch_deletion_timeout,
+                min_batch_deletion_size: self.min_batch_deletion_size,
+                max_epoch_duration: default::max_epoch_duration(),
+                scanning_window_size: default::scanning_window_size(),
                 scanning_chain_head_offset: default::scanning_chain_head_offset(),
-                time_between_scans: Duration::from_secs(DEFAULT_TIME_BETWEEN_SCANS_SECONDS),
-                monitored_txs_capacity: default::monitored_txs_capacity(),
+                time_between_scans: Duration::from_secs(DEFAULT_TIME_BETWEEN_SCANS_SECONDS),
+                monitored_txs_capacity: default::monitored_txs_capacity(),
             },
-            tree: TreeConfig {
-                tree_depth: self.tree_depth,
+            tree: TreeConfig {
+                tree_depth: self.tree_depth,
                 dense_tree_prefix_depth: self.dense_tree_prefix_depth,
-                tree_gc_threshold: default::tree_gc_threshold(),
-                cache_file: self.cache_file.context("Missing cache file")?,
-                force_cache_purge: default::force_cache_purge(),
-                initial_leaf_value: default::initial_leaf_value(),
+                tree_gc_threshold: default::tree_gc_threshold(),
+                cache_file: self.cache_file.context("Missing cache file")?,
+                force_cache_purge: default::force_cache_purge(),
+                initial_leaf_value: default::initial_leaf_value(),
             },
-            network: if self.offchain_mode {
+            network: if self.offchain_mode {
                 None
             } else {
                 Some(NetworkConfig {
-                    identity_manager_address: self
+                    identity_manager_address: self
                         .identity_manager_address
                         .context("Missing identity manager address")?,
                     relayed_identity_manager_addresses: Default::default(),
                 })
             },
-            providers: if self.offchain_mode {
+            providers: if self.offchain_mode {
                 None
             } else {
                 Some(ProvidersConfig {
-                    primary_network_provider: self
+                    primary_network_provider: self
                         .primary_network_provider
                         .context("Missing primary network provider")?,
                     relayed_network_providers: Default::default(),
                 })
             },
-            relayer: if self.offchain_mode {
+            relayer: if self.offchain_mode {
                 None
             } else {
                 Some(RelayerConfig::OzDefender(OzDefenderConfig {
-                    oz_api_url: self.oz_api_url.context("Missing oz api url")?,
-                    oz_address: self.oz_address.context("Missing oz address")?,
-                    oz_api_key: "".to_string(),
-                    oz_api_secret: "".to_string(),
+                    oz_api_url: self.oz_api_url.context("Missing oz api url")?,
+                    oz_address: self.oz_address.context("Missing oz address")?,
+                    oz_api_key: "".to_string(),
+                    oz_api_secret: "".to_string(),
                     oz_transaction_validity: default::oz_transaction_validity(),
-                    oz_send_timeout: default::oz_send_timeout(),
-                    oz_mine_timeout: default::oz_mine_timeout(),
-                    oz_gas_limit: Default::default(),
+                    oz_send_timeout: default::oz_send_timeout(),
+                    oz_mine_timeout: default::oz_mine_timeout(),
+                    oz_gas_limit: Default::default(),
                 }))
             },
-            database: DatabaseConfig {
+            database: DatabaseConfig {
                 database,
                 migrate: default::migrate(),
                 max_connections: default::max_connections(),
             },
-            server: ServerConfig {
-                address: SocketAddr::from(([127, 0, 0, 1], 0)),
+            server: ServerConfig {
+                address: SocketAddr::from(([127, 0, 0, 1], 0)),
                 serve_timeout: default::serve_timeout(),
             },
-            service: ServiceConfig::default(),
+            service: ServiceConfig::default(),
             offchain_mode: OffchainModeConfig {
                 enabled: self.offchain_mode,
             },