diff --git a/Cargo.lock b/Cargo.lock index e51a06a9..92560569 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1416,6 +1416,37 @@ dependencies = [ "memchr", ] +[[package]] +name = "config" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca" +dependencies = [ + "async-trait", + "json5", + "lazy_static", + "nom", + "pathdiff", + "ron", + "rust-ini", + "serde", + "serde_json", + "toml 0.5.9", + "yaml-rust", +] + +[[package]] +name = "console" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "const-hex" version = "1.9.0" @@ -1944,6 +1975,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "dlv-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" + [[package]] name = "dotenvy" version = "0.15.6" @@ -2002,6 +2039,12 @@ dependencies = [ "log", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" version = "0.8.31" @@ -2938,6 +2981,22 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "hyper" version = "0.14.27" @@ -3108,6 +3167,12 @@ dependencies = [ "hashbrown 0.14.2", ] +[[package]] +name = "indoc" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" + [[package]] name = "inout" version = "0.1.3" @@ -3208,6 +3273,17 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + [[package]] name = "jsonwebtoken" version = "8.3.0" @@ -3320,6 +3396,12 @@ dependencies = [ "cc", ] +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + [[package]] name = "linux-raw-sys" version = "0.0.46" @@ -3961,6 +4043,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "ordered-multimap" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +dependencies = [ + "dlv-list", + "hashbrown 
0.12.3", +] + [[package]] name = "outref" version = "0.5.1" @@ -4100,6 +4192,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e91099d4268b0e11973f036e885d652fb0b21fedcf69738c627f94db6a44f42" +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + [[package]] name = "pbkdf2" version = "0.11.0" @@ -4139,14 +4237,49 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.5.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f400b0f7905bf702f9f3dc3df5a121b16c54e9e8012c082905fdf09a931861a" +checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" dependencies = [ + "memchr", "thiserror", "ucd-trie", ] +[[package]] +name = "pest_derive" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.32", +] + +[[package]] +name = "pest_meta" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + [[package]] name = "petgraph" version = "0.6.2" @@ -4783,6 +4916,17 @@ dependencies = [ "paste", ] +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64 0.13.1", + "bitflags 1.3.2", + "serde", +] + [[package]] name = "ruint" version = "1.7.0" @@ -4806,6 +4950,16 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62cc5760263ea229d367e7dff3c0cbf09e4797a125bd87059a6c095804f3b2d1" +[[package]] +name = "rust-ini" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +dependencies = [ + "cfg-if", + "ordered-multimap", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -5300,7 +5454,7 @@ dependencies = [ [[package]] name = "signup-sequencer" -version = "1.0.1" +version = "2.0.0" dependencies = [ "anyhow", "async-stream", @@ -5311,6 +5465,7 @@ dependencies = [ "chrono", "clap 4.3.14", "cli-batteries", + "config", "ethers", "ethers-solc", "eyre", @@ -5318,7 +5473,10 @@ dependencies = [ "futures-util", "hex", "hex-literal 0.4.1", + "humantime", + "humantime-serde", "hyper", + "indoc", "maplit", "micro-oz", "once_cell", @@ -5331,12 +5489,14 @@ dependencies = [ "semaphore", "serde", "serde_json", + "similar-asserts", "sqlx", "take_mut", "tempfile", "test-case", "thiserror", "tokio", + "toml 0.8.8", "tracing", "tracing-futures", "tracing-subscriber 0.3.17", @@ -5346,6 +5506,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "similar" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" +dependencies = [ + "bstr", + "unicode-segmentation", +] + +[[package]] +name = "similar-asserts" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e041bb827d1bfca18f213411d51b665309f1afb37a04a5d1464530e13779fc0f" +dependencies = [ + "console", + "similar", +] + [[package]] name = "simple_asn1" version = "0.6.2" @@ -6029,7 +6209,19 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.15", +] + +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", ] [[package]] @@ -6054,6 +6246,19 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.1.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.8.3" @@ -7005,6 +7210,15 @@ dependencies = [ "windows-targets 0.48.0", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -7035,6 +7249,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.0", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -7047,6 +7276,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.33.0" @@ -7071,6 +7306,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.33.0" @@ -7095,6 +7336,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = 
"windows_i686_msvc" version = "0.33.0" @@ -7119,6 +7366,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.33.0" @@ -7143,6 +7396,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -7155,6 +7414,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.33.0" @@ -7179,6 +7444,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" version = "0.5.19" @@ -7231,6 +7502,15 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "yansi" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index a795c6e0..5f96f984 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "signup-sequencer" -version = "1.0.1" +version = "2.0.0" authors = [ "Remco Bloemen ", "Lucas Ege ", @@ -38,13 +38,18 @@ cli-batteries = { git = "https://github.com/recmo/cli-batteries", rev = "fc1186d "otlp", "datadog", ] } +config = "0.13.4" ethers = { version = "2.0.10", features = ["ws", "ipc", "openssl", "abigen"] } ethers-solc = "2.0.10" eyre = "0.6" futures = "0.3" futures-util = { version = "^0.3" } hex = "0.4.3" +hex-literal = "0.4.1" +humantime = "2.1.0" +humantime-serde = "1.1.1" hyper = { version = "^0.14.17", features = ["server", "tcp", "http1", "http2"] } +indoc = "2.0.4" once_cell = "1.8" oz-api = { path = "crates/oz-api" } prometheus = "0.13.3" # We need upstream PR#465 to fix #272. 
@@ -74,6 +79,7 @@ tokio = { version = "1.17", features = [ "tracing", "test-util", ] } +toml = "0.8.8" tracing = "0.1" tracing-futures = "0.2" tx-sitter-client = { path = "crates/tx-sitter-client" } @@ -85,7 +91,6 @@ cli-batteries = { git = "https://github.com/recmo/cli-batteries", rev = "fc1186d "mock-shutdown", ] } hex = "0.4.3" -hex-literal = "0.4.1" maplit = "1.0.2" micro-oz = { path = "crates/micro-oz" } postgres-docker-utils = { path = "crates/postgres-docker-utils" } @@ -93,6 +98,7 @@ regex = { version = "1.7.1", features = ["std"] } semaphore = { git = "https://github.com/worldcoin/semaphore-rs", branch = "main", features = [ "depth_20", ] } +similar-asserts = "1.5.0" test-case = "3.0" tracing-subscriber = "0.3.11" tracing-test = "0.2" diff --git a/src/app.rs b/src/app.rs index 2daf4c67..c45dcf1a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -3,80 +3,35 @@ use std::sync::Arc; use std::time::Instant; use chrono::{Duration, Utc}; -use clap::Parser; use ruint::Uint; use semaphore::poseidon_tree::LazyPoseidonTree; use semaphore::protocol::verify_proof; use tracing::{info, instrument, warn}; +use crate::config::Config; use crate::contracts::{IdentityManager, SharedIdentityManager}; -use crate::database::{self, Database}; -use crate::ethereum::{self, Ethereum}; +use crate::database::Database; +use crate::ethereum::Ethereum; use crate::identity_tree::{ CanonicalTreeBuilder, Hash, InclusionProof, ProcessedStatus, RootItem, Status, TreeState, TreeUpdate, TreeVersionReadOps, UnprocessedStatus, }; use crate::prover::map::initialize_prover_maps; -use crate::prover::{self, ProverConfiguration, ProverType, Provers}; +use crate::prover::{ProverConfig, ProverType}; use crate::server::data::{ IdentityHistoryEntry, IdentityHistoryEntryKind, IdentityHistoryEntryStatus, InclusionProofResponse, ListBatchSizesResponse, VerifySemaphoreProofQuery, VerifySemaphoreProofRequest, VerifySemaphoreProofResponse, }; use crate::server::error::Error as ServerError; -use crate::task_monitor::TaskMonitor; use crate::utils::tree_updates::dedup_tree_updates; -use crate::{contracts, task_monitor}; - -#[derive(Clone, Debug, PartialEq, Parser)] -#[group(skip)] -pub struct Options { - #[clap(flatten)] - pub ethereum: ethereum::Options, - - #[clap(flatten)] - pub contracts: contracts::Options, - - #[clap(flatten)] - pub database: database::Options, - - #[clap(flatten)] - pub batch_provers: prover::Options, - - #[clap(flatten)] - pub committer: task_monitor::Options, - - /// Block number to start syncing from - #[clap(long, env, default_value = "0")] - pub starting_block: u64, - - /// Timeout for the tree lock (seconds). - #[clap(long, env, default_value = "120")] - pub lock_timeout: u64, - - /// The depth of the tree prefix that is vectorized. - #[clap(long, env, default_value = "20")] - pub dense_tree_prefix_depth: usize, - - /// The number of updates to trigger garbage collection. - #[clap(long, env, default_value = "10000")] - pub tree_gc_threshold: usize, - - /// Path and file name to use for mmap file when building dense tree. - #[clap(long, env, default_value = "./dense_tree_mmap")] - pub dense_tree_mmap_file: String, - - /// If set will not use cached tree state. 
- #[clap(long, env)] - pub force_cache_purge: bool, -} pub struct App { - database: Arc, - identity_manager: SharedIdentityManager, - identity_committer: Arc, - tree_state: TreeState, - snark_scalar_field: Hash, + pub database: Arc, + pub identity_manager: SharedIdentityManager, + pub tree_state: TreeState, + pub snark_scalar_field: Hash, + pub config: Config, } impl App { @@ -84,24 +39,25 @@ impl App { /// /// Will return `Err` if the internal Ethereum handler errors or if the /// `options.storage_file` is not accessible. - #[instrument(name = "App::new", level = "debug")] - pub async fn new(options: Options) -> anyhow::Result { - let ethereum = Ethereum::new(options.ethereum); - let db = Database::new(options.database); + #[instrument(name = "App::new", level = "debug", skip_all)] + pub async fn new(config: Config) -> anyhow::Result { + let ethereum = Ethereum::new(&config); + let db = Database::new(&config.database); let (ethereum, db) = tokio::try_join!(ethereum, db)?; let database = Arc::new(db); - let mut provers: HashSet = database.get_provers().await?; + let mut provers: HashSet = database.get_provers().await?; - let non_inserted_provers = Self::merge_env_provers(options.batch_provers, &mut provers); + let non_inserted_provers = + Self::merge_env_provers(&config.app.provers_urls.0, &mut provers); database.insert_provers(non_inserted_provers).await?; let (insertion_prover_map, deletion_prover_map) = initialize_prover_maps(provers)?; let identity_manager = IdentityManager::new( - options.contracts, + &config, ethereum.clone(), insertion_prover_map, deletion_prover_map, @@ -140,12 +96,12 @@ impl App { &database, // Poseidon tree depth is one more than the contract's tree depth identity_manager.tree_depth(), - options.dense_tree_prefix_depth, - options.tree_gc_threshold, + config.tree.dense_tree_prefix_depth, + config.tree.tree_gc_threshold, identity_manager.initial_leaf_value(), initial_root_hash, - &options.dense_tree_mmap_file, - options.force_cache_purge, + &config.tree.cache_file, + config.tree.force_cache_purge, ) .await?; info!("Tree state initialization took: {:?}", timer.elapsed()); @@ -162,23 +118,16 @@ impl App { &database, // Poseidon tree depth is one more than the contract's tree depth identity_manager.tree_depth(), - options.dense_tree_prefix_depth, - options.tree_gc_threshold, + config.tree.dense_tree_prefix_depth, + config.tree.tree_gc_threshold, identity_manager.initial_leaf_value(), initial_root_hash, - &options.dense_tree_mmap_file, + &config.tree.cache_file, true, ) .await?; } - let identity_committer = Arc::new(TaskMonitor::new( - database.clone(), - identity_manager.clone(), - tree_state.clone(), - &options.committer, - )); - // TODO Export the reduced-ness check that this is enabling from the // `semaphore-rs` library when we bump the version. 
let snark_scalar_field = Hash::from_str_radix( @@ -187,16 +136,13 @@ impl App { ) .expect("This should just parse."); - // Process to push new identities to Ethereum - identity_committer.start().await; - // Sync with chain on start up let app = Self { database, identity_manager, - identity_committer, tree_state, snark_scalar_field, + config, }; Ok(app) @@ -615,12 +561,14 @@ impl App { Ok(history) } - fn merge_env_provers(options: prover::Options, existing_provers: &mut Provers) -> Provers { - let options_set: HashSet = options - .prover_urls - .0 - .into_iter() - .map(|opt| ProverConfiguration { + fn merge_env_provers( + prover_urls: &[ProverConfig], + existing_provers: &mut HashSet, + ) -> HashSet { + let options_set: HashSet = prover_urls + .iter() + .cloned() + .map(|opt| ProverConfig { url: opt.url, batch_size: opt.batch_size, timeout_s: opt.timeout_s, @@ -780,17 +728,26 @@ impl App { let processed_root = self.tree_state.get_processed_tree().get_root(); let mined_root = self.tree_state.get_mined_tree().get_root(); + tracing::info!("Validating age max_root_age: {max_root_age:?}"); + let root = root_state.root; match root_state.status { // Pending status implies the batching or latest tree ProcessedStatus::Pending if latest_root == root || batching_root == root => { - return Ok(()) + tracing::warn!("Root is pending - skipping"); + return Ok(()); } // Processed status is hidden - this should never happen - ProcessedStatus::Processed if processed_root == root => return Ok(()), + ProcessedStatus::Processed if processed_root == root => { + tracing::warn!("Root is processed - skipping"); + return Ok(()); + } // Processed status is hidden so it could be either processed or mined - ProcessedStatus::Mined if processed_root == root || mined_root == root => return Ok(()), + ProcessedStatus::Mined if processed_root == root || mined_root == root => { + tracing::warn!("Root is mined - skipping"); + return Ok(()); + } _ => (), } @@ -808,21 +765,14 @@ impl App { now - mined_at }; + tracing::warn!("Root age: {root_age:?}"); + if root_age > max_root_age { Err(ServerError::RootTooOld) } else { Ok(()) } } - - /// # Errors - /// - /// Will return an Error if any of the components cannot be shut down - /// gracefully. - pub async fn shutdown(&self) -> anyhow::Result<()> { - info!("Shutting down identity committer."); - self.identity_committer.shutdown().await - } } #[cfg(test)] diff --git a/src/config.rs b/src/config.rs new file mode 100644 index 00000000..f92f125b --- /dev/null +++ b/src/config.rs @@ -0,0 +1,381 @@ +use std::collections::HashMap; +use std::net::SocketAddr; +use std::time::Duration; + +use ethers::types::{Address, H160}; +use semaphore::Field; +use serde::{Deserialize, Serialize}; + +use crate::prover::ProverConfig; +use crate::utils::secret::SecretUrl; +use crate::utils::serde_utils::JsonStrWrapper; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub app: AppConfig, + pub tree: TreeConfig, + pub network: NetworkConfig, + pub providers: ProvidersConfig, + pub relayer: RelayerConfig, + pub database: DatabaseConfig, + pub server: ServerConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AppConfig { + /// A list of prover urls (along with batch size, type and timeout) that + /// will be inserted into the DB at startup + pub provers_urls: JsonStrWrapper>, + + /// The maximum number of seconds the sequencer will wait before sending a + /// batch of identities to the chain, even if the batch is not full. 
+ #[serde(with = "humantime_serde")] + #[serde(default = "default::batch_insertion_timeout")] + pub batch_insertion_timeout: Duration, + + /// The maximum number of seconds the sequencer will wait before sending a + /// batch of deletions to the chain, even if the batch is not full. + #[serde(with = "humantime_serde")] + #[serde(default = "default::batch_deletion_timeout")] + pub batch_deletion_timeout: Duration, + + /// The smallest deletion batch size that we'll allow + #[serde(default = "default::min_batch_deletion_size")] + pub min_batch_deletion_size: usize, + + /// The parameter to control the delay between mining a deletion batch and + /// inserting the recovery identities + /// + /// The sequencer will insert the recovery identities after + /// max_epoch_duration_seconds + root_history_expiry) seconds have passed + /// + /// By default the value is set to 0 so the sequencer will only use + /// root_history_expiry + #[serde(with = "humantime_serde")] + #[serde(default = "default::max_epoch_duration")] + pub max_epoch_duration: Duration, + + /// The maximum number of windows to scan for finalization logs + #[serde(default = "default::scanning_window_size")] + pub scanning_window_size: u64, + + /// The offset from the latest block to scan + #[serde(default = "default::scanning_chain_head_offset")] + pub scanning_chain_head_offset: u64, + + /// The number of seconds to wait between fetching logs + #[serde(with = "humantime_serde")] + #[serde(default = "default::time_between_scans")] + pub time_between_scans: Duration, + + /// The number of txs in the channel that we'll be monitoring + #[serde(default = "default::monitored_txs_capacity")] + pub monitored_txs_capacity: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TreeConfig { + /// The depth of the tree that the contract is working with. This needs to + /// agree with the verifier in the deployed contract, and also with + /// `semaphore-mtb` + #[serde(default = "default::tree_depth")] + pub tree_depth: usize, + + /// The depth of the tree prefix that is vectorized + #[serde(default = "default::dense_tree_prefix_depth")] + pub dense_tree_prefix_depth: usize, + + /// The number of updates to trigger garbage collection + #[serde(default = "default::tree_gc_threshold")] + pub tree_gc_threshold: usize, + + // TODO: Allow running without a cache file + /// Path and file name to use for mmap file when building dense tree + #[serde(default = "default::cache_file")] + pub cache_file: String, + + /// If set will not use cached tree state + #[serde(default = "default::force_cache_purge")] + pub force_cache_purge: bool, + + /// Initial value of the Merkle tree leaves. Defaults to the initial value + /// used in the identity manager contract. + #[serde(default = "default::initial_leaf_value")] + pub initial_leaf_value: Field, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NetworkConfig { + /// The address of the identity manager contract. 
+    pub identity_manager_address: Address,
+
+    /// The addresses of world id contracts on secondary chains
+    /// mapped by chain id
+    #[serde(default)]
+    pub relayed_identity_manager_addresses: JsonStrWrapper<HashMap<u64, Address>>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ProvidersConfig {
+    /// Provider url for the primary chain
+    pub primary_network_provider: SecretUrl,
+
+    /// Provider urls for the secondary chains
+    #[serde(default)]
+    pub relayed_network_providers: JsonStrWrapper<Vec<SecretUrl>>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(tag = "kind")]
+#[serde(rename_all = "snake_case")]
+pub enum RelayerConfig {
+    OzDefender(OzDefenderConfig),
+    TxSitter(TxSitterConfig),
+}
+
+impl RelayerConfig {
+    // TODO: Extract into a common field
+    pub fn address(&self) -> Address {
+        match self {
+            RelayerConfig::OzDefender(config) => config.oz_address,
+            RelayerConfig::TxSitter(config) => config.tx_sitter_address,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct OzDefenderConfig {
+    /// Api url
+    #[serde(default = "default::oz_api_url")]
+    pub oz_api_url: String,
+
+    /// OpenZeppelin Defender API Key
+    pub oz_api_key: String,
+
+    /// OpenZeppelin Defender API Secret
+    pub oz_api_secret: String,
+
+    /// Address of OZ Relayer
+    pub oz_address: H160,
+
+    /// For how long we should track and retry the transaction.
+    /// Default: 7 days (7 * 24 * 60 * 60 = 604800 seconds)
+    #[serde(with = "humantime_serde")]
+    #[serde(default = "default::oz_transaction_validity")]
+    pub oz_transaction_validity: Duration,
+
+    #[serde(with = "humantime_serde")]
+    #[serde(default = "default::oz_send_timeout")]
+    pub oz_send_timeout: Duration,
+
+    #[serde(with = "humantime_serde")]
+    #[serde(default = "default::oz_mine_timeout")]
+    pub oz_mine_timeout: Duration,
+
+    pub oz_gas_limit: Option<u64>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TxSitterConfig {
+    pub tx_sitter_url: String,
+
+    pub tx_sitter_address: H160,
+
+    pub tx_sitter_gas_limit: Option<u64>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DatabaseConfig {
+    pub database: SecretUrl,
+
+    #[serde(default = "default::migrate")]
+    pub migrate: bool,
+
+    #[serde(default = "default::max_connections")]
+    pub max_connections: u32,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ServerConfig {
+    pub address: SocketAddr,
+
+    #[serde(with = "humantime_serde")]
+    #[serde(default = "default::serve_timeout")]
+    pub serve_timeout: Duration,
+}
+
+pub mod default {
+    use std::time::Duration;
+
+    pub fn oz_api_url() -> String {
+        "https://api.defender.openzeppelin.com".to_string()
+    }
+
+    pub fn oz_transaction_validity() -> Duration {
+        Duration::from_secs(604800)
+    }
+
+    pub fn oz_send_timeout() -> Duration {
+        Duration::from_secs(60)
+    }
+
+    pub fn oz_mine_timeout() -> Duration {
+        Duration::from_secs(60)
+    }
+
+    pub fn batch_insertion_timeout() -> Duration {
+        Duration::from_secs(180)
+    }
+
+    pub fn batch_deletion_timeout() -> Duration {
+        Duration::from_secs(3600)
+    }
+
+    pub fn min_batch_deletion_size() -> usize {
+        100
+    }
+
+    pub fn max_epoch_duration() -> Duration {
+        Duration::from_secs(0)
+    }
+
+    pub fn scanning_window_size() -> u64 {
+        100
+    }
+
+    pub fn scanning_chain_head_offset() -> u64 {
+        0
+    }
+
+    pub fn time_between_scans() -> Duration {
+        Duration::from_secs(30)
+    }
+
+    pub fn monitored_txs_capacity() -> usize {
+        100
+    }
+
+    pub fn serve_timeout() -> Duration {
+        Duration::from_secs(30)
+    }
+
+    pub fn migrate() -> bool {
+        true
+    }
+
+    pub fn
max_connections() -> u32 { + 10 + } + + pub fn tree_depth() -> usize { + 30 + } + + pub fn dense_tree_prefix_depth() -> usize { + 20 + } + + pub fn tree_gc_threshold() -> usize { + 10_000 + } + + pub fn cache_file() -> String { + "/data/cache_file".to_string() + } + + pub fn force_cache_purge() -> bool { + false + } + + pub fn initial_leaf_value() -> semaphore::Field { + semaphore::Field::from_be_bytes(hex_literal::hex!( + "0000000000000000000000000000000000000000000000000000000000000000" + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const MINIMAL_TOML: &str = indoc::indoc! {r#" + [app] + provers_urls = "[]" + + [tree] + + [network] + identity_manager_address = "0x0000000000000000000000000000000000000000" + + [providers] + primary_network_provider = "http://localhost:8545" + + [relayer] + kind = "tx_sitter" + tx_sitter_url = "http://localhost:3000" + tx_sitter_address = "0x0000000000000000000000000000000000000000" + + [database] + database = "postgres://user:password@localhost:5432/database" + + [server] + address = "0.0.0.0:3001" + "#}; + + #[test] + fn deserialize_minimal_config() { + let _config: Config = toml::from_str(MINIMAL_TOML).unwrap(); + } + + const FULL_TOML: &str = indoc::indoc! {r#" + [app] + provers_urls = "[]" + batch_insertion_timeout = "3m" + batch_deletion_timeout = "1h" + min_batch_deletion_size = 100 + max_epoch_duration = "0s" + scanning_window_size = 100 + scanning_chain_head_offset = 0 + time_between_scans = "30s" + monitored_txs_capacity = 100 + + [tree] + tree_depth = 30 + dense_tree_prefix_depth = 20 + tree_gc_threshold = 10000 + cache_file = "/data/cache_file" + force_cache_purge = false + initial_leaf_value = "0x0000000000000000000000000000000000000000000000000000000000000001" + + [network] + identity_manager_address = "0x0000000000000000000000000000000000000000" + relayed_identity_manager_addresses = "{}" + + [providers] + primary_network_provider = "http://localhost:8545/" + relayed_network_providers = "[]" + + [relayer] + kind = "tx_sitter" + tx_sitter_url = "http://localhost:3000" + tx_sitter_address = "0x0000000000000000000000000000000000000000" + tx_sitter_gas_limit = 100000 + + [database] + database = "postgres://user:password@localhost:5432/database" + migrate = true + max_connections = 10 + + [server] + address = "0.0.0.0:3001" + serve_timeout = "30s" + "#}; + + #[test] + fn full_toml_round_trip() { + let config: Config = toml::from_str(FULL_TOML).unwrap(); + let serialized = toml::to_string_pretty(&config).unwrap(); + + similar_asserts::assert_eq!(serialized.trim(), FULL_TOML.trim()); + } +} diff --git a/src/contracts/mod.rs b/src/contracts/mod.rs index 15556f28..f4e64832 100644 --- a/src/contracts/mod.rs +++ b/src/contracts/mod.rs @@ -2,64 +2,31 @@ pub mod abi; pub mod scanner; -use std::collections::HashMap; use std::sync::Arc; use anyhow::{anyhow, Context}; -use clap::Parser; use ethers::providers::Middleware; -use ethers::types::{Address, H256, U256}; +use ethers::types::{H256, U256}; use semaphore::Field; -use tokio::sync::RwLockReadGuard; +use tokio::sync::{RwLock, RwLockReadGuard}; use tracing::{error, info, instrument, warn}; use self::abi::{BridgedWorldId, DeleteIdentitiesCall, WorldId}; +use crate::config::Config; use crate::ethereum::write::TransactionId; use crate::ethereum::{Ethereum, ReadProvider}; use crate::prover::identity::Identity; -use crate::prover::map::{DeletionProverMap, InsertionProverMap, ReadOnlyInsertionProver}; -use crate::prover::{Proof, Prover, ProverConfiguration, ProverType, ReadOnlyProver}; -use 
crate::serde_utils::JsonStrWrapper; +use crate::prover::{Proof, Prover, ProverConfig, ProverMap, ProverType}; use crate::server::error::Error as ServerError; use crate::utils::index_packing::unpack_indices; -/// Configuration options for the component responsible for interacting with the -/// contract. -#[derive(Clone, Debug, PartialEq, Eq, Parser)] -#[group(skip)] -pub struct Options { - /// The address of the identity manager contract. - #[clap(long, env)] - pub identity_manager_address: Address, - - /// The addresses of world id contracts on secondary chains - /// mapped by chain id - #[clap(long, env, default_value = "{}")] - pub relayed_identity_manager_addresses: JsonStrWrapper>, - - /// The depth of the tree that the contract is working with. This needs to - /// agree with the verifier in the deployed contract, and also with - /// `semaphore-mtb`. - #[clap(long, env, default_value = "10")] - pub tree_depth: usize, - - /// Initial value of the Merkle tree leaves. Defaults to the initial value - /// used in the identity manager contract. - #[clap( - long, - env, - default_value = "0000000000000000000000000000000000000000000000000000000000000000" - )] - pub initial_leaf_value: Field, -} - /// A structure representing the interface to the batch-based identity manager /// contract. #[derive(Debug)] pub struct IdentityManager { ethereum: Ethereum, - insertion_prover_map: InsertionProverMap, - deletion_prover_map: DeletionProverMap, + insertion_prover_map: RwLock, + deletion_prover_map: RwLock, abi: WorldId, secondary_abis: Vec>, initial_leaf_value: Field, @@ -78,16 +45,16 @@ impl IdentityManager { #[instrument(level = "debug", skip_all)] pub async fn new( - options: Options, + config: &Config, ethereum: Ethereum, - insertion_prover_map: InsertionProverMap, - deletion_prover_map: DeletionProverMap, + insertion_prover_map: ProverMap, + deletion_prover_map: ProverMap, ) -> anyhow::Result where Self: Sized, { // Check that there is code deployed at the target address. - let address = options.identity_manager_address; + let address = config.network.identity_manager_address; let code = ethereum.provider().get_code(address, None).await?; if code.as_ref().is_empty() { error!( @@ -98,7 +65,7 @@ impl IdentityManager { // Connect to the running batching contract. 
let abi = WorldId::new( - options.identity_manager_address, + config.network.identity_manager_address, ethereum.provider().clone(), ); @@ -117,17 +84,20 @@ impl IdentityManager { let secondary_providers = ethereum.secondary_providers(); let mut secondary_abis = Vec::new(); - for (chain_id, address) in options.relayed_identity_manager_addresses.0 { + for (chain_id, address) in &config.network.relayed_identity_manager_addresses.0 { let provider = secondary_providers - .get(&chain_id) + .get(chain_id) .ok_or_else(|| anyhow!("No provider for chain id: {}", chain_id))?; - let abi = BridgedWorldId::new(address, provider.clone()); + let abi = BridgedWorldId::new(*address, provider.clone()); secondary_abis.push(abi); } - let initial_leaf_value = options.initial_leaf_value; - let tree_depth = options.tree_depth; + let initial_leaf_value = config.tree.initial_leaf_value; + let tree_depth = config.tree.tree_depth; + + let insertion_prover_map = RwLock::new(insertion_prover_map); + let deletion_prover_map = RwLock::new(deletion_prover_map); let identity_manager = Self { ethereum, @@ -179,7 +149,7 @@ impl IdentityManager { pub async fn get_suitable_insertion_prover( &self, num_identities: usize, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let prover_map = self.insertion_prover_map.read().await; match RwLockReadGuard::try_map(prover_map, |map| map.get(num_identities)) { @@ -193,7 +163,7 @@ impl IdentityManager { pub async fn get_suitable_deletion_prover( &self, num_identities: usize, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let prover_map = self.deletion_prover_map.read().await; match RwLockReadGuard::try_map(prover_map, |map| map.get(num_identities)) { @@ -210,7 +180,7 @@ impl IdentityManager { #[instrument(level = "debug", skip(prover, identity_commitments))] pub async fn prepare_insertion_proof( - prover: ReadOnlyInsertionProver<'_>, + prover: &Prover, start_index: usize, pre_root: U256, identity_commitments: &[Identity], @@ -240,7 +210,7 @@ impl IdentityManager { #[instrument(level = "debug", skip(prover, identity_commitments))] pub async fn prepare_deletion_proof( - prover: ReadOnlyProver<'_, Prover>, + prover: &Prover, pre_root: U256, deletion_indices: Vec, identity_commitments: Vec, @@ -443,7 +413,7 @@ impl IdentityManager { return Err(ServerError::BatchSizeAlreadyExists); } - let prover = Prover::new(&ProverConfiguration { + let prover = Prover::new(&ProverConfig { url: url.to_string(), batch_size, prover_type, @@ -480,7 +450,7 @@ impl IdentityManager { } } - pub async fn list_batch_sizes(&self) -> Result, ServerError> { + pub async fn list_batch_sizes(&self) -> Result, ServerError> { let mut provers = self .insertion_prover_map .read() diff --git a/src/database/mod.rs b/src/database/mod.rs index 55290490..36ae1696 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -8,7 +8,6 @@ use std::collections::HashSet; use anyhow::{anyhow, Context, Error as ErrReport}; use chrono::{DateTime, Utc}; -use clap::Parser; use sqlx::migrate::{Migrate, MigrateDatabase, Migrator}; use sqlx::pool::PoolOptions; use sqlx::{Executor, Pool, Postgres, Row}; @@ -16,55 +15,38 @@ use thiserror::Error; use tracing::{error, info, instrument, warn}; use self::types::{CommitmentHistoryEntry, DeletionEntry, LatestDeletionEntry, RecoveryEntry}; +use crate::config::DatabaseConfig; use crate::identity_tree::{ Hash, ProcessedStatus, RootItem, TreeItem, TreeUpdate, UnprocessedStatus, }; pub mod types; -use crate::prover::{ProverConfiguration, ProverType, Provers}; -use crate::secret::SecretUrl; +use 
crate::prover::{ProverConfig, ProverType}; // Statically link in migration files static MIGRATOR: Migrator = sqlx::migrate!("schemas/database"); const MAX_UNPROCESSED_FETCH_COUNT: i64 = 10_000; -#[derive(Clone, Debug, PartialEq, Eq, Parser)] -pub struct Options { - /// Database server connection string. - /// Example: `postgres://user:password@localhost:5432/database` - #[clap(long, env)] - pub database: SecretUrl, - - /// Allow creation or migration of the database schema. - #[clap(long, default_value = "true")] - pub database_migrate: bool, - - /// Maximum number of connections in the database connection pool - #[clap(long, env, default_value = "10")] - pub database_max_connections: u32, -} - pub struct Database { pool: Pool, } impl Database { #[instrument(skip_all)] - pub async fn new(options: Options) -> Result { - info!(url = %&options.database, "Connecting to database"); + pub async fn new(config: &DatabaseConfig) -> Result { + info!(url = %&config.database, "Connecting to database"); // Create database if requested and does not exist - if options.database_migrate && !Postgres::database_exists(options.database.expose()).await? - { - warn!(url = %&options.database, "Database does not exist, creating database"); - Postgres::create_database(options.database.expose()).await?; + if config.migrate && !Postgres::database_exists(config.database.expose()).await? { + warn!(url = %&config.database, "Database does not exist, creating database"); + Postgres::create_database(config.database.expose()).await?; } // Create a connection pool let pool = PoolOptions::::new() - .max_connections(options.database_max_connections) - .connect(options.database.expose()) + .max_connections(config.max_connections) + .connect(config.database.expose()) .await .context("error connecting to database")?; @@ -73,7 +55,7 @@ impl Database { .await .context("error getting database version")? .get::(0); - info!(url = %&options.database, ?version, "Connected to database"); + info!(url = %&config.database, ?version, "Connected to database"); // Run migrations if requested. let latest = MIGRATOR @@ -82,8 +64,8 @@ impl Database { .expect("Missing migrations") .version; - if options.database_migrate { - info!(url = %&options.database, "Running migrations"); + if config.migrate { + info!(url = %&config.database, "Running migrations"); MIGRATOR.run(&pool).await?; } @@ -92,7 +74,7 @@ impl Database { if let Some((version, dirty)) = pool.acquire().await?.version().await? 
{ if dirty { error!( - url = %&options.database, + url = %&config.database, version, expected = latest, "Database is in incomplete migration state.", @@ -100,7 +82,7 @@ impl Database { return Err(anyhow!("Database is in incomplete migration state.")); } else if version < latest { error!( - url = %&options.database, + url = %&config.database, version, expected = latest, "Database is not up to date, try rerunning with --database-migrate", @@ -110,7 +92,7 @@ impl Database { )); } else if version > latest { error!( - url = %&options.database, + url = %&config.database, version, latest, "Database version is newer than this version of the software, please update.", @@ -120,13 +102,13 @@ impl Database { )); } info!( - url = %&options.database, + url = %&config.database, version, latest, "Database version is up to date.", ); } else { - error!(url = %&options.database, "Could not get database version"); + error!(url = %&config.database, "Could not get database version"); return Err(anyhow!("Could not get database version.")); } @@ -532,7 +514,7 @@ impl Database { Ok(result.get::(0) as i32) } - pub async fn get_provers(&self) -> Result { + pub async fn get_provers(&self) -> Result, Error> { let query = sqlx::query( r#" SELECT batch_size, url, timeout_s, prover_type @@ -549,14 +531,15 @@ impl Database { let url = row.get::(1); let timeout_s = row.get::(2) as u64; let prover_type = row.get::(3); - ProverConfiguration { + + ProverConfig { url, timeout_s, batch_size, prover_type, } }) - .collect::()) + .collect()) } pub async fn insert_prover_configuration( @@ -584,7 +567,7 @@ impl Database { Ok(()) } - pub async fn insert_provers(&self, provers: HashSet) -> Result<(), Error> { + pub async fn insert_provers(&self, provers: HashSet) -> Result<(), Error> { if provers.is_empty() { return Ok(()); } @@ -915,10 +898,11 @@ mod test { use ruint::Uint; use semaphore::Field; - use super::{Database, Options}; + use super::Database; + use crate::config::DatabaseConfig; use crate::identity_tree::{Hash, ProcessedStatus, Status, UnprocessedStatus}; - use crate::prover::{ProverConfiguration, ProverType}; - use crate::secret::SecretUrl; + use crate::prover::{ProverConfig, ProverType}; + use crate::utils::secret::SecretUrl; macro_rules! 
assert_same_time { ($a:expr, $b:expr, $diff:expr) => { @@ -947,10 +931,10 @@ mod test { let db_socket_addr = db_container.address(); let url = format!("postgres://postgres:postgres@{db_socket_addr}/database"); - let db = Database::new(Options { - database: SecretUrl::from_str(&url)?, - database_migrate: true, - database_max_connections: 1, + let db = Database::new(&DatabaseConfig { + database: SecretUrl::from_str(&url)?, + migrate: true, + max_connections: 1, }) .await?; @@ -1047,17 +1031,17 @@ mod test { Ok(()) } - fn mock_provers() -> HashSet { + fn mock_provers() -> HashSet { let mut provers = HashSet::new(); - provers.insert(ProverConfiguration { + provers.insert(ProverConfig { batch_size: 100, url: "http://localhost:8080".to_string(), timeout_s: 100, prover_type: ProverType::Insertion, }); - provers.insert(ProverConfiguration { + provers.insert(ProverConfig { batch_size: 100, url: "http://localhost:8080".to_string(), timeout_s: 100, @@ -1071,14 +1055,14 @@ mod test { async fn test_insert_prover_configuration() -> anyhow::Result<()> { let (db, _db_container) = setup_db().await?; - let mock_prover_configuration_0 = ProverConfiguration { + let mock_prover_configuration_0 = ProverConfig { batch_size: 100, url: "http://localhost:8080".to_string(), timeout_s: 100, prover_type: ProverType::Insertion, }; - let mock_prover_configuration_1 = ProverConfiguration { + let mock_prover_configuration_1 = ProverConfig { batch_size: 100, url: "http://localhost:8081".to_string(), timeout_s: 100, diff --git a/src/ethereum/mod.rs b/src/ethereum/mod.rs index 38c641b7..95609e32 100644 --- a/src/ethereum/mod.rs +++ b/src/ethereum/mod.rs @@ -1,39 +1,21 @@ use std::collections::HashMap; use std::sync::Arc; -use clap::Parser; use ethers::types::transaction::eip2718::TypedTransaction; use ethers::types::Address; pub use read::{EventError, ReadProvider}; use tracing::instrument; -use url::Url; pub use write::TxError; use self::write::TransactionId; use self::write_provider::WriteProvider; -use crate::serde_utils::JsonStrWrapper; +use crate::config::Config; pub mod read; pub mod write; mod write_provider; -// TODO: Log and metrics for signer / nonces. 
-#[derive(Clone, Debug, PartialEq, Parser)] -#[group(skip)] -pub struct Options { - /// Ethereum API Provider - #[clap(long, env, default_value = "http://localhost:8545")] - pub ethereum_provider: Url, - - /// Provider urls for the secondary chains - #[clap(long, env, default_value = "[]")] - pub secondary_providers: JsonStrWrapper>, - - #[clap(flatten)] - pub write_options: write_provider::Options, -} - #[derive(Clone, Debug)] pub struct Ethereum { read_provider: Arc, @@ -44,13 +26,14 @@ pub struct Ethereum { impl Ethereum { #[instrument(name = "Ethereum::new", level = "debug", skip_all)] - pub async fn new(options: Options) -> anyhow::Result { - let read_provider = ReadProvider::new(options.ethereum_provider).await?; + pub async fn new(config: &Config) -> anyhow::Result { + let read_provider = + ReadProvider::new(config.providers.primary_network_provider.clone().into()).await?; let mut secondary_read_providers = HashMap::new(); - for secondary_url in &options.secondary_providers.0 { - let secondary_read_provider = ReadProvider::new(secondary_url.clone()).await?; + for secondary_url in &config.providers.relayed_network_providers.0 { + let secondary_read_provider = ReadProvider::new(secondary_url.clone().into()).await?; secondary_read_providers.insert( secondary_read_provider.chain_id.as_u64(), Arc::new(secondary_read_provider), @@ -58,8 +41,7 @@ impl Ethereum { } let write_provider: Arc = Arc::new( - write_provider::WriteProvider::new(read_provider.clone(), &options.write_options) - .await?, + write_provider::WriteProvider::new(read_provider.clone(), &config.relayer).await?, ); Ok(Self { diff --git a/src/ethereum/write_provider/mod.rs b/src/ethereum/write_provider/mod.rs index f3707e13..f0c7d6aa 100644 --- a/src/ethereum/write_provider/mod.rs +++ b/src/ethereum/write_provider/mod.rs @@ -8,19 +8,16 @@ use tracing::{info, warn}; use self::inner::Inner; use self::openzeppelin::OzRelay; -use self::options::ParsedOptions; use self::tx_sitter::TxSitter; use super::write::TransactionId; use super::{ReadProvider, TxError}; +use crate::config::RelayerConfig; mod error; mod inner; mod openzeppelin; -mod options; mod tx_sitter; -pub use self::options::Options; - pub struct WriteProvider { read_provider: ReadProvider, inner: Arc, @@ -38,18 +35,17 @@ impl fmt::Debug for WriteProvider { } impl WriteProvider { - pub async fn new(read_provider: ReadProvider, options: &Options) -> anyhow::Result { - let options = options.to_parsed()?; - let address = options.address(); + pub async fn new(read_provider: ReadProvider, config: &RelayerConfig) -> anyhow::Result { + let address = config.address(); - let inner: Arc = match options { - ParsedOptions::Oz(oz_options) => { + let inner: Arc = match config { + RelayerConfig::OzDefender(oz_config) => { tracing::info!("Initializing OZ Relayer"); - Arc::new(OzRelay::new(&oz_options).await?) + Arc::new(OzRelay::new(oz_config).await?) 
} - ParsedOptions::TxSitter(tx_sitter_options) => { + RelayerConfig::TxSitter(tx_sitter_config) => { tracing::info!("Initializing TxSitter"); - Arc::new(TxSitter::new(&tx_sitter_options)) + Arc::new(TxSitter::new(tx_sitter_config)) } }; diff --git a/src/ethereum/write_provider/openzeppelin.rs b/src/ethereum/write_provider/openzeppelin.rs index 3a751357..072c4cef 100644 --- a/src/ethereum/write_provider/openzeppelin.rs +++ b/src/ethereum/write_provider/openzeppelin.rs @@ -11,7 +11,7 @@ use tracing::{error, info, info_span, Instrument}; use super::error::Error; use super::inner::{Inner, TransactionResult}; -use super::options::OzOptions; +use crate::config::OzDefenderConfig; use crate::ethereum::write::TransactionId; use crate::ethereum::TxError; @@ -32,7 +32,7 @@ pub struct OzRelay { } impl OzRelay { - pub async fn new(options: &OzOptions) -> anyhow::Result { + pub async fn new(options: &OzDefenderConfig) -> anyhow::Result { let oz_api = if options.oz_api_key.is_empty() && options.oz_api_secret.is_empty() { tracing::warn!( "OpenZeppelin Defender API Key and Secret are empty. Connection will operate \ diff --git a/src/ethereum/write_provider/options.rs b/src/ethereum/write_provider/options.rs deleted file mode 100644 index 53f6ba04..00000000 --- a/src/ethereum/write_provider/options.rs +++ /dev/null @@ -1,160 +0,0 @@ -use std::num::ParseIntError; -use std::str::FromStr; -use std::time::Duration; - -use anyhow::anyhow; -use clap::Parser; -use ethers::types::H160; - -// TODO: Log and metrics for signer / nonces. -#[derive(Clone, Debug, Eq, PartialEq, Parser)] -#[group(skip)] -pub struct Options { - // ### OZ Params ### - #[clap(long, env, default_value = "https://api.defender.openzeppelin.com")] - pub oz_api_url: Option, - - /// OpenZeppelin Defender API Key - #[clap(long, env)] - pub oz_api_key: Option, - - /// OpenZeppelin Defender API Secret - #[clap(long, env)] - pub oz_api_secret: Option, - - /// OpenZeppelin Defender API Secret - #[clap(long, env)] - pub oz_address: Option, - - /// For how long OpenZeppelin should track and retry the transaction (in - /// seconds) Default: 7 days (7 * 24 * 60 * 60 = 604800 seconds) - #[clap(long, env, value_parser=duration_from_str, default_value="604800")] - pub oz_transaction_validity: Duration, - - #[clap(long, env, value_parser=duration_from_str, default_value="60")] - pub oz_send_timeout: Duration, - - #[clap(long, env, value_parser=duration_from_str, default_value="60")] - pub oz_mine_timeout: Duration, - - #[clap(long, env)] - pub oz_gas_limit: Option, - - // ### TxSitter Params ### - #[clap(long, env)] - pub tx_sitter_url: Option, - - #[clap(long, env)] - pub tx_sitter_address: Option, - - #[clap(long, env)] - pub tx_sitter_gas_limit: Option, -} - -fn duration_from_str(value: &str) -> Result { - Ok(Duration::from_secs(u64::from_str(value)?)) -} - -impl Options { - pub fn to_parsed(&self) -> anyhow::Result { - let oz_options = OzOptions::try_from(self); - if let Ok(oz_options) = oz_options { - return Ok(ParsedOptions::Oz(oz_options)); - } - - let tx_sitter_options = TxSitterOptions::try_from(self); - if let Ok(tx_sitter_options) = tx_sitter_options { - return Ok(ParsedOptions::TxSitter(tx_sitter_options)); - } - - Err(anyhow!("Invalid options")) - } -} - -pub enum ParsedOptions { - Oz(OzOptions), - TxSitter(TxSitterOptions), -} - -impl ParsedOptions { - pub fn address(&self) -> H160 { - match self { - Self::Oz(oz_options) => oz_options.oz_address, - Self::TxSitter(tx_sitter_options) => tx_sitter_options.tx_sitter_address, - } - } -} - -pub struct 
OzOptions { - pub oz_api_url: String, - - /// OpenZeppelin Defender API Key - pub oz_api_key: String, - - /// OpenZeppelin Defender API Secret - pub oz_api_secret: String, - - /// OpenZeppelin Defender API Secret - pub oz_address: H160, - - /// For how long OpenZeppelin should track and retry the transaction (in - /// seconds) Default: 7 days (7 * 24 * 60 * 60 = 604800 seconds) - pub oz_transaction_validity: Duration, - - pub oz_send_timeout: Duration, - - pub oz_mine_timeout: Duration, - - pub oz_gas_limit: Option, -} - -impl<'a> TryFrom<&'a Options> for OzOptions { - type Error = anyhow::Error; - - fn try_from(value: &'a Options) -> Result { - Ok(Self { - oz_api_url: value - .oz_api_url - .clone() - .ok_or_else(|| anyhow!("Missing oz_api_url"))?, - oz_api_key: value - .oz_api_key - .clone() - .ok_or_else(|| anyhow!("Missing oz_api_key"))?, - oz_api_secret: value - .oz_api_secret - .clone() - .ok_or_else(|| anyhow!("Missing oz_api_secret"))?, - oz_address: value - .oz_address - .ok_or_else(|| anyhow!("Missing oz_address"))?, - oz_transaction_validity: value.oz_transaction_validity, - oz_send_timeout: value.oz_send_timeout, - oz_mine_timeout: value.oz_mine_timeout, - oz_gas_limit: value.oz_gas_limit, - }) - } -} - -pub struct TxSitterOptions { - pub tx_sitter_url: String, - pub tx_sitter_address: H160, - pub tx_sitter_gas_limit: Option, -} - -impl<'a> TryFrom<&'a Options> for TxSitterOptions { - type Error = anyhow::Error; - - fn try_from(value: &'a Options) -> Result { - Ok(Self { - tx_sitter_url: value - .tx_sitter_url - .clone() - .ok_or_else(|| anyhow!("Missing tx_sitter_url"))?, - tx_sitter_address: value - .tx_sitter_address - .ok_or_else(|| anyhow!("Missing tx_sitter_address"))?, - tx_sitter_gas_limit: value.tx_sitter_gas_limit, - }) - } -} diff --git a/src/ethereum/write_provider/tx_sitter.rs b/src/ethereum/write_provider/tx_sitter.rs index a96a0433..1c80ce63 100644 --- a/src/ethereum/write_provider/tx_sitter.rs +++ b/src/ethereum/write_provider/tx_sitter.rs @@ -8,7 +8,7 @@ use tx_sitter_client::data::{SendTxRequest, TransactionPriority, TxStatus}; use tx_sitter_client::TxSitterClient; use super::inner::{Inner, TransactionResult}; -use super::options::TxSitterOptions; +use crate::config::TxSitterConfig; use crate::ethereum::write::TransactionId; use crate::ethereum::TxError; @@ -20,10 +20,10 @@ pub struct TxSitter { } impl TxSitter { - pub fn new(options: &TxSitterOptions) -> Self { + pub fn new(config: &TxSitterConfig) -> Self { Self { - client: TxSitterClient::new(&options.tx_sitter_url), - gas_limit: options.tx_sitter_gas_limit, + client: TxSitterClient::new(&config.tx_sitter_url), + gas_limit: config.tx_sitter_gas_limit, } } diff --git a/src/identity_tree.rs b/src/identity_tree.rs index 6b3e71f4..011e24bc 100644 --- a/src/identity_tree.rs +++ b/src/identity_tree.rs @@ -519,6 +519,10 @@ impl TreeState { } } + pub fn latest_tree(&self) -> &TreeVersion { + &self.latest + } + #[must_use] pub fn get_latest_tree(&self) -> TreeVersion { self.latest.clone() @@ -529,16 +533,28 @@ impl TreeState { self.mined.clone() } + pub fn mined_tree(&self) -> &TreeVersion { + &self.mined + } + #[must_use] pub fn get_processed_tree(&self) -> TreeVersion { self.processed.clone() } + pub fn processed_tree(&self) -> &TreeVersion { + &self.processed + } + #[must_use] pub fn get_batching_tree(&self) -> TreeVersion { self.batching.clone() } + pub fn batching_tree(&self) -> &TreeVersion { + &self.batching + } + #[must_use] pub fn get_proof_for(&self, item: &TreeItem) -> (Field, InclusionProof) { let (leaf, 
root, proof) = match item.status { diff --git a/src/lib.rs b/src/lib.rs index 984b8793..6e130269 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,90 +1,14 @@ #![doc = include_str!("../Readme.md")] #![warn(clippy::cargo)] -#![allow( - clippy::module_name_repetitions, - clippy::wildcard_imports, - clippy::too_many_arguments -)] +#![allow(clippy::too_many_arguments)] pub mod app; +pub mod config; mod contracts; mod database; mod ethereum; pub mod identity_tree; -mod prover; -pub mod secret; -mod serde_utils; +pub mod prover; pub mod server; -mod task_monitor; +pub mod task_monitor; pub mod utils; - -use std::sync::Arc; - -use clap::Parser; -use tracing::info; - -use crate::app::App; - -#[derive(Clone, Debug, PartialEq, Parser)] -#[group(skip)] -pub struct Options { - #[clap(flatten)] - pub app: app::Options, - - #[clap(flatten)] - pub server: server::Options, -} - -/// ``` -/// assert!(true); -/// ``` -#[allow(clippy::missing_errors_doc)] -pub async fn main(options: Options) -> anyhow::Result<()> { - // Create App struct - let app = Arc::new(App::new(options.app).await?); - let app_for_server = app.clone(); - - // Start server (will stop on shutdown signal) - server::main(app_for_server, options.server).await?; - - info!("Stopping the app"); - app.shutdown().await?; - - Ok(()) -} - -#[cfg(test)] -pub mod test { - use tracing::{error, warn}; - use tracing_test::traced_test; - - use super::*; - - #[test] - #[allow(clippy::disallowed_methods)] // False positive from macro - #[traced_test] - fn test_with_log_output() { - error!("logged on the error level"); - assert!(logs_contain("logged on the error level")); - } - - #[tokio::test] - #[allow(clippy::disallowed_methods)] // False positive from macro - #[traced_test] - async fn async_test_with_log() { - // Local log - info!("This is being logged on the info level"); - - // Log from a spawned task (which runs in a separate thread) - tokio::spawn(async { - warn!("This is being logged on the warn level from a spawned task"); - }) - .await - .unwrap(); - - // Ensure that `logs_contain` works as intended - assert!(logs_contain("logged on the info level")); - assert!(logs_contain("logged on the warn level")); - assert!(!logs_contain("logged on the error level")); - } -} diff --git a/src/main.rs b/src/main.rs index 612cc4b1..f77da435 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,15 +2,61 @@ #![warn(clippy::cargo)] #![allow(clippy::module_name_repetitions, clippy::wildcard_imports)] +use std::path::PathBuf; +use std::sync::Arc; + +use clap::Parser; use cli_batteries::{run, version}; -use signup_sequencer::{main as sequencer_app, Options}; +use signup_sequencer::app::App; +use signup_sequencer::config::Config; +use signup_sequencer::server; +use signup_sequencer::task_monitor::TaskMonitor; + +#[derive(Debug, Clone, Parser)] +struct Args { + /// Path to the optional config file + config: Option, +} -async fn app(options: Options) -> eyre::Result<()> { - sequencer_app(options) +async fn app(args: Args) -> eyre::Result<()> { + sequencer_app(args) .await .map_err(|e| eyre::eyre!("{:?}", e)) } +async fn sequencer_app(args: Args) -> anyhow::Result<()> { + let mut settings = config::Config::builder(); + + if let Some(path) = args.config { + settings = settings.add_source(config::File::from(path).required(true)); + } + + let settings = settings + .add_source(config::Environment::with_prefix("SEQ").separator("__")) + .build()?; + + let config = settings.try_deserialize::()?; + + let server_config = config.server.clone(); + + // Create App struct + let app = 
Arc::new(App::new(config).await?); + let app_for_server = app.clone(); + + let task_monitor = TaskMonitor::new(app); + + // Process to push new identities to Ethereum + task_monitor.start().await; + + // Start server (will stop on shutdown signal) + server::run(app_for_server, server_config).await?; + + tracing::info!("Stopping the app"); + task_monitor.shutdown().await?; + + Ok(()) +} + fn main() { run(version!(semaphore, ethers), app); } diff --git a/src/prover/mod.rs b/src/prover.rs similarity index 95% rename from src/prover/mod.rs rename to src/prover.rs index d178fe0d..5f7a8f97 100644 --- a/src/prover/mod.rs +++ b/src/prover.rs @@ -11,16 +11,14 @@ pub mod identity; pub mod map; pub mod proof; -use std::collections::HashSet; use std::fmt::{Display, Formatter}; use std::hash::{Hash, Hasher}; use std::mem::size_of; use std::time::Duration; -use clap::Parser; use ethers::types::U256; use ethers::utils::keccak256; -pub use map::{InsertionProverMap, ProverMap, ReadOnlyProver}; +pub use map::ProverMap; use once_cell::sync::Lazy; use prometheus::{exponential_buckets, register_histogram, Histogram}; pub use proof::Proof; @@ -28,7 +26,6 @@ use serde::{Deserialize, Serialize}; use url::Url; use crate::prover::identity::Identity; -use crate::serde_utils::JsonStrWrapper; use crate::utils::index_packing::pack_indices; /// The endpoint used for proving operations. @@ -52,24 +49,10 @@ static PROVER_PROVING_TIME: Lazy = Lazy::new(|| { .unwrap() }); -#[derive(Clone, Debug, PartialEq, Eq, Parser)] -#[group(skip)] -pub struct Options { - /// The options for configuring the batch insertion prover service. - /// - /// This should be a JSON array containing objects of the following format `{"url": "http://localhost:3001","batch_size": 3,"timeout_s": 30,"prover_type", "insertion"}` - #[clap( - long, - env, - default_value = r#"[{"url": "http://localhost:3001","batch_size": 3,"timeout_s": 30,"prover_type": "insertion"}]"# //TODO: update this and test - )] - pub prover_urls: JsonStrWrapper>, -} - /// Configuration options for the component responsible for interacting with the /// prover service. #[derive(Clone, Debug, Eq, Serialize, Deserialize)] -pub struct ProverConfiguration { +pub struct ProverConfig { /// The URL at which to contact the semaphore prover service for proof /// generation. pub url: String, @@ -95,21 +78,28 @@ pub enum ProverType { Deletion, } -impl Hash for ProverConfiguration { +impl std::fmt::Display for ProverType { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + ProverType::Insertion => write!(f, "insertion"), + ProverType::Deletion => write!(f, "deletion"), + } + } +} + +impl Hash for ProverConfig { fn hash(&self, state: &mut H) { self.batch_size.hash(state); self.prover_type.hash(state); } } -impl PartialEq for ProverConfiguration { +impl PartialEq for ProverConfig { fn eq(&self, other: &Self) -> bool { self.batch_size.eq(&other.batch_size) && self.prover_type.eq(&other.prover_type) } } -pub type Provers = HashSet; - /// A representation of the connection to the MTB prover service. #[derive(Clone, Debug)] pub struct Prover { @@ -125,7 +115,7 @@ impl Prover { /// /// # Arguments /// - `options`: The prover configuration options. 
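Aside on the new entrypoint above: `sequencer_app` layers an optional config file under `SEQ`-prefixed environment variables, with `__` separating nesting levels. A minimal sketch of that layering using the `config` and `humantime-serde` crates added in this change; the two-field struct here is hypothetical, not the real `Config`:

```rust
use std::time::Duration;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ExampleConfig {
    // Hypothetical fields; the real `Config` nests `app`, `server`, etc.
    database_url: String,
    // humantime-serde accepts values like "30s" or "7days" and
    // deserializes them into a std::time::Duration.
    #[serde(with = "humantime_serde")]
    serve_timeout: Duration,
}

fn load() -> Result<ExampleConfig, config::ConfigError> {
    // SEQ__DATABASE_URL=... and SEQ__SERVE_TIMEOUT=30s override any
    // values read from the optional file, mirroring the builder in main.rs.
    config::Config::builder()
        .add_source(config::File::with_name("config").required(false))
        .add_source(config::Environment::with_prefix("SEQ").separator("__"))
        .build()?
        .try_deserialize()
}
```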
- pub fn new(options: &ProverConfiguration) -> anyhow::Result { + pub fn new(options: &ProverConfig) -> anyhow::Result { let target_url = Url::parse(&options.url)?; let timeout_duration = Duration::from_secs(options.timeout_s); let client = reqwest::Client::builder() @@ -146,7 +136,7 @@ impl Prover { /// Creates a new batch insertion prover from the prover taken from the /// database - pub fn from_prover_conf(prover_conf: &ProverConfiguration) -> anyhow::Result { + pub fn from_prover_conf(prover_conf: &ProverConfig) -> anyhow::Result { let target_url = Url::parse(&prover_conf.url)?; let timeout_duration = Duration::from_secs(prover_conf.timeout_s); let client = reqwest::Client::builder() @@ -430,7 +420,7 @@ mod test { let mock_url: String = "0.0.0.0:3001".into(); let mock_service = mock::Service::new(mock_url.clone()).await?; - let options = ProverConfiguration { + let options = ProverConfig { url: "http://localhost:3001".into(), timeout_s: 30, batch_size: 3, @@ -462,7 +452,7 @@ mod test { let mock_url: String = "0.0.0.0:3002".into(); let mock_service = mock::Service::new(mock_url.clone()).await?; - let options = ProverConfiguration { + let options = ProverConfig { url: "http://localhost:3002".into(), timeout_s: 30, batch_size: 3, @@ -490,7 +480,7 @@ mod test { #[tokio::test] async fn prover_should_error_if_batch_size_wrong() -> anyhow::Result<()> { - let options = ProverConfiguration { + let options = ProverConfig { url: "http://localhost:3002".into(), timeout_s: 30, batch_size: 10, diff --git a/src/prover/batch_insertion/mod.rs b/src/prover/batch_insertion/mod.rs deleted file mode 100644 index 0166bb3b..00000000 --- a/src/prover/batch_insertion/mod.rs +++ /dev/null @@ -1,588 +0,0 @@ -mod identity; - -use std::fmt::{Display, Formatter}; -use std::mem::size_of; -use std::time::Duration; - -use clap::Parser; -use ethers::types::U256; -use ethers::utils::keccak256; -use once_cell::sync::Lazy; -use prometheus::{exponential_buckets, register_histogram, Histogram}; -use serde::{Deserialize, Serialize}; -use url::Url; - -use crate::database::prover::ProverConfiguration as DbProverConfiguration; -pub use crate::prover::batch_insertion::identity::Identity; -use crate::prover::Proof; -use crate::serde_utils::JsonStrWrapper; - -/// The endpoint used for proving operations. -const MTB_PROVE_ENDPOINT: &str = "prove"; - -static TOTAL_PROVING_TIME: Lazy = Lazy::new(|| { - register_histogram!( - "total_proving_time", - "The time to generate a proof in seconds. Includes preparing the data for the prover", - exponential_buckets(0.1, 1.5, 25).unwrap() - ) - .unwrap() -}); - -static PROVER_PROVING_TIME: Lazy = Lazy::new(|| { - register_histogram!( - "prover_proving_time", - "Only the time between sending a request and receiving the proof", - exponential_buckets(0.1, 1.5, 25).unwrap() - ) - .unwrap() -}); - -#[derive(Clone, Debug, PartialEq, Eq, Parser)] -#[group(skip)] -pub struct Options { - /// The options for configuring the batch insertion prover service. - /// - /// This should be a JSON array containing objects of the following format `{"url": "http://localhost:3001","batch_size": 3,"timeout_s": 30}` - #[clap( - long, - env, - default_value = r#"[{"url": "http://localhost:3001","batch_size": 3,"timeout_s": 30}]"# - )] - pub prover_urls: JsonStrWrapper>, -} - -/// Configuration options for the component responsible for interacting with the -/// prover service. 
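One detail worth flagging in the `ProverConfig` impls above: `Hash` and `PartialEq` consider only `(batch_size, prover_type)`, so any set keyed on `ProverConfig` holds at most one prover per batch size and type, regardless of URL or timeout. A small illustration; the field set is taken from this diff, the values are made up:

```rust
use std::collections::HashSet;

use signup_sequencer::prover::{ProverConfig, ProverType};

fn main() {
    let a = ProverConfig {
        url: "http://prover-a:3001".into(),
        timeout_s: 30,
        batch_size: 3,
        prover_type: ProverType::Insertion,
    };
    // Same batch size and prover type, different URL and timeout.
    let b = ProverConfig {
        url: "http://prover-b:3001".into(),
        timeout_s: 60,
        batch_size: 3,
        prover_type: ProverType::Insertion,
    };

    let mut set = HashSet::new();
    assert!(set.insert(a));
    // `b` compares equal to `a`, so the set rejects it and keeps one entry.
    assert!(!set.insert(b));
    assert_eq!(set.len(), 1);
}
```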
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ProverConfiguration { - /// The URL at which to contact the semaphore prover service for proof - /// generation. - pub url: String, - - /// The number of seconds to wait before timing out the transaction. - pub timeout_s: u64, - - // TODO Add and query a prover `info` endpoint instead. - /// The batch size that the prover is set up to work with. This must match - /// the deployed prover. - pub batch_size: usize, -} - -/// A representation of the connection to the MTB prover service. -#[derive(Clone, Debug)] -pub struct Prover { - target_url: Url, - client: reqwest::Client, - batch_size: usize, - timeout_s: u64, -} - -impl Prover { - /// Constructs a new instance of the Merkle Tree Batcher (or Mtb). - /// - /// # Arguments - /// - `options`: The prover configuration options. - pub fn new(options: &ProverConfiguration) -> anyhow::Result { - let target_url = Url::parse(&options.url)?; - let timeout_duration = Duration::from_secs(options.timeout_s); - let timeout_s = options.timeout_s; - let batch_size = options.batch_size; - let client = reqwest::Client::builder() - .connect_timeout(timeout_duration) - .https_only(false) - .build()?; - let mtb = Self { - target_url, - client, - batch_size, - timeout_s, - }; - - Ok(mtb) - } - - /// Creates a new batch insertion prover from the prover taken from the - /// database - pub fn from_prover_conf(prover_conf: &DbProverConfiguration) -> anyhow::Result { - let target_url = Url::parse(&prover_conf.url)?; - let timeout_duration = Duration::from_secs(prover_conf.timeout_s); - let client = reqwest::Client::builder() - .connect_timeout(timeout_duration) - .https_only(false) - .build()?; - - Ok(Self { - target_url, - client, - batch_size: prover_conf.batch_size, - timeout_s: prover_conf.timeout_s, - }) - } - - pub fn batch_size(&self) -> usize { - self.batch_size - } - - pub fn timeout_s(&self) -> u64 { - self.timeout_s - } - - /// Generates a proof term for the provided identity insertions into the - /// merkle tree. - /// - /// # Arguments - /// - `start_index`: The index in the merkle tree at which the insertions - /// were started. - /// - `pre_root`: The value of the merkle tree's root before identities were - /// inserted. - /// - `post_root`: The value of the merkle tree's root after the identities - /// were inserted. - /// - `identities`: A list of identity insertions, ordered in the order the - /// identities were inserted into the merkle tree. - pub async fn generate_proof( - &self, - start_index: u32, - pre_root: U256, - post_root: U256, - identities: &[Identity], - ) -> anyhow::Result { - if identities.len() != self.batch_size { - return Err(anyhow::Error::msg( - "Provided batch does not match prover batch size.", - )); - } - - let total_proving_time_timer = TOTAL_PROVING_TIME.start_timer(); - - let identity_commitments: Vec = identities.iter().map(|id| id.commitment).collect(); - let input_hash = - compute_input_hash(start_index, pre_root, post_root, &identity_commitments); - let merkle_proofs = identities - .iter() - .map(|id| id.merkle_proof.clone()) - .collect(); - - let proof_input = ProofInput { - input_hash, - start_index, - pre_root, - post_root, - identity_commitments, - merkle_proofs, - }; - - let request = self - .client - .post(self.target_url.join(MTB_PROVE_ENDPOINT)?) 
- .body("OH MY GOD") - .json(&proof_input) - .build()?; - - let prover_proving_time_timer = PROVER_PROVING_TIME.start_timer(); - let proof_term = self.client.execute(request).await?; - let proof_term = proof_term.error_for_status()?; - prover_proving_time_timer.observe_duration(); - - let json = proof_term.text().await?; - - let Ok(proof) = serde_json::from_str::(&json) else { - let error: ProverError = serde_json::from_str(&json)?; - return Err(anyhow::Error::msg(format!("{error}"))); - }; - - total_proving_time_timer.observe_duration(); - - Ok(proof) - } - - pub fn url(&self) -> String { - self.target_url.to_string() - } -} - -/// Computes the input hash to the prover. -/// -/// The input hash is specified as the `keccak256` hash of the inputs arranged -/// as follows: -/// -/// ```md -/// StartIndex || PreRoot || PostRoot || IdComms[0] || IdComms[1] || ... || IdComms[batchSize-1] -/// 32 || 256 || 256 || 256 || 256 || ... || 256 bits -/// ``` -/// -/// where: -/// - `StartIndex` is `start_index`, the leaf index in the tree from which the -/// insertions started. -/// - `PreRoot` is `pre_root`, the root value of the merkle tree before the -/// insertions were made. -/// - `PostRoot` is `post_root`, the root value of the merkle tree after the -/// insertions were made. -/// - `IdComms` is `identity_commitments`, the list of identity commitments -/// provided in the order that they were inserted into the tree. -/// -/// The result is computed using the inputs in _big-endian_ byte ordering. -pub fn compute_input_hash( - start_index: u32, - pre_root: U256, - post_root: U256, - identity_commitments: &[U256], -) -> U256 { - let mut pre_root_bytes: [u8; size_of::()] = Default::default(); - pre_root.to_big_endian(pre_root_bytes.as_mut_slice()); - let mut post_root_bytes: [u8; size_of::()] = Default::default(); - post_root.to_big_endian(post_root_bytes.as_mut_slice()); - - let mut bytes: Vec = vec![]; - bytes.extend_from_slice(&start_index.to_be_bytes()); - bytes.extend(pre_root_bytes.iter()); - bytes.extend(post_root_bytes.iter()); - - for commitment in identity_commitments.iter() { - let mut commitment_bytes: [u8; size_of::()] = Default::default(); - commitment.to_big_endian(commitment_bytes.as_mut_slice()); - bytes.extend(commitment_bytes.iter()); - } - - keccak256(bytes).into() -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -struct ProverError { - pub code: String, - pub message: String, -} - -impl Display for ProverError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "PROVER FAILURE: Code = {}, Message = {}", - self.code, self.message - ) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -struct ProofInput { - input_hash: U256, - start_index: u32, - pre_root: U256, - post_root: U256, - identity_commitments: Vec, - merkle_proofs: Vec>, -} - -#[cfg(test)] -mod test { - use super::*; - - #[tokio::test] - async fn mtb_should_generate_proof_with_correct_inputs() -> anyhow::Result<()> { - let mock_url: String = "0.0.0.0:3001".into(); - let mock_service = mock::Service::new(mock_url.clone()).await?; - - let options = ProverConfiguration { - url: "http://localhost:3001".into(), - timeout_s: 30, - batch_size: 3, - }; - let mtb = Prover::new(&options).unwrap(); - let input_data = get_default_proof_input(); - let identities: Vec = extract_identities_from(&input_data); - - let expected_proof = get_default_proof_output(); - let proof = mtb - 
.generate_proof( - input_data.start_index, - input_data.pre_root, - input_data.post_root, - &identities, - ) - .await?; - - mock_service.stop(); - - assert_eq!(proof, expected_proof); - - Ok(()) - } - - #[tokio::test] - async fn mtb_should_respond_with_error_if_inputs_incorrect() -> anyhow::Result<()> { - let mock_url: String = "0.0.0.0:3002".into(); - let mock_service = mock::Service::new(mock_url.clone()).await?; - - let options = ProverConfiguration { - url: "http://localhost:3002".into(), - timeout_s: 30, - batch_size: 3, - }; - let mtb = Prover::new(&options).unwrap(); - let mut input_data = get_default_proof_input(); - let identities = extract_identities_from(&input_data); - input_data.post_root = U256::from(2); - - let prover_result = mtb - .generate_proof( - input_data.start_index, - input_data.pre_root, - input_data.post_root, - &identities, - ) - .await; - - mock_service.stop(); - assert!(prover_result.is_err()); - - Ok(()) - } - - #[tokio::test] - async fn prover_should_error_if_batch_size_wrong() -> anyhow::Result<()> { - let options = ProverConfiguration { - url: "http://localhost:3002".into(), - timeout_s: 30, - batch_size: 10, - }; - let mtb = Prover::new(&options).unwrap(); - let input_data = get_default_proof_input(); - let identities = extract_identities_from(&input_data); - - let prover_result = mtb - .generate_proof( - input_data.start_index, - input_data.pre_root, - input_data.post_root, - &identities, - ) - .await; - - assert!(prover_result.is_err()); - assert_eq!( - prover_result.unwrap_err().to_string(), - anyhow::Error::msg("Provided batch does not match prover batch size.").to_string() - ); - - Ok(()) - } - - #[test] - fn compute_input_hash_should_succeed() { - let input = get_default_proof_input(); - - assert_eq!( - compute_input_hash( - input.start_index, - input.pre_root, - input.post_root, - &input.identity_commitments - ), - input.input_hash - ); - } - - #[test] - fn proof_input_should_serde() { - let expected_data: ProofInput = serde_json::from_str(EXPECTED_JSON).unwrap(); - let proof_input = get_default_proof_input(); - - assert_eq!(proof_input, expected_data); - } - - fn extract_identities_from(proof_input: &ProofInput) -> Vec { - proof_input - .identity_commitments - .iter() - .zip(&proof_input.merkle_proofs) - .map(|(comm, prf)| Identity::new(*comm, prf.clone())) - .collect() - } - - pub fn get_default_proof_output() -> Proof { - Proof::from([ - "0x12bba8b5a46139c819d83544f024828ece34f4f46be933a377a07c1904e96ec4".into(), - "0x112c8d7c63b6c431cef23e9c0d9ffff39d1d660f514030d4f2787960b437a1d5".into(), - "0x2413396a2af3add6fbe8137cfe7657917e31a5cdab0b7d1d645bd5eeb47ba601".into(), - "0x1ad029539528b32ba70964ce43dbf9bba2501cdb3aaa04e4d58982e2f6c34752".into(), - "0x5bb975296032b135458bd49f92d5e9d363367804440d4692708de92e887cf17".into(), - "0x14932600f53a1ceb11d79a7bdd9688a2f8d1919176f257f132587b2b3274c41e".into(), - "0x13d7b19c7b67bf5d3adf2ac2d3885fd5d49435b6069c0656939cd1fb7bef9dc9".into(), - "0x142e14f90c49c79b4edf5f6b7acbcdb0b0f376a4311fc036f1006679bd53ca9e".into(), - ]) - } - - fn get_default_proof_input() -> ProofInput { - let start_index: u32 = 0; - let pre_root: U256 = - "0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2".into(); - let post_root: U256 = - "0x7b248024e18c30f6c8a6c63dad3748d72cd13d1197bfd79a1323216d6ac6e99".into(); - let identities: Vec = vec!["0x1".into(), "0x2".into(), "0x3".into()]; - let merkle_proofs: Vec> = vec![ - vec![ - "0x0".into(), - "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864".into(), - 
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1".into(), - "0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238".into(), - "0x7f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a".into(), - "0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55".into(), - "0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78".into(), - "0x78295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d".into(), - "0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61".into(), - "0xe884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747".into(), - ], - vec![ - "0x1".into(), - "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864".into(), - "0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1".into(), - "0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238".into(), - "0x7f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a".into(), - "0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55".into(), - "0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78".into(), - "0x78295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d".into(), - "0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61".into(), - "0xe884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747".into(), - ], - vec![ - "0x0".into(), - "0x115cc0f5e7d690413df64c6b9662e9cf2a3617f2743245519e19607a4417189a".into(), - "0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1".into(), - "0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238".into(), - "0x7f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a".into(), - "0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55".into(), - "0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78".into(), - "0x78295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d".into(), - "0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61".into(), - "0xe884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747".into(), - ], - ]; - let input_hash: U256 = - "0xa2d9c54a0aecf0f2aeb502c4a14ac45209d636986294c5e3168a54a7f143b1d8".into(); - - ProofInput { - input_hash, - start_index, - pre_root, - post_root, - identity_commitments: identities, - merkle_proofs, - } - } - - const EXPECTED_JSON: &str = r#"{ - "inputHash": "0xa2d9c54a0aecf0f2aeb502c4a14ac45209d636986294c5e3168a54a7f143b1d8", - "startIndex": 0, - "preRoot": "0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2", - "postRoot": "0x7b248024e18c30f6c8a6c63dad3748d72cd13d1197bfd79a1323216d6ac6e99", - "identityCommitments": [ - "0x1", - "0x2", - "0x3" - ], - "merkleProofs": [ - [ - "0x0", - "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", - "0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1", - "0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238", - "0x7f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a", - "0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55", - "0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78", - "0x78295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d", - "0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61", - "0xe884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747" - ], - [ - "0x1", - "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", - "0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1", - 
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238", - "0x7f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a", - "0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55", - "0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78", - "0x78295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d", - "0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61", - "0xe884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747" - ], - [ - "0x0", - "0x115cc0f5e7d690413df64c6b9662e9cf2a3617f2743245519e19607a4417189a", - "0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1", - "0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238", - "0x7f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a", - "0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55", - "0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78", - "0x78295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d", - "0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61", - "0xe884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747" - ] - ] -} -"#; -} - -#[cfg(test)] -pub mod mock { - use std::net::SocketAddr; - - use axum::routing::post; - use axum::{Json, Router}; - use axum_server::Handle; - - use super::*; - - pub struct Service { - server: Handle, - } - - #[derive(Serialize, Deserialize)] - #[serde(untagged)] - #[allow(clippy::large_enum_variant)] - enum ProveResponse { - ProofSuccess(Proof), - ProofFailure(ProverError), - } - - impl Service { - pub async fn new(url: String) -> anyhow::Result { - let prove = |Json(payload): Json| async move { - match payload.post_root.div_mod(U256::from(2)) { - (_, y) if y != U256::zero() => { - Json(ProveResponse::ProofSuccess(test::get_default_proof_output())) - } - _ => { - let error = ProverError { - code: "Oh no!".into(), - message: "Things went wrong.".into(), - }; - Json(ProveResponse::ProofFailure(error)) - } - } - }; - let app = Router::new().route("/prove", post(prove)); - - let addr: SocketAddr = url.parse()?; - let server = Handle::new(); - let serverside_handle = server.clone(); - let service = app.into_make_service(); - - tokio::spawn(async move { - axum_server::bind(addr) - .handle(serverside_handle) - .serve(service) - .await - .unwrap(); - }); - - let service = Self { server }; - Ok(service) - } - - pub fn stop(self) { - self.server.shutdown(); - } - } -} diff --git a/src/prover/map.rs b/src/prover/map.rs index ba5a27c2..4c6eea68 100644 --- a/src/prover/map.rs +++ b/src/prover/map.rs @@ -1,69 +1,52 @@ -use std::collections::BTreeMap; +use std::collections::HashSet; -use tokio::sync::{RwLock, RwLockReadGuard}; - -use crate::prover::{Prover, ProverConfiguration, ProverType, Provers}; - -/// The type of a map containing a mapping from a usize to a locked item. -type SharedProverMap
<P> = RwLock<ProverMap<P>>; - -/// A prover that can have read-only operations performed on it. -pub type ReadOnlyProver<'a, P> = RwLockReadGuard<'a, P>; +use crate::prover::{Prover, ProverConfig, ProverType}; +use crate::utils::min_map::MinMap; /// A map that contains a prover for each batch size. /// /// Provides utility methods for getting the appropriate provers -/// -/// The struct is generic over P for testing purposes. -#[derive(Debug)] -pub struct ProverMap<P> { - map: BTreeMap<usize, P>, +#[derive(Debug, Default)] +pub struct ProverMap { + map: MinMap<Prover>, } -impl<P> ProverMap<P> { - pub fn new(map: BTreeMap<usize, P>) -> Self { - Self { map } - } - +impl ProverMap { /// Get the smallest prover that can handle the given batch size. - pub fn get(&self, batch_size: usize) -> Option<&P> { - for (size, prover) in &self.map { - if batch_size <= *size { - return Some(prover); - } - } - - None + pub fn get(&self, batch_size: usize) -> Option<&Prover> { + self.map.get(batch_size) } /// Registers the provided `prover` for the given `batch_size` in the map. - pub fn add(&mut self, batch_size: usize, prover: P) { - self.map.insert(batch_size, prover); + pub fn add(&mut self, batch_size: usize, prover: Prover) { + self.map.add(batch_size, prover); } /// Removes the prover for the provided `batch_size` from the prover map. - pub fn remove(&mut self, batch_size: usize) -> Option<P> { - self.map.remove(&batch_size) + pub fn remove(&mut self, batch_size: usize) -> Option<Prover> { + self.map.remove(batch_size) } pub fn len(&self) -> usize { self.map.len() } + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + pub fn max_batch_size(&self) -> usize { - self.map.iter().next_back().map_or(0, |(size, _)| *size) + self.map.max_key().unwrap_or(0) } pub fn batch_size_exists(&self, batch_size: usize) -> bool { - self.map.contains_key(&batch_size) + self.map.key_exists(batch_size) } -} -impl ProverMap<Prover> { - pub fn as_configuration_vec(&self) -> Vec<ProverConfiguration> { + pub fn as_configuration_vec(&self) -> Vec<ProverConfig> { self.map .iter() - .map(|(k, v)| ProverConfiguration { + .map(|(k, v)| ProverConfig { url: v.url(), timeout_s: v.timeout_s(), batch_size: *k, @@ -73,64 +56,24 @@ impl ProverMap { } } -impl<P> From<BTreeMap<usize, P>> for ProverMap<P> { - fn from(map: BTreeMap<usize, P>) -> Self { - Self { map } - } -} - -/// A map of provers for batch insertion operations. -pub type InsertionProverMap = SharedProverMap<Prover>; -/// A map of provers for batch deletion operations. -pub type DeletionProverMap = SharedProverMap<Prover>; - -/// The type of provers that can only be read from for insertion operations. -pub type ReadOnlyInsertionProver<'a> = ReadOnlyProver<'a, Prover>; - /// Builds an insertion prover map from the provided configuration. pub fn initialize_prover_maps( - db_provers: Provers, -) -> anyhow::Result<(InsertionProverMap, DeletionProverMap)> { - let mut insertion_map = BTreeMap::new(); - let mut deletion_map = BTreeMap::new(); + db_provers: HashSet<ProverConfig>, +) -> anyhow::Result<(ProverMap, ProverMap)> { + let mut insertion_map = ProverMap::default(); + let mut deletion_map = ProverMap::default(); for prover in db_provers { match prover.prover_type { ProverType::Insertion => { - insertion_map.insert(prover.batch_size, Prover::from_prover_conf(&prover)?); + insertion_map.add(prover.batch_size, Prover::from_prover_conf(&prover)?); } ProverType::Deletion => { - deletion_map.insert(prover.batch_size, Prover::from_prover_conf(&prover)?); + deletion_map.add(prover.batch_size, Prover::from_prover_conf(&prover)?); } } } - Ok(( - RwLock::new(ProverMap::new(insertion_map)), - RwLock::new(ProverMap::new(deletion_map)), - )) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn prover_map_tests() { - let prover_map: ProverMap<usize> = ProverMap::from(maplit::btreemap! { - 3 => 3, - 5 => 5, - 7 => 7, - }); - - assert_eq!(prover_map.max_batch_size(), 7); - - assert_eq!(prover_map.get(1), Some(&3)); - assert_eq!(prover_map.get(2), Some(&3)); - assert_eq!(prover_map.get(3), Some(&3)); - assert_eq!(prover_map.get(4), Some(&5)); - assert_eq!(prover_map.get(7), Some(&7)); - assert!(prover_map.get(8).is_none()); - } + Ok((insertion_map, deletion_map)) }
diff --git a/src/serde_utils.rs b/src/serde_utils.rs deleted file mode 100644 index 3aae7e4f..00000000 --- a/src/serde_utils.rs +++ /dev/null @@ -1,31 +0,0 @@ -use std::fmt; -use std::str::FromStr; - -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(transparent)] -pub struct JsonStrWrapper<T>(pub T); - -impl<T> FromStr for JsonStrWrapper<T> -where - T: DeserializeOwned, -{ - type Err = serde_json::Error; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - serde_json::from_str(s).map(JsonStrWrapper) - } -} - -impl<T> fmt::Display for JsonStrWrapper<T> -where - T: Serialize, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = serde_json::to_string(self).map_err(|_| fmt::Error)?; - - s.fmt(f) - } -}
diff --git a/src/server/mod.rs b/src/server.rs similarity index 78% rename from src/server/mod.rs rename to src/server.rs index 38d8a368..179ab312 100644 --- a/src/server/mod.rs +++ b/src/server.rs @@ -1,21 +1,19 @@ pub mod error; -use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}; +use std::net::TcpListener; use std::sync::Arc; use std::time::Duration; -use anyhow::{bail, ensure}; use axum::extract::{Query, State}; use axum::routing::{get, post}; use axum::{middleware, Json, Router}; -use clap::Parser; use cli_batteries::await_shutdown; use error::Error; use hyper::StatusCode; use tracing::info; -use url::{Host, Url}; use crate::app::App; +use crate::config::ServerConfig; mod custom_middleware; pub mod data; @@ -27,19 +25,6 @@ use self::data::{ VerifySemaphoreProofRequest, VerifySemaphoreProofResponse, };
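The `prover_map_tests` removed above pin down the lookup rule that `MinMap` now has to provide: `get` returns the value under the smallest key greater than or equal to the requested batch size (`get(1)` → the 3-entry, `get(4)` → the 5-entry, `get(8)` → `None` in a `{3, 5, 7}` map). `src/utils/min_map.rs` itself is not part of this diff, so the following is only a sketch of that contract over a `BTreeMap`; the method names match the calls made here, the internals are assumed:

```rust
use std::collections::BTreeMap;

#[derive(Debug, Default)]
pub struct MinMap<T> {
    map: BTreeMap<usize, T>,
}

impl<T> MinMap<T> {
    /// Value under the smallest key >= `key`, if any.
    pub fn get(&self, key: usize) -> Option<&T> {
        self.map.range(key..).next().map(|(_, v)| v)
    }

    pub fn add(&mut self, key: usize, value: T) {
        self.map.insert(key, value);
    }

    pub fn remove(&mut self, key: usize) -> Option<T> {
        self.map.remove(&key)
    }

    pub fn len(&self) -> usize {
        self.map.len()
    }

    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    pub fn max_key(&self) -> Option<usize> {
        self.map.keys().next_back().copied()
    }

    pub fn key_exists(&self, key: usize) -> bool {
        self.map.contains_key(&key)
    }

    pub fn iter(&self) -> impl Iterator<Item = (&usize, &T)> {
        self.map.iter()
    }
}
```

This is the rule that later lets `get_suitable_insertion_prover`/`get_suitable_deletion_prover` serve a possibly-incomplete batch with the next prover size up.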
-#[derive(Clone, Debug, PartialEq, Eq, Parser)] -#[group(skip)] -pub struct Options { - // TODO: This should be a `SocketAddr`. It makes no sense for us to allow a full on URL here - /// API Server url - #[clap(long, env, default_value = "http://127.0.0.1:8080/")] - pub server: Url, - - /// Request handling timeout (seconds) - #[clap(long, env, default_value = "300")] - pub serve_timeout: u64, -} - async fn inclusion_proof( State(app): State>, Json(inclusion_proof_request): Json, @@ -148,32 +133,11 @@ async fn list_batch_sizes( /// Will return `Err` if `options.server` URI is not http, incorrectly includes /// a path beyond `/`, or cannot be cast into an IP address. Also returns an /// `Err` if the server cannot bind to the given address. -pub async fn main(app: Arc, options: Options) -> anyhow::Result<()> { - ensure!( - options.server.scheme() == "http", - "Only http:// is supported in {}", - options.server - ); - ensure!( - options.server.path() == "/", - "Only / is supported in {}", - options.server - ); - - let ip: IpAddr = match options.server.host() { - Some(Host::Ipv4(ip)) => ip.into(), - Some(Host::Ipv6(ip)) => ip.into(), - Some(_) => bail!("Cannot bind {}", options.server), - None => Ipv4Addr::LOCALHOST.into(), - }; - let port = options.server.port().unwrap_or(9998); - let addr = SocketAddr::new(ip, port); - - info!("Will listen on {}", addr); - let listener = TcpListener::bind(addr)?; - - let serve_timeout = Duration::from_secs(options.serve_timeout); - bind_from_listener(app, serve_timeout, listener).await?; +pub async fn run(app: Arc, config: ServerConfig) -> anyhow::Result<()> { + info!("Will listen on {}", config.address); + let listener = TcpListener::bind(config.address)?; + + bind_from_listener(app, config.serve_timeout, listener).await?; Ok(()) } diff --git a/src/server/data.rs b/src/server/data.rs index 13e13aed..887c7b09 100644 --- a/src/server/data.rs +++ b/src/server/data.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::identity_tree::{ Hash, InclusionProof, ProcessedStatus, RootItem, Status, UnprocessedStatus, }; -use crate::prover::{ProverConfiguration, ProverType}; +use crate::prover::{ProverConfig, ProverType}; #[derive(Serialize)] #[serde(transparent)] @@ -14,7 +14,7 @@ pub struct InclusionProofResponse(pub InclusionProof); #[derive(Serialize)] #[serde(transparent)] -pub struct ListBatchSizesResponse(pub Vec); +pub struct ListBatchSizesResponse(pub Vec); #[derive(Serialize)] #[serde(transparent)] @@ -174,8 +174,8 @@ impl ToResponseCode for InclusionProofResponse { } } -impl From> for ListBatchSizesResponse { - fn from(value: Vec) -> Self { +impl From> for ListBatchSizesResponse { + fn from(value: Vec) -> Self { Self(value) } } diff --git a/src/task_monitor.rs b/src/task_monitor.rs index 03654287..9c290ea8 100644 --- a/src/task_monitor.rs +++ b/src/task_monitor.rs @@ -1,21 +1,14 @@ use std::sync::Arc; use std::time::Duration; -use clap::Parser; use once_cell::sync::Lazy; use prometheus::{linear_buckets, register_gauge, register_histogram, Gauge, Histogram}; -use tokio::sync::{broadcast, mpsc, Notify, RwLock}; +use tokio::sync::{broadcast, mpsc, Mutex, Notify, RwLock}; use tokio::task::JoinHandle; use tracing::{info, instrument, warn}; -use self::tasks::delete_identities::DeleteIdentities; -use self::tasks::finalize_identities::FinalizeRoots; -use self::tasks::insert_identities::InsertIdentities; -use self::tasks::monitor_txs::MonitorTxs; -use self::tasks::process_identities::ProcessIdentities; -use crate::contracts::SharedIdentityManager; +use 
crate::app::App; use crate::database::Database; -use crate::identity_tree::TreeState; pub mod tasks; @@ -68,53 +61,6 @@ impl RunningInstance { } } -/// Configuration options for the component responsible for committing -/// identities when queried. -#[derive(Clone, Debug, PartialEq, Eq, Parser)] -#[group(skip)] -pub struct Options { - /// The maximum number of seconds the sequencer will wait before sending a - /// batch of identities to the chain, even if the batch is not full. - // TODO: do we want to change this to batch_insertion_timeout_secs - #[clap(long, env, default_value = "180")] - pub batch_timeout_seconds: u64, - - /// TODO: - #[clap(long, env, default_value = "3600")] - pub batch_deletion_timeout_seconds: i64, - - /// TODO: - #[clap(long, env, default_value = "100")] - pub min_batch_deletion_size: usize, - - /// The parameter to control the delay between mining a deletion batch and - /// inserting the recovery identities - /// - /// The sequencer will insert the recovery identities after - /// max_epoch_duration_seconds + root_history_expiry) seconds have passed - /// - /// By default the value is set to 0 so the sequencer will only use - /// root_history_expiry - #[clap(long, env, default_value = "0")] - pub max_epoch_duration_seconds: u64, - - /// The maximum number of windows to scan for finalization logs - #[clap(long, env, default_value = "100")] - pub scanning_window_size: u64, - - /// The offset from the latest block to scan - #[clap(long, env, default_value = "0")] - pub scanning_chain_head_offset: u64, - - /// The number of seconds to wait between fetching logs - #[clap(long, env, default_value = "30")] - pub time_between_scans_seconds: u64, - - /// The number of txs in the channel that we'll be monitoring - #[clap(long, env, default_value = "100")] - pub monitored_txs_capacity: usize, -} - /// A worker that commits identities to the blockchain. /// /// This uses the database to keep track of identities that need to be @@ -126,55 +72,15 @@ pub struct TaskMonitor { /// when shutdown is called we want to be able to gracefully /// await the join handles - which requires ownership of the handle and by /// extension the instance. 
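Before the `TaskMonitor` fields change below, note the shape the tasks themselves take in the new `start`: each one is a plain async function over `Arc<App>`, wrapped in a cloneable closure and handed to `crate::utils::spawn_monitored_with_backoff`. That helper is not shown in this diff; a rough sketch of the respawn-on-error contract it appears to implement (the real one also wires up the shutdown channel, which this omits):

```rust
use std::future::Future;
use std::time::Duration;

use tokio::task::JoinHandle;

// Hypothetical simplification: re-run the task factory after a fixed
// backoff whenever the produced future resolves to an error.
fn spawn_with_backoff<S, F>(factory: S, backoff: Duration) -> JoinHandle<()>
where
    S: Fn() -> F + Send + Sync + 'static,
    F: Future<Output = anyhow::Result<()>> + Send + 'static,
{
    tokio::spawn(async move {
        loop {
            match factory().await {
                // Task ran to completion; stop respawning.
                Ok(()) => break,
                Err(e) => {
                    tracing::error!(?e, "task failed, restarting after backoff");
                    tokio::time::sleep(backoff).await;
                }
            }
        }
    })
}
```

Because the factory closure must be callable again after every failure, single-consumer values such as `monitored_txs_receiver` get wrapped in `Arc<Mutex<_>>` before being captured, as seen a little further down.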
- instance: RwLock>, - database: Arc, - identity_manager: SharedIdentityManager, - tree_state: TreeState, - batch_insert_timeout_secs: u64, - - // Finalization params - scanning_window_size: u64, - scanning_chain_head_offset: u64, - time_between_scans: Duration, - max_epoch_duration: Duration, - // TODO: docs - batch_deletion_timeout_seconds: i64, - // TODO: docs - min_batch_deletion_size: usize, - monitored_txs_capacity: usize, + instance: RwLock>, + app: Arc, } impl TaskMonitor { - pub fn new( - database: Arc, - contracts: SharedIdentityManager, - tree_state: TreeState, - options: &Options, - ) -> Self { - let Options { - batch_timeout_seconds, - scanning_window_size, - scanning_chain_head_offset, - time_between_scans_seconds, - max_epoch_duration_seconds, - monitored_txs_capacity, - batch_deletion_timeout_seconds, - min_batch_deletion_size, - } = *options; - + pub fn new(app: Arc) -> Self { Self { instance: RwLock::new(None), - database, - identity_manager: contracts, - tree_state, - batch_insert_timeout_secs: batch_timeout_seconds, - scanning_window_size, - scanning_chain_head_offset, - time_between_scans: Duration::from_secs(time_between_scans_seconds), - batch_deletion_timeout_seconds, - min_batch_deletion_size, - max_epoch_duration: Duration::from_secs(max_epoch_duration_seconds), - monitored_txs_capacity, + app, } } @@ -190,89 +96,80 @@ impl TaskMonitor { let (shutdown_sender, _) = broadcast::channel(1); let (monitored_txs_sender, monitored_txs_receiver) = - mpsc::channel(self.monitored_txs_capacity); + mpsc::channel(self.app.config.app.monitored_txs_capacity); + + let monitored_txs_sender = Arc::new(monitored_txs_sender); + let monitored_txs_receiver = Arc::new(Mutex::new(monitored_txs_receiver)); - let wake_up_notify = Arc::new(Notify::new()); + let base_wake_up_notify = Arc::new(Notify::new()); // Immediately notify so we can start processing if we have pending identities // in the database - wake_up_notify.notify_one(); + base_wake_up_notify.notify_one(); let mut handles = Vec::new(); - // Finalize identities task - let finalize_identities = FinalizeRoots::new( - self.database.clone(), - self.identity_manager.clone(), - self.tree_state.get_processed_tree(), - self.tree_state.get_mined_tree(), - self.scanning_window_size, - self.scanning_chain_head_offset, - self.time_between_scans, - self.max_epoch_duration, - ); + let app = self.app.clone(); + let finalize_identities = move || tasks::finalize_identities::finalize_roots(app.clone()); let finalize_identities_handle = crate::utils::spawn_monitored_with_backoff( - move || finalize_identities.clone().run(), + finalize_identities, shutdown_sender.clone(), FINALIZE_IDENTITIES_BACKOFF, ); handles.push(finalize_identities_handle); - // Process identities task - let process_identities = ProcessIdentities::new( - self.database.clone(), - self.identity_manager.clone(), - self.tree_state.get_batching_tree(), - self.batch_insert_timeout_secs, - monitored_txs_sender, - wake_up_notify.clone(), - ); + let app = self.app.clone(); + let wake_up_notify = base_wake_up_notify.clone(); + let process_identities = move || { + tasks::process_identities::process_identities( + app.clone(), + monitored_txs_sender.clone(), + wake_up_notify.clone(), + ) + }; let process_identities_handle = crate::utils::spawn_monitored_with_backoff( - move || process_identities.clone().run(), + process_identities, shutdown_sender.clone(), PROCESS_IDENTITIES_BACKOFF, ); handles.push(process_identities_handle); - let monitor_txs = 
MonitorTxs::new(self.identity_manager.clone(), monitored_txs_receiver); + let app = self.app.clone(); + let monitor_txs = + move || tasks::monitor_txs::monitor_txs(app.clone(), monitored_txs_receiver.clone()); let monitor_txs_handle = crate::utils::spawn_monitored_with_backoff( - move || monitor_txs.clone().run(), + monitor_txs, shutdown_sender.clone(), PROCESS_IDENTITIES_BACKOFF, ); handles.push(monitor_txs_handle); - // Insert identities task - let insert_identities = InsertIdentities::new( - self.database.clone(), - self.tree_state.get_latest_tree(), - wake_up_notify.clone(), - ); - + let app = self.app.clone(); + let wake_up_notify = base_wake_up_notify.clone(); + let insert_identities = move || { + self::tasks::insert_identities::insert_identities(app.clone(), wake_up_notify.clone()) + }; let insert_identities_handle = crate::utils::spawn_monitored_with_backoff( - move || insert_identities.clone().run(), + insert_identities, shutdown_sender.clone(), INSERT_IDENTITIES_BACKOFF, ); handles.push(insert_identities_handle); - // Delete identities task - let delete_identities = DeleteIdentities::new( - self.database.clone(), - self.tree_state.get_latest_tree(), - self.batch_deletion_timeout_seconds, - self.min_batch_deletion_size, - wake_up_notify, - ); + let app = self.app.clone(); + let wake_up_notify = base_wake_up_notify.clone(); + let delete_identities = move || { + self::tasks::delete_identities::delete_identities(app.clone(), wake_up_notify.clone()) + }; let delete_identities_handle = crate::utils::spawn_monitored_with_backoff( - move || delete_identities.clone().run(), + delete_identities, shutdown_sender.clone(), DELETE_IDENTITIES_BACKOFF, ); diff --git a/src/task_monitor/tasks/delete_identities.rs b/src/task_monitor/tasks/delete_identities.rs index 66dee074..aa1f5873 100644 --- a/src/task_monitor/tasks/delete_identities.rs +++ b/src/task_monitor/tasks/delete_identities.rs @@ -1,75 +1,34 @@ use std::collections::HashSet; use std::sync::Arc; +use anyhow::Context; use chrono::Utc; use tokio::sync::Notify; use tracing::info; +use crate::app::App; use crate::database::types::DeletionEntry; -use crate::database::Database; -use crate::identity_tree::{Hash, Latest, TreeVersion}; +use crate::identity_tree::Hash; -pub struct DeleteIdentities { - database: Arc, - latest_tree: TreeVersion, - deletion_time_interval: i64, - min_deletion_batch_size: usize, - wake_up_notify: Arc, -} - -impl DeleteIdentities { - pub fn new( - database: Arc, - latest_tree: TreeVersion, - deletion_time_interval: i64, - min_deletion_batch_size: usize, - wake_up_notify: Arc, - ) -> Arc { - Arc::new(Self { - database, - latest_tree, - deletion_time_interval, - min_deletion_batch_size, - wake_up_notify, - }) - } - - pub async fn run(self: Arc) -> anyhow::Result<()> { - delete_identities( - &self.database, - &self.latest_tree, - self.deletion_time_interval, - self.min_deletion_batch_size, - self.wake_up_notify.clone(), - ) - .await - } -} - -async fn delete_identities( - database: &Database, - latest_tree: &TreeVersion, - deletion_time_interval: i64, - min_deletion_batch_size: usize, - wake_up_notify: Arc, -) -> anyhow::Result<()> { +pub async fn delete_identities(app: Arc, wake_up_notify: Arc) -> anyhow::Result<()> { info!("Starting deletion processor."); - let deletion_time_interval = chrono::Duration::seconds(deletion_time_interval); + let batch_deletion_timeout = chrono::Duration::from_std(app.config.app.batch_deletion_timeout) + .context("Invalid batch deletion timeout duration")?; loop { - let deletions = 
database.get_deletions().await?; + let deletions = app.database.get_deletions().await?; if deletions.is_empty() { tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; continue; } - let last_deletion_timestamp = database.get_latest_deletion().await?.timestamp; + let last_deletion_timestamp = app.database.get_latest_deletion().await?.timestamp; // If the minimum deletions batch size is reached or the deletion time interval // has elapsed, run a batch of deletions - if deletions.len() >= min_deletion_batch_size - || Utc::now() - last_deletion_timestamp > deletion_time_interval + if deletions.len() >= app.config.app.min_batch_deletion_size + || Utc::now() - last_deletion_timestamp > batch_deletion_timeout { // Dedup deletion entries let deletions = deletions.into_iter().collect::>(); @@ -81,7 +40,7 @@ async fn delete_identities( // Delete the commitments at the target leaf indices in the latest tree, // generating the proof for each update - let data = latest_tree.delete_many(&leaf_indices); + let data = app.tree_state.latest_tree().delete_many(&leaf_indices); assert_eq!( data.len(), @@ -92,13 +51,13 @@ async fn delete_identities( // Insert the new items into pending identities let items = data.into_iter().zip(leaf_indices); for ((root, _proof), leaf_index) in items { - database + app.database .insert_pending_identity(leaf_index, &Hash::ZERO, &root) .await?; } // Remove the previous commitments from the deletions table - database.remove_deletions(previous_commitments).await?; + app.database.remove_deletions(previous_commitments).await?; wake_up_notify.notify_one(); } } diff --git a/src/task_monitor/tasks/finalize_identities.rs b/src/task_monitor/tasks/finalize_identities.rs index 79805170..9e871571 100644 --- a/src/task_monitor/tasks/finalize_identities.rs +++ b/src/task_monitor/tasks/finalize_identities.rs @@ -10,83 +10,27 @@ use ethers::providers::Middleware; use ethers::types::{Address, Log, Topic, ValueOrArray, U256}; use tracing::{info, instrument}; +use crate::app::App; use crate::contracts::abi::{BridgedWorldId, RootAddedFilter, TreeChangeKind, TreeChangedFilter}; use crate::contracts::scanner::BlockScanner; -use crate::contracts::{IdentityManager, SharedIdentityManager}; +use crate::contracts::IdentityManager; use crate::database::Database; use crate::identity_tree::{Canonical, Intermediate, TreeVersion, TreeWithNextVersion}; use crate::task_monitor::TaskMonitor; -pub struct FinalizeRoots { - database: Arc, - identity_manager: SharedIdentityManager, - processed_tree: TreeVersion, - finalized_tree: TreeVersion, +pub async fn finalize_roots(app: Arc) -> anyhow::Result<()> { + let mainnet_abi = app.identity_manager.abi(); + let secondary_abis = app.identity_manager.secondary_abis(); - scanning_window_size: u64, - scanning_chain_head_offset: u64, - time_between_scans: Duration, - max_epoch_duration: Duration, -} - -impl FinalizeRoots { - pub fn new( - database: Arc, - identity_manager: SharedIdentityManager, - processed_tree: TreeVersion, - finalized_tree: TreeVersion, - scanning_window_size: u64, - scanning_chain_head_offset: u64, - time_between_scans: Duration, - max_epoch_duration: Duration, - ) -> Arc { - Arc::new(Self { - database, - identity_manager, - processed_tree, - finalized_tree, - scanning_window_size, - scanning_chain_head_offset, - time_between_scans, - max_epoch_duration, - }) - } - - pub async fn run(self: Arc) -> anyhow::Result<()> { - finalize_roots_loop( - &self.database, - &self.identity_manager, - &self.processed_tree, - &self.finalized_tree, - 
self.scanning_window_size, - self.scanning_chain_head_offset, - self.time_between_scans, - self.max_epoch_duration, - ) - .await - } -} - -async fn finalize_roots_loop( - database: &Database, - identity_manager: &IdentityManager, - processed_tree: &TreeVersion, - finalized_tree: &TreeVersion, - scanning_window_size: u64, - scanning_chain_head_offset: u64, - time_between_scans: Duration, - max_epoch_duration: Duration, -) -> anyhow::Result<()> { - let mainnet_abi = identity_manager.abi(); - let secondary_abis = identity_manager.secondary_abis(); - - let mut mainnet_scanner = - BlockScanner::new_latest(mainnet_abi.client().clone(), scanning_window_size) - .await? - .with_offset(scanning_chain_head_offset); + let mut mainnet_scanner = BlockScanner::new_latest( + mainnet_abi.client().clone(), + app.config.app.scanning_window_size, + ) + .await? + .with_offset(app.config.app.scanning_chain_head_offset); let mut secondary_scanners = - init_secondary_scanners(secondary_abis, scanning_window_size).await?; + init_secondary_scanners(secondary_abis, app.config.app.scanning_window_size).await?; let mainnet_address = mainnet_abi.address(); @@ -94,20 +38,26 @@ async fn finalize_roots_loop( let mainnet_logs = fetch_mainnet_logs(&mut mainnet_scanner, mainnet_address).await?; finalize_mainnet_roots( - database, - identity_manager, - processed_tree, + &app.database, + &app.identity_manager, + app.tree_state.processed_tree(), &mainnet_logs, - max_epoch_duration, + app.config.app.max_epoch_duration, ) .await?; let mut roots = extract_roots_from_mainnet_logs(mainnet_logs); roots.extend(fetch_secondary_logs(&mut secondary_scanners).await?); - finalize_secondary_roots(database, identity_manager, finalized_tree, roots).await?; + finalize_secondary_roots( + &app.database, + &app.identity_manager, + app.tree_state.mined_tree(), + roots, + ) + .await?; - tokio::time::sleep(time_between_scans).await; + tokio::time::sleep(app.config.app.time_between_scans).await; } } diff --git a/src/task_monitor/tasks/insert_identities.rs b/src/task_monitor/tasks/insert_identities.rs index 10fc65f7..adf4c284 100644 --- a/src/task_monitor/tasks/insert_identities.rs +++ b/src/task_monitor/tasks/insert_identities.rs @@ -5,42 +5,16 @@ use tokio::sync::Notify; use tokio::time::sleep; use tracing::instrument; +use crate::app::App; use crate::database::types::UnprocessedCommitment; use crate::database::Database; use crate::identity_tree::{Latest, TreeVersion, TreeVersionReadOps, UnprocessedStatus}; -pub struct InsertIdentities { - database: Arc, - latest_tree: TreeVersion, - wake_up_notify: Arc, -} - -impl InsertIdentities { - pub fn new( - database: Arc, - latest_tree: TreeVersion, - wake_up_notify: Arc, - ) -> Arc { - Arc::new(Self { - database, - latest_tree, - wake_up_notify, - }) - } - - pub async fn run(self: Arc) -> anyhow::Result<()> { - insert_identities_loop(&self.database, &self.latest_tree, &self.wake_up_notify).await - } -} - -async fn insert_identities_loop( - database: &Database, - latest_tree: &TreeVersion, - wake_up_notify: &Notify, -) -> anyhow::Result<()> { +pub async fn insert_identities(app: Arc, wake_up_notify: Arc) -> anyhow::Result<()> { loop { // get commits from database - let unprocessed = database + let unprocessed = app + .database .get_eligible_unprocessed_commitments(UnprocessedStatus::New) .await?; if unprocessed.is_empty() { @@ -48,14 +22,14 @@ async fn insert_identities_loop( continue; } - insert_identities(database, latest_tree, unprocessed).await?; + insert_identities_batch(&app.database, 
app.tree_state.latest_tree(), unprocessed).await?; // Notify the identity processing task, that there are new identities wake_up_notify.notify_one(); } } #[instrument(level = "info", skip_all)] -async fn insert_identities( +async fn insert_identities_batch( database: &Database, latest_tree: &TreeVersion, identities: Vec, diff --git a/src/task_monitor/tasks/monitor_txs.rs b/src/task_monitor/tasks/monitor_txs.rs index b3663430..70a496f0 100644 --- a/src/task_monitor/tasks/monitor_txs.rs +++ b/src/task_monitor/tasks/monitor_txs.rs @@ -2,41 +2,18 @@ use std::sync::Arc; use tokio::sync::{mpsc, Mutex}; -use crate::contracts::{IdentityManager, SharedIdentityManager}; +use crate::app::App; use crate::ethereum::write::TransactionId; -pub struct MonitorTxs { - identity_manager: SharedIdentityManager, +pub async fn monitor_txs( + app: Arc, monitored_txs_receiver: Arc>>, -} - -impl MonitorTxs { - pub fn new( - identity_manager: SharedIdentityManager, - monitored_txs_receiver: mpsc::Receiver, - ) -> Arc { - Arc::new(Self { - identity_manager, - monitored_txs_receiver: Arc::new(Mutex::new(monitored_txs_receiver)), - }) - } - - pub async fn run(self: Arc) -> anyhow::Result<()> { - monitor_txs_loop(&self.identity_manager, &self.monitored_txs_receiver).await?; - - Ok(()) - } -} - -async fn monitor_txs_loop( - identity_manager: &IdentityManager, - monitored_txs_receiver: &Mutex>, ) -> anyhow::Result<()> { let mut monitored_txs_receiver = monitored_txs_receiver.lock().await; while let Some(tx) = monitored_txs_receiver.recv().await { assert!( - (identity_manager.mine_transaction(tx.clone()).await?), + (app.identity_manager.mine_transaction(tx.clone()).await?), "Failed to mine transaction: {}", tx ); diff --git a/src/task_monitor/tasks/process_identities.rs b/src/task_monitor/tasks/process_identities.rs index 3c7708d9..980ada5a 100644 --- a/src/task_monitor/tasks/process_identities.rs +++ b/src/task_monitor/tasks/process_identities.rs @@ -1,88 +1,44 @@ use std::sync::Arc; -use std::time::Duration; use anyhow::Context; use chrono::{DateTime, Utc}; use ethers::types::U256; use ruint::Uint; use semaphore::merkle_tree::Proof; -use semaphore::poseidon_tree::Branch; +use semaphore::poseidon_tree::{Branch, PoseidonHash}; use tokio::sync::{mpsc, Notify}; use tokio::{select, time}; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; -use crate::contracts::{IdentityManager, SharedIdentityManager}; -use crate::database::Database; +use crate::app::App; +use crate::contracts::IdentityManager; use crate::ethereum::write::TransactionId; use crate::identity_tree::{ AppliedTreeUpdate, Hash, Intermediate, TreeVersion, TreeVersionReadOps, TreeWithNextVersion, }; use crate::prover::identity::Identity; -use crate::prover::{Prover, ReadOnlyProver}; +use crate::prover::Prover; use crate::task_monitor::TaskMonitor; +use crate::utils::batch_type::BatchType; use crate::utils::index_packing::pack_indices; /// The number of seconds either side of the timer tick to treat as enough to /// trigger a forced batch insertion. 
-const DEBOUNCE_THRESHOLD_SECS: u64 = 1; - -pub struct ProcessIdentities { - database: Arc, - identity_manager: SharedIdentityManager, - batching_tree: TreeVersion, - batch_insert_timeout_secs: u64, - monitored_txs_sender: mpsc::Sender, - wake_up_notify: Arc, -} - -impl ProcessIdentities { - pub fn new( - database: Arc, - identity_manager: SharedIdentityManager, - batching_tree: TreeVersion, - batch_insert_timeout_secs: u64, - monitored_txs_sender: mpsc::Sender, - wake_up_notify: Arc, - ) -> Arc { - Arc::new(Self { - database, - identity_manager, - batching_tree, - batch_insert_timeout_secs, - monitored_txs_sender, - wake_up_notify, - }) - } - - pub async fn run(self: Arc) -> anyhow::Result<()> { - process_identities( - &self.database, - &self.identity_manager, - &self.batching_tree, - &self.monitored_txs_sender, - &self.wake_up_notify, - self.batch_insert_timeout_secs, - ) - .await - } -} +const DEBOUNCE_THRESHOLD_SECS: i64 = 1; -async fn process_identities( - database: &Database, - identity_manager: &IdentityManager, - batching_tree: &TreeVersion, - monitored_txs_sender: &mpsc::Sender, - wake_up_notify: &Notify, - timeout_secs: u64, +pub async fn process_identities( + app: Arc, + monitored_txs_sender: Arc>, + wake_up_notify: Arc, ) -> anyhow::Result<()> { - info!("Awaiting for a clean slate"); - identity_manager.await_clean_slate().await?; + tracing::info!("Awaiting for a clean slate"); + app.identity_manager.await_clean_slate().await?; - info!("Starting identity processor."); + tracing::info!("Starting identity processor."); // We start a timer and force it to perform one initial tick to avoid an // immediate trigger. - let mut timer = time::interval(Duration::from_secs(timeout_secs)); + let mut timer = time::interval(app.config.app.batch_insertion_timeout); timer.tick().await; // When both futures are woken at once, the choice is made @@ -93,120 +49,76 @@ async fn process_identities( // inserted. If we have an incomplete batch but are within a small delta of the // tick happening anyway in the wake branch, we insert the current // (possibly-incomplete) batch anyway. - let mut last_batch_time: DateTime = database + let mut last_batch_time: DateTime = app + .database .get_latest_insertion_timestamp() .await? .unwrap_or(Utc::now()); loop { - // We ping-pong between two cases for being woken. This ensures that there is a - // maximum time that users can wait for their identity commitment to be - // processed, but also that we are not inefficient with on-chain gas by being - // too eager. + // We wait either for a timer tick or a full batch select! { _ = timer.tick() => { - debug!("Identity batch insertion woken due to timeout."); - - // If the timer has fired we want to insert whatever - // identities we have, even if it's not many. This ensures - // a minimum quality of service for API users. 
- let next_update = batching_tree.peek_next_updates(1); - if next_update.is_empty() { - continue; - } - - let batch_size = if next_update[0].update.element == Hash::ZERO { - identity_manager.max_deletion_batch_size().await - }else{ - identity_manager.max_insertion_batch_size().await - }; - - let updates = batching_tree.peek_next_updates(batch_size); - - commit_identities( - database, - identity_manager, - batching_tree, - monitored_txs_sender, - &updates, - ).await?; - - last_batch_time = Utc::now(); - database.update_latest_insertion_timestamp(last_batch_time).await?; - - // Also wake up if woken up due to a tick - wake_up_notify.notify_one(); + tracing::info!("Identity batch insertion woken due to timeout"); } () = wake_up_notify.notified() => { - tracing::trace!("Identity batch insertion woken due to request."); - - // Capture the time difference since the last batch, and compute - // whether we want to insert anyway. We do this if the difference - // is less than some debounce threshold. - // - // We unconditionally convert `u64 -> i64` as numbers should - // always be small. If the numbers are not always small then - // we _want_ to panic as something is horribly broken. - let current_time = Utc::now(); - let diff_secs = current_time - last_batch_time; - #[allow(clippy::cast_sign_loss)] - let diff_secs_u64 = diff_secs.num_seconds() as u64; - let should_process_anyway = - timeout_secs.abs_diff(diff_secs_u64) <= DEBOUNCE_THRESHOLD_SECS; - - let next_update = batching_tree.peek_next_updates(1); - if next_update.is_empty() { - continue; - } - - let batch_size = if next_update[0].update.element == Hash::ZERO { - identity_manager.max_deletion_batch_size().await - }else{ - identity_manager.max_insertion_batch_size().await - }; - - // We have _at most_ one complete batch here. - let updates = batching_tree.peek_next_updates(batch_size); - - // If there are not enough identities to insert at this - // stage we can wait. The timer will ensure that the API - // clients do not wait too long for their submission to be - // completed. - if updates.len() < batch_size && !should_process_anyway { - // We do not reset the timer here as we may want to - // insert anyway soon. - tracing::trace!( - "Pending identities ({}) is less than batch size ({}). Waiting.", - updates.len(), - batch_size - ); - continue; - } - - commit_identities( - database, - identity_manager, - batching_tree, - monitored_txs_sender, - &updates, - ).await?; - - // We've inserted the identities, so we want to ensure that - // we don't trigger again until either we get a full batch - // or the timer ticks. 
-            timer.reset();
-            last_batch_time = Utc::now();
-            database.update_latest_insertion_timestamp(last_batch_time).await?;
-
-            // We want to check if there's a full batch available immediately
-            wake_up_notify.notify_one();
-            }
+                tracing::trace!("Identity batch insertion woken due to request");
+            },
+        }
+
+        let Some(batch_type) = determine_batch_type(app.tree_state.batching_tree()) else {
+            continue;
+        };
+
+        let batch_size = if batch_type.is_deletion() {
+            app.identity_manager.max_deletion_batch_size().await
+        } else {
+            app.identity_manager.max_insertion_batch_size().await
+        };
+
+        let updates = app.tree_state.batching_tree().peek_next_updates(batch_size);
+
+        let current_time = Utc::now();
+        let batch_insertion_timeout =
+            chrono::Duration::from_std(app.config.app.batch_insertion_timeout)?;
+
+        let timeout_batch_time = last_batch_time
+            + batch_insertion_timeout
+            + chrono::Duration::seconds(DEBOUNCE_THRESHOLD_SECS);
+
+        let can_skip_batch = current_time < timeout_batch_time;
+
+        if updates.len() < batch_size && can_skip_batch {
+            tracing::trace!(
+                num_updates = updates.len(),
+                batch_size,
+                ?last_batch_time,
+                "Pending identities is less than batch size, skipping batch",
+            );
+
+            continue;
         }
+
+        commit_identities(
+            &app.identity_manager,
+            app.tree_state.batching_tree(),
+            &monitored_txs_sender,
+            &updates,
+        )
+        .await?;
+
+        timer.reset();
+        last_batch_time = Utc::now();
+        app.database
+            .update_latest_insertion_timestamp(last_batch_time)
+            .await?;
+
+        // We want to check if there's a full batch available immediately
+        wake_up_notify.notify_one();
     }
 }
 
 async fn commit_identities(
-    database: &Database,
     identity_manager: &IdentityManager,
     batching_tree: &TreeVersion<Intermediate>,
     monitored_txs_sender: &mpsc::Sender<TransactionId>,
@@ -224,25 +136,25 @@ async fn commit_identities(
         .get_suitable_insertion_prover(updates.len())
         .await?;
 
-        info!(
-            "Sending timed-out insertion batch with {}/{} updates.",
-            updates.len(),
-            prover.batch_size()
+        tracing::info!(
+            num_updates = updates.len(),
+            batch_size = prover.batch_size(),
+            "Insertion batch",
         );
 
-        insert_identities(database, identity_manager, batching_tree, updates, prover).await?
+        insert_identities(identity_manager, batching_tree, updates, &prover).await?
     } else {
         let prover = identity_manager
             .get_suitable_deletion_prover(updates.len())
             .await?;
 
-        info!(
-            "Sending timed-out deletion batch with {}/{} updates.",
-            updates.len(),
-            prover.batch_size()
+        tracing::info!(
+            num_updates = updates.len(),
+            batch_size = prover.batch_size(),
+            "Deletion batch"
         );
 
-        delete_identities(database, identity_manager, batching_tree, updates, prover).await?
+        delete_identities(identity_manager, batching_tree, updates, &prover).await?
     };
 
     if let Some(tx_id) = tx_id {
@@ -254,49 +166,12 @@ async fn commit_identities(
 
 #[instrument(level = "info", skip_all)]
 pub async fn insert_identities(
-    database: &Database,
     identity_manager: &IdentityManager,
     batching_tree: &TreeVersion<Intermediate>,
     updates: &[AppliedTreeUpdate],
-    prover: ReadOnlyProver<'_, Prover>,
+    prover: &Prover,
 ) -> anyhow::Result<Option<TransactionId>> {
-    TaskMonitor::log_identities_queues(database).await?;
-
-    if updates.is_empty() {
-        warn!("Identity commit requested with zero identities. Continuing.");
-        return Ok(None);
-    }
-
-    debug!("Starting identity commit for {} identities.", updates.len());
-
-    let mut last_index = updates
-        .first()
-        .expect("Updates is non empty.")
-        .update
-        .leaf_index;
-
-    for update in &updates[1..] {
-        if last_index + 1 != update.update.leaf_index {
-            let leaf_indexes = updates
-                .iter()
-                .map(|update| update.update.leaf_index)
-                .collect::<Vec<_>>();
-            let commitments = updates
-                .iter()
-                .map(|update| update.update.element)
-                .collect::<Vec<_>>();
-
-            panic!(
-                "Identities are not consecutive leaves in the tree (leaf_indexes = {:?}, \
-                 commitments = {:?})",
-                leaf_indexes, commitments
-            );
-        }
-
-        last_index = update.update.leaf_index;
-    }
-
-    // Grab the initial conditions before the updates are applied to the tree.
+    assert_updates_are_consecutive(updates);
 
     let start_index = updates[0].update.leaf_index;
     let pre_root: U256 = batching_tree.get_root().into();
@@ -315,11 +190,7 @@ pub async fn insert_identities(
     // intermediate versions of the tree
     let mut merkle_proofs: Vec<_> = updates
         .iter()
-        .map(|update_with_tree| {
-            update_with_tree
-                .result
-                .proof(update_with_tree.update.leaf_index)
-        })
+        .map(|update| update.result.proof(update.update.leaf_index))
         .collect();
 
     // Grab some variables for sizes to make querying easier.
@@ -369,21 +240,7 @@ pub async fn insert_identities(
     // With the updates applied we can grab the value of the tree's new root and
     // build our identities for sending to the identity manager.
     let post_root: U256 = latest_tree_from_updates.root().into();
-    let identity_commitments: Vec<Identity> = commitments
-        .iter()
-        .zip(merkle_proofs)
-        .map(|(id, prf)| {
-            let commitment: U256 = id.into();
-            let proof: Vec<U256> = prf
-                .0
-                .iter()
-                .map(|branch| match branch {
-                    Branch::Left(v) | Branch::Right(v) => U256::from(*v),
-                })
-                .collect();
-            Identity::new(commitment, proof)
-        })
-        .collect();
+    let identity_commitments = zip_commitments_and_proofs(commitments, merkle_proofs);
 
     identity_manager.validate_merkle_proofs(&identity_commitments)?;
 
@@ -397,7 +254,7 @@ pub async fn insert_identities(
     )
     .await?;
 
-    info!(
+    tracing::info!(
         start_index,
         ?pre_root,
         ?post_root,
@@ -416,11 +273,11 @@ pub async fn insert_identities(
     )
     .await
     .map_err(|e| {
-        error!(?e, "Failed to insert identity to contract.");
+        tracing::error!(?e, "Failed to insert identity to contract.");
         e
     })?;
 
-    info!(
+    tracing::info!(
         start_index,
         ?pre_root,
         ?post_root,
@@ -431,29 +288,43 @@ pub async fn insert_identities(
 
     // Update the batching tree only after submitting the identities to the chain
     batching_tree.apply_updates_up_to(post_root.into());
 
-    info!(start_index, ?pre_root, ?post_root, "Tree updated");
+    tracing::info!(start_index, ?pre_root, ?post_root, "Tree updated");
 
     TaskMonitor::log_batch_size(updates.len());
 
     Ok(Some(transaction_id))
 }
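The "intermediate versions of the tree" comment above is the central invariant of `insert_identities`: each `AppliedTreeUpdate` carries the tree state as it stood right after that update, and the proof for a leaf is taken against that snapshot, not against the final tree. A toy, std-only illustration of the shape; `AppliedUpdate` and the `Vec<u64>` snapshot are hypothetical stand-ins for `AppliedTreeUpdate` and the Merkle tree:

```rust
// Each applied update records the full leaf set *after* that update, so a
// proof for update i can be generated against the i-th intermediate state.
struct AppliedUpdate {
    leaf_index: usize,
    result: Vec<u64>, // snapshot of all leaves after this update
}

fn apply(mut leaves: Vec<u64>, values: &[u64], start: usize) -> Vec<AppliedUpdate> {
    values
        .iter()
        .enumerate()
        .map(|(i, v)| {
            leaves[start + i] = *v;
            AppliedUpdate {
                leaf_index: start + i,
                result: leaves.clone(),
            }
        })
        .collect()
}

fn main() {
    let updates = apply(vec![0; 4], &[10, 20], 1);
    // The first snapshot does not yet contain the second value:
    assert_eq!(updates[0].result, vec![0, 10, 0, 0]);
    assert_eq!(updates[1].result, vec![0, 10, 20, 0]);
    assert_eq!(updates[1].leaf_index, 2);
}
```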
+
+fn assert_updates_are_consecutive(updates: &[AppliedTreeUpdate]) {
+    for updates in updates.windows(2) {
+        let first = &updates[0];
+        let second = &updates[1];
+
+        if first.update.leaf_index + 1 != second.update.leaf_index {
+            let leaf_indexes = updates
+                .iter()
+                .map(|update| update.update.leaf_index)
+                .collect::<Vec<_>>();
+            let commitments = updates
+                .iter()
+                .map(|update| update.update.element)
+                .collect::<Vec<_>>();
+
+            panic!(
+                "Identities are not consecutive leaves in the tree (leaf_indexes = {:?}, \
+                 commitments = {:?})",
+                leaf_indexes, commitments
+            );
+        }
+    }
+}
+
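`assert_updates_are_consecutive` leans on `slice::windows(2)` to compare each neighbouring pair of leaf indices. A minimal runnable distillation of the same invariant over plain indices:

```rust
// The batch is only valid if its leaves occupy consecutive tree positions.
fn are_consecutive(leaf_indexes: &[usize]) -> bool {
    leaf_indexes.windows(2).all(|pair| pair[0] + 1 == pair[1])
}

fn main() {
    assert!(are_consecutive(&[4, 5, 6]));
    // A gap would make the batch proof inconsistent, hence the panic above.
    assert!(!are_consecutive(&[4, 6, 7]));
}
```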
Continuing."); - return Ok(None); - } - - debug!("Starting identity commit for {} identities.", updates.len()); - // Grab the initial conditions before the updates are applied to the tree. let pre_root: U256 = batching_tree.get_root().into(); @@ -524,23 +395,7 @@ pub async fn delete_identities( // With the updates applied we can grab the value of the tree's new root and // build our identities for sending to the identity manager. let post_root: U256 = latest_tree_from_updates.root().into(); - - // Get the previous identity - let identity_commitments: Vec = commitments - .iter() - .zip(merkle_proofs) - .map(|(id, prf)| { - let commitment: U256 = id.into(); - let proof: Vec = prf - .0 - .iter() - .map(|branch| match branch { - Branch::Left(v) | Branch::Right(v) => U256::from(*v), - }) - .collect(); - Identity::new(commitment, proof) - }) - .collect(); + let identity_commitments = zip_commitments_and_proofs(commitments, merkle_proofs); identity_manager.validate_merkle_proofs(&identity_commitments)?; @@ -556,7 +411,7 @@ pub async fn delete_identities( let packed_deletion_indices = pack_indices(&deletion_indices); - info!(?pre_root, ?post_root, "Submitting deletion batch"); + tracing::info!(?pre_root, ?post_root, "Submitting deletion batch"); // With all the data prepared we can submit the identities to the on-chain // identity manager and wait for that transaction to be mined. @@ -564,11 +419,11 @@ pub async fn delete_identities( .delete_identities(proof, packed_deletion_indices, pre_root, post_root) .await .map_err(|e| { - error!(?e, "Failed to insert identity to contract."); + tracing::error!(?e, "Failed to insert identity to contract."); e })?; - info!( + tracing::info!( ?pre_root, ?post_root, ?transaction_id, @@ -578,9 +433,45 @@ pub async fn delete_identities( // Update the batching tree only after submitting the identities to the chain batching_tree.apply_updates_up_to(post_root.into()); - info!(?pre_root, ?post_root, "Tree updated"); + tracing::info!(?pre_root, ?post_root, "Tree updated"); TaskMonitor::log_batch_size(updates.len()); Ok(Some(transaction_id)) } + +fn zip_commitments_and_proofs( + commitments: Vec, + merkle_proofs: Vec>, +) -> Vec { + commitments + .iter() + .zip(merkle_proofs) + .map(|(id, prf)| { + let commitment: U256 = id.into(); + let proof: Vec = prf + .0 + .iter() + .map(|branch| match branch { + Branch::Left(v) | Branch::Right(v) => U256::from(*v), + }) + .collect(); + Identity::new(commitment, proof) + }) + .collect() +} + +fn determine_batch_type(tree: &TreeVersion) -> Option { + let next_update = tree.peek_next_updates(1); + if next_update.is_empty() { + return None; + } + + let batch_type = if next_update[0].update.element == Hash::ZERO { + BatchType::Deletion + } else { + BatchType::Insertion + }; + + Some(batch_type) +} diff --git a/src/utils.rs b/src/utils.rs index 19c84fa1..e7846ce9 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -7,7 +7,11 @@ use tokio::sync::broadcast; use tokio::task::JoinHandle; use tracing::{error, info}; +pub mod batch_type; pub mod index_packing; +pub mod min_map; +pub mod secret; +pub mod serde_utils; pub mod tree_updates; pub fn spawn_monitored_with_backoff( diff --git a/src/utils/batch_type.rs b/src/utils/batch_type.rs new file mode 100644 index 00000000..8b85a147 --- /dev/null +++ b/src/utils/batch_type.rs @@ -0,0 +1,11 @@ +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum BatchType { + Insertion, + Deletion, +} + +impl BatchType { + pub fn is_deletion(self) -> bool { + self == Self::Deletion + } +} diff --git 
diff --git a/src/utils/min_map.rs b/src/utils/min_map.rs
new file mode 100644
index 00000000..6747c3d4
--- /dev/null
+++ b/src/utils/min_map.rs
@@ -0,0 +1,95 @@
+use std::collections::BTreeMap;
+
+/// A wrapper over a BTreeMap: `get` returns the value stored under the
+/// smallest key that is greater than or equal to the queried key
+#[derive(Debug)]
+pub struct MinMap<K, T> {
+    map: BTreeMap<K, T>,
+}
+
+impl<K, T> Default for MinMap<K, T> {
+    fn default() -> Self {
+        Self {
+            map: BTreeMap::default(),
+        }
+    }
+}
+
+impl<K, T> MinMap<K, T>
+where
+    K: Ord + Copy,
+{
+    pub fn new() -> Self {
+        Self {
+            map: BTreeMap::default(),
+        }
+    }
+
+    /// Get the value under the smallest stored key that is at least `key`
+    pub fn get(&self, key: K) -> Option<&T> {
+        for (size, value) in &self.map {
+            if key <= *size {
+                return Some(value);
+            }
+        }
+
+        None
+    }
+
+    pub fn add(&mut self, key: K, value: T) {
+        self.map.insert(key, value);
+    }
+
+    pub fn remove(&mut self, key: K) -> Option<T> {
+        self.map.remove(&key)
+    }
+
+    pub fn len(&self) -> usize {
+        self.map.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.map.is_empty()
+    }
+
+    pub fn max_key(&self) -> Option<K> {
+        self.map.keys().next_back().copied()
+    }
+
+    pub fn key_exists(&self, key: K) -> bool {
+        self.map.contains_key(&key)
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = (&K, &T)> {
+        self.map.iter()
+    }
+}
+
+impl<K, T> From<BTreeMap<K, T>> for MinMap<K, T> {
+    fn from(map: BTreeMap<K, T>) -> Self {
+        Self { map }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn min_map_tests() {
+        let min_map: MinMap<usize, usize> = MinMap::from(maplit::btreemap! {
+            3 => 3,
+            5 => 5,
+            7 => 7,
+        });
+
+        assert_eq!(min_map.max_key(), Some(7));
+
+        assert_eq!(min_map.get(1), Some(&3));
+        assert_eq!(min_map.get(2), Some(&3));
+        assert_eq!(min_map.get(3), Some(&3));
+        assert_eq!(min_map.get(4), Some(&5));
+        assert_eq!(min_map.get(7), Some(&7));
+        assert!(min_map.get(8).is_none());
+    }
+}
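`MinMap::get` above walks the ordered entries linearly; `BTreeMap::range` expresses the same "smallest stored key that is at least the query" lookup directly. A sketch of that alternative (not the crate's code):

```rust
use std::collections::BTreeMap;

// Same semantics as MinMap::get, but using range() for an O(log n) lookup.
fn min_map_get<K: Ord + Copy, T>(map: &BTreeMap<K, T>, key: K) -> Option<&T> {
    map.range(key..).next().map(|(_, value)| value)
}

fn main() {
    let map = BTreeMap::from([(3, "three"), (5, "five"), (7, "seven")]);
    assert_eq!(min_map_get(&map, 4), Some(&"five")); // smallest key >= 4 is 5
    assert_eq!(min_map_get(&map, 8), None);          // no key >= 8
}
```

This mirrors how `get_suitable_insertion_prover(updates.len())` presumably resolves a prover: the smallest registered batch size that can still fit the pending updates.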
diff --git a/src/secret.rs b/src/utils/secret.rs
similarity index 80%
rename from src/secret.rs
rename to src/utils/secret.rs
index 99723169..43743e35 100644
--- a/src/secret.rs
+++ b/src/utils/secret.rs
@@ -1,9 +1,11 @@
 use std::fmt;
 use std::str::FromStr;
 
+use serde::{Deserialize, Serialize};
 use url::Url;
 
-#[derive(Clone, Eq, PartialEq)]
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(transparent)]
 pub struct SecretUrl(Url);
 
 impl SecretUrl {
@@ -49,6 +51,18 @@ impl fmt::Debug for SecretUrl {
     }
 }
 
+impl From<Url> for SecretUrl {
+    fn from(url: Url) -> Self {
+        Self::new(url)
+    }
+}
+
+impl From<SecretUrl> for Url {
+    fn from(secret_url: SecretUrl) -> Self {
+        secret_url.0
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/src/utils/serde_utils.rs b/src/utils/serde_utils.rs
new file mode 100644
index 00000000..2fe93b67
--- /dev/null
+++ b/src/utils/serde_utils.rs
@@ -0,0 +1,96 @@
+use std::borrow::Cow;
+use std::fmt;
+use std::str::FromStr;
+
+use serde::de::DeserializeOwned;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)]
+pub struct JsonStrWrapper<T>(pub T);
+
+impl<T> FromStr for JsonStrWrapper<T>
+where
+    T: DeserializeOwned,
+{
+    type Err = serde_json::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        serde_json::from_str(s).map(JsonStrWrapper)
+    }
+}
+
+impl<T> fmt::Display for JsonStrWrapper<T>
+where
+    T: Serialize,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let s = serde_json::to_string(self).map_err(|_| fmt::Error)?;
+
+        s.fmt(f)
+    }
+}
+
+impl<T> Serialize for JsonStrWrapper<T>
+where
+    T: Serialize,
+{
+    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        serde_json::to_string(&self.0)
+            .map_err(serde::ser::Error::custom)?
+            .serialize(serializer)
+    }
+}
+
+impl<'de, T> Deserialize<'de> for JsonStrWrapper<T>
+where
+    // TODO: Is there some way to use T:
+    // Deserialize<'de> here?
+    T: DeserializeOwned,
+{
+    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        let s = Cow::<'static, str>::deserialize(deserializer)?;
+
+        serde_json::from_str(&s)
+            .map(JsonStrWrapper)
+            .map_err(serde::de::Error::custom)
+    }
+}
+
+impl<T> From<T> for JsonStrWrapper<T> {
+    fn from(t: T) -> Self {
+        Self(t)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use serde_json::Value;
+
+    use super::*;
+
+    #[test]
+    fn json() {
+        let wrapper = JsonStrWrapper(vec![1, 2, 3]);
+
+        let s = serde_json::to_string(&wrapper).unwrap();
+
+        assert_eq!(s, "\"[1,2,3]\"");
+
+        let wrapper: JsonStrWrapper<Vec<i32>> = serde_json::from_str(&s).unwrap();
+
+        assert_eq!(wrapper.0, vec![1, 2, 3]);
+    }
+
+    #[test]
+    fn json_value() {
+        let wrapper = JsonStrWrapper(vec![1, 2, 3]);
+
+        let s = serde_json::to_value(wrapper).unwrap();
+
+        assert_eq!(s, Value::String("[1,2,3]".to_string()));
+
+        let wrapper: JsonStrWrapper<Vec<i32>> = serde_json::from_value(s).unwrap();
+
+        assert_eq!(wrapper.0, vec![1, 2, 3]);
+    }
+}
diff --git a/supply-chain/config.toml b/supply-chain/config.toml
index a31e6c67..9dd7bb66 100644
--- a/supply-chain/config.toml
+++ b/supply-chain/config.toml
@@ -359,6 +359,14 @@ criteria = "safe-to-deploy"
version = "4.6.6"
criteria = "safe-to-deploy"

+[[exemptions.config]]
+version = "0.13.4"
+criteria = "safe-to-deploy"
+
+[[exemptions.console]]
+version = "0.15.8"
+criteria = "safe-to-run"
+
[[exemptions.const-hex]]
version = "1.9.0"
criteria = "safe-to-deploy"
@@ -491,6 +499,10 @@ criteria = "safe-to-deploy"
version = "0.1.2"
criteria = "safe-to-deploy"

+[[exemptions.dlv-list]]
+version = "0.3.0"
+criteria = "safe-to-deploy"
+
[[exemptions.dotenvy]]
version = "0.15.6"
criteria = "safe-to-deploy"
@@ -511,6 +523,10 @@ criteria = "safe-to-deploy"
version = "0.14.0"
criteria = "safe-to-deploy"

+[[exemptions.encode_unicode]]
+version = "0.3.6"
+criteria = "safe-to-run"
+
[[exemptions.enr]]
version = "0.9.1"
criteria = "safe-to-deploy"
@@ -723,6 +739,14 @@ criteria = "safe-to-deploy"
version = "0.4.5"
criteria = "safe-to-deploy"

+[[exemptions.humantime]]
+version = "2.1.0"
+criteria = "safe-to-deploy"
+
+[[exemptions.humantime-serde]]
+version = "1.1.1"
+criteria = "safe-to-deploy"
+
[[exemptions.hyper-rustls]]
version = "0.24.1"
criteria = "safe-to-deploy"
@@ -759,6 +783,10 @@ criteria = "safe-to-deploy"
version = "0.3.3"
criteria = "safe-to-deploy"

+[[exemptions.indoc]]
+version = "2.0.4"
+criteria = "safe-to-deploy"
+
[[exemptions.instant]]
version = "0.1.12"
criteria = "safe-to-deploy"
@@ -787,6 +815,10 @@ criteria = "safe-to-deploy"
version = "0.3.60"
criteria = "safe-to-deploy"

+[[exemptions.json5]]
+version = "0.4.1"
+criteria = "safe-to-deploy"
+
[[exemptions.k256]]
version = "0.13.1"
criteria = "safe-to-deploy"
@@ -807,6 +839,10 @@ criteria = "safe-to-deploy"
version = "0.1.28"
criteria = "safe-to-deploy"

+[[exemptions.linked-hash-map]]
+version = "0.5.6"
+criteria = "safe-to-deploy"
+
[[exemptions.linux-raw-sys]]
version = "0.0.46"
criteria = "safe-to-deploy"
@@ -963,6 +999,10 @@ criteria = "safe-to-deploy"
version = "0.2.0"
criteria = "safe-to-deploy"

+[[exemptions.ordered-multimap]]
+version = "0.4.3"
+criteria = "safe-to-deploy"
+
[[exemptions.outref]]
version = "0.5.1"
criteria = "safe-to-deploy"
@@ -1003,6 +1043,10 @@ criteria = "safe-to-deploy"
version = "0.2.1"
criteria = "safe-to-deploy"

+[[exemptions.pathdiff]]
+version = "0.2.1"
+criteria = "safe-to-deploy" + [[exemptions.pbkdf2]] version = "0.11.0" criteria = "safe-to-deploy" @@ -1016,7 +1060,19 @@ version = "1.1.1" criteria = "safe-to-deploy" [[exemptions.pest]] -version = "2.5.0" +version = "2.7.6" +criteria = "safe-to-deploy" + +[[exemptions.pest_derive]] +version = "2.7.6" +criteria = "safe-to-deploy" + +[[exemptions.pest_generator]] +version = "2.7.6" +criteria = "safe-to-deploy" + +[[exemptions.pest_meta]] +version = "2.7.6" criteria = "safe-to-deploy" [[exemptions.petgraph]] @@ -1195,6 +1251,10 @@ criteria = "safe-to-deploy" version = "0.8.11" criteria = "safe-to-deploy" +[[exemptions.ron]] +version = "0.7.0" +criteria = "safe-to-deploy" + [[exemptions.ruint]] version = "1.7.0" criteria = "safe-to-deploy" @@ -1203,6 +1263,10 @@ criteria = "safe-to-deploy" version = "1.0.2" criteria = "safe-to-deploy" +[[exemptions.rust-ini]] +version = "0.18.0" +criteria = "safe-to-deploy" + [[exemptions.rustc-hex]] version = "2.1.0" criteria = "safe-to-deploy" @@ -1315,6 +1379,14 @@ criteria = "safe-to-deploy" version = "2.0.0" criteria = "safe-to-deploy" +[[exemptions.similar]] +version = "2.4.0" +criteria = "safe-to-run" + +[[exemptions.similar-asserts]] +version = "1.5.0" +criteria = "safe-to-run" + [[exemptions.simple_asn1]] version = "0.6.2" criteria = "safe-to-deploy" diff --git a/supply-chain/imports.lock b/supply-chain/imports.lock index fba8c13c..dc5f9d52 100644 --- a/supply-chain/imports.lock +++ b/supply-chain/imports.lock @@ -408,6 +408,13 @@ user-id = 6743 user-login = "epage" user-name = "Ed Page" +[[publisher.toml]] +version = "0.8.8" +when = "2023-11-06" +user-id = 6743 +user-login = "epage" +user-name = "Ed Page" + [[publisher.toml_datetime]] version = "0.6.5" when = "2023-10-23" @@ -422,6 +429,13 @@ user-id = 6743 user-login = "epage" user-name = "Ed Page" +[[publisher.toml_edit]] +version = "0.21.0" +when = "2023-11-06" +user-id = 6743 +user-login = "epage" +user-name = "Ed Page" + [[publisher.try-lock]] version = "0.2.3" when = "2020-07-10" @@ -492,6 +506,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows-sys]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows-targets]] version = "0.42.2" when = "2023-03-13" @@ -506,6 +527,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows-targets]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows_aarch64_gnullvm]] version = "0.42.2" when = "2023-03-13" @@ -520,6 +548,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows_aarch64_gnullvm]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows_aarch64_msvc]] version = "0.33.0" when = "2022-02-24" @@ -548,6 +583,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows_aarch64_msvc]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows_i686_gnu]] version = "0.33.0" when = "2022-02-24" @@ -576,6 +618,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows_i686_gnu]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows_i686_msvc]] version = "0.33.0" when = "2022-02-24" @@ -604,6 +653,13 @@ 
user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows_i686_msvc]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows_x86_64_gnu]] version = "0.33.0" when = "2022-02-24" @@ -632,6 +688,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows_x86_64_gnu]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows_x86_64_gnullvm]] version = "0.42.2" when = "2023-03-13" @@ -646,6 +709,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows_x86_64_gnullvm]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.windows_x86_64_msvc]] version = "0.33.0" when = "2022-02-24" @@ -674,6 +744,13 @@ user-id = 64539 user-login = "kennykerr" user-name = "Kenny Kerr" +[[publisher.windows_x86_64_msvc]] +version = "0.52.0" +when = "2023-11-15" +user-id = 64539 +user-login = "kennykerr" +user-name = "Kenny Kerr" + [[publisher.winnow]] version = "0.5.19" when = "2023-11-03" @@ -1250,6 +1327,12 @@ criteria = "safe-to-deploy" version = "0.22.4" notes = "Inspected it to confirm that it only contains data definitions and no runtime code" +[[audits.embark.audits.yaml-rust]] +who = "Johan Andersson " +criteria = "safe-to-deploy" +version = "0.4.5" +notes = "No unsafe usage or ambient capabilities" + [[audits.google.audits.aes]] who = "Joshua Liebow-Feeser " criteria = "safe-to-deploy" @@ -1960,6 +2043,12 @@ criteria = "safe-to-deploy" delta = "0.2.16 -> 0.3.5" aggregated-from = "https://raw.githubusercontent.com/mozilla/glean/main/supply-chain/audits.toml" +[[audits.mozilla.audits.ron]] +who = "Mike Hommey " +criteria = "safe-to-deploy" +delta = "0.7.0 -> 0.7.1" +aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml" + [[audits.mozilla.audits.rustc-hash]] who = "Bobby Holley " criteria = "safe-to-deploy" diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 25376b4a..b55df02f 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -5,6 +5,7 @@ pub mod abi; mod chain_mock; mod prover_mock; +pub mod test_config; pub mod prelude { pub use std::time::Duration; @@ -36,8 +37,13 @@ pub mod prelude { pub use serde::{Deserialize, Serialize}; pub use serde_json::json; pub use signup_sequencer::app::App; + pub use signup_sequencer::config::{ + AppConfig, Config, DatabaseConfig, OzDefenderConfig, ProvidersConfig, RelayerConfig, + ServerConfig, TreeConfig, TxSitterConfig, + }; pub use signup_sequencer::identity_tree::Hash; - pub use signup_sequencer::{server, Options}; + pub use signup_sequencer::prover::ProverType; + pub use signup_sequencer::server; pub use tokio::spawn; pub use tokio::task::JoinHandle; pub use tracing::{error, info, instrument}; @@ -46,6 +52,10 @@ pub mod prelude { pub use url::{Host, Url}; pub use super::prover_mock::ProverService; + pub use super::test_config::{ + self, TestConfigBuilder, DEFAULT_BATCH_DELETION_TIMEOUT_SECONDS, + DEFAULT_TREE_DENSE_PREFIX_DEPTH, DEFAULT_TREE_DEPTH, + }; pub use super::{ abi as ContractAbi, generate_reference_proof_json, generate_test_identities, init_tracing_subscriber, spawn_app, spawn_deps, spawn_mock_deletion_prover, @@ -55,7 +65,7 @@ pub mod prelude { } use std::collections::HashMap; -use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}; +use std::net::{SocketAddr, TcpListener}; use 
std::str::FromStr; use std::sync::Arc; @@ -63,10 +73,10 @@ use futures::stream::FuturesUnordered; use futures::StreamExt; use hyper::StatusCode; use signup_sequencer::identity_tree::Status; +use signup_sequencer::task_monitor::TaskMonitor; use self::chain_mock::{spawn_mock_chain, MockChain, SpecialisedContract}; use self::prelude::*; -use self::prover_mock::ProverType; const NUM_ATTEMPTS_FOR_INCLUSION_PROOF: usize = 20; @@ -578,24 +588,22 @@ fn construct_verify_proof_body( } #[instrument(skip_all)] -pub async fn spawn_app(options: Options) -> anyhow::Result<(JoinHandle<()>, SocketAddr)> { - let app = App::new(options.app).await.expect("Failed to create App"); - - let ip: IpAddr = match options.server.server.host() { - Some(Host::Ipv4(ip)) => ip.into(), - Some(Host::Ipv6(ip)) => ip.into(), - Some(_) => return Err(anyhow::anyhow!("Cannot bind {}", options.server.server)), - None => Ipv4Addr::LOCALHOST.into(), - }; - let port = options.server.server.port().unwrap_or(9998); - let addr = SocketAddr::new(ip, port); - let listener = TcpListener::bind(addr).expect("Failed to bind random port"); +pub async fn spawn_app(config: Config) -> anyhow::Result<(JoinHandle<()>, SocketAddr)> { + let server_config = config.server.clone(); + let app = App::new(config).await.expect("Failed to create App"); + let app = Arc::new(app); + + let task_monitor = TaskMonitor::new(app.clone()); + + task_monitor.start().await; + + let listener = TcpListener::bind(server_config.address).expect("Failed to bind random port"); let local_addr = listener.local_addr()?; let app = spawn({ async move { info!("App thread starting"); - server::bind_from_listener(Arc::new(app), Duration::from_secs(30), listener) + server::bind_from_listener(app, Duration::from_secs(30), listener) .await .expect("Failed to bind address"); info!("App thread stopping"); @@ -691,8 +699,7 @@ pub async fn spawn_mock_insertion_prover( tree_depth: u8, ) -> anyhow::Result { let mock_prover_service = - prover_mock::ProverService::new(batch_size, tree_depth, prover_mock::ProverType::Insertion) - .await?; + prover_mock::ProverService::new(batch_size, tree_depth, ProverType::Insertion).await?; Ok(mock_prover_service) } @@ -702,8 +709,7 @@ pub async fn spawn_mock_deletion_prover( tree_depth: u8, ) -> anyhow::Result { let mock_prover_service = - prover_mock::ProverService::new(batch_size, tree_depth, prover_mock::ProverType::Deletion) - .await?; + prover_mock::ProverService::new(batch_size, tree_depth, ProverType::Deletion).await?; Ok(mock_prover_service) } @@ -717,6 +723,7 @@ pub fn init_tracing_subscriber() { let result = if quiet_mode { tracing_subscriber::fmt() .with_env_filter("info,signup_sequencer=debug") + .compact() .with_timer(Uptime::default()) .try_init() } else { diff --git a/tests/common/prover_mock.rs b/tests/common/prover_mock.rs index a9311ee3..d2f0fdcf 100644 --- a/tests/common/prover_mock.rs +++ b/tests/common/prover_mock.rs @@ -13,6 +13,7 @@ use ethers::utils::keccak256; use hyper::StatusCode; use semaphore::poseidon_tree::{Branch, Proof as TreeProof}; use serde::{Deserialize, Serialize}; +use signup_sequencer::prover::ProverType; use signup_sequencer::utils::index_packing::pack_indices; use tokio::sync::Mutex; @@ -115,25 +116,6 @@ pub struct ProverService { prover_type: ProverType, } -// TODO: we could just import this from the sequencer -#[derive(Debug, Copy, Clone, sqlx::Type, PartialEq, Eq, Serialize, Deserialize, Default)] -#[serde(rename_all = "camelCase")] -#[sqlx(type_name = "prover_enum", rename_all = "PascalCase")] -pub enum 
ProverType {
-    #[default]
-    Insertion,
-    Deletion,
-}
-
-impl std::fmt::Display for ProverType {
-    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-        match self {
-            ProverType::Insertion => write!(f, "insertion"),
-            ProverType::Deletion => write!(f, "deletion"),
-        }
-    }
-}
-
 struct Prover {
     is_available: bool,
     tree_depth: u8,
diff --git a/tests/common/test_config.rs b/tests/common/test_config.rs
new file mode 100644
index 00000000..d6a983f2
--- /dev/null
+++ b/tests/common/test_config.rs
@@ -0,0 +1,183 @@
+use std::net::SocketAddr;
+use std::time::Duration;
+
+use anyhow::Context;
+use ethers::types::Address;
+use signup_sequencer::config::{
+    default, AppConfig, Config, DatabaseConfig, NetworkConfig, OzDefenderConfig, ProvidersConfig,
+    RelayerConfig, ServerConfig, TreeConfig,
+};
+use signup_sequencer::prover::ProverConfig;
+use signup_sequencer::utils::secret::SecretUrl;
+use url::Url;
+
+use crate::ProverService;
+
+pub const DEFAULT_BATCH_INSERTION_TIMEOUT_SECONDS: u64 = 10;
+pub const DEFAULT_BATCH_DELETION_TIMEOUT_SECONDS: u64 = 10;
+pub const DEFAULT_TREE_DEPTH: usize = 20;
+pub const DEFAULT_TREE_DENSE_PREFIX_DEPTH: usize = 10;
+pub const DEFAULT_TIME_BETWEEN_SCANS_SECONDS: u64 = 1;
+
+pub struct TestConfigBuilder {
+    tree_depth: usize,
+    dense_tree_prefix_depth: usize,
+    prover_urls: Vec<ProverConfig>,
+    batch_insertion_timeout: Duration,
+    batch_deletion_timeout: Duration,
+    min_batch_deletion_size: usize,
+    db_url: Option<String>,
+    oz_api_url: Option<String>,
+    oz_address: Option<Address>,
+    cache_file: Option<String>,
+    identity_manager_address: Option<Address>,
+    primary_network_provider: Option<SecretUrl>,
+}
+
+impl TestConfigBuilder {
+    pub fn new() -> Self {
+        Self {
+            tree_depth: DEFAULT_TREE_DEPTH,
+            dense_tree_prefix_depth: DEFAULT_TREE_DENSE_PREFIX_DEPTH,
+            prover_urls: vec![],
+            batch_insertion_timeout: Duration::from_secs(DEFAULT_BATCH_INSERTION_TIMEOUT_SECONDS),
+            batch_deletion_timeout: Duration::from_secs(DEFAULT_BATCH_DELETION_TIMEOUT_SECONDS),
+            min_batch_deletion_size: 1,
+            db_url: None,
+            oz_api_url: None,
+            oz_address: None,
+            cache_file: None,
+            identity_manager_address: None,
+            primary_network_provider: None,
+        }
+    }
+
+    pub fn min_batch_deletion_size(mut self, min_batch_deletion_size: usize) -> Self {
+        self.min_batch_deletion_size = min_batch_deletion_size;
+        self
+    }
+
+    pub fn batch_insertion_timeout(mut self, batch_insertion_timeout: Duration) -> Self {
+        self.batch_insertion_timeout = batch_insertion_timeout;
+        self
+    }
+
+    pub fn tree_depth(mut self, tree_depth: usize) -> Self {
+        self.tree_depth = tree_depth;
+        self
+    }
+
+    pub fn dense_tree_prefix_depth(mut self, dense_tree_prefix_depth: usize) -> Self {
+        self.dense_tree_prefix_depth = dense_tree_prefix_depth;
+        self
+    }
+
+    pub fn db_url(mut self, db_url: &str) -> Self {
+        self.db_url = Some(db_url.to_string());
+        self
+    }
+
+    pub fn oz_api_url(mut self, oz_api_url: &str) -> Self {
+        self.oz_api_url = Some(oz_api_url.to_string());
+        self
+    }
+
+    pub fn oz_address(mut self, oz_address: Address) -> Self {
+        self.oz_address = Some(oz_address);
+        self
+    }
+
+    pub fn cache_file(mut self, cache_file: &str) -> Self {
+        self.cache_file = Some(cache_file.to_string());
+        self
+    }
+
+    pub fn identity_manager_address(mut self, identity_manager_address: Address) -> Self {
+        self.identity_manager_address = Some(identity_manager_address);
+        self
+    }
+
+    pub fn primary_network_provider(mut self, primary_network_provider: impl AsRef<str>) -> Self {
+        let primary_network_provider = primary_network_provider.as_ref();
+        let url: Url = primary_network_provider.parse().expect("Invalid URL");
+
+        self.primary_network_provider = Some(url.into());
+
+        self
+    }
+
+    pub fn add_prover(mut self, prover: &ProverService) -> Self {
+        let prover_config = ProverConfig {
+            url: prover.url().to_string(),
+            // TODO: Make this configurable?
+ timeout_s: 30, + batch_size: prover.batch_size(), + prover_type: prover.prover_type(), + }; + + self.prover_urls.push(prover_config); + + self + } + + pub fn build(self) -> anyhow::Result { + let db_url = self.db_url.context("Missing database url")?; + + let database = SecretUrl::new(Url::parse(&db_url)?); + + let config = Config { + app: AppConfig { + provers_urls: self.prover_urls.into(), + batch_insertion_timeout: self.batch_insertion_timeout, + batch_deletion_timeout: self.batch_deletion_timeout, + min_batch_deletion_size: self.min_batch_deletion_size, + max_epoch_duration: default::max_epoch_duration(), + scanning_window_size: default::scanning_window_size(), + scanning_chain_head_offset: default::scanning_chain_head_offset(), + time_between_scans: Duration::from_secs(DEFAULT_TIME_BETWEEN_SCANS_SECONDS), + monitored_txs_capacity: default::monitored_txs_capacity(), + }, + tree: TreeConfig { + tree_depth: self.tree_depth, + dense_tree_prefix_depth: self.dense_tree_prefix_depth, + tree_gc_threshold: default::tree_gc_threshold(), + cache_file: self.cache_file.context("Missing cache file")?, + force_cache_purge: default::force_cache_purge(), + initial_leaf_value: default::initial_leaf_value(), + }, + network: NetworkConfig { + identity_manager_address: self + .identity_manager_address + .context("Missing identity manager address")?, + relayed_identity_manager_addresses: Default::default(), + }, + providers: ProvidersConfig { + primary_network_provider: self + .primary_network_provider + .context("Missing primary network provider")?, + relayed_network_providers: Default::default(), + }, + relayer: RelayerConfig::OzDefender(OzDefenderConfig { + oz_api_url: self.oz_api_url.context("Missing oz api url")?, + oz_address: self.oz_address.context("Missing oz address")?, + oz_api_key: "".to_string(), + oz_api_secret: "".to_string(), + oz_transaction_validity: default::oz_transaction_validity(), + oz_send_timeout: default::oz_send_timeout(), + oz_mine_timeout: default::oz_mine_timeout(), + oz_gas_limit: Default::default(), + }), + database: DatabaseConfig { + database, + migrate: default::migrate(), + max_connections: default::max_connections(), + }, + server: ServerConfig { + address: SocketAddr::from(([127, 0, 0, 1], 0)), + serve_timeout: default::serve_timeout(), + }, + }; + + Ok(config) + } +} diff --git a/tests/delete_identities.rs b/tests/delete_identities.rs index 3129c8d8..29cb45e2 100644 --- a/tests/delete_identities.rs +++ b/tests/delete_identities.rs @@ -6,7 +6,6 @@ use common::prelude::*; use crate::common::test_delete_identity; -const SUPPORTED_DEPTH: usize = 18; const IDLE_TIME: u64 = 7; #[tokio::test] @@ -17,12 +16,8 @@ async fn delete_identities() -> anyhow::Result<()> { let insertion_batch_size: usize = 8; let deletion_batch_size: usize = 3; - let batch_deletion_timeout_seconds: usize = 10; - #[allow(clippy::cast_possible_truncation)] - let tree_depth: u8 = SUPPORTED_DEPTH as u8; - - let mut ref_tree = PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO); + let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO); let initial_root: U256 = ref_tree.root().into(); let (mock_chain, db_container, insertion_prover_map, deletion_prover_map, micro_oz) = @@ -30,7 +25,7 @@ async fn delete_identities() -> anyhow::Result<()> { initial_root, &[insertion_batch_size], &[deletion_batch_size], - tree_depth, + DEFAULT_TREE_DEPTH as u8, ) .await?; @@ -46,54 +41,18 @@ async fn delete_identities() -> anyhow::Result<()> { temp_dir.path().join("testfile") ); - let mut 
options = Options::try_parse_from([
-        "signup-sequencer",
-        "--identity-manager-address",
-        "0x0000000000000000000000000000000000000000", // placeholder, updated below
-        "--database",
-        &db_url,
-        "--database-max-connections",
-        "1",
-        "--tree-depth",
-        &format!("{tree_depth}"),
-        "--prover-urls",
-        &format!(
-            "[{}, {}]",
-            mock_insertion_prover.arg_string_single(),
-            mock_deletion_prover.arg_string_single()
-        ),
-        "--batch-timeout-seconds",
-        "10",
-        "--batch-deletion-timeout-seconds",
-        &format!("{batch_deletion_timeout_seconds}"),
-        "--min-batch-deletion-size",
-        &format!("{deletion_batch_size}"),
-        "--dense-tree-prefix-depth",
-        "10",
-        "--tree-gc-threshold",
-        "1",
-        "--oz-api-key",
-        "",
-        "--oz-api-secret",
-        "",
-        "--oz-api-url",
-        &micro_oz.endpoint(),
-        "--oz-address",
-        &format!("{:?}", micro_oz.address()),
-        "--dense-tree-mmap-file",
-        temp_dir.path().join("testfile").to_str().unwrap(),
-    ])
-    .context("Failed to create options")?;
-
-    options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL");
-
-    options.app.contracts.identity_manager_address = mock_chain.identity_manager.address();
-    options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint()).expect(
-        "
-    Failed to parse Anvil url",
-    );
-
-    let (app, local_addr) = spawn_app(options.clone())
+    let config = TestConfigBuilder::new()
+        .db_url(&db_url)
+        .oz_api_url(&micro_oz.endpoint())
+        .oz_address(micro_oz.address())
+        .identity_manager_address(mock_chain.identity_manager.address())
+        .primary_network_provider(mock_chain.anvil.endpoint())
+        .cache_file(temp_dir.path().join("testfile").to_str().unwrap())
+        .add_prover(mock_insertion_prover)
+        .add_prover(mock_deletion_prover)
+        .build()?;
+
+    let (app, local_addr) = spawn_app(config.clone())
         .await
         .expect("Failed to spawn app.");
 
@@ -162,9 +121,7 @@ async fn delete_identities() -> anyhow::Result<()> {
     reset_shutdown();
 
     // Test loading the state from a file when the on-chain contract has the state.
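This is the migration pattern repeated in every test below: the long positional CLI-argument vector handed to `Options::try_parse_from` is replaced by `TestConfigBuilder` (defined in tests/common/test_config.rs above), which applies defaults and validates required fields once in `build()`. A standalone miniature of that builder shape; `MiniConfig` and `MiniConfigBuilder` are hypothetical names:

```rust
#[derive(Debug)]
struct MiniConfig {
    db_url: String,
    tree_depth: usize,
}

#[derive(Default)]
struct MiniConfigBuilder {
    db_url: Option<String>,
    tree_depth: Option<usize>,
}

impl MiniConfigBuilder {
    fn db_url(mut self, url: &str) -> Self {
        self.db_url = Some(url.to_string());
        self
    }

    fn tree_depth(mut self, depth: usize) -> Self {
        self.tree_depth = Some(depth);
        self
    }

    fn build(self) -> Result<MiniConfig, String> {
        Ok(MiniConfig {
            // Required field: a missing value fails the build, like context(...) above.
            db_url: self.db_url.ok_or_else(|| "missing db url".to_string())?,
            // Optional field: falls back to a default, like DEFAULT_TREE_DEPTH above.
            tree_depth: self.tree_depth.unwrap_or(20),
        })
    }
}

fn main() {
    let config = MiniConfigBuilder::default()
        .db_url("postgres://localhost/database")
        .build()
        .unwrap();
    assert_eq!(config.tree_depth, 20);
    println!("{config:?}");
}
```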
- let (app, local_addr) = spawn_app(options.clone()) - .await - .expect("Failed to spawn app."); + let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app."); let uri = "http://".to_owned() + &local_addr.to_string(); // Ensure that identities have been deleted diff --git a/tests/delete_padded_identity.rs b/tests/delete_padded_identity.rs index 645bc76c..2a0b061c 100644 --- a/tests/delete_padded_identity.rs +++ b/tests/delete_padded_identity.rs @@ -6,7 +6,6 @@ use common::prelude::*; use crate::common::test_delete_identity; -const SUPPORTED_DEPTH: u8 = 18; const IDLE_TIME: u64 = 7; #[tokio::test] @@ -17,12 +16,8 @@ async fn delete_padded_identity() -> anyhow::Result<()> { let insertion_batch_size: usize = 8; let deletion_batch_size: usize = 3; - let batch_deletion_timeout_seconds: usize = 10; - #[allow(clippy::cast_possible_truncation)] - let tree_depth: u8 = SUPPORTED_DEPTH; - - let mut ref_tree = PoseidonTree::new((SUPPORTED_DEPTH + 1).into(), ruint::Uint::ZERO); + let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO); let initial_root: U256 = ref_tree.root().into(); let (mock_chain, db_container, insertion_prover_map, deletion_prover_map, micro_oz) = @@ -30,7 +25,7 @@ async fn delete_padded_identity() -> anyhow::Result<()> { initial_root, &[insertion_batch_size], &[deletion_batch_size], - tree_depth, + DEFAULT_TREE_DEPTH as u8, ) .await?; @@ -46,56 +41,18 @@ async fn delete_padded_identity() -> anyhow::Result<()> { temp_dir.path().join("testfile") ); - let mut options = Options::try_parse_from([ - "signup-sequencer", - "--identity-manager-address", - "0x0000000000000000000000000000000000000000", // placeholder, updated below - "--database", - &db_url, - "--database-max-connections", - "1", - "--tree-depth", - &format!("{tree_depth}"), - "--prover-urls", - &format!( - "[{}, {}]", - mock_insertion_prover.arg_string_single(), - mock_deletion_prover.arg_string_single() - ), - "--batch-timeout-seconds", - "10", - "--batch-deletion-timeout-seconds", - &format!("{batch_deletion_timeout_seconds}"), - "--min-batch-deletion-size", - &format!("{deletion_batch_size}"), - "--dense-tree-prefix-depth", - "10", - "--tree-gc-threshold", - "1", - "--oz-api-key", - "", - "--oz-api-secret", - "", - "--oz-api-url", - µ_oz.endpoint(), - "--oz-address", - &format!("{:?}", micro_oz.address()), - "--dense-tree-mmap-file", - temp_dir.path().join("testfile").to_str().unwrap(), - ]) - .context("Failed to create options")?; - - options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL"); - - options.app.contracts.identity_manager_address = mock_chain.identity_manager.address(); - options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint()).expect( - " - Failed to parse Anvil url", - ); + let config = TestConfigBuilder::new() + .db_url(&db_url) + .oz_api_url(µ_oz.endpoint()) + .oz_address(micro_oz.address()) + .identity_manager_address(mock_chain.identity_manager.address()) + .primary_network_provider(mock_chain.anvil.endpoint()) + .cache_file(temp_dir.path().join("testfile").to_str().unwrap()) + .add_prover(mock_insertion_prover) + .add_prover(mock_deletion_prover) + .build()?; - let (app, local_addr) = spawn_app(options.clone()) - .await - .expect("Failed to spawn app."); + let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app."); let test_identities = generate_test_identities(insertion_batch_size * 3); let identities_ref: Vec = test_identities @@ -132,7 +89,7 @@ async fn delete_padded_identity() -> 
anyhow::Result<()> { test_delete_identity(&uri, &client, &mut ref_tree, &identities_ref, 1, false).await; tokio::time::sleep(Duration::from_secs( - batch_deletion_timeout_seconds as u64 * 3, + DEFAULT_BATCH_DELETION_TIMEOUT_SECONDS * 3, )) .await; diff --git a/tests/dynamic_batch_sizes.rs b/tests/dynamic_batch_sizes.rs index 6ad6c7f0..c25cc296 100644 --- a/tests/dynamic_batch_sizes.rs +++ b/tests/dynamic_batch_sizes.rs @@ -7,7 +7,6 @@ use hyper::Uri; use crate::common::{test_add_batch_size, test_remove_batch_size}; -const SUPPORTED_DEPTH: usize = 20; const IDLE_TIME: u64 = 10; #[tokio::test] @@ -19,17 +18,14 @@ async fn dynamic_batch_sizes() -> anyhow::Result<()> { let first_batch_size: usize = 3; let second_batch_size: usize = 2; - #[allow(clippy::cast_possible_truncation)] - let tree_depth: u8 = SUPPORTED_DEPTH as u8; - - let mut ref_tree = PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO); + let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO); let initial_root: U256 = ref_tree.root().into(); let (mock_chain, db_container, insertion_prover_map, _, micro_oz) = spawn_deps( initial_root, &[first_batch_size, second_batch_size], &[], - tree_depth, + DEFAULT_TREE_DEPTH as u8, ) .await?; @@ -45,50 +41,18 @@ async fn dynamic_batch_sizes() -> anyhow::Result<()> { temp_dir.path().join("testfile") ); - // We initially spawn the service with a single prover for batch size 3. - - let mut options = Options::try_parse_from([ - "signup-sequencer", - "--identity-manager-address", - "0x0000000000000000000000000000000000000000", // placeholder, updated below - "--database", - &db_url, - "--database-max-connections", - "1", - "--tree-depth", - &format!("{tree_depth}"), - "--prover-urls", - &first_prover.arg_string(), - "--batch-timeout-seconds", - "3", - "--dense-tree-prefix-depth", - "10", - "--tree-gc-threshold", - "1", - "--oz-api-key", - "", - "--oz-api-secret", - "", - "--oz-api-url", - µ_oz.endpoint(), - "--oz-address", - &format!("{:?}", micro_oz.address()), - "--time-between-scans-seconds", - "1", - "--dense-tree-mmap-file", - temp_dir.path().join("testfile").to_str().unwrap(), - ]) - .context("Failed to create options")?; - - options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL"); - - options.app.contracts.identity_manager_address = mock_chain.identity_manager.address(); - options.app.ethereum.ethereum_provider = - Url::parse(&mock_chain.anvil.endpoint()).expect("Failed to parse Anvil url"); - - let (app, local_addr) = spawn_app(options.clone()) - .await - .expect("Failed to spawn app."); + let config = TestConfigBuilder::new() + .db_url(&db_url) + .oz_api_url(µ_oz.endpoint()) + .oz_address(micro_oz.address()) + .identity_manager_address(mock_chain.identity_manager.address()) + .primary_network_provider(mock_chain.anvil.endpoint()) + .cache_file(temp_dir.path().join("testfile").to_str().unwrap()) + // We initially spawn the sequencer with only the first prover + .add_prover(first_prover) + .build()?; + + let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app."); let test_identities = generate_test_identities(first_batch_size * 5); let identities_ref: Vec = test_identities diff --git a/tests/identity_history.rs b/tests/identity_history.rs index 7e9c7caa..05568d82 100644 --- a/tests/identity_history.rs +++ b/tests/identity_history.rs @@ -8,8 +8,6 @@ use signup_sequencer::server::data::{ use crate::common::test_recover_identity; -const SUPPORTED_DEPTH: usize = 18; - const HISTORY_POLLING_SLEEP: Duration = 
Duration::from_secs(5); const MAX_HISTORY_POLLING_ATTEMPTS: usize = 24; // 2 minutes @@ -21,12 +19,8 @@ async fn identity_history() -> anyhow::Result<()> { let insertion_batch_size: usize = 8; let deletion_batch_size: usize = 3; - let batch_deletion_timeout_seconds: usize = 10; - - #[allow(clippy::cast_possible_truncation)] - let tree_depth: u8 = SUPPORTED_DEPTH as u8; - let mut ref_tree = PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO); + let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO); let initial_root: U256 = ref_tree.root().into(); let (mock_chain, db_container, insertion_prover_map, deletion_prover_map, micro_oz) = @@ -34,7 +28,7 @@ async fn identity_history() -> anyhow::Result<()> { initial_root, &[insertion_batch_size], &[deletion_batch_size], - tree_depth, + DEFAULT_TREE_DEPTH as u8, ) .await?; @@ -59,54 +53,18 @@ async fn identity_history() -> anyhow::Result<()> { temp_dir.path().join("testfile") ); - let mut options = Options::try_parse_from([ - "signup-sequencer", - "--identity-manager-address", - "0x0000000000000000000000000000000000000000", // placeholder, updated below - "--database", - &db_url, - "--database-max-connections", - "1", - "--tree-depth", - &format!("{tree_depth}"), - "--prover-urls", - &format!( - "[{}, {}]", - mock_insertion_prover.arg_string_single(), - mock_deletion_prover.arg_string_single() - ), - "--batch-timeout-seconds", - "10", - "--batch-deletion-timeout-seconds", - &format!("{batch_deletion_timeout_seconds}"), - "--min-batch-deletion-size", - &format!("{deletion_batch_size}"), - "--dense-tree-prefix-depth", - "10", - "--tree-gc-threshold", - "1", - "--oz-api-key", - "", - "--oz-api-secret", - "", - "--oz-api-url", - µ_oz.endpoint(), - "--oz-address", - &format!("{:?}", micro_oz.address()), - "--dense-tree-mmap-file", - temp_dir.path().join("testfile").to_str().unwrap(), - ]) - .context("Failed to create options")?; - - options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL"); - - options.app.contracts.identity_manager_address = mock_chain.identity_manager.address(); - options.app.ethereum.ethereum_provider = - Url::parse(&mock_chain.anvil.endpoint()).expect("Failed to parse Anvil url"); - - let (app, local_addr) = spawn_app(options.clone()) - .await - .expect("Failed to spawn app."); + let config = TestConfigBuilder::new() + .db_url(&db_url) + .oz_api_url(µ_oz.endpoint()) + .oz_address(micro_oz.address()) + .identity_manager_address(mock_chain.identity_manager.address()) + .primary_network_provider(mock_chain.anvil.endpoint()) + .cache_file(temp_dir.path().join("testfile").to_str().unwrap()) + .add_prover(mock_insertion_prover) + .add_prover(mock_deletion_prover) + .build()?; + + let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app."); let test_identities = generate_test_identities(insertion_batch_size * 3); let identities_ref: Vec = test_identities diff --git a/tests/insert_identity_and_proofs.rs b/tests/insert_identity_and_proofs.rs index ce93f5cd..179a41a5 100644 --- a/tests/insert_identity_and_proofs.rs +++ b/tests/insert_identity_and_proofs.rs @@ -2,7 +2,6 @@ mod common; use common::prelude::*; -const SUPPORTED_DEPTH: usize = 20; const IDLE_TIME: u64 = 7; #[tokio::test] @@ -12,14 +11,12 @@ async fn insert_identity_and_proofs() -> anyhow::Result<()> { info!("Starting integration test"); let batch_size: usize = 3; - #[allow(clippy::cast_possible_truncation)] - let tree_depth: u8 = SUPPORTED_DEPTH as u8; - let mut ref_tree = 
PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO); + let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO); let initial_root: U256 = ref_tree.root().into(); let (mock_chain, db_container, insertion_prover_map, _, micro_oz) = - spawn_deps(initial_root, &[batch_size], &[], tree_depth).await?; + spawn_deps(initial_root, &[batch_size], &[], DEFAULT_TREE_DEPTH as u8).await?; let prover_mock = &insertion_prover_map[&batch_size]; @@ -33,46 +30,17 @@ async fn insert_identity_and_proofs() -> anyhow::Result<()> { temp_dir.path().join("testfile") ); - let mut options = Options::try_parse_from([ - "signup-sequencer", - "--identity-manager-address", - "0x0000000000000000000000000000000000000000", // placeholder, updated below - "--database", - &db_url, - "--database-max-connections", - "1", - "--tree-depth", - &format!("{tree_depth}"), - "--prover-urls", - &prover_mock.arg_string(), - "--batch-timeout-seconds", - "10", - "--dense-tree-prefix-depth", - "10", - "--tree-gc-threshold", - "1", - "--oz-api-key", - "", - "--oz-api-secret", - "", - "--oz-api-url", - µ_oz.endpoint(), - "--oz-address", - &format!("{:?}", micro_oz.address()), - "--time-between-scans-seconds", - "1", - "--dense-tree-mmap-file", - temp_dir.path().join("testfile").to_str().unwrap(), - ]) - .context("Failed to create options")?; - - options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL"); - - options.app.contracts.identity_manager_address = mock_chain.identity_manager.address(); - options.app.ethereum.ethereum_provider = - Url::parse(&mock_chain.anvil.endpoint()).expect("Failed to parse Anvil url"); - - let (app, local_addr) = spawn_app(options.clone()) + let config = TestConfigBuilder::new() + .db_url(&db_url) + .oz_api_url(µ_oz.endpoint()) + .oz_address(micro_oz.address()) + .identity_manager_address(mock_chain.identity_manager.address()) + .primary_network_provider(mock_chain.anvil.endpoint()) + .cache_file(temp_dir.path().join("testfile").to_str().unwrap()) + .add_prover(prover_mock) + .build()?; + + let (app, local_addr) = spawn_app(config.clone()) .await .expect("Failed to spawn app."); @@ -92,7 +60,7 @@ async fn insert_identity_and_proofs() -> anyhow::Result<()> { &client, 0, &ref_tree, - &options.app.contracts.initial_leaf_value, + &config.tree.initial_leaf_value, true, ) .await; @@ -101,7 +69,7 @@ async fn insert_identity_and_proofs() -> anyhow::Result<()> { &client, 1, &ref_tree, - &options.app.contracts.initial_leaf_value, + &config.tree.initial_leaf_value, true, ) .await; @@ -183,7 +151,7 @@ async fn insert_identity_and_proofs() -> anyhow::Result<()> { reset_shutdown(); // Test loading the state from a file when the on-chain contract has the state. - let (app, local_addr) = spawn_app(options.clone()) + let (app, local_addr) = spawn_app(config.clone()) .await .expect("Failed to spawn app."); let uri = "http://".to_owned() + &local_addr.to_string(); @@ -220,7 +188,7 @@ async fn insert_identity_and_proofs() -> anyhow::Result<()> { // Test loading the state from the saved tree when the on-chain contract has the // state. 
- let (app, local_addr) = spawn_app(options.clone()) + let (app, local_addr) = spawn_app(config.clone()) .await .expect("Failed to spawn app."); let uri = "http://".to_owned() + &local_addr.to_string(); diff --git a/tests/malformed_payload.rs b/tests/malformed_payload.rs index 05779329..498cdfa2 100644 --- a/tests/malformed_payload.rs +++ b/tests/malformed_payload.rs @@ -10,15 +10,13 @@ async fn malformed_payload() -> anyhow::Result<()> { init_tracing_subscriber(); info!("Starting malformed payload test"); - let tree_depth: u8 = 20; - - let ref_tree = PoseidonTree::new(tree_depth as usize + 1, ruint::Uint::ZERO); + let ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO); let initial_root: U256 = ref_tree.root().into(); let batch_size: usize = 3; let (mock_chain, db_container, insertion_prover_map, _, micro_oz) = - spawn_deps(initial_root, &[batch_size], &[], tree_depth).await?; + spawn_deps(initial_root, &[batch_size], &[], DEFAULT_TREE_DEPTH as u8).await?; let prover_mock = &insertion_prover_map[&batch_size]; @@ -31,47 +29,17 @@ async fn malformed_payload() -> anyhow::Result<()> { temp_dir.path().join("testfile") ); - let mut options = Options::try_parse_from([ - "signup-sequencer", - "--identity-manager-address", - "0x0000000000000000000000000000000000000000", // placeholder, updated below - "--database", - &db_url, - "--database-max-connections", - "1", - "--tree-depth", - &format!("{tree_depth}"), - "--prover-urls", - &prover_mock.arg_string(), - "--batch-timeout-seconds", - "10", - "--dense-tree-prefix-depth", - "10", - "--tree-gc-threshold", - "1", - "--oz-api-key", - "", - "--oz-api-secret", - "", - "--oz-api-url", - µ_oz.endpoint(), - "--oz-address", - &format!("{:?}", micro_oz.address()), - "--time-between-scans-seconds", - "1", - "--dense-tree-mmap-file", - temp_dir.path().join("testfile").to_str().unwrap(), - ]) - .context("Failed to create options")?; - - options.server.server = Url::parse("http://127.0.0.1:0/")?; - - options.app.contracts.identity_manager_address = mock_chain.identity_manager.address(); - options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint())?; - - let (app, local_addr) = spawn_app(options.clone()) - .await - .expect("Failed to spawn app."); + let config = TestConfigBuilder::new() + .db_url(&db_url) + .oz_api_url(µ_oz.endpoint()) + .oz_address(micro_oz.address()) + .identity_manager_address(mock_chain.identity_manager.address()) + .primary_network_provider(mock_chain.anvil.endpoint()) + .cache_file(temp_dir.path().join("testfile").to_str().unwrap()) + .add_prover(prover_mock) + .build()?; + + let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app."); let uri = "http://".to_owned() + &local_addr.to_string(); let client = Client::new(); diff --git a/tests/more_identities_than_dense_prefix.rs b/tests/more_identities_than_dense_prefix.rs index db7d591c..7527a7e7 100644 --- a/tests/more_identities_than_dense_prefix.rs +++ b/tests/more_identities_than_dense_prefix.rs @@ -2,7 +2,6 @@ mod common; use common::prelude::*; -const SUPPORTED_DEPTH: usize = 20; const IDLE_TIME: u64 = 12; #[tokio::test] @@ -11,11 +10,13 @@ async fn more_identities_than_dense_prefix() -> anyhow::Result<()> { init_tracing_subscriber(); info!("Starting integration test"); + let tree_depth = 20; + let dense_prefix = 3; + let batch_size: usize = 4; - let dense_prefix_depth: usize = 3; // 2^3 = 8, so 2 batches - let num_identities_in_dense_prefix = 2usize.pow(dense_prefix_depth as u32); + let num_identities_in_dense_prefix = 
2usize.pow(dense_prefix as u32); let num_identities_above_dense_prefix = batch_size * 2; // A total of 4 batches (4 * 4 = 16 identities) @@ -23,14 +24,11 @@ async fn more_identities_than_dense_prefix() -> anyhow::Result<()> { let num_batches_total = num_identities_total / batch_size; - #[allow(clippy::cast_possible_truncation)] - let tree_depth: u8 = SUPPORTED_DEPTH as u8; - - let mut ref_tree = PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO); + let mut ref_tree = PoseidonTree::new(tree_depth + 1, ruint::Uint::ZERO); let initial_root: U256 = ref_tree.root().into(); let (mock_chain, db_container, prover_map, _deletion_prover_map, micro_oz) = - spawn_deps(initial_root, &[batch_size], &[], tree_depth).await?; + spawn_deps(initial_root, &[batch_size], &[], tree_depth as u8).await?; let prover_mock = &prover_map[&batch_size]; @@ -44,43 +42,19 @@ async fn more_identities_than_dense_prefix() -> anyhow::Result<()> { temp_dir.path().join("testfile") ); - let mut options = Options::try_parse_from([ - "signup-sequencer", - "--identity-manager-address", - "0x0000000000000000000000000000000000000000", // placeholder, updated below - "--database", - &db_url, - "--database-max-connections", - "1", - "--tree-depth", - &format!("{tree_depth}"), - "--prover-urls", - &prover_mock.arg_string(), - "--batch-timeout-seconds", - "10", - "--dense-tree-prefix-depth", - &format!("{dense_prefix_depth}"), - "--tree-gc-threshold", - "1", - "--oz-api-key", - "", - "--oz-api-secret", - "", - "--oz-api-url", - µ_oz.endpoint(), - "--oz-address", - &format!("{:?}", micro_oz.address()), - "--dense-tree-mmap-file", - temp_dir.path().join("testfile").to_str().unwrap(), - ]) - .context("Failed to create options")?; - - options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL"); - - options.app.contracts.identity_manager_address = mock_chain.identity_manager.address(); - options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint())?; - - let (app, local_addr) = spawn_app(options.clone()) + let config = TestConfigBuilder::new() + .db_url(&db_url) + .oz_api_url(µ_oz.endpoint()) + .oz_address(micro_oz.address()) + .tree_depth(tree_depth) + .dense_tree_prefix_depth(dense_prefix) + .identity_manager_address(mock_chain.identity_manager.address()) + .primary_network_provider(mock_chain.anvil.endpoint()) + .cache_file(temp_dir.path().join("testfile").to_str().unwrap()) + .add_prover(prover_mock) + .build()?; + + let (app, local_addr) = spawn_app(config.clone()) .await .expect("Failed to spawn app."); @@ -127,7 +101,7 @@ async fn more_identities_than_dense_prefix() -> anyhow::Result<()> { reset_shutdown(); // Test loading the state from a file when the on-chain contract has the state. 
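The sizing in this test is worth spelling out: with `dense_prefix = 3` the mmap-backed dense part holds 2^3 = 8 leaves, so the first two batches of 4 fill it exactly and the remaining two batches must land in the sparse region above it. A quick check of the arithmetic, using only values visible in the test:

```rust
// Sizing used by more_identities_than_dense_prefix, checked explicitly.
fn main() {
    let dense_prefix: u32 = 3;
    let batch_size: usize = 4;

    let in_dense_prefix = 2usize.pow(dense_prefix); // 8 leaves fit the dense part
    let above_dense_prefix = batch_size * 2; // 8 more go to the sparse part
    let total = in_dense_prefix + above_dense_prefix;

    assert_eq!(total, 16);
    assert_eq!(total / batch_size, 4); // num_batches_total
}
```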
-    let (app, local_addr) = spawn_app(options.clone())
+    let (app, local_addr) = spawn_app(config.clone())
         .await
         .expect("Failed to spawn app.");
     let uri = "http://".to_owned() + &local_addr.to_string();
diff --git a/tests/multi_prover.rs b/tests/multi_prover.rs
index 9de2eab4..f81f66dd 100644
--- a/tests/multi_prover.rs
+++ b/tests/multi_prover.rs
@@ -7,11 +7,9 @@ use common::prelude::*;
 #[tokio::test]
 async fn multi_prover() -> anyhow::Result<()> {
     init_tracing_subscriber();
-    info!("Starting unavailable prover test");
+    info!("Starting multi prover test");

-    let tree_depth: u8 = 20;
-
-    let mut ref_tree = PoseidonTree::new(tree_depth as usize + 1, ruint::Uint::ZERO);
+    let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO);
     let initial_root: U256 = ref_tree.root().into();

     let batch_timeout_seconds: u64 = 11;
@@ -23,21 +21,13 @@ async fn multi_prover() -> anyhow::Result<()> {
         initial_root,
         &[batch_size_3, batch_size_10],
         &[],
-        tree_depth,
+        DEFAULT_TREE_DEPTH as u8,
     )
     .await?;

     let prover_mock_batch_size_3 = &insertion_prover_map[&batch_size_3];
     let prover_mock_batch_size_10 = &insertion_prover_map[&batch_size_10];

-    let prover_arg_string = format!(
-        "[{},{}]",
-        prover_mock_batch_size_3.arg_string_single(),
-        prover_mock_batch_size_10.arg_string_single()
-    );
-
-    info!("Running with {prover_arg_string}");
-
     let db_socket_addr = db_container.address();
     let db_url = format!("postgres://postgres:postgres@{db_socket_addr}/database");

@@ -47,47 +37,19 @@ async fn multi_prover() -> anyhow::Result<()> {
         temp_dir.path().join("testfile")
     );

-    let mut options = Options::try_parse_from([
-        "signup-sequencer",
-        "--identity-manager-address",
-        "0x0000000000000000000000000000000000000000", // placeholder, updated below
-        "--database",
-        &db_url,
-        "--database-max-connections",
-        "1",
-        "--tree-depth",
-        &format!("{tree_depth}"),
-        "--prover-urls",
-        &prover_arg_string,
-        "--batch-timeout-seconds",
-        &format!("{batch_timeout_seconds}"),
-        "--dense-tree-prefix-depth",
-        "10",
-        "--tree-gc-threshold",
-        "1",
-        "--oz-api-key",
-        "",
-        "--oz-api-secret",
-        "",
-        "--oz-api-url",
-        &micro_oz.endpoint(),
-        "--oz-address",
-        &format!("{:?}", micro_oz.address()),
-        "--time-between-scans-seconds",
-        "1",
-        "--dense-tree-mmap-file",
-        temp_dir.path().join("testfile").to_str().unwrap(),
-    ])
-    .context("Failed to create options")?;
-
-    options.server.server = Url::parse("http://127.0.0.1:0/")?;
-
-    options.app.contracts.identity_manager_address = mock_chain.identity_manager.address();
-    options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint())?;
-
-    let (app, local_addr) = spawn_app(options.clone())
-        .await
-        .expect("Failed to spawn app.");
+    let config = TestConfigBuilder::new()
+        .db_url(&db_url)
+        .oz_api_url(&micro_oz.endpoint())
+        .oz_address(micro_oz.address())
+        .identity_manager_address(mock_chain.identity_manager.address())
+        .primary_network_provider(mock_chain.anvil.endpoint())
+        .cache_file(temp_dir.path().join("testfile").to_str().unwrap())
+        .add_prover(prover_mock_batch_size_3)
+        .add_prover(prover_mock_batch_size_10)
+        .build()?;
+
+    tracing::info!("Spawning app");
+    let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app.");

     let test_identities = generate_test_identities(batch_size_3 + batch_size_10);
diff --git a/tests/recover_identities.rs b/tests/recover_identities.rs
index 2e11155a..4ead71cd 100644
--- a/tests/recover_identities.rs
+++ b/tests/recover_identities.rs
@@ -6,7 +6,7 @@ use signup_sequencer::identity_tree::{ProcessedStatus, UnprocessedStatus};

 use crate::common::{test_inclusion_status, test_recover_identity};

-const SUPPORTED_DEPTH: usize = 18;
+
 const IDLE_TIME: u64 = 7;

 #[tokio::test]
@@ -17,12 +17,8 @@ async fn recover_identities() -> anyhow::Result<()> {
     let insertion_batch_size: usize = 8;
     let deletion_batch_size: usize = 3;

-    let batch_deletion_timeout_seconds: usize = 10;
-
-    #[allow(clippy::cast_possible_truncation)]
-    let tree_depth: u8 = SUPPORTED_DEPTH as u8;
-    let mut ref_tree = PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO);
+    let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO);
     let initial_root: U256 = ref_tree.root().into();

     let (mock_chain, db_container, insertion_prover_map, deletion_prover_map, micro_oz) =
@@ -30,7 +26,7 @@ async fn recover_identities() -> anyhow::Result<()> {
         initial_root,
         &[insertion_batch_size],
         &[deletion_batch_size],
-        tree_depth,
+        DEFAULT_TREE_DEPTH as u8,
     )
     .await?;

@@ -55,56 +51,19 @@ async fn recover_identities() -> anyhow::Result<()> {
         temp_dir.path().join("testfile")
     );

-    let mut options = Options::try_parse_from([
-        "signup-sequencer",
-        "--identity-manager-address",
-        "0x0000000000000000000000000000000000000000", // placeholder, updated below
-        "--database",
-        &db_url,
-        "--database-max-connections",
-        "1",
-        "--tree-depth",
-        &format!("{tree_depth}"),
-        "--prover-urls",
-        &format!(
-            "[{}, {}]",
-            mock_insertion_prover.arg_string_single(),
-            mock_deletion_prover.arg_string_single()
-        ),
-        "--batch-timeout-seconds",
-        "10",
-        "--batch-deletion-timeout-seconds",
-        &format!("{batch_deletion_timeout_seconds}"),
-        "--min-batch-deletion-size",
-        &format!("{deletion_batch_size}"),
-        "--dense-tree-prefix-depth",
-        "10",
-        "--tree-gc-threshold",
-        "1",
-        "--oz-api-key",
-        "",
-        "--oz-api-secret",
-        "",
-        "--oz-api-url",
-        &micro_oz.endpoint(),
-        "--oz-address",
-        &format!("{:?}", micro_oz.address()),
-        "--dense-tree-mmap-file",
-        temp_dir.path().join("testfile").to_str().unwrap(),
-    ])
-    .context("Failed to create options")?;
-
-    options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL");
-
-    options.app.contracts.identity_manager_address = mock_chain.identity_manager.address();
-    options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint()).expect(
-        "
-    Failed to parse Anvil url",
-    );
+    let config = TestConfigBuilder::new()
+        .db_url(&db_url)
+        .oz_api_url(&micro_oz.endpoint())
+        .oz_address(micro_oz.address())
+        .min_batch_deletion_size(deletion_batch_size)
+        .identity_manager_address(mock_chain.identity_manager.address())
+        .primary_network_provider(mock_chain.anvil.endpoint())
+        .cache_file(temp_dir.path().join("testfile").to_str().unwrap())
+        .add_prover(mock_insertion_prover)
+        .add_prover(mock_deletion_prover)
+        .build()?;

-    let (app, local_addr) = spawn_app(options.clone())
-        .await
-        .expect("Failed to spawn app.");
+    let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app.");

     let test_identities = generate_test_identities(insertion_batch_size * 3);
     let identities_ref: Vec<Field> = test_identities
@@ -167,7 +126,7 @@ async fn recover_identities() -> anyhow::Result<()> {
         next_leaf_index += 1;
     }

-    tokio::time::sleep(Duration::from_secs(IDLE_TIME * 6)).await;
+    tokio::time::sleep(Duration::from_secs(IDLE_TIME * 3)).await;

     // Ensure that identities have been deleted
     for i in 0..deletion_batch_size {
diff --git a/tests/unavailable_prover.rs b/tests/unavailable_prover.rs
index 1448c06e..d82fd38b 100644
--- a/tests/unavailable_prover.rs
+++ b/tests/unavailable_prover.rs
@@ -9,15 +9,13 @@ async fn unavailable_prover() -> anyhow::Result<()> {
     init_tracing_subscriber();
     info!("Starting unavailable prover test");

-    let tree_depth: u8 = 20;
-
-    let mut ref_tree = PoseidonTree::new(tree_depth as usize + 1, ruint::Uint::ZERO);
+    let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO);
     let initial_root: U256 = ref_tree.root().into();

     let batch_size: usize = 3;

     let (mock_chain, db_container, insertion_prover_map, _, micro_oz) =
-        spawn_deps(initial_root, &[batch_size], &[], tree_depth).await?;
+        spawn_deps(initial_root, &[batch_size], &[], DEFAULT_TREE_DEPTH as u8).await?;

     let prover_mock = &insertion_prover_map[&batch_size];

@@ -32,47 +30,17 @@ async fn unavailable_prover() -> anyhow::Result<()> {
         temp_dir.path().join("testfile")
     );

-    let mut options = Options::try_parse_from([
-        "signup-sequencer",
-        "--identity-manager-address",
-        "0x0000000000000000000000000000000000000000", // placeholder, updated below
-        "--database",
-        &db_url,
-        "--database-max-connections",
-        "1",
-        "--tree-depth",
-        &format!("{tree_depth}"),
-        "--prover-urls",
-        &prover_mock.arg_string(),
-        "--batch-timeout-seconds",
-        "10",
-        "--dense-tree-prefix-depth",
-        "10",
-        "--tree-gc-threshold",
-        "1",
-        "--oz-api-key",
-        "",
-        "--oz-api-secret",
-        "",
-        "--oz-api-url",
-        &micro_oz.endpoint(),
-        "--oz-address",
-        &format!("{:?}", micro_oz.address()),
-        "--time-between-scans-seconds",
-        "1",
-        "--dense-tree-mmap-file",
-        temp_dir.path().join("testfile").to_str().unwrap(),
-    ])
-    .context("Failed to create options")?;
-
-    options.server.server = Url::parse("http://127.0.0.1:0/")?;
-
-    options.app.contracts.identity_manager_address = mock_chain.identity_manager.address();
-    options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint())?;
-
-    let (app, local_addr) = spawn_app(options.clone())
-        .await
-        .expect("Failed to spawn app.");
+    let config = TestConfigBuilder::new()
+        .db_url(&db_url)
+        .oz_api_url(&micro_oz.endpoint())
+        .oz_address(micro_oz.address())
+        .identity_manager_address(mock_chain.identity_manager.address())
+        .primary_network_provider(mock_chain.anvil.endpoint())
+        .cache_file(temp_dir.path().join("testfile").to_str().unwrap())
+        .add_prover(prover_mock)
+        .build()?;
+
+    let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app.");

     let test_identities = generate_test_identities(batch_size * 2);
     let identities_ref: Vec<Field> = test_identities
diff --git a/tests/unreduced_identity.rs b/tests/unreduced_identity.rs
index f19a1078..c969e4d0 100644
--- a/tests/unreduced_identity.rs
+++ b/tests/unreduced_identity.rs
@@ -5,14 +5,12 @@ use common::prelude::*;
 async fn test_unreduced_identity() -> anyhow::Result<()> {
     info!("Starting unavailable prover test");

-    let tree_depth: u8 = 20;
-
-    let ref_tree = PoseidonTree::new(tree_depth as usize + 1, ruint::Uint::ZERO);
+    let ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO);
     let initial_root: U256 = ref_tree.root().into();

     let batch_size: usize = 3;

     let (mock_chain, db_container, insertion_prover_map, _, micro_oz) =
-        spawn_deps(initial_root, &[batch_size], &[], tree_depth).await?;
+        spawn_deps(initial_root, &[batch_size], &[], DEFAULT_TREE_DEPTH as u8).await?;

     let prover_mock = &insertion_prover_map[&batch_size];
     prover_mock.set_availability(false).await;
@@ -25,47 +23,17 @@ async fn test_unreduced_identity() -> anyhow::Result<()> {
         temp_dir.path().join("testfile")
     );

-    let mut options = Options::try_parse_from([
-        "signup-sequencer",
-        "--identity-manager-address",
-        "0x0000000000000000000000000000000000000000", // placeholder, updated below
-        "--database",
-        &db_url,
-        "--database-max-connections",
-        "1",
-        "--tree-depth",
-        &format!("{tree_depth}"),
-        "--prover-urls",
-        &prover_mock.arg_string(),
-        "--batch-timeout-seconds",
-        "10",
-        "--dense-tree-prefix-depth",
-        "10",
-        "--tree-gc-threshold",
-        "1",
-        "--oz-api-key",
-        "",
-        "--oz-api-secret",
-        "",
-        "--oz-api-url",
-        &micro_oz.endpoint(),
-        "--oz-address",
-        &format!("{:?}", micro_oz.address()),
-        "--time-between-scans-seconds",
-        "1",
-        "--dense-tree-mmap-file",
-        temp_dir.path().join("testfile").to_str().unwrap(),
-    ])
-    .context("Failed to create options")?;
-
-    options.server.server = Url::parse("http://127.0.0.1:0/")?;
-
-    options.app.contracts.identity_manager_address = mock_chain.identity_manager.address();
-    options.app.ethereum.ethereum_provider = Url::parse(&mock_chain.anvil.endpoint())?;
-
-    let (app, local_addr) = spawn_app(options.clone())
-        .await
-        .expect("Failed to spawn app.");
+    let config = TestConfigBuilder::new()
+        .db_url(&db_url)
+        .oz_api_url(&micro_oz.endpoint())
+        .oz_address(micro_oz.address())
+        .identity_manager_address(mock_chain.identity_manager.address())
+        .primary_network_provider(mock_chain.anvil.endpoint())
+        .cache_file(temp_dir.path().join("testfile").to_str().unwrap())
+        .add_prover(prover_mock)
+        .build()?;
+
+    let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app.");

     let uri = "http://".to_owned() + &local_addr.to_string();
     let client = Client::new();
diff --git a/tests/validate_proof_with_age.rs b/tests/validate_proof_with_age.rs
index bded5dea..622e9599 100644
--- a/tests/validate_proof_with_age.rs
+++ b/tests/validate_proof_with_age.rs
@@ -6,25 +6,22 @@ use common::prelude::*;

 use crate::common::test_verify_proof_with_age;

-const SUPPORTED_DEPTH: usize = 20;
-
 #[tokio::test]
 async fn validate_proof_with_age() -> anyhow::Result<()> {
     // Initialize logging for the test.
     init_tracing_subscriber();
     info!("Starting integration test");

-    let mut ref_tree = PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO);
+    let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO);
     let initial_root: U256 = ref_tree.root().into();

     let batch_timeout_seconds: u64 = 1;

     #[allow(clippy::cast_possible_truncation)]
-    let tree_depth: u8 = SUPPORTED_DEPTH as u8;

     let batch_size = 3;

     let (mock_chain, db_container, insertion_prover_map, _deletion_prover_map, micro_oz) =
-        spawn_deps(initial_root, &[batch_size], &[], tree_depth).await?;
+        spawn_deps(initial_root, &[batch_size], &[], DEFAULT_TREE_DEPTH as u8).await?;

     let prover_mock = &insertion_prover_map[&batch_size];

@@ -37,47 +34,18 @@ async fn validate_proof_with_age() -> anyhow::Result<()> {
         temp_dir.path().join("testfile")
     );

-    let mut options = Options::try_parse_from([
-        "signup-sequencer",
-        "--identity-manager-address",
-        "0x0000000000000000000000000000000000000000", // placeholder, updated below
-        "--database",
-        &db_url,
-        "--database-max-connections",
-        "1",
-        "--tree-depth",
-        &format!("{tree_depth}"),
-        "--prover-urls",
-        &prover_mock.arg_string(),
-        "--batch-timeout-seconds",
-        &format!("{batch_timeout_seconds}"),
-        "--dense-tree-prefix-depth",
-        "10",
-        "--tree-gc-threshold",
-        "1",
-        "--oz-api-key",
-        "",
-        "--oz-api-secret",
-        "",
-        "--oz-api-url",
-        &micro_oz.endpoint(),
-        "--oz-address",
-        &format!("{:?}", micro_oz.address()),
-        "--time-between-scans-seconds",
-        "1",
-        "--dense-tree-mmap-file",
-        temp_dir.path().join("testfile").to_str().unwrap(),
-    ])
-    .expect("Failed to create options");
-    options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL");
-
-    options.app.contracts.identity_manager_address = mock_chain.identity_manager.address();
-    options.app.ethereum.ethereum_provider =
-        Url::parse(&mock_chain.anvil.endpoint()).expect("Failed to parse ganache endpoint");
-
-    let (app, local_addr) = spawn_app(options.clone())
-        .await
-        .expect("Failed to spawn app.");
+    let config = TestConfigBuilder::new()
+        .db_url(&db_url)
+        .oz_api_url(&micro_oz.endpoint())
+        .oz_address(micro_oz.address())
+        .batch_insertion_timeout(Duration::from_secs(batch_timeout_seconds))
+        .identity_manager_address(mock_chain.identity_manager.address())
+        .primary_network_provider(mock_chain.anvil.endpoint())
+        .cache_file(temp_dir.path().join("testfile").to_str().unwrap())
+        .add_prover(prover_mock)
+        .build()?;
+
+    let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app.");

     let uri = "http://".to_owned() + &local_addr.to_string();
     let client = Client::new();
diff --git a/tests/validate_proofs.rs b/tests/validate_proofs.rs
index 1618603f..eaf027f6 100644
--- a/tests/validate_proofs.rs
+++ b/tests/validate_proofs.rs
@@ -2,25 +2,21 @@ mod common;

 use common::prelude::*;

-const SUPPORTED_DEPTH: usize = 20;
-
 #[tokio::test]
 async fn validate_proofs() -> anyhow::Result<()> {
     // Initialize logging for the test.
     init_tracing_subscriber();
     info!("Starting integration test");

-    let mut ref_tree = PoseidonTree::new(SUPPORTED_DEPTH + 1, ruint::Uint::ZERO);
+    let mut ref_tree = PoseidonTree::new(DEFAULT_TREE_DEPTH + 1, ruint::Uint::ZERO);
     let initial_root: U256 = ref_tree.root().into();

     let batch_timeout_seconds: u64 = 1;

-    #[allow(clippy::cast_possible_truncation)]
-    let tree_depth: u8 = SUPPORTED_DEPTH as u8;

     let batch_size = 3;

     let (mock_chain, db_container, insertion_prover_map, _, micro_oz) =
-        spawn_deps(initial_root, &[batch_size], &[], tree_depth).await?;
+        spawn_deps(initial_root, &[batch_size], &[], DEFAULT_TREE_DEPTH as u8).await?;

     let prover_mock = &insertion_prover_map[&batch_size];

@@ -35,47 +31,17 @@ async fn validate_proofs() -> anyhow::Result<()> {
         temp_dir.path().join("testfile")
     );

-    let mut options = Options::try_parse_from([
-        "signup-sequencer",
-        "--identity-manager-address",
-        "0x0000000000000000000000000000000000000000", // placeholder, updated below
-        "--database",
-        &db_url,
-        "--database-max-connections",
-        "1",
-        "--tree-depth",
-        &format!("{tree_depth}"),
-        "--prover-urls",
-        &prover_mock.arg_string(),
-        "--batch-timeout-seconds",
-        &format!("{batch_timeout_seconds}"),
-        "--dense-tree-prefix-depth",
-        "10",
-        "--tree-gc-threshold",
-        "1",
-        "--oz-api-key",
-        "",
-        "--oz-api-secret",
-        "",
-        "--oz-api-url",
-        &micro_oz.endpoint(),
-        "--oz-address",
-        &format!("{:?}", micro_oz.address()),
-        "--time-between-scans-seconds",
-        "1",
-        "--dense-tree-mmap-file",
-        temp_dir.path().join("testfile").to_str().unwrap(),
-    ])
-    .expect("Failed to create options");
-    options.server.server = Url::parse("http://127.0.0.1:0/").expect("Failed to parse URL");
-
-    options.app.contracts.identity_manager_address = mock_chain.identity_manager.address();
-    options.app.ethereum.ethereum_provider =
-        Url::parse(&mock_chain.anvil.endpoint()).expect("Failed to parse ganache endpoint");
-
-    let (app, local_addr) = spawn_app(options.clone())
-        .await
-        .expect("Failed to spawn app.");
+    let config = TestConfigBuilder::new()
+        .db_url(&db_url)
+        .oz_api_url(&micro_oz.endpoint())
+        .oz_address(micro_oz.address())
+        .identity_manager_address(mock_chain.identity_manager.address())
+        .primary_network_provider(mock_chain.anvil.endpoint())
+        .cache_file(temp_dir.path().join("testfile").to_str().unwrap())
+        .add_prover(prover_mock)
+        .build()?;
+
+    let (app, local_addr) = spawn_app(config).await.expect("Failed to spawn app.");

     let uri = "http://".to_owned() + &local_addr.to_string();
     let client = Client::new();
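Reviewer note: the `TestConfigBuilder` that all of these call sites now share lives in the `tests/common` module, which is not part of this diff, so its exact definition is not visible here. For orientation only, a builder along the following lines would support the chained calls above. This is a minimal, hypothetical sketch: the field names, the `DEFAULT_TREE_DEPTH = 20` and dense-prefix-depth `10` fallbacks (inferred from the removed `--tree-depth 20` and `--dense-tree-prefix-depth 10` CLI args), and the `&str` parameter of `add_prover` are all assumptions; the real setters take richer types (e.g. the actual `add_prover` calls pass the prover mocks themselves).

```rust
// Hypothetical sketch only: the real TestConfigBuilder in tests/common may
// differ in fields, setter signatures, and defaults.
use anyhow::anyhow;

// Assumed fallback, inferred from the removed "--tree-depth", "20" args.
const DEFAULT_TREE_DEPTH: usize = 20;

#[derive(Clone, Debug)]
pub struct TestConfig {
    pub db_url: String,
    pub tree_depth: usize,
    pub dense_tree_prefix_depth: usize,
    pub prover_urls: Vec<String>,
}

#[derive(Default)]
pub struct TestConfigBuilder {
    db_url: Option<String>,
    tree_depth: Option<usize>,
    dense_tree_prefix_depth: Option<usize>,
    prover_urls: Vec<String>,
}

impl TestConfigBuilder {
    pub fn new() -> Self {
        Self::default()
    }

    // Each setter consumes and returns the builder so call sites can chain.
    pub fn db_url(mut self, url: &str) -> Self {
        self.db_url = Some(url.to_owned());
        self
    }

    pub fn tree_depth(mut self, depth: usize) -> Self {
        self.tree_depth = Some(depth);
        self
    }

    pub fn dense_tree_prefix_depth(mut self, depth: usize) -> Self {
        self.dense_tree_prefix_depth = Some(depth);
        self
    }

    pub fn add_prover(mut self, url: &str) -> Self {
        self.prover_urls.push(url.to_owned());
        self
    }

    // build() replaces the old per-test CLI arrays: anything a test does not
    // set explicitly falls back to the defaults the flags used to hard-code.
    pub fn build(self) -> anyhow::Result<TestConfig> {
        Ok(TestConfig {
            db_url: self.db_url.ok_or_else(|| anyhow!("db_url is required"))?,
            tree_depth: self.tree_depth.unwrap_or(DEFAULT_TREE_DEPTH),
            dense_tree_prefix_depth: self.dense_tree_prefix_depth.unwrap_or(10),
            prover_urls: self.prover_urls,
        })
    }
}
```

The design win over the old `Options::try_parse_from` arrays is visible in the diff itself: shared defaults live in one place, and each test spells out only the settings it actually deviates on (e.g. only `more_identities_than_dense_prefix` sets `tree_depth`/`dense_tree_prefix_depth`, and only `validate_proof_with_age` overrides the batch insertion timeout).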