diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index c29c3cf8..aebbef0d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -9,7 +9,7 @@ on:
     branches:
       - main
   pull_request:
-    types: [opened, synchronize, reopened]
+    types: [ opened, synchronize, reopened ]
     branches:
       - main
 
@@ -98,12 +98,12 @@ jobs:
         uses: actions-rs/cargo@v1
         with:
           command: nextest
-          args: run --workspace --no-run
+          args: run --workspace --exclude e2e-tests --no-run
       - name: Run tests
        uses: actions-rs/cargo@v1
         with:
           command: nextest
-          args: run --workspace
+          args: run --workspace --exclude e2e-tests
 
   cargo-vet:
     name: Vet Dependencies
diff --git a/.gitignore b/.gitignore
index d4bdd4e7..7487727a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,6 @@ commitments.json
 !.editorconfig
 !.dockerignore
 !.cargo
+
+e2e_tests/docker-compose/keys/*
+!e2e_tests/docker-compose/keys/.keep
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index 14cf6fcf..049013e3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1977,6 +1977,25 @@ version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b"
 
+[[package]]
+name = "e2e-tests"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "ethers",
+ "hex",
+ "hyper",
+ "rand",
+ "retry",
+ "serde_json",
+ "signup-sequencer",
+ "tokio",
+ "tracing",
+ "tracing-futures",
+ "tracing-subscriber 0.3.18",
+ "tracing-test",
+]
+
 [[package]]
 name = "ecdsa"
 version = "0.16.9"
@@ -4873,6 +4892,15 @@ dependencies = [
  "winreg",
 ]
 
+[[package]]
+name = "retry"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9166d72162de3575f950507683fac47e30f6f2c3836b71b7fbc61aa517c9c5f4"
+dependencies = [
+ "rand",
+]
+
 [[package]]
 name = "rfc6979"
 version = "0.4.0"
diff --git a/Cargo.toml b/Cargo.toml
index 22c6946a..09ffe2ca 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -16,7 +16,7 @@ license-file = "LICENSE.md"
 build = "build.rs"
 
 [workspace]
-members = ["crates/*"]
+members = ["crates/*", "e2e_tests/scenarios"]
 
 [features]
 default = []
diff --git a/Readme.md b/Readme.md
index 6dc5f7e0..4e0ce338 100644
--- a/Readme.md
+++ b/Readme.md
@@ -24,15 +24,22 @@ Sequencer has 6 API routes.
       The database is polled every few seconds and added to insertion task.
    2. Processing: The processing of identities, where current batching tree is taken and processed so we end up
       with pre root (the root of tree before proofs are generated), post root, start index and
-      identity commitments (with their proofs). All of those get sent to a [prover](#semaphore-mtb) for proof generation.
-      The identities transaction is then mined, with aforementioned fields and pending identities are sent to task to be mined on-chain.
+      identity commitments (with their proofs). All of those get sent to a [prover](#semaphore-mtb) for proof
+      generation.
+      The identities transaction is then mined with the aforementioned fields, and pending identities are sent to a
+      task to be mined on-chain.
    3. Mining: The transaction ID from processing task gets mined and Sequencer database gets updated accordingly.
       Now with blockchain and database being in sync, the mined tree gets updated as well.
-2. `/inclusionProof` - Takes the identity commitment hash, and checks for any errors that might have occurred in the insert identity steps. Then leaf index is fetched from the database, corresponding to the identity hash provided, and then we check if the identity is indeed in the tree. The inclusion proof is then returned to the API caller.
-3. `/deleteIdentity` - Takes an identity commitment hash, ensures that it exists and hasn't been deleted yet. This identity is then scheduled for deletion.
-4. `/recoverIdentity` - Takes two identity commitment hashes. The first must exist and will be scheduled for deletion and the other will be inserted as a replacement after the first identity has been deleted and a set amount of time (depends on configuration parameters) has passed.
+2. `/inclusionProof` - Takes the identity commitment hash and checks for any errors that might have occurred in the
+   insert identity steps.
+   Then the leaf index corresponding to the provided identity hash is fetched from the database, and we check that
+   the identity is indeed in the tree. The inclusion proof is then returned to the API caller.
+3. `/deleteIdentity` - Takes an identity commitment hash, ensures that it exists and hasn't been deleted yet. This
+   identity is then scheduled for deletion.
+4. `/recoverIdentity` - Takes two identity commitment hashes. The first must exist and will be scheduled for deletion;
+   the other will be inserted as a replacement after the first identity has been deleted and a configurable amount of
+   time has passed.
 5. `/verifySemaphoreProof` - This call takes root, signal hash, nullifier hash, external nullifier hash and a proof.
    The proving key is fetched based on the depth index, and verification key as well.
    The list of prime fields is created based on request input mentioned before, and then we proceed to verify the proof.
@@ -63,7 +70,8 @@ docker pull postgres
 
 ### Local Node
 
-You'll need to run a local node like geth or [ganache](https://archive.trufflesuite.com/ganache/). Start up a new chain and take note of the dev addresses. You can follow instructions [here](https://book.getfoundry.sh/anvil/).
+You'll need to run a local node like geth or [ganache](https://archive.trufflesuite.com/ganache/). Start up a new chain
+and take note of the dev addresses. You can follow the instructions [here](https://book.getfoundry.sh/anvil/).
 
 ### Worldcoin id contracts
 
@@ -75,10 +83,12 @@ Clone [contracts-deployer](https://github.com/worldcoin/contract-deployer.git) a
 
 Semaphore-mtb is a service for batch processing of Merkle tree updates.
 
-Clone [semaphore-mtb](https://github.com/worldcoin/semaphore-mtb) and execute `go build .` (you will need a golang compiler)
+Clone [semaphore-mtb](https://github.com/worldcoin/semaphore-mtb) and execute `go build .` (you will need a golang
+compiler).
 
 Go build will create an executable named gnark-mbu. If you went through the contracts-deployer,
-you will have a generated a keys file that is used by semaphore-mtb. If your deployment contains more than one prover, then you must run this command for each one and configure them to listen on different ports.
+you will have generated a keys file that is used by semaphore-mtb. If your deployment contains more than one prover,
+then you must run this command for each one and configure them to listen on different ports.
 
 ```shell
 ./gnark-mbu start --keys-file path/to/contracts-deployer//.cache/keys/ --mode
 ```
@@ -102,7 +112,7 @@ Now you need to create a `config.toml` file for signup-sequencer:
 
 ```toml
 [app]
-provers_urls ='[]'
+provers_urls = '[]'
 
 [tree]
 
@@ -138,10 +148,27 @@ sudo ln -sf `pwd`/signup_sequencer_data /data
 ```
 
 And then run the daemon:
+
 ```shell
 cargo run config.toml
 ```
+
+### Docker compose
+
+The docker compose setup from the E2E tests can also be used for local development. To run it, first export an
+Alchemy API key so that the anvil fork works:
+
+```shell
+export ALCHEMY_API_KEY=
+```
+
+Then you can run docker compose (without the signup sequencer):
+
+```shell
+cd e2e_tests/docker-compose
+docker compose up
+```
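+
+The compose file also defines an optional `e2e-ha` profile that puts HAProxy in front of the sequencer containers,
+which is the topology the E2E tests use. A minimal sketch, assuming you have already built the `signup-sequencer`
+image locally (see [E2E Tests](#e2e-tests)):
+
+```shell
+cd e2e_tests/docker-compose
+# start everything, including the balancer and sequencer services behind the e2e-ha profile
+docker compose --profile e2e-ha up -d
+# the balancer listens on ${SIGNUP_SEQUENCER_BALANCER_PORT:-8080}
+curl http://localhost:8080/health
+```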
 
 ## Tests
 
 Lint, build, test
 
@@ -152,6 +179,21 @@ First ensure you have the docker daemon up and running, then:
 cargo fmt && cargo clippy --all-targets && cargo build --all-targets && cargo test --all-targets
 ```
 
+## E2E Tests
+
+Before running the E2E tests, make sure to build the signup-sequencer image:
+
+```shell
+docker build -t signup-sequencer .
+```
+
+Then run the tests. You need an Alchemy API key to run the docker compose environment used by the E2E tests.
+
+```shell
+export ALCHEMY_API_KEY=
+cd e2e_tests/scenarios && cargo test
+```
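+
+Each scenario is a separate integration test binary, so you can also run them one at a time (with `ALCHEMY_API_KEY`
+exported as above). A sketch; the test names below come from `e2e_tests/scenarios/tests`, and `RUST_LOG` and
+`QUIET_MODE` are read by the tests' tracing setup:
+
+```shell
+cd e2e_tests/scenarios
+# run a single scenario and print its captured output
+cargo test --test insert_100 -- --nocapture
+# reduce log verbosity
+QUIET_MODE=1 cargo test --test insert_delete_insert
+```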
-f "/keys/insertion_b10t30.ps" ]; then + curl "https://semaphore-mtb-trusted-setup-ceremony.s3.amazonaws.com/insertion_b10/insertion_b10t30.ps" -o /keys/insertion_b10t30.ps && + chown -f $(stat -c "%u:%g" /keys/.keep) /keys/insertion_b10t30.ps; + fi + semaphore-keys-init-deletion: + image: curlimages/curl:latest + user: "0:0" + volumes: + - ./keys:/keys + entrypoint: /bin/sh + restart: on-failure + command: + - "-c" + - > + if [ ! -f "/keys/deletion_b10t30.ps" ]; then + curl "https://semaphore-mtb-trusted-setup-ceremony.s3.amazonaws.com/deletion_b10/deletion_b10t30.ps" -o /keys/deletion_b10t30.ps && + chown -f $(stat -c "%u:%g" /keys/.keep) /keys/deletion_b10t30.ps; + fi + semaphore-insertion: + image: ghcr.io/worldcoin/semaphore-mtb:latest + hostname: semaphore-insertion + restart: always + ports: + - ${SEMAPHORE_INSERTION_PORT:-3001}:3001 + command: [ "start", "--keys-file", "/mtb/keys/insertion_b10t30.ps", "--prover-address", "0.0.0.0:3001", "--mode", "insertion" ] + volumes: + - ./keys:/mtb/keys + environment: + BATCH_TIMEOUT_SECONDS: 1 + depends_on: + semaphore-keys-init-insertion: + condition: service_completed_successfully + semaphore-deletion: + image: ghcr.io/worldcoin/semaphore-mtb:latest + hostname: semaphore-deletion + restart: always + ports: + - ${SEMAPHORE_DELETION_PORT:-3002}:3001 + command: [ "start", "--keys-file", "/mtb/keys/deletion_b10t30.ps", "--prover-address", "0.0.0.0:3001", "--mode", "deletion" ] + volumes: + - ./keys:/mtb/keys + environment: + BATCH_DELETION_TIMEOUT_SECONDS: 1 + depends_on: + semaphore-keys-init-deletion: + condition: service_completed_successfully + signup-sequencer-balancer: + image: haproxy:3.0.0 + hostname: signup-sequencer-balancer + restart: always + profiles: [ e2e-ha ] + ports: + - ${SIGNUP_SEQUENCER_BALANCER_PORT:-8080}:8080 + volumes: + - ./haproxy:/usr/local/etc/haproxy + depends_on: + - signup-sequencer-0 + signup-sequencer-0: &signup-sequencer-def + image: signup-sequencer + hostname: signup-sequencer-0 + profiles: [ e2e-ha ] + build: + context: ./../../ + depends_on: + - sequencer-db + - chain + - semaphore-insertion + - semaphore-deletion + - tx-sitter + restart: always + ports: + - ${SIGNUP_SEQUENCER_0_PORT:-9080}:8080 + volumes: + - ./signup_sequencer/config.toml:/config.toml + command: [ "/config.toml" ] + environment: + - RUST_LOG=debug +# signup-sequencer-1: +# <<: *signup-sequencer-def +# hostname: signup-sequencer-1 +# ports: +# - ${SIGNUP_SEQUENCER_0_PORT:-9081}:8080 +# signup-sequencer-2: +# <<: *signup-sequencer-def +# hostname: signup-sequencer-2 +# ports: +# - ${SIGNUP_SEQUENCER_0_PORT:-9082}:8080 +# signup-sequencer-3: +# <<: *signup-sequencer-def +# hostname: signup-sequencer-3 +# ports: +# - ${SIGNUP_SEQUENCER_0_PORT:-9083}:8080 +volumes: + tx_sitter_db_data: + driver: local + sequencer_db_data: + driver: local diff --git a/e2e_tests/docker-compose/haproxy/haproxy.cfg b/e2e_tests/docker-compose/haproxy/haproxy.cfg new file mode 100644 index 00000000..16533b6e --- /dev/null +++ b/e2e_tests/docker-compose/haproxy/haproxy.cfg @@ -0,0 +1,10 @@ +frontend http-in + bind *:8080 + default_backend http_back + +backend http_back + balance roundrobin + server signup-sequencer-0 signup-sequencer-0:8080 check +# server signup-sequencer-1 signup-sequencer-1:8080 check +# server signup-sequencer-2 signup-sequencer-2:8080 check +# server signup-sequencer-3 signup-sequencer-3:8080 check diff --git a/e2e_tests/docker-compose/keys/.keep b/e2e_tests/docker-compose/keys/.keep new file mode 100644 index 00000000..e69de29b diff --git 
diff --git a/e2e_tests/docker-compose/signup_sequencer/config.toml b/e2e_tests/docker-compose/signup_sequencer/config.toml
new file mode 100644
index 00000000..7c5f302a
--- /dev/null
+++ b/e2e_tests/docker-compose/signup_sequencer/config.toml
@@ -0,0 +1,28 @@
+[tree]
+tree_depth = 30
+dense_tree_prefix_depth = 10
+tree_gc_threshold = 10000000
+cache_file = "./cache_file"
+
+[server]
+address = "0.0.0.0:8080"
+
+[network]
+identity_manager_address = "0x48483748eb0446A16cAE79141D0688e3F624Cb73"
+
+[relayer]
+kind = "tx_sitter"
+tx_sitter_url = "http://tx-sitter:3000/1/api/G5CKNF3BTS2hRl60bpdYMNPqXvXsP-QZd2lrtmgctsnllwU9D3Z4D8gOt04M0QNH"
+tx_sitter_address = "0x1d7ffed610cc4cdC097ecDc835Ae5FEE93C9e3Da"
+tx_sitter_gas_limit = 2000000
+
+[providers]
+primary_network_provider = "http://chain:8545"
+
+[app]
+provers_urls = '[{"url": "http://semaphore-insertion:3001", "prover_type": "insertion", "batch_size": 10,"timeout_s": 30}, {"url": "http://semaphore-deletion:3001", "prover_type": "deletion", "batch_size": 10,"timeout_s": 30}]'
+batch_insertion_timeout = "30s"
+batch_deletion_timeout = "1s"
+
+[database]
+database = "postgres://postgres:postgres@sequencer-db:5432/sequencer?sslmode=disable"
diff --git a/e2e_tests/scenarios/Cargo.toml b/e2e_tests/scenarios/Cargo.toml
new file mode 100644
index 00000000..ec9a6a00
--- /dev/null
+++ b/e2e_tests/scenarios/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "e2e-tests"
+version = "0.1.0"
+edition = "2021"
+description = "End-to-end test scenarios for signup-sequencer."
+keywords = ["worldcoin", "protocol", "signup"]
+categories = ["cryptography::cryptocurrencies"]
+repository = "https://github.com/worldcoin/signup-sequencer"
+readme = "./../../Readme.md"
+license-file = "./../../LICENSE.md"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+signup-sequencer = { path = "./../.." }
+
+anyhow = "1.0"
+ethers = { version = "2.0.10" }
+hex = "0.4.3"
+hyper = { version = "^0.14.17", features = ["tcp", "http1", "http2", "client"] }
+rand = "0.8.5"
+retry = "2.0.0"
+serde_json = "1.0"
+tokio = { version = "1.0", features = ["full"] }
+tracing = "0.1"
+tracing-futures = "0.2"
+tracing-subscriber = "0.3.11"
+tracing-test = "0.2"
diff --git a/e2e_tests/scenarios/tests/common/api.rs b/e2e_tests/scenarios/tests/common/api.rs
new file mode 100644
index 00000000..eee76f2d
--- /dev/null
+++ b/e2e_tests/scenarios/tests/common/api.rs
@@ -0,0 +1,136 @@
+use std::time::Duration;
+
+use anyhow::Error;
+use hyper::client::HttpConnector;
+use hyper::{Body, Client, Request};
+use serde_json::{json, Value};
+use signup_sequencer::identity_tree::Hash;
+use signup_sequencer::server::data::{
+    DeletionRequest, InclusionProofRequest, InclusionProofResponse, InsertCommitmentRequest,
+};
+use tracing::debug;
+
+use crate::common::prelude::StatusCode;
+
+pub struct RawResponse {
+    pub status_code: StatusCode,
+    pub body: String,
+}
+
+pub async fn insert_identity(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+) -> anyhow::Result<()> {
+    debug!("Calling /insertIdentity");
+    let body = Body::from(serde_json::to_string(&InsertCommitmentRequest {
+        identity_commitment: *commitment,
+    })?);
+
+    let req = Request::builder()
+        .method("POST")
+        .uri(uri.to_owned() + "/insertIdentity")
+        .header("Content-Type", "application/json")
+        .body(body)
+        .expect("Failed to create insert identity hyper::Body");
+
+    let mut response = client
+        .request(req)
+        .await
+        .expect("Failed to execute request.");
+    let bytes = hyper::body::to_bytes(response.body_mut())
+        .await
+        .expect("Failed to convert response body to bytes");
+    if !response.status().is_success() {
+        return Err(Error::msg(format!(
+            "Failed to insert identity: response = {}",
+            response.status()
+        )));
+    }
+
+    assert!(bytes.is_empty());
+
+    Ok(())
+}
+
+pub async fn delete_identity(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+) -> anyhow::Result<()> {
+    debug!("Calling /deleteIdentity");
+    let body = Body::from(serde_json::to_string(&DeletionRequest {
+        identity_commitment: *commitment,
+    })?);
+
+    let req = Request::builder()
+        .method("POST")
+        .uri(uri.to_owned() + "/deleteIdentity")
+        .header("Content-Type", "application/json")
+        .body(body)
+        .expect("Failed to create delete identity hyper::Body");
+
+    let mut response = client
+        .request(req)
+        .await
+        .expect("Failed to execute request.");
+    let bytes = hyper::body::to_bytes(response.body_mut())
+        .await
+        .expect("Failed to convert response body to bytes");
+    if !response.status().is_success() {
+        return Err(Error::msg(format!(
+            "Failed to delete identity: response = {}",
+            response.status()
+        )));
+    }
+
+    assert!(bytes.is_empty());
+
+    Ok(())
+}
+
+pub async fn inclusion_proof_raw(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+) -> anyhow::Result<RawResponse> {
+    debug!("Calling /inclusionProof");
+    let body = Body::from(serde_json::to_string(&InclusionProofRequest {
+        identity_commitment: *commitment,
+    })?);
+
+    let req = Request::builder()
+        .method("POST")
+        .uri(uri.to_owned() + "/inclusionProof")
+        .header("Content-Type", "application/json")
+        .body(body)
+        .expect("Failed to create inclusion proof hyper::Body");
+
+    let mut response = client
+        .request(req)
+        .await
+        .expect("Failed to execute request.");
+    let bytes = hyper::body::to_bytes(response.body_mut())
+        .await
+        .expect("Failed to convert response body to bytes");
+    let result = String::from_utf8(bytes.into_iter().collect())
+        .expect("Could not parse response bytes to utf-8");
+
+    Ok(RawResponse {
+        status_code: response.status(),
+        body: result,
+    })
+}
+
+pub async fn inclusion_proof(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+) -> anyhow::Result<InclusionProofResponse> {
+    let result = inclusion_proof_raw(client, uri, commitment).await?;
+
+    let result_json = serde_json::from_str::<InclusionProofResponse>(&result.body)
+        .expect("Failed to parse response as json");
+
+    Ok(result_json)
+}
diff --git a/e2e_tests/scenarios/tests/common/docker_compose.rs b/e2e_tests/scenarios/tests/common/docker_compose.rs
new file mode 100644
index 00000000..81af9baf
--- /dev/null
+++ b/e2e_tests/scenarios/tests/common/docker_compose.rs
@@ -0,0 +1,269 @@
+use std::collections::HashMap;
+use std::process::{Command, Stdio};
+use std::sync::atomic::{AtomicU32, Ordering};
+use std::time::{Duration, Instant};
+
+use anyhow::{Context, Error};
+use hyper::{Body, Client};
+use rand::distributions::Alphanumeric;
+use rand::{thread_rng, Rng};
+use tracing::{debug, info};
+use tracing_subscriber::fmt::format;
+
+use crate::common::prelude::{Request, StatusCode};
+
+const LOCAL_ADDR: &str = "localhost";
+
+#[derive(Debug)]
+pub struct DockerComposeGuard<'a> {
+    // Current working dir containing compose.yml
+    cwd: &'a str,
+    project_name: String,
+    chain_port: u32,
+    tx_sitter_db_port: u32,
+    sequencer_db_port: u32,
+    tx_sitter_port: u32,
+    semaphore_insertion_port: u32,
+    semaphore_deletion_port: u32,
+    signup_sequencer_0_port: u32,
+    signup_sequencer_balancer_port: u32,
+}
+
+impl<'a> DockerComposeGuard<'a> {
+    pub fn get_local_addr(&self) -> String {
+        format!("{}:{}", LOCAL_ADDR, self.signup_sequencer_balancer_port)
+    }
+
+    pub async fn restart_sequencer(&self) -> anyhow::Result<()> {
+        let (stdout, stderr) = run_cmd_to_output(
+            self.cwd,
+            self.envs_with_ports(),
+            self.generate_command("restart signup-sequencer-0"),
+        )
+        .context("Restarting sequencer.")?;
+
+        debug!(
+            "Docker compose restart output:\n stdout:\n{}\nstderr:\n{}\n",
+            stdout, stderr
+        );
+
+        await_running(self).await
+    }
+
+    fn envs_with_ports(&self) -> HashMap<String, String> {
+        let mut res = HashMap::new();
+
+        res.insert(String::from("CHAIN_PORT"), self.chain_port.to_string());
+        res.insert(
+            String::from("TX_SITTER_DB_PORT"),
+            self.tx_sitter_db_port.to_string(),
+        );
+        res.insert(
+            String::from("SEQUENCER_DB_PORT"),
+            self.sequencer_db_port.to_string(),
+        );
+        res.insert(
+            String::from("TX_SITTER_PORT"),
+            self.tx_sitter_port.to_string(),
+        );
+        res.insert(
+            String::from("SEMAPHORE_INSERTION_PORT"),
+            self.semaphore_insertion_port.to_string(),
+        );
+        res.insert(
+            String::from("SEMAPHORE_DELETION_PORT"),
+            self.semaphore_deletion_port.to_string(),
+        );
+        res.insert(
+            String::from("SIGNUP_SEQUENCER_0_PORT"),
+            self.signup_sequencer_0_port.to_string(),
+        );
+        res.insert(
+            String::from("SIGNUP_SEQUENCER_BALANCER_PORT"),
+            self.signup_sequencer_balancer_port.to_string(),
+        );
+
+        res
+    }
+
+    fn generate_command(&self, args: &str) -> String {
+        format!(
+            "docker compose -p {} --profile e2e-ha {}",
+            self.project_name, args
+        )
+    }
+
+    fn update_balancer_port(&mut self, signup_sequencer_balancer_port: u32) {
+        self.signup_sequencer_balancer_port = signup_sequencer_balancer_port
+    }
+}
+
+impl<'a> Drop for DockerComposeGuard<'a> {
+    fn drop(&mut self) {
+        // May run when compose is not up, but it is better to be sure it's down.
+        // The '-v' flag removes all volumes and networks.
+        if let Err(err) = run_cmd(
+            self.cwd,
+            self.envs_with_ports(),
+            self.generate_command("down -v"),
+        ) {
+            eprintln!("Failed to put down docker compose: {}", err);
+        }
+    }
+}
+
+/// Starts a docker compose infrastructure. It will be stopped and removed when
+/// the guard is dropped.
+///
+/// Note that we're using sync code here so we'll block the executor - but this
+/// is fine, because the spawned container will still run in the background.
+pub async fn setup(cwd: &str) -> anyhow::Result<DockerComposeGuard<'_>> {
+    // A port value of 0 lets Docker pick a free host port; the balancer port
+    // is queried back from `docker compose port` below.
+    let mut res = DockerComposeGuard {
+        cwd,
+        project_name: generate_project_name(),
+        chain_port: 0,
+        tx_sitter_db_port: 0,
+        sequencer_db_port: 0,
+        tx_sitter_port: 0,
+        semaphore_insertion_port: 0,
+        semaphore_deletion_port: 0,
+        signup_sequencer_0_port: 0,
+        signup_sequencer_balancer_port: 0,
+    };
+
+    debug!("Configuration: {:#?}", res);
+
+    let (stdout, stderr) = run_cmd_to_output(
+        res.cwd,
+        res.envs_with_ports(),
+        res.generate_command("up -d"),
+    )
+    .context("Starting e2e test docker compose infrastructure.")?;
+
+    debug!(
+        "Docker compose starting output:\n stdout:\n{}\nstderr:\n{}\n",
+        stdout, stderr
+    );
+
+    tokio::time::sleep(Duration::from_secs(1)).await;
+
+    let (stdout, stderr) = run_cmd_to_output(
+        res.cwd,
+        res.envs_with_ports(),
+        res.generate_command("port signup-sequencer-balancer 8080"),
+    )
+    .context("Looking for balancer selected port.")?;
+
+    debug!(
+        "Docker compose starting output:\n stdout:\n{}\nstderr:\n{}\n",
+        stdout, stderr
+    );
+
+    let balancer_port = parse_exposed_port(stdout);
+    res.update_balancer_port(balancer_port);
+
+    await_running(&res).await?;
+
+    Ok(res)
+}
+
+fn generate_project_name() -> String {
+    thread_rng()
+        .sample_iter(Alphanumeric)
+        .filter(|c| c.is_ascii_lowercase())
+        .take(8)
+        .map(char::from)
+        .collect()
+}
+
+async fn await_running(docker_compose_guard: &DockerComposeGuard<'_>) -> anyhow::Result<()> {
+    let timeout = Duration::from_secs_f32(30.0);
+    let check_interval = Duration::from_secs_f32(2.0);
+    let min_success_counts = 5;
+    let mut success_counter = 0;
+
+    let timer = Instant::now();
+    loop {
+        if let Ok(true) = check_health(docker_compose_guard.get_local_addr()).await {
+            success_counter += 1;
+        }
+
+        if success_counter >= min_success_counts {
+            return Ok(());
+        }
+
+        if timer.elapsed() > timeout {
+            return Err(Error::msg("Timed out waiting for healthcheck."));
+        }
+
+        tokio::time::sleep(check_interval).await;
+    }
+}
+
+async fn check_health(local_addr: String) -> anyhow::Result<bool> {
+    let uri = format!("http://{}", local_addr);
+    let client = Client::new();
+
+    let healthcheck = Request::builder()
+        .method("GET")
+        .uri(format!("{uri}/health"))
+        .header("Content-Type", "application/json")
+        .body(Body::empty())
+        .unwrap();
+
+    let response = client.request(healthcheck).await?;
+
+    Ok(response.status() == StatusCode::OK)
+}
+
+fn run_cmd_to_output(
+    cwd: &str,
+    envs: HashMap<String, String>,
+    cmd_str: String,
+) -> anyhow::Result<(String, String)> {
+    let args: Vec<_> = cmd_str.split(' ').collect();
+    let mut command = Command::new(args[0]);
+
+    for arg in &args[1..]
+    {
+        command.arg(arg);
+    }
+
+    command
+        .current_dir(cwd)
+        .envs(envs)
+        .stdout(Stdio::piped())
+        .stderr(Stdio::piped());
+
+    let output = command
+        .output()
+        .with_context(|| format!("Failed to run command: {}", cmd_str))?;
+
+    let stdout_utf = String::from_utf8(output.stdout)?;
+    let stderr_utf = String::from_utf8(output.stderr)?;
+
+    Ok((stdout_utf.trim().to_string(), stderr_utf.trim().to_string()))
+}
+
+fn run_cmd(cwd: &str, envs: HashMap<String, String>, cmd_str: String) -> anyhow::Result<()> {
+    run_cmd_to_output(cwd, envs, cmd_str)?;
+
+    Ok(())
+}
+
+// `docker compose port` prints a socket address like `0.0.0.0:55001`;
+// take the port from the last mapping printed.
+fn parse_exposed_port(s: String) -> u32 {
+    let parts: Vec<_> = s
+        .split_whitespace()
+        .map(|s| s.trim())
+        .filter(|s| !s.is_empty())
+        .collect();
+
+    parts
+        .last()
+        .unwrap()
+        .split(':')
+        .last()
+        .unwrap()
+        .parse::<u32>()
+        .unwrap()
+}
diff --git a/e2e_tests/scenarios/tests/common/mod.rs b/e2e_tests/scenarios/tests/common/mod.rs
new file mode 100644
index 00000000..55219746
--- /dev/null
+++ b/e2e_tests/scenarios/tests/common/mod.rs
@@ -0,0 +1,181 @@
+// We include this module in multiple integration test crates - so some code
+// may not be used in some cases
+#![allow(dead_code, clippy::too_many_arguments, unused_imports)]
+
+use std::time::Duration;
+
+use anyhow::anyhow;
+use ethers::types::U256;
+use hyper::client::HttpConnector;
+use hyper::Client;
+use serde_json::Value;
+use signup_sequencer::identity_tree::ProcessedStatus::Mined;
+use signup_sequencer::identity_tree::{Hash, Status};
+use signup_sequencer::server::data::InclusionProofResponse;
+use tokio::time::sleep;
+use tracing::error;
+use tracing_subscriber::fmt::format::FmtSpan;
+use tracing_subscriber::fmt::time::Uptime;
+
+use crate::common::api::{delete_identity, inclusion_proof, inclusion_proof_raw, insert_identity};
+use crate::common::prelude::StatusCode;
+
+mod api;
+pub mod docker_compose;
+
+#[allow(unused)]
+pub mod prelude {
+    pub use std::time::Duration;
+
+    pub use anyhow::{Context, Error};
+    pub use hyper::client::HttpConnector;
+    pub use hyper::{Body, Client, Request, StatusCode};
+    pub use retry::delay::Fixed;
+    pub use retry::retry;
+    pub use serde_json::json;
+    pub use tokio::spawn;
+    pub use tokio::task::JoinHandle;
+    pub use tracing::{error, info, instrument};
+    pub use tracing_subscriber::fmt::format;
+
+    pub use super::{
+        bad_request_inclusion_proof_with_retries, delete_identity_with_retries,
+        generate_test_commitments, init_tracing_subscriber, insert_identity_with_retries,
+        mined_inclusion_proof_with_retries,
+    };
+    pub use crate::common::api::{
+        delete_identity, inclusion_proof, inclusion_proof_raw, insert_identity,
+    };
+}
+
+/// Initializes the tracing subscriber.
+///
+/// Set the `QUIET_MODE` environment variable to reduce the complexity of the
+/// log output.
+pub fn init_tracing_subscriber() {
+    let quiet_mode = std::env::var("QUIET_MODE").is_ok();
+    let rust_log = std::env::var("RUST_LOG").unwrap_or("info".to_string());
+    let result = if quiet_mode {
+        tracing_subscriber::fmt()
+            .with_env_filter(rust_log)
+            .compact()
+            .with_timer(Uptime::default())
+            .try_init()
+    } else {
+        tracing_subscriber::fmt()
+            .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
+            .with_line_number(true)
+            .with_env_filter(rust_log)
+            .with_timer(Uptime::default())
+            // .pretty()
+            .try_init()
+    };
+    if let Err(error) = result {
+        error!(error, "Failed to initialize tracing_subscriber");
+    }
+}
+
+/// Generates random identity commitments for testing.
+///
+/// # Note
+/// Only the low 64 bits of each 256-bit commitment are randomized. This makes
+/// the values easy to generate and keeps them comfortably below the snark
+/// scalar field modulus.
+pub fn generate_test_commitments(count: usize) -> Vec<Hash> {
+    (0..count)
+        .map(|_| Hash::from(rand::random::<u64>()))
+        .collect()
+}
+
+pub async fn delete_identity_with_retries(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+    retries_count: usize,
+    retries_interval: f32,
+) -> anyhow::Result<()> {
+    let mut last_err = None;
+
+    for _ in 0..retries_count {
+        match delete_identity(client, uri, commitment).await {
+            Ok(_) => return Ok(()),
+            Err(err) => last_err = Some(err),
+        }
+        sleep(Duration::from_secs_f32(retries_interval)).await;
+    }
+
+    Err(last_err.unwrap_or_else(|| anyhow!("All retries failed without error")))
+}
+
+pub async fn insert_identity_with_retries(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+    retries_count: usize,
+    retries_interval: f32,
+) -> anyhow::Result<()> {
+    let mut last_err = None;
+    for _ in 0..retries_count {
+        match insert_identity(client, uri, commitment).await {
+            Ok(_) => return Ok(()),
+            Err(err) => last_err = Some(err),
+        }
+        sleep(Duration::from_secs_f32(retries_interval)).await;
+    }
+
+    Err(last_err.unwrap_or_else(|| anyhow!("All retries failed without error")))
+}
+
+pub async fn mined_inclusion_proof_with_retries(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+    retries_count: usize,
+    retries_interval: f32,
+) -> anyhow::Result<InclusionProofResponse> {
+    let mut last_res = Err(anyhow!("No calls at all"));
+    for _ in 0..retries_count {
+        last_res = inclusion_proof(client, uri, commitment).await;
+
+        if let Ok(ref inclusion_proof_json) = last_res {
+            if inclusion_proof_json.0.status == Status::Processed(Mined) {
+                break;
+            }
+        };
+
+        sleep(Duration::from_secs_f32(retries_interval)).await;
+    }
+
+    let inclusion_proof_json = last_res?;
+
+    assert_eq!(inclusion_proof_json.0.status, Status::Processed(Mined));
+
+    Ok(inclusion_proof_json)
+}
+
+pub async fn bad_request_inclusion_proof_with_retries(
+    client: &Client<HttpConnector>,
+    uri: &str,
+    commitment: &Hash,
+    retries_count: usize,
+    retries_interval: f32,
+) -> anyhow::Result<()> {
+    let mut last_err = None;
+
+    for _ in 0..retries_count {
+        match inclusion_proof_raw(client, uri, commitment).await {
+            Ok(response) if response.status_code == StatusCode::BAD_REQUEST => return Ok(()),
+            Err(err) => {
+                error!("error: {}", err);
+                last_err = Some(err);
+            }
+            _ => {}
+        }
+        sleep(Duration::from_secs_f32(retries_interval)).await;
+    }
+
+    Err(last_err.unwrap_or_else(|| anyhow!("All retries failed to return BAD_REQUEST")))
+}
diff --git a/e2e_tests/scenarios/tests/insert_100.rs b/e2e_tests/scenarios/tests/insert_100.rs
new file mode 100644
index 00000000..bd6861e6
--- /dev/null
+++ b/e2e_tests/scenarios/tests/insert_100.rs
@@ -0,0 +1,29 @@
+mod common;
+
+use common::prelude::*;
+
+use crate::common::docker_compose;
+
+#[tokio::test]
+async fn insert_100() -> anyhow::Result<()> {
+    // Initialize logging for the test.
+    init_tracing_subscriber();
+    info!("Starting e2e test");
+
+    let docker_compose = docker_compose::setup("./../docker-compose").await?;
+
+    let uri = format!("http://{}", docker_compose.get_local_addr());
+    let client = Client::new();
+
+    let identities = generate_test_commitments(10);
+
+    for commitment in identities.iter() {
+        insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
+    }
+
+    for commitment in identities.iter() {
+        mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
+    }
+
+    Ok(())
+}
diff --git a/e2e_tests/scenarios/tests/insert_delete_insert.rs b/e2e_tests/scenarios/tests/insert_delete_insert.rs
new file mode 100644
index 00000000..8708bf76
--- /dev/null
+++ b/e2e_tests/scenarios/tests/insert_delete_insert.rs
@@ -0,0 +1,45 @@
+mod common;
+
+use common::prelude::*;
+
+use crate::common::docker_compose;
+
+#[tokio::test]
+async fn insert_delete_insert() -> anyhow::Result<()> {
+    // Initialize logging for the test.
+    init_tracing_subscriber();
+    info!("Starting e2e test");
+
+    let docker_compose = docker_compose::setup("./../docker-compose").await?;
+
+    let uri = format!("http://{}", docker_compose.get_local_addr());
+    let client = Client::new();
+
+    let identities = generate_test_commitments(10);
+
+    for commitment in identities.iter() {
+        insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
+    }
+
+    for commitment in identities.iter() {
+        mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
+    }
+
+    let first_commitment = identities.first().unwrap();
+
+    delete_identity_with_retries(&client, &uri, first_commitment, 10, 3.0).await?;
+    bad_request_inclusion_proof_with_retries(&client, &uri, first_commitment, 60, 10.0).await?;
+
+    let new_identities = generate_test_commitments(10);
+
+    for commitment in new_identities.iter() {
+        insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
+    }
+
+    for commitment in new_identities.iter() {
+        mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
+    }
+
+    Ok(())
+}
diff --git a/e2e_tests/scenarios/tests/insert_restart_insert.rs b/e2e_tests/scenarios/tests/insert_restart_insert.rs
new file mode 100644
index 00000000..49c6f426
--- /dev/null
+++ b/e2e_tests/scenarios/tests/insert_restart_insert.rs
@@ -0,0 +1,41 @@
+mod common;
+
+use common::prelude::*;
+
+use crate::common::docker_compose;
+
+#[tokio::test]
+async fn insert_restart_insert() -> anyhow::Result<()> {
+    // Initialize logging for the test.
+    init_tracing_subscriber();
+    info!("Starting e2e test");
+
+    let docker_compose = docker_compose::setup("./../docker-compose").await?;
+
+    let uri = format!("http://{}", docker_compose.get_local_addr());
+    let client = Client::new();
+
+    let identities = generate_test_commitments(10);
+
+    for commitment in identities.iter() {
+        insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
+    }
+
+    for commitment in identities.iter() {
+        mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
+    }
+
+    docker_compose.restart_sequencer().await?;
+
+    // Insert a fresh batch of identities after the restart.
+    let identities = generate_test_commitments(10);
+
+    for commitment in identities.iter() {
+        insert_identity_with_retries(&client, &uri, commitment, 10, 3.0).await?;
+    }
+
+    for commitment in identities.iter() {
+        mined_inclusion_proof_with_retries(&client, &uri, commitment, 60, 10.0).await?;
+    }
+
+    Ok(())
+}
diff --git a/src/identity_tree/mod.rs b/src/identity_tree/mod.rs
index 2b32dcef..303a1557 100644
--- a/src/identity_tree/mod.rs
+++ b/src/identity_tree/mod.rs
@@ -51,7 +51,7 @@ pub struct RootItem {
     pub mined_valid_as_of: Option<DateTime<Utc>>,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
 #[serde(rename_all = "camelCase")]
 pub struct InclusionProof {
     pub status: Status,
diff --git a/src/server/data.rs b/src/server/data.rs
index 7e4ef702..6697e619 100644
--- a/src/server/data.rs
+++ b/src/server/data.rs
@@ -8,7 +8,7 @@ use crate::identity_tree::{
 };
 use crate::prover::{ProverConfig, ProverType};
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
 #[serde(transparent)]
 pub struct InclusionProofResponse(pub InclusionProof);
diff --git a/supply-chain/audits.toml b/supply-chain/audits.toml
index dc19d999..f32df1f3 100644
--- a/supply-chain/audits.toml
+++ b/supply-chain/audits.toml
@@ -1,7 +1,20 @@
 # cargo-vet audits file
 
-[audits]
+[[audits.retry]]
+who = "Piotr Heilman "
+criteria = "safe-to-deploy"
+version = "2.0.0"
+
+[[audits.tracing-test]]
+who = "Piotr Heilman "
+criteria = "safe-to-deploy"
+version = "0.2.4"
+
+[[audits.tracing-test-macro]]
+who = "Piotr Heilman "
+criteria = "safe-to-deploy"
+version = "0.2.4"
 
 [[trusted.anstream]]
 criteria = "safe-to-deploy"
diff --git a/supply-chain/config.toml b/supply-chain/config.toml
index 7f9f4029..6921af04 100644
--- a/supply-chain/config.toml
+++ b/supply-chain/config.toml
@@ -1704,14 +1704,6 @@ criteria = "safe-to-deploy"
 version = "0.2.25"
 criteria = "safe-to-deploy"
 
-[[exemptions.tracing-test]]
-version = "0.2.4"
-criteria = "safe-to-run"
-
-[[exemptions.tracing-test-macro]]
-version = "0.2.4"
-criteria = "safe-to-run"
-
 [[exemptions.tungstenite]]
 version = "0.20.1"
 criteria = "safe-to-deploy"
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 9c8968c7..4963c0b6 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -59,7 +59,7 @@ pub mod prelude {
         DEFAULT_TREE_DENSE_PREFIX_DEPTH, DEFAULT_TREE_DEPTH,
     };
     pub use super::{
-        abi as ContractAbi, generate_reference_proof_json, generate_test_identities,
+        abi as ContractAbi, generate_reference_proof, generate_test_identities,
         init_tracing_subscriber, spawn_app, spawn_deps, spawn_mock_deletion_prover,
         spawn_mock_insertion_prover, test_inclusion_proof, test_insert_identity,
         test_verify_proof, test_verify_proof_on_chain,
@@ -76,7 +76,13 @@ use std::sync::{Arc, Once};
 
 use futures::stream::FuturesUnordered;
 use futures::StreamExt;
 use hyper::StatusCode;
-use signup_sequencer::identity_tree::{Status, TreeState, TreeVersionReadOps};
+use semaphore::poseidon_tree::Proof;
+use signup_sequencer::identity_tree::ProcessedStatus::{Mined, Pending};
+use signup_sequencer::identity_tree::{InclusionProof, Status, TreeState, TreeVersionReadOps};
+use signup_sequencer::server::data::{
+    AddBatchSizeRequest, DeletionRequest, InclusionProofRequest, InclusionProofResponse,
+    InsertCommitmentRequest, RecoveryRequest, RemoveBatchSizeRequest, VerifySemaphoreProofRequest,
+};
 use signup_sequencer::task_monitor::TaskMonitor;
 use testcontainers::clients::Cli;
 use tracing::trace;
@@ -269,28 +275,28 @@ pub async fn test_inclusion_proof(
             .expect("Failed to convert response body to bytes");
         let result = String::from_utf8(bytes.into_iter().collect())
             .expect("Could not parse response bytes to utf-8");
-        let result_json = serde_json::from_str::<serde_json::Value>(&result)
+        let result = serde_json::from_str::<InclusionProofResponse>(&result)
             .expect("Failed to parse response as json");
-        let status = result_json["status"]
-            .as_str()
-            .expect("Failed to get status");
-
-        if status == "pending" {
-            assert_eq!(
-                result_json,
-                generate_reference_proof_json(ref_tree, leaf_index, "pending")
-            );
-            assert_eq!(response.status(), StatusCode::ACCEPTED);
-            info!("Got pending, waiting 5 seconds, iteration {}", i);
-            tokio::time::sleep(Duration::from_secs(5)).await;
-        } else if status == "mined" {
-            // We don't differentiate between these 2 states in tests
-            let proof_json = generate_reference_proof_json(ref_tree, leaf_index, status);
-            assert_eq!(result_json, proof_json);
-            return;
-        } else {
-            panic!("Unexpected status: {}", status);
+        match result.0.status {
+            Status::Processed(Pending) => {
+                assert_eq!(
+                    result,
+                    generate_reference_proof(ref_tree, leaf_index, Status::Processed(Pending))
+                );
+                assert_eq!(response.status(), StatusCode::ACCEPTED);
+                info!("Got pending, waiting 5 seconds, iteration {}", i);
+                tokio::time::sleep(Duration::from_secs(5)).await;
+            }
+            Status::Processed(Mined) => {
+                // We don't differentiate between these 2 states in tests
+                let proof_json =
+                    generate_reference_proof(ref_tree, leaf_index, Status::Processed(Mined));
+                assert_eq!(result, proof_json);
+
+                return;
+            }
+            _ => panic!("Unexpected status: {:?}", result.0.status),
         }
     }
 
@@ -387,18 +393,12 @@ pub async fn test_inclusion_status(
         result: {:?}",
         result
     );
-    let result_json = serde_json::from_str::<serde_json::Value>(&result)
+    let result = serde_json::from_str::<InclusionProofResponse>(&result)
         .expect("Failed to parse response as json");
-    let status = result_json["status"]
-        .as_str()
-        .expect("Failed to get status");
 
     let expected_status = expected_status.into();
 
-    assert_eq!(
-        expected_status,
-        Status::from_str(status).expect("Could not convert str to Status")
-    );
+    assert_eq!(expected_status, result.0.status);
 }
 
 #[instrument(skip_all)]
@@ -497,17 +497,12 @@ pub async fn test_add_batch_size(
     prover_type: ProverType,
     client: &Client<HttpConnector>,
 ) -> anyhow::Result<()> {
-    let prover_url_string: String = prover_url.into();
-
-    let body = Body::from(
-        json!({
-            "url": prover_url_string,
-            "batchSize": batch_size,
-            "timeoutSeconds": 3,
-            "proverType": prover_type
-        })
-        .to_string(),
-    );
+    let body = Body::from(serde_json::to_string(&AddBatchSizeRequest {
+        url: prover_url.into(),
+        batch_size: batch_size as usize,
+        timeout_seconds: 3,
+        prover_type,
+    })?);
     let request = Request::builder()
         .method("POST")
         .uri(uri.into() + "/addBatchSize")
@@ -531,8 +526,10 @@ pub async fn test_remove_batch_size(
     prover_type: ProverType,
     expect_failure: bool,
 ) -> anyhow::Result<()> {
-    let body =
-        Body::from(json!({ "batchSize": batch_size, "proverType": prover_type }).to_string());
+    let body = Body::from(serde_json::to_string(&RemoveBatchSizeRequest {
+        batch_size: batch_size as usize,
+        prover_type,
+    })?);
     let request = Request::builder()
         .method("POST")
         .uri(uri.into() + "/removeBatchSize")
@@ -593,41 +590,41 @@ pub async fn test_insert_identity(
 
 fn construct_inclusion_proof_body(identity_commitment: &Hash) -> Body {
     Body::from(
-        json!({
-            "identityCommitment": identity_commitment,
+        serde_json::to_string(&InclusionProofRequest {
+            identity_commitment: *identity_commitment,
         })
-        .to_string(),
+        .expect("Cannot serialize InclusionProofRequest"),
     )
 }
 
 fn construct_delete_identity_body(identity_commitment: &Hash) -> Body {
     Body::from(
-        json!({
-            "identityCommitment": identity_commitment,
+        serde_json::to_string(&DeletionRequest {
+            identity_commitment: *identity_commitment,
         })
-        .to_string(),
+        .expect("Cannot serialize DeletionRequest"),
     )
 }
 
 pub fn construct_recover_identity_body(
-    prev_identity_commitment: &Hash,
+    previous_identity_commitment: &Hash,
     new_identity_commitment: &Hash,
 ) -> Body {
     Body::from(
-        json!({
-            "previousIdentityCommitment":prev_identity_commitment ,
-            "newIdentityCommitment": new_identity_commitment,
+        serde_json::to_string(&RecoveryRequest {
+            previous_identity_commitment: *previous_identity_commitment,
+            new_identity_commitment: *new_identity_commitment,
         })
-        .to_string(),
+        .expect("Cannot serialize RecoveryRequest"),
     )
 }
 
 pub fn construct_insert_identity_body(identity_commitment: &Field) -> Body {
     Body::from(
-        json!({
-            "identityCommitment": identity_commitment,
+        serde_json::to_string(&InsertCommitmentRequest {
+            identity_commitment: *identity_commitment,
         })
-        .to_string(),
+        .expect("Cannot serialize InsertCommitmentRequest"),
     )
 }
 
@@ -639,14 +636,14 @@ fn construct_verify_proof_body(
     proof: protocol::Proof,
 ) -> Body {
     Body::from(
-        json!({
-            "root": root,
-            "signalHash": signal_hash,
-            "nullifierHash": nullifer_hash,
-            "externalNullifierHash": external_nullifier_hash,
-            "proof": proof,
+        serde_json::to_string(&VerifySemaphoreProofRequest {
+            root,
+            signal_hash,
+            nullifier_hash: nullifer_hash,
+            external_nullifier_hash,
+            proof,
         })
-        .to_string(),
+        .expect("Cannot serialize VerifySemaphoreProofRequest"),
    )
 }
 
@@ -870,27 +867,16 @@ pub fn init_tracing_subscriber() {
     });
 }
 
-pub fn generate_reference_proof_json(
+pub fn generate_reference_proof(
     ref_tree: &PoseidonTree,
     leaf_idx: usize,
-    status: &str,
-) -> serde_json::Value {
-    let proof = ref_tree
-        .proof(leaf_idx)
-        .unwrap()
-        .0
-        .iter()
-        .map(|branch| match branch {
-            Branch::Left(hash) => json!({ "Left": hash }),
-            Branch::Right(hash) => json!({ "Right": hash }),
-        })
-        .collect::<Vec<_>>();
-    let root = ref_tree.root();
-    json!({
-        "status": status,
-        "root": root,
-        "proof": proof,
-        "message": serde_json::Value::Null
+    status: Status,
+) -> InclusionProofResponse {
+    InclusionProofResponse(InclusionProof {
+        status,
+        root: Some(ref_tree.root()),
+        proof: ref_tree.proof(leaf_idx),
+        message: None,
     })
 }