Commit

Refactor code and introduce offchain mode.
piohei committed Jun 12, 2024
1 parent 3c40f15 commit 9880502
Showing 44 changed files with 994 additions and 584 deletions.
86 changes: 48 additions & 38 deletions src/app.rs
@@ -12,15 +12,17 @@ use crate::contracts::IdentityManager;
use crate::database::query::DatabaseQuery as _;
use crate::database::Database;
use crate::ethereum::Ethereum;
use crate::identity::transaction_manager::{
IdentityTransactionManager, OnChainIdentityTransactionManager,
use crate::identity::processor::{
IdentityProcessor, OffChainIdentityProcessor, OnChainIdentityProcessor,
};
use crate::identity::validator::IdentityValidator;
use crate::identity_tree::initializer::TreeInitializer;
use crate::identity_tree::{
Hash, InclusionProof, ProcessedStatus, RootItem, TreeState, TreeVersionReadOps,
};
use crate::prover::map::initialize_prover_maps;
use crate::prover::repository::ProverRepository;
use crate::prover::validator::ProofValidator;
use crate::prover::{ProverConfig, ProverType};
use crate::server::data::{
InclusionProofResponse, ListBatchSizesResponse, VerifySemaphoreProofQuery,
@@ -30,13 +32,15 @@ use crate::server::error::Error as ServerError;
use crate::utils::retry_tx;

pub struct App {
pub database: Arc<Database>,
pub identity_manager: Arc<IdentityManager>,
pub transaction_manager: Arc<dyn IdentityTransactionManager>,
tree_state: OnceLock<TreeState>,
pub config: Config,
pub database: Arc<Database>,
pub identity_manager: Arc<IdentityManager>,
pub identity_processor: Arc<dyn IdentityProcessor>,
pub prover_repository: Arc<ProverRepository>,
tree_state: OnceLock<TreeState>,
pub config: Config,

pub identity_validator: IdentityValidator,
pub proof_validator: Arc<ProofValidator>,
}

impl App {
@@ -65,32 +69,39 @@ impl App {

let (insertion_prover_map, deletion_prover_map) = initialize_prover_maps(provers)?;

let identity_manager = Arc::new(
IdentityManager::new(
&config,
ethereum.clone(),
insertion_prover_map,
deletion_prover_map,
)
.await?,
);
let identity_manager = Arc::new(IdentityManager::new(&config, ethereum.clone()).await?);

let prover_repository = Arc::new(ProverRepository::new(
insertion_prover_map,
deletion_prover_map,
));

let transaction_manager = Arc::new(OnChainIdentityTransactionManager::new(
ethereum.clone(),
config.clone(),
database.clone(),
identity_manager.clone(),
)?);
let proof_validator = Arc::new(ProofValidator::new(&config));

let identity_processor: Arc<dyn IdentityProcessor> = if config.offchain_mode.enabled {
Arc::new(OffChainIdentityProcessor::new(database.clone()))
} else {
Arc::new(OnChainIdentityProcessor::new(
ethereum.clone(),
config.clone(),
database.clone(),
identity_manager.clone(),
prover_repository.clone(),
proof_validator.clone(),
)?)
};

let identity_validator = Default::default();
let identity_validator = IdentityValidator::new(&config);

let app = Arc::new(Self {
database,
identity_manager,
transaction_manager,
identity_processor,
prover_repository,
tree_state: OnceLock::new(),
config,
identity_validator,
proof_validator,
});

Ok(app)
@@ -101,8 +112,7 @@ impl App {
pub async fn init_tree(self: Arc<Self>) -> anyhow::Result<()> {
let tree_state = TreeInitializer::new(
self.database.clone(),
self.identity_manager.clone(),
self.transaction_manager.clone(),
self.identity_processor.clone(),
self.config.tree.clone(),
)
.run()
@@ -132,12 +142,12 @@ impl App {
/// queue malfunctions.
#[instrument(level = "debug", skip(self))]
pub async fn insert_identity(&self, commitment: Hash) -> Result<(), ServerError> {
if commitment == self.identity_manager.initial_leaf_value() {
if self.identity_validator.is_initial_leaf(&commitment) {
warn!(?commitment, "Attempt to insert initial leaf.");
return Err(ServerError::InvalidCommitment);
}

if !self.identity_manager.has_insertion_provers().await {
if !self.prover_repository.has_insertion_provers().await {
warn!(
?commitment,
"Identity Manager has no insertion provers. Add provers with /addBatchSize \
@@ -146,7 +156,7 @@ impl App {
return Err(ServerError::NoProversOnIdInsert);
}

if !self.identity_validator.identity_is_reduced(commitment) {
if !self.identity_validator.is_reduced(commitment) {
warn!(
?commitment,
"The provided commitment is not an element of the field."
@@ -188,7 +198,7 @@ impl App {
commitment: &Hash,
) -> Result<(), ServerError> {
// Ensure that deletion provers exist
if !self.identity_manager.has_deletion_provers().await {
if !self.prover_repository.has_deletion_provers().await {
warn!(
?commitment,
"Identity Manager has no deletion provers. Add provers with /addBatchSize request."
@@ -245,23 +255,23 @@ impl App {
new_commitment: &Hash,
) -> Result<(), ServerError> {
retry_tx!(self.database.pool, tx, {
if *new_commitment == self.identity_manager.initial_leaf_value() {
if self.identity_validator.is_initial_leaf(new_commitment) {
warn!(
?new_commitment,
"Attempt to insert initial leaf in recovery."
);
return Err(ServerError::InvalidCommitment);
}

if !self.identity_manager.has_insertion_provers().await {
if !self.prover_repository.has_insertion_provers().await {
warn!(
?new_commitment,
"Identity Manager has no provers. Add provers with /addBatchSize request."
);
return Err(ServerError::NoProversOnIdInsert);
}

if !self.identity_validator.identity_is_reduced(*new_commitment) {
if !self.identity_validator.is_reduced(*new_commitment) {
warn!(
?new_commitment,
"The new identity commitment is not reduced."
Expand Down Expand Up @@ -320,7 +330,7 @@ impl App {
timeout_seconds: u64,
prover_type: ProverType,
) -> Result<(), ServerError> {
self.identity_manager
self.prover_repository
.add_batch_size(&url, batch_size, timeout_seconds, prover_type)
.await?;

@@ -341,7 +351,7 @@ impl App {
batch_size: usize,
prover_type: ProverType,
) -> Result<(), ServerError> {
self.identity_manager
self.prover_repository
.remove_batch_size(batch_size, prover_type)
.await?;

@@ -355,7 +365,7 @@ impl App {
/// Will return `Err` if something unknown went wrong.
#[instrument(level = "debug", skip(self))]
pub async fn list_batch_sizes(&self) -> Result<ListBatchSizesResponse, ServerError> {
let batches = self.identity_manager.list_batch_sizes().await?;
let batches = self.prover_repository.list_batch_sizes().await?;

Ok(ListBatchSizesResponse::from(batches))
}
@@ -368,7 +378,7 @@ impl App {
&self,
commitment: &Hash,
) -> Result<InclusionProofResponse, ServerError> {
if commitment == &self.identity_manager.initial_leaf_value() {
if self.identity_validator.is_initial_leaf(commitment) {
return Err(ServerError::InvalidCommitment);
}

@@ -424,7 +434,7 @@ impl App {
request.signal_hash,
request.external_nullifier_hash,
&request.proof,
self.identity_manager.tree_depth(),
self.config.tree.tree_depth,
);

match checked {
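The main architectural change in src/app.rs is that App now holds an Arc<dyn IdentityProcessor> and picks the concrete implementation once at startup, based on config.offchain_mode.enabled. Below is a minimal, self-contained sketch of that trait-object selection pattern; it is not part of the commit, and the trait surface and constructor arguments are simplified stand-ins for the real IdentityProcessor API.

use std::sync::Arc;

// Simplified stand-ins for the real config and processor types.
struct OffchainModeConfig {
    enabled: bool,
}

trait IdentityProcessor: Send + Sync {
    fn describe(&self) -> &'static str;
}

struct OnChainIdentityProcessor;
struct OffChainIdentityProcessor;

impl IdentityProcessor for OnChainIdentityProcessor {
    fn describe(&self) -> &'static str {
        "submits identity batches to the chain"
    }
}

impl IdentityProcessor for OffChainIdentityProcessor {
    fn describe(&self) -> &'static str {
        "keeps identity batches in the local database only"
    }
}

// Mirrors the branch in App::new: one config flag decides which impl backs the trait object.
fn build_processor(offchain_mode: &OffchainModeConfig) -> Arc<dyn IdentityProcessor> {
    if offchain_mode.enabled {
        Arc::new(OffChainIdentityProcessor)
    } else {
        Arc::new(OnChainIdentityProcessor)
    }
}

fn main() {
    let processor = build_processor(&OffchainModeConfig { enabled: true });
    println!("identity processor: {}", processor.describe());
}

The payoff of this shape is visible in the rest of the diff: downstream consumers such as the TreeInitializer depend only on the trait object, so off-chain mode needs no further branching there.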
36 changes: 28 additions & 8 deletions src/config.rs
@@ -12,15 +12,17 @@ use crate::utils::serde_utils::JsonStrWrapper;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub app: AppConfig,
pub tree: TreeConfig,
pub network: NetworkConfig,
pub providers: ProvidersConfig,
pub relayer: RelayerConfig,
pub database: DatabaseConfig,
pub server: ServerConfig,
pub app: AppConfig,
pub tree: TreeConfig,
pub network: NetworkConfig,
pub providers: ProvidersConfig,
pub relayer: RelayerConfig,
pub database: DatabaseConfig,
pub server: ServerConfig,
#[serde(default)]
pub service: ServiceConfig,
pub service: ServiceConfig,
#[serde(default)]
pub offchain_mode: OffchainModeConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -219,6 +221,12 @@ pub struct DatadogConfig {
pub traces_endpoint: Option<String>,
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct OffchainModeConfig {
#[serde(default = "default::offchain_mode_enabled")]
pub enabled: bool,
}

pub mod default {
use std::time::Duration;

@@ -311,6 +319,12 @@ pub mod default {
"0000000000000000000000000000000000000000000000000000000000000000"
))
}

pub fn offchain_mode_enabled() -> bool {
// todo(piotrh)
// false
true
}
}

#[cfg(test)]
Expand Down Expand Up @@ -339,6 +353,9 @@ mod tests {
[server]
address = "0.0.0.0:3001"
[offchain_mode]
enabled = false
"#};

#[test]
@@ -394,6 +411,9 @@ mod tests {
[service.datadog]
traces_endpoint = "http://localhost:8126"
[offchain_mode]
enabled = false
"#};

#[test]
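Backwards compatibility of existing config files hinges on the serde defaults visible above: Config marks offchain_mode with #[serde(default)] and OffchainModeConfig derives Default, so a TOML file with no [offchain_mode] section still parses, while an explicit section falls back to default::offchain_mode_enabled for the enabled field (left with a TODO in this commit). The following is a hedged sketch of that pattern using hypothetical types, assuming the serde crate with the derive feature plus the toml crate; it is not the crate's actual config code.

use serde::Deserialize;

fn default_enabled() -> bool {
    false // assumption for this sketch; the commit's real default function is still marked todo
}

#[derive(Debug, Default, Deserialize)]
struct OffchainModeConfig {
    #[serde(default = "default_enabled")]
    enabled: bool,
}

#[derive(Debug, Deserialize)]
struct Config {
    // The section may be absent entirely; Default::default() is used then.
    #[serde(default)]
    offchain_mode: OffchainModeConfig,
}

fn main() {
    // A pre-existing config with no [offchain_mode] section still deserializes.
    let legacy: Config = toml::from_str("").unwrap();
    assert!(!legacy.offchain_mode.enabled);

    // New configs can opt in explicitly, as in the test fixtures above.
    let new_style: Config = toml::from_str("[offchain_mode]\nenabled = true\n").unwrap();
    assert!(new_style.offchain_mode.enabled);

    println!("legacy: {:?}, new: {:?}", legacy, new_style);
}

With this shape, the [offchain_mode] / enabled = false blocks added to the TOML test fixtures above exercise the explicit path, while configs written before this commit take the default path.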
(The remaining 42 of the 44 changed files are not shown here.)
