Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore(starknet_monitoring_endpoint): use run_until for querying liveness #2492

Merged
merged 1 commit into from
Dec 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
use std::time::Duration;

use infra_utils::run_until::run_until;
use mempool_test_utils::starknet_api_test_utils::{AccountId, MultiAccountTransactionGenerator};
use papyrus_execution::execution_utils::get_nonce_at;
Expand Down Expand Up @@ -72,8 +70,7 @@ async fn test_end_to_end_integration(mut tx_generator: MultiAccountTransactionGe
let node_run_handle = spawn_run_node(integration_test_setup.node_config_path).await;

// Wait for the node to start.
match integration_test_setup.is_alive_test_client.await_alive(Duration::from_secs(5), 50).await
{
match integration_test_setup.is_alive_test_client.await_alive(5000, 50).await {
Ok(_) => {}
Err(_) => panic!("Node is not alive."),
}
Expand Down
3 changes: 2 additions & 1 deletion crates/starknet_monitoring_endpoint/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,15 @@ repository.workspace = true
license-file.workspace = true

[features]
testing = ["tokio", "tower"]
testing = ["infra_utils", "tokio", "tower"]

[lints]
workspace = true

[dependencies]
axum.workspace = true
hyper = { workspace = true }
infra_utils = { workspace = true, optional = true }
metrics-exporter-prometheus.workspace = true
papyrus_config.workspace = true
serde.workspace = true
Expand Down
36 changes: 13 additions & 23 deletions crates/starknet_monitoring_endpoint/src/test_utils.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
use std::net::{IpAddr, SocketAddr};
use std::time::Duration;

use axum::body::Body;
use axum::http::Request;
use hyper::client::HttpConnector;
use hyper::Client;
use infra_utils::run_until::run_until;
use infra_utils::tracing::{CustomLogger, TraceLevel};
use tracing::info;

use crate::monitoring_endpoint::{ALIVE, MONITORING_PREFIX};
Expand All @@ -31,30 +32,19 @@ impl IsAliveClient {
.map_or(false, |response| response.status().is_success())
}

// TODO(Tsabary/Lev): add sleep time as a parameter, and max retries. Consider using
// 'starknet_client::RetryConfig'.
/// Blocks until 'alive', up to a maximum number of query attempts. Returns 'Ok(())' if the
/// target is alive, otherwise 'Err(())'.
pub async fn await_alive(
&self,
retry_interval: Duration,
max_attempts: usize,
) -> Result<(), ()> {
let mut counter = 0;
while counter < max_attempts {
match self.query_alive().await {
true => {
info!("Node is alive.");
return Ok(());
}
false => {
info!("Waiting for node to be alive: {}.", counter);
tokio::time::sleep(retry_interval).await;
counter += 1;
}
}
}
Err(())
/// Blocks until the node reports 'alive', polling up to `max_attempts` times,
/// waiting `interval` between attempts (presumably milliseconds — the call site
/// changed `Duration::from_secs(5)` to `5000`; confirm against `run_until`).
/// Returns `Ok(())` as soon as the node is alive, otherwise `Err(())` once all
/// attempts are exhausted.
pub async fn await_alive(&self, interval: u64, max_attempts: usize) -> Result<(), ()> {
    // Progress messages are emitted through the shared retry helper's logger.
    let trace =
        CustomLogger::new(TraceLevel::Info, Some("Waiting for node to be alive".to_string()));

    // Probe issues one liveness query; the predicate decides when to stop retrying.
    let probe = || async move { self.query_alive().await };
    let is_alive = |alive: &bool| *alive;

    // `run_until` yields `Some(_)` when the predicate was satisfied within the
    // attempt budget, `None` otherwise; collapse that into the unit Result.
    match run_until(interval, max_attempts, probe, is_alive, Some(trace)).await {
        Some(_) => Ok(()),
        None => Err(()),
    }
}
}

Expand Down
Loading