diff --git a/Cargo.lock b/Cargo.lock
index a6e0754166..23d1a21a7a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1167,6 +1167,27 @@ dependencies = [
  "cfg-if 1.0.0",
 ]
 
+[[package]]
+name = "env_filter"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea"
+dependencies = [
+ "log",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "env_filter",
+ "log",
+]
+
 [[package]]
 name = "equivalent"
 version = "1.0.1"
@@ -1175,11 +1196,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
 
 [[package]]
 name = "erased-serde"
-version = "0.3.31"
+version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c"
+checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d"
 dependencies = [
  "serde",
+ "typeid",
 ]
 
 [[package]]
@@ -2026,9 +2048,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.20"
+version = "0.4.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
 dependencies = [
  "serde",
  "value-bag",
 ]
@@ -4077,6 +4099,28 @@ dependencies = [
  "windows-sys 0.52.0",
 ]
 
+[[package]]
+name = "test-log"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3dffced63c2b5c7be278154d76b479f9f9920ed34e7574201407f0b14e2bbb93"
+dependencies = [
+ "env_logger",
+ "test-log-macros",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "test-log-macros"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.52",
+]
+
 [[package]]
 name = "thiserror"
 version = "1.0.48"
@@ -4534,6 +4578,12 @@ dependencies = [
  "static_assertions",
 ]
 
+[[package]]
+name = "typeid"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "059d83cc991e7a42fc37bd50941885db0888e34209f8cfd9aab07ddec03bc9cf"
+
 [[package]]
 name = "typenum"
 version = "1.16.0"
@@ -4713,9 +4763,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
 
 [[package]]
 name = "value-bag"
-version = "1.4.1"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3"
+checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101"
 dependencies = [
  "value-bag-serde1",
  "value-bag-sval2",
 ]
@@ -4723,9 +4773,9 @@
 
 [[package]]
 name = "value-bag-serde1"
-version = "1.4.1"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0b9f3feef403a50d4d67e9741a6d8fc688bcbb4e4f31bd4aab72cc690284394"
+checksum = "ccacf50c5cb077a9abb723c5bcb5e0754c1a433f1e1de89edc328e2760b6328b"
 dependencies = [
  "erased-serde",
  "serde",
@@ -4734,9 +4784,9 @@
 
 [[package]]
 name = "value-bag-sval2"
-version = "1.4.1"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30b24f4146b6f3361e91cbf527d1fb35e9376c3c0cef72ca5ec5af6d640fad7d"
"30b24f4146b6f3361e91cbf527d1fb35e9376c3c0cef72ca5ec5af6d640fad7d" +checksum = "1785bae486022dfb9703915d42287dcb284c1ee37bd1080eeba78cc04721285b" dependencies = [ "sval", "sval_buffer", @@ -5209,6 +5259,7 @@ dependencies = [ "serde_yaml", "socket2 0.5.6", "stop-token", + "test-log", "tokio", "tokio-util", "tracing", @@ -5267,12 +5318,12 @@ dependencies = [ "criterion", "rand 0.8.5", "serde", + "test-log", "tracing", "uhlc", "zenoh-buffers", "zenoh-protocol", "zenoh-shm", - "zenoh-util", ] [[package]] @@ -5356,6 +5407,7 @@ dependencies = [ "serde", "serde_cbor", "serde_json", + "test-log", "tokio", "tracing", "zenoh", @@ -5833,6 +5885,7 @@ dependencies = [ "rsa", "serde", "sha3", + "test-log", "tokio", "tokio-util", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 37df73e66b..5d6e5974a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -157,6 +157,7 @@ shellexpand = "3.0.0" socket2 = { version = "0.5.1", features = ["all"] } stop-token = "0.7.0" syn = "2.0" +test-log = "0.2.16" tide = "0.16.0" token-cell = { version = "1.4.2", default-features = false } tokio = { version = "1.35.1", default-features = false } # Default features are disabled due to some crates' requirements diff --git a/ci/valgrind-check/Cargo.toml b/ci/valgrind-check/Cargo.toml index 94ee27e7eb..8929b660dd 100644 --- a/ci/valgrind-check/Cargo.toml +++ b/ci/valgrind-check/Cargo.toml @@ -27,7 +27,6 @@ tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } futures = "0.3.25" zenoh = { path = "../../zenoh/" } zenoh-runtime = { path = "../../commons/zenoh-runtime/" } -zenoh-util = { path = "../../commons/zenoh-util/", features = ["test"] } [[bin]] name = "pub_sub" diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index f3b1dd0efe..e0a132517c 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -18,7 +18,7 @@ use zenoh::{config::Config, key_expr::KeyExpr, prelude::*}; #[tokio::main] async fn main() { let _z = zenoh_runtime::ZRuntimePoolGuard; - zenoh_util::init_log_test(); + zenoh::init_logging(); let pub_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); let sub_key_expr = KeyExpr::try_from("test/valgrind/**").unwrap(); diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 70945a4926..0b1025a8a2 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -20,7 +20,7 @@ use zenoh::{ #[tokio::main] async fn main() { let _z = zenoh_runtime::ZRuntimePoolGuard; - zenoh_util::init_log_test(); + zenoh::init_logging(); let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); let get_selector = Selector::try_from("test/valgrind/**").unwrap(); diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 50eb54c923..59a138372d 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -745,6 +745,7 @@ impl ZBuf { } } +#[cfg(test)] mod tests { #[test] fn zbuf_eq() { diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 209a4c698d..c3f68471f4 100644 --- a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -18,9 +18,9 @@ version = { workspace = true } repository = { workspace = true } homepage = { workspace = true } authors = [ - "kydos ", - "Luca Cominardi ", - "Pierre Avital ", + "kydos ", + "Luca 
Cominardi ", + "Pierre Avital ", ] edition = { workspace = true } license = { workspace = true } @@ -31,20 +31,20 @@ description = "Internal crate for zenoh." [features] default = ["std"] std = [ - "tracing", - "serde/std", - "uhlc/std", - "zenoh-protocol/std" + "tracing", + "serde/std", + "uhlc/std", + "zenoh-protocol/std" ] shared-memory = [ - "std", - "zenoh-shm", - "zenoh-protocol/shared-memory", - "zenoh-buffers/shared-memory" + "std", + "zenoh-shm", + "zenoh-protocol/shared-memory", + "zenoh-buffers/shared-memory" ] [dependencies] -tracing = {workspace = true, optional = true } +tracing = { workspace = true, optional = true } serde = { workspace = true, features = ["alloc"] } uhlc = { workspace = true } zenoh-buffers = { workspace = true, default-features = false } @@ -54,10 +54,10 @@ zenoh-shm = { workspace = true, optional = true } # INFO: May cause problems when testing no_std stuff. Check this tool: https://docs.rs/crate/cargo-no-dev-deps/0.1.0 [dev-dependencies] criterion = { workspace = true } +test-log = { workspace = true } rand = { workspace = true, features = ["default"] } zenoh-protocol = { workspace = true, features = ["test"] } -zenoh-util = {workspace = true } [[bench]] name = "codec" diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index a29f88f3d5..08978704db 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -320,6 +320,7 @@ zint_impl!(usize); // } // } +// #[cfg(test)] // mod tests { // #[test] // fn u64_overhead() { diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 1e1bbe18a3..aec21bf948 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -386,10 +386,8 @@ fn codec_shm_info() { } // Common -#[test] +#[test_log::test] fn codec_extension() { - zenoh_util::try_init_log_from_env(); - macro_rules! run_extension_single { ($ext:ty, $buff:expr) => { let codec = Zenoh080::new(); diff --git a/commons/zenoh-crypto/src/cipher.rs b/commons/zenoh-crypto/src/cipher.rs index aa78b97b46..0d138f6acd 100644 --- a/commons/zenoh-crypto/src/cipher.rs +++ b/commons/zenoh-crypto/src/cipher.rs @@ -68,6 +68,7 @@ impl BlockCipher { } } +#[cfg(test)] mod tests { #[test] fn cipher() { diff --git a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index 60b9acde1d..da531bc920 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -18,9 +18,9 @@ version = { workspace = true } repository = { workspace = true } homepage = { workspace = true } authors = [ - "kydos ", - "Luca Cominardi ", - "Pierre Avital ", + "kydos ", + "Luca Cominardi ", + "Pierre Avital ", ] edition = { workspace = true } license = { workspace = true } @@ -35,7 +35,7 @@ test = ["num_cpus"] async-trait = { workspace = true } bincode = { workspace = true } crc = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } serde = { workspace = true, features = ["default"] } shared_memory = { workspace = true } tokio = { workspace = true } diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index e41433b85f..3118ea4b9c 100644 --- a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -31,9 +31,6 @@ description = "Internal crate for zenoh." 
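The `#[cfg(test)]` additions above (`zbuf.rs`, `cipher.rs`) and further down (`timer.rs`, `cookie.rs`, `usrpwd.rs`) all close the same gap: an inline `mod tests` without the attribute is compiled into the library proper, not just into the test harness. A minimal sketch of the gated pattern (illustrative names, not code from this repository):

```rust
// Compiled only for `cargo test`; test helpers and dev-only dependencies
// used in this module never reach release builds of the crate.
#[cfg(test)]
mod tests {
    #[test]
    fn roundtrip() {
        assert_eq!(u32::from_le_bytes(42u32.to_le_bytes()), 42);
    }
}
```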
diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml
index e41433b85f..3118ea4b9c 100644
--- a/commons/zenoh-util/Cargo.toml
+++ b/commons/zenoh-util/Cargo.toml
@@ -31,9 +31,6 @@ description = "Internal crate for zenoh."
 [badges]
 maintenance = { status = "actively-developed" }
 
-[features]
-test = []
-
 [dependencies]
 async-std = { workspace = true, features = ["default", "unstable"] }
 tokio = { workspace = true, features = ["time", "net"] }
diff --git a/commons/zenoh-util/src/log.rs b/commons/zenoh-util/src/log.rs
index 67f1a45df7..959702f783 100644
--- a/commons/zenoh-util/src/log.rs
+++ b/commons/zenoh-util/src/log.rs
@@ -11,86 +11,254 @@
 // Contributors:
 //   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
 //
-use std::{fmt, thread, thread::ThreadId};
+use std::{env, fmt, str::FromStr};
 
 use tracing::{field::Field, span, Event, Metadata, Subscriber};
 use tracing_subscriber::{
-    layer::{Context, SubscriberExt},
+    filter::LevelFilter,
+    layer::{Context, Filter, SubscriberExt},
     registry::LookupSpan,
-    EnvFilter,
+    util::SubscriberInitExt,
+    EnvFilter, Layer,
 };
 
-/// This is a utility function to enable the tracing formatting subscriber from
-/// the `RUST_LOG` environment variable. If `RUST_LOG` is not set, then logging is not enabled.
-///
-/// # Safety
-/// Calling this function initializes a `lazy_static` in the `tracing` crate
-/// such static is not deallocated prior to process existing, thus tools such as `valgrind`
-/// will report a memory leak.
-/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069
-pub fn try_init_log_from_env() {
-    if let Ok(env_filter) = EnvFilter::try_from_default_env() {
-        init_env_filter(env_filter);
+const ALREADY_INITIALIZED: &str = "Already initialized logging";
+
+#[non_exhaustive]
+#[derive(Debug, Default, Clone, Copy)]
+pub enum LogLevel {
+    Trace,
+    Debug,
+    Info,
+    #[default]
+    Warn,
+    Error,
+    Off,
+}
+
+#[derive(Debug, Clone)]
+pub struct InvalidLogLevel(String);
+
+impl fmt::Display for InvalidLogLevel {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "invalid log level {:?}", self.0)
+    }
+}
+
+impl std::error::Error for InvalidLogLevel {}
+
+impl FromStr for LogLevel {
+    type Err = InvalidLogLevel;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(match s.to_lowercase().as_str() {
+            "trace" => Self::Trace,
+            "debug" => Self::Debug,
+            "info" => Self::Info,
+            "warn" | "warning" => Self::Warn,
+            "error" => Self::Error,
+            "off" => Self::Off,
+            _ => return Err(InvalidLogLevel(s.into())),
+        })
     }
 }
 
-/// This is a utility function to enable the tracing formatting subscriber from
-/// the environment variable. If `RUST_LOG` is not set, then fallback directives are used.
+/// Initialize zenoh logging using the value of the `ZENOH_LOG` environment variable.
 ///
-/// # Safety
-/// Calling this function initializes a `lazy_static` in the `tracing` crate
-/// such static is not deallocated prior to process existing, thus tools such as `valgrind`
-/// will report a memory leak.
-/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069
-pub fn init_log_from_env_or<S>(fallback: S)
-where
-    S: AsRef<str>,
-{
-    let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(fallback));
-    init_env_filter(env_filter);
+/// `ZENOH_LOG` is parsed using [`LogLevel::from_str`]; possible values are `"debug"`/`"WARN"`/etc.
+/// If `ZENOH_LOG` is not provided, the default `WARN` level is used.
+///
+/// See [`init_logging_with_level`] if you prefer setting the level directly in your code.
+///
+/// This function is roughly a wrapper around:
+/// ```ignore
+/// let level = std::env::var("ZENOH_LOG")
+///     .map(|var| var.parse().unwrap())
+///     .unwrap_or_default();
+/// init_logging_with_level(level);
+/// ```
+///
+/// Logs are printed on stdout and are formatted like the following:
+/// ```text
+/// 2024-06-19T09:46:18.808602Z  INFO main ThreadId(01) zenoh::net::runtime: Using ZID: 1a615ea88fe1dc531a9d8701775d5bee
+/// 2024-06-19T09:46:18.814577Z  INFO main ThreadId(01) zenoh::net::runtime::orchestrator: Zenoh can be reached at: tcp/[fe80::1]:58977
+/// ```
+///
+/// # Advanced use
+///
+/// zenoh logging uses the `tracing` crates internally; this function is just a convenient wrapper
+/// around `tracing-subscriber`. If you want to control the formatting, or have a finer grain on
+/// log filtering, we advise using `tracing-subscriber` directly.
+/// However, to make migration and on-the-fly debugging easier, the [`RUST_LOG`][1] environment
+/// variable can still be used, and will override the `ZENOH_LOG` configuration.
+///
+/// # Panics
+///
+/// This function may panic if one of the following operations fails:
+/// - parsing the `ZENOH_LOG`/`RUST_LOG` (see [Advanced use](#advanced-use)) environment variables
+/// - registering the global tracing subscriber, because another one has already been registered
+///
+/// As these errors are mostly the result of unintended use, failing fast is assumed to be more
+/// suitable than unexpected behavior, especially as logging should be initialized at program start.
+///
+/// # Use in tests
+///
+/// This function should **not** be used in tests, as it would panic as soon as there is more
+/// than one test executed in the same unit, because only the first one to execute would be able to
+/// register the global tracing subscriber.
+/// Moreover, `tracing` and Rust logging in general require special care about testing because of
+/// libtest output capturing; see
+/// [`SubscriberBuilder::with_test_writer`](tracing_subscriber::fmt::SubscriberBuilder::with_test_writer).
+/// That's why we advise you to use a dedicated library like [`test-log`][3]
+/// (with its `"tracing"` feature enabled).
+///
+/// # Memory leak
+///
+/// [`tracing`] uses a global `static` [subscriber](`tracing::subscriber::set_global_default`),
+/// which is not deallocated prior to process exiting.
+/// Tools such as `valgrind` will then report memory leaks in the *still reachable* category.
+///
+/// However, when `RUST_LOG` is provided (see [Advanced use](#advanced-use)),
+/// [`tracing_subscriber::EnvFilter`] is used instead, and it causes not only *still reachable*
+/// blocks, but also *possibly lost* blocks, which are [known false positives][2].
+/// Those "leaks" can be suppressed from the `valgrind` report using the following suppression:
+/// ```text
+/// {
+///    zenoh_init_logging
+///    Memcheck:Leak
+///    ...
+///    fun:*zenoh*init_logging*
+/// }
+/// ```
+///
+/// [1]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
+/// [2]: https://github.com/rust-lang/regex/issues/1205
+/// [3]: https://crates.io/crates/test-log
+pub fn init_logging() {
+    try_init_logging().expect(ALREADY_INITIALIZED);
 }
 
-fn init_env_filter(env_filter: EnvFilter) {
-    let subscriber = tracing_subscriber::fmt()
-        .with_env_filter(env_filter)
+/// Initialize zenoh logging using the provided logging level.
+///
+/// See [`init_logging`] if you prefer setting the level dynamically, via an environment variable.
+///
+/// Logs are printed on stdout and are formatted like the following:
+/// ```text
+/// 2024-06-19T09:46:18.808602Z  INFO main ThreadId(01) zenoh::net::runtime: Using ZID: 1a615ea88fe1dc531a9d8701775d5bee
+/// 2024-06-19T09:46:18.814577Z  INFO main ThreadId(01) zenoh::net::runtime::orchestrator: Zenoh can be reached at: tcp/[fe80::1]:58977
+/// ```
+///
+/// # Advanced use
+///
+/// zenoh logging uses the `tracing` crates internally; this function is just a convenient wrapper
+/// around `tracing-subscriber`. If you want to control the formatting, or have a finer grain on
+/// log filtering, we advise using `tracing-subscriber` directly.
+/// However, to make migration and on-the-fly debugging easier, the [`RUST_LOG`][1] environment
+/// variable can still be used, and will override the provided level.
+///
+/// # Panics
+///
+/// This function may panic if one of the following operations fails:
+/// - parsing the `RUST_LOG` (see [Advanced use](#advanced-use)) environment variable
+/// - registering the global tracing subscriber, because another one has already been registered
+///
+/// As these errors are mostly the result of unintended use, failing fast is assumed to be more
+/// suitable than unexpected behavior, especially as logging should be initialized at program start.
+///
+/// # Use in tests
+///
+/// This function should **not** be used in unit tests, as it would panic as soon as there is more
+/// than one test executed in the same unit, because only the first one to execute would be able to
+/// register the global tracing subscriber.
+/// Moreover, `tracing` and Rust logging in general require special care about testing because of
+/// libtest output capturing; see
+/// [`SubscriberBuilder::with_test_writer`](tracing_subscriber::fmt::SubscriberBuilder::with_test_writer).
+/// That's why we advise you to use a dedicated library like [`test-log`][3]
+/// (with its `"tracing"` feature enabled).
+///
+/// # Memory leak
+///
+/// [`tracing`] uses a global `static` [subscriber](`tracing::subscriber::set_global_default`),
+/// which is not deallocated prior to process exiting.
+/// Tools such as `valgrind` will then report memory leaks in the *still reachable* category.
+///
+/// However, when `RUST_LOG` is provided (see [Advanced use](#advanced-use)),
+/// [`tracing_subscriber::EnvFilter`] is used instead, and it causes not only *still reachable*
+/// blocks, but also *possibly lost* blocks, which are [known false positives][2].
+/// Those "leaks" can be suppressed from the `valgrind` report using the following suppression:
+/// ```text
+/// {
+///    zenoh_init_logging
+///    Memcheck:Leak
+///    ...
+///    fun:*zenoh*init_logging*
+/// }
+/// ```
+///
+/// [1]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
+/// [2]: https://github.com/rust-lang/regex/issues/1205
+/// [3]: https://crates.io/crates/test-log
+pub fn init_logging_with_level(level: LogLevel) {
+    try_init_logging_with_level(level).expect(ALREADY_INITIALIZED);
+}
+
+/// [`init_logging`], but doesn't panic if the `tracing` global subscriber is already set.
+///
+/// This function is mainly meant to be used in plugins, which can be loaded both as static or
+/// dynamic libraries. In fact, a dynamic library has its own `tracing` global subscriber which
+/// needs to be initialized, but doing the same for a static library would lead to a double
+/// initialization, hence this fallible version.
+/// Returns an error if the global tracing subscriber has already been registered.
+pub fn try_init_logging() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+    let level = env::var("ZENOH_LOG")
+        .map(|var| var.parse().expect("invalid ZENOH_LOG"))
+        .unwrap_or_default();
+    try_init_logging_with_level(level)
+}
+
+fn try_init_logging_with_level(
+    level: LogLevel,
+) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+    let builder = tracing_subscriber::fmt()
         .with_thread_ids(true)
         .with_thread_names(true)
         .with_level(true)
         .with_target(true);
-
-    let subscriber = subscriber.finish();
-    let _ = tracing::subscriber::set_global_default(subscriber);
+    if let Ok(rust_log) = env::var("RUST_LOG") {
+        let env_filter = EnvFilter::builder()
+            .parse(rust_log)
+            .expect("invalid RUST_LOG");
+        builder.with_env_filter(env_filter).try_init()
+    } else {
+        let level_filter = match level {
+            LogLevel::Trace => LevelFilter::TRACE,
+            LogLevel::Debug => LevelFilter::DEBUG,
+            LogLevel::Info => LevelFilter::INFO,
+            LogLevel::Warn => LevelFilter::WARN,
+            LogLevel::Error => LevelFilter::ERROR,
+            LogLevel::Off => LevelFilter::OFF,
+        };
+        builder.with_max_level(level_filter).try_init()
+    }
 }
 
-pub struct LogRecord {
-    pub target: String,
-    pub level: tracing::Level,
-    pub file: Option<&'static str>,
-    pub line: Option<u32>,
-    pub thread_id: ThreadId,
-    pub thread_name: Option<String>,
+/// The data extracted from a [`tracing::Event`].
+///
+/// Span and event fields are flattened into `fields`, except `message`, which has its own slot
+/// for convenience. Both the message and the field values are formatted into strings using
+/// their `fmt::Debug` implementation.
+pub struct LogEvent {
+    pub metadata: &'static Metadata<'static>,
     pub message: Option<String>,
-    pub attributes: Vec<(&'static str, String)>,
+    pub fields: Vec<(&'static str, String)>,
 }
 
 #[derive(Clone)]
 struct SpanFields(Vec<(&'static str, String)>);
 
-struct Layer<Enabled, Callback> {
-    enabled: Enabled,
-    callback: Callback,
-}
+struct CallbackLayer<F>(F);
 
-impl<S, E, C> tracing_subscriber::Layer<S> for Layer<E, C>
+impl<S, F> Layer<S> for CallbackLayer<F>
 where
     S: Subscriber + for<'a> LookupSpan<'a>,
-    E: Fn(&Metadata) -> bool + 'static,
-    C: Fn(LogRecord) + 'static,
+    F: Fn(LogEvent) + 'static,
 {
-    fn enabled(&self, metadata: &Metadata<'_>, _: Context<'_, S>) -> bool {
-        (self.enabled)(metadata)
-    }
-
     fn on_new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) {
         let span = ctx.span(id).unwrap();
         let mut extensions = span.extensions_mut();
@@ -111,55 +279,66 @@ where
     }
 
     fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) {
-        let thread = thread::current();
-        let mut record = LogRecord {
-            target: event.metadata().target().into(),
-            level: *event.metadata().level(),
-            file: event.metadata().file(),
-            line: event.metadata().line(),
-            thread_id: thread.id(),
-            thread_name: thread.name().map(Into::into),
+        let mut log_event = LogEvent {
+            metadata: event.metadata(),
             message: None,
-            attributes: vec![],
+            fields: vec![],
         };
         if let Some(scope) = ctx.event_scope(event) {
             for span in scope.from_root() {
                 let extensions = span.extensions();
                 let fields = extensions.get::<SpanFields>().unwrap();
-                record.attributes.extend(fields.0.iter().cloned());
+                log_event.fields.extend(fields.0.iter().cloned());
             }
         }
         event.record(&mut |field: &Field, value: &dyn fmt::Debug| {
            if field.name() == "message" {
-                record.message = Some(format!("{value:?}"));
+                log_event.message = Some(format!("{value:?}"));
            } else {
-                record.attributes.push((field.name(), format!("{value:?}")))
+                log_event.fields.push((field.name(), format!("{value:?}")))
            }
        });
-        (self.callback)(record);
+        self.0(log_event);
     }
 }
 
-pub fn init_log_with_callback(
-    enabled: impl Fn(&Metadata) -> bool + Send + Sync + 'static,
-    callback: impl Fn(LogRecord) + Send + Sync + 'static,
-) {
-    let subscriber = tracing_subscriber::registry().with(Layer { enabled, callback });
-    let _ = tracing::subscriber::set_global_default(subscriber);
+struct CallbackFilter<E, L> {
+    enabled: E,
+    max_level_hint: L,
 }
 
-#[cfg(feature = "test")]
-// Used to verify memory leaks for valgrind CI.
-// `EnvFilter` internally uses a static reference that is not cleaned up yielding to false positive in valgrind.
-// This function enables logging without calling `EnvFilter` for env configuration.
-pub fn init_log_test() {
-    let subscriber = tracing_subscriber::fmt()
-        .with_max_level(tracing::Level::INFO)
-        .with_thread_ids(true)
-        .with_thread_names(true)
-        .with_level(true)
-        .with_target(true);
+impl<S, E, L> Filter<S> for CallbackFilter<E, L>
+where
+    S: Subscriber + for<'a> LookupSpan<'a>,
+    E: Fn(&Metadata) -> bool + 'static,
+    L: Fn() -> Option<LevelFilter> + 'static,
+{
+    fn enabled(&self, meta: &Metadata<'_>, _: &Context<'_, S>) -> bool {
+        (self.enabled)(meta)
+    }
+    fn max_level_hint(&self) -> Option<LevelFilter> {
+        (self.max_level_hint)()
+    }
+}
 
-    let subscriber = subscriber.finish();
-    let _ = tracing::subscriber::set_global_default(subscriber);
+/// Initialize zenoh logging using the provided callbacks.
+///
+/// This function is mainly meant to be used in zenoh bindings, to provide a bridge between the
+/// Rust `tracing` implementation and a native logging implementation.
+///
+/// [`LogEvent`] contains more or less all the data of a `tracing` event.
+/// `max_level_hint` will be called only once, and `enabled` once per callsite (span/event).
+/// [`tracing::callsite::rebuild_interest_cache`] can be called to reset the cache, and have
+/// `max_level_hint`/`enabled` called again.
+pub fn init_log_with_callbacks(
+    enabled: impl Fn(&Metadata) -> bool + Send + Sync + 'static,
+    max_level_hint: impl Fn() -> Option<LevelFilter> + Send + Sync + 'static,
+    callback: impl Fn(LogEvent) + Send + Sync + 'static,
+) {
+    let layer = CallbackLayer(callback).with_filter(CallbackFilter {
+        enabled,
+        max_level_hint,
+    });
+    tracing_subscriber::registry().with(layer).init();
 }
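Taken together, the new `log.rs` exposes three entry points plus a callback bridge. A minimal usage sketch, assuming the functions are re-exported at the crate root the way the valgrind binaries above call them (`zenoh::init_logging()`):

```rust
fn main() {
    // Reads ZENOH_LOG (default level: warn); if RUST_LOG is set, it takes
    // precedence and is parsed as a full tracing-subscriber EnvFilter.
    zenoh::init_logging();

    // Alternative: fix the level in code instead of via ZENOH_LOG.
    // init_logging_with_level(LogLevel::Debug);

    // Plugins should prefer the fallible variant, since the host process may
    // already have registered the global subscriber:
    // let _ = try_init_logging();

    tracing::info!("logging is ready");
}
```

For bindings, `init_log_with_callbacks` routes every event through a native logger; a sketch under the same re-export assumption:

```rust
use tracing::level_filters::LevelFilter;

fn install_native_bridge() {
    zenoh_util::log::init_log_with_callbacks(
        // `enabled`: called once per callsite; keep INFO and more severe.
        |meta| *meta.level() <= tracing::Level::INFO,
        // `max_level_hint`: called once, lets `tracing` skip disabled levels.
        || Some(LevelFilter::INFO),
        // The actual sink: forward to any native logging facility.
        |event| {
            let msg = event.message.unwrap_or_default();
            println!("[{}] {}: {}", event.metadata.level(), event.metadata.target(), msg);
        },
    );
}
```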
diff --git a/commons/zenoh-util/src/timer.rs b/commons/zenoh-util/src/timer.rs
index d18b9192a4..1b12f877fc 100644
--- a/commons/zenoh-util/src/timer.rs
+++ b/commons/zenoh-util/src/timer.rs
@@ -296,6 +296,7 @@ impl Default for Timer {
     }
 }
 
+#[cfg(test)]
 mod tests {
     #[test]
     fn timer() {
diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml
index c1a2c9b8ae..6f51a8b759 100644
--- a/io/zenoh-transport/Cargo.toml
+++ b/io/zenoh-transport/Cargo.toml
@@ -26,10 +26,10 @@ description = "Internal crate for zenoh."
 
 [features]
 shared-memory = [
-  "zenoh-protocol/shared-memory",
-  "zenoh-shm",
-  "zenoh-codec/shared-memory",
-  "zenoh-buffers/shared-memory",
+    "zenoh-protocol/shared-memory",
+    "zenoh-shm",
+    "zenoh-codec/shared-memory",
+    "zenoh-buffers/shared-memory",
 ]
 auth_pubkey = ["transport_auth", "rsa"]
 auth_usrpwd = ["transport_auth"]
@@ -44,7 +44,7 @@ transport_ws = ["zenoh-link/transport_ws"]
 transport_serial = ["zenoh-link/transport_serial"]
 transport_compression = []
 transport_unixpipe = ["zenoh-link/transport_unixpipe"]
-transport_vsock= ["zenoh-link/transport_vsock"]
+transport_vsock = ["zenoh-link/transport_vsock"]
 stats = ["zenoh-protocol/stats"]
 test = []
 unstable = []
@@ -61,9 +61,9 @@ tokio = { workspace = true, features = [
     "io-util",
     "net",
 ] }
-tokio-util = { workspace = true, features = ["rt"]}
+tokio-util = { workspace = true, features = ["rt"] }
 flume = { workspace = true }
-tracing = {workspace = true}
+tracing = { workspace = true }
 lz4_flex = { workspace = true }
 paste = { workspace = true }
 rand = { workspace = true, features = ["default"] }
@@ -90,7 +90,8 @@ zenoh-task = { workspace = true }
 
 [dev-dependencies]
 futures-util = { workspace = true }
-zenoh-util = {workspace = true }
+zenoh-util = { workspace = true }
 zenoh-protocol = { workspace = true, features = ["test"] }
 futures = { workspace = true }
 zenoh-link-commons = { workspace = true }
+test-log = { workspace = true }
diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs
index 4220f8e08b..e177d2de4d 100644
--- a/io/zenoh-transport/src/unicast/establishment/cookie.rs
+++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs
@@ -192,6 +192,7 @@ impl Cookie {
     }
 }
 
+#[cfg(test)]
 mod tests {
     #[test]
     fn codec_cookie() {
diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs
index 22d7a86817..050f0e0351 100644
--- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs
+++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs
@@ -452,6 +452,7 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> {
     }
 }
 
+#[cfg(test)]
 mod tests {
     #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
     async fn authenticator_usrpwd_config() {
diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs
index f4ddbd6ec4..5ff426bb41 100644
--- a/io/zenoh-transport/tests/endpoints.rs
+++ b/io/zenoh-transport/tests/endpoints.rs
@@ -98,9 +98,8 @@ async fn run(endpoints: &[EndPoint]) {
 }
 
 #[cfg(feature = "transport_tcp")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_tcp() {
-    zenoh_util::try_init_log_from_env();
     // Define the locators
     let endpoints: Vec<EndPoint> = vec![
         format!("tcp/127.0.0.1:{}", 7000).parse().unwrap(),
@@ -111,9 +110,8 @@
 }
 
 #[cfg(feature = "transport_udp")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_udp() {
-    zenoh_util::try_init_log_from_env();
     // Define the locators
     let endpoints: Vec<EndPoint> = vec![
         format!("udp/127.0.0.1:{}", 7010).parse().unwrap(),
@@ -124,9 +122,8 @@
 }
 
 #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_unix() {
-    zenoh_util::try_init_log_from_env();
     // Remove the files if they still exists
     let f1 = "zenoh-test-unix-socket-0.sock";
     let f2 = "zenoh-test-unix-socket-1.sock";
@@ -145,9 +142,8 @@
 }
 
 #[cfg(feature = "transport_ws")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_ws() {
-    zenoh_util::try_init_log_from_env();
     // Define the locators
     let endpoints: Vec<EndPoint> = vec![
         format!("ws/127.0.0.1:{}", 7020).parse().unwrap(),
@@ -158,9 +154,8 @@
 }
 
 #[cfg(feature = "transport_unixpipe")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_unixpipe() {
-    zenoh_util::try_init_log_from_env();
     // Define the locators
     let endpoints: Vec<EndPoint> = vec![
         "unixpipe/endpoint_unixpipe".parse().unwrap(),
@@ -172,9 +167,8 @@
 }
 
 #[cfg(all(feature = "transport_tcp", feature = "transport_udp"))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_tcp_udp() {
-    zenoh_util::try_init_log_from_env();
     // Define the locators
     let endpoints: Vec<EndPoint> = vec![
         format!("tcp/127.0.0.1:{}", 7030).parse().unwrap(),
@@ -191,9 +185,8 @@
     feature = "transport_unixsock-stream",
     target_family = "unix"
 ))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_tcp_udp_unix() {
-    zenoh_util::try_init_log_from_env();
     // Remove the file if it still exists
     let f1 = "zenoh-test-unix-socket-2.sock";
     let _ = std::fs::remove_file(f1);
@@ -215,9 +208,8 @@
     feature = "transport_unixsock-stream",
     target_family = "unix"
 ))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_tcp_unix() {
-    zenoh_util::try_init_log_from_env();
     // Remove the file if it still exists
     let f1 = "zenoh-test-unix-socket-3.sock";
     let _ = std::fs::remove_file(f1);
@@ -237,9 +229,8 @@
     feature = "transport_unixsock-stream",
     target_family = "unix"
 ))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_udp_unix() {
-    zenoh_util::try_init_log_from_env();
     // Remove the file if it still exists
     let f1 = "zenoh-test-unix-socket-4.sock";
     let _ = std::fs::remove_file(f1);
     // Define the locators
@@ -254,12 +245,10 @@
 }
 
 #[cfg(feature = "transport_tls")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_tls() {
     use zenoh_link::tls::config::*;
 
-    zenoh_util::try_init_log_from_env();
-
     // NOTE: this an auto-generated pair of certificate and key.
     //       The target domain is localhost, so it has no real
     //       mapping to any existing domain. The certificate and key
@@ -333,12 +322,10 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM
 }
 
 #[cfg(feature = "transport_quic")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_quic() {
     use zenoh_link::quic::config::*;
 
-    zenoh_util::try_init_log_from_env();
-
     // NOTE: this an auto-generated pair of certificate and key.
     //       The target domain is localhost, so it has no real
     //       mapping to any existing domain. The certificate and key
@@ -411,9 +398,8 @@
 }
 
 #[cfg(all(feature = "transport_vsock", target_os = "linux"))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn endpoint_vsock() {
-    zenoh_util::try_init_log_from_env();
     // Define the locators
     let endpoints: Vec<EndPoint> = vec![
         "vsock/-1:1234".parse().unwrap(),
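Every test migration in `endpoints.rs` (and in the transport tests below) follows the same two-line recipe: delete the manual `zenoh_util::try_init_log_from_env()` call and wrap the runtime attribute in `#[test_log::test(...)]`. What the attribute does is roughly sketched here (simplified; not the literal macro expansion of `test-log` 0.2):

```rust
// With test-log's "tracing" feature, each test body is prefixed with a
// per-test subscriber setup roughly equivalent to this:
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn endpoint_tcp_sketch() {
    let _ = tracing_subscriber::fmt()
        // honor RUST_LOG-style directives from the environment
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        // cooperate with libtest output capturing
        .with_test_writer()
        // ignore the error when another in-process test already initialized
        .try_init();
    // ... original test body ...
}
```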
diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs
index 129f79d55e..41cdd86552 100644
--- a/io/zenoh-transport/tests/multicast_compression.rs
+++ b/io/zenoh-transport/tests/multicast_compression.rs
@@ -14,357 +14,349 @@
 // Restricting to macos by default because of no IPv6 support
 // on GitHub CI actions on Linux and Windows.
-#[cfg(all(target_family = "unix", feature = "transport_compression"))]
-mod tests {
-    use std::{
-        any::Any,
-        sync::{
-            atomic::{AtomicUsize, Ordering},
-            Arc,
-        },
-        time::Duration,
-    };
-
-    use zenoh_core::ztimeout;
-    use zenoh_link::Link;
-    use zenoh_protocol::{
-        core::{
-            Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI,
-            ZenohIdProto,
-        },
-        network::{
-            push::{
-                ext::{NodeIdType, QoSType},
-                Push,
-            },
-            NetworkMessage,
-        },
-        zenoh::Put,
-    };
-    use zenoh_result::ZResult;
-    use zenoh_transport::{
-        multicast::{TransportManagerBuilderMulticast, TransportMulticast},
-        unicast::TransportUnicast,
-        TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer,
-        TransportPeerEventHandler,
-    };
-
-    const TIMEOUT: Duration = Duration::from_secs(60);
-    const SLEEP: Duration = Duration::from_secs(1);
-    const SLEEP_COUNT: Duration = Duration::from_millis(10);
-
-    const MSG_COUNT: usize = 1_000;
-    const MSG_SIZE_NOFRAG: [usize; 1] = [1_024];
+#![cfg(all(target_family = "unix", feature = "transport_compression"))]
+use std::{
+    any::Any,
+    sync::{
+        atomic::{AtomicUsize, Ordering},
+        Arc,
+    },
+    time::Duration,
+};
+
+use zenoh_core::ztimeout;
+use zenoh_link::Link;
+use zenoh_protocol::{
+    core::{
+        Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI,
+        ZenohIdProto,
+    },
+    network::{
+        push::{
+            ext::{NodeIdType, QoSType},
+            Push,
+        },
+        NetworkMessage,
+    },
+    zenoh::Put,
+};
+use zenoh_result::ZResult;
+use zenoh_transport::{
+    multicast::{TransportManagerBuilderMulticast, TransportMulticast},
+    unicast::TransportUnicast,
+    TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer,
+    TransportPeerEventHandler,
+};
+
+const TIMEOUT: Duration = Duration::from_secs(60);
+const SLEEP: Duration = Duration::from_secs(1);
+const SLEEP_COUNT: Duration = Duration::from_millis(10);
+
+const MSG_COUNT: usize = 1_000;
+const MSG_SIZE_NOFRAG: [usize; 1] = [1_024];
 
-    // Transport Handler for the peer02
-    struct SHPeer {
-        count: Arc<AtomicUsize>,
-    }
-
-    impl Default for SHPeer {
-        fn default() -> Self {
-            Self {
-                count: Arc::new(AtomicUsize::new(0)),
-            }
-        }
-    }
+// Transport Handler for the peer02
+struct SHPeer {
+    count: Arc<AtomicUsize>,
+}
+
+impl Default for SHPeer {
+    fn default() -> Self {
+        Self {
+            count: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+}
 
-    impl SHPeer {
-        fn get_count(&self) -> usize {
-            self.count.load(Ordering::Relaxed)
-        }
-    }
+impl SHPeer {
+    fn get_count(&self) -> usize {
+        self.count.load(Ordering::Relaxed)
+    }
+}
 
-    impl TransportEventHandler for SHPeer {
-        fn new_unicast(
-            &self,
-            _peer: TransportPeer,
-            _transport: TransportUnicast,
-        ) -> ZResult<Arc<dyn TransportPeerEventHandler>> {
-            panic!();
-        }
-
-        fn new_multicast(
-            &self,
-            _transport: TransportMulticast,
-        ) -> ZResult<Arc<dyn TransportMulticastEventHandler>> {
-            let arc = Arc::new(SCPeer::new(self.count.clone()));
-            Ok(arc)
-        }
-    }
+impl TransportEventHandler for SHPeer {
+    fn new_unicast(
+        &self,
+        _peer: TransportPeer,
+        _transport: TransportUnicast,
+    ) -> ZResult<Arc<dyn TransportPeerEventHandler>> {
+        panic!();
+    }
+
+    fn new_multicast(
+        &self,
+        _transport: TransportMulticast,
+    ) -> ZResult<Arc<dyn TransportMulticastEventHandler>> {
+        let arc = Arc::new(SCPeer::new(self.count.clone()));
+        Ok(arc)
+    }
+}
 
-    // Transport Callback for the peer02
-    pub struct SCPeer {
-        count: Arc<AtomicUsize>,
-    }
-
-    impl SCPeer {
-        pub fn new(count: Arc<AtomicUsize>) -> Self {
-            Self { count }
-        }
-    }
+// Transport Callback for the peer02
+pub struct SCPeer {
+    count: Arc<AtomicUsize>,
+}
+
+impl SCPeer {
+    pub fn new(count: Arc<AtomicUsize>) -> Self {
+        Self { count }
+    }
+}
 
-    impl TransportMulticastEventHandler for SCPeer {
-        fn new_peer(&self, peer: TransportPeer) -> ZResult<Arc<dyn TransportPeerEventHandler>> {
-            println!("\tNew peer: {:?}", peer);
-            Ok(Arc::new(SCPeer {
-                count: self.count.clone(),
-            }))
-        }
-        fn closing(&self) {}
-        fn closed(&self) {}
-
-        fn as_any(&self) -> &dyn Any {
-            self
-        }
-    }
+impl TransportMulticastEventHandler for SCPeer {
+    fn new_peer(&self, peer: TransportPeer) -> ZResult<Arc<dyn TransportPeerEventHandler>> {
+        println!("\tNew peer: {:?}", peer);
+        Ok(Arc::new(SCPeer {
+            count: self.count.clone(),
+        }))
+    }
+    fn closing(&self) {}
+    fn closed(&self) {}
+
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+}
 
-    impl TransportPeerEventHandler for SCPeer {
-        fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> {
-            self.count.fetch_add(1, Ordering::Relaxed);
-            Ok(())
-        }
-
-        fn new_link(&self, _link: Link) {}
-        fn del_link(&self, _link: Link) {}
-        fn closing(&self) {}
-        fn closed(&self) {}
-
-        fn as_any(&self) -> &dyn Any {
-            self
-        }
-    }
+impl TransportPeerEventHandler for SCPeer {
+    fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> {
+        self.count.fetch_add(1, Ordering::Relaxed);
+        Ok(())
+    }
+
+    fn new_link(&self, _link: Link) {}
+    fn del_link(&self, _link: Link) {}
+    fn closing(&self) {}
+    fn closed(&self) {}
+
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+}
 
-    struct TransportMulticastPeer {
-        manager: TransportManager,
-        handler: Arc<SHPeer>,
-        transport: TransportMulticast,
-    }
+struct TransportMulticastPeer {
+    manager: TransportManager,
+    handler: Arc<SHPeer>,
+    transport: TransportMulticast,
+}
 
-    async fn open_transport(
-        endpoint: &EndPoint,
-    ) -> (TransportMulticastPeer, TransportMulticastPeer) {
-        // Define peer01 and peer02 IDs
-        let peer01_id = ZenohIdProto::try_from([1]).unwrap();
-        let peer02_id = ZenohIdProto::try_from([2]).unwrap();
-
-        // Create the peer01 transport manager
-        let peer01_handler = Arc::new(SHPeer::default());
-        let peer01_manager = TransportManager::builder()
-            .zid(peer01_id)
-            .whatami(WhatAmI::Peer)
-            .multicast(TransportManagerBuilderMulticast::default().compression(true))
-            .build(peer01_handler.clone())
-            .unwrap();
-
-        // Create the peer02 transport manager
-        let peer02_handler = Arc::new(SHPeer::default());
-        let peer02_manager = TransportManager::builder()
-            .zid(peer02_id)
-            .whatami(WhatAmI::Peer)
-            .multicast(TransportManagerBuilderMulticast::default().compression(true))
-            .build(peer02_handler.clone())
-            .unwrap();
-
-        // Create an empty transport with the peer01
-        // Open transport -> This should be accepted
-        println!("Opening transport with {endpoint}");
-        let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap();
-        assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty());
-        println!(
-            "\t{:?}",
-            ztimeout!(peer01_manager.get_transports_multicast())
-        );
-
-        println!("Opening transport with {endpoint}");
-        let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap();
-        assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty());
-        println!(
-            "\t{:?}",
-            ztimeout!(peer02_manager.get_transports_multicast())
-        );
-
-        // Wait to for peer 01 and 02 to join each other
-        ztimeout!(async {
-            while peer01_manager
-                .get_transport_multicast(&peer02_id)
-                .await
-                .is_none()
-            {
-                tokio::time::sleep(SLEEP_COUNT).await;
-            }
-        });
-        let peer01_transport =
-            ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap();
-        println!(
-            "\tPeer01 peers: {:?}",
-            peer01_transport.get_peers().unwrap()
-        );
-
-        ztimeout!(async {
-            while peer02_manager
-                .get_transport_multicast(&peer01_id)
-                .await
-                .is_none()
-            {
-                tokio::time::sleep(SLEEP_COUNT).await;
-            }
-        });
-        let peer02_transport =
-            ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap();
-        println!(
-            "\tPeer02 peers: {:?}",
-            peer02_transport.get_peers().unwrap()
-        );
-
-        (
-            TransportMulticastPeer {
-                manager: peer01_manager,
-                handler: peer01_handler,
-                transport: peer01_transport,
-            },
-            TransportMulticastPeer {
-                manager: peer02_manager,
-                handler: peer02_handler,
-                transport: peer02_transport,
-            },
-        )
-    }
+async fn open_transport(endpoint: &EndPoint) -> (TransportMulticastPeer, TransportMulticastPeer) {
+    // Define peer01 and peer02 IDs
+    let peer01_id = ZenohIdProto::try_from([1]).unwrap();
+    let peer02_id = ZenohIdProto::try_from([2]).unwrap();
+
+    // Create the peer01 transport manager
+    let peer01_handler = Arc::new(SHPeer::default());
+    let peer01_manager = TransportManager::builder()
+        .zid(peer01_id)
+        .whatami(WhatAmI::Peer)
+        .multicast(TransportManagerBuilderMulticast::default().compression(true))
+        .build(peer01_handler.clone())
+        .unwrap();
+
+    // Create the peer02 transport manager
+    let peer02_handler = Arc::new(SHPeer::default());
+    let peer02_manager = TransportManager::builder()
+        .zid(peer02_id)
+        .whatami(WhatAmI::Peer)
+        .multicast(TransportManagerBuilderMulticast::default().compression(true))
+        .build(peer02_handler.clone())
+        .unwrap();
+
+    // Create an empty transport with the peer01
+    // Open transport -> This should be accepted
+    println!("Opening transport with {endpoint}");
+    let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap();
+    assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty());
+    println!(
+        "\t{:?}",
+        ztimeout!(peer01_manager.get_transports_multicast())
+    );
+
+    println!("Opening transport with {endpoint}");
+    let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap();
+    assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty());
+    println!(
+        "\t{:?}",
+        ztimeout!(peer02_manager.get_transports_multicast())
+    );
+
+    // Wait for peer 01 and 02 to join each other
+    ztimeout!(async {
+        while peer01_manager
+            .get_transport_multicast(&peer02_id)
+            .await
+            .is_none()
+        {
+            tokio::time::sleep(SLEEP_COUNT).await;
+        }
+    });
+    let peer01_transport = ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap();
+    println!(
+        "\tPeer01 peers: {:?}",
+        peer01_transport.get_peers().unwrap()
+    );
+
+    ztimeout!(async {
+        while peer02_manager
+            .get_transport_multicast(&peer01_id)
+            .await
+            .is_none()
+        {
+            tokio::time::sleep(SLEEP_COUNT).await;
+        }
+    });
+    let peer02_transport = ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap();
+    println!(
+        "\tPeer02 peers: {:?}",
+        peer02_transport.get_peers().unwrap()
+    );
+
+    (
+        TransportMulticastPeer {
+            manager: peer01_manager,
+            handler: peer01_handler,
+            transport: peer01_transport,
+        },
+        TransportMulticastPeer {
+            manager: peer02_manager,
+            handler: peer02_handler,
+            transport: peer02_transport,
+        },
+    )
+}
 
-    async fn close_transport(
-        peer01: TransportMulticastPeer,
-        peer02: TransportMulticastPeer,
-        endpoint: &EndPoint,
-    ) {
-        // Close the peer01 transport
-        println!("Closing transport with {endpoint}");
-        ztimeout!(peer01.transport.close()).unwrap();
-        assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty());
-        ztimeout!(async {
-            while !peer02.transport.get_peers().unwrap().is_empty() {
-                tokio::time::sleep(SLEEP_COUNT).await;
-            }
-        });
-
-        // Close the peer02 transport
-        println!("Closing transport with {endpoint}");
-        ztimeout!(peer02.transport.close()).unwrap();
-        assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty());
-
-        // Wait a little bit
-        tokio::time::sleep(SLEEP).await;
-    }
+async fn close_transport(
+    peer01: TransportMulticastPeer,
+    peer02: TransportMulticastPeer,
+    endpoint: &EndPoint,
+) {
+    // Close the peer01 transport
+    println!("Closing transport with {endpoint}");
+    ztimeout!(peer01.transport.close()).unwrap();
+    assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty());
+    ztimeout!(async {
+        while !peer02.transport.get_peers().unwrap().is_empty() {
+            tokio::time::sleep(SLEEP_COUNT).await;
+        }
+    });
+
+    // Close the peer02 transport
+    println!("Closing transport with {endpoint}");
+    ztimeout!(peer02.transport.close()).unwrap();
+    assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty());
+
+    // Wait a little bit
+    tokio::time::sleep(SLEEP).await;
+}
 
-    async fn test_transport(
-        peer01: &TransportMulticastPeer,
-        peer02: &TransportMulticastPeer,
-        channel: Channel,
-        msg_size: usize,
-    ) {
-        // Create the message to send
-        let message: NetworkMessage = Push {
-            wire_expr: "test".into(),
-            ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false),
-            ext_tstamp: None,
-            ext_nodeid: NodeIdType::DEFAULT,
-            payload: Put {
-                payload: vec![0u8; msg_size].into(),
-                timestamp: None,
-                encoding: Encoding::empty(),
-                ext_sinfo: None,
-                #[cfg(feature = "shared-memory")]
-                ext_shm: None,
-                ext_attachment: None,
-                ext_unknown: vec![],
-            }
-            .into(),
-        }
-        .into();
-
-        println!("Sending {MSG_COUNT} messages... {channel:?} {msg_size}");
-        for _ in 0..MSG_COUNT {
-            peer01.transport.schedule(message.clone()).unwrap();
-        }
-
-        match channel.reliability {
-            Reliability::Reliable => {
-                ztimeout!(async {
-                    while peer02.handler.get_count() != MSG_COUNT {
-                        tokio::time::sleep(SLEEP_COUNT).await;
-                    }
-                });
-            }
-            Reliability::BestEffort => {
-                ztimeout!(async {
-                    while peer02.handler.get_count() == 0 {
-                        tokio::time::sleep(SLEEP_COUNT).await;
-                    }
-                });
-            }
-        };
-
-        // Wait a little bit
-        tokio::time::sleep(SLEEP).await;
-    }
+async fn test_transport(
+    peer01: &TransportMulticastPeer,
+    peer02: &TransportMulticastPeer,
+    channel: Channel,
+    msg_size: usize,
+) {
+    // Create the message to send
+    let message: NetworkMessage = Push {
+        wire_expr: "test".into(),
+        ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false),
+        ext_tstamp: None,
+        ext_nodeid: NodeIdType::DEFAULT,
+        payload: Put {
+            payload: vec![0u8; msg_size].into(),
+            timestamp: None,
+            encoding: Encoding::empty(),
+            ext_sinfo: None,
+            #[cfg(feature = "shared-memory")]
+            ext_shm: None,
+            ext_attachment: None,
+            ext_unknown: vec![],
+        }
+        .into(),
+    }
+    .into();
+
+    println!("Sending {MSG_COUNT} messages... {channel:?} {msg_size}");
+    for _ in 0..MSG_COUNT {
+        peer01.transport.schedule(message.clone()).unwrap();
+    }
+
+    match channel.reliability {
+        Reliability::Reliable => {
+            ztimeout!(async {
+                while peer02.handler.get_count() != MSG_COUNT {
+                    tokio::time::sleep(SLEEP_COUNT).await;
+                }
+            });
+        }
+        Reliability::BestEffort => {
+            ztimeout!(async {
+                while peer02.handler.get_count() == 0 {
+                    tokio::time::sleep(SLEEP_COUNT).await;
+                }
+            });
+        }
+    };
+
+    // Wait a little bit
+    tokio::time::sleep(SLEEP).await;
+}
 
-    async fn run_single(endpoint: &EndPoint, channel: Channel, msg_size: usize) {
-        let (peer01, peer02) = open_transport(endpoint).await;
-        test_transport(&peer01, &peer02, channel, msg_size).await;
-
-        #[cfg(feature = "stats")]
-        {
-            let stats = peer01.transport.get_stats().unwrap().report();
-            println!("\tPeer 01: {:?}", stats);
-            let stats = peer02.transport.get_stats().unwrap().report();
-            println!("\tPeer 02: {:?}", stats);
-        }
-
-        close_transport(peer01, peer02, endpoint).await;
-    }
+async fn run_single(endpoint: &EndPoint, channel: Channel, msg_size: usize) {
+    let (peer01, peer02) = open_transport(endpoint).await;
+    test_transport(&peer01, &peer02, channel, msg_size).await;
+
+    #[cfg(feature = "stats")]
+    {
+        let stats = peer01.transport.get_stats().unwrap().report();
+        println!("\tPeer 01: {:?}", stats);
+        let stats = peer02.transport.get_stats().unwrap().report();
+        println!("\tPeer 02: {:?}", stats);
+    }
+
+    close_transport(peer01, peer02, endpoint).await;
+}
 
-    async fn run(endpoints: &[EndPoint], channel: &[Channel], msg_size: &[usize]) {
-        for e in endpoints.iter() {
-            for ch in channel.iter() {
-                for ms in msg_size.iter() {
-                    run_single(e, *ch, *ms).await;
-                }
-            }
-        }
-    }
+async fn run(endpoints: &[EndPoint], channel: &[Channel], msg_size: &[usize]) {
+    for e in endpoints.iter() {
+        for ch in channel.iter() {
+            for ms in msg_size.iter() {
+                run_single(e, *ch, *ms).await;
+            }
+        }
+    }
+}
 
-    #[cfg(feature = "transport_udp")]
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    async fn transport_multicast_compression_udp_only() {
-        zenoh_util::try_init_log_from_env();
-
-        // Define the locator
-        let endpoints: Vec<EndPoint> = vec![
-            format!(
-                "udp/224.{}.{}.{}:21000",
-                rand::random::<u8>(),
-                rand::random::<u8>(),
-                rand::random::<u8>()
-            )
-            .parse()
-            .unwrap(),
-            // Disabling by default because of no IPv6 support
-            // on GitHub CI actions.
-            // format!("udp/{}", ZN_MULTICAST_IPV6_ADDRESS_DEFAULT)
-            //     .parse()
-            //     .unwrap(),
-        ];
-        // Define the reliability and congestion control
-        let channel = [
-            Channel {
-                priority: Priority::DEFAULT,
-                reliability: Reliability::BestEffort,
-            },
-            Channel {
-                priority: Priority::RealTime,
-                reliability: Reliability::BestEffort,
-            },
-        ];
-        // Run
-        run(&endpoints, &channel, &MSG_SIZE_NOFRAG).await;
-    }
-}
+#[cfg(feature = "transport_udp")]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
+async fn transport_multicast_compression_udp_only() {
+    // Define the locator
+    let endpoints: Vec<EndPoint> = vec![
+        format!(
+            "udp/224.{}.{}.{}:21000",
+            rand::random::<u8>(),
+            rand::random::<u8>(),
+            rand::random::<u8>()
+        )
+        .parse()
+        .unwrap(),
+        // Disabling by default because of no IPv6 support
+        // on GitHub CI actions.
+        // format!("udp/{}", ZN_MULTICAST_IPV6_ADDRESS_DEFAULT)
+        //     .parse()
+        //     .unwrap(),
+    ];
+    // Define the reliability and congestion control
+    let channel = [
+        Channel {
+            priority: Priority::DEFAULT,
+            reliability: Reliability::BestEffort,
+        },
+        Channel {
+            priority: Priority::RealTime,
+            reliability: Reliability::BestEffort,
+        },
+    ];
+    // Run
+    run(&endpoints, &channel, &MSG_SIZE_NOFRAG).await;
+}
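The rewrite of `multicast_compression.rs` above, repeated for `multicast_transport.rs` below, is mostly mechanical: the `#[cfg(...)] mod tests { ... }` wrapper becomes inner `#![cfg(...)]` attributes at the top of the file, and the whole body loses one level of indentation. A minimal sketch of the shape (hypothetical file):

```rust
// Integration-test files are their own crate roots, so an inner attribute
// can gate the entire file instead of an inner `mod tests` wrapper:
#![cfg(all(target_family = "unix", feature = "transport_compression"))]

#[test]
fn gated_everywhere() {
    // every item in this file inherits the cfg above
}
```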
SHPeer { + fn get_count(&self) -> usize { + self.count.load(Ordering::Relaxed) } +} - // Transport Callback for the peer02 - pub struct SCPeer { - count: Arc, +impl TransportEventHandler for SHPeer { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + panic!(); } - impl SCPeer { - pub fn new(count: Arc) -> Self { - Self { count } - } + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + let arc = Arc::new(SCPeer::new(self.count.clone())); + Ok(arc) } +} - impl TransportMulticastEventHandler for SCPeer { - fn new_peer(&self, peer: TransportPeer) -> ZResult> { - println!("\tNew peer: {:?}", peer); - Ok(Arc::new(SCPeer { - count: self.count.clone(), - })) - } - fn closing(&self) {} - fn closed(&self) {} +// Transport Callback for the peer02 +pub struct SCPeer { + count: Arc, +} - fn as_any(&self) -> &dyn Any { - self - } +impl SCPeer { + pub fn new(count: Arc) -> Self { + Self { count } } +} - impl TransportPeerEventHandler for SCPeer { - fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> { - self.count.fetch_add(1, Ordering::Relaxed); - Ok(()) - } - - fn new_link(&self, _link: Link) {} - fn del_link(&self, _link: Link) {} - fn closing(&self) {} - fn closed(&self) {} - - fn as_any(&self) -> &dyn Any { - self - } +impl TransportMulticastEventHandler for SCPeer { + fn new_peer(&self, peer: TransportPeer) -> ZResult> { + println!("\tNew peer: {:?}", peer); + Ok(Arc::new(SCPeer { + count: self.count.clone(), + })) } + fn closing(&self) {} + fn closed(&self) {} - struct TransportMulticastPeer { - manager: TransportManager, - handler: Arc, - transport: TransportMulticast, + fn as_any(&self) -> &dyn Any { + self } +} - async fn open_transport( - endpoint: &EndPoint, - ) -> (TransportMulticastPeer, TransportMulticastPeer) { - // Define peer01 and peer02 IDs - let peer01_id = ZenohIdProto::try_from([1]).unwrap(); - let peer02_id = ZenohIdProto::try_from([2]).unwrap(); - - // Create the peer01 transport manager - let peer01_handler = Arc::new(SHPeer::default()); - let peer01_manager = TransportManager::builder() - .zid(peer01_id) - .whatami(WhatAmI::Peer) - .build(peer01_handler.clone()) - .unwrap(); - - // Create the peer02 transport manager - let peer02_handler = Arc::new(SHPeer::default()); - let peer02_manager = TransportManager::builder() - .whatami(WhatAmI::Peer) - .zid(peer02_id) - .build(peer02_handler.clone()) - .unwrap(); - - // Create an empty transport with the peer01 - // Open transport -> This should be accepted - println!("Opening transport with {endpoint}"); - let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); - println!( - "\t{:?}", - ztimeout!(peer01_manager.get_transports_multicast()) - ); - - println!("Opening transport with {endpoint}"); - let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); - println!( - "\t{:?}", - ztimeout!(peer02_manager.get_transports_multicast()) - ); - - // Wait to for peer 01 and 02 to join each other - ztimeout!(async { - while peer01_manager - .get_transport_multicast(&peer02_id) - .await - .is_none() - { - tokio::time::sleep(SLEEP_COUNT).await; - } - }); - let peer01_transport = - ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); - println!( - "\tPeer01 peers: {:?}", - peer01_transport.get_peers().unwrap() - ); - - 
ztimeout!(async { - while peer02_manager - .get_transport_multicast(&peer01_id) - .await - .is_none() - { - tokio::time::sleep(SLEEP_COUNT).await; - } - }); - let peer02_transport = - ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); - println!( - "\tPeer02 peers: {:?}", - peer02_transport.get_peers().unwrap() - ); - - ( - TransportMulticastPeer { - manager: peer01_manager, - handler: peer01_handler, - transport: peer01_transport, - }, - TransportMulticastPeer { - manager: peer02_manager, - handler: peer02_handler, - transport: peer02_transport, - }, - ) +impl TransportPeerEventHandler for SCPeer { + fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> { + self.count.fetch_add(1, Ordering::Relaxed); + Ok(()) } - async fn close_transport( - peer01: TransportMulticastPeer, - peer02: TransportMulticastPeer, - endpoint: &EndPoint, - ) { - // Close the peer01 transport - println!("Closing transport with {endpoint}"); - ztimeout!(peer01.transport.close()).unwrap(); - assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); - ztimeout!(async { - while !peer02.transport.get_peers().unwrap().is_empty() { - tokio::time::sleep(SLEEP_COUNT).await; - } - }); - - // Close the peer02 transport - println!("Closing transport with {endpoint}"); - ztimeout!(peer02.transport.close()).unwrap(); - assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} - // Wait a little bit - tokio::time::sleep(SLEEP).await; + fn as_any(&self) -> &dyn Any { + self } +} - async fn test_transport( - peer01: &TransportMulticastPeer, - peer02: &TransportMulticastPeer, - channel: Channel, - msg_size: usize, - ) { - // Create the message to send - let message: NetworkMessage = Push { - wire_expr: "test".into(), - ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), - ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, - payload: Put { - payload: vec![0u8; msg_size].into(), - timestamp: None, - encoding: Encoding::empty(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: None, - ext_unknown: vec![], - } - .into(), +struct TransportMulticastPeer { + manager: TransportManager, + handler: Arc, + transport: TransportMulticast, +} + +async fn open_transport(endpoint: &EndPoint) -> (TransportMulticastPeer, TransportMulticastPeer) { + // Define peer01 and peer02 IDs + let peer01_id = ZenohIdProto::try_from([1]).unwrap(); + let peer02_id = ZenohIdProto::try_from([2]).unwrap(); + + // Create the peer01 transport manager + let peer01_handler = Arc::new(SHPeer::default()); + let peer01_manager = TransportManager::builder() + .zid(peer01_id) + .whatami(WhatAmI::Peer) + .build(peer01_handler.clone()) + .unwrap(); + + // Create the peer02 transport manager + let peer02_handler = Arc::new(SHPeer::default()); + let peer02_manager = TransportManager::builder() + .whatami(WhatAmI::Peer) + .zid(peer02_id) + .build(peer02_handler.clone()) + .unwrap(); + + // Create an empty transport with the peer01 + // Open transport -> This should be accepted + println!("Opening transport with {endpoint}"); + let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); + assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer01_manager.get_transports_multicast()) + ); + + println!("Opening transport with {endpoint}"); + let _ = 
ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); + assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer02_manager.get_transports_multicast()) + ); + + // Wait for peer 01 and 02 to join each other + ztimeout!(async { + while peer01_manager + .get_transport_multicast(&peer02_id) + .await + .is_none() + { + tokio::time::sleep(SLEEP_COUNT).await; + } + }); + let peer01_transport = ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); + println!( + "\tPeer01 peers: {:?}", + peer01_transport.get_peers().unwrap() + ); + + ztimeout!(async { + while peer02_manager + .get_transport_multicast(&peer01_id) + .await + .is_none() + { + tokio::time::sleep(SLEEP_COUNT).await; } + }); + let peer02_transport = ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); + println!( + "\tPeer02 peers: {:?}", + peer02_transport.get_peers().unwrap() + ); + + ( + TransportMulticastPeer { + manager: peer01_manager, + handler: peer01_handler, + transport: peer01_transport, + }, + TransportMulticastPeer { + manager: peer02_manager, + handler: peer02_handler, + transport: peer02_transport, + }, + ) +} +async fn close_transport( + peer01: TransportMulticastPeer, + peer02: TransportMulticastPeer, + endpoint: &EndPoint, +) { + // Close the peer01 transport + println!("Closing transport with {endpoint}"); + ztimeout!(peer01.transport.close()).unwrap(); + assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); + ztimeout!(async { + while !peer02.transport.get_peers().unwrap().is_empty() { + tokio::time::sleep(SLEEP_COUNT).await; } + }); + // Close the peer02 transport + println!("Closing transport with {endpoint}"); + ztimeout!(peer02.transport.close()).unwrap(); + assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} +async fn test_transport( + peer01: &TransportMulticastPeer, + peer02: &TransportMulticastPeer, + channel: Channel, + msg_size: usize, +) { + // Create the message to send + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::DEFAULT, + payload: Put { + payload: vec![0u8; msg_size].into(), + timestamp: None, + encoding: Encoding::empty(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: None, + ext_unknown: vec![], + } + .into(), } + .into(); + println!("Sending {MSG_COUNT} messages... 
{channel:?} {msg_size}"); + for _ in 0..MSG_COUNT { + peer01.transport.schedule(message.clone()).unwrap(); + } - #[cfg(feature = "stats")] - { - let stats = peer01.transport.get_stats().unwrap().report(); - println!("\tPeer 01: {:?}", stats); - let stats = peer02.transport.get_stats().unwrap().report(); - println!("\tPeer 02: {:?}", stats); + match channel.reliability { + Reliability::Reliable => { + ztimeout!(async { + while peer02.handler.get_count() != MSG_COUNT { + tokio::time::sleep(SLEEP_COUNT).await; + } + }); } + Reliability::BestEffort => { + ztimeout!(async { + while peer02.handler.get_count() == 0 { + tokio::time::sleep(SLEEP_COUNT).await; + } + }); + } + }; - close_transport(peer01, peer02, endpoint).await; + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +async fn run_single(endpoint: &EndPoint, channel: Channel, msg_size: usize) { + let (peer01, peer02) = open_transport(endpoint).await; + test_transport(&peer01, &peer02, channel, msg_size).await; + + #[cfg(feature = "stats")] + { + let stats = peer01.transport.get_stats().unwrap().report(); + println!("\tPeer 01: {:?}", stats); + let stats = peer02.transport.get_stats().unwrap().report(); + println!("\tPeer 02: {:?}", stats); } - async fn run(endpoints: &[EndPoint], channel: &[Channel], msg_size: &[usize]) { - for e in endpoints.iter() { - for ch in channel.iter() { - for ms in msg_size.iter() { - run_single(e, *ch, *ms).await; - } + close_transport(peer01, peer02, endpoint).await; +} + +async fn run(endpoints: &[EndPoint], channel: &[Channel], msg_size: &[usize]) { + for e in endpoints.iter() { + for ch in channel.iter() { + for ms in msg_size.iter() { + run_single(e, *ch, *ms).await; } } } +} - #[cfg(all(feature = "transport_compression", feature = "transport_udp"))] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_multicast_udp_only() { - zenoh_util::try_init_log_from_env(); - - // Define the locator - let endpoints: Vec = vec![ - format!( - "udp/224.{}.{}.{}:20000", - rand::random::(), - rand::random::(), - rand::random::() - ) - .parse() - .unwrap(), - // Disabling by default because of no IPv6 support - // on GitHub CI actions. - // format!("udp/{}", ZN_MULTICAST_IPV6_ADDRESS_DEFAULT) - // .parse() - // .unwrap(), - ]; - // Define the reliability and congestion control - let channel = [ - Channel { - priority: Priority::DEFAULT, - reliability: Reliability::BestEffort, - }, - Channel { - priority: Priority::RealTime, - reliability: Reliability::BestEffort, - }, - ]; - // Run - run(&endpoints, &channel, &MSG_SIZE_NOFRAG).await; - } +#[cfg(all(feature = "transport_compression", feature = "transport_udp"))] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_multicast_udp_only() { + // Define the locator + let endpoints: Vec = vec![ + format!( + "udp/224.{}.{}.{}:20000", + rand::random::(), + rand::random::(), + rand::random::() + ) + .parse() + .unwrap(), + // Disabling by default because of no IPv6 support + // on GitHub CI actions. 
+ // format!("udp/{}", ZN_MULTICAST_IPV6_ADDRESS_DEFAULT) + // .parse() + // .unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::DEFAULT, + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + run(&endpoints, &channel, &MSG_SIZE_NOFRAG).await; } diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index 121db5b5d6..586c5dc3bd 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -116,10 +116,8 @@ async fn run(endpoints: &[EndPoint]) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_whitelist_tcp() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoints: Vec = vec![ format!("tcp/127.0.0.1:{}", 17000).parse().unwrap(), @@ -130,11 +128,9 @@ async fn transport_whitelist_tcp() { } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn transport_whitelist_unixpipe() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoints: Vec = vec![ "unixpipe/transport_whitelist_unixpipe".parse().unwrap(), @@ -145,10 +141,8 @@ async fn transport_whitelist_unixpipe() { } #[cfg(all(feature = "transport_vsock", target_os = "linux"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_whitelist_vsock() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoints: Vec = vec![ "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(), diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index 87f2174598..c468946ec3 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -627,51 +627,45 @@ async fn run_with_lowlatency_transport(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn authenticator_tcp() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 8000).parse().unwrap(); run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn authenticator_tcp_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 8100).parse().unwrap(); run_with_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn authenticator_udp() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 8010).parse().unwrap(); run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] 
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn authenticator_udp_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 8110).parse().unwrap(); run_with_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn authenticator_unixpipe() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/authenticator_unixpipe_test".parse().unwrap(); run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn authenticator_unixpipe_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/authenticator_unixpipe_with_lowlatency_transport" .parse() .unwrap(); @@ -679,27 +673,24 @@ async fn authenticator_unixpipe_with_lowlatency_transport() { } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn authenticator_ws() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 8020).parse().unwrap(); run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn authenticator_ws_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 8120).parse().unwrap(); run_with_lowlatency_transport(&endpoint).await; } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn authenticator_unix() { - zenoh_util::try_init_log_from_env(); let f1 = "zenoh-test-unix-socket-10.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -709,12 +700,10 @@ async fn authenticator_unix() { } #[cfg(feature = "transport_tls")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn authenticator_tls() { use zenoh_link::tls::config::*; - zenoh_util::try_init_log_from_env(); - // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -809,12 +798,10 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== } #[cfg(feature = "transport_quic")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn authenticator_quic() { use zenoh_link::quic::config::*; - zenoh_util::try_init_log_from_env(); - // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. 
The certificate and key diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 7c2443c5d9..cc85510669 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -11,504 +11,493 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "transport_compression")] -mod tests { - use std::{ - any::Any, - convert::TryFrom, - fmt::Write as _, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, - time::Duration, - }; - - use zenoh_core::ztimeout; - use zenoh_link::Link; - use zenoh_protocol::{ - core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, - ZenohIdProto, - }, - network::{ - push::ext::{NodeIdType, QoSType}, - NetworkMessage, Push, - }, - zenoh::Put, - }; - use zenoh_result::ZResult; - use zenoh_transport::{ - multicast::TransportMulticast, - unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, - TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, - TransportPeerEventHandler, - }; - - const TIMEOUT: Duration = Duration::from_secs(60); - const SLEEP: Duration = Duration::from_secs(1); - const SLEEP_COUNT: Duration = Duration::from_millis(10); +#![cfg(feature = "transport_compression")] +use std::{ + any::Any, + convert::TryFrom, + fmt::Write as _, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +use zenoh_core::ztimeout; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{ + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, + }, + network::{ + push::ext::{NodeIdType, QoSType}, + NetworkMessage, Push, + }, + zenoh::Put, +}; +use zenoh_result::ZResult; +use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const SLEEP: Duration = Duration::from_secs(1); +const SLEEP_COUNT: Duration = Duration::from_millis(10); + +const MSG_COUNT: usize = 1_000; +const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; +const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; +const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; + +// Transport Handler for the router +struct SHRouter { + count: Arc, +} - const MSG_COUNT: usize = 1_000; - const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; - const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; - const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; +impl Default for SHRouter { + fn default() -> Self { + Self { + count: Arc::new(AtomicUsize::new(0)), + } + } +} - // Transport Handler for the router - struct SHRouter { - count: Arc, +impl SHRouter { + fn get_count(&self) -> usize { + self.count.load(Ordering::SeqCst) } +} - impl Default for SHRouter { - fn default() -> Self { - Self { - count: Arc::new(AtomicUsize::new(0)), - } - } +impl TransportEventHandler for SHRouter { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + let arc = Arc::new(SCRouter::new(self.count.clone())); + Ok(arc) } - impl SHRouter { - fn get_count(&self) -> usize { - self.count.load(Ordering::SeqCst) - } + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); } +} - impl TransportEventHandler for SHRouter { - fn new_unicast( - &self, - _peer: TransportPeer, - 
_transport: TransportUnicast, - ) -> ZResult> { - let arc = Arc::new(SCRouter::new(self.count.clone())); - Ok(arc) - } +// Transport Callback for the router +pub struct SCRouter { + count: Arc, +} - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult> { - panic!(); - } +impl SCRouter { + pub fn new(count: Arc) -> Self { + Self { count } } +} - // Transport Callback for the router - pub struct SCRouter { - count: Arc, +impl TransportPeerEventHandler for SCRouter { + fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { + self.count.fetch_add(1, Ordering::SeqCst); + Ok(()) } - impl SCRouter { - pub fn new(count: Arc) -> Self { - Self { count } - } - } + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} - impl TransportPeerEventHandler for SCRouter { - fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { - self.count.fetch_add(1, Ordering::SeqCst); - Ok(()) - } + fn as_any(&self) -> &dyn Any { + self + } +} - fn new_link(&self, _link: Link) {} - fn del_link(&self, _link: Link) {} - fn closing(&self) {} - fn closed(&self) {} +// Transport Handler for the client +#[derive(Default)] +struct SHClient; + +impl TransportEventHandler for SHClient { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(SCClient)) + } - fn as_any(&self) -> &dyn Any { - self - } + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); } +} - // Transport Handler for the client - #[derive(Default)] - struct SHClient; - - impl TransportEventHandler for SHClient { - fn new_unicast( - &self, - _peer: TransportPeer, - _transport: TransportUnicast, - ) -> ZResult> { - Ok(Arc::new(SCClient)) - } +// Transport Callback for the client +#[derive(Default)] +pub struct SCClient; - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult> { - panic!(); - } +impl TransportPeerEventHandler for SCClient { + fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { + Ok(()) } - // Transport Callback for the client - #[derive(Default)] - pub struct SCClient; + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} - impl TransportPeerEventHandler for SCClient { - fn handle_message(&self, _message: NetworkMessage) -> ZResult<()> { - Ok(()) - } + fn as_any(&self) -> &dyn Any { + self + } +} - fn new_link(&self, _link: Link) {} - fn del_link(&self, _link: Link) {} - fn closing(&self) {} - fn closed(&self) {} +async fn open_transport_unicast( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + lowlatency_transport: bool, +) -> ( + TransportManager, + Arc, + TransportManager, + TransportUnicast, +) { + // Define client and router IDs + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); + + // Create the router transport manager + let router_handler = Arc::new(SHRouter::default()); + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + server_endpoints.len(), + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .compression(true); + let router_manager = TransportManager::builder() + .zid(router_id) + .whatami(WhatAmI::Router) + .unicast(unicast) + .build(router_handler.clone()) + .unwrap(); + + // Create the listener on the router + for e in server_endpoints.iter() { + println!("Add endpoint: {}", e); + let _ 
= ztimeout!(router_manager.add_listener(e.clone())).unwrap(); + } - fn as_any(&self) -> &dyn Any { - self - } + // Create the client transport manager + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + client_endpoints.len(), + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .compression(true); + let client_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client_id) + .unicast(unicast) + .build(Arc::new(SHClient)) + .unwrap(); + + // Create an empty transport with the client + // Open transport -> This should be accepted + for e in client_endpoints.iter() { + println!("Opening transport with {}", e); + let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - async fn open_transport_unicast( - client_endpoints: &[EndPoint], - server_endpoints: &[EndPoint], - lowlatency_transport: bool, - ) -> ( - TransportManager, - Arc, - TransportManager, - TransportUnicast, - ) { - // Define client and router IDs - let client_id = ZenohIdProto::try_from([1]).unwrap(); - let router_id = ZenohIdProto::try_from([2]).unwrap(); - - // Create the router transport manager - let router_handler = Arc::new(SHRouter::default()); - let unicast = make_transport_manager_builder( - #[cfg(feature = "transport_multilink")] - server_endpoints.len(), - #[cfg(feature = "shared-memory")] - false, - lowlatency_transport, - ) - .compression(true); - let router_manager = TransportManager::builder() - .zid(router_id) - .whatami(WhatAmI::Router) - .unicast(unicast) - .build(router_handler.clone()) - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); - // Create the listener on the router - for e in server_endpoints.iter() { - println!("Add endpoint: {}", e); - let _ = ztimeout!(router_manager.add_listener(e.clone())).unwrap(); - } + // Return the handlers + ( + router_manager, + router_handler, + client_manager, + client_transport, + ) +} - // Create the client transport manager - let unicast = make_transport_manager_builder( - #[cfg(feature = "transport_multilink")] - client_endpoints.len(), - #[cfg(feature = "shared-memory")] - false, - lowlatency_transport, - ) - .compression(true); - let client_manager = TransportManager::builder() - .whatami(WhatAmI::Client) - .zid(client_id) - .unicast(unicast) - .build(Arc::new(SHClient)) - .unwrap(); +async fn close_transport( + router_manager: TransportManager, + client_manager: TransportManager, + client_transport: TransportUnicast, + endpoints: &[EndPoint], +) { + // Close the client transport + let mut ee = String::new(); + for e in endpoints.iter() { + let _ = write!(ee, "{e} "); + } + println!("Closing transport with {}", ee); + ztimeout!(client_transport.close()).unwrap(); - // Create an empty transport with the client - // Open transport -> This should be accepted - for e in client_endpoints.iter() { - println!("Opening transport with {}", e); - let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); + ztimeout!(async { + while !router_manager.get_transports_unicast().await.is_empty() { + tokio::time::sleep(SLEEP).await; } + }); - let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); - - // Return the handlers - ( - router_manager, - router_handler, - client_manager, - client_transport, - ) + // Stop the locators on the manager + for e in endpoints.iter() { + println!("Del locator: {}", e); + ztimeout!(router_manager.del_listener(e)).unwrap(); } - async 
fn close_transport( - router_manager: TransportManager, - client_manager: TransportManager, - client_transport: TransportUnicast, - endpoints: &[EndPoint], - ) { - // Close the client transport - let mut ee = String::new(); - for e in endpoints.iter() { - let _ = write!(ee, "{e} "); - } - println!("Closing transport with {}", ee); - ztimeout!(client_transport.close()).unwrap(); - - ztimeout!(async { - while !router_manager.get_transports_unicast().await.is_empty() { - tokio::time::sleep(SLEEP).await; - } - }); - - // Stop the locators on the manager - for e in endpoints.iter() { - println!("Del locator: {}", e); - ztimeout!(router_manager.del_listener(e)).unwrap(); + ztimeout!(async { + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } + }); - ztimeout!(async { - while !router_manager.get_listeners().await.is_empty() { - tokio::time::sleep(SLEEP).await; - } - }); + // Wait a little bit + tokio::time::sleep(SLEEP).await; - // Wait a little bit - tokio::time::sleep(SLEEP).await; + ztimeout!(router_manager.close()); + ztimeout!(client_manager.close()); - ztimeout!(router_manager.close()); - ztimeout!(client_manager.close()); + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} - // Wait a little bit - tokio::time::sleep(SLEEP).await; +async fn test_transport( + router_handler: Arc, + client_transport: TransportUnicast, + channel: Channel, + msg_size: usize, +) { + println!( + "Sending {} messages... {:?} {}", + MSG_COUNT, channel, msg_size + ); + let cctrl = match channel.reliability { + Reliability::Reliable => CongestionControl::Block, + Reliability::BestEffort => CongestionControl::Drop, + }; + // Create the message to send + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(channel.priority, cctrl, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::DEFAULT, + payload: Put { + payload: vec![0u8; msg_size].into(), + timestamp: None, + encoding: Encoding::empty(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: None, + ext_unknown: vec![], + } + .into(), + } + .into(); + for _ in 0..MSG_COUNT { + let _ = client_transport.schedule(message.clone()); } - async fn test_transport( - router_handler: Arc, - client_transport: TransportUnicast, - channel: Channel, - msg_size: usize, - ) { - println!( - "Sending {} messages... 
{:?} {}", - MSG_COUNT, channel, msg_size - ); - let cctrl = match channel.reliability { - Reliability::Reliable => CongestionControl::Block, - Reliability::BestEffort => CongestionControl::Drop, - }; - // Create the message to send - let message: NetworkMessage = Push { - wire_expr: "test".into(), - ext_qos: QoSType::new(channel.priority, cctrl, false), - ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, - payload: Put { - payload: vec![0u8; msg_size].into(), - timestamp: None, - encoding: Encoding::empty(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: None, - ext_unknown: vec![], - } - .into(), + match channel.reliability { + Reliability::Reliable => { + ztimeout!(async { + while router_handler.get_count() != MSG_COUNT { + tokio::time::sleep(SLEEP_COUNT).await; + } + }); } - .into(); - for _ in 0..MSG_COUNT { - let _ = client_transport.schedule(message.clone()); + Reliability::BestEffort => { + ztimeout!(async { + while router_handler.get_count() == 0 { + tokio::time::sleep(SLEEP_COUNT).await; + } + }); } + }; - match channel.reliability { - Reliability::Reliable => { - ztimeout!(async { - while router_handler.get_count() != MSG_COUNT { - tokio::time::sleep(SLEEP_COUNT).await; - } - }); - } - Reliability::BestEffort => { - ztimeout!(async { - while router_handler.get_count() == 0 { - tokio::time::sleep(SLEEP_COUNT).await; - } - }); - } - }; - - // Wait a little bit - tokio::time::sleep(SLEEP).await; - } - - async fn run_single( - client_endpoints: &[EndPoint], - server_endpoints: &[EndPoint], - channel: Channel, - msg_size: usize, - lowlatency_transport: bool, - ) { - println!( - "\n>>> Running test for: {:?}, {:?}, {:?}, {}", - client_endpoints, server_endpoints, channel, msg_size - ); - - #[allow(unused_variables)] // Used when stats feature is enabled - let (router_manager, router_handler, client_manager, client_transport) = - open_transport_unicast(client_endpoints, server_endpoints, lowlatency_transport).await; - - test_transport( - router_handler.clone(), - client_transport.clone(), - channel, - msg_size, - ) - .await; - - #[cfg(feature = "stats")] - { - let c_stats = client_transport.get_stats().unwrap().report(); - println!("\tClient: {:?}", c_stats); - let r_stats = - ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) - .unwrap() - .get_stats() - .map(|s| s.report()) - .unwrap(); - println!("\tRouter: {:?}", r_stats); - } + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} - close_transport( - router_manager, - client_manager, - client_transport, - client_endpoints, - ) - .await; +async fn run_single( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: Channel, + msg_size: usize, + lowlatency_transport: bool, +) { + println!( + "\n>>> Running test for: {:?}, {:?}, {:?}, {}", + client_endpoints, server_endpoints, channel, msg_size + ); + + #[allow(unused_variables)] // Used when stats feature is enabled + let (router_manager, router_handler, client_manager, client_transport) = + open_transport_unicast(client_endpoints, server_endpoints, lowlatency_transport).await; + + test_transport( + router_handler.clone(), + client_transport.clone(), + channel, + msg_size, + ) + .await; + + #[cfg(feature = "stats")] + { + let c_stats = client_transport.get_stats().unwrap().report(); + println!("\tClient: {:?}", c_stats); + let r_stats = ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) + .unwrap() + .get_stats() + .map(|s| s.report()) + .unwrap(); + 
println!("\tRouter: {:?}", r_stats); } - async fn run_internal( - client_endpoints: &[EndPoint], - server_endpoints: &[EndPoint], - channel: &[Channel], - msg_size: &[usize], - lowlatency_transport: bool, - ) { - for ch in channel.iter() { - for ms in msg_size.iter() { - run_single( - client_endpoints, - server_endpoints, - *ch, - *ms, - lowlatency_transport, - ) - .await; - } + close_transport( + router_manager, + client_manager, + client_transport, + client_endpoints, + ) + .await; +} + +async fn run_internal( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], + lowlatency_transport: bool, +) { + for ch in channel.iter() { + for ms in msg_size.iter() { + run_single( + client_endpoints, + server_endpoints, + *ch, + *ms, + lowlatency_transport, + ) + .await; } } +} - async fn run_with_universal_transport( - client_endpoints: &[EndPoint], - server_endpoints: &[EndPoint], - channel: &[Channel], - msg_size: &[usize], - ) { - run_internal(client_endpoints, server_endpoints, channel, msg_size, false).await; - } +async fn run_with_universal_transport( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], +) { + run_internal(client_endpoints, server_endpoints, channel, msg_size, false).await; +} - async fn run_with_lowlatency_transport( - client_endpoints: &[EndPoint], - server_endpoints: &[EndPoint], - channel: &[Channel], - msg_size: &[usize], - ) { - if client_endpoints.len() > 1 || server_endpoints.len() > 1 { - println!("LowLatency transport doesn't support more than one link, so this test would produce MAX_LINKS error!"); - panic!(); - } - run_internal(client_endpoints, server_endpoints, channel, msg_size, true).await; +async fn run_with_lowlatency_transport( + client_endpoints: &[EndPoint], + server_endpoints: &[EndPoint], + channel: &[Channel], + msg_size: &[usize], +) { + if client_endpoints.len() > 1 || server_endpoints.len() > 1 { + println!("LowLatency transport doesn't support more than one link, so this test would produce MAX_LINKS error!"); + panic!(); } + run_internal(client_endpoints, server_endpoints, channel, msg_size, true).await; +} - #[cfg(feature = "transport_tcp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_unicast_compression_tcp_only() { - zenoh_util::try_init_log_from_env(); - - // Define the locators - let endpoints: Vec = vec![ - format!("tcp/127.0.0.1:{}", 19000).parse().unwrap(), - format!("tcp/[::1]:{}", 19001).parse().unwrap(), - ]; - // Define the reliability and congestion control - let channel = [ - Channel { - priority: Priority::DEFAULT, - reliability: Reliability::Reliable, - }, - Channel { - priority: Priority::RealTime, - reliability: Reliability::Reliable, - }, - ]; - // Run - run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; - } +#[cfg(feature = "transport_tcp")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_unicast_compression_tcp_only() { + // Define the locators + let endpoints: Vec = vec![ + format!("tcp/127.0.0.1:{}", 19000).parse().unwrap(), + format!("tcp/[::1]:{}", 19001).parse().unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::DEFAULT, + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + ]; + // Run + run_with_universal_transport(&endpoints, &endpoints, 
&channel, &MSG_SIZE_ALL).await; +} - #[cfg(feature = "transport_tcp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_unicast_compression_tcp_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - - // Define the locators - let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 19100).parse().unwrap()]; - // Define the reliability and congestion control - let channel = [ - Channel { - priority: Priority::DEFAULT, - reliability: Reliability::Reliable, - }, - Channel { - priority: Priority::RealTime, - reliability: Reliability::Reliable, - }, - ]; - // Run - run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_LOWLATENCY).await; - } +#[cfg(feature = "transport_tcp")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_unicast_compression_tcp_only_with_lowlatency_transport() { + // Define the locators + let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 19100).parse().unwrap()]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::DEFAULT, + reliability: Reliability::Reliable, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::Reliable, + }, + ]; + // Run + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_LOWLATENCY).await; +} - #[cfg(feature = "transport_udp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_unicast_compression_udp_only() { - zenoh_util::try_init_log_from_env(); - - // Define the locator - let endpoints: Vec = vec![ - format!("udp/127.0.0.1:{}", 19010).parse().unwrap(), - format!("udp/[::1]:{}", 19011).parse().unwrap(), - ]; - // Define the reliability and congestion control - let channel = [ - Channel { - priority: Priority::DEFAULT, - reliability: Reliability::BestEffort, - }, - Channel { - priority: Priority::RealTime, - reliability: Reliability::BestEffort, - }, - ]; - // Run - run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; - } +#[cfg(feature = "transport_udp")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_unicast_compression_udp_only() { + // Define the locator + let endpoints: Vec = vec![ + format!("udp/127.0.0.1:{}", 19010).parse().unwrap(), + format!("udp/[::1]:{}", 19011).parse().unwrap(), + ]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::DEFAULT, + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; +} - #[cfg(feature = "transport_udp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_unicast_compression_udp_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - - // Define the locator - let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 19110).parse().unwrap()]; - // Define the reliability and congestion control - let channel = [ - Channel { - priority: Priority::DEFAULT, - reliability: Reliability::BestEffort, - }, - Channel { - priority: Priority::RealTime, - reliability: Reliability::BestEffort, - }, - ]; - // Run - run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; - } +#[cfg(feature = "transport_udp")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] 
+async fn transport_unicast_compression_udp_only_with_lowlatency_transport() { + // Define the locator + let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 19110).parse().unwrap()]; + // Define the reliability and congestion control + let channel = [ + Channel { + priority: Priority::DEFAULT, + reliability: Reliability::BestEffort, + }, + Channel { + priority: Priority::RealTime, + reliability: Reliability::BestEffort, + }, + ]; + // Run + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; } diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 183f8a7163..f2eb1f9cb3 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -347,8 +347,6 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec = vec![ format!("tcp/127.0.0.1:{}", 9000).parse().unwrap(), format!("tcp/127.0.0.1:{}", 9001).parse().unwrap(), @@ -377,8 +375,6 @@ async fn transport_tcp_concurrent() { #[tokio::test] #[ignore] async fn transport_ws_concurrent() { - zenoh_util::try_init_log_from_env(); - let endpoint01: Vec = vec![ format!("ws/127.0.0.1:{}", 9020).parse().unwrap(), format!("ws/127.0.0.1:{}", 9021).parse().unwrap(), @@ -407,8 +403,6 @@ async fn transport_ws_concurrent() { #[tokio::test] #[ignore] async fn transport_unixpipe_concurrent() { - zenoh_util::try_init_log_from_env(); - let endpoint01: Vec = vec![ "unixpipe/transport_unixpipe_concurrent".parse().unwrap(), "unixpipe/transport_unixpipe_concurrent2".parse().unwrap(), diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index fc54180c96..dde9ac8660 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -128,10 +128,8 @@ async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_defragmentation_tcp_only() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 11000).parse().unwrap(); // Define the reliability and congestion control @@ -160,11 +158,9 @@ async fn transport_unicast_defragmentation_tcp_only() { } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn transport_unicast_defragmentation_ws_only() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 11010).parse().unwrap(); // Define the reliability and congestion control @@ -193,11 +189,9 @@ async fn transport_unicast_defragmentation_ws_only() { } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn transport_unicast_defragmentation_unixpipe_only() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoint: EndPoint = "unixpipe/transport_unicast_defragmentation_unixpipe_only" .parse() diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index a2cb1e2d12..ca6f4cbe8a 100644 --- 
a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -426,53 +426,47 @@ async fn lowlatency_transport_intermittent(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_tcp_intermittent() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 12000).parse().unwrap(); universal_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_tcp_intermittent_for_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 12100).parse().unwrap(); lowlatency_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn transport_ws_intermittent() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 12010).parse().unwrap(); universal_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn transport_ws_intermittent_for_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 12110).parse().unwrap(); lowlatency_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn transport_unixpipe_intermittent() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/transport_unixpipe_intermittent".parse().unwrap(); universal_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn transport_unixpipe_intermittent_for_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/transport_unixpipe_intermittent_for_lowlatency_transport" .parse() .unwrap(); @@ -480,9 +474,8 @@ async fn transport_unixpipe_intermittent_for_lowlatency_transport() { } #[cfg(all(feature = "transport_vsock", target_os = "linux"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_vsock_intermittent() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); universal_transport_intermittent(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 6fc0864fe2..540a9d27b8 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -11,534 +11,521 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "transport_multilink")] -mod tests { - use std::{convert::TryFrom, sync::Arc, time::Duration}; - - use 
zenoh_core::ztimeout; - use zenoh_link::EndPoint; - use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; - use zenoh_result::ZResult; - use zenoh_transport::{ - multicast::TransportMulticast, unicast::TransportUnicast, DummyTransportPeerEventHandler, - TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, - TransportPeerEventHandler, - }; - - const TIMEOUT: Duration = Duration::from_secs(60); - const SLEEP: Duration = Duration::from_millis(100); - - #[cfg(test)] - #[derive(Default)] - struct SHRouterOpenClose; - - impl TransportEventHandler for SHRouterOpenClose { - fn new_unicast( - &self, - _peer: TransportPeer, - _transport: TransportUnicast, - ) -> ZResult> { - Ok(Arc::new(DummyTransportPeerEventHandler)) - } - - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult> { - panic!(); - } +#![cfg(feature = "transport_multilink")] +use std::{convert::TryFrom, sync::Arc, time::Duration}; + +use zenoh_core::ztimeout; +use zenoh_link::EndPoint; +use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; +use zenoh_result::ZResult; +use zenoh_transport::{ + multicast::TransportMulticast, unicast::TransportUnicast, DummyTransportPeerEventHandler, + TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, + TransportPeerEventHandler, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const SLEEP: Duration = Duration::from_millis(100); + +#[cfg(test)] +#[derive(Default)] +struct SHRouterOpenClose; + +impl TransportEventHandler for SHRouterOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(DummyTransportPeerEventHandler)) } - // Transport Handler for the client - struct SHClientOpenClose {} - - impl SHClientOpenClose { - fn new() -> Self { - Self {} - } + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); } +} - impl TransportEventHandler for SHClientOpenClose { - fn new_unicast( - &self, - _peer: TransportPeer, - _transport: TransportUnicast, - ) -> ZResult> { - Ok(Arc::new(DummyTransportPeerEventHandler)) - } +// Transport Handler for the client +struct SHClientOpenClose {} - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult> { - panic!(); - } +impl SHClientOpenClose { + fn new() -> Self { + Self {} } +} - async fn multilink_transport(endpoint: &EndPoint) { - /* [ROUTER] */ - let router_id = ZenohIdProto::try_from([1]).unwrap(); - - let router_handler = Arc::new(SHRouterOpenClose); - // Create the router transport manager - let unicast = TransportManager::config_unicast() - .max_links(2) - .max_sessions(2); - let router_manager = TransportManager::builder() - .whatami(WhatAmI::Router) - .zid(router_id) - .unicast(unicast) - .build(router_handler.clone()) - .unwrap(); - - /* [CLIENT] */ - let client01_id = ZenohIdProto::try_from([2]).unwrap(); - let client02_id = ZenohIdProto::try_from([3]).unwrap(); - - // Create the transport transport manager for the first client - let unicast = TransportManager::config_unicast() - .max_links(2) - .max_sessions(1); - let client01_manager = TransportManager::builder() - .whatami(WhatAmI::Client) - .zid(client01_id) - .unicast(unicast) - .build(Arc::new(SHClientOpenClose::new())) - .unwrap(); - - // Create the transport transport manager for the second client - let unicast = TransportManager::config_unicast() - .max_links(1) - .max_sessions(1); - let client02_manager = TransportManager::builder() - .whatami(WhatAmI::Client) - 
.zid(client02_id) - .unicast(unicast) - .build(Arc::new(SHClientOpenClose::new())) - .unwrap(); + +impl TransportEventHandler for SHClientOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(DummyTransportPeerEventHandler)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } +} + +async fn multilink_transport(endpoint: &EndPoint) { + /* [ROUTER] */ + let router_id = ZenohIdProto::try_from([1]).unwrap(); + + let router_handler = Arc::new(SHRouterOpenClose); + // Create the router transport manager + let unicast = TransportManager::config_unicast() + .max_links(2) + .max_sessions(2); + let router_manager = TransportManager::builder() + .whatami(WhatAmI::Router) + .zid(router_id) + .unicast(unicast) + .build(router_handler.clone()) + .unwrap(); + + /* [CLIENT] */ + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); + + // Create the transport manager for the first client + let unicast = TransportManager::config_unicast() + .max_links(2) + .max_sessions(1); + let client01_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client01_id) + .unicast(unicast) + .build(Arc::new(SHClientOpenClose::new())) + .unwrap(); + + // Create the transport manager for the second client + let unicast = TransportManager::config_unicast() + .max_links(1) + .max_sessions(1); + let client02_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client02_id) + .unicast(unicast) + .build(Arc::new(SHClientOpenClose::new())) + .unwrap(); + + // Create the transport manager for the third client spoofing the first + let unicast = TransportManager::config_unicast() + .max_links(2) + .max_sessions(1); + let client03_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client01_id) + .unicast(unicast) + 
.build(Arc::new(SHClientOpenClose::new())) + .unwrap(); + + /* [1] */ + println!("\nTransport Open Close [1a1]"); + // Add the locator on the router + let res = ztimeout!(router_manager.add_listener(endpoint.clone())); + println!("Transport Open Close [1a1]: {res:?}"); + assert!(res.is_ok()); + println!("Transport Open Close [1a2]"); + let locators = ztimeout!(router_manager.get_listeners()); + println!("Transport Open Close [1a2]: {locators:?}"); + assert_eq!(locators.len(), 1); + + // Open a first transport from the client to the router + // -> This should be accepted + let mut links_num = 1; + + println!("Transport Open Close [1c1]"); + let res = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [1c2]: {res:?}"); + assert!(res.is_ok()); + let c_ses1 = res.unwrap(); + println!("Transport Open Close [1d1]"); + let transports = ztimeout!(client01_manager.get_transports_unicast()); + println!("Transport Open Close [1d2]: {transports:?}"); + assert_eq!(transports.len(), 1); + assert_eq!(c_ses1.get_zid().unwrap(), router_id); + println!("Transport Open Close [1e1]"); + let links = c_ses1.get_links().unwrap(); + println!("Transport Open Close [1e2]: {links:?}"); + assert_eq!(links.len(), links_num); + + // Verify that the transport has been open on the router + println!("Transport Open Close [1f1]"); + ztimeout!(async { + loop { + let transports = router_manager.get_transports_unicast().await; + let s = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id); - // Verify that the transport has been open on the router - println!("Transport Open Close [1f1]"); - ztimeout!(async { - loop { - let transports = router_manager.get_transports_unicast().await; - let s = transports - .iter() - .find(|s| s.get_zid().unwrap() == client01_id); - - match s { - Some(s) => { - let links = s.get_links().unwrap(); - assert_eq!(links.len(), links_num); - break; - } - None => tokio::time::sleep(SLEEP).await, - } - } - }); - - /* [2] */ - // Open a second transport from the client to the router - // -> This should be accepted - links_num = 2; - - println!("\nTransport Open Close [2a1]"); - let res = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())); - println!("Transport Open Close [2a2]: {res:?}"); - assert!(res.is_ok()); - let c_ses2 = res.unwrap(); - println!("Transport Open Close [2b1]"); - let transports = ztimeout!(client01_manager.get_transports_unicast()); - println!("Transport Open Close [2b2]: {transports:?}"); - assert_eq!(transports.len(), 1); - assert_eq!(c_ses2.get_zid().unwrap(), router_id); - println!("Transport Open Close [2c1]"); - let links = c_ses2.get_links().unwrap(); - println!("Transport Open Close [2c2]: {links:?}"); - assert_eq!(links.len(), links_num); - assert_eq!(c_ses2, c_ses1); - - // Verify that the transport has been open on the router - println!("Transport Open Close [2d1]"); - ztimeout!(async { - loop { - let transports = router_manager.get_transports_unicast().await; - let s = transports - .iter() - .find(|s| s.get_zid().unwrap() == client01_id) - .unwrap(); - - let links = s.get_links().unwrap(); - if links.len() == links_num { + match s { + Some(s) => { + let links = s.get_links().unwrap(); + assert_eq!(links.len(), links_num); break; } - tokio::time::sleep(SLEEP).await; + None => tokio::time::sleep(SLEEP).await, } - }); - - /* [3] */ - // Open transport -> This should be rejected because - // of the maximum limit of links per transport - println!("\nTransport Open Close [3a1]"); - let res = 
ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())); - println!("Transport Open Close [3a2]: {res:?}"); - assert!(res.is_err()); - println!("Transport Open Close [3b1]"); - let transports = ztimeout!(client01_manager.get_transports_unicast()); - println!("Transport Open Close [3b2]: {transports:?}"); - assert_eq!(transports.len(), 1); - assert_eq!(c_ses1.get_zid().unwrap(), router_id); - println!("Transport Open Close [3c1]"); - let links = c_ses1.get_links().unwrap(); - println!("Transport Open Close [3c2]: {links:?}"); - assert_eq!(links.len(), links_num); - - // Verify that the transport has not been open on the router - println!("Transport Open Close [3d1]"); - ztimeout!(async { - tokio::time::sleep(SLEEP).await; + } + }); + + /* [2] */ + // Open a second transport from the client to the router + // -> This should be accepted + links_num = 2; + + println!("\nTransport Open Close [2a1]"); + let res = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [2a2]: {res:?}"); + assert!(res.is_ok()); + let c_ses2 = res.unwrap(); + println!("Transport Open Close [2b1]"); + let transports = ztimeout!(client01_manager.get_transports_unicast()); + println!("Transport Open Close [2b2]: {transports:?}"); + assert_eq!(transports.len(), 1); + assert_eq!(c_ses2.get_zid().unwrap(), router_id); + println!("Transport Open Close [2c1]"); + let links = c_ses2.get_links().unwrap(); + println!("Transport Open Close [2c2]: {links:?}"); + assert_eq!(links.len(), links_num); + assert_eq!(c_ses2, c_ses1); + + // Verify that the transport has been open on the router + println!("Transport Open Close [2d1]"); + ztimeout!(async { + loop { let transports = router_manager.get_transports_unicast().await; - assert_eq!(transports.len(), 1); let s = transports .iter() .find(|s| s.get_zid().unwrap() == client01_id) .unwrap(); + let links = s.get_links().unwrap(); - assert_eq!(links.len(), links_num); - }); - - /* [4] */ - // Close the open transport on the client - println!("\nTransport Open Close [4a1]"); - let res = ztimeout!(c_ses1.close()); - println!("Transport Open Close [4a2]: {res:?}"); - assert!(res.is_ok()); - println!("Transport Open Close [4b1]"); - let transports = ztimeout!(client01_manager.get_transports_unicast()); - println!("Transport Open Close [4b2]: {transports:?}"); - assert_eq!(transports.len(), 0); - - // Verify that the transport has been closed also on the router - println!("Transport Open Close [4c1]"); - ztimeout!(async { - loop { - let transports = router_manager.get_transports_unicast().await; - let index = transports - .iter() - .find(|s| s.get_zid().unwrap() == client01_id); - if index.is_none() { - break; - } - tokio::time::sleep(SLEEP).await; + if links.len() == links_num { + break; } - }); - - /* [5] */ - // Open transport -> This should be accepted because - // the number of links should be back to 0 - links_num = 1; - - println!("\nTransport Open Close [5a1]"); - let res = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())); - println!("Transport Open Close [5a2]: {res:?}"); - assert!(res.is_ok()); - let c_ses3 = res.unwrap(); - println!("Transport Open Close [5b1]"); - let transports = ztimeout!(client01_manager.get_transports_unicast()); - println!("Transport Open Close [5b2]: {transports:?}"); + tokio::time::sleep(SLEEP).await; + } + }); + + /* [3] */ + // Open transport -> This should be rejected because + // of the maximum limit of links per transport + println!("\nTransport Open Close [3a1]"); + let 
res = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [3a2]: {res:?}"); + assert!(res.is_err()); + println!("Transport Open Close [3b1]"); + let transports = ztimeout!(client01_manager.get_transports_unicast()); + println!("Transport Open Close [3b2]: {transports:?}"); + assert_eq!(transports.len(), 1); + assert_eq!(c_ses1.get_zid().unwrap(), router_id); + println!("Transport Open Close [3c1]"); + let links = c_ses1.get_links().unwrap(); + println!("Transport Open Close [3c2]: {links:?}"); + assert_eq!(links.len(), links_num); + + // Verify that the transport has not been open on the router + println!("Transport Open Close [3d1]"); + ztimeout!(async { + tokio::time::sleep(SLEEP).await; + let transports = router_manager.get_transports_unicast().await; assert_eq!(transports.len(), 1); - assert_eq!(c_ses3.get_zid().unwrap(), router_id); - println!("Transport Open Close [5c1]"); - let links = c_ses3.get_links().unwrap(); - println!("Transport Open Close [5c2]: {links:?}"); + let s = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id) + .unwrap(); + let links = s.get_links().unwrap(); assert_eq!(links.len(), links_num); - - // Verify that the transport has been open on the router - println!("Transport Open Close [5d1]"); - ztimeout!(async { - tokio::time::sleep(SLEEP).await; + }); + + /* [4] */ + // Close the open transport on the client + println!("\nTransport Open Close [4a1]"); + let res = ztimeout!(c_ses1.close()); + println!("Transport Open Close [4a2]: {res:?}"); + assert!(res.is_ok()); + println!("Transport Open Close [4b1]"); + let transports = ztimeout!(client01_manager.get_transports_unicast()); + println!("Transport Open Close [4b2]: {transports:?}"); + assert_eq!(transports.len(), 0); + + // Verify that the transport has been closed also on the router + println!("Transport Open Close [4c1]"); + ztimeout!(async { + loop { let transports = router_manager.get_transports_unicast().await; - assert_eq!(transports.len(), 1); - let s = transports + let index = transports .iter() - .find(|s| s.get_zid().unwrap() == client01_id) - .unwrap(); - let links = s.get_links().unwrap(); - assert_eq!(links.len(), links_num); - }); - - /* [6] */ - // Open transport -> This should be rejected because - // of the maximum limit of transports - println!("\nTransport Open Close [6a1]"); - let res = ztimeout!(client02_manager.open_transport_unicast(endpoint.clone())); - println!("Transport Open Close [6a2]: {res:?}"); - assert!(res.is_ok()); - let c_ses4 = res.unwrap(); - println!("Transport Open Close [6b1]"); - let transports = ztimeout!(client02_manager.get_transports_unicast()); - println!("Transport Open Close [6b2]: {transports:?}"); + .find(|s| s.get_zid().unwrap() == client01_id); + if index.is_none() { + break; + } + tokio::time::sleep(SLEEP).await; + } + }); + + /* [5] */ + // Open transport -> This should be accepted because + // the number of links should be back to 0 + links_num = 1; + + println!("\nTransport Open Close [5a1]"); + let res = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [5a2]: {res:?}"); + assert!(res.is_ok()); + let c_ses3 = res.unwrap(); + println!("Transport Open Close [5b1]"); + let transports = ztimeout!(client01_manager.get_transports_unicast()); + println!("Transport Open Close [5b2]: {transports:?}"); + assert_eq!(transports.len(), 1); + assert_eq!(c_ses3.get_zid().unwrap(), router_id); + println!("Transport Open Close [5c1]"); + let links = 
c_ses3.get_links().unwrap(); + println!("Transport Open Close [5c2]: {links:?}"); + assert_eq!(links.len(), links_num); + + // Verify that the transport has been open on the router + println!("Transport Open Close [5d1]"); + ztimeout!(async { + tokio::time::sleep(SLEEP).await; + let transports = router_manager.get_transports_unicast().await; assert_eq!(transports.len(), 1); - assert_eq!(c_ses4.get_zid().unwrap(), router_id); - println!("Transport Open Close [6c1]"); - let links = c_ses4.get_links().unwrap(); - println!("Transport Open Close [6c2]: {links:?}"); + let s = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id) + .unwrap(); + let links = s.get_links().unwrap(); assert_eq!(links.len(), links_num); - - // Open transport -> This should be rejected because - // of the maximum limit of transports - println!("\nTransport Open Close [6d1]"); - let res = ztimeout!(client02_manager.open_transport_unicast(endpoint.clone())); - println!("Transport Open Close [6d2]: {res:?}"); - assert!(res.is_err()); - println!("Transport Open Close [6e1]"); - let transports = ztimeout!(client02_manager.get_transports_unicast()); - println!("Transport Open Close [6e2]: {transports:?}"); - assert_eq!(transports.len(), 1); - - // Verify that the transport has been open on the router - println!("Transport Open Close [6f1]"); - ztimeout!(async { + }); + + /* [6] */ + // Open transport -> This should be rejected because + // of the maximum limit of transports + println!("\nTransport Open Close [6a1]"); + let res = ztimeout!(client02_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [6a2]: {res:?}"); + assert!(res.is_ok()); + let c_ses4 = res.unwrap(); + println!("Transport Open Close [6b1]"); + let transports = ztimeout!(client02_manager.get_transports_unicast()); + println!("Transport Open Close [6b2]: {transports:?}"); + assert_eq!(transports.len(), 1); + assert_eq!(c_ses4.get_zid().unwrap(), router_id); + println!("Transport Open Close [6c1]"); + let links = c_ses4.get_links().unwrap(); + println!("Transport Open Close [6c2]: {links:?}"); + assert_eq!(links.len(), links_num); + + // Open transport -> This should be rejected because + // of the maximum limit of transports + println!("\nTransport Open Close [6d1]"); + let res = ztimeout!(client02_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [6d2]: {res:?}"); + assert!(res.is_err()); + println!("Transport Open Close [6e1]"); + let transports = ztimeout!(client02_manager.get_transports_unicast()); + println!("Transport Open Close [6e2]: {transports:?}"); + assert_eq!(transports.len(), 1); + + // Verify that the transport has been open on the router + println!("Transport Open Close [6f1]"); + ztimeout!(async { + tokio::time::sleep(SLEEP).await; + let transports = ztimeout!(router_manager.get_transports_unicast()); + assert_eq!(transports.len(), 2); + let s = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id) + .unwrap(); + let links = s.get_links().unwrap(); + assert_eq!(links.len(), links_num); + }); + + /* [7] */ + // Try to spoof the first client + // -> This should be rejected + println!("\nTransport Open Close [7a1]"); + let res = ztimeout!(client03_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [7a2]: {res:?}"); + assert!(res.is_err()); + println!("Transport Open Close [7b1]"); + let transports = ztimeout!(client03_manager.get_transports_unicast()); + println!("Transport Open Close [7b2]: {transports:?}"); + 
assert_eq!(transports.len(), 0); + + /* [8] */ + // Close the open transport on the client + println!("\nTransport Open Close [8a1]"); + let res = ztimeout!(c_ses3.close()); + println!("Transport Open Close [8a2]: {res:?}"); + assert!(res.is_ok()); + println!("\nTransport Open Close [8b1]"); + let res = ztimeout!(c_ses4.close()); + println!("Transport Open Close [8b2]: {res:?}"); + assert!(res.is_ok()); + println!("Transport Open Close [8c1]"); + let transports = ztimeout!(client01_manager.get_transports_unicast()); + println!("Transport Open Close [8c2]: {transports:?}"); + assert_eq!(transports.len(), 0); + + // Verify that the transport has been closed also on the router + println!("Transport Open Close [8d1]"); + ztimeout!(async { + loop { + let transports = router_manager.get_transports_unicast().await; + if transports.is_empty() { + break; + } tokio::time::sleep(SLEEP).await; - let transports = ztimeout!(router_manager.get_transports_unicast()); - assert_eq!(transports.len(), 2); + } + }); + + /* [9] */ + // Open transport -> This should be accepted because + // the number of transports should be back to 0 + links_num = 1; + + println!("\nTransport Open Close [9a1]"); + let res = ztimeout!(client02_manager.open_transport_unicast(endpoint.clone())); + println!("Transport Open Close [9a2]: {res:?}"); + assert!(res.is_ok()); + let c_ses4 = res.unwrap(); + println!("Transport Open Close [9b1]"); + let transports = ztimeout!(client02_manager.get_transports_unicast()); + println!("Transport Open Close [9b2]: {transports:?}"); + assert_eq!(transports.len(), 1); + println!("Transport Open Close [9c1]"); + let links = c_ses4.get_links().unwrap(); + println!("Transport Open Close [9c2]: {links:?}"); + assert_eq!(links.len(), links_num); + + // Verify that the transport has been open on the router + println!("Transport Open Close [9d1]"); + ztimeout!(async { + loop { + let transports = router_manager.get_transports_unicast().await; let s = transports .iter() - .find(|s| s.get_zid().unwrap() == client01_id) - .unwrap(); - let links = s.get_links().unwrap(); - assert_eq!(links.len(), links_num); - }); - - /* [7] */ - // Try to spoof the first client - // -> This should be rejected - println!("\nTransport Open Close [7a1]"); - let res = ztimeout!(client03_manager.open_transport_unicast(endpoint.clone())); - println!("Transport Open Close [7a2]: {res:?}"); - assert!(res.is_err()); - println!("Transport Open Close [7b1]"); - let transports = ztimeout!(client03_manager.get_transports_unicast()); - println!("Transport Open Close [7b2]: {transports:?}"); - assert_eq!(transports.len(), 0); - - /* [8] */ - // Close the open transport on the client - println!("\nTransport Open Close [8a1]"); - let res = ztimeout!(c_ses3.close()); - println!("Transport Open Close [8a2]: {res:?}"); - assert!(res.is_ok()); - println!("\nTransport Open Close [8b1]"); - let res = ztimeout!(c_ses4.close()); - println!("Transport Open Close [8b2]: {res:?}"); - assert!(res.is_ok()); - println!("Transport Open Close [8c1]"); - let transports = ztimeout!(client01_manager.get_transports_unicast()); - println!("Transport Open Close [8c2]: {transports:?}"); - assert_eq!(transports.len(), 0); - - // Verify that the transport has been closed also on the router - println!("Transport Open Close [8d1]"); - ztimeout!(async { - loop { - let transports = router_manager.get_transports_unicast().await; - if transports.is_empty() { + .find(|s| s.get_zid().unwrap() == client02_id); + match s { + Some(s) => { + let links = s.get_links().unwrap(); 
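The verification steps in this test ([1f1], [2d1], [8d1], and [9d1] alike) all rely on the same convergence idiom: poll the router's transport list in a loop, sleeping SLEEP between attempts, with the whole loop bounded by the ztimeout! macro. A minimal self-contained sketch of that idiom — not taken from this patch, using plain tokio::time::timeout in place of the crate-internal ztimeout! and a hypothetical remote_transport_count() probe standing in for router_manager.get_transports_unicast():

    use std::time::Duration;

    const TIMEOUT: Duration = Duration::from_secs(60);
    const SLEEP: Duration = Duration::from_millis(100);

    // Hypothetical probe; in the tests above this is a query on the
    // router-side TransportManager.
    async fn remote_transport_count() -> usize {
        1
    }

    #[tokio::main]
    async fn main() {
        // Bound the retry loop as a whole, then poll-and-sleep until the
        // remote side has converged to the expected state.
        tokio::time::timeout(TIMEOUT, async {
            while remote_transport_count().await != 1 {
                tokio::time::sleep(SLEEP).await;
            }
        })
        .await
        .expect("router state did not converge within TIMEOUT");
    }

The sleep keeps the loop from busy-waiting, and the outer timeout turns a hung handshake into a test failure rather than a stuck CI job.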
+ assert_eq!(links.len(), links_num); break; } - tokio::time::sleep(SLEEP).await; + None => tokio::time::sleep(SLEEP).await, } - }); - - /* [9] */ - // Open transport -> This should be accepted because - // the number of transports should be back to 0 - links_num = 1; - - println!("\nTransport Open Close [9a1]"); - let res = ztimeout!(client02_manager.open_transport_unicast(endpoint.clone())); - println!("Transport Open Close [9a2]: {res:?}"); - assert!(res.is_ok()); - let c_ses4 = res.unwrap(); - println!("Transport Open Close [9b1]"); - let transports = ztimeout!(client02_manager.get_transports_unicast()); - println!("Transport Open Close [9b2]: {transports:?}"); - assert_eq!(transports.len(), 1); - println!("Transport Open Close [9c1]"); - let links = c_ses4.get_links().unwrap(); - println!("Transport Open Close [9c2]: {links:?}"); - assert_eq!(links.len(), links_num); - - // Verify that the transport has been open on the router - println!("Transport Open Close [9d1]"); - ztimeout!(async { - loop { - let transports = router_manager.get_transports_unicast().await; - let s = transports - .iter() - .find(|s| s.get_zid().unwrap() == client02_id); - match s { - Some(s) => { - let links = s.get_links().unwrap(); - assert_eq!(links.len(), links_num); - break; - } - None => tokio::time::sleep(SLEEP).await, - } - } - }); - - /* [9] */ - // Close the open transport on the client - println!("Transport Open Close [9a1]"); - let res = ztimeout!(c_ses4.close()); - println!("Transport Open Close [9a2]: {res:?}"); - assert!(res.is_ok()); - println!("Transport Open Close [9b1]"); - let transports = ztimeout!(client02_manager.get_transports_unicast()); - println!("Transport Open Close [9b2]: {transports:?}"); - assert_eq!(transports.len(), 0); - - // Verify that the transport has been closed also on the router - println!("Transport Open Close [9c1]"); - ztimeout!(async { - loop { - let transports = router_manager.get_transports_unicast().await; - if transports.is_empty() { - break; - } - tokio::time::sleep(SLEEP).await; - } - }); - - /* [10] */ - // Perform clean up of the open locators - println!("\nTransport Open Close [10a1]"); - let res = ztimeout!(router_manager.del_listener(endpoint)); - println!("Transport Open Close [10a2]: {res:?}"); - assert!(res.is_ok()); - - ztimeout!(async { - while !router_manager.get_listeners().await.is_empty() { - tokio::time::sleep(SLEEP).await; + } + }); + + /* [9] */ + // Close the open transport on the client + println!("Transport Open Close [9a1]"); + let res = ztimeout!(c_ses4.close()); + println!("Transport Open Close [9a2]: {res:?}"); + assert!(res.is_ok()); + println!("Transport Open Close [9b1]"); + let transports = ztimeout!(client02_manager.get_transports_unicast()); + println!("Transport Open Close [9b2]: {transports:?}"); + assert_eq!(transports.len(), 0); + + // Verify that the transport has been closed also on the router + println!("Transport Open Close [9c1]"); + ztimeout!(async { + loop { + let transports = router_manager.get_transports_unicast().await; + if transports.is_empty() { + break; } - }); - - // Wait a little bit - tokio::time::sleep(SLEEP).await; - - ztimeout!(router_manager.close()); - ztimeout!(client01_manager.close()); - ztimeout!(client02_manager.close()); - - // Wait a little bit - tokio::time::sleep(SLEEP).await; - } + tokio::time::sleep(SLEEP).await; + } + }); - #[cfg(feature = "transport_tcp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn multilink_tcp_only() { - zenoh_util::try_init_log_from_env(); + /* 
[10] */
+    // Perform clean up of the open locators
+    println!("\nTransport Open Close [10a1]");
+    let res = ztimeout!(router_manager.del_listener(endpoint));
+    println!("Transport Open Close [10a2]: {res:?}");
+    assert!(res.is_ok());
-        let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 18000).parse().unwrap();
-        multilink_transport(&endpoint).await;
-    }
+    ztimeout!(async {
+        while !router_manager.get_listeners().await.is_empty() {
+            tokio::time::sleep(SLEEP).await;
+        }
+    });
-    #[cfg(feature = "transport_udp")]
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    async fn multilink_udp_only() {
-        zenoh_util::try_init_log_from_env();
+    // Wait a little bit
+    tokio::time::sleep(SLEEP).await;
-        let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 18010).parse().unwrap();
-        multilink_transport(&endpoint).await;
-    }
+    ztimeout!(router_manager.close());
+    ztimeout!(client01_manager.close());
+    ztimeout!(client02_manager.close());
-    #[cfg(feature = "transport_ws")]
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    #[ignore]
-    async fn multilink_ws_only() {
-        zenoh_util::try_init_log_from_env();
+    // Wait a little bit
+    tokio::time::sleep(SLEEP).await;
+}
-        let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 18020).parse().unwrap();
-        multilink_transport(&endpoint).await;
-    }
+#[cfg(feature = "transport_tcp")]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
+async fn multilink_tcp_only() {
+    let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 18000).parse().unwrap();
+    multilink_transport(&endpoint).await;
+}
-    #[cfg(feature = "transport_unixpipe")]
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    #[ignore]
-    async fn multilink_unixpipe_only() {
-        zenoh_util::try_init_log_from_env();
+#[cfg(feature = "transport_udp")]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
+async fn multilink_udp_only() {
+    let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 18010).parse().unwrap();
+    multilink_transport(&endpoint).await;
+}
-        let endpoint: EndPoint = "unixpipe/multilink_unixpipe_only".parse().unwrap();
-        multilink_transport(&endpoint).await;
-    }
+#[cfg(feature = "transport_ws")]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
+#[ignore]
+async fn multilink_ws_only() {
+    let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 18020).parse().unwrap();
+    multilink_transport(&endpoint).await;
+}
-    #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))]
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    #[ignore]
-    async fn multilink_unix_only() {
-        zenoh_util::try_init_log_from_env();
-
-        let f1 = "zenoh-test-unix-socket-9.sock";
-        let _ = std::fs::remove_file(f1);
-        let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap();
-        multilink_transport(&endpoint).await;
-        let _ = std::fs::remove_file(f1);
-        let _ = std::fs::remove_file(format!("{f1}.lock"));
-    }
+#[cfg(feature = "transport_unixpipe")]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
+#[ignore]
+async fn multilink_unixpipe_only() {
+    let endpoint: EndPoint = "unixpipe/multilink_unixpipe_only".parse().unwrap();
+    multilink_transport(&endpoint).await;
+}
-    #[cfg(feature = "transport_tls")]
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    async fn multilink_tls_only() {
-        use zenoh_link::tls::config::*;
+#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))]
+#[test_log::test(tokio::test(flavor = "multi_thread",
worker_threads = 4))] +#[ignore] +async fn multilink_unix_only() { + let f1 = "zenoh-test-unix-socket-9.sock"; + let _ = std::fs::remove_file(f1); + let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); + multilink_transport(&endpoint).await; + let _ = std::fs::remove_file(f1); + let _ = std::fs::remove_file(format!("{f1}.lock")); +} - zenoh_util::try_init_log_from_env(); +#[cfg(feature = "transport_tls")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn multilink_tls_only() { + use zenoh_link::tls::config::*; - // NOTE: this an auto-generated pair of certificate and key. - // The target domain is localhost, so it has no real - // mapping to any existing domain. The certificate and key - // have been generated using: https://github.com/jsha/minica - let key = "-----BEGIN RSA PRIVATE KEY----- + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU @@ -566,7 +553,7 @@ tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== -----END RSA PRIVATE KEY-----"; - let cert = "-----BEGIN CERTIFICATE----- + let cert = "-----BEGIN CERTIFICATE----- MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB @@ -587,8 +574,8 @@ p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ abY= -----END CERTIFICATE-----"; - // Configure the client - let ca = "-----BEGIN CERTIFICATE----- + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi @@ -609,33 +596,33 @@ Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== -----END CERTIFICATE-----"; - let mut endpoint: EndPoint = format!("tls/localhost:{}", 18030).parse().unwrap(); - endpoint - .config_mut() - .extend_from_iter( - [ - (TLS_ROOT_CA_CERTIFICATE_RAW, ca), - (TLS_SERVER_PRIVATE_KEY_RAW, key), - (TLS_SERVER_CERTIFICATE_RAW, cert), - ] - .iter() - .copied(), - ) - .unwrap(); - - multilink_transport(&endpoint).await; - } + let mut endpoint: EndPoint = format!("tls/localhost:{}", 18030).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + multilink_transport(&endpoint).await; +} - #[cfg(feature = "transport_quic")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn multilink_quic_only() { - use zenoh_link::quic::config::*; +#[cfg(feature = "transport_quic")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn multilink_quic_only() { + use zenoh_link::quic::config::*; - // NOTE: this an auto-generated pair of certificate and key. 
- // The target domain is localhost, so it has no real - // mapping to any existing domain. The certificate and key - // have been generated using: https://github.com/jsha/minica - let key = "-----BEGIN RSA PRIVATE KEY----- + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU @@ -663,7 +650,7 @@ tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== -----END RSA PRIVATE KEY-----"; - let cert = "-----BEGIN CERTIFICATE----- + let cert = "-----BEGIN CERTIFICATE----- MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB @@ -684,8 +671,8 @@ p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ abY= -----END CERTIFICATE-----"; - // Configure the client - let ca = "-----BEGIN CERTIFICATE----- + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi @@ -706,30 +693,27 @@ Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== -----END CERTIFICATE-----"; - // Define the locator - let mut endpoint: EndPoint = format!("quic/localhost:{}", 18040).parse().unwrap(); - endpoint - .config_mut() - .extend_from_iter( - [ - (TLS_ROOT_CA_CERTIFICATE_RAW, ca), - (TLS_SERVER_PRIVATE_KEY_RAW, key), - (TLS_SERVER_CERTIFICATE_RAW, cert), - ] - .iter() - .copied(), - ) - .unwrap(); - - multilink_transport(&endpoint).await; - } - - #[cfg(all(feature = "transport_vsock", target_os = "linux"))] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn multilink_vsock_only() { - zenoh_util::try_init_log_from_env(); + // Define the locator + let mut endpoint: EndPoint = format!("quic/localhost:{}", 18040).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + multilink_transport(&endpoint).await; +} - let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); - multilink_transport(&endpoint).await; - } +#[cfg(all(feature = "transport_vsock", target_os = "linux"))] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn multilink_vsock_only() { + let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); + multilink_transport(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 8909d74402..1fb08ea74b 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -474,69 +474,61 @@ async fn openclose_lowlatency_transport(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] 
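This -/+ pair is the one mechanical change applied to every test in these files: the hand-rolled zenoh_util::try_init_log_from_env() call is dropped from the test body, and the tokio test attribute is wrapped in test_log::test, which initializes the logger before the body runs (test-log 0.2.16 is pinned in the workspace Cargo.toml for exactly this). Schematically, as a sketch rather than lines quoted from the patch:

    // Before: each test initializes logging by hand, and silently gets
    // no log output if it forgets to.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    async fn before() {
        zenoh_util::try_init_log_from_env();
        // ... test body ...
    }

    // After: test_log::test wraps the inner test macro and installs the
    // logger itself, so the body shrinks and the init cannot be forgotten.
    #[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
    async fn after() {
        // ... test body ...
    }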
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_tcp_only() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap();
     openclose_universal_transport(&endpoint).await;
 }
 
 #[cfg(feature = "transport_tcp")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_tcp_only_with_lowlatency_transport() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap();
     openclose_lowlatency_transport(&endpoint).await;
 }
 
 #[cfg(feature = "transport_udp")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_udp_only() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap();
     openclose_universal_transport(&endpoint).await;
 }
 
 #[cfg(feature = "transport_udp")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_udp_only_with_lowlatency_transport() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap();
     openclose_lowlatency_transport(&endpoint).await;
 }
 
 #[cfg(feature = "transport_ws")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 #[ignore]
 async fn openclose_ws_only() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap();
     openclose_universal_transport(&endpoint).await;
 }
 
 #[cfg(feature = "transport_ws")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 #[ignore]
 async fn openclose_ws_only_with_lowlatency_transport() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap();
     openclose_lowlatency_transport(&endpoint).await;
 }
 
 #[cfg(feature = "transport_unixpipe")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 #[ignore]
 async fn openclose_unixpipe_only() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = "unixpipe/openclose_unixpipe_only".parse().unwrap();
     openclose_universal_transport(&endpoint).await;
 }
 
 #[cfg(feature = "transport_unixpipe")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 #[ignore]
 async fn openclose_unixpipe_only_with_lowlatency_transport() {
-    zenoh_util::try_init_log_from_env();
     let endpoint: EndPoint = "unixpipe/openclose_unixpipe_only_with_lowlatency_transport"
         .parse()
         .unwrap();
@@ -544,10 +536,9 @@ async fn openclose_unixpipe_only_with_lowlatency_transport() {
 }
 
 #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 #[ignore]
 async fn openclose_unix_only() {
-    zenoh_util::try_init_log_from_env();
     let f1 = "zenoh-test-unix-socket-9.sock";
     let _ = std::fs::remove_file(f1);
     let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap();
@@ -557,11 +548,10 @@ async fn openclose_unix_only() {
 }
 
 #[cfg(feature = "transport_tls")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_tls_only() {
     use zenoh_link::tls::config::*;
 
-    zenoh_util::try_init_log_from_env();
     // NOTE: this an auto-generated pair of certificate and key.
     // The target domain is localhost, so it has no real
     // mapping to any existing domain. The certificate and key
@@ -655,7 +645,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg==
 }
 
 #[cfg(feature = "transport_quic")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_quic_only() {
     use zenoh_link::quic::config::*;
 
@@ -755,12 +745,10 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg==
 #[cfg(feature = "transport_tcp")]
 #[cfg(target_os = "linux")]
 #[should_panic(expected = "Elapsed")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_tcp_only_connect_with_interface_restriction() {
     let addrs = get_ipv4_ipaddrs(None);
 
-    zenoh_util::try_init_log_from_env();
-
     let listen_endpoint: EndPoint = format!("tcp/{}:{}", addrs[0], 13001).parse().unwrap();
 
     let connect_endpoint: EndPoint = format!("tcp/{}:{}#iface=lo", addrs[0], 13001)
@@ -774,12 +762,10 @@ async fn openclose_tcp_only_connect_with_interface_restriction() {
 
 #[cfg(feature = "transport_tcp")]
 #[cfg(target_os = "linux")]
 #[should_panic(expected = "assertion failed: open_res.is_ok()")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_tcp_only_listen_with_interface_restriction() {
     let addrs = get_ipv4_ipaddrs(None);
 
-    zenoh_util::try_init_log_from_env();
-
     let listen_endpoint: EndPoint = format!("tcp/{}:{}#iface=lo", addrs[0], 13002)
         .parse()
         .unwrap();
@@ -793,12 +779,10 @@ async fn openclose_tcp_only_listen_with_interface_restriction() {
 
 #[cfg(feature = "transport_udp")]
 #[cfg(target_os = "linux")]
 #[should_panic(expected = "Elapsed")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_udp_only_connect_with_interface_restriction() {
     let addrs = get_ipv4_ipaddrs(None);
 
-    zenoh_util::try_init_log_from_env();
-
     let listen_endpoint: EndPoint = format!("udp/{}:{}", addrs[0], 13003).parse().unwrap();
 
     let connect_endpoint: EndPoint = format!("udp/{}:{}#iface=lo", addrs[0], 13003)
@@ -812,11 +796,10 @@ async fn openclose_udp_only_connect_with_interface_restriction() {
 
 #[cfg(feature = "transport_udp")]
 #[cfg(target_os = "linux")]
 #[should_panic(expected = "Elapsed")]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_udp_only_listen_with_interface_restriction() {
     let addrs = get_ipv4_ipaddrs(None);
 
-    zenoh_util::try_init_log_from_env();
     let listen_endpoint: EndPoint = format!("udp/{}:{}#iface=lo", addrs[0], 13004)
         .parse()
         .unwrap();
@@ -828,9 +811,8 @@ async fn openclose_udp_only_listen_with_interface_restriction() {
 }
 
 #[cfg(all(feature = "transport_vsock", target_os = "linux"))]
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn openclose_vsock() {
-
zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); openclose_lowlatency_transport(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index 708a9fad3b..4e692d5b08 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -331,9 +331,8 @@ async fn run(endpoints: &[EndPoint]) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn priorities_tcp_only() { - zenoh_util::try_init_log_from_env(); // Define the locators let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 10000).parse().unwrap()]; // Run @@ -341,10 +340,9 @@ async fn priorities_tcp_only() { } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn conduits_unixpipe_only() { - zenoh_util::try_init_log_from_env(); // Define the locators let endpoints: Vec = vec!["unixpipe/conduits_unixpipe_only" .to_string() @@ -355,9 +353,8 @@ async fn conduits_unixpipe_only() { } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn priorities_ws_only() { - zenoh_util::try_init_log_from_env(); // Define the locators let endpoints: Vec = vec![format!("ws/127.0.0.1:{}", 10010).parse().unwrap()]; // Run diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 8c06a17f6d..414a85639c 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -11,399 +11,389 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -mod tests { - use std::{ - any::Any, - convert::TryFrom, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, +#![cfg(feature = "shared-memory")] +use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +use zenoh_buffers::buffer::SplitBuffer; +use zenoh_core::ztimeout; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, + network::{ + push::ext::{NodeIdType, QoSType}, + NetworkBody, NetworkMessage, Push, + }, + zenoh::{PushBody, Put}, +}; +use zenoh_result::ZResult; +use zenoh_shm::{ + api::{ + protocol_implementations::posix::{ + posix_shm_provider_backend::PosixShmProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, - time::Duration, - }; - - use zenoh_buffers::buffer::SplitBuffer; - use zenoh_core::ztimeout; - use zenoh_link::Link; - use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, - network::{ - push::ext::{NodeIdType, QoSType}, - NetworkBody, NetworkMessage, Push, - }, - zenoh::{PushBody, Put}, - }; - use zenoh_result::ZResult; - use zenoh_shm::{ - api::{ - protocol_implementations::posix::{ - posix_shm_provider_backend::PosixShmProviderBackend, protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shm_provider::{BlockOn, GarbageCollect, ShmProviderBuilder}, - }, - ShmBufInner, - }; - use zenoh_transport::{ - multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, - TransportManager, TransportMulticastEventHandler, TransportPeer, 
TransportPeerEventHandler, - }; - - const TIMEOUT: Duration = Duration::from_secs(60); - const SLEEP: Duration = Duration::from_secs(1); - - const MSG_COUNT: usize = 1_000; - const MSG_SIZE: usize = 1_024; - - // Transport Handler for the router - struct SHPeer { - count: Arc, - is_shm: bool, - } - - impl SHPeer { - fn new(is_shm: bool) -> Self { - Self { - count: Arc::new(AtomicUsize::new(0)), - is_shm, - } - } + provider::shm_provider::{BlockOn, GarbageCollect, ShmProviderBuilder}, + }, + ShmBufInner, +}; +use zenoh_transport::{ + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const SLEEP: Duration = Duration::from_secs(1); + +const MSG_COUNT: usize = 1_000; +const MSG_SIZE: usize = 1_024; + +// Transport Handler for the router +struct SHPeer { + count: Arc, + is_shm: bool, +} - fn get_count(&self) -> usize { - self.count.load(Ordering::SeqCst) +impl SHPeer { + fn new(is_shm: bool) -> Self { + Self { + count: Arc::new(AtomicUsize::new(0)), + is_shm, } } - impl TransportEventHandler for SHPeer { - fn new_unicast( - &self, - _peer: TransportPeer, - _transport: TransportUnicast, - ) -> ZResult> { - let arc = Arc::new(SCPeer::new(self.count.clone(), self.is_shm)); - Ok(arc) - } + fn get_count(&self) -> usize { + self.count.load(Ordering::SeqCst) + } +} - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult> { - panic!(); - } +impl TransportEventHandler for SHPeer { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + let arc = Arc::new(SCPeer::new(self.count.clone(), self.is_shm)); + Ok(arc) } - // Transport Callback for the peer - pub struct SCPeer { - count: Arc, - is_shm: bool, + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); } +} - impl SCPeer { - pub fn new(count: Arc, is_shm: bool) -> Self { - Self { count, is_shm } - } +// Transport Callback for the peer +pub struct SCPeer { + count: Arc, + is_shm: bool, +} + +impl SCPeer { + pub fn new(count: Arc, is_shm: bool) -> Self { + Self { count, is_shm } } +} - impl TransportPeerEventHandler for SCPeer { - fn handle_message(&self, message: NetworkMessage) -> ZResult<()> { - if self.is_shm { - print!("s"); - } else { - print!("n"); - } - let payload = match message.body { - NetworkBody::Push(m) => match m.payload { - PushBody::Put(Put { payload, .. }) => { - for zs in payload.zslices() { - if self.is_shm && zs.downcast_ref::().is_none() { - panic!("Expected ShmBufInner: {:?}", zs); - } else if !self.is_shm && zs.downcast_ref::().is_some() { - panic!("Not Expected ShmBufInner: {:?}", zs); - } +impl TransportPeerEventHandler for SCPeer { + fn handle_message(&self, message: NetworkMessage) -> ZResult<()> { + if self.is_shm { + print!("s"); + } else { + print!("n"); + } + let payload = match message.body { + NetworkBody::Push(m) => match m.payload { + PushBody::Put(Put { payload, .. 
}) => { + for zs in payload.zslices() { + if self.is_shm && zs.downcast_ref::().is_none() { + panic!("Expected ShmBufInner: {:?}", zs); + } else if !self.is_shm && zs.downcast_ref::().is_some() { + panic!("Not Expected ShmBufInner: {:?}", zs); } - payload.contiguous().into_owned() } - _ => panic!("Unsolicited message"), - }, + payload.contiguous().into_owned() + } _ => panic!("Unsolicited message"), - }; - assert_eq!(payload.len(), MSG_SIZE); - - let mut count_bytes = [0_u8; 8]; - count_bytes.copy_from_slice(&payload[0..8]); - let msg_count = u64::from_le_bytes(count_bytes) as usize; - let sex_count = self.count.fetch_add(1, Ordering::SeqCst); - assert_eq!(msg_count, sex_count); - print!("{msg_count} "); - - Ok(()) - } + }, + _ => panic!("Unsolicited message"), + }; + assert_eq!(payload.len(), MSG_SIZE); + + let mut count_bytes = [0_u8; 8]; + count_bytes.copy_from_slice(&payload[0..8]); + let msg_count = u64::from_le_bytes(count_bytes) as usize; + let sex_count = self.count.fetch_add(1, Ordering::SeqCst); + assert_eq!(msg_count, sex_count); + print!("{msg_count} "); + + Ok(()) + } - fn new_link(&self, _link: Link) {} - fn del_link(&self, _link: Link) {} - fn closing(&self) {} - fn closed(&self) {} + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} - fn as_any(&self) -> &dyn Any { - self - } + fn as_any(&self) -> &dyn Any { + self } +} - async fn run(endpoint: &EndPoint, lowlatency_transport: bool) { - println!("Transport SHM [0a]: {endpoint:?}"); - - // Define client and router IDs - let peer_shm01 = ZenohIdProto::try_from([1]).unwrap(); - let peer_shm02 = ZenohIdProto::try_from([2]).unwrap(); - let peer_net01 = ZenohIdProto::try_from([3]).unwrap(); - - // create SHM provider - let backend = PosixShmProviderBackend::builder() - .with_size(2 * MSG_SIZE) - .unwrap() - .res() - .unwrap(); - let shm01 = ShmProviderBuilder::builder() - .protocol_id::() - .backend(backend) - .res(); - - // Create a peer manager with shared-memory authenticator enabled - let peer_shm01_handler = Arc::new(SHPeer::new(true)); - let peer_shm01_manager = TransportManager::builder() - .whatami(WhatAmI::Peer) - .zid(peer_shm01) - .unicast( - TransportManager::config_unicast() - .shm(true) - .lowlatency(lowlatency_transport) - .qos(!lowlatency_transport), - ) - .build(peer_shm01_handler.clone()) - .unwrap(); - - // Create a peer manager with shared-memory authenticator enabled - let peer_shm02_handler = Arc::new(SHPeer::new(true)); - let peer_shm02_manager = TransportManager::builder() - .whatami(WhatAmI::Peer) - .zid(peer_shm02) - .unicast( - TransportManager::config_unicast() - .shm(true) - .lowlatency(lowlatency_transport) - .qos(!lowlatency_transport), - ) - .build(peer_shm02_handler.clone()) - .unwrap(); - - // Create a peer manager with shared-memory authenticator disabled - let peer_net01_handler = Arc::new(SHPeer::new(false)); - let peer_net01_manager = TransportManager::builder() - .whatami(WhatAmI::Peer) - .zid(peer_net01) - .unicast( - TransportManager::config_unicast() - .shm(false) - .lowlatency(lowlatency_transport) - .qos(!lowlatency_transport), - ) - .build(peer_net01_handler.clone()) - .unwrap(); - - // Create the listener on the peer - println!("Transport SHM [1a]"); - let _ = ztimeout!(peer_shm01_manager.add_listener(endpoint.clone())).unwrap(); - - // Create a transport with the peer - println!("Transport SHM [1b]"); - let peer_shm01_transport = - ztimeout!(peer_shm02_manager.open_transport_unicast(endpoint.clone())).unwrap(); - 
assert!(peer_shm01_transport.is_shm().unwrap()); - - // Create a transport with the peer - println!("Transport SHM [1c]"); - let peer_net02_transport = - ztimeout!(peer_net01_manager.open_transport_unicast(endpoint.clone())).unwrap(); - assert!(!peer_net02_transport.is_shm().unwrap()); - - // Retrieve the transports - println!("Transport SHM [2a]"); - let peer_shm02_transport = - ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_shm02)).unwrap(); - assert!(peer_shm02_transport.is_shm().unwrap()); - - println!("Transport SHM [2b]"); - let peer_net01_transport = - ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); - assert!(!peer_net01_transport.is_shm().unwrap()); - - let layout = shm01.alloc(MSG_SIZE).into_layout().unwrap(); - - // Send the message - println!("Transport SHM [3a]"); - // The msg count - for (msg_count, _) in (0..MSG_COUNT).enumerate() { - // Create the message to send - let mut sbuf = - ztimeout!(layout.alloc().with_policy::>()).unwrap(); - sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); - - let message: NetworkMessage = Push { - wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), - ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, - payload: Put { - payload: sbuf.into(), - timestamp: None, - encoding: Encoding::empty(), - ext_sinfo: None, - ext_shm: None, - ext_attachment: None, - ext_unknown: vec![], - } - .into(), +async fn run(endpoint: &EndPoint, lowlatency_transport: bool) { + println!("Transport SHM [0a]: {endpoint:?}"); + + // Define client and router IDs + let peer_shm01 = ZenohIdProto::try_from([1]).unwrap(); + let peer_shm02 = ZenohIdProto::try_from([2]).unwrap(); + let peer_net01 = ZenohIdProto::try_from([3]).unwrap(); + + // create SHM provider + let backend = PosixShmProviderBackend::builder() + .with_size(2 * MSG_SIZE) + .unwrap() + .res() + .unwrap(); + let shm01 = ShmProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Create a peer manager with shared-memory authenticator enabled + let peer_shm01_handler = Arc::new(SHPeer::new(true)); + let peer_shm01_manager = TransportManager::builder() + .whatami(WhatAmI::Peer) + .zid(peer_shm01) + .unicast( + TransportManager::config_unicast() + .shm(true) + .lowlatency(lowlatency_transport) + .qos(!lowlatency_transport), + ) + .build(peer_shm01_handler.clone()) + .unwrap(); + + // Create a peer manager with shared-memory authenticator enabled + let peer_shm02_handler = Arc::new(SHPeer::new(true)); + let peer_shm02_manager = TransportManager::builder() + .whatami(WhatAmI::Peer) + .zid(peer_shm02) + .unicast( + TransportManager::config_unicast() + .shm(true) + .lowlatency(lowlatency_transport) + .qos(!lowlatency_transport), + ) + .build(peer_shm02_handler.clone()) + .unwrap(); + + // Create a peer manager with shared-memory authenticator disabled + let peer_net01_handler = Arc::new(SHPeer::new(false)); + let peer_net01_manager = TransportManager::builder() + .whatami(WhatAmI::Peer) + .zid(peer_net01) + .unicast( + TransportManager::config_unicast() + .shm(false) + .lowlatency(lowlatency_transport) + .qos(!lowlatency_transport), + ) + .build(peer_net01_handler.clone()) + .unwrap(); + + // Create the listener on the peer + println!("Transport SHM [1a]"); + let _ = ztimeout!(peer_shm01_manager.add_listener(endpoint.clone())).unwrap(); + + // Create a transport with the peer + println!("Transport SHM [1b]"); + let peer_shm01_transport = + 
ztimeout!(peer_shm02_manager.open_transport_unicast(endpoint.clone())).unwrap(); + assert!(peer_shm01_transport.is_shm().unwrap()); + + // Create a transport with the peer + println!("Transport SHM [1c]"); + let peer_net02_transport = + ztimeout!(peer_net01_manager.open_transport_unicast(endpoint.clone())).unwrap(); + assert!(!peer_net02_transport.is_shm().unwrap()); + + // Retrieve the transports + println!("Transport SHM [2a]"); + let peer_shm02_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_shm02)).unwrap(); + assert!(peer_shm02_transport.is_shm().unwrap()); + + println!("Transport SHM [2b]"); + let peer_net01_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); + assert!(!peer_net01_transport.is_shm().unwrap()); + + let layout = shm01.alloc(MSG_SIZE).into_layout().unwrap(); + + // Send the message + println!("Transport SHM [3a]"); + // The msg count + for (msg_count, _) in (0..MSG_COUNT).enumerate() { + // Create the message to send + let mut sbuf = ztimeout!(layout.alloc().with_policy::>()).unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); + + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::DEFAULT, + payload: Put { + payload: sbuf.into(), + timestamp: None, + encoding: Encoding::empty(), + ext_sinfo: None, + ext_shm: None, + ext_attachment: None, + ext_unknown: vec![], } - .into(); - - peer_shm02_transport.schedule(message).unwrap(); + .into(), } + .into(); - // Wait a little bit - tokio::time::sleep(SLEEP).await; + peer_shm02_transport.schedule(message).unwrap(); + } - // Wait for the messages to arrive to the other side - println!("Transport SHM [3b]"); - ztimeout!(async { - while peer_shm02_handler.get_count() != MSG_COUNT { - tokio::time::sleep(SLEEP).await; - } - }); - - // Send the message - println!("Transport SHM [4a]"); - // The msg count - for (msg_count, _) in (0..MSG_COUNT).enumerate() { - // Create the message to send - let mut sbuf = - ztimeout!(layout.alloc().with_policy::>()).unwrap(); - sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); - - let message: NetworkMessage = Push { - wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), - ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, - payload: Put { - payload: sbuf.into(), - timestamp: None, - encoding: Encoding::empty(), - ext_sinfo: None, - ext_shm: None, - ext_attachment: None, - ext_unknown: vec![], - } - .into(), - } - .into(); + // Wait a little bit + tokio::time::sleep(SLEEP).await; - peer_net01_transport.schedule(message).unwrap(); + // Wait for the messages to arrive to the other side + println!("Transport SHM [3b]"); + ztimeout!(async { + while peer_shm02_handler.get_count() != MSG_COUNT { + tokio::time::sleep(SLEEP).await; + } + }); + + // Send the message + println!("Transport SHM [4a]"); + // The msg count + for (msg_count, _) in (0..MSG_COUNT).enumerate() { + // Create the message to send + let mut sbuf = ztimeout!(layout.alloc().with_policy::>()).unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); + + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::DEFAULT, + payload: Put { + payload: sbuf.into(), + timestamp: None, + encoding: Encoding::empty(), + ext_sinfo: None, + ext_shm: None, + 
ext_attachment: None, + ext_unknown: vec![], + } + .into(), } + .into(); - // Wait a little bit - tokio::time::sleep(SLEEP).await; + peer_net01_transport.schedule(message).unwrap(); + } - // Wait for the messages to arrive to the other side - println!("Transport SHM [4b]"); - ztimeout!(async { - while peer_net01_handler.get_count() != MSG_COUNT { - tokio::time::sleep(SLEEP).await; - } - }); + // Wait a little bit + tokio::time::sleep(SLEEP).await; - // Wait a little bit - tokio::time::sleep(SLEEP).await; + // Wait for the messages to arrive to the other side + println!("Transport SHM [4b]"); + ztimeout!(async { + while peer_net01_handler.get_count() != MSG_COUNT { + tokio::time::sleep(SLEEP).await; + } + }); - // Close the transports - println!("Transport SHM [5a]"); - ztimeout!(peer_shm02_transport.close()).unwrap(); + // Wait a little bit + tokio::time::sleep(SLEEP).await; - println!("Transport SHM [5b]"); - ztimeout!(peer_net01_transport.close()).unwrap(); + // Close the transports + println!("Transport SHM [5a]"); + ztimeout!(peer_shm02_transport.close()).unwrap(); - ztimeout!(async { - while !peer_shm01_manager.get_transports_unicast().await.is_empty() { - tokio::time::sleep(SLEEP).await; - } - }); + println!("Transport SHM [5b]"); + ztimeout!(peer_net01_transport.close()).unwrap(); - // Delete the listener - println!("Transport SHM [6a]"); - ztimeout!(peer_shm01_manager.del_listener(endpoint)).unwrap(); + ztimeout!(async { + while !peer_shm01_manager.get_transports_unicast().await.is_empty() { + tokio::time::sleep(SLEEP).await; + } + }); - // Wait a little bit - ztimeout!(async { - while !peer_shm01_manager.get_listeners().await.is_empty() { - tokio::time::sleep(SLEEP).await; - } - }); - tokio::time::sleep(SLEEP).await; + // Delete the listener + println!("Transport SHM [6a]"); + ztimeout!(peer_shm01_manager.del_listener(endpoint)).unwrap(); + + // Wait a little bit + ztimeout!(async { + while !peer_shm01_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; + } + }); + tokio::time::sleep(SLEEP).await; - ztimeout!(peer_net01_manager.close()); - ztimeout!(peer_shm01_manager.close()); - ztimeout!(peer_shm02_manager.close()); + ztimeout!(peer_net01_manager.close()); + ztimeout!(peer_shm01_manager.close()); + ztimeout!(peer_shm02_manager.close()); - // Wait a little bit - tokio::time::sleep(SLEEP).await; - } + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} - #[cfg(feature = "transport_tcp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_tcp_shm() { - zenoh_util::try_init_log_from_env(); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); - run(&endpoint, false).await; - } +#[cfg(feature = "transport_tcp")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_tcp_shm() { + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); + run(&endpoint, false).await; +} - #[cfg(feature = "transport_tcp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_tcp_shm_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14001).parse().unwrap(); - run(&endpoint, true).await; - } +#[cfg(feature = "transport_tcp")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_tcp_shm_with_lowlatency_transport() { + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14001).parse().unwrap(); + 
run(&endpoint, true).await; +} - #[cfg(feature = "transport_ws")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_ws_shm() { - zenoh_util::try_init_log_from_env(); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14010).parse().unwrap(); - run(&endpoint, false).await; - } +#[cfg(feature = "transport_ws")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_ws_shm() { + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14010).parse().unwrap(); + run(&endpoint, false).await; +} - #[cfg(feature = "transport_ws")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_ws_shm_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14011).parse().unwrap(); - run(&endpoint, true).await; - } +#[cfg(feature = "transport_ws")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_ws_shm_with_lowlatency_transport() { + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14011).parse().unwrap(); + run(&endpoint, true).await; +} - #[cfg(feature = "transport_unixpipe")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_unixpipe_shm() { - zenoh_util::try_init_log_from_env(); - let endpoint: EndPoint = "unixpipe/transport_unixpipe_shm".parse().unwrap(); - run(&endpoint, false).await; - } +#[cfg(feature = "transport_unixpipe")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_unixpipe_shm() { + let endpoint: EndPoint = "unixpipe/transport_unixpipe_shm".parse().unwrap(); + run(&endpoint, false).await; +} - #[cfg(feature = "transport_unixpipe")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_unixpipe_shm_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - let endpoint: EndPoint = "unixpipe/transport_unixpipe_shm_with_lowlatency_transport" - .parse() - .unwrap(); - run(&endpoint, true).await; - } +#[cfg(feature = "transport_unixpipe")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_unixpipe_shm_with_lowlatency_transport() { + let endpoint: EndPoint = "unixpipe/transport_unixpipe_shm_with_lowlatency_transport" + .parse() + .unwrap(); + run(&endpoint, true).await; } diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 4f529c3b74..59432fe779 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -11,356 +11,350 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(target_family = "unix")] -mod tests { - use std::{ - any::Any, - convert::TryFrom, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, - time::Duration, - }; - - use zenoh_core::ztimeout; - use zenoh_link::Link; - use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, - network::{ - push::ext::{NodeIdType, QoSType}, - NetworkMessage, Push, - }, - zenoh::Put, - }; - use zenoh_result::ZResult; - use zenoh_transport::{ - multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, - TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, - }; - - const TIMEOUT: Duration = Duration::from_secs(60); - const SLEEP: Duration = Duration::from_millis(500); - - const MSG_COUNT: usize = 16; - const MSG_SIZE: usize = 1_024; 
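The other recurring cleanup, visible in this hunk and in unicast_shm.rs above: the #[cfg]-gated mod tests { ... } wrapper is dissolved, and the gate becomes an inner #![cfg(...)] attribute at the top of the integration-test file. That conditions the entire file at once and moves every item up one level, which is why most of the surrounding -/+ lines differ only in indentation. In miniature (two alternative file skeletons, not one compilable file):

    // Before: tests/some_test.rs gates a wrapper module, nesting
    // everything one level deep.
    #[cfg(target_family = "unix")]
    mod tests {
        // imports, consts, helpers, #[test] fns ...
    }

    // After: tests/some_test.rs starts with an inner attribute (it must
    // be the first item in the file), gating the whole file instead.
    #![cfg(target_family = "unix")]
    // imports, consts, helpers, #[test] fns, one level shallower ...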
- - // Transport Handler for the router - struct SHPeer { - zid: ZenohIdProto, - count: Arc&lt;AtomicUsize&gt;, - } - - impl SHPeer { - fn new(zid: ZenohIdProto) -> Self { - Self { - zid, - count: Arc::new(AtomicUsize::new(0)), - } - } +#![cfg(target_family = "unix")] +use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +use zenoh_core::ztimeout; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, + network::{ + push::ext::{NodeIdType, QoSType}, + NetworkMessage, Push, + }, + zenoh::Put, +}; +use zenoh_result::ZResult; +use zenoh_transport::{ + multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, + TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const SLEEP: Duration = Duration::from_millis(500); + +const MSG_COUNT: usize = 16; +const MSG_SIZE: usize = 1_024; + +// Transport Handler for the router +struct SHPeer { + zid: ZenohIdProto, + count: Arc&lt;AtomicUsize&gt;, +} - fn get_count(&self) -> usize { - self.count.load(Ordering::SeqCst) +impl SHPeer { + fn new(zid: ZenohIdProto) -> Self { + Self { + zid, + count: Arc::new(AtomicUsize::new(0)), } } - impl TransportEventHandler for SHPeer { - fn new_unicast( - &self, - _peer: TransportPeer, - transport: TransportUnicast, - ) -> ZResult&lt;Arc&lt;dyn TransportPeerEventHandler&gt;&gt; { - // Create the message to send - let message: NetworkMessage = Push { - wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::Control, CongestionControl::Block, false), - ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, - payload: Put { - payload: vec![0u8; MSG_SIZE].into(), - timestamp: None, - encoding: Encoding::empty(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: None, - ext_unknown: vec![], - } - .into(), - } - .into(); + fn get_count(&self) -> usize { + self.count.load(Ordering::SeqCst) + } +} - println!("[Simultaneous {}] Sending {}...", self.zid, MSG_COUNT); - for _ in 0..MSG_COUNT { - transport.schedule(message.clone()).unwrap(); +impl TransportEventHandler for SHPeer { + fn new_unicast( + &self, + _peer: TransportPeer, + transport: TransportUnicast, + ) -> ZResult&lt;Arc&lt;dyn TransportPeerEventHandler&gt;&gt; { + // Create the message to send + let message: NetworkMessage = Push { + wire_expr: "test".into(), + ext_qos: QoSType::new(Priority::Control, CongestionControl::Block, false), + ext_tstamp: None, + ext_nodeid: NodeIdType::DEFAULT, + payload: Put { + payload: vec![0u8; MSG_SIZE].into(), + timestamp: None, + encoding: Encoding::empty(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: None, + ext_unknown: vec![], } - println!("[Simultaneous {}] ... sent {}", self.zid, MSG_COUNT); - - let mh = Arc::new(MHPeer::new(self.count.clone())); - Ok(mh) + .into(), } + .into(); - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult&lt;Arc&lt;dyn TransportMulticastEventHandler&gt;&gt; { - panic!(); + println!("[Simultaneous {}] Sending {}...", self.zid, MSG_COUNT); + for _ in 0..MSG_COUNT { + transport.schedule(message.clone()).unwrap(); } + println!("[Simultaneous {}] ... sent {}", self.zid, MSG_COUNT); + + let mh = Arc::new(MHPeer::new(self.count.clone())); + Ok(mh) } - struct MHPeer { - count: Arc&lt;AtomicUsize&gt;, + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult&lt;Arc&lt;dyn TransportMulticastEventHandler&gt;&gt; { + panic!(); } +} - impl MHPeer { - fn new(count: Arc&lt;AtomicUsize&gt;) -> Self { - Self { count } - } +struct MHPeer { + count: Arc&lt;AtomicUsize&gt;, +} + +impl MHPeer { + fn new(count: Arc&lt;AtomicUsize&gt;) -> Self { + Self { count } } +} - impl TransportPeerEventHandler for MHPeer { - fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> { - self.count.fetch_add(1, Ordering::AcqRel); - Ok(()) - } +impl TransportPeerEventHandler for MHPeer { + fn handle_message(&self, _msg: NetworkMessage) -> ZResult<()> { + self.count.fetch_add(1, Ordering::AcqRel); + Ok(()) + } - fn new_link(&self, _link: Link) {} - fn del_link(&self, _link: Link) {} - fn closing(&self) {} - fn closed(&self) {} + fn new_link(&self, _link: Link) {} + fn del_link(&self, _link: Link) {} + fn closing(&self) {} + fn closed(&self) {} - fn as_any(&self) -> &dyn Any { - self - } + fn as_any(&self) -> &dyn Any { + self } +} - async fn transport_simultaneous(endpoint01: Vec&lt;EndPoint&gt;, endpoint02: Vec&lt;EndPoint&gt;) { - /* [Peers] */ - let peer_id01 = ZenohIdProto::try_from([2]).unwrap(); - let peer_id02 = ZenohIdProto::try_from([3]).unwrap(); - - // Create the peer01 transport manager - let peer_sh01 = Arc::new(SHPeer::new(peer_id01)); - let unicast = TransportManager::config_unicast().max_links(endpoint01.len()); - let peer01_manager = TransportManager::builder() - .whatami(WhatAmI::Peer) - .zid(peer_id01) - .unicast(unicast) - .build(peer_sh01.clone()) - .unwrap(); - - // Create the peer02 transport manager - let peer_sh02 = Arc::new(SHPeer::new(peer_id02)); - let unicast = TransportManager::config_unicast().max_links(endpoint02.len()); - let peer02_manager = TransportManager::builder() - .whatami(WhatAmI::Peer) - .zid(peer_id02) - .unicast(unicast) - .build(peer_sh02.clone()) - .unwrap(); - - // Add the endpoints on the peer01 - for e in endpoint01.iter() { - let res = ztimeout!(peer01_manager.add_listener(e.clone())); - println!("[Simultaneous 01a] => Adding endpoint {e:?}: {res:?}"); - assert!(res.is_ok()); +async fn transport_simultaneous(endpoint01: Vec&lt;EndPoint&gt;, endpoint02: Vec&lt;EndPoint&gt;) { + /* [Peers] */ + let peer_id01 = ZenohIdProto::try_from([2]).unwrap(); + let peer_id02 = ZenohIdProto::try_from([3]).unwrap(); + + // Create the peer01 transport manager + let peer_sh01 = Arc::new(SHPeer::new(peer_id01)); + let unicast = TransportManager::config_unicast().max_links(endpoint01.len()); + let peer01_manager = TransportManager::builder() + .whatami(WhatAmI::Peer) + .zid(peer_id01) + .unicast(unicast) + .build(peer_sh01.clone()) + .unwrap(); + + // Create the peer02 transport manager + let peer_sh02 = Arc::new(SHPeer::new(peer_id02)); + let unicast = TransportManager::config_unicast().max_links(endpoint02.len()); + let peer02_manager = TransportManager::builder() + .whatami(WhatAmI::Peer) + .zid(peer_id02) + .unicast(unicast) + .build(peer_sh02.clone()) + .unwrap(); + + // Add the endpoints on the peer01 + for e in endpoint01.iter() { + let res = ztimeout!(peer01_manager.add_listener(e.clone())); + println!("[Simultaneous 01a] => Adding endpoint {e:?}: {res:?}"); + assert!(res.is_ok()); + } + let locs = ztimeout!(peer01_manager.get_listeners()); + println!("[Simultaneous 01b] => Getting endpoints: {endpoint01:?} {locs:?}"); + assert_eq!(endpoint01.len(), locs.len()); + + // Add the endpoints on peer02 + for e in endpoint02.iter() { + let res = ztimeout!(peer02_manager.add_listener(e.clone()));
println!("[Simultaneous 02a] => Adding endpoint {e:?}: {res:?}"); + assert!(res.is_ok()); + } + let locs = ztimeout!(peer02_manager.get_listeners()); + println!("[Simultaneous 02b] => Getting endpoints: {endpoint02:?} {locs:?}"); + assert_eq!(endpoint02.len(), locs.len()); + + // Endpoints + let c_ep01 = endpoint01.clone(); + let c_ep02 = endpoint02.clone(); + + // Peer01 + let c_p01m = peer01_manager.clone(); + let peer01_task = tokio::task::spawn(async move { + // Open the transport with the second peer + // These open should succeed + for e in c_ep02.iter() { + println!("[Simultaneous 01c] => Opening transport with {e:?}..."); + let _ = ztimeout!(c_p01m.open_transport_unicast(e.clone())).unwrap(); } - let locs = ztimeout!(peer01_manager.get_listeners()); - println!("[Simultaneous 01b] => Getting endpoints: {endpoint01:?} {locs:?}"); - assert_eq!(endpoint01.len(), locs.len()); - - // Add the endpoints on peer02 - for e in endpoint02.iter() { - let res = ztimeout!(peer02_manager.add_listener(e.clone())); - println!("[Simultaneous 02a] => Adding endpoint {e:?}: {res:?}"); - assert!(res.is_ok()); + + // These open should fails + for e in c_ep02.iter() { + println!("[Simultaneous 01d] => Exceeding transport with {e:?}..."); + let res = ztimeout!(c_p01m.open_transport_unicast(e.clone())); + assert!(res.is_err()); } - let locs = ztimeout!(peer02_manager.get_listeners()); - println!("[Simultaneous 02b] => Getting endpoints: {endpoint02:?} {locs:?}"); - assert_eq!(endpoint02.len(), locs.len()); - - // Endpoints - let c_ep01 = endpoint01.clone(); - let c_ep02 = endpoint02.clone(); - - // Peer01 - let c_p01m = peer01_manager.clone(); - let peer01_task = tokio::task::spawn(async move { - // Open the transport with the second peer - // These open should succeed - for e in c_ep02.iter() { - println!("[Simultaneous 01c] => Opening transport with {e:?}..."); - let _ = ztimeout!(c_p01m.open_transport_unicast(e.clone())).unwrap(); - } - // These open should fails - for e in c_ep02.iter() { - println!("[Simultaneous 01d] => Exceeding transport with {e:?}..."); - let res = ztimeout!(c_p01m.open_transport_unicast(e.clone())); - assert!(res.is_err()); + tokio::time::sleep(SLEEP).await; + + let tp02 = ztimeout!(async { + let mut tp02 = None; + while tp02.is_none() { + tokio::time::sleep(SLEEP).await; + println!( + "[Simultaneous 01e] => Transports: {:?}", + peer01_manager.get_transports_unicast().await + ); + tp02 = peer01_manager.get_transport_unicast(&peer_id02).await; } - tokio::time::sleep(SLEEP).await; - - let tp02 = ztimeout!(async { - let mut tp02 = None; - while tp02.is_none() { - tokio::time::sleep(SLEEP).await; - println!( - "[Simultaneous 01e] => Transports: {:?}", - peer01_manager.get_transports_unicast().await - ); - tp02 = peer01_manager.get_transport_unicast(&peer_id02).await; - } - - tp02.unwrap() - }); - - // Wait for the links to be properly established - ztimeout!(async { - let expected = endpoint01.len() + c_ep02.len(); - let mut tl02 = vec![]; - while tl02.len() != expected { - tokio::time::sleep(SLEEP).await; - tl02 = tp02.get_links().unwrap(); - println!("[Simultaneous 01f] => Links {}/{}", tl02.len(), expected); - } - }); - - // Wait for the messages to arrive to peer 01 - ztimeout!(async { - let mut check = 0; - while check != MSG_COUNT { - tokio::time::sleep(SLEEP).await; - check = peer_sh01.get_count(); - println!("[Simultaneous 01g] => Received {check:?}/{MSG_COUNT:?}"); - } - }); + tp02.unwrap() }); - // Peer02 - let c_p02m = peer02_manager.clone(); - let peer02_task = 
tokio::task::spawn(async move { - // Open the transport with the first peer - // These open should succeed - for e in c_ep01.iter() { - println!("[Simultaneous 02c] => Opening transport with {e:?}..."); - let _ = ztimeout!(c_p02m.open_transport_unicast(e.clone())).unwrap(); + // Wait for the links to be properly established + ztimeout!(async { + let expected = endpoint01.len() + c_ep02.len(); + let mut tl02 = vec![]; + while tl02.len() != expected { + tokio::time::sleep(SLEEP).await; + tl02 = tp02.get_links().unwrap(); + println!("[Simultaneous 01f] => Links {}/{}", tl02.len(), expected); } + }); - // These open should fails - for e in c_ep01.iter() { - println!("[Simultaneous 02d] => Exceeding transport with {e:?}..."); - let res = ztimeout!(c_p02m.open_transport_unicast(e.clone())); - assert!(res.is_err()); + // Wait for the messages to arrive to peer 01 + ztimeout!(async { + let mut check = 0; + while check != MSG_COUNT { + tokio::time::sleep(SLEEP).await; + check = peer_sh01.get_count(); + println!("[Simultaneous 01g] => Received {check:?}/{MSG_COUNT:?}"); } - - // Wait a little bit - tokio::time::sleep(SLEEP).await; - - let tp01 = ztimeout!(async { - let mut tp01 = None; - while tp01.is_none() { - tokio::time::sleep(SLEEP).await; - println!( - "[Simultaneous 02e] => Transports: {:?}", - peer02_manager.get_transports_unicast().await - ); - tp01 = peer02_manager.get_transport_unicast(&peer_id01).await; - } - tp01.unwrap() - }); - - // Wait for the links to be properly established - ztimeout!(async { - let expected = c_ep01.len() + endpoint02.len(); - let mut tl01 = vec![]; - while tl01.len() != expected { - tokio::time::sleep(SLEEP).await; - tl01 = tp01.get_links().unwrap(); - println!("[Simultaneous 02f] => Links {}/{}", tl01.len(), expected); - } - }); - - // Wait for the messages to arrive to peer 02 - ztimeout!(async { - let mut check = 0; - while check != MSG_COUNT { - tokio::time::sleep(SLEEP).await; - check = peer_sh02.get_count(); - println!("[Simultaneous 02g] => Received {check:?}/{MSG_COUNT:?}"); - } - }); }); + }); + + // Peer02 + let c_p02m = peer02_manager.clone(); + let peer02_task = tokio::task::spawn(async move { + // Open the transport with the first peer + // These open should succeed + for e in c_ep01.iter() { + println!("[Simultaneous 02c] => Opening transport with {e:?}..."); + let _ = ztimeout!(c_p02m.open_transport_unicast(e.clone())).unwrap(); + } - println!("[Simultaneous] => Waiting for peer01 and peer02 tasks..."); - let _ = tokio::join!(peer01_task, peer02_task); - println!("[Simultaneous] => Waiting for peer01 and peer02 tasks... 
DONE\n"); + // These open should fails + for e in c_ep01.iter() { + println!("[Simultaneous 02d] => Exceeding transport with {e:?}..."); + let res = ztimeout!(c_p02m.open_transport_unicast(e.clone())); + assert!(res.is_err()); + } // Wait a little bit tokio::time::sleep(SLEEP).await; - } - #[cfg(feature = "transport_tcp")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn transport_tcp_simultaneous() { - zenoh_util::try_init_log_from_env(); - let endpoint01: Vec = vec![ - format!("tcp/127.0.0.1:{}", 15000).parse().unwrap(), - format!("tcp/127.0.0.1:{}", 15001).parse().unwrap(), - format!("tcp/127.0.0.1:{}", 15002).parse().unwrap(), - format!("tcp/127.0.0.1:{}", 15003).parse().unwrap(), - ]; - let endpoint02: Vec = vec![ - format!("tcp/127.0.0.1:{}", 15010).parse().unwrap(), - format!("tcp/127.0.0.1:{}", 15011).parse().unwrap(), - format!("tcp/127.0.0.1:{}", 15012).parse().unwrap(), - format!("tcp/127.0.0.1:{}", 15013).parse().unwrap(), - ]; - - transport_simultaneous(endpoint01, endpoint02).await; - } + let tp01 = ztimeout!(async { + let mut tp01 = None; + while tp01.is_none() { + tokio::time::sleep(SLEEP).await; + println!( + "[Simultaneous 02e] => Transports: {:?}", + peer02_manager.get_transports_unicast().await + ); + tp01 = peer02_manager.get_transport_unicast(&peer_id01).await; + } + tp01.unwrap() + }); - #[cfg(feature = "transport_unixpipe")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - #[ignore] - async fn transport_unixpipe_simultaneous() { - zenoh_util::try_init_log_from_env(); - let endpoint01: Vec = vec![ - "unixpipe/transport_unixpipe_simultaneous".parse().unwrap(), - "unixpipe/transport_unixpipe_simultaneous2".parse().unwrap(), - "unixpipe/transport_unixpipe_simultaneous3".parse().unwrap(), - "unixpipe/transport_unixpipe_simultaneous4".parse().unwrap(), - ]; - let endpoint02: Vec = vec![ - "unixpipe/transport_unixpipe_simultaneous5".parse().unwrap(), - "unixpipe/transport_unixpipe_simultaneous6".parse().unwrap(), - "unixpipe/transport_unixpipe_simultaneous7".parse().unwrap(), - "unixpipe/transport_unixpipe_simultaneous8".parse().unwrap(), - ]; - - transport_simultaneous(endpoint01, endpoint02).await; - } + // Wait for the links to be properly established + ztimeout!(async { + let expected = c_ep01.len() + endpoint02.len(); + let mut tl01 = vec![]; + while tl01.len() != expected { + tokio::time::sleep(SLEEP).await; + tl01 = tp01.get_links().unwrap(); + println!("[Simultaneous 02f] => Links {}/{}", tl01.len(), expected); + } + }); - #[cfg(feature = "transport_ws")] - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - #[ignore] - async fn transport_ws_simultaneous() { - zenoh_util::try_init_log_from_env(); - - let endpoint01: Vec = vec![ - format!("ws/127.0.0.1:{}", 15020).parse().unwrap(), - format!("ws/127.0.0.1:{}", 15021).parse().unwrap(), - format!("ws/127.0.0.1:{}", 15022).parse().unwrap(), - format!("ws/127.0.0.1:{}", 15023).parse().unwrap(), - ]; - let endpoint02: Vec = vec![ - format!("ws/127.0.0.1:{}", 15030).parse().unwrap(), - format!("ws/127.0.0.1:{}", 15031).parse().unwrap(), - format!("ws/127.0.0.1:{}", 15032).parse().unwrap(), - format!("ws/127.0.0.1:{}", 15033).parse().unwrap(), - ]; - - transport_simultaneous(endpoint01, endpoint02).await; - } + // Wait for the messages to arrive to peer 02 + ztimeout!(async { + let mut check = 0; + while check != MSG_COUNT { + tokio::time::sleep(SLEEP).await; + check = peer_sh02.get_count(); + println!("[Simultaneous 02g] => Received {check:?}/{MSG_COUNT:?}"); + } + }); + 
}); + + println!("[Simultaneous] => Waiting for peer01 and peer02 tasks..."); + let _ = tokio::join!(peer01_task, peer02_task); + println!("[Simultaneous] => Waiting for peer01 and peer02 tasks... DONE\n"); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +#[cfg(feature = "transport_tcp")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +async fn transport_tcp_simultaneous() { + let endpoint01: Vec = vec![ + format!("tcp/127.0.0.1:{}", 15000).parse().unwrap(), + format!("tcp/127.0.0.1:{}", 15001).parse().unwrap(), + format!("tcp/127.0.0.1:{}", 15002).parse().unwrap(), + format!("tcp/127.0.0.1:{}", 15003).parse().unwrap(), + ]; + let endpoint02: Vec = vec![ + format!("tcp/127.0.0.1:{}", 15010).parse().unwrap(), + format!("tcp/127.0.0.1:{}", 15011).parse().unwrap(), + format!("tcp/127.0.0.1:{}", 15012).parse().unwrap(), + format!("tcp/127.0.0.1:{}", 15013).parse().unwrap(), + ]; + + transport_simultaneous(endpoint01, endpoint02).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +#[ignore] +async fn transport_unixpipe_simultaneous() { + let endpoint01: Vec = vec![ + "unixpipe/transport_unixpipe_simultaneous".parse().unwrap(), + "unixpipe/transport_unixpipe_simultaneous2".parse().unwrap(), + "unixpipe/transport_unixpipe_simultaneous3".parse().unwrap(), + "unixpipe/transport_unixpipe_simultaneous4".parse().unwrap(), + ]; + let endpoint02: Vec = vec![ + "unixpipe/transport_unixpipe_simultaneous5".parse().unwrap(), + "unixpipe/transport_unixpipe_simultaneous6".parse().unwrap(), + "unixpipe/transport_unixpipe_simultaneous7".parse().unwrap(), + "unixpipe/transport_unixpipe_simultaneous8".parse().unwrap(), + ]; + + transport_simultaneous(endpoint01, endpoint02).await; +} + +#[cfg(feature = "transport_ws")] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] +#[ignore] +async fn transport_ws_simultaneous() { + let endpoint01: Vec = vec![ + format!("ws/127.0.0.1:{}", 15020).parse().unwrap(), + format!("ws/127.0.0.1:{}", 15021).parse().unwrap(), + format!("ws/127.0.0.1:{}", 15022).parse().unwrap(), + format!("ws/127.0.0.1:{}", 15023).parse().unwrap(), + ]; + let endpoint02: Vec = vec![ + format!("ws/127.0.0.1:{}", 15030).parse().unwrap(), + format!("ws/127.0.0.1:{}", 15031).parse().unwrap(), + format!("ws/127.0.0.1:{}", 15032).parse().unwrap(), + format!("ws/127.0.0.1:{}", 15033).parse().unwrap(), + ]; + + transport_simultaneous(endpoint01, endpoint02).await; } diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 5c62235371..6ca614db15 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -228,73 +228,65 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tcp_only() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tcp_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 
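The migration pattern above repeats in every test file of this change: the explicit zenoh_util::try_init_log_from_env() call is dropped from the body, and the tokio::test attribute is wrapped by test_log::test, which installs the logger (driven by RUST_LOG) before the body runs. A minimal sketch of the resulting shape, with a hypothetical test name and log line, assuming test-log's tracing support as pulled in by the lockfile above:

    #[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
    async fn logging_is_ready_before_the_body() {
        // No init call needed here: with RUST_LOG=debug set, this record is
        // captured by the subscriber that test-log installed before entry.
        tracing::debug!("transport test starting");
    }
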
diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 5c62235371..6ca614db15 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -228,73 +228,65 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tcp_only() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tcp_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_udp_only() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); time_universal_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_udp_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_ws_only() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); time_universal_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_ws_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_unixpipe_only() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); time_universal_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_unixpipe_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" .parse() .unwrap(); @@ -302,10 +294,9 @@ async fn time_unixpipe_only_with_lowlatency_transport() { } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_unix_only() { - zenoh_util::try_init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -315,11 +306,10 @@ async fn time_unix_only() { } #[cfg(feature = "transport_tls")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tls_only() { use zenoh_link::tls::config::*; - zenoh_util::try_init_log_from_env(); // NOTE: this is an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain.
The certificate and key @@ -413,7 +403,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== } #[cfg(feature = "transport_quic")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_quic_only() { use zenoh_link::quic::config::*; @@ -512,10 +502,9 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== } #[cfg(all(feature = "transport_vsock", target_os = "linux"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_vsock_only() { - zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); time_lowlatency_transport(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index b49b863991..5ff9295872 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -603,10 +603,8 @@ async fn run_with_lowlatency_transport( } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tcp_only() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoints: Vec&lt;EndPoint&gt; = vec![ format!("tcp/127.0.0.1:{}", 16000).parse().unwrap(), @@ -628,10 +626,8 @@ async fn transport_unicast_tcp_only() { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tcp_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoints: Vec&lt;EndPoint&gt; = vec![format!("tcp/127.0.0.1:{}", 16100).parse().unwrap()]; // Define the reliability and congestion control @@ -650,10 +646,8 @@ async fn transport_unicast_tcp_only_with_lowlatency_transport() { } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_udp_only() { - zenoh_util::try_init_log_from_env(); - // Define the locator let endpoints: Vec&lt;EndPoint&gt; = vec![ format!("udp/127.0.0.1:{}", 16010).parse().unwrap(), @@ -675,10 +669,8 @@ async fn transport_unicast_udp_only() { } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_udp_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - // Define the locator let endpoints: Vec&lt;EndPoint&gt; = vec![format!("udp/127.0.0.1:{}", 16110).parse().unwrap()]; // Define the reliability and congestion control @@ -697,10 +689,8 @@ async fn transport_unicast_udp_only_with_lowlatency_transport() { } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_unix_only() { - zenoh_util::try_init_log_from_env(); - let f1 = "zenoh-test-unix-socket-5.sock"; let _ = std::fs::remove_file(f1); // Define the locator @@ -723,10 +713,8 @@ async fn transport_unicast_unix_only() { } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_unix_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - let f1 = "zenoh-test-unix-socket-5-lowlatency.sock"; let _ = std::fs::remove_file(f1); // Define the locator @@ -749,10 +737,8 @@ async fn transport_unicast_unix_only_with_lowlatency_transport() { } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_ws_only() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoints: Vec&lt;EndPoint&gt; = vec![ format!("ws/127.0.0.1:{}", 16020).parse().unwrap(), @@ -782,10 +768,8 @@ async fn transport_unicast_ws_only() { } #[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_ws_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - // Define the locators let endpoints: Vec&lt;EndPoint&gt; = vec![format!("ws/127.0.0.1:{}", 16120).parse().unwrap()]; // Define the reliability and congestion control @@ -812,10 +796,8 @@ async fn transport_unicast_ws_only_with_lowlatency_transport() { } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_unixpipe_only() { - zenoh_util::try_init_log_from_env(); - // Define the locator let endpoints: Vec&lt;EndPoint&gt; = vec![ "unixpipe/transport_unicast_unixpipe_only".parse().unwrap(), @@ -837,10 +819,8 @@ async fn transport_unicast_unixpipe_only() { } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_unixpipe_only_with_lowlatency_transport() { - zenoh_util::try_init_log_from_env(); - // Define the locator let endpoints: Vec&lt;EndPoint&gt; = vec![ "unixpipe/transport_unicast_unixpipe_only_with_lowlatency_transport" .parse() @@ -863,10 +843,8 @@ async fn transport_unicast_unixpipe_only_with_lowlatency_transport() { } #[cfg(all(feature = "transport_tcp", feature = "transport_udp"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tcp_udp() { - zenoh_util::try_init_log_from_env(); - // Define the locator let endpoints: Vec&lt;EndPoint&gt; = vec![ format!("tcp/127.0.0.1:{}", 16030).parse().unwrap(), @@ -894,10 +872,8 @@ async fn transport_unicast_tcp_udp() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tcp_unix() { - zenoh_util::try_init_log_from_env(); - let f1 = "zenoh-test-unix-socket-6.sock"; let _ = std::fs::remove_file(f1); // Define the locator @@ -928,10 +904,8 @@ async fn transport_unicast_tcp_unix() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_udp_unix() { - zenoh_util::try_init_log_from_env(); - let f1 = "zenoh-test-unix-socket-7.sock"; let _ = std::fs::remove_file(f1); // Define
the locator @@ -963,10 +937,8 @@ async fn transport_unicast_udp_unix() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tcp_udp_unix() { - zenoh_util::try_init_log_from_env(); - let f1 = "zenoh-test-unix-socket-8.sock"; let _ = std::fs::remove_file(f1); // Define the locator @@ -995,12 +967,10 @@ async fn transport_unicast_tcp_udp_unix() { } #[cfg(all(feature = "transport_tls", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tls_only_server() { use zenoh_link::tls::config::*; - zenoh_util::try_init_log_from_env(); - // Define the locator let mut endpoint: EndPoint = format!("tls/localhost:{}", 16070).parse().unwrap(); endpoint @@ -1041,11 +1011,10 @@ async fn transport_unicast_tls_only_server() { } #[cfg(feature = "transport_quic")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_quic_only_server() { use zenoh_link::quic::config::*; - zenoh_util::try_init_log_from_env(); // Define the locator let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint @@ -1086,12 +1055,10 @@ async fn transport_unicast_quic_only_server() { } #[cfg(all(feature = "transport_tls", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tls_only_mutual_success() { use zenoh_link::tls::config::*; - zenoh_util::try_init_log_from_env(); - let client_auth = "true"; // Define the locator @@ -1157,14 +1124,12 @@ async fn transport_unicast_tls_only_mutual_success() { } #[cfg(all(feature = "transport_tls", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { use std::vec; use zenoh_link::tls::config::*; - zenoh_util::try_init_log_from_env(); - // Define the locator let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); client_endpoint @@ -1227,8 +1192,6 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { use zenoh_link::tls::config::*; - zenoh_util::try_init_log_from_env(); - let client_auth = "true"; // Define the locator @@ -1302,12 +1265,10 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { } #[cfg(all(feature = "transport_quic", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn transport_unicast_quic_only_mutual_success() { use zenoh_link::quic::config::*; - zenoh_util::try_init_log_from_env(); - let client_auth = "true"; // Define the locator @@ -1373,14 +1334,12 @@ async fn transport_unicast_quic_only_mutual_success() { } #[cfg(all(feature = "transport_quic", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn 
transport_unicast_quic_only_mutual_no_client_certs_failure() { use std::vec; use zenoh_link::quic::config::*; - zenoh_util::try_init_log_from_env(); - // Define the locator let mut client_endpoint: EndPoint = ("quic/localhost:10462").parse().unwrap(); client_endpoint @@ -1443,8 +1402,6 @@ async fn transport_unicast_quic_only_mutual_no_client_certs_failure() { fn transport_unicast_quic_only_mutual_wrong_client_certs_failure() { use zenoh_link::quic::config::*; - zenoh_util::try_init_log_from_env(); - let client_auth = "true"; // Define the locator diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index cbd84fb766..459c6b635f 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -151,7 +151,7 @@ impl Drop for RunningPlugin { } async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc&lt;AtomicBool&gt;) { - zenoh_util::try_init_log_from_env(); + let _ = zenoh::logging::try_init_logging(); // create a zenoh Session that shares the same Runtime as zenohd let session = zenoh::session::init(runtime).await.unwrap(); diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index e39d7c28b2..511d798538 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -35,7 +35,7 @@ if(typeof(EventSource) !== "undefined") { #[async_std::main] async fn main() { // initiate logging - zenoh::try_init_log_from_env(); + zenoh::init_logging(); let config = parse_args(); let key = keyexpr::new("demo/sse").unwrap(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 107f241a87..f0d94dab42 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -225,10 +225,7 @@ impl Plugin for RestPlugin { name: &str, runtime: &Self::StartArgs, ) -> ZResult&lt;Self::Instance&gt; { - // Try to initiate login. - // Required in case of dynamic lib, otherwise no logs. - // But cannot be done twice in case of static link. - zenoh::try_init_log_from_env(); + let _ = zenoh::logging::try_init_logging(); tracing::debug!("REST plugin {}", LONG_VERSION.as_str()); let runtime_conf = runtime.config().lock(); @@ -466,10 +463,7 @@ async fn write(mut req: Request<(Arc&lt;Session&gt;, String)>) -> tide::Result ZResult<()> { - // Try to initiate login. - // Required in case of dynamic lib, otherwise no logs. - // But cannot be done twice in case of static link. - zenoh::try_init_log_from_env(); + let _ = zenoh::logging::try_init_logging(); let zid = runtime.zid().to_string(); let session = zenoh::session::init(runtime).await.unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 69557af614..0850fada7b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -69,7 +69,7 @@ impl Plugin for StoragesPlugin { type Instance = RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult&lt;Self::Instance&gt; { - zenoh::try_init_log_from_env(); + let _ = zenoh::logging::try_init_logging(); tracing::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); let config = { PluginConfig::try_from((name, runtime.config().lock().plugin(name).unwrap())) }?; @@ -99,10 +99,7 @@ impl StorageRuntimeInner { ) } fn new(runtime: Runtime, config: PluginConfig) -> ZResult&lt;Self&gt; { - // Try to initiate login. - // Required in case of dynamic lib, otherwise no logs.
- // But cannot be done twice in case of static link. - zenoh::try_init_log_from_env(); + let _ = zenoh::logging::try_init_logging(); let PluginConfig { name, backend_search_dirs, diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 4f2613cb70..4893742a76 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -53,6 +53,7 @@ zenoh-macros = { workspace = true } [dev-dependencies] zenoh = { workspace = true, features = ["unstable"], default-features = true } +test-log = { workspace = true } [package.metadata.docs.rs] features = ["unstable"] diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 90129ca21e..78d0eb098f 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -19,7 +19,7 @@ use zenoh_ext::group::*; #[tokio::main] async fn main() { - zenoh::try_init_log_from_env(); + zenoh::init_logging(); let z = Arc::new(zenoh::open(Config::default()).await.unwrap()); let member = Member::new(z.zid().to_string()) .unwrap() diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 0c5a60751b..0cfaaa531f 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -24,7 +24,7 @@ use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh::try_init_log_from_env(); + zenoh::init_logging(); let (config, key_expr, value, history, prefix, complete) = parse_args(); diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index c819a2a831..ab1e1caa1d 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -19,7 +19,7 @@ use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh::try_init_log_from_env(); + zenoh::init_logging(); let (config, key_expr, query) = parse_args(); diff --git a/zenoh-ext/examples/examples/z_view_size.rs b/zenoh-ext/examples/examples/z_view_size.rs index a38120cfb4..257bb3495d 100644 --- a/zenoh-ext/examples/examples/z_view_size.rs +++ b/zenoh-ext/examples/examples/z_view_size.rs @@ -20,7 +20,7 @@ use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { - zenoh::try_init_log_from_env(); + zenoh::init_logging(); let (config, group_name, id, size, timeout) = parse_args(); diff --git a/zenoh-ext/tests/liveliness.rs b/zenoh-ext/tests/liveliness.rs index 23e901d458..8f48ffec4f 100644 --- a/zenoh-ext/tests/liveliness.rs +++ b/zenoh-ext/tests/liveliness.rs @@ -17,7 +17,7 @@ use zenoh::{ sample::SampleKind, }; -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_querying_subscriber_clique() { use std::time::Duration; @@ -32,8 +32,6 @@ async fn test_liveliness_querying_subscriber_clique() { const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; - zenoh_util::try_init_log_from_env(); - let peer1 = { let mut c = config::default(); c.listen @@ -87,7 +85,7 @@ async fn test_liveliness_querying_subscriber_clique() { assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_querying_subscriber_brokered() { use 
std::time::Duration; @@ -102,8 +100,6 @@ async fn test_liveliness_querying_subscriber_brokered() { const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; - zenoh_util::try_init_log_from_env(); - let _router = { let mut c = config::default(); c.listen @@ -181,7 +177,7 @@ async fn test_liveliness_querying_subscriber_brokered() { assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_fetching_subscriber_clique() { use std::time::Duration; @@ -196,8 +192,6 @@ async fn test_liveliness_fetching_subscriber_clique() { const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; - zenoh_util::try_init_log_from_env(); - let peer1 = { let mut c = config::default(); c.listen @@ -255,7 +249,7 @@ async fn test_liveliness_fetching_subscriber_clique() { assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_fetching_subscriber_brokered() { use std::time::Duration; @@ -270,8 +264,6 @@ async fn test_liveliness_fetching_subscriber_brokered() { const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; - zenoh_util::try_init_log_from_env(); - let _router = { let mut c = config::default(); c.listen diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 7961c787eb..642606faed 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -117,6 +117,7 @@ once_cell = { workspace = true } [dev-dependencies] tokio = { workspace = true } +test-log = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 27cfdc3e3f..b1df9ba148 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -2111,6 +2111,7 @@ impl From> for ZBytes { } } +#[cfg(test)] mod tests { #[test] fn serializer() { diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 41317b8b43..6659620fb8 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -289,6 +289,18 @@ pub struct Sample { pub(crate) attachment: Option&lt;ZBytes&gt;, } +pub trait WithFields { + type Fields; + fn fields(self) -> Self::Fields; +} + +impl WithFields for Sample { + type Fields = SampleFields; + fn fields(self) -> Self::Fields { + self.into() + } +} + impl Sample { /// Gets the key expression on which this Sample was published. #[inline]
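The new WithFields trait exposes the existing Sample-to-SampleFields conversion as a consuming method. A hedged usage sketch follows; the import paths and the key_expr/payload field names are assumptions drawn from the hunk above and the Sample accessors, not confirmed by this diff:

    // Assumed re-export paths; the diff adds both items in zenoh/src/api/sample.rs.
    use zenoh::sample::{Sample, SampleFields, WithFields};

    fn split(sample: Sample) {
        // fields(self) consumes the Sample and delegates to the existing
        // Into<SampleFields> conversion, so parts can be moved out directly
        // instead of cloned through accessors.
        let SampleFields { key_expr, payload, .. } = sample.fields();
        let _ = (key_expr, payload);
    }
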
diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 7325737a03..411b062189 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -111,15 +111,11 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); -#[doc(inline)] -pub use zenoh_util::{ - init_log_from_env_or as init_logging_with, try_init_log_from_env as init_logging, -}; - #[doc(inline)] pub use crate::{ config::Config, core::{Error, Result}, + logging::{init_logging, init_logging_with_level}, scouting::scout, session::{open, Session}, }; @@ -141,6 +137,12 @@ pub mod core { pub use crate::api::publisher::Priority; } +pub mod logging { + #[cfg(feature = "internal")] + pub use zenoh_util::{init_log_with_callbacks, try_init_logging}; + pub use zenoh_util::{init_logging, init_logging_with_level, InvalidLogLevel, LogLevel}; +} + /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. /// /// In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). @@ -405,8 +407,6 @@ pub mod internal { }; } - pub use zenoh_util::init_log_with_callback; - pub use crate::api::value::Value; } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index eafb480487..3c26fb63c1 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -32,9 +32,8 @@ mod test { const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + #[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_acl() { - zenoh::init_logging(); test_pub_sub_deny().await; test_pub_sub_allow().await; test_pub_sub_deny_then_allow().await; diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs index f8dcf74bc4..7e6a63e4c4 100644 --- a/zenoh/tests/authentication.rs +++ b/zenoh/tests/authentication.rs @@ -35,9 +35,8 @@ mod test { const VALUE: &str = "zenoh"; static TESTFILES_PATH: Lazy&lt;PathBuf&gt; = Lazy::new(std::env::temp_dir); - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + #[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_authentication() { - zenoh_util::try_init_log_from_env(); create_new_files(TESTFILES_PATH.to_path_buf()) .await .unwrap(); diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 93179d2c46..82ea49a06e 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -180,9 +180,8 @@ fn downsampling_by_keyexpr_impl(flow: InterceptorFlow) { downsampling_test(pub_config, sub_config, ke_prefix, ke_of_rates, rate_check); } -#[test] +#[test_log::test] fn downsampling_by_keyexpr() { - zenoh::init_logging(); downsampling_by_keyexpr_impl(InterceptorFlow::Ingress); downsampling_by_keyexpr_impl(InterceptorFlow::Egress); } @@ -233,18 +232,15 @@ fn downsampling_by_interface_impl(flow: InterceptorFlow) { } #[cfg(unix)] -#[test] +#[test_log::test] fn downsampling_by_interface() { - zenoh::init_logging(); downsampling_by_interface_impl(InterceptorFlow::Ingress); downsampling_by_interface_impl(InterceptorFlow::Egress); } -#[test] +#[test_log::test] #[should_panic(expected = "unknown variant `down`")] fn downsampling_config_error_wrong_strategy() { - zenoh::init_logging(); - let mut config = Config::default(); config .insert_json5( diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index dbd850da24..a1d1a8e7ad 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -15,7 +15,7 @@ use
zenoh_core::ztimeout; #[cfg(feature = "unstable")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_subscriber_clique() { use std::time::Duration; @@ -27,8 +27,6 @@ async fn test_liveliness_subscriber_clique() { const PEER1_ENDPOINT: &str = "tcp/localhost:47447"; const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/clique"; - zenoh_util::try_init_log_from_env(); - let peer1 = { let mut c = config::default(); c.listen @@ -72,7 +70,7 @@ async fn test_liveliness_subscriber_clique() { } #[cfg(feature = "unstable")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_query_clique() { use std::time::Duration; @@ -84,8 +82,6 @@ async fn test_liveliness_query_clique() { const PEER1_ENDPOINT: &str = "tcp/localhost:47448"; const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/clique"; - zenoh_util::try_init_log_from_env(); - let peer1 = { let mut c = config::default(); c.listen @@ -122,7 +118,7 @@ async fn test_liveliness_query_clique() { } #[cfg(feature = "unstable")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_subscriber_brokered() { use std::time::Duration; @@ -135,8 +131,6 @@ async fn test_liveliness_subscriber_brokered() { const ROUTER_ENDPOINT: &str = "tcp/localhost:47449"; const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/brokered"; - zenoh_util::try_init_log_from_env(); - let _router = { let mut c = config::default(); c.listen @@ -192,7 +186,7 @@ async fn test_liveliness_subscriber_brokered() { } #[cfg(feature = "unstable")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_query_brokered() { use std::time::Duration; @@ -204,8 +198,6 @@ async fn test_liveliness_query_brokered() { const ROUTER_ENDPOINT: &str = "tcp/localhost:47450"; const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/brokered"; - zenoh_util::try_init_log_from_env(); - let _router = { let mut c = config::default(); c.listen @@ -254,7 +246,7 @@ async fn test_liveliness_query_brokered() { } #[cfg(feature = "unstable")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_subscriber_local() { use std::time::Duration; @@ -264,8 +256,6 @@ async fn test_liveliness_subscriber_local() { const SLEEP: Duration = Duration::from_secs(1); const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/local"; - zenoh_util::try_init_log_from_env(); - let peer = { let mut c = config::default(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); @@ -294,7 +284,7 @@ async fn test_liveliness_subscriber_local() { } #[cfg(feature = "unstable")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn test_liveliness_query_local() { use std::time::Duration; @@ -304,8 +294,6 @@ async fn test_liveliness_query_local() { const SLEEP: Duration = Duration::from_secs(1); const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/local"; - zenoh_util::try_init_log_from_env(); - let peer = { let mut c = config::default(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); diff --git 
a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 30599ae5c1..bd51d3db8e 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -38,9 +38,8 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { (session1, session2) } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn zenoh_matching_status_any() -> ZResult<()> { - zenoh_util::init_logging(); let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; let publisher1 = ztimeout!(session1 @@ -90,10 +89,8 @@ async fn zenoh_matching_status_any() -> ZResult<()> { Ok(()) } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn zenoh_matching_status_remote() -> ZResult<()> { - zenoh_util::init_logging(); - let session1 = ztimeout!(zenoh::open(config::peer())).unwrap(); let session2 = ztimeout!(zenoh::open(config::peer())).unwrap(); @@ -145,10 +142,8 @@ async fn zenoh_matching_status_remote() -> ZResult<()> { Ok(()) } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] async fn zenoh_matching_status_local() -> ZResult<()> { - zenoh_util::init_logging(); - let session1 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); let session2 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs index e927e921b2..bd11799b0d 100644 --- a/zenoh/tests/open_time.rs +++ b/zenoh/tests/open_time.rs @@ -133,73 +133,65 @@ async fn time_lowlatency_open(endpoint: &EndPoint, mode: WhatAmI) { } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tcp_only_open() { - zenoh::init_logging(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); time_universal_open(&endpoint, WhatAmI::Client).await; } #[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tcp_only_with_lowlatency_open() { - zenoh::init_logging(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14100).parse().unwrap(); time_lowlatency_open(&endpoint, WhatAmI::Client).await; } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_udp_only_open() { - zenoh::init_logging(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14010).parse().unwrap(); time_universal_open(&endpoint, WhatAmI::Client).await; } #[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_udp_only_with_lowlatency_open() { - zenoh::init_logging(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14110).parse().unwrap(); time_lowlatency_open(&endpoint, WhatAmI::Client).await; } // #[cfg(feature = "transport_ws")] -// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] // #[ignore] // async fn time_ws_only_open() { -// zenoh::init_logging(); // let endpoint: EndPoint = 
format!("ws/127.0.0.1:{}", 14020).parse().unwrap(); // time_universal_open(&endpoint, WhatAmI::Client).await; // } // #[cfg(feature = "transport_ws")] -// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] // #[ignore] // async fn time_ws_only_with_lowlatency_open() { -// zenoh::init_logging(); // let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14120).parse().unwrap(); // time_lowlatency_open(&endpoint, WhatAmI::Client).await; // } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_unixpipe_only_open() { - zenoh::init_logging(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_open".parse().unwrap(); time_universal_open(&endpoint, WhatAmI::Client).await; } #[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_unixpipe_only_with_lowlatency_open() { - zenoh::init_logging(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_open" .parse() .unwrap(); @@ -207,10 +199,9 @@ async fn time_unixpipe_only_with_lowlatency_open() { } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_unix_only_open() { - zenoh::init_logging(); let f1 = "zenoh-test-unix-socket-9-open.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -220,12 +211,11 @@ async fn time_unix_only_open() { } #[cfg(feature = "transport_tls")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_tls_only_open() { use zenoh_link::tls::config::*; - zenoh::init_logging(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -418,10 +408,9 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== } #[cfg(all(feature = "transport_vsock", target_os = "linux"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))] #[ignore] async fn time_vsock_only_open() { - zenoh::init_logging(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:18000".parse().unwrap(); time_lowlatency_open(&endpoint, WhatAmI::Client).await; } diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 515b3fbd34..93a41ca322 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -361,10 +361,8 @@ impl Recipe { // Two peers connecting to a common node (either in router or peer mode) can discover each other. // And the message transmission should work even if the common node disappears after a while. 
diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs
index 515b3fbd34..93a41ca322 100644
--- a/zenoh/tests/routing.rs
+++ b/zenoh/tests/routing.rs
@@ -361,10 +361,8 @@ impl Recipe {
 
 // Two peers connecting to a common node (either in router or peer mode) can discover each other.
 // And the message transmission should work even if the common node disappears after a while.
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn gossip() -> Result<()> {
-    zenoh::init_logging();
-
     let locator = String::from("tcp/127.0.0.1:17446");
     let ke = String::from("testKeyExprGossip");
     let msg_size = 8;
@@ -429,9 +427,8 @@ async fn gossip() -> Result<()> {
 }
 
 // Simulate two peers connecting to a router but not directly reachable to each other can exchange messages via the brokering by the router.
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn static_failover_brokering() -> Result<()> {
-    zenoh::init_logging();
     let locator = String::from("tcp/127.0.0.1:17449");
     let ke = String::from("testKeyExprStaticFailoverBrokering");
     let msg_size = 8;
@@ -490,9 +487,8 @@
 // 3. Spawning order (delay_in_secs for node1, node2, and node3) = 6 (cases)
 //
 // Total cases = 2 x 4 x 6 = 48
-#[tokio::test(flavor = "multi_thread", worker_threads = 9)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 9))]
 async fn three_node_combination() -> Result<()> {
-    zenoh::init_logging();
     let modes = [WhatAmI::Peer, WhatAmI::Client];
     let delay_in_secs = [
         (0, 1, 2),
@@ -621,10 +617,8 @@
 // 2. Mode: {Client, Peer} x {Client, Peer} x {IsFirstListen} = 2 x 2 x 2 = 8 (modes)
 //
 // Total cases = 2 x 8 = 16
-#[tokio::test(flavor = "multi_thread", worker_threads = 8)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 8))]
 async fn two_node_combination() -> Result<()> {
-    zenoh::init_logging();
-
     #[derive(Clone, Copy)]
     struct IsFirstListen(bool);
diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs
index 9c8a1cfa47..00b8b1532c 100644
--- a/zenoh/tests/session.rs
+++ b/zenoh/tests/session.rs
@@ -244,18 +244,16 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re
     }
 }
 
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn zenoh_session_unicast() {
-    zenoh::init_logging();
     let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await;
     test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await;
     test_session_qryrep(&peer01, &peer02, Reliability::Reliable).await;
     close_session(peer01, peer02).await;
 }
 
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn zenoh_session_multicast() {
-    zenoh::init_logging();
     let (peer01, peer02) =
         open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await;
     test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await;
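Note: the shm.rs hunks below use the attribute without an inner test macro. In that form test_log::test wraps a plain #[test], so the async body is driven by a manually built runtime. A hedged sketch with a hypothetical test name:

    #[test_log::test]
    fn sync_test_with_manual_runtime() {
        // The attribute has already initialized logging; the test itself
        // owns the runtime instead of relying on #[tokio::test].
        tokio::runtime::Runtime::new().unwrap().block_on(async {
            assert_eq!(2 + 2, 4);
        });
    }
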
diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs
index 074c538b6c..120a62d8eb 100644
--- a/zenoh/tests/shm.rs
+++ b/zenoh/tests/shm.rs
@@ -177,24 +177,18 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re
     }
 }
 
-#[test]
+#[test_log::test]
 fn zenoh_shm_unicast() {
     tokio::runtime::Runtime::new().unwrap().block_on(async {
-        // Initiate logging
-        zenoh::init_logging();
-
         let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:19447"]).await;
         test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await;
         close_session(peer01, peer02).await;
     });
 }
 
-#[test]
+#[test_log::test]
 fn zenoh_shm_multicast() {
     tokio::runtime::Runtime::new().unwrap().block_on(async {
-        // Initiate logging
-        zenoh::init_logging();
-
         let (peer01, peer02) =
             open_session_multicast("udp/224.0.0.1:19448", "udp/224.0.0.1:19448").await;
         test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await;
diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs
index ceb7fb8c98..6c768c1621 100644
--- a/zenoh/tests/unicity.rs
+++ b/zenoh/tests/unicity.rs
@@ -255,19 +255,16 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) {
     }
 }
 
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn zenoh_unicity_p2p() {
-    zenoh::init_logging();
-
     let (s01, s02, s03) = open_p2p_sessions().await;
     test_unicity_pubsub(&s01, &s02, &s03).await;
     test_unicity_qryrep(&s01, &s02, &s03).await;
     close_sessions(s01, s02, s03).await;
 }
 
-#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[test_log::test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
 async fn zenoh_unicity_brokered() {
-    zenoh::init_logging();
     let r = open_router_session().await;
 
     let (s01, s02, s03) = open_client_sessions().await;
diff --git a/zenohd/Cargo.toml b/zenohd/Cargo.toml
index b0320ce648..166f9b9f5e 100644
--- a/zenohd/Cargo.toml
+++ b/zenohd/Cargo.toml
@@ -28,20 +28,20 @@ readme = "README.md"
 [features]
 default = ["zenoh/default"]
 shared-memory = ["zenoh/shared-memory"]
-loki = ["tracing-loki","url"]
+loki = ["tracing-loki", "url"]
 
 [dependencies]
 tokio = { workspace = true, features = ["rt-multi-thread"] }
 clap = { workspace = true, features = ["derive"] }
-zenoh-util = {workspace = true }
+zenoh-util = { workspace = true }
 futures = { workspace = true }
 git-version = { workspace = true }
 json5 = { workspace = true }
 lazy_static = { workspace = true }
-tracing = {workspace = true}
-tracing-subscriber = {workspace = true}
-tracing-loki = {workspace = true, optional = true }
-url = {workspace = true, optional = true }
+tracing = { workspace = true }
+tracing-subscriber = { workspace = true }
+tracing-loki = { workspace = true, optional = true }
+url = { workspace = true, optional = true }
 zenoh = { workspace = true, features = ["unstable", "internal", "plugins"] }
 
 [dev-dependencies]
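Note: with test-log declared once as a workspace dev-dependency, the per-test attribute is all a test crate needs; no logging setup survives in test bodies. Roughly what the attribute automates, as a hand-written equivalent (hedged sketch; the macro-generated code differs in detail, and the "env-filter" feature of tracing-subscriber is assumed):

    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    async fn manual_equivalent() {
        // Install a subscriber honoring RUST_LOG; try_init() tolerates the
        // subscriber already being set by an earlier test in the same binary.
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .try_init();
        tracing::debug!("equivalent of what #[test_log::test] sets up");
    }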