From 3e16412ca156b47ade0bfeecaae0f9820b8145b0 Mon Sep 17 00:00:00 2001
From: dignifiedquire
Date: Thu, 14 Nov 2024 12:50:01 +0100
Subject: [PATCH 01/17] refactor(iroh): remove iroh-docs

---
 iroh/Cargo.toml          |  25 +++++++--
 iroh/examples/hammer.rs  | 106 +++++++++++++++++++++++++++++++++++++++
 iroh/src/client.rs       |  16 ++----
 iroh/src/lib.rs          |   2 -
 iroh/src/metrics.rs      |   5 --
 iroh/src/node.rs         |  59 +---------------------
 iroh/src/node/builder.rs | 103 +------------------------------------
 iroh/src/node/rpc.rs     |  25 +--------
 iroh/src/rpc_protocol.rs |   2 -
 iroh/tests/gc.rs         |  22 ++++----
 iroh/tests/provide.rs    |   5 +-
 11 files changed, 147 insertions(+), 223 deletions(-)
 create mode 100644 iroh/examples/hammer.rs

diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml
index c70df11a2e..82bd0b66b3 100644
--- a/iroh/Cargo.toml
+++ b/iroh/Cargo.toml
@@ -20,7 +20,13 @@ cc = "=1.1.31" # enforce cc version, because of https://github.com/rust-lang/cc-
 anyhow = { version = "1" }
 async-channel = "2.3.1"
 bytes = "1.7"
-derive_more = { version = "1.0.0", features = ["debug", "display", "from", "try_into", "from_str"] }
+derive_more = { version = "1.0.0", features = [
+    "debug",
+    "display",
+    "from",
+    "try_into",
+    "from_str",
+] }
 futures-lite = "2.3"
 futures-util = "0.3"
 iroh-blobs = { version = "0.28.0", features = ["downloader"] }
@@ -35,8 +41,15 @@ nested_enum_utils = "0.1.0"
 num_cpus = { version = "1.15.0" }
 iroh-gossip = "0.28.1"
 parking_lot = "0.12.1"
-postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] }
-quic-rpc = { version = "0.15", default-features = false, features = ["flume-transport", "quinn-transport"] }
+postcard = { version = "1", default-features = false, features = [
+    "alloc",
+    "use-std",
+    "experimental-derive",
+] }
+quic-rpc = { version = "0.15", default-features = false, features = [
+    "flume-transport",
+    "quinn-transport",
+] }
 quic-rpc-derive = { version = "0.15" }
 quinn = { package = "iroh-quinn", version = "0.12" }
 serde = { version = "1", features = ["derive"] }
@@ -63,7 +76,11 @@ metrics = ["iroh-metrics", "iroh-blobs/metrics"]
 fs-store = ["iroh-blobs/fs-store"]
 test = []
 examples = ["dep:clap", "dep:indicatif"]
-discovery-local-network = ["iroh-net/discovery-local-network", "examples", "dep:console"]
+discovery-local-network = [
+    "iroh-net/discovery-local-network",
+    "examples",
+    "dep:console",
+]
 discovery-pkarr-dht = ["iroh-net/discovery-pkarr-dht"]
 test-utils = ["iroh-net/test-utils"]

diff --git a/iroh/examples/hammer.rs b/iroh/examples/hammer.rs
new file mode 100644
index 0000000000..64d1f7b172
--- /dev/null
+++ b/iroh/examples/hammer.rs
@@ -0,0 +1,106 @@
+//! Stress-test example: repeatedly spins up nodes that each serve a single
+//! blob to a short-lived client node, hammering node startup and shutdown.
+//!
+//! This is using in-memory databases and random node ids.
+//! Run this example from the project root:
+//!
+//!     $ cargo run --example hammer
+use std::str::FromStr;
+
+use anyhow::Context;
+use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket};
+use iroh_net::{relay::RelayUrl, RelayMap, RelayMode};
+use tracing_subscriber::{prelude::*, EnvFilter};
+
+// set the RUST_LOG env var to one of {debug,info,warn} to see logging info
+pub fn setup_logging() {
+    tracing_subscriber::registry()
+        .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
+        .with(EnvFilter::from_default_env())
+        .try_init()
+        .ok();
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    setup_logging();
+    println!("Hammer time!");
+
+    // get iterations from command line
+    let args: Vec<String> = std::env::args().collect();
+    let iterations = if args.len() == 2 {
+        args[1]
+            .parse::<u64>()
+            .context("failed to parse iterations")?
+    } else {
+        10
+    };
+
+    for i in 0..iterations {
+        // create a new node
+        println!("node: {}", i);
+        let relay_url = RelayUrl::from_str("http://localhost:3340").unwrap();
+        let relay_map = RelayMap::from_url(relay_url.clone());
+        tokio::task::spawn(async move {
+            let node = iroh::node::Node::memory()
+                .relay_mode(RelayMode::Custom(relay_map.clone()))
+                .spawn()
+                .await
+                .unwrap();
+
+            // add some data and remember the hash
+            let res = node.blobs().add_bytes("Hello, world!").await.unwrap();
+
+            // create a ticket
+            let mut addr = node.net().node_addr().await.unwrap();
+            addr.apply_options(AddrInfoOptions::RelayAndAddresses);
+            let ticket = BlobTicket::new(addr, res.hash, res.format).unwrap();
+
+            tokio::task::spawn(async move {
+                let client_node = iroh::node::Node::memory()
+                    .relay_mode(RelayMode::Custom(relay_map.clone()))
+                    .spawn()
+                    .await
+                    .unwrap();
+
+                // `download` returns a stream of `DownloadProgress` events. You can iterate
+                // through these updates to get progress on the state of your download.
+                let download_stream = client_node
+                    .blobs()
+                    .download(ticket.hash(), ticket.node_addr().clone())
+                    .await
+                    .unwrap();
+
+                // You can also just `await` the stream, which will poll the
+                // `DownloadProgress` stream for you.
+                let outcome = download_stream
+                    .await
+                    .context("unable to download hash")
+                    .unwrap();
+
+                println!(
+                    "\ndownloaded {} bytes from node {}",
+                    outcome.downloaded_size,
+                    ticket.node_addr().node_id
+                );
+
+                // Get the content we have just fetched from the iroh database.
+                let bytes = client_node
+                    .blobs()
+                    .read_to_bytes(ticket.hash())
+                    .await
+                    .unwrap();
+                let s = std::str::from_utf8(&bytes)
+                    .context("unable to parse blob as utf-8 string")
+                    .unwrap();
+                println!("content: {}", s);
+
+                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+            });
+
+            tokio::time::sleep(std::time::Duration::from_secs(5)).await;
+            node.shutdown().await.unwrap();
+        });
+        tokio::time::sleep(std::time::Duration::from_millis(5)).await;
+    }
+    tokio::signal::ctrl_c().await?;
+    Ok(())
+}
diff --git a/iroh/src/client.rs b/iroh/src/client.rs
index ab1554802b..e4d1dafb97 100644
--- a/iroh/src/client.rs
+++ b/iroh/src/client.rs
@@ -7,7 +7,6 @@ pub use crate::rpc_protocol::RpcService;
 mod quic;

 pub use iroh_blobs::rpc::client::{blobs, tags};
-pub use iroh_docs::rpc::client::{authors, docs, docs::Doc};
 pub use iroh_gossip::rpc::client as gossip;
 pub use iroh_node_util::rpc::client::{net, node};

 pub(crate) use self::quic::{connect_raw as quic_connect_raw, RPC_ALPN};
@@ -45,21 +44,16 @@ impl Iroh {
         Self { rpc }
     }

+    /// Returns the actual [`RpcClient`].
+    pub fn client(&self) -> RpcClient {
+        self.rpc.clone()
+    }
+
     /// Returns the blobs client.
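     ///
     /// A minimal usage sketch (hypothetical, not part of the original patch;
     /// it assumes a spawned node held in `node` inside an async context):
     /// ```ignore
     /// let res = node.client().blobs().add_bytes("hello world").await?;
     /// println!("stored blob with hash {}", res.hash);
     /// ```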
pub fn blobs(&self) -> blobs::Client { blobs::Client::new(self.rpc.clone().map().boxed()) } - /// Returns the docs client. - pub fn docs(&self) -> iroh_docs::rpc::client::docs::Client { - iroh_docs::rpc::client::docs::Client::new(self.rpc.clone().map().boxed()) - } - - /// Returns the docs client. - pub fn authors(&self) -> iroh_docs::rpc::client::authors::Client { - iroh_docs::rpc::client::authors::Client::new(self.rpc.clone().map().boxed()) - } - /// Returns the tags client. pub fn tags(&self) -> tags::Client { tags::Client::new(self.rpc.clone().map().boxed()) diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs index 6906f7b4f5..d13dfdea3b 100644 --- a/iroh/src/lib.rs +++ b/iroh/src/lib.rs @@ -93,8 +93,6 @@ pub use iroh_base as base; #[doc(inline)] pub use iroh_blobs as blobs; #[doc(inline)] -pub use iroh_docs as docs; -#[doc(inline)] pub use iroh_gossip as gossip; #[doc(inline)] pub use iroh_net as net; diff --git a/iroh/src/metrics.rs b/iroh/src/metrics.rs index 33f7551f81..7ce6a65f72 100644 --- a/iroh/src/metrics.rs +++ b/iroh/src/metrics.rs @@ -69,7 +69,6 @@ impl Metric for Metrics { pub fn try_init_metrics_collection() -> std::io::Result<()> { iroh_metrics::core::Core::try_init(|reg, metrics| { metrics.insert(crate::metrics::Metrics::new(reg)); - metrics.insert(iroh_docs::metrics::Metrics::new(reg)); metrics.insert(iroh_net::metrics::MagicsockMetrics::new(reg)); metrics.insert(iroh_net::metrics::NetReportMetrics::new(reg)); metrics.insert(iroh_net::metrics::PortmapMetrics::new(reg)); @@ -83,10 +82,6 @@ pub fn get_metrics() -> anyhow::Result> { let mut map = BTreeMap::new(); let core = iroh_metrics::core::Core::get().ok_or_else(|| anyhow::anyhow!("metrics are disabled"))?; - collect( - core.get_collector::(), - &mut map, - ); collect( core.get_collector::(), &mut map, diff --git a/iroh/src/node.rs b/iroh/src/node.rs index b414fea96c..aa4cf49416 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -54,7 +54,6 @@ use iroh_blobs::{ store::Store as BaoStore, util::local_pool::{LocalPool, LocalPoolHandle}, }; -use iroh_docs::{engine::Engine, net::DOCS_ALPN}; use iroh_net::{ endpoint::{DirectAddrsStream, RemoteInfo}, AddrInfo, Endpoint, NodeAddr, @@ -74,8 +73,7 @@ mod rpc_status; pub use self::{ builder::{ - Builder, DiscoveryConfig, DocsStorage, GcPolicy, ProtocolBuilder, StorageConfig, - DEFAULT_RPC_ADDR, + Builder, DiscoveryConfig, GcPolicy, ProtocolBuilder, StorageConfig, DEFAULT_RPC_ADDR, }, rpc_status::RpcStatus, }; @@ -292,7 +290,6 @@ impl NodeInner { if let GcPolicy::Interval(gc_period) = gc_policy { let router = router.clone(); let handle = local_pool.spawn(move || async move { - let docs_engine = router.get_protocol::>(DOCS_ALPN); let blobs = router .get_protocol::>(iroh_blobs::protocol::ALPN) .expect("missing blobs"); @@ -304,32 +301,7 @@ impl NodeInner { period: gc_period, done_callback: gc_done_callback, }, - move || { - let docs_engine = docs_engine.clone(); - async move { - let mut live = BTreeSet::default(); - if let Some(docs) = docs_engine { - let doc_hashes = match docs.sync.content_hashes().await { - Ok(hashes) => hashes, - Err(err) => { - tracing::warn!("Error getting doc hashes: {}", err); - return live; - } - }; - for hash in doc_hashes { - match hash { - Ok(hash) => { - live.insert(hash); - } - Err(err) => { - tracing::error!("Error getting doc hash: {}", err); - } - } - } - } - live - } - }, + || async move { BTreeSet::default() }, ) .await; }); @@ -584,33 +556,6 @@ mod tests { Ok(()) } - #[cfg(feature = "fs-store")] - #[tokio::test] - async fn test_shutdown() -> 
Result<()> { - let _guard = iroh_test::logging::setup(); - - let iroh_root = tempfile::TempDir::new()?; - { - let iroh = Node::persistent(iroh_root.path()) - .await? - .enable_docs() - .spawn() - .await?; - let doc = iroh.docs().create().await?; - drop(doc); - iroh.shutdown().await?; - } - - let iroh = Node::persistent(iroh_root.path()) - .await? - .enable_docs() - .spawn() - .await?; - let _doc = iroh.docs().create().await?; - - Ok(()) - } - #[tokio::test] async fn test_download_via_relay() -> Result<()> { let _guard = iroh_test::logging::setup(); diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index e6524f41e6..eb742d9dff 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -16,10 +16,6 @@ use iroh_blobs::{ store::{Map, Store as BaoStore}, util::local_pool::{self, LocalPool, LocalPoolHandle, PanicMode}, }; -use iroh_docs::{ - engine::{DefaultAuthorStorage, Engine}, - net::DOCS_ALPN, -}; use iroh_gossip::net::{Gossip, GOSSIP_ALPN}; #[cfg(not(test))] use iroh_net::discovery::local_swarm_discovery::LocalSwarmDiscovery; @@ -61,51 +57,11 @@ pub const DEFAULT_BIND_ADDR_V4: SocketAddrV4 = pub const DEFAULT_BIND_ADDR_V6: SocketAddrV6 = SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, DEFAULT_BIND_PORT + 1, 0, 0); -/// Storage backend for documents. -#[derive(Debug, Clone)] -pub enum DocsStorage { - /// Disable docs completely. - Disabled, - /// In-memory storage. - Memory, - /// File-based persistent storage. - Persistent(PathBuf), -} - -/// Start the engine, and prepare the selected storage version. -async fn spawn_docs( - storage: DocsStorage, - blobs_store: S, - default_author_storage: DefaultAuthorStorage, - endpoint: Endpoint, - gossip: Gossip, - downloader: Downloader, - local_pool_handle: LocalPoolHandle, -) -> anyhow::Result>> { - let docs_store = match storage { - DocsStorage::Disabled => return Ok(None), - DocsStorage::Memory => iroh_docs::store::fs::Store::memory(), - DocsStorage::Persistent(path) => iroh_docs::store::fs::Store::persistent(path)?, - }; - let engine = Engine::spawn( - endpoint, - gossip, - docs_store, - blobs_store, - downloader, - default_author_storage, - local_pool_handle, - ) - .await?; - Ok(Some(engine)) -} - /// Builder for the [`Node`]. /// /// You must supply a blob store and a document store. /// /// Blob store implementations are available in [`iroh_blobs::store`]. -/// Document store implementations are available in [`iroh_docs::store`]. /// /// Everything else is optional, with some sensible defaults. /// @@ -141,7 +97,6 @@ where gc_policy: GcPolicy, dns_resolver: Option, node_discovery: DiscoveryConfig, - docs_storage: DocsStorage, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: bool, /// Callback to register when a gc loop is done @@ -160,18 +115,6 @@ pub enum StorageConfig { Persistent(PathBuf), } -impl StorageConfig { - fn default_author_storage(&self) -> DefaultAuthorStorage { - match self { - StorageConfig::Persistent(ref root) => { - let path = IrohPaths::DefaultAuthor.with_root(root); - DefaultAuthorStorage::Persistent(path) - } - StorageConfig::Mem => DefaultAuthorStorage::Mem, - } - } -} - /// Configuration for node discovery. /// /// Node discovery enables connecting to other peers by only the [`NodeId`]. 
This usually @@ -269,7 +212,6 @@ impl Default for Builder { rpc_endpoint: mk_external_rpc(), rpc_addr: None, gc_policy: GcPolicy::Disabled, - docs_storage: DocsStorage::Disabled, node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -282,11 +224,7 @@ impl Default for Builder { impl Builder { /// Creates a new builder for [`Node`] using the given databases. - pub fn with_db_and_store( - blobs_store: D, - docs_storage: DocsStorage, - storage: StorageConfig, - ) -> Self { + pub fn with_db_and_store(blobs_store: D, storage: StorageConfig) -> Self { // Use staging in testing let relay_mode = match force_staging_infra() { true => RelayMode::Staging, @@ -305,7 +243,6 @@ impl Builder { rpc_endpoint: mk_external_rpc(), rpc_addr: None, gc_policy: GcPolicy::Disabled, - docs_storage, node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -343,12 +280,6 @@ where .with_context(|| { format!("Failed to load blobs database from {}", blob_dir.display()) })?; - let docs_storage = match self.docs_storage { - DocsStorage::Persistent(_) | DocsStorage::Memory => { - DocsStorage::Persistent(IrohPaths::DocsDatabase.with_root(root)) - } - DocsStorage::Disabled => DocsStorage::Disabled, - }; let secret_key_path = IrohPaths::SecretKey.with_root(root); let secret_key = load_secret_key(secret_key_path).await?; @@ -365,7 +296,6 @@ where relay_mode: self.relay_mode, dns_resolver: self.dns_resolver, gc_policy: self.gc_policy, - docs_storage, node_discovery: self.node_discovery, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -415,17 +345,6 @@ where self } - /// Enables documents support on this node. - pub fn enable_docs(mut self) -> Self { - self.docs_storage = match self.storage { - StorageConfig::Mem => DocsStorage::Memory, - StorageConfig::Persistent(ref root) => { - DocsStorage::Persistent(IrohPaths::DocsDatabase.with_root(root)) - } - }; - self - } - /// Sets the relay servers to assist in establishing connectivity. /// /// Relay servers are used to discover other nodes by `PublicKey` and also help @@ -677,19 +596,6 @@ where // Initialize the downloader. let downloader = Downloader::new(self.blobs_store.clone(), endpoint.clone(), lp.clone()); - // Spawn the docs engine, if enabled. - // This returns None for DocsStorage::Disabled, otherwise Some(DocsProtocol). - let docs = spawn_docs( - self.docs_storage, - self.blobs_store.clone(), - self.storage.default_author_storage(), - endpoint.clone(), - gossip.clone(), - downloader.clone(), - lp.handle().clone(), - ) - .await?; - // Initialize the internal RPC connection. let (internal_rpc, controller) = quic_rpc::transport::flume::channel(32); let internal_rpc = quic_rpc::transport::boxed::BoxedListener::new(internal_rpc); @@ -723,7 +629,6 @@ where self.blobs_store, gossip, downloader, - docs, ); Ok(protocol_builder) @@ -836,7 +741,6 @@ impl ProtocolBuilder { store: D, gossip: Gossip, downloader: Downloader, - docs: Option>, ) -> Self { // Register blobs. let blobs_proto = BlobsProtocol::new_with_events( @@ -851,11 +755,6 @@ impl ProtocolBuilder { // Register gossip. self = self.accept(GOSSIP_ALPN.to_vec(), Arc::new(gossip)); - // Register docs, if enabled. 
- if let Some(docs) = docs { - self = self.accept(DOCS_ALPN.to_vec(), Arc::new(docs)); - } - self } diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs index cfd0bd3e22..a829098f3e 100644 --- a/iroh/src/node/rpc.rs +++ b/iroh/src/node/rpc.rs @@ -1,8 +1,8 @@ use std::{collections::BTreeMap, fmt::Debug, sync::Arc}; use anyhow::Result; +use futures_lite::Stream; use iroh_blobs::{net_protocol::Blobs as BlobsProtocol, store::Store as BaoStore}; -use iroh_docs::net::DOCS_ALPN; use iroh_gossip::net::{Gossip, GOSSIP_ALPN}; use iroh_node_util::rpc::proto::node::CounterStats; use iroh_router::Router; @@ -113,28 +113,6 @@ impl Handler { .map_err(|e| e.errors_into()) } - async fn handle_docs_request( - self, - msg: iroh_docs::rpc::proto::Request, - chan: RpcChannel, - ) -> Result<(), RpcServerError> { - if let Some(docs) = self - .router - .get_protocol::>(DOCS_ALPN) - { - let chan = chan.map::(); - docs.as_ref() - .clone() - .handle_rpc_request(msg, chan) - .await - .map_err(|e| e.errors_into()) - } else { - Err(RpcServerError::SendError(anyhow::anyhow!( - "Docs is not enabled" - ))) - } - } - pub(crate) async fn handle_rpc_request( self, msg: Request, @@ -145,7 +123,6 @@ impl Handler { match msg { Node(msg) => self.handle_node_request(msg, chan).await, BlobsAndTags(msg) => self.handle_blobs_request(msg, chan.map().boxed()).await, - Docs(msg) => self.handle_docs_request(msg, chan).await, Gossip(msg) => self.handle_gossip_request(msg, chan).await, } } diff --git a/iroh/src/rpc_protocol.rs b/iroh/src/rpc_protocol.rs index 34a483a7f3..72ebd54145 100644 --- a/iroh/src/rpc_protocol.rs +++ b/iroh/src/rpc_protocol.rs @@ -28,7 +28,6 @@ pub struct RpcService; pub enum Request { Node(iroh_node_util::rpc::proto::Request), Gossip(iroh_gossip::RpcRequest), - Docs(iroh_docs::rpc::proto::Request), BlobsAndTags(iroh_blobs::rpc::proto::Request), } @@ -39,7 +38,6 @@ pub enum Request { pub enum Response { Node(iroh_node_util::rpc::proto::Response), Gossip(iroh_gossip::RpcResponse), - Docs(iroh_docs::rpc::proto::Response), BlobsAndTags(iroh_blobs::rpc::proto::Response), } diff --git a/iroh/tests/gc.rs b/iroh/tests/gc.rs index 857de7e19f..e2aa6bf71c 100644 --- a/iroh/tests/gc.rs +++ b/iroh/tests/gc.rs @@ -6,7 +6,7 @@ use std::{ use anyhow::Result; use bao_tree::{blake3, io::sync::Outboard, ChunkRanges}; use bytes::Bytes; -use iroh::node::{self, DocsStorage, Node}; +use iroh::node::{self, Node}; use iroh_blobs::{ hashseq::HashSeq, store::{EntryStatus, MapMut, Store}, @@ -44,18 +44,14 @@ where S: iroh_blobs::store::Store, { let (gc_send, gc_recv) = async_channel::unbounded(); - let node = node::Builder::with_db_and_store( - bao_store, - DocsStorage::Memory, - iroh::node::StorageConfig::Mem, - ) - .gc_policy(iroh::node::GcPolicy::Interval(gc_period)) - .register_gc_done_cb(Box::new(move || { - gc_send.send_blocking(()).ok(); - })) - .spawn() - .await - .unwrap(); + let node = node::Builder::with_db_and_store(bao_store, iroh::node::StorageConfig::Mem) + .gc_policy(iroh::node::GcPolicy::Interval(gc_period)) + .register_gc_done_cb(Box::new(move || { + gc_send.send_blocking(()).ok(); + })) + .spawn() + .await + .unwrap(); (node, gc_recv) } diff --git a/iroh/tests/provide.rs b/iroh/tests/provide.rs index 87ae66f384..eab8b84599 100644 --- a/iroh/tests/provide.rs +++ b/iroh/tests/provide.rs @@ -9,7 +9,7 @@ use anyhow::{Context, Result}; use bao_tree::{blake3, ChunkNum, ChunkRanges}; use bytes::Bytes; use futures_lite::FutureExt; -use iroh::node::{Builder, DocsStorage}; +use iroh::node::Builder; use 
iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket}; use iroh_blobs::{ format::collection::Collection, @@ -37,8 +37,7 @@ async fn dial(secret_key: SecretKey, peer: NodeAddr) -> anyhow::Result(db: D) -> Builder { - iroh::node::Builder::with_db_and_store(db, DocsStorage::Memory, iroh::node::StorageConfig::Mem) - .bind_random_port() + iroh::node::Builder::with_db_and_store(db, iroh::node::StorageConfig::Mem).bind_random_port() } #[tokio::test] From da0ea80a7c4130315d24e63cf34a504d7fbdb61c Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Thu, 14 Nov 2024 13:29:00 +0100 Subject: [PATCH 02/17] refactor(iroh): remove iroh-gossip --- iroh/src/client.rs | 8 +-- iroh/src/lib.rs | 2 - iroh/src/node/builder.rs | 8 --- iroh/src/node/rpc.rs | 20 ------- iroh/src/rpc_protocol.rs | 2 - iroh/tests/client.rs | 117 --------------------------------------- 6 files changed, 3 insertions(+), 154 deletions(-) delete mode 100644 iroh/tests/client.rs diff --git a/iroh/src/client.rs b/iroh/src/client.rs index e4d1dafb97..98421742c2 100644 --- a/iroh/src/client.rs +++ b/iroh/src/client.rs @@ -7,8 +7,11 @@ pub use crate::rpc_protocol::RpcService; mod quic; pub use iroh_blobs::rpc::client::{blobs, tags}; +<<<<<<< HEAD pub use iroh_gossip::rpc::client as gossip; pub use iroh_node_util::rpc::client::{net, node}; +======= +>>>>>>> d9b832c90 (refactor(iroh): remove iroh-gossip) pub(crate) use self::quic::{connect_raw as quic_connect_raw, RPC_ALPN}; @@ -59,11 +62,6 @@ impl Iroh { tags::Client::new(self.rpc.clone().map().boxed()) } - /// Returns the gossip client. - pub fn gossip(&self) -> gossip::Client { - gossip::Client::new(self.rpc.clone().map().boxed()) - } - /// Returns the net client. pub fn net(&self) -> net::Client { net::Client::new(self.rpc.clone().map().boxed()) diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs index d13dfdea3b..e5004e72e4 100644 --- a/iroh/src/lib.rs +++ b/iroh/src/lib.rs @@ -93,8 +93,6 @@ pub use iroh_base as base; #[doc(inline)] pub use iroh_blobs as blobs; #[doc(inline)] -pub use iroh_gossip as gossip; -#[doc(inline)] pub use iroh_net as net; #[doc(inline)] pub use iroh_router as router; diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index eb742d9dff..8de15131f0 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -16,7 +16,6 @@ use iroh_blobs::{ store::{Map, Store as BaoStore}, util::local_pool::{self, LocalPool, LocalPoolHandle, PanicMode}, }; -use iroh_gossip::net::{Gossip, GOSSIP_ALPN}; #[cfg(not(test))] use iroh_net::discovery::local_swarm_discovery::LocalSwarmDiscovery; use iroh_net::{ @@ -591,8 +590,6 @@ where let addr = endpoint.node_addr().await?; trace!("endpoint address: {addr:?}"); - // Initialize the gossip protocol. - let gossip = Gossip::from_endpoint(endpoint.clone(), Default::default(), &addr.info); // Initialize the downloader. let downloader = Downloader::new(self.blobs_store.clone(), endpoint.clone(), lp.clone()); @@ -627,7 +624,6 @@ where let protocol_builder = protocol_builder.register_iroh_protocols( self.blob_events, self.blobs_store, - gossip, downloader, ); @@ -739,7 +735,6 @@ impl ProtocolBuilder { mut self, blob_events: EventSender, store: D, - gossip: Gossip, downloader: Downloader, ) -> Self { // Register blobs. @@ -752,9 +747,6 @@ impl ProtocolBuilder { ); self = self.accept(iroh_blobs::protocol::ALPN.to_vec(), Arc::new(blobs_proto)); - // Register gossip. 
- self = self.accept(GOSSIP_ALPN.to_vec(), Arc::new(gossip)); - self } diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs index a829098f3e..de27ce401a 100644 --- a/iroh/src/node/rpc.rs +++ b/iroh/src/node/rpc.rs @@ -3,7 +3,6 @@ use std::{collections::BTreeMap, fmt::Debug, sync::Arc}; use anyhow::Result; use futures_lite::Stream; use iroh_blobs::{net_protocol::Blobs as BlobsProtocol, store::Store as BaoStore}; -use iroh_gossip::net::{Gossip, GOSSIP_ALPN}; use iroh_node_util::rpc::proto::node::CounterStats; use iroh_router::Router; use quic_rpc::server::{RpcChannel, RpcServerError}; @@ -95,24 +94,6 @@ impl Handler { .map_err(|e| e.errors_into()) } - async fn handle_gossip_request( - self, - msg: iroh_gossip::RpcRequest, - chan: RpcChannel, - ) -> Result<(), RpcServerError> { - let gossip = self - .router - .get_protocol::(GOSSIP_ALPN) - .expect("missing gossip"); - let chan = chan.map::(); - gossip - .as_ref() - .clone() - .handle_rpc_request(msg, chan) - .await - .map_err(|e| e.errors_into()) - } - pub(crate) async fn handle_rpc_request( self, msg: Request, @@ -123,7 +104,6 @@ impl Handler { match msg { Node(msg) => self.handle_node_request(msg, chan).await, BlobsAndTags(msg) => self.handle_blobs_request(msg, chan.map().boxed()).await, - Gossip(msg) => self.handle_gossip_request(msg, chan).await, } } } diff --git a/iroh/src/rpc_protocol.rs b/iroh/src/rpc_protocol.rs index 72ebd54145..ac32f5b445 100644 --- a/iroh/src/rpc_protocol.rs +++ b/iroh/src/rpc_protocol.rs @@ -27,7 +27,6 @@ pub struct RpcService; #[nested_enum_utils::enum_conversions()] pub enum Request { Node(iroh_node_util::rpc::proto::Request), - Gossip(iroh_gossip::RpcRequest), BlobsAndTags(iroh_blobs::rpc::proto::Request), } @@ -37,7 +36,6 @@ pub enum Request { #[nested_enum_utils::enum_conversions()] pub enum Response { Node(iroh_node_util::rpc::proto::Response), - Gossip(iroh_gossip::RpcResponse), BlobsAndTags(iroh_blobs::rpc::proto::Response), } diff --git a/iroh/tests/client.rs b/iroh/tests/client.rs deleted file mode 100644 index bf56a61a54..0000000000 --- a/iroh/tests/client.rs +++ /dev/null @@ -1,117 +0,0 @@ -use bytes::Bytes; -use futures_lite::{Stream, StreamExt}; -use futures_util::SinkExt; -use iroh::client::Iroh; -use iroh_gossip::{ - net::{Command, Event, GossipEvent}, - proto::TopicId, -}; -use iroh_net::{key::SecretKey, NodeAddr}; -use testresult::TestResult; - -/// Spawn an iroh node in a separate thread and tokio runtime, and return -/// the address and client. -async fn spawn_node() -> (NodeAddr, Iroh) { - let (sender, receiver) = tokio::sync::oneshot::channel(); - std::thread::spawn(move || { - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build()?; - runtime.block_on(async move { - let secret_key = SecretKey::generate(); - let node = iroh::node::Builder::default() - .secret_key(secret_key) - .relay_mode(iroh_net::RelayMode::Disabled) - .node_discovery(iroh::node::DiscoveryConfig::None) - .spawn() - .await?; - let addr = node.net().node_addr().await?; - sender.send((addr, node.client().clone())).unwrap(); - node.cancel_token().cancelled().await; - anyhow::Ok(()) - })?; - anyhow::Ok(()) - }); - receiver.await.unwrap() -} - -/// Await `n` messages from a stream of gossip events. 
-async fn await_messages( - mut stream: impl Stream> + Unpin + Send + Sync + 'static, - n: usize, -) -> TestResult> { - let handle = tokio::spawn(async move { - let mut res = Vec::new(); - #[allow(clippy::single_match)] - while let Some(msg) = stream.next().await { - match msg.unwrap() { - Event::Gossip(GossipEvent::Received(msg)) => { - res.push(msg.content); - if res.len() >= n { - break; - } - } - _ => {} - } - } - res - }); - - Ok(tokio::time::timeout(std::time::Duration::from_secs(60), handle).await??) -} - -#[tokio::test] -#[ignore = "flaky"] -async fn gossip_smoke() -> TestResult { - let _ = tracing_subscriber::fmt::try_init(); - let (addr1, node1) = spawn_node().await; - let (addr2, node2) = spawn_node().await; - let gossip1 = node1.gossip(); - let gossip2 = node2.gossip(); - node1.net().add_node_addr(addr2.clone()).await?; - node2.net().add_node_addr(addr1.clone()).await?; - - let topic = TopicId::from([0u8; 32]); - let (mut sink1, mut stream1) = gossip1.subscribe(topic, [addr2.node_id]).await?; - let (_sink2, stream2) = gossip2.subscribe(topic, [addr1.node_id]).await?; - - assert_eq!( - stream1.next().await.unwrap().unwrap(), - Event::Gossip(GossipEvent::Joined(vec![addr2.node_id])) - ); - drop(stream1); - - sink1.send(Command::Broadcast("hello".into())).await?; - let msgs = await_messages(stream2, 1).await?; - assert_eq!(msgs, vec![Bytes::from("hello")]); - Ok(()) -} - -#[tokio::test] -async fn gossip_drop_sink() -> TestResult { - let _ = tracing_subscriber::fmt::try_init(); - let (addr1, node1) = spawn_node().await; - let (addr2, node2) = spawn_node().await; - let gossip1 = node1.gossip(); - let gossip2 = node2.gossip(); - node1.net().add_node_addr(addr2.clone()).await?; - node2.net().add_node_addr(addr1.clone()).await?; - - let topic = TopicId::from([0u8; 32]); - - let (mut sink1, mut stream1) = gossip1.subscribe(topic, [addr2.node_id]).await?; - let (sink2, stream2) = gossip2.subscribe(topic, [addr1.node_id]).await?; - - assert_eq!( - stream1.next().await.unwrap().unwrap(), - Event::Gossip(GossipEvent::Joined(vec![addr2.node_id])) - ); - - drop(stream1); - drop(sink2); - - sink1.send(Command::Broadcast("hello".into())).await?; - let msgs = await_messages(stream2, 1).await?; - assert_eq!(msgs, vec![Bytes::from("hello")]); - Ok(()) -} From 123fc4dcb33409ebab706045b579bff6fd1deec0 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Thu, 14 Nov 2024 13:50:01 +0100 Subject: [PATCH 03/17] refactor(iroh): remove iroh-blobs --- Cargo.lock | 1 - {iroh/src/util => iroh-cli/src}/progress.rs | 0 iroh/Cargo.toml | 5 +- iroh/src/client.rs | 15 - iroh/src/lib.rs | 2 - iroh/src/node.rs | 84 +-- iroh/src/node/builder.rs | 194 +------ iroh/src/node/rpc.rs | 41 +- iroh/src/rpc_protocol.rs | 2 - iroh/src/util.rs | 1 - iroh/tests/batch.rs | 247 --------- iroh/tests/gc.rs | 459 ----------------- iroh/tests/provide.rs | 542 -------------------- 13 files changed, 46 insertions(+), 1547 deletions(-) rename {iroh/src/util => iroh-cli/src}/progress.rs (100%) delete mode 100644 iroh/tests/batch.rs delete mode 100644 iroh/tests/gc.rs delete mode 100644 iroh/tests/provide.rs diff --git a/Cargo.lock b/Cargo.lock index 28c9d69eb4..826cdfe99a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2621,7 +2621,6 @@ dependencies = [ "indicatif", "iroh", "iroh-base", - "iroh-blobs", "iroh-docs", "iroh-gossip", "iroh-io", diff --git a/iroh/src/util/progress.rs b/iroh-cli/src/progress.rs similarity index 100% rename from iroh/src/util/progress.rs rename to iroh-cli/src/progress.rs diff --git a/iroh/Cargo.toml 
b/iroh/Cargo.toml index 82bd0b66b3..50db996815 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -29,7 +29,6 @@ derive_more = { version = "1.0.0", features = [ ] } futures-lite = "2.3" futures-util = "0.3" -iroh-blobs = { version = "0.28.0", features = ["downloader"] } iroh-base = { version = "0.28.0", features = ["key"] } iroh-docs = { version = "0.28.0", features = ["rpc"] } iroh-io = { version = "0.6.0", features = ["stats"] } @@ -72,8 +71,8 @@ serde-error = "0.1.3" [features] default = ["metrics", "fs-store"] -metrics = ["iroh-metrics", "iroh-blobs/metrics"] -fs-store = ["iroh-blobs/fs-store"] +metrics = ["iroh-metrics"] +fs-store = [] test = [] examples = ["dep:clap", "dep:indicatif"] discovery-local-network = [ diff --git a/iroh/src/client.rs b/iroh/src/client.rs index 98421742c2..2b9a752d4e 100644 --- a/iroh/src/client.rs +++ b/iroh/src/client.rs @@ -6,12 +6,7 @@ pub use crate::rpc_protocol::RpcService; mod quic; -pub use iroh_blobs::rpc::client::{blobs, tags}; -<<<<<<< HEAD -pub use iroh_gossip::rpc::client as gossip; pub use iroh_node_util::rpc::client::{net, node}; -======= ->>>>>>> d9b832c90 (refactor(iroh): remove iroh-gossip) pub(crate) use self::quic::{connect_raw as quic_connect_raw, RPC_ALPN}; @@ -52,16 +47,6 @@ impl Iroh { self.rpc.clone() } - /// Returns the blobs client. - pub fn blobs(&self) -> blobs::Client { - blobs::Client::new(self.rpc.clone().map().boxed()) - } - - /// Returns the tags client. - pub fn tags(&self) -> tags::Client { - tags::Client::new(self.rpc.clone().map().boxed()) - } - /// Returns the net client. pub fn net(&self) -> net::Client { net::Client::new(self.rpc.clone().map().boxed()) diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs index e5004e72e4..d5662459df 100644 --- a/iroh/src/lib.rs +++ b/iroh/src/lib.rs @@ -91,8 +91,6 @@ #[doc(inline)] pub use iroh_base as base; #[doc(inline)] -pub use iroh_blobs as blobs; -#[doc(inline)] pub use iroh_net as net; #[doc(inline)] pub use iroh_router as router; diff --git a/iroh/src/node.rs b/iroh/src/node.rs index aa4cf49416..7ce9217088 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -38,7 +38,6 @@ use std::{ collections::BTreeSet, fmt::Debug, - marker::PhantomData, net::SocketAddr, path::{Path, PathBuf}, sync::Arc, @@ -49,11 +48,6 @@ use anyhow::{anyhow, Result}; use futures_lite::StreamExt; use futures_util::future::{MapErr, Shared}; use iroh_base::key::PublicKey; -use iroh_blobs::{ - net_protocol::Blobs as BlobsProtocol, - store::Store as BaoStore, - util::local_pool::{LocalPool, LocalPoolHandle}, -}; use iroh_net::{ endpoint::{DirectAddrsStream, RemoteInfo}, AddrInfo, Endpoint, NodeAddr, @@ -72,9 +66,7 @@ mod rpc; mod rpc_status; pub use self::{ - builder::{ - Builder, DiscoveryConfig, GcPolicy, ProtocolBuilder, StorageConfig, DEFAULT_RPC_ADDR, - }, + builder::{Builder, DiscoveryConfig, ProtocolBuilder, StorageConfig, DEFAULT_RPC_ADDR}, rpc_status::RpcStatus, }; @@ -100,8 +92,8 @@ pub type IrohServerEndpoint = quic_rpc::transport::boxed::BoxedListener< /// await the [`Node`] struct directly, it will complete when the task completes. If /// this is dropped the node task is not stopped but keeps running. #[derive(Debug, Clone)] -pub struct Node { - inner: Arc>, +pub struct Node { + inner: Arc, // `Node` needs to be `Clone + Send`, and we need to `task.await` in its `shutdown()` impl. 
// So we need // - `Shared` so we can `task.await` from all `Node` clones @@ -115,43 +107,37 @@ pub struct Node { pub(crate) type JoinErrToStr = Box String + Send + Sync + 'static>; #[derive(derive_more::Debug)] -struct NodeInner { - db: PhantomData, +struct NodeInner { rpc_addr: Option, endpoint: Endpoint, cancel_token: CancellationToken, client: crate::client::Iroh, - local_pool_handle: LocalPoolHandle, } /// In memory node. -pub type MemNode = Node; +#[deprecated] +pub type MemNode = Node; /// Persistent node. -pub type FsNode = Node; +#[deprecated] +pub type FsNode = Node; -impl MemNode { +impl Node { /// Returns a new builder for the [`Node`], by default configured to run in memory. /// /// Once done with the builder call [`Builder::spawn`] to create the node. - pub fn memory() -> Builder { - Builder::default() + pub fn memory() -> Builder { + Builder::memory() } -} -impl FsNode { /// Returns a new builder for the [`Node`], configured to persist all data /// from the given path. /// /// Once done with the builder call [`Builder::spawn`] to create the node. - pub async fn persistent( - root: impl AsRef, - ) -> Result> { - Builder::default().persist(root).await + pub async fn persistent(root: impl AsRef) -> Result { + Builder::memory().persist(root).await } -} -impl Node { /// Returns the [`Endpoint`] of the node. /// /// This can be used to establish connections to other nodes under any @@ -195,11 +181,6 @@ impl Node { &self.inner.client } - /// Returns a reference to the used `LocalPoolHandle`. - pub fn local_pool_handle(&self) -> &LocalPoolHandle { - &self.inner.local_pool_handle - } - /// Get the relay server we are connected to. pub fn home_relay(&self) -> Option { self.inner.endpoint.home_relay() @@ -242,7 +223,7 @@ impl Node { } } -impl std::ops::Deref for Node { +impl std::ops::Deref for Node { type Target = crate::client::Iroh; fn deref(&self) -> &Self::Target { @@ -250,7 +231,7 @@ impl std::ops::Deref for Node { } } -impl NodeInner { +impl NodeInner { async fn local_endpoint_addresses(&self) -> Result> { let endpoints = self .endpoint @@ -267,10 +248,7 @@ impl NodeInner { external_rpc: IrohServerEndpoint, internal_rpc: IrohServerEndpoint, router: Router, - gc_policy: GcPolicy, - gc_done_callback: Option>, nodes_data_path: Option, - local_pool: LocalPool, ) { let (ipv4, ipv6) = self.endpoint.bound_sockets(); debug!( @@ -286,37 +264,6 @@ impl NodeInner { let external_rpc = RpcServer::new(external_rpc); let internal_rpc = RpcServer::new(internal_rpc); - // Spawn a task for the garbage collection. - if let GcPolicy::Interval(gc_period) = gc_policy { - let router = router.clone(); - let handle = local_pool.spawn(move || async move { - let blobs = router - .get_protocol::>(iroh_blobs::protocol::ALPN) - .expect("missing blobs"); - - blobs - .store() - .gc_run( - iroh_blobs::store::GcConfig { - period: gc_period, - done_callback: gc_done_callback, - }, - || async move { BTreeSet::default() }, - ) - .await; - }); - // We cannot spawn tasks that run on the local pool directly into the join set, - // so instead we create a new task that supervises the local task. - join_set.spawn({ - async move { - if let Err(err) = handle.await { - return Err(anyhow::Error::from(err)); - } - Ok(()) - } - }); - } - if let Some(nodes_data_path) = nodes_data_path { let ep = self.endpoint.clone(); let token = self.cancel_token.clone(); @@ -417,7 +364,6 @@ impl NodeInner { // Abort remaining local tasks. 
tracing::info!("Shutting down local pool"); - local_pool.shutdown().await; } } diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index 8de15131f0..ca18703683 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -9,13 +9,6 @@ use anyhow::{Context, Result}; use futures_lite::StreamExt; use futures_util::{FutureExt as _, TryFutureExt as _}; use iroh_base::key::SecretKey; -use iroh_blobs::{ - downloader::Downloader, - net_protocol::Blobs as BlobsProtocol, - provider::EventSender, - store::{Map, Store as BaoStore}, - util::local_pool::{self, LocalPool, LocalPoolHandle, PanicMode}, -}; #[cfg(not(test))] use iroh_net::discovery::local_swarm_discovery::LocalSwarmDiscovery; use iroh_net::{ @@ -26,10 +19,9 @@ use iroh_net::{ }; use iroh_router::{ProtocolHandler, RouterBuilder}; use quic_rpc::transport::{boxed::BoxableListener, quinn::QuinnListener}; -use serde::{Deserialize, Serialize}; use tokio::task::JoinError; use tokio_util::{sync::CancellationToken, task::AbortOnDropHandle}; -use tracing::{debug, error_span, trace, Instrument}; +use tracing::{error_span, trace, Instrument}; use super::{rpc_status::RpcStatus, IrohServerEndpoint, JoinErrToStr, Node, NodeInner}; use crate::{ @@ -45,9 +37,6 @@ pub const DEFAULT_BIND_PORT: u16 = 11204; /// How long we wait at most for some endpoints to be discovered. const ENDPOINT_WAIT: Duration = Duration::from_secs(5); -/// Default interval between GC runs. -const DEFAULT_GC_INTERVAL: Duration = Duration::from_secs(60 * 5); - /// The default bind address for the iroh IPv4 socket. pub const DEFAULT_BIND_ADDR_V4: SocketAddrV4 = SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_BIND_PORT); @@ -80,28 +69,19 @@ pub const DEFAULT_BIND_ADDR_V6: SocketAddrV6 = /// /// [number 0]: https://n0.computer #[derive(derive_more::Debug)] -pub struct Builder -where - D: Map, -{ +pub struct Builder { storage: StorageConfig, addr_v4: SocketAddrV4, addr_v6: SocketAddrV6, secret_key: SecretKey, rpc_endpoint: IrohServerEndpoint, rpc_addr: Option, - blobs_store: D, keylog: bool, relay_mode: RelayMode, - gc_policy: GcPolicy, dns_resolver: Option, node_discovery: DiscoveryConfig, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: bool, - /// Callback to register when a gc loop is done - #[debug("callback")] - gc_done_callback: Option>, - blob_events: EventSender, transport_config: Option, } @@ -191,8 +171,9 @@ fn mk_external_rpc() -> IrohServerEndpoint { quic_rpc::transport::boxed::BoxedListener::new(DummyServerEndpoint) } -impl Default for Builder { - fn default() -> Self { +impl Builder { + /// Creates a default node builder with in memory configuration. + pub fn memory() -> Self { // Use staging in testing let relay_mode = match force_staging_infra() { true => RelayMode::Staging, @@ -204,26 +185,20 @@ impl Default for Builder { addr_v4: DEFAULT_BIND_ADDR_V4, addr_v6: DEFAULT_BIND_ADDR_V6, secret_key: SecretKey::generate(), - blobs_store: Default::default(), keylog: false, relay_mode, dns_resolver: None, rpc_endpoint: mk_external_rpc(), rpc_addr: None, - gc_policy: GcPolicy::Disabled, node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, - gc_done_callback: None, - blob_events: Default::default(), transport_config: None, } } -} -impl Builder { /// Creates a new builder for [`Node`] using the given databases. 
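     ///
     /// A minimal sketch of the builder call after this change (hypothetical
     /// usage; `StorageConfig` is re-exported from `iroh::node`):
     /// ```ignore
     /// let node = Builder::with_db_and_store(StorageConfig::Mem)
     ///     .spawn()
     ///     .await?;
     /// ```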
- pub fn with_db_and_store(blobs_store: D, storage: StorageConfig) -> Self { + pub fn with_db_and_store(storage: StorageConfig) -> Self { // Use staging in testing let relay_mode = match force_staging_infra() { true => RelayMode::Staging, @@ -235,51 +210,23 @@ impl Builder { addr_v4: SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_BIND_PORT), addr_v6: SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, DEFAULT_BIND_PORT + 1, 0, 0), secret_key: SecretKey::generate(), - blobs_store, keylog: false, relay_mode, dns_resolver: None, rpc_endpoint: mk_external_rpc(), rpc_addr: None, - gc_policy: GcPolicy::Disabled, node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, - gc_done_callback: None, - blob_events: Default::default(), transport_config: None, } } } -impl Builder -where - D: BaoStore, -{ - /// Configure a blob events sender. This will replace the previous blob - /// event sender. By default, no events are sent. - /// - /// To define an event sender, implement the [`iroh_blobs::provider::CustomEventSender`] trait. - pub fn blobs_events(mut self, blob_events: impl Into) -> Self { - self.blob_events = blob_events.into(); - self - } - +impl Builder { /// Persist all node data in the provided directory. - pub async fn persist( - self, - root: impl AsRef, - ) -> Result> { + pub async fn persist(self, root: impl AsRef) -> Result { let root = root.as_ref(); - let blob_dir = IrohPaths::BaoStoreDir.with_root(root); - - tokio::fs::create_dir_all(&blob_dir).await?; - let blobs_store = iroh_blobs::store::fs::Store::load(&blob_dir) - .await - .with_context(|| { - format!("Failed to load blobs database from {}", blob_dir.display()) - })?; - let secret_key_path = IrohPaths::SecretKey.with_root(root); let secret_key = load_secret_key(secret_key_path).await?; @@ -288,18 +235,14 @@ where addr_v4: self.addr_v4, addr_v6: self.addr_v6, secret_key, - blobs_store, keylog: self.keylog, rpc_endpoint: self.rpc_endpoint, rpc_addr: self.rpc_addr, relay_mode: self.relay_mode, dns_resolver: self.dns_resolver, - gc_policy: self.gc_policy, node_discovery: self.node_discovery, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, - gc_done_callback: self.gc_done_callback, - blob_events: self.blob_events, transport_config: self.transport_config, }) } @@ -314,12 +257,12 @@ where } /// Configure the default iroh rpc endpoint, on the default address. - pub async fn enable_rpc(self) -> Result> { + pub async fn enable_rpc(self) -> Result { self.enable_rpc_with_addr(DEFAULT_RPC_ADDR).await } /// Configure the default iroh rpc endpoint. - pub async fn enable_rpc_with_addr(self, mut rpc_addr: SocketAddr) -> Result> { + pub async fn enable_rpc_with_addr(self, mut rpc_addr: SocketAddr) -> Result { let (ep, actual_rpc_port) = make_rpc_endpoint(&self.secret_key, rpc_addr)?; rpc_addr.set_port(actual_rpc_port); @@ -336,14 +279,6 @@ where }) } - /// Sets the garbage collection policy. - /// - /// By default garbage collection is disabled. - pub fn gc_policy(mut self, gc_policy: GcPolicy) -> Self { - self.gc_policy = gc_policy; - self - } - /// Sets the relay servers to assist in establishing connectivity. /// /// Relay servers are used to discover other nodes by `PublicKey` and also help @@ -454,14 +389,6 @@ where self } - /// Register a callback for when GC is done. 
- #[cfg(any(test, feature = "test-utils"))] - #[cfg_attr(iroh_docsrs, doc(cfg(any(test, feature = "test-utils"))))] - pub fn register_gc_done_cb(mut self, cb: Box) -> Self { - self.gc_done_callback.replace(cb); - self - } - /// Whether to log the SSL pre-master key. /// /// If `true` and the `SSLKEYLOGFILE` environment variable is the path to a file this @@ -477,7 +404,7 @@ where /// This will create the underlying network server and spawn a tokio task accepting /// connections. The returned [`Node`] can be used to control the task as well as /// get information about it. - pub async fn spawn(self) -> Result> { + pub async fn spawn(self) -> Result { let unspawned_node = self.build().await?; unspawned_node.spawn().await } @@ -486,24 +413,8 @@ where /// /// Returns a [`ProtocolBuilder`], on which custom protocols can be registered with /// [`ProtocolBuilder::accept`]. To spawn the node, call [`ProtocolBuilder::spawn`]. - pub async fn build(self) -> Result> { - // Clone the blob store to shutdown in case of error. - let blobs_store = self.blobs_store.clone(); - match self.build_inner().await { - Ok(node) => Ok(node), - Err(err) => { - blobs_store.shutdown().await; - Err(err) - } - } - } - - async fn build_inner(self) -> Result> { + pub async fn build(self) -> Result { trace!("building node"); - let lp = LocalPool::new(local_pool::Config { - panic_mode: PanicMode::LogAndContinue, - ..Default::default() - }); let (endpoint, nodes_data_path) = { let discovery: Option> = match self.node_discovery { DiscoveryConfig::None => None, @@ -590,9 +501,6 @@ where let addr = endpoint.node_addr().await?; trace!("endpoint address: {addr:?}"); - // Initialize the downloader. - let downloader = Downloader::new(self.blobs_store.clone(), endpoint.clone(), lp.clone()); - // Initialize the internal RPC connection. let (internal_rpc, controller) = quic_rpc::transport::flume::channel(32); let internal_rpc = quic_rpc::transport::boxed::BoxedListener::new(internal_rpc); @@ -603,11 +511,9 @@ where let inner = Arc::new(NodeInner { rpc_addr: self.rpc_addr, - db: Default::default(), endpoint: endpoint.clone(), client, cancel_token: CancellationToken::new(), - local_pool_handle: lp.handle().clone(), }); let protocol_builder = ProtocolBuilder { @@ -615,18 +521,9 @@ where router: RouterBuilder::new(endpoint), internal_rpc, external_rpc: self.rpc_endpoint, - gc_policy: self.gc_policy, - gc_done_callback: self.gc_done_callback, nodes_data_path, - local_pool: lp, }; - let protocol_builder = protocol_builder.register_iroh_protocols( - self.blob_events, - self.blobs_store, - downloader, - ); - Ok(protocol_builder) } } @@ -640,19 +537,15 @@ where /// Note that RPC calls performed with client returned from [`Self::client`] will not complete /// until the node is spawned. #[derive(derive_more::Debug)] -pub struct ProtocolBuilder { - inner: Arc>, +pub struct ProtocolBuilder { + inner: Arc, internal_rpc: IrohServerEndpoint, external_rpc: IrohServerEndpoint, router: RouterBuilder, - #[debug("callback")] - gc_done_callback: Option>, - gc_policy: GcPolicy, nodes_data_path: Option, - local_pool: LocalPool, } -impl ProtocolBuilder { +impl ProtocolBuilder { /// Registers a protocol handler for incoming connections. /// /// Use this to register custom protocols onto the iroh node. Whenever a new connection for @@ -717,11 +610,6 @@ impl ProtocolBuilder { &self.inner.endpoint } - /// Returns a reference to the used [`LocalPoolHandle`]. 
- pub fn local_pool_handle(&self) -> &LocalPoolHandle { - self.local_pool.handle() - } - /// Returns a protocol handler for an ALPN. /// /// This downcasts to the concrete type and returns `None` if the handler registered for `alpn` @@ -730,37 +618,14 @@ impl ProtocolBuilder { self.router.get_protocol::
            <P>
(alpn) } - /// Registers the core iroh protocols (blobs, gossip, docs). - fn register_iroh_protocols( - mut self, - blob_events: EventSender, - store: D, - downloader: Downloader, - ) -> Self { - // Register blobs. - let blobs_proto = BlobsProtocol::new_with_events( - store, - self.local_pool_handle().clone(), - blob_events, - downloader, - self.endpoint().clone(), - ); - self = self.accept(iroh_blobs::protocol::ALPN.to_vec(), Arc::new(blobs_proto)); - - self - } - /// Spawns the node and starts accepting connections. - pub async fn spawn(self) -> Result> { + pub async fn spawn(self) -> Result { let Self { inner, internal_rpc, external_rpc, router, - gc_done_callback, - gc_policy, nodes_data_path, - local_pool: rt, } = self; let node_id = inner.endpoint.node_id(); @@ -769,15 +634,7 @@ impl ProtocolBuilder { // Spawn the main task and store it in the node for structured termination in shutdown. let fut = inner .clone() - .run( - external_rpc, - internal_rpc, - router.clone(), - gc_policy, - gc_done_callback, - nodes_data_path, - rt, - ) + .run(external_rpc, internal_rpc, router.clone(), nodes_data_path) .instrument(error_span!("node", me=%node_id.fmt_short())); let task = tokio::task::spawn(fut); @@ -811,23 +668,6 @@ impl ProtocolBuilder { } } -/// Policy for garbage collection. -// Please note that this is documented in the `iroh.computer` repository under -// `src/app/docs/reference/config/page.mdx`. Any changes to this need to be updated there. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -pub enum GcPolicy { - /// Garbage collection is disabled. - Disabled, - /// Garbage collection is run at the given interval. - Interval(Duration), -} - -impl Default for GcPolicy { - fn default() -> Self { - Self::Interval(DEFAULT_GC_INTERVAL) - } -} - const DEFAULT_RPC_PORT: u16 = 0x1337; const MAX_RPC_STREAMS: u32 = 1024; diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs index de27ce401a..959bc177be 100644 --- a/iroh/src/node/rpc.rs +++ b/iroh/src/node/rpc.rs @@ -1,8 +1,6 @@ use std::{collections::BTreeMap, fmt::Debug, sync::Arc}; use anyhow::Result; -use futures_lite::Stream; -use iroh_blobs::{net_protocol::Blobs as BlobsProtocol, store::Store as BaoStore}; use iroh_node_util::rpc::proto::node::CounterStats; use iroh_router::Router; use quic_rpc::server::{RpcChannel, RpcServerError}; @@ -16,18 +14,21 @@ use crate::{ }; #[derive(Debug, Clone)] -pub(crate) struct Handler { - pub(crate) inner: Arc>, - pub(crate) router: Router, +pub(crate) struct Handler { + pub(crate) inner: Arc, + pub(crate) _router: Router, } -impl Handler { - pub fn new(inner: Arc>, router: Router) -> Self { - Self { inner, router } +impl Handler { + pub fn new(inner: Arc, router: Router) -> Self { + Self { + inner, + _router: router, + } } } -impl iroh_node_util::rpc::server::AbstractNode for NodeInner { +impl iroh_node_util::rpc::server::AbstractNode for NodeInner { fn endpoint(&self) -> &iroh_net::Endpoint { &self.endpoint } @@ -49,15 +50,9 @@ impl iroh_node_util::rpc::server::AbstractNode for NodeInner { } } -impl Handler { - fn blobs(&self) -> Arc> { - self.router - .get_protocol::>(iroh_blobs::protocol::ALPN) - .expect("missing blobs") - } - +impl Handler { pub(crate) fn spawn_rpc_request( - inner: Arc>, + inner: Arc, join_set: &mut JoinSet>, accepting: quic_rpc::server::Accepting, router: Router, @@ -83,17 +78,6 @@ impl Handler { .map_err(|e| e.errors_into()) } - async fn handle_blobs_request( - self, - msg: iroh_blobs::rpc::proto::Request, - chan: RpcChannel, - ) -> Result<(), RpcServerError> 
{ - self.blobs() - .handle_rpc_request(msg, chan) - .await - .map_err(|e| e.errors_into()) - } - pub(crate) async fn handle_rpc_request( self, msg: Request, @@ -103,7 +87,6 @@ impl Handler { debug!("handling rpc request: {msg}"); match msg { Node(msg) => self.handle_node_request(msg, chan).await, - BlobsAndTags(msg) => self.handle_blobs_request(msg, chan.map().boxed()).await, } } } diff --git a/iroh/src/rpc_protocol.rs b/iroh/src/rpc_protocol.rs index ac32f5b445..b5958348ea 100644 --- a/iroh/src/rpc_protocol.rs +++ b/iroh/src/rpc_protocol.rs @@ -27,7 +27,6 @@ pub struct RpcService; #[nested_enum_utils::enum_conversions()] pub enum Request { Node(iroh_node_util::rpc::proto::Request), - BlobsAndTags(iroh_blobs::rpc::proto::Request), } /// The response enum, listing all possible responses. @@ -36,7 +35,6 @@ pub enum Request { #[nested_enum_utils::enum_conversions()] pub enum Response { Node(iroh_node_util::rpc::proto::Response), - BlobsAndTags(iroh_blobs::rpc::proto::Response), } impl quic_rpc::Service for RpcService { diff --git a/iroh/src/util.rs b/iroh/src/util.rs index 31106fe280..341cf83dfe 100644 --- a/iroh/src/util.rs +++ b/iroh/src/util.rs @@ -2,4 +2,3 @@ pub mod fs; pub mod path; -pub mod progress; diff --git a/iroh/tests/batch.rs b/iroh/tests/batch.rs deleted file mode 100644 index c3289d03e6..0000000000 --- a/iroh/tests/batch.rs +++ /dev/null @@ -1,247 +0,0 @@ -use std::{io, time::Duration}; - -use bao_tree::blake3; -use bytes::Bytes; -use futures_lite::StreamExt; -use iroh::{ - client::blobs::{AddDirOpts, WrapOption}, - node::GcPolicy, -}; -use iroh_blobs::store::mem::Store; - -async fn create_node() -> anyhow::Result<(iroh::node::Node, async_channel::Receiver<()>)> { - let (gc_send, gc_recv) = async_channel::unbounded(); - let node = iroh::node::Node::memory() - .gc_policy(GcPolicy::Interval(Duration::from_millis(10))) - .register_gc_done_cb(Box::new(move || { - gc_send.send_blocking(()).ok(); - })) - .spawn() - .await?; - Ok((node, gc_recv)) -} - -async fn wait_for_gc(chan: &mut async_channel::Receiver<()>) { - let _ = chan.drain(); - for _ in 0..5 { - chan.recv().await.unwrap(); - } -} - -/// Test that add_bytes adds the right data -#[tokio::test] -async fn add_bytes() -> anyhow::Result<()> { - let (node, _) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let data: &[u8] = b"test"; - let tag = batch.add_bytes(data).await?; - let hash = *tag.hash(); - let actual = client.read_to_bytes(hash).await?; - assert_eq!(hash, blake3::hash(data).into()); - assert_eq!(actual.as_ref(), data); - Ok(()) -} - -/// Test that add_bytes adds the right data -#[tokio::test] -async fn add_stream() -> anyhow::Result<()> { - let (node, _) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let data: &[u8] = b"test"; - let data_stream = futures_lite::stream::iter([io::Result::Ok(Bytes::copy_from_slice(data))]); - let tag = batch.add_stream(data_stream).await?; - let hash = *tag.hash(); - let actual = client.read_to_bytes(hash).await?; - assert_eq!(hash, blake3::hash(data).into()); - assert_eq!(actual.as_ref(), data); - Ok(()) -} - -/// Test that add_file adds the right data -#[tokio::test] -async fn add_file() -> anyhow::Result<()> { - let (node, _) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let dir = tempfile::tempdir()?; - let temp_path = dir.path().join("test"); - std::fs::write(&temp_path, b"test")?; - let (tag, _) = 
batch.add_file(temp_path).await?; - let hash = *tag.hash(); - let actual = client.read_to_bytes(hash).await?; - assert_eq!(hash, blake3::hash(b"test").into()); - assert_eq!(actual.as_ref(), b"test"); - Ok(()) -} - -/// Tests that add_dir adds the right data -#[tokio::test] -async fn add_dir() -> anyhow::Result<()> { - let (node, _) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let dir = tempfile::tempdir()?; - let data: [(&str, &[u8]); 2] = [("test1", b"test1"), ("test2", b"test2")]; - for (name, content) in &data { - let temp_path = dir.path().join(name); - std::fs::write(&temp_path, content)?; - } - let tag = batch.add_dir(dir.path().to_owned()).await?; - assert!(client.has(*tag.hash()).await?); - for (_, content) in &data { - let hash = blake3::hash(content).into(); - let data = client.read_to_bytes(hash).await?; - assert_eq!(data.as_ref(), *content); - } - Ok(()) -} - -/// Tests that add_dir adds the right data -#[tokio::test] -async fn add_dir_single_file() -> anyhow::Result<()> { - let (node, _) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let dir = tempfile::tempdir()?; - let temp_path = dir.path().join("test"); - let data: &[u8] = b"test"; - std::fs::write(&temp_path, data)?; - let tag = batch - .add_dir_with_opts( - temp_path, - AddDirOpts { - wrap: WrapOption::Wrap { name: None }, - ..Default::default() - }, - ) - .await?; - assert!(client.read_to_bytes(*tag.hash()).await.is_ok()); - let hash = blake3::hash(data).into(); - let actual_data = client.read_to_bytes(hash).await?; - assert_eq!(actual_data.as_ref(), data); - Ok(()) -} - -#[tokio::test] -async fn batch_drop() -> anyhow::Result<()> { - let (node, mut gc) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let data: &[u8] = b"test"; - let tag = batch.add_bytes(data).await?; - let hash = *tag.hash(); - // Check that the store has the data and that it is protected from gc - wait_for_gc(&mut gc).await; - assert!(client.has(hash).await?); - drop(batch); - // Check that the store drops the data when the temp tag gets dropped - wait_for_gc(&mut gc).await; - assert!(!client.has(hash).await?); - Ok(()) -} - -/// This checks that dropping a tag makes the data eligible for garbage collection. -/// -/// Note that we might change this behavior in the future and only drop the data -/// once the batch is dropped. -#[tokio::test] -async fn tag_drop_raw() -> anyhow::Result<()> { - let (node, mut gc) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let data: &[u8] = b"test"; - let tag = batch.add_bytes(data).await?; - let hash = *tag.hash(); - // Check that the store has the data and that it is protected from gc - wait_for_gc(&mut gc).await; - assert!(client.has(hash).await?); - drop(tag); - // Check that the store drops the data when the temp tag gets dropped - wait_for_gc(&mut gc).await; - assert!(!client.has(hash).await?); - Ok(()) -} - -/// Tests that data is preserved if a second temp tag is created for it -/// before the first temp tag is dropped. 
-#[tokio::test] -async fn temp_tag_copy() -> anyhow::Result<()> { - let (node, mut gc) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let data: &[u8] = b"test"; - let tag = batch.add_bytes(data).await?; - let hash = *tag.hash(); - // Check that the store has the data and that it is protected from gc - wait_for_gc(&mut gc).await; - assert!(client.has(hash).await?); - // Create an additional temp tag for the same data - let tag2 = batch.temp_tag(tag.hash_and_format()).await?; - drop(tag); - // Check that the data is still present - wait_for_gc(&mut gc).await; - assert!(client.has(hash).await?); - drop(tag2); - // Check that the data is gone since both temp tags are dropped - wait_for_gc(&mut gc).await; - assert!(!client.has(hash).await?); - Ok(()) -} - -/// Tests that temp tags work properly for hash sequences, using add_dir -/// to add the data. -/// -/// Note that we might change this behavior in the future and only drop the data -/// once the batch is dropped. -#[tokio::test] -async fn tag_drop_hashseq() -> anyhow::Result<()> { - let (node, mut gc) = create_node().await?; - let client = &node.client().blobs(); - let batch = client.batch().await?; - let dir = tempfile::tempdir()?; - let data: [(&str, &[u8]); 2] = [("test1", b"test1"), ("test2", b"test2")]; - for (name, content) in &data { - let temp_path = dir.path().join(name); - std::fs::write(&temp_path, content)?; - } - let tag = batch.add_dir(dir.path().to_owned()).await?; - let hash = *tag.hash(); - // weird signature to avoid async move issues - let check_present = |present: &'static bool| async { - assert!(client.has(hash).await? == *present); - for (_, content) in &data { - let hash = blake3::hash(content).into(); - assert!(client.has(hash).await? == *present); - } - anyhow::Ok(()) - }; - // Check that the store has the data immediately after adding it - check_present(&true).await?; - // Check that it is protected from gc - wait_for_gc(&mut gc).await; - check_present(&true).await?; - drop(tag); - // Check that the store drops the data when the temp tag gets dropped - wait_for_gc(&mut gc).await; - check_present(&false).await?; - Ok(()) -} - -/// This checks that dropping a tag makes the data eligible for garbage collection. -/// -/// Note that we might change this behavior in the future and only drop the data -/// once the batch is dropped. 
-#[tokio::test]
-async fn wrong_batch() -> anyhow::Result<()> {
-    let (node, _) = create_node().await?;
-    let client = &node.client().blobs();
-    let batch = client.batch().await?;
-    let data: &[u8] = b"test";
-    let tag = batch.add_bytes(data).await?;
-    drop(batch);
-    let batch = client.batch().await?;
-    assert!(batch.persist(tag).await.is_err());
-    Ok(())
-}
diff --git a/iroh/tests/gc.rs b/iroh/tests/gc.rs
deleted file mode 100644
index e2aa6bf71c..0000000000
--- a/iroh/tests/gc.rs
+++ /dev/null
@@ -1,459 +0,0 @@
-use std::{
-    io::{Cursor, Write},
-    time::Duration,
-};
-
-use anyhow::Result;
-use bao_tree::{blake3, io::sync::Outboard, ChunkRanges};
-use bytes::Bytes;
-use iroh::node::{self, Node};
-use iroh_blobs::{
-    hashseq::HashSeq,
-    store::{EntryStatus, MapMut, Store},
-    util::Tag,
-    BlobFormat, HashAndFormat, IROH_BLOCK_SIZE,
-};
-use rand::RngCore;
-
-pub fn create_test_data(size: usize) -> Bytes {
-    let mut rand = rand::thread_rng();
-    let mut res = vec![0u8; size];
-    rand.fill_bytes(&mut res);
-    res.into()
-}
-
-/// Take some data and encode it
-pub fn simulate_remote(data: &[u8]) -> (blake3::Hash, Cursor<Bytes>) {
-    let outboard = bao_tree::io::outboard::PostOrderMemOutboard::create(data, IROH_BLOCK_SIZE);
-    let mut encoded = Vec::new();
-    encoded
-        .write_all(outboard.tree.size().to_le_bytes().as_ref())
-        .unwrap();
-    bao_tree::io::sync::encode_ranges_validated(data, &outboard, &ChunkRanges::all(), &mut encoded)
-        .unwrap();
-    let hash = outboard.root();
-    (hash, Cursor::new(encoded.into()))
-}
-
-/// Wrap a bao store in a node that has gc enabled.
-async fn wrap_in_node<S>(
-    bao_store: S,
-    gc_period: Duration,
-) -> (Node<S>, async_channel::Receiver<()>)
-where
-    S: iroh_blobs::store::Store,
-{
-    let (gc_send, gc_recv) = async_channel::unbounded();
-    let node = node::Builder::with_db_and_store(bao_store, iroh::node::StorageConfig::Mem)
-        .gc_policy(iroh::node::GcPolicy::Interval(gc_period))
-        .register_gc_done_cb(Box::new(move || {
-            gc_send.send_blocking(()).ok();
-        }))
-        .spawn()
-        .await
-        .unwrap();
-    (node, gc_recv)
-}
-
-async fn gc_test_node() -> (
-    Node<iroh_blobs::store::mem::Store>,
-    iroh_blobs::store::mem::Store,
-    async_channel::Receiver<()>,
-) {
-    let bao_store = iroh_blobs::store::mem::Store::new();
-    let (node, gc_recv) = wrap_in_node(bao_store.clone(), Duration::from_millis(500)).await;
-    (node, bao_store, gc_recv)
-}
-
-async fn step(evs: &async_channel::Receiver<()>) {
-    // drain the event queue, we want a new GC
-    while evs.try_recv().is_ok() {}
-    // wait for several GC cycles
-    for _ in 0..3 {
-        evs.recv().await.unwrap();
-    }
-}
-
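Two protection mechanisms are in play throughout the gc tests below: a `TempTag`, which protects an entry from garbage collection only while the tag value is alive, and a named tag, which protects it until the tag is deleted. A minimal sketch of that lifecycle, assuming the same `Store` API the tests use:

    let tt = store.import_bytes(data, BlobFormat::Raw).await?; // temp tag guards the entry
    let hash = *tt.hash();
    store
        .set_tag(Tag::from("keep"), Some(HashAndFormat::raw(hash)))
        .await?;                                   // named tag guards it as well
    drop(tt);                                      // still protected by the named tag
    store.set_tag(Tag::from("keep"), None).await?; // now eligible for collection
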
-/// Test the absolute basics of gc, temp tags and tags for blobs.
-#[tokio::test]
-async fn gc_basics() -> Result<()> {
-    let _ = tracing_subscriber::fmt::try_init();
-    let (node, bao_store, evs) = gc_test_node().await;
-    let data1 = create_test_data(1234);
-    let tt1 = bao_store.import_bytes(data1, BlobFormat::Raw).await?;
-    let data2 = create_test_data(5678);
-    let tt2 = bao_store.import_bytes(data2, BlobFormat::Raw).await?;
-    let h1 = *tt1.hash();
-    let h2 = *tt2.hash();
-    // temp tags are still there, so the entries should be there
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::Complete);
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete);
-
-    // drop the first tag, the entry should be gone after some time
-    drop(tt1);
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::NotFound);
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete);
-
-    // create an explicit tag for h2 (as raw) and then delete the temp tag. Entry should still be there.
-    let tag = Tag::from("test");
-    bao_store
-        .set_tag(tag.clone(), Some(HashAndFormat::raw(h2)))
-        .await?;
-    drop(tt2);
-    tracing::info!("dropped tt2");
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete);
-
-    // delete the explicit tag, entry should be gone
-    bao_store.set_tag(tag, None).await?;
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::NotFound);
-
-    node.shutdown().await?;
-    Ok(())
-}
-
-/// Test gc for sequences of hashes that protect their children from deletion.
-#[tokio::test]
-async fn gc_hashseq_impl() -> Result<()> {
-    let _ = tracing_subscriber::fmt::try_init();
-    let (node, bao_store, evs) = gc_test_node().await;
-    let data1 = create_test_data(1234);
-    let tt1 = bao_store.import_bytes(data1, BlobFormat::Raw).await?;
-    let data2 = create_test_data(5678);
-    let tt2 = bao_store.import_bytes(data2, BlobFormat::Raw).await?;
-    let seq = vec![*tt1.hash(), *tt2.hash()]
-        .into_iter()
-        .collect::<HashSeq>();
-    let ttr = bao_store
-        .import_bytes(seq.into_inner(), BlobFormat::HashSeq)
-        .await?;
-    let h1 = *tt1.hash();
-    let h2 = *tt2.hash();
-    let hr = *ttr.hash();
-    drop(tt1);
-    drop(tt2);
-
-    // there is a temp tag for the link seq, so it and its entries should be there
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::Complete);
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete);
-    assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::Complete);
-
-    // make a permanent tag for the link seq, then delete the temp tag. Entries should still be there.
-    let tag = Tag::from("test");
-    bao_store
-        .set_tag(tag.clone(), Some(HashAndFormat::hash_seq(hr)))
-        .await?;
-    drop(ttr);
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::Complete);
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete);
-    assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::Complete);
-
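The step that follows hinges on the difference between the two tag formats, sketched here with the same API: `HashAndFormat::hash_seq(hr)` protects `hr` and every child hash the sequence lists, while `HashAndFormat::raw(hr)` protects only the bytes of `hr` itself:

    store.set_tag(tag.clone(), Some(HashAndFormat::hash_seq(hr))).await?; // hr, h1, h2 kept
    store.set_tag(tag.clone(), Some(HashAndFormat::raw(hr))).await?;      // only hr kept
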
-    // change the permanent tag to be just for the linkseq itself as a blob. Only the linkseq should be there, not the entries.
-    bao_store
-        .set_tag(tag.clone(), Some(HashAndFormat::raw(hr)))
-        .await?;
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::NotFound);
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::NotFound);
-    assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::Complete);
-
-    // delete the permanent tag, everything should be gone
-    bao_store.set_tag(tag, None).await?;
-    step(&evs).await;
-    assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::NotFound);
-    assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::NotFound);
-    assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::NotFound);
-
-    node.shutdown().await?;
-    Ok(())
-}
-
-#[cfg(feature = "fs-store")]
-mod file {
-    use std::{io, path::PathBuf};
-
-    use bao_tree::{
-        io::fsm::{BaoContentItem, ResponseDecoderNext},
-        BaoTree,
-    };
-    use iroh_blobs::{
-        store::{BaoBatchWriter, ConsistencyCheckProgress, MapEntryMut, ReportLevel},
-        util::progress::{AsyncChannelProgressSender, ProgressSender as _},
-        TempTag,
-    };
-    use testdir::testdir;
-    use tokio::io::AsyncReadExt;
-
-    use super::*;
-
-    fn path(root: PathBuf, suffix: &'static str) -> impl Fn(&iroh_blobs::Hash) -> PathBuf {
-        move |hash| root.join(format!("{}.{}", hash.to_hex(), suffix))
-    }
-
-    fn data_path(root: PathBuf) -> impl Fn(&iroh_blobs::Hash) -> PathBuf {
-        // this assumes knowledge of the internal directory structure of the flat store
-        path(root.join("data"), "data")
-    }
-
-    fn outboard_path(root: PathBuf) -> impl Fn(&iroh_blobs::Hash) -> PathBuf {
-        // this assumes knowledge of the internal directory structure of the flat store
-        path(root.join("data"), "obao4")
-    }
-
-    async fn check_consistency(store: &impl Store) -> anyhow::Result<ReportLevel> {
-        let mut max_level = ReportLevel::Trace;
-        let (tx, rx) = async_channel::bounded(1);
-        let task = tokio::task::spawn(async move {
-            while let Ok(ev) = rx.recv().await {
-                if let ConsistencyCheckProgress::Update { level, .. } = &ev {
-                    max_level = max_level.max(*level);
-                }
-            }
-            max_level
-        });
-        store
-            .consistency_check(false, AsyncChannelProgressSender::new(tx).boxed())
-            .await?;
-        let max_level = task.await?;
-        Ok(max_level)
-    }
-
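The `data_path`/`outboard_path` helpers above encode the flat store's on-disk layout, which the assertions in the next test probe directly; small blobs are inlined into the database instead, so for them neither file exists. Sketched for a hash `h` (an internal layout, as the comments in the helpers warn):

    <root>/data/<h-as-hex>.data   // raw content, only for blobs too big to inline
    <root>/data/<h-as-hex>.obao4  // outboard, only for blobs big enough to need one
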
-    /// Test gc for sequences of hashes that protect their children from deletion.
-    #[tokio::test]
-    async fn gc_file_basics() -> Result<()> {
-        let _ = tracing_subscriber::fmt::try_init();
-        let dir = testdir!();
-        let path = data_path(dir.clone());
-        let outboard_path = outboard_path(dir.clone());
-
-        let bao_store = iroh_blobs::store::fs::Store::load(dir.clone()).await?;
-        let (node, evs) = wrap_in_node(bao_store.clone(), Duration::from_millis(100)).await;
-        let data1 = create_test_data(10000000);
-        let tt1 = bao_store
-            .import_bytes(data1.clone(), BlobFormat::Raw)
-            .await?;
-        let data2 = create_test_data(1000000);
-        let tt2 = bao_store
-            .import_bytes(data2.clone(), BlobFormat::Raw)
-            .await?;
-        let seq = vec![*tt1.hash(), *tt2.hash()]
-            .into_iter()
-            .collect::<HashSeq>();
-        let ttr = bao_store
-            .import_bytes(seq.into_inner(), BlobFormat::HashSeq)
-            .await?;
-
-        let h1 = *tt1.hash();
-        let h2 = *tt2.hash();
-        let hr = *ttr.hash();
-
-        // data is protected by the temp tag
-        step(&evs).await;
-        bao_store.sync().await?;
-        assert!(check_consistency(&bao_store).await? <= ReportLevel::Info);
-        // h1 is for a giant file, so we will have both data and outboard files
-        assert!(path(&h1).exists());
-        assert!(outboard_path(&h1).exists());
-        // h2 is for a mid sized file, so we will have just the data file
-        assert!(path(&h2).exists());
-        assert!(!outboard_path(&h2).exists());
-        // hr so small that data will be inlined and outboard will not exist at all
-        assert!(!path(&hr).exists());
-        assert!(!outboard_path(&hr).exists());
-
-        drop(tt1);
-        drop(tt2);
-        let tag = Tag::from("test");
-        bao_store
-            .set_tag(tag.clone(), Some(HashAndFormat::hash_seq(*ttr.hash())))
-            .await?;
-        drop(ttr);
-
-        // data is now protected by a normal tag, nothing should be gone
-        step(&evs).await;
-        bao_store.sync().await?;
-        assert!(check_consistency(&bao_store).await? <= ReportLevel::Info);
-        // h1 is for a giant file, so we will have both data and outboard files
-        assert!(path(&h1).exists());
-        assert!(outboard_path(&h1).exists());
-        // h2 is for a mid sized file, so we will have just the data file
-        assert!(path(&h2).exists());
-        assert!(!outboard_path(&h2).exists());
-        // hr so small that data will be inlined and outboard will not exist at all
-        assert!(!path(&hr).exists());
-        assert!(!outboard_path(&hr).exists());
-
-        tracing::info!("changing tag from hashseq to raw, this should orphan the children");
-        bao_store
-            .set_tag(tag.clone(), Some(HashAndFormat::raw(hr)))
-            .await?;
-
-        // now only hr itself should be protected, but not its children
-        step(&evs).await;
-        bao_store.sync().await?;
-        assert!(check_consistency(&bao_store).await? <= ReportLevel::Info);
-        // h1 should be gone
-        assert!(!path(&h1).exists());
-        assert!(!outboard_path(&h1).exists());
-        // h2 should be gone as well
-        assert!(!path(&h2).exists());
-        assert!(!outboard_path(&h2).exists());
-        // hr was inlined, so it was never on disk to begin with
-        assert!(!path(&hr).exists());
-        assert!(!outboard_path(&hr).exists());
-
-        bao_store.set_tag(tag, None).await?;
-        step(&evs).await;
-        bao_store.sync().await?;
-        assert!(check_consistency(&bao_store).await? <= ReportLevel::Info);
-        // h1 should be gone
-        assert!(!path(&h1).exists());
-        assert!(!outboard_path(&h1).exists());
-        // h2 should still not be there
-        assert!(!path(&h2).exists());
-        assert!(!outboard_path(&h2).exists());
-        // hr should still not be there
-        assert!(!path(&hr).exists());
-        assert!(!outboard_path(&hr).exists());
-
-        node.shutdown().await?;
-
-        Ok(())
-    }
-
-    /// Add a file to the store in the same way a download works.
-    ///
-    /// we know the hash in advance, create a partial entry, write the data to it and
-    /// the outboard file, then commit it to a complete entry.
-    ///
-    /// During this time, the partial entry is protected by a temp tag.
-    async fn simulate_download_partial<S: Store>(
-        bao_store: &S,
-        data: Bytes,
-    ) -> io::Result<(S::EntryMut, TempTag)> {
-        // simulate the remote side.
-        let (hash, mut response) = simulate_remote(data.as_ref());
-        // simulate the local side.
-        // we got a hash and a response from the remote side.
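        // The simulated wire format, produced by `simulate_remote` above, is
        //
        //     [size: u64 little endian][bao-encoded chunks for ChunkRanges::all()]
        //
        // so the local side first reads the 8-byte size, then drives a
        // `ResponseDecoder` over the rest, batching items into a partial entry.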
-        let tt = bao_store.temp_tag(HashAndFormat::raw(hash.into()));
-        // get the size
-        let size = response.read_u64_le().await?;
-        // start reading the response
-        let mut reading = bao_tree::io::fsm::ResponseDecoder::new(
-            hash,
-            ChunkRanges::all(),
-            BaoTree::new(size, IROH_BLOCK_SIZE),
-            response,
-        );
-        // create the partial entry
-        let entry = bao_store.get_or_create(hash.into(), size).await?;
-        // create the batch writer for the entry
-        let mut bw = entry.batch_writer().await?;
-        let mut buf = Vec::new();
-        while let ResponseDecoderNext::More((next, res)) = reading.next().await {
-            let item = res?;
-            match &item {
-                BaoContentItem::Parent(_) => {
-                    buf.push(item);
-                }
-                BaoContentItem::Leaf(_) => {
-                    buf.push(item);
-                    let batch = std::mem::take(&mut buf);
-                    bw.write_batch(size, batch).await?;
-                }
-            }
-            reading = next;
-        }
-        bw.sync().await?;
-        drop(bw);
-        Ok((entry, tt))
-    }
-
-    async fn simulate_download_complete<S: Store>(
-        bao_store: &S,
-        data: Bytes,
-    ) -> io::Result<TempTag> {
-        let (entry, tt) = simulate_download_partial(bao_store, data).await?;
-        // commit the entry
-        bao_store.insert_complete(entry).await?;
-        Ok(tt)
-    }
-
-    /// Test that partial files are deleted.
-    #[tokio::test]
-    async fn gc_file_partial() -> Result<()> {
-        let _ = tracing_subscriber::fmt::try_init();
-        let dir = testdir!();
-        let path = data_path(dir.clone());
-        let outboard_path = outboard_path(dir.clone());
-
-        let bao_store = iroh_blobs::store::fs::Store::load(dir.clone()).await?;
-        let (node, evs) = wrap_in_node(bao_store.clone(), Duration::from_millis(10)).await;
-
-        let data1: Bytes = create_test_data(10000000);
-        let (_entry, tt1) = simulate_download_partial(&bao_store, data1.clone()).await?;
-        drop(_entry);
-        let h1 = *tt1.hash();
-        // partial data and outboard files should be there
-        step(&evs).await;
-        bao_store.sync().await?;
-        assert!(check_consistency(&bao_store).await? <= ReportLevel::Info);
-        assert!(path(&h1).exists());
-        assert!(outboard_path(&h1).exists());
-
-        drop(tt1);
-        // partial data and outboard files should be gone
-        step(&evs).await;
-        bao_store.sync().await?;
-        assert!(check_consistency(&bao_store).await? <= ReportLevel::Info);
-        assert!(!path(&h1).exists());
-        assert!(!outboard_path(&h1).exists());
-
-        node.shutdown().await?;
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn gc_file_stress() -> Result<()> {
-        let _ = tracing_subscriber::fmt::try_init();
-        let dir = testdir!();
-
-        let bao_store = iroh_blobs::store::fs::Store::load(dir.clone()).await?;
-        let (node, evs) = wrap_in_node(bao_store.clone(), Duration::from_secs(1)).await;
-
-        let mut deleted = Vec::new();
-        let mut live = Vec::new();
-        // download
-        for i in 0..100 {
-            let data: Bytes = create_test_data(16 * 1024 * 3 + 1);
-            let tt = simulate_download_complete(&bao_store, data).await.unwrap();
-            if i % 100 == 0 {
-                let tag = Tag::from(format!("test{}", i));
-                bao_store
-                    .set_tag(tag.clone(), Some(HashAndFormat::raw(*tt.hash())))
-                    .await?;
-                live.push(*tt.hash());
-            } else {
-                deleted.push(*tt.hash());
-            }
-        }
-        step(&evs).await;
-
-        for h in deleted.iter() {
-            assert_eq!(bao_store.entry_status(h).await?, EntryStatus::NotFound);
-            assert!(!dir.join(format!("data/{}.data", h.to_hex())).exists());
-        }
-
-        for h in live.iter() {
-            assert_eq!(bao_store.entry_status(h).await?, EntryStatus::Complete);
-            assert!(dir.join(format!("data/{}.data", h.to_hex())).exists());
-        }
-
-        node.shutdown().await?;
-        Ok(())
-    }
-}
diff --git a/iroh/tests/provide.rs b/iroh/tests/provide.rs
deleted file mode 100644
index eab8b84599..0000000000
--- a/iroh/tests/provide.rs
+++ /dev/null
@@ -1,542 +0,0 @@
-use std::{
-    collections::BTreeMap,
-    net::SocketAddr,
-    ops::Range,
-    time::{Duration, Instant},
-};
-
-use anyhow::{Context, Result};
-use bao_tree::{blake3, ChunkNum, ChunkRanges};
-use bytes::Bytes;
-use futures_lite::FutureExt;
-use iroh::node::Builder;
-use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket};
-use iroh_blobs::{
-    format::collection::Collection,
-    get::{
-        fsm::{self, ConnectedNext, DecodeError},
-        Stats,
-    },
-    protocol::{GetRequest, RangeSpecSeq},
-    store::{MapMut, Store},
-    Hash,
-};
-use iroh_net::{defaults::staging::default_relay_map, key::SecretKey, NodeAddr, NodeId};
-use rand::RngCore;
-
-/// Create a new endpoint and dial a peer, returning the connection.
-async fn dial(secret_key: SecretKey, peer: NodeAddr) -> anyhow::Result<iroh_net::endpoint::Connection> {
-    let endpoint = iroh_net::Endpoint::builder()
-        .secret_key(secret_key)
-        .bind()
-        .await?;
-    endpoint
-        .connect(peer, iroh::blobs::protocol::ALPN)
-        .await
-        .context("failed to connect to provider")
-}
-
-fn test_node<D: Store>(db: D) -> Builder<D> {
-    iroh::node::Builder::with_db_and_store(db, iroh::node::StorageConfig::Mem).bind_random_port()
-}
-
-#[tokio::test]
-async fn basics() -> Result<()> {
-    let _guard = iroh_test::logging::setup();
-    transfer_data(vec![("hello_world", "hello world!".as_bytes().to_vec())]).await
-}
-
-#[tokio::test]
-async fn multi_file() -> Result<()> {
-    let _guard = iroh_test::logging::setup();
-
-    let file_opts = vec![
-        ("1", 10),
-        ("2", 1024),
-        ("3", 1024 * 1024),
-        // overkill, but it works! Just annoying to wait for
-        // ("4", 1024 * 1024 * 90),
-    ];
-    transfer_random_data(file_opts).await
-}
-
-#[tokio::test]
-async fn many_files() -> Result<()> {
-    let _guard = iroh_test::logging::setup();
-    let num_files = [10, 100];
-    for num in num_files {
-        println!("NUM_FILES: {num}");
-        let file_opts = (0..num)
-            .map(|i| {
-                // use a long file name to test large collections
-                let name = i.to_string().repeat(50);
-                (name, 10)
-            })
-            .collect();
-        transfer_random_data(file_opts).await?;
-    }
-    Ok(())
-}
-
-#[tokio::test]
-async fn sizes() -> Result<()> {
-    let _guard = iroh_test::logging::setup();
-
-    let sizes = [
-        0,
-        10,
-        100,
-        1024,
-        1024 * 100,
-        1024 * 500,
-        1024 * 1024,
-        1024 * 1024 + 10,
-        1024 * 1024 * 9,
-    ];
-
-    for size in sizes {
-        let now = Instant::now();
-        transfer_random_data(vec![("hello_world", size)]).await?;
-        println!(" took {}ms", now.elapsed().as_millis());
-    }
-
-    Ok(())
-}
-
-#[tokio::test]
-async fn empty_files() -> Result<()> {
-    // try to transfer as many files as possible without hitting a limit
-    // booo 400 is too small :(
-    let num_files = 400;
-    let mut file_opts = Vec::new();
-    for i in 0..num_files {
-        file_opts.push((i.to_string(), 0));
-    }
-    transfer_random_data(file_opts).await
-}
-
-/// Create new get options with the given node id and addresses, using a
-/// randomly generated secret key.
-fn get_options(
-    node_id: NodeId,
-    addrs: impl IntoIterator<Item = SocketAddr>,
-) -> (SecretKey, NodeAddr) {
-    let relay_map = default_relay_map();
-    let peer = iroh_net::NodeAddr::from_parts(
-        node_id,
-        relay_map.nodes().next().map(|n| n.url.clone()),
-        addrs,
-    );
-    (SecretKey::generate(), peer)
-}
-
-#[tokio::test(flavor = "multi_thread")]
-async fn multiple_clients() -> Result<()> {
-    let content = b"hello world!";
-
-    let mut db = iroh_blobs::store::readonly_mem::Store::default();
-    let expect_hash = db.insert(content.as_slice());
-    let expect_name = "hello_world";
-    let collection = Collection::from_iter([(expect_name, expect_hash)]);
-    let hash = db.insert_many(collection.to_blobs()).unwrap();
-    let node = test_node(db).spawn().await?;
-    let mut tasks = Vec::new();
-    for _i in 0..3 {
-        let file_hash: Hash = expect_hash;
-        let name = expect_name;
-        let addrs = node.local_address();
-        let peer_id = node.node_id();
-        let content = content.to_vec();
-
-        tasks.push(node.local_pool_handle().spawn(move || {
-            async move {
-                let (secret_key, peer) = get_options(peer_id, addrs);
-                let expected_data = &content;
-                let expected_name = name;
-                let request = GetRequest::all(hash);
-                let (collection, children, _stats) =
-                    run_collection_get_request(secret_key, peer, request).await?;
-                assert_eq!(expected_name, &collection[0].0);
-                assert_eq!(&file_hash, &collection[0].1);
-                assert_eq!(expected_data, &children[&0]);
-
-                anyhow::Ok(())
-            }
-            .boxed_local()
-        }));
-    }
-
-    futures_buffered::try_join_all(tasks).await?;
-    Ok(())
-}
-
-// Run the test creating random data for each blob, using the size specified by the file
-// options
-async fn transfer_random_data<S>(file_opts: Vec<(S, usize)>) -> Result<()>
-where
-    S: Into<String> + std::fmt::Debug + std::cmp::PartialEq + Clone,
-{
-    let file_opts = file_opts
-        .into_iter()
-        .map(|(name, size)| {
-            let mut content = vec![0u8; size];
-            rand::thread_rng().fill_bytes(&mut content);
-            (name, content)
-        })
-        .collect();
-    transfer_data(file_opts).await
-}
-
-// Run the test for a vec of filenames and blob data
-async fn transfer_data<S>(file_opts: Vec<(S, Vec<u8>)>) -> Result<()>
-where
-    S: Into<String> + std::fmt::Debug + std::cmp::PartialEq + Clone,
-{
-    let mut expects = Vec::new();
-    let num_blobs = file_opts.len();
-
-    let (mut mdb, _lookup) = iroh_blobs::store::readonly_mem::Store::new(file_opts.clone());
-    let mut blobs = Vec::new();
-
-    for opt in file_opts.into_iter() {
-        let (name, data) = opt;
-        let name: String = name.into();
-        println!("Sending {}: {}b", name, data.len());
-
-        // get expected hash of file
-        let hash = blake3::hash(&data);
-        let hash = Hash::from(hash);
-        let blob = (name.clone(), hash);
-        blobs.push(blob);
-
-        // keep track of expected values
-        expects.push((name, hash));
-    }
-    let collection_orig = Collection::from_iter(blobs);
-    let collection_hash = mdb.insert_many(collection_orig.to_blobs()).unwrap();
-
-    let node = test_node(mdb.clone()).spawn().await?;
-
-    let addrs = node.local_endpoint_addresses().await?;
-    let (secret_key, peer) = get_options(node.node_id(), addrs);
-    let request = GetRequest::all(collection_hash);
-    let (collection, children, _stats) =
-        run_collection_get_request(secret_key, peer, request).await?;
-    assert_eq!(num_blobs, collection.len());
-    for (i, (expected_name, expected_hash)) in expects.iter().enumerate() {
-        let (name, hash) = &collection[i];
-        let got = &children[&(i as u64)];
-        let expected = mdb.get_content(expected_hash).unwrap();
-        assert_eq!(expected_name, name);
-        assert_eq!(expected_hash, hash);
-        assert_eq!(expected, got);
-    }
-
-    node.shutdown().await?;
-
-    Ok(())
-}
-
-#[tokio::test]
-async fn test_server_close() {
-    let _guard = iroh_test::logging::setup();
-
-    // Prepare a Provider transferring a file.
-    let mut db = iroh_blobs::store::readonly_mem::Store::default();
-    let child_hash = db.insert(b"hello there");
-    let collection = Collection::from_iter([("hello", child_hash)]);
-    let hash = db.insert_many(collection.to_blobs()).unwrap();
-    let node = test_node(db).spawn().await.unwrap();
-    let node_addr = node.local_endpoint_addresses().await.unwrap();
-    let peer_id = node.node_id();
-
-    let (secret_key, peer) = get_options(peer_id, node_addr);
-    let request = GetRequest::all(hash);
-    let (_collection, _children, _stats) = run_collection_get_request(secret_key, peer, request)
-        .await
-        .unwrap();
-}
-
-/// create an in memory test database containing the given entries and an iroh collection of all entries
-///
-/// returns the database and the root hash of the collection
-fn create_test_db(
-    entries: impl IntoIterator<Item = (impl Into<String>, impl AsRef<[u8]>)>,
-) -> (iroh_blobs::store::readonly_mem::Store, Hash) {
-    let (mut db, hashes) = iroh_blobs::store::readonly_mem::Store::new(entries);
-    let collection = Collection::from_iter(hashes);
-    let hash = db.insert_many(collection.to_blobs()).unwrap();
-    (db, hash)
-}
-
-#[tokio::test]
-#[ignore = "flaky"]
-async fn test_ipv6() {
-    let _guard = iroh_test::logging::setup();
-
-    let (db, hash) = create_test_db([("test", b"hello")]);
-    let node = match test_node(db).spawn().await {
-        Ok(provider) => provider,
-        Err(_) => {
-            // We assume the problem here is IPv6 on this host. If the problem is
-            // not IPv6 then other tests will also fail.
-            return;
-        }
-    };
-    let addrs = node.local_endpoint_addresses().await.unwrap();
-    let peer_id = node.node_id();
-    tokio::time::timeout(Duration::from_secs(10), async move {
-        let (secret_key, peer) = get_options(peer_id, addrs);
-        let request = GetRequest::all(hash);
-        run_collection_get_request(secret_key, peer, request).await
-    })
-    .await
-    .expect("timeout")
-    .expect("get failed");
-}
-
-/// Simulate a node that has nothing
-#[tokio::test]
-async fn test_not_found() {
-    let _guard = iroh_test::logging::setup();
-
-    let db = iroh_blobs::store::readonly_mem::Store::default();
-    let hash = blake3::hash(b"hello").into();
-    let node = match test_node(db).spawn().await {
-        Ok(provider) => provider,
-        Err(_) => {
-            // We assume the problem here is IPv6 on this host. If the problem is
-            // not IPv6 then other tests will also fail.
-            return;
-        }
-    };
-    let addrs = node.local_endpoint_addresses().await.unwrap();
-    let peer_id = node.node_id();
-    tokio::time::timeout(Duration::from_secs(10), async move {
-        let (secret_key, peer) = get_options(peer_id, addrs);
-        let request = GetRequest::single(hash);
-        let res = run_collection_get_request(secret_key, peer, request).await;
-        if let Err(cause) = res {
-            if let Some(e) = cause.downcast_ref::<DecodeError>() {
-                if let DecodeError::NotFound = e {
-                    Ok(())
-                } else {
-                    anyhow::bail!("expected DecodeError::NotFound, got {:?}", e);
-                }
-            } else {
-                anyhow::bail!("expected DecodeError, got {:?}", cause);
-            }
-        } else {
-            anyhow::bail!("expected error when getting non-existent blob");
-        }
-    })
-    .await
-    .expect("timeout")
-    .expect("get failed");
-}
-
-/// Simulate a node that has just begun downloading a blob, but does not yet have any data
-#[tokio::test]
-async fn test_chunk_not_found_1() {
-    let _guard = iroh_test::logging::setup();
-
-    let db = iroh_blobs::store::mem::Store::new();
-    let data = (0..1024 * 64).map(|i| i as u8).collect::<Vec<u8>>();
-    let hash = blake3::hash(&data).into();
-    let _entry = db.get_or_create(hash, data.len() as u64).await.unwrap();
-    let node = match test_node(db).spawn().await {
-        Ok(provider) => provider,
-        Err(_) => {
-            // We assume the problem here is IPv6 on this host. If the problem is
-            // not IPv6 then other tests will also fail.
-            return;
-        }
-    };
-    let addrs = node.local_endpoint_addresses().await.unwrap();
-    let peer_id = node.node_id();
-    tokio::time::timeout(Duration::from_secs(10), async move {
-        let (secret_key, peer) = get_options(peer_id, addrs);
-        let request = GetRequest::single(hash);
-        let res = run_collection_get_request(secret_key, peer, request).await;
-        if let Err(cause) = res {
-            if let Some(e) = cause.downcast_ref::<DecodeError>() {
-                if let DecodeError::NotFound = e {
-                    Ok(())
-                } else {
-                    anyhow::bail!("expected DecodeError::NotFound, got {:?}", e);
-                }
-            } else {
-                anyhow::bail!("expected DecodeError, got {:?}", cause);
-            }
-        } else {
-            anyhow::bail!("expected error when getting non-existent blob");
-        }
-    })
-    .await
-    .expect("timeout")
-    .expect("get failed");
-}
-
-#[tokio::test]
-async fn test_run_ticket() {
-    let _guard = iroh_test::logging::setup();
-
-    let (db, hash) = create_test_db([("test", b"hello")]);
-    let node = test_node(db).spawn().await.unwrap();
-    let _drop_guard = node.cancel_token().drop_guard();
-
-    let mut addr = node.net().node_addr().await.unwrap();
-    addr.apply_options(AddrInfoOptions::RelayAndAddresses);
-    let ticket = BlobTicket::new(addr, hash, iroh_blobs::BlobFormat::HashSeq)
-        .expect("ticket creation failed");
-
-    tokio::time::timeout(Duration::from_secs(10), async move {
-        let request = GetRequest::all(hash);
-        run_collection_get_request(SecretKey::generate(), ticket.node_addr().clone(), request).await
-    })
-    .await
-    .expect("timeout")
-    .expect("get ticket failed");
-}
-
-/// Utility to validate that the children of a collection are correct
-fn validate_children(collection: Collection, children: BTreeMap<u64, Bytes>) -> anyhow::Result<()> {
-    let blobs = collection.into_iter().collect::<Vec<_>>();
-    anyhow::ensure!(blobs.len() == children.len());
-    for (child, (_name, hash)) in blobs.into_iter().enumerate() {
-        let child = child as u64;
-        let data = children.get(&child).unwrap();
-        anyhow::ensure!(hash == blake3::hash(data).into());
-    }
-    Ok(())
-}
-
-async fn run_collection_get_request(
-    secret_key: SecretKey,
-    peer: NodeAddr,
-    request: GetRequest,
-) -> anyhow::Result<(Collection, BTreeMap<u64, Bytes>, Stats)> {
-    let connection = dial(secret_key, peer).await?;
-    let initial = fsm::start(connection, request);
-    let connected = initial.next().await?;
-    let ConnectedNext::StartRoot(fsm_at_start_root) = connected.next().await? else {
-        anyhow::bail!("request did not include collection");
-    };
-    Collection::read_fsm_all(fsm_at_start_root).await
-}
-
-#[tokio::test]
-async fn test_run_fsm() {
-    let _guard = iroh_test::logging::setup();
-
-    let (db, hash) = create_test_db([("a", b"hello"), ("b", b"world")]);
-    let node = test_node(db).spawn().await.unwrap();
-    let addrs = node.local_endpoint_addresses().await.unwrap();
-    let peer_id = node.node_id();
-    tokio::time::timeout(Duration::from_secs(10), async move {
-        let (secret_key, peer) = get_options(peer_id, addrs);
-        let request = GetRequest::all(hash);
-        let (collection, children, _) =
-            run_collection_get_request(secret_key, peer, request).await?;
-        validate_children(collection, children)?;
-        anyhow::Ok(())
-    })
-    .await
-    .expect("timeout")
-    .expect("get failed");
-}
-
-/// compute the range of the last chunk of a blob of the given size
-fn last_chunk_range(size: usize) -> Range<usize> {
-    const CHUNK_LEN: usize = 1024;
-    const MASK: usize = CHUNK_LEN - 1;
-    // e.g. a size of 2500 gives 2048..2500, while an exact multiple
-    // such as 2048 gives the full final chunk 1024..2048.
-    if (size & MASK) == 0 {
-        size - CHUNK_LEN..size
-    } else {
-        (size & !MASK)..size
-    }
-}
-
-fn last_chunk(data: &[u8]) -> &[u8] {
-    let range = last_chunk_range(data.len());
-    &data[range]
-}
-
-fn make_test_data(n: usize) -> Vec<u8> {
-    let mut data = Vec::with_capacity(n);
-    for i in 0..n {
-        data.push((i / 1024) as u8);
-    }
-    data
-}
-
-/// Ask for the last chunk of a blob, even if we don't know the size yet.
-///
-/// The verified last chunk also verifies the size.
-#[tokio::test]
-async fn test_size_request_blob() {
-    let _guard = iroh_test::logging::setup();
-
-    let expected = make_test_data(1024 * 64 + 1234);
-    let last_chunk = last_chunk(&expected);
-    let (db, hashes) = iroh_blobs::store::readonly_mem::Store::new([("test", &expected)]);
-    let hash = Hash::from(*hashes.values().next().unwrap());
-    let node = test_node(db).spawn().await.unwrap();
-    let addrs = node.local_endpoint_addresses().await.unwrap();
-    let peer_id = node.node_id();
-    tokio::time::timeout(Duration::from_secs(10), async move {
-        let request = GetRequest::last_chunk(hash);
-        let (secret_key, peer) = get_options(peer_id, addrs);
-        let connection = dial(secret_key, peer).await?;
-        let response = fsm::start(connection, request);
-        let connected = response.next().await?;
-        let ConnectedNext::StartRoot(start) = connected.next().await?
else { - panic!() - }; - let header = start.next(); - let (_, actual) = header.concatenate_into_vec().await?; - assert_eq!(actual, last_chunk); - anyhow::Ok(()) - }) - .await - .expect("timeout") - .expect("get failed"); -} - -#[tokio::test] -async fn test_collection_stat() { - let _guard = iroh_test::logging::setup(); - - let child1 = make_test_data(123456); - let child2 = make_test_data(345678); - let (db, hash) = create_test_db([("a", &child1), ("b", &child2)]); - let node = test_node(db.clone()).spawn().await.unwrap(); - let addrs = node.local_endpoint_addresses().await.unwrap(); - let peer_id = node.node_id(); - tokio::time::timeout(Duration::from_secs(10), async move { - // first 1024 bytes - let header = ChunkRanges::from(..ChunkNum(1)); - // last chunk, whatever it is, to verify the size - let end = ChunkRanges::from(ChunkNum(u64::MAX)..); - // combine them - let ranges = &header | &end; - let request = GetRequest::new( - hash, - RangeSpecSeq::from_ranges_infinite([ChunkRanges::all(), ranges]), - ); - let (secret_key, peer) = get_options(peer_id, addrs); - let (_collection, items, _stats) = - run_collection_get_request(secret_key, peer, request).await?; - // we should get the first <=1024 bytes and the last chunk of each child - // so now we know the size and can guess the type by inspecting the header - assert_eq!(items.len(), 2); - assert_eq!(&items[&0][..1024], &child1[..1024]); - assert!(items[&0].ends_with(last_chunk(&child1))); - assert_eq!(&items[&1][..1024], &child2[..1024]); - assert!(items[&1].ends_with(last_chunk(&child2))); - anyhow::Ok(()) - }) - .await - .expect("timeout") - .expect("get failed"); -} From 0172035b75d2de42dba22f95f26ea4f93296c352 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 19 Nov 2024 10:20:14 +0100 Subject: [PATCH 04/17] start updating examples --- Cargo.lock | 2 + iroh/Cargo.toml | 20 ++---- iroh/examples/client.rs | 43 ----------- iroh/examples/collection-fetch.rs | 94 ------------------------ iroh/examples/collection-provide.rs | 73 ------------------- iroh/examples/custom-protocol.rs | 3 +- iroh/examples/hammer.rs | 106 ---------------------------- iroh/examples/hello-world-fetch.rs | 25 +++++-- 8 files changed, 30 insertions(+), 336 deletions(-) delete mode 100644 iroh/examples/client.rs delete mode 100644 iroh/examples/collection-fetch.rs delete mode 100644 iroh/examples/collection-provide.rs delete mode 100644 iroh/examples/hammer.rs diff --git a/Cargo.lock b/Cargo.lock index 826cdfe99a..0d3841f38b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2621,6 +2621,7 @@ dependencies = [ "indicatif", "iroh", "iroh-base", + "iroh-blobs", "iroh-docs", "iroh-gossip", "iroh-io", @@ -2628,6 +2629,7 @@ dependencies = [ "iroh-net", "iroh-node-util", "iroh-quinn", + "iroh-relay", "iroh-router", "iroh-test", "nested_enum_utils", diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 50db996815..0976c47f50 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -58,23 +58,25 @@ tempfile = "3.4" tokio = { version = "1", features = ["io-util", "rt"] } tokio-util = { version = "0.7", features = ["codec", "io-util", "io", "time"] } tracing = "0.1" +iroh-relay = { version = "0.28", path = "../iroh-relay" } +ref-cast = "1.0.23" # Examples clap = { version = "4", features = ["derive"], optional = true } indicatif = { version = "0.17", features = ["tokio"], optional = true } -ref-cast = "1.0.23" console = { version = "0.15.5", optional = true } +iroh-blobs = { version = "0.28", optional = true, features = ["rpc"] } # Documentation tests url = { version = "2.5.0", 
features = ["serde"] }
 serde-error = "0.1.3"
 
 [features]
-default = ["metrics", "fs-store"]
+default = ["metrics", "fs-store", "examples"]
 metrics = ["iroh-metrics"]
 fs-store = []
 test = []
-examples = ["dep:clap", "dep:indicatif"]
+examples = ["dep:clap", "dep:indicatif", "dep:iroh-blobs"]
 discovery-local-network = [
     "iroh-net/discovery-local-network",
     "examples",
@@ -107,22 +109,14 @@ rustdoc-args = ["--cfg", "iroh_docsrs"]
 
 [[example]]
 name = "hello-world-provide"
+required-features = ["examples"]
 
 [[example]]
 name = "hello-world-fetch"
-
-[[example]]
-name = "collection-provide"
-
-[[example]]
-name = "collection-fetch"
-
-[[example]]
-name = "rpc"
 required-features = ["examples"]
 
 [[example]]
-name = "client"
+name = "rpc"
 required-features = ["examples"]
 
 [[example]]
diff --git a/iroh/examples/client.rs b/iroh/examples/client.rs
deleted file mode 100644
index 4d2338012a..0000000000
--- a/iroh/examples/client.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-//! This example shows the shortest path to working with documents in iroh. This example creates a
-//! document and sets an entry with key: "hello", value: "world". The document is completely local.
-//!
-//! The iroh node that creates the document is backed by an in-memory database and a random node ID
-//!
-//! run this example from the project root:
-//! $ cargo run --features=examples --example client
-use indicatif::HumanBytes;
-use iroh::{base::base32, client::docs::Entry, docs::store::Query, node::Node};
-use tokio_stream::StreamExt;
-
-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
-    let node = Node::memory().enable_docs().spawn().await?;
-
-    // Could also use `node` directly, as it derefs to the client.
-    let client = node.client();
-
-    let blobs = client.blobs();
-    let doc = client.docs().create().await?;
-    let author = client.authors().default().await?;
-
-    doc.set_bytes(author, "hello", "world").await?;
-
-    let mut stream = doc.get_many(Query::all()).await?;
-    while let Some(entry) = stream.try_next().await? {
-        println!("entry {}", fmt_entry(&entry));
-        let content = blobs.read_to_bytes(entry.content_hash()).await?;
-        println!(" content {}", std::str::from_utf8(&content)?)
-    }
-
-    Ok(())
-}
-
-fn fmt_entry(entry: &Entry) -> String {
-    let id = entry.id();
-    let key = std::str::from_utf8(id.key()).unwrap_or("<bad key>");
-    let author = id.author().fmt_short();
-    let hash = entry.content_hash();
-    let hash = base32::fmt_short(hash.as_bytes());
-    let len = HumanBytes(entry.content_len());
-    format!("@{author}: {key} = {hash} ({len})",)
-}
diff --git a/iroh/examples/collection-fetch.rs b/iroh/examples/collection-fetch.rs
deleted file mode 100644
index 98712a07f6..0000000000
--- a/iroh/examples/collection-fetch.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-//! An example that fetches an iroh collection and prints the contents.
-//! Will only work with collections that contain text, and is meant as a companion to the `collection-provide` example.
-//!
-//! This is using an in memory database and a random node id.
-//! Run the `collection-provide` example, which will give you instructions on how to run this example.
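A `BlobTicket` bundles everything the fetch in this example needs: the provider's `NodeAddr`, the content hash, and the `BlobFormat` that says whether the hash names a single blob or a hash sequence. A minimal parsing sketch, assuming the same iroh-base API the example imports (`ticket_str` is a placeholder):

    let ticket = BlobTicket::from_str(ticket_str)?;
    ensure!(ticket.format() == BlobFormat::HashSeq); // collections travel as hash sequences
    let (addr, hash) = (ticket.node_addr().clone(), ticket.hash());
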
-use std::{env, str::FromStr};
-
-use anyhow::{bail, ensure, Context, Result};
-use iroh::{base::ticket::BlobTicket, blobs::BlobFormat};
-use tracing_subscriber::{prelude::*, EnvFilter};
-
-// set the RUST_LOG env var to one of {debug,info,warn} to see logging info
-pub fn setup_logging() {
-    tracing_subscriber::registry()
-        .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
-        .with(EnvFilter::from_default_env())
-        .try_init()
-        .ok();
-}
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    setup_logging();
-    println!("\ncollection fetch example!");
-    // get the ticket
-    let args: Vec<String> = env::args().collect();
-
-    if args.len() != 2 {
-        bail!("expected one argument [BLOB_TICKET]\n\nGet a ticket by running the following command in a separate terminal:\n\n`cargo run --example collection-provide`");
-    }
-
-    // deserialize ticket string into a ticket
-    let ticket =
-        BlobTicket::from_str(&args[1]).context("failed parsing blob ticket\n\nGet a ticket by running the following command in a separate terminal:\n\n`cargo run --example collection-provide`")?;
-
-    // create a new node
-    let node = iroh::node::Node::memory().spawn().await?;
-
-    println!("fetching hash: {}", ticket.hash());
-    println!("node id: {}", node.node_id());
-    println!("node listening addresses:");
-    let addrs = node.net().node_addr().await?;
-    for addr in addrs.direct_addresses() {
-        println!("\t{:?}", addr);
-    }
-    println!(
-        "node relay server url: {:?}",
-        node.home_relay()
-            .expect("a default relay url should be provided")
-            .to_string()
-    );
-
-    // Get the content we have just fetched from the iroh database.
-    ensure!(
-        ticket.format() == BlobFormat::HashSeq,
-        "'collection' example expects to fetch a collection, but the ticket indicates a single blob."
-    );
-
-    // `download` returns a stream of `DownloadProgress` events. You can iterate through these updates to get progress
-    // on the state of your download.
-    let download_stream = node
-        .blobs()
-        .download_hash_seq(ticket.hash(), ticket.node_addr().clone())
-        .await?;
-
-    // You can also just `await` the stream, which polls the `DownloadProgress` stream for you.
-    let outcome = download_stream.await.context("unable to download hash")?;
-
-    println!(
-        "\ndownloaded {} bytes from node {}",
-        outcome.downloaded_size,
-        ticket.node_addr().node_id
-    );
-
-    // If the `BlobFormat` is `HashSeq`, then we can assume for the example (and for any `HashSeq` that is derived from any iroh API), that it can be parsed as a `Collection`
-    // A `Collection` is a special `HashSeq`, where we preserve the names of any blobs added to the collection. (We do this by designating the first entry in the `Collection` as meta data.)
-    // To get the content of the collection, we first get the collection from the database using the `blobs` API
-    let collection = node
-        .blobs()
-        .get_collection(ticket.hash())
-        .await
-        .context("expect hash with `BlobFormat::HashSeq` to be a collection")?;
-
-    // Then we iterate through the collection, which gives us the name and hash of each entry in the collection.
-    for (name, hash) in collection.iter() {
-        println!("\nname: {name}, hash: {hash}");
-        // Use the hash of the blob to get the content.
-        let content = node.blobs().read_to_bytes(*hash).await?;
-        let s = std::str::from_utf8(&content).context("unable to parse blob as a utf-8 string")?;
-        println!("{s}");
-    }
-
-    Ok(())
-}
diff --git a/iroh/examples/collection-provide.rs b/iroh/examples/collection-provide.rs
deleted file mode 100644
index 06d9f6dcfe..0000000000
--- a/iroh/examples/collection-provide.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-//! An example that serves an iroh collection from memory.
-//!
-//! Since this is using the default iroh collection format, it can be downloaded
-//! recursively using the iroh CLI.
-//!
-//! This is using an in memory database and a random node id.
-//! run this example from the project root:
-//! $ cargo run --example collection-provide
-use iroh::blobs::{format::collection::Collection, util::SetTagOption, BlobFormat};
-use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket};
-use tracing_subscriber::{prelude::*, EnvFilter};
-
-// set the RUST_LOG env var to one of {debug,info,warn} to see logging info
-pub fn setup_logging() {
-    tracing_subscriber::registry()
-        .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
-        .with(EnvFilter::from_default_env())
-        .try_init()
-        .ok();
-}
-
-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
-    setup_logging();
-    println!("\ncollection provide example!");
-
-    // create a new node
-    let node = iroh::node::Node::memory().spawn().await?;
-
-    // Add two blobs
-    let blob1 = node.blobs().add_bytes("the first blob of bytes").await?;
-    let blob2 = node.blobs().add_bytes("the second blob of bytes").await?;
-
-    // Build a collection from the two blobs
-    let collection: Collection = [("blob1", blob1.hash), ("blob2", blob2.hash)]
-        .into_iter()
-        .collect();
-
-    // Store the collection
-    let (hash, _) = node
-        .blobs()
-        .create_collection(collection, SetTagOption::Auto, Default::default())
-        .await?;
-
-    // create a ticket
-    // tickets wrap all details needed to get a collection
-    let mut addr = node.net().node_addr().await?;
-    addr.apply_options(AddrInfoOptions::RelayAndAddresses);
-    let ticket = BlobTicket::new(addr, hash, BlobFormat::HashSeq)?;
-
-    // print some info about the node
-    println!("serving hash: {}", ticket.hash());
-    println!("node id: {}", ticket.node_addr().node_id);
-    println!("node listening addresses:");
-    for addr in ticket.node_addr().direct_addresses() {
-        println!("\t{:?}", addr);
-    }
-    println!(
-        "node relay server url: {:?}",
-        ticket
-            .node_addr()
-            .relay_url()
-            .expect("a default relay url should be provided")
-            .to_string()
-    );
-    // print the ticket, containing all the above information
-    println!("\nin another terminal, run:");
-    println!("\tcargo run --example collection-fetch {}", ticket);
-    // block until SIGINT is received (ctrl+c)
-    tokio::signal::ctrl_c().await?;
-    node.shutdown().await?;
-    Ok(())
-}
diff --git a/iroh/examples/custom-protocol.rs b/iroh/examples/custom-protocol.rs
index b6bdd56d38..92130a4dff 100644
--- a/iroh/examples/custom-protocol.rs
+++ b/iroh/examples/custom-protocol.rs
@@ -44,14 +44,13 @@ use anyhow::Result;
 use clap::Parser;
 use futures_lite::future::Boxed as BoxedFuture;
 use iroh::{
-    blobs::Hash,
-    client::blobs,
     net::{
         endpoint::{get_remote_node_id, Connecting},
         Endpoint, NodeId,
     },
     router::ProtocolHandler,
 };
+use iroh_base::hash::Hash;
 use tracing_subscriber::{prelude::*, EnvFilter};
 
 #[derive(Debug, Parser)]
diff --git a/iroh/examples/hammer.rs b/iroh/examples/hammer.rs
deleted file mode 100644
index 64d1f7b172..0000000000
--- a/iroh/examples/hammer.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-//! The smallest possible example to spin up a node and serve a single blob.
-//!
-//! This is using an in memory database and a random node id.
-//! run this example from the project root:
-//! $ cargo run --example hello-world-provide
-use std::str::FromStr;
-
-use anyhow::Context;
-use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket};
-use iroh_net::{relay::RelayUrl, RelayMap, RelayMode};
-use tracing_subscriber::{prelude::*, EnvFilter};
-
-// set the RUST_LOG env var to one of {debug,info,warn} to see logging info
-pub fn setup_logging() {
-    tracing_subscriber::registry()
-        .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
-        .with(EnvFilter::from_default_env())
-        .try_init()
-        .ok();
-}
-
-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
-    setup_logging();
-    println!("Hammer time!");
-
-    // get iterations from command line
-    let args: Vec<String> = std::env::args().collect();
-    let iterations = if args.len() == 2 {
-        args[1]
-            .parse::<usize>()
-            .context("failed to parse iterations")?
-    } else {
-        10
-    };
-
-    for i in 0..iterations {
-        // create a new node
-        println!("node: {}", i);
-        let relay_url = RelayUrl::from_str("http://localhost:3340").unwrap();
-        let relay_map = RelayMap::from_url(relay_url.clone());
-        tokio::task::spawn(async move {
-            let node = iroh::node::Node::memory()
-                .relay_mode(RelayMode::Custom(relay_map.clone()))
-                .spawn()
-                .await
-                .unwrap();
-
-            // add some data and remember the hash
-            let res = node.blobs().add_bytes("Hello, world!").await.unwrap();
-
-            // create a ticket
-            let mut addr = node.net().node_addr().await.unwrap();
-            addr.apply_options(AddrInfoOptions::RelayAndAddresses);
-            let ticket = BlobTicket::new(addr, res.hash, res.format).unwrap();
-
-            tokio::task::spawn(async move {
-                let client_node = iroh::node::Node::memory()
-                    .relay_mode(RelayMode::Custom(relay_map.clone()))
-                    .spawn()
-                    .await
-                    .unwrap();
-
-                // `download` returns a stream of `DownloadProgress` events. You can iterate through these updates to get progress
-                // on the state of your download.
-                let download_stream = client_node
-                    .blobs()
-                    .download(ticket.hash(), ticket.node_addr().clone())
-                    .await
-                    .unwrap();
-
-                // You can also just `await` the stream, which will poll the `DownloadProgress` stream for you.
-                let outcome = download_stream
-                    .await
-                    .context("unable to download hash")
-                    .unwrap();
-
-                println!(
-                    "\ndownloaded {} bytes from node {}",
-                    outcome.downloaded_size,
-                    ticket.node_addr().node_id
-                );
-
-                // Get the content we have just fetched from the iroh database.
-
-                let bytes = client_node
-                    .blobs()
-                    .read_to_bytes(ticket.hash())
-                    .await
-                    .unwrap();
-                let s = std::str::from_utf8(&bytes)
-                    .context("unable to parse blob as a utf-8 string")
-                    .unwrap();
-                println!("content: {}", s);
-
-                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
-            });
-
-            tokio::time::sleep(std::time::Duration::from_secs(5)).await;
-            node.shutdown().await.unwrap();
-        });
-        tokio::time::sleep(std::time::Duration::from_millis(5)).await;
-    }
-    tokio::signal::ctrl_c().await?;
-    Ok(())
-}
diff --git a/iroh/examples/hello-world-fetch.rs b/iroh/examples/hello-world-fetch.rs
index 06a62bbf23..7b09d34043 100644
--- a/iroh/examples/hello-world-fetch.rs
+++ b/iroh/examples/hello-world-fetch.rs
@@ -3,10 +3,13 @@
 //!
 //! This is using an in memory database and a random node id.
 //! Run the `provide` example, which will give you instructions on how to run this example.
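Blobs support now has to be wired up explicitly, as the hunk below shows: a `LocalPool` for spawning, a store, a `Downloader`, and a `Blobs` handler that the builder `accept`s under the iroh-blobs ALPN. The same hook takes any protocol handler; a sketch with an illustrative handler (the `Echo` type and the ALPN string are assumptions, not APIs from this diff):

    let builder = iroh::node::Node::memory().build().await?;
    // `Echo` stands in for any type implementing iroh::router::ProtocolHandler.
    builder.accept(b"my-app/echo/0".to_vec(), Arc::new(Echo::default()));
    let node = builder.spawn().await?;
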
-use std::{env, str::FromStr}; +use std::{env, str::FromStr, sync::Arc}; use anyhow::{bail, ensure, Context, Result}; -use iroh::{base::ticket::BlobTicket, blobs::BlobFormat}; +use iroh::base::ticket::BlobTicket; +use iroh_blobs::{ + downloader::Downloader, net_protocol::Blobs, util::local_pool::LocalPool, BlobFormat, +}; use tracing_subscriber::{prelude::*, EnvFilter}; // set the RUST_LOG env var to one of {debug,info,warn} to see logging info @@ -34,7 +37,20 @@ async fn main() -> Result<()> { BlobTicket::from_str(&args[1]).context("failed parsing blob ticket\n\nGet a ticket by running the follow command in a separate terminal:\n\n`cargo run --example hello-world-provide`")?; // create a new node - let node = iroh::node::Node::memory().spawn().await?; + let builder = iroh::node::Node::memory().build().await?; + let local_pool = LocalPool::default(); + let store = iroh_blobs::store::mem::Store::new(); + let downloader = Downloader::new(store.clone(), builder.endpoint(), local_pool.handle()); + let blobs = Blobs::new_with_events( + store, + local_pool.handle().clone(), + Default::default(), + downloader, + builder.endpoint(), + ); + let blobs_client = blobs.client(); + builder.accept(iroh_blobs::protocol::ALPN.to_vec(), Arc::new(blobs)); + let node = builder.spawn().await?; println!("fetching hash: {}", ticket.hash()); println!("node id: {}", node.node_id()); @@ -58,8 +74,7 @@ async fn main() -> Result<()> { // `download` returns a stream of `DownloadProgress` events. You can iterate through these updates to get progress // on the state of your download. - let download_stream = node - .blobs() + let download_stream = blobs_client .download(ticket.hash(), ticket.node_addr().clone()) .await?; From 1e92e4a8dc54d5463e9b387b66023eb8ac35a915 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 19 Nov 2024 10:40:51 +0100 Subject: [PATCH 05/17] fixup examples --- Cargo.lock | 221 ++++++++++++++----------- iroh/Cargo.toml | 19 ++- iroh/examples/custom-protocol.rs | 32 +++- iroh/examples/hello-world-fetch.rs | 20 ++- iroh/examples/hello-world-provide.rs | 24 ++- iroh/examples/local-swarm-discovery.rs | 46 +++-- iroh/examples/rpc.rs | 6 +- 7 files changed, 230 insertions(+), 138 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d3841f38b..c886f39d89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -65,9 +65,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" [[package]] name = "android-tzdata" @@ -175,7 +175,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", ] @@ -270,9 +270,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", @@ -658,9 +658,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = 
"fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -668,9 +668,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -692,9 +692,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "clipboard-win" @@ -741,14 +741,14 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" +checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9" dependencies = [ - "crossterm", + "crossterm 0.28.1", "strum 0.26.3", "strum_macros 0.26.4", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] @@ -782,7 +782,7 @@ dependencies = [ "encode_unicode", "lazy_static", "libc", - "unicode-width", + "unicode-width 0.1.14", "windows-sys 0.52.0", ] @@ -826,9 +826,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" dependencies = [ "libc", ] @@ -940,6 +940,19 @@ dependencies = [ "winapi", ] +[[package]] +name = "crossterm" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" +dependencies = [ + "bitflags 2.6.0", + "crossterm_winapi", + "parking_lot", + "rustix", + "winapi", +] + [[package]] name = "crossterm_winapi" version = "0.9.1" @@ -1167,7 +1180,7 @@ dependencies = [ "console", "shell-words", "tempfile", - "thiserror 1.0.68", + "thiserror 1.0.69", "zeroize", ] @@ -1528,9 +1541,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fd-lock" @@ -1605,7 +1618,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" dependencies = [ "nonempty", - "thiserror 1.0.68", + "thiserror 1.0.69", ] [[package]] @@ -1704,7 +1717,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ - "fastrand 2.1.1", + "fastrand 2.2.0", "futures-core", "futures-io", "parking", @@ -2013,7 +2026,7 @@ dependencies = [ "ipnet", "once_cell", "rand", - "thiserror 1.0.68", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -2043,7 +2056,7 @@ dependencies = [ "rustls", "rustls-pemfile", 
"serde", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tinyvec", "tokio", @@ -2069,7 +2082,7 @@ dependencies = [ "lru-cache", "parking_lot", "serde", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -2092,7 +2105,7 @@ dependencies = [ "rustls", "serde", "smallvec", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tokio-rustls", "tracing", @@ -2116,7 +2129,7 @@ dependencies = [ "prefix-trie", "rustls", "serde", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tokio", "tokio-rustls", @@ -2542,16 +2555,16 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.8" +version = "0.17.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" +checksum = "cbf675b85ed934d3c67b5c5469701eec7db22689d0a2139d856e0925fa28b281" dependencies = [ "console", - "instant", "number_prefix", "portable-atomic", "tokio", - "unicode-width", + "unicode-width 0.2.0", + "web-time", ] [[package]] @@ -2622,8 +2635,6 @@ dependencies = [ "iroh", "iroh-base", "iroh-blobs", - "iroh-docs", - "iroh-gossip", "iroh-io", "iroh-metrics", "iroh-net", @@ -2650,7 +2661,7 @@ dependencies = [ "tempfile", "testdir", "testresult", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -2684,7 +2695,7 @@ dependencies = [ "serde_json", "serde_test", "ssh-key", - "thiserror 1.0.68", + "thiserror 1.0.69", "ttl_cache", "url", "zeroize", @@ -2769,7 +2780,7 @@ dependencies = [ "colored", "comfy-table", "console", - "crossterm", + "crossterm 0.27.0", "derive_more", "dialoguer", "duct", @@ -2805,7 +2816,7 @@ dependencies = [ "strum 0.26.3", "tempfile", "testdir", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tokio", "tokio-util", @@ -3054,7 +3065,7 @@ dependencies = [ "surge-ping", "swarm-discovery", "testresult", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tokio", "tokio-rustls", @@ -3166,7 +3177,7 @@ dependencies = [ "rustc-hash", "rustls", "socket2", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -3184,7 +3195,7 @@ dependencies = [ "rustls", "rustls-platform-verifier", "slab", - "thiserror 1.0.68", + "thiserror 1.0.69", "tinyvec", "tracing", ] @@ -3250,7 +3261,7 @@ dependencies = [ "smallvec", "socket2", "stun-rs", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tokio", "tokio-rustls", @@ -3352,7 +3363,7 @@ dependencies = [ "combine", "jni-sys", "log", - "thiserror 1.0.68", + "thiserror 1.0.69", "walkdir", ] @@ -3382,9 +3393,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.161" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libm" @@ -3489,7 +3500,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "sha1_smol", - "thiserror 1.0.68", + "thiserror 1.0.69", "tracing", ] @@ -3649,7 +3660,7 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror 1.0.68", + "thiserror 1.0.69", ] [[package]] @@ -3663,7 +3674,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", ] @@ -3699,7 +3710,7 @@ dependencies = [ "rtnetlink", "serde", "socket2", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tokio", "tokio-util", @@ -4109,7 +4120,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror 1.0.68", + "thiserror 1.0.69", "ucd-trie", ] @@ -4197,7 +4208,7 @@ dependencies = [ "rand", "self_cell", "simple-dns", - "thiserror 1.0.68", + "thiserror 1.0.69", "tracing", "ureq", "wasm-bindgen", @@ -4335,7 +4346,7 @@ dependencies = [ "serde", "smallvec", "socket2", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tokio", "tokio-util", @@ -4609,9 +4620,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", @@ -4620,26 +4631,29 @@ dependencies = [ "rustc-hash", "rustls", "socket2", - "thiserror 1.0.68", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom", "rand", "ring", "rustc-hash", "rustls", + "rustls-pki-types", "slab", - "thiserror 1.0.68", + "thiserror 2.0.3", "tinyvec", "tracing", + "web-time", ] [[package]] @@ -4745,7 +4759,7 @@ dependencies = [ "bitflags 2.6.0", "cassowary", "compact_str", - "crossterm", + "crossterm 0.27.0", "itertools 0.12.1", "lru", "paste", @@ -4753,7 +4767,7 @@ dependencies = [ "strum 0.26.3", "unicode-segmentation", "unicode-truncate", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -4845,7 +4859,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror 1.0.68", + "thiserror 1.0.69", ] [[package]] @@ -4870,9 +4884,9 @@ dependencies = [ [[package]] name = "reflink-copy" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc31414597d1cd7fdd2422798b7652a6329dda0fe0219e6335a13d5bcaa9aeb6" +checksum = "17400ed684c3a0615932f00c271ae3eea13e47056a1455821995122348ab6438" dependencies = [ "cfg-if", "rustix", @@ -4887,7 +4901,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -4902,9 +4916,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -5041,7 +5055,7 @@ dependencies = [ "netlink-proto", "netlink-sys", "nix 0.26.4", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", ] @@ -5077,9 +5091,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.39" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -5090,9 +5104,9 @@ 
dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" dependencies = [ "log", "once_cell", @@ -5130,6 +5144,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-platform-verifier" @@ -5205,7 +5222,7 @@ dependencies = [ "radix_trie", "scopeguard", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", "utf8parse", "winapi", ] @@ -5236,9 +5253,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -5285,9 +5302,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -5358,9 +5375,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -5817,7 +5834,7 @@ dependencies = [ "pnet_packet", "rand", "socket2", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -5938,7 +5955,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.1", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -5966,11 +5983,11 @@ checksum = "614b328ff036a4ef882c61570f72918f7e9c5bee1da33f8e7f91e01daee7e56c" [[package]] name = "thiserror" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl 1.0.68", + "thiserror-impl 1.0.69", ] [[package]] @@ -5984,9 +6001,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", @@ -6143,7 +6160,7 @@ dependencies = [ "rustls", "serde", "serde_json", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tokio", "tokio-rustls", @@ -6202,7 +6219,7 @@ dependencies = [ "http 1.1.0", "httparse", "js-sys", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tokio-tungstenite", "wasm-bindgen", @@ -6331,7 +6348,7 @@ 
dependencies = [ "governor", "http 1.1.0", "pin-project", - "thiserror 1.0.68", + "thiserror 1.0.69", "tower 0.4.13", "tracing", ] @@ -6355,7 +6372,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tracing-subscriber", ] @@ -6450,7 +6467,7 @@ dependencies = [ "log", "rand", "sha1", - "thiserror 1.0.68", + "thiserror 1.0.69", "url", "utf-8", ] @@ -6517,7 +6534,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -6526,6 +6543,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -6733,7 +6756,7 @@ dependencies = [ "event-listener 4.0.3", "futures-util", "parking_lot", - "thiserror 1.0.68", + "thiserror 1.0.69", ] [[package]] @@ -6746,6 +6769,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.26.6" @@ -7092,7 +7125,7 @@ dependencies = [ "futures", "log", "serde", - "thiserror 1.0.68", + "thiserror 1.0.69", "windows 0.58.0", "windows-core 0.58.0", ] @@ -7122,7 +7155,7 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", ] diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 0976c47f50..7dd6baa3fc 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -30,7 +30,6 @@ derive_more = { version = "1.0.0", features = [ futures-lite = "2.3" futures-util = "0.3" iroh-base = { version = "0.28.0", features = ["key"] } -iroh-docs = { version = "0.28.0", features = ["rpc"] } iroh-io = { version = "0.6.0", features = ["stats"] } iroh-metrics = { version = "0.28.0", optional = true } iroh-net = { version = "0.28.1", features = ["discovery-local-network"] } @@ -38,7 +37,6 @@ iroh-node-util = { version = "0.28.0", path = "../iroh-node-util" } iroh-router = { version = "0.28.0" } nested_enum_utils = "0.1.0" num_cpus = { version = "1.15.0" } -iroh-gossip = "0.28.1" parking_lot = "0.12.1" postcard = { version = "1", default-features = false, features = [ "alloc", @@ -65,25 +63,30 @@ ref-cast = "1.0.23" clap = { version = "4", features = ["derive"], optional = true } indicatif = { version = "0.17", features = ["tokio"], optional = true } console = { version = "0.15.5", optional = true } -iroh-blobs = { version = "0.28", optional = true, features = ["rpc"] } +iroh-blobs = { version = "0.28", optional = true, features = [ + "rpc", + "downloader", + "net_protocol", +] } # Documentation tests url = { version = "2.5.0", features = ["serde"] } serde-error = "0.1.3" [features] -default = ["metrics", "fs-store", "examples"] +default = ["metrics", "fs-store", "examples", "example-discovery-local-network"] metrics = ["iroh-metrics"] fs-store = [] test = [] +discovery-pkarr-dht = 
["iroh-net/discovery-pkarr-dht"] +test-utils = ["iroh-net/test-utils"] + examples = ["dep:clap", "dep:indicatif", "dep:iroh-blobs"] -discovery-local-network = [ +example-discovery-local-network = [ "iroh-net/discovery-local-network", "examples", "dep:console", ] -discovery-pkarr-dht = ["iroh-net/discovery-pkarr-dht"] -test-utils = ["iroh-net/test-utils"] [dev-dependencies] anyhow = { version = "1" } @@ -125,4 +128,4 @@ required-features = ["examples"] [[example]] name = "local-swarm-discovery" -required-features = ["discovery-local-network"] +required-features = ["example-discovery-local-network"] diff --git a/iroh/examples/custom-protocol.rs b/iroh/examples/custom-protocol.rs index 92130a4dff..849284ff08 100644 --- a/iroh/examples/custom-protocol.rs +++ b/iroh/examples/custom-protocol.rs @@ -51,6 +51,10 @@ use iroh::{ router::ProtocolHandler, }; use iroh_base::hash::Hash; +use iroh_blobs::{ + downloader::Downloader, net_protocol::Blobs, rpc::client::blobs::MemClient, + util::local_pool::LocalPool, +}; use tracing_subscriber::{prelude::*, EnvFilter}; #[derive(Debug, Parser)] @@ -87,11 +91,27 @@ async fn main() -> Result<()> { let args = Cli::parse(); // Build a in-memory node. For production code, you'd want a persistent node instead usually. - let builder = iroh::node::Node::memory().build().await?; + let mut builder = iroh::node::Node::memory().build().await?; + let local_pool = LocalPool::default(); + let store = iroh_blobs::store::mem::Store::new(); + let downloader = Downloader::new( + store.clone(), + builder.endpoint().clone(), + local_pool.handle().clone(), + ); + let blobs = Arc::new(Blobs::new_with_events( + store, + local_pool.handle().clone(), + Default::default(), + downloader, + builder.endpoint().clone(), + )); + let blobs_client = blobs.clone().client(); + builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs); // Build our custom protocol handler. The `builder` exposes access to various subsystems in the // iroh node. In our case, we need a blobs client and the endpoint. - let proto = BlobSearch::new(builder.client().blobs().clone(), builder.endpoint().clone()); + let proto = BlobSearch::new(blobs_client.clone(), builder.endpoint().clone()); // Add our protocol, identified by our ALPN, to the node, and spawn the node. let node = builder.accept(ALPN.to_vec(), proto.clone()).spawn().await?; @@ -117,7 +137,7 @@ async fn main() -> Result<()> { // Print out our query results. for hash in hashes { - read_and_print(&node.blobs(), hash).await?; + read_and_print(&blobs_client, hash).await?; } } } @@ -129,7 +149,7 @@ async fn main() -> Result<()> { #[derive(Debug, Clone)] struct BlobSearch { - blobs: blobs::Client, + blobs: MemClient, endpoint: Endpoint, index: Arc>>, } @@ -180,7 +200,7 @@ impl ProtocolHandler for BlobSearch { impl BlobSearch { /// Create a new protocol handler. - pub fn new(blobs: blobs::Client, endpoint: Endpoint) -> Arc { + pub fn new(blobs: MemClient, endpoint: Endpoint) -> Arc { Arc::new(Self { blobs, endpoint, @@ -281,7 +301,7 @@ impl BlobSearch { } /// Read a blob from the local blob store and print it to STDOUT. 
diff --git a/iroh/examples/hello-world-fetch.rs b/iroh/examples/hello-world-fetch.rs
index 7b09d34043..3c86be6007 100644
--- a/iroh/examples/hello-world-fetch.rs
+++ b/iroh/examples/hello-world-fetch.rs
@@ -37,19 +37,23 @@ async fn main() -> Result<()> {
         BlobTicket::from_str(&args[1]).context("failed parsing blob ticket\n\nGet a ticket by running the following command in a separate terminal:\n\n`cargo run --example hello-world-provide`")?;
 
     // create a new node
-    let builder = iroh::node::Node::memory().build().await?;
+    let mut builder = iroh::node::Node::memory().build().await?;
     let local_pool = LocalPool::default();
     let store = iroh_blobs::store::mem::Store::new();
-    let downloader = Downloader::new(store.clone(), builder.endpoint(), local_pool.handle());
-    let blobs = Blobs::new_with_events(
+    let downloader = Downloader::new(
+        store.clone(),
+        builder.endpoint().clone(),
+        local_pool.handle().clone(),
+    );
+    let blobs = Arc::new(Blobs::new_with_events(
         store,
         local_pool.handle().clone(),
         Default::default(),
         downloader,
-        builder.endpoint(),
-    );
-    let blobs_client = blobs.client();
-    builder.accept(iroh_blobs::protocol::ALPN.to_vec(), Arc::new(blobs));
+        builder.endpoint().clone(),
+    ));
+    let blobs_client = blobs.clone().client();
+    builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs);
     let node = builder.spawn().await?;
 
     println!("fetching hash: {}", ticket.hash());
@@ -89,7 +93,7 @@ async fn main() -> Result<()> {
 
     // Get the content we have just fetched from the iroh database.
 
-    let bytes = node.blobs().read_to_bytes(ticket.hash()).await?;
+    let bytes = blobs_client.read_to_bytes(ticket.hash()).await?;
     let s = std::str::from_utf8(&bytes).context("unable to parse blob as a utf-8 string")?;
     println!("{s}");
 
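The fetch example above now drives the whole download path through `blobs_client`. A compact sketch of just that path, under the same assumptions as the sketch above; `fetch_ticket` is an invented name, and `MemClient` is the client type named in the custom-protocol diff:

    // Sketch: consuming a BlobTicket with the new blobs client, mirroring the
    // hello-world-fetch diff above. `blobs_client` is assumed to be the client
    // produced by the wiring shown earlier.
    use std::str::FromStr;

    use anyhow::Context;
    use iroh_base::ticket::BlobTicket;
    use iroh_blobs::rpc::client::blobs::MemClient;

    async fn fetch_ticket(blobs_client: &MemClient, ticket_str: &str) -> anyhow::Result<String> {
        let ticket = BlobTicket::from_str(ticket_str).context("failed parsing blob ticket")?;

        // `download` returns a stream of DownloadProgress events; awaiting the
        // stream itself polls it to completion and yields the final outcome.
        let outcome = blobs_client
            .download(ticket.hash(), ticket.node_addr().clone())
            .await?
            .await
            .context("unable to download hash")?;
        println!("downloaded {} bytes", outcome.downloaded_size);

        // Read the fetched blob back out of the local store.
        let bytes = blobs_client.read_to_bytes(ticket.hash()).await?;
        let s = std::str::from_utf8(&bytes).context("unable to parse blob as a utf-8 string")?;
        Ok(s.to_string())
    }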
diff --git a/iroh/examples/hello-world-provide.rs b/iroh/examples/hello-world-provide.rs
index a74bc3bae3..53fd92a322 100644
--- a/iroh/examples/hello-world-provide.rs
+++ b/iroh/examples/hello-world-provide.rs
@@ -3,7 +3,10 @@
 //! This is using an in memory database and a random node id.
 //! run this example from the project root:
 //! $ cargo run --example hello-world-provide
+use std::sync::Arc;
+
 use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket};
+use iroh_blobs::{downloader::Downloader, net_protocol::Blobs, util::local_pool::LocalPool};
 use tracing_subscriber::{prelude::*, EnvFilter};
 
 // set the RUST_LOG env var to one of {debug,info,warn} to see logging info
@@ -21,10 +24,27 @@ async fn main() -> anyhow::Result<()> {
     println!("'Hello World' provide example!");
 
     // create a new node
-    let node = iroh::node::Node::memory().spawn().await?;
+    let mut builder = iroh::node::Node::memory().build().await?;
+    let local_pool = LocalPool::default();
+    let store = iroh_blobs::store::mem::Store::new();
+    let downloader = Downloader::new(
+        store.clone(),
+        builder.endpoint().clone(),
+        local_pool.handle().clone(),
+    );
+    let blobs = Arc::new(Blobs::new_with_events(
+        store,
+        local_pool.handle().clone(),
+        Default::default(),
+        downloader,
+        builder.endpoint().clone(),
+    ));
+    let blobs_client = blobs.clone().client();
+    builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs);
+    let node = builder.spawn().await?;
 
     // add some data and remember the hash
-    let res = node.blobs().add_bytes("Hello, world!").await?;
+    let res = blobs_client.add_bytes("Hello, world!").await?;
 
     // create a ticket
     let mut addr = node.net().node_addr().await?;
diff --git a/iroh/examples/local-swarm-discovery.rs b/iroh/examples/local-swarm-discovery.rs
index 641e62ec22..4dcf121e5e 100644
--- a/iroh/examples/local-swarm-discovery.rs
+++ b/iroh/examples/local-swarm-discovery.rs
@@ -5,18 +5,19 @@
 //! Wait for output that looks like the following:
 //! $ cargo run --example local_swarm_discovery --features="discovery-local-network" -- connect [NODE_ID] [HASH] -o [FILE_PATH]
 //! Run that command on another machine in the same local network, replacing [FILE_PATH] with the path where you want to save the transferred content.
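The remainder of this diff, below, applies the same blobs wiring to the swarm-discovery example; the piece specific to this example is the custom discovery configuration, which survives the refactor unchanged. A sketch of just that part, assuming iroh / iroh-net 0.28; the `LocalSwarmDiscovery::new(key.public())` constructor is taken from the unchanged parts of the example and is not visible in this hunk:

    // Sketch: a builder configured for local-swarm discovery, as in the
    // diff that continues below (iroh / iroh-net 0.28 APIs assumed).
    use iroh::net::discovery::local_swarm_discovery::LocalSwarmDiscovery;
    use iroh::{base::key::SecretKey, node::DiscoveryConfig};

    async fn discovery_node() -> anyhow::Result<()> {
        let key = SecretKey::generate();
        // LocalSwarmDiscovery announces this node id on the local network
        // (constructor assumed from the example; not shown in this hunk).
        let discovery = LocalSwarmDiscovery::new(key.public())?;
        let cfg = DiscoveryConfig::Custom(Box::new(discovery));

        let builder = iroh::node::Node::memory()
            .secret_key(key)
            .node_discovery(cfg)
            .bind_random_port()
            .relay_mode(iroh_net::RelayMode::Disabled)
            .build()
            .await?;
        // ...blobs wiring and `builder.spawn()` as in the sketches above...
        drop(builder);
        Ok(())
    }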
-use std::path::PathBuf; +use std::{path::PathBuf, sync::Arc}; use anyhow::ensure; use clap::{Parser, Subcommand}; use iroh::{ - base::key::SecretKey, - client::blobs::WrapOption, - net::discovery::local_swarm_discovery::LocalSwarmDiscovery, - node::{DiscoveryConfig, Node}, + base::{hash::Hash, key::SecretKey}, + net::{discovery::local_swarm_discovery::LocalSwarmDiscovery, key::PublicKey, NodeAddr}, + node::DiscoveryConfig, +}; +use iroh_blobs::{ + downloader::Downloader, net_protocol::Blobs, rpc::client::blobs::WrapOption, + util::local_pool::LocalPool, }; -use iroh_blobs::Hash; -use iroh_net::{key::PublicKey, NodeAddr}; use tracing_subscriber::{prelude::*, EnvFilter}; use self::progress::show_download_progress; @@ -67,13 +68,31 @@ async fn main() -> anyhow::Result<()> { let cfg = DiscoveryConfig::Custom(Box::new(discovery)); println!("Starting iroh node with local node discovery..."); - let node = Node::memory() + // create a new node + let mut builder = iroh::node::Node::memory() .secret_key(key) .node_discovery(cfg) .bind_random_port() .relay_mode(iroh_net::RelayMode::Disabled) - .spawn() + .build() .await?; + let local_pool = LocalPool::default(); + let store = iroh_blobs::store::mem::Store::new(); + let downloader = Downloader::new( + store.clone(), + builder.endpoint().clone(), + local_pool.handle().clone(), + ); + let blobs = Arc::new(Blobs::new_with_events( + store, + local_pool.handle().clone(), + Default::default(), + downloader, + builder.endpoint().clone(), + )); + let blobs_client = blobs.clone().client(); + builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs); + let node = builder.spawn().await?; match &cli.command { Commands::Accept { path } => { @@ -84,8 +103,7 @@ async fn main() -> anyhow::Result<()> { } let absolute = path.canonicalize()?; println!("Adding {} as {}...", path.display(), absolute.display()); - let stream = node - .blobs() + let stream = blobs_client .add_from_path( absolute, true, @@ -101,8 +119,7 @@ async fn main() -> anyhow::Result<()> { } Commands::Connect { node_id, hash, out } => { println!("NodeID: {}", node.node_id()); - let mut stream = node - .blobs() + let mut stream = blobs_client .download(*hash, NodeAddr::new(*node_id)) .await?; show_download_progress(*hash, &mut stream).await?; @@ -114,8 +131,7 @@ async fn main() -> anyhow::Result<()> { path.display(), absolute.display() ); - let stream = node - .blobs() + let stream = blobs_client .export( *hash, absolute, diff --git a/iroh/examples/rpc.rs b/iroh/examples/rpc.rs index e331cf7d67..4eb3bac6a1 100644 --- a/iroh/examples/rpc.rs +++ b/iroh/examples/rpc.rs @@ -9,7 +9,6 @@ //! The `net node-addr` command will reach out over RPC to the node constructed in the example. use clap::Parser; -use iroh_blobs::store::Store; use tracing_subscriber::{prelude::*, EnvFilter}; // set the RUST_LOG env var to one of {debug,info,warn} to see logging info @@ -21,10 +20,7 @@ pub fn setup_logging() { .ok(); } -async fn run(builder: iroh::node::Builder) -> anyhow::Result<()> -where - S: Store, -{ +async fn run(builder: iroh::node::Builder) -> anyhow::Result<()> { let node = builder .enable_rpc() .await? // enable the RPC endpoint From ba29c7050009869736c248c932beb42b322f20af Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 19 Nov 2024 11:01:23 +0100 Subject: [PATCH 06/17] refactor! 
remove iroh-cli --- .github/workflows/ci.yml | 243 +++-- .github/workflows/tests.yaml | 304 +++--- Cargo.lock | 408 +------- Cargo.toml | 3 - README.md | 5 - deny.toml | 46 +- iroh-cli/Cargo.toml | 82 -- iroh-cli/README.md | 20 - iroh-cli/src/commands.rs | 216 ---- iroh-cli/src/commands/console.rs | 145 --- iroh-cli/src/commands/doctor.rs | 1570 ------------------------------ iroh-cli/src/commands/rpc.rs | 96 -- iroh-cli/src/commands/start.rs | 321 ------ iroh-cli/src/config.rs | 279 ------ iroh-cli/src/main.rs | 31 - iroh-cli/src/progress.rs | 70 -- iroh-cli/tests/cli.rs | 981 ------------------- iroh/Cargo.toml | 2 +- iroh/src/node.rs | 172 ---- 19 files changed, 296 insertions(+), 4698 deletions(-) delete mode 100644 iroh-cli/Cargo.toml delete mode 100644 iroh-cli/README.md delete mode 100644 iroh-cli/src/commands.rs delete mode 100644 iroh-cli/src/commands/console.rs delete mode 100644 iroh-cli/src/commands/doctor.rs delete mode 100644 iroh-cli/src/commands/rpc.rs delete mode 100644 iroh-cli/src/commands/start.rs delete mode 100644 iroh-cli/src/config.rs delete mode 100644 iroh-cli/src/main.rs delete mode 100644 iroh-cli/src/progress.rs delete mode 100644 iroh-cli/tests/cli.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4f55f6d60b..1b0b6e6ea5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: CI on: pull_request: - types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ] + types: ["labeled", "unlabeled", "opened", "synchronize", "reopened"] merge_group: push: branches: @@ -24,7 +24,7 @@ jobs: tests: name: CI Test Suite if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" - uses: './.github/workflows/tests.yaml' + uses: "./.github/workflows/tests.yaml" cross_build: name: Cross Build Only @@ -35,8 +35,8 @@ jobs: fail-fast: false matrix: target: - # cross tests are currently broken vor armv7 and aarch64 - # see https://github.com/cross-rs/cross/issues/1311 + # cross tests are currently broken vor armv7 and aarch64 + # see https://github.com/cross-rs/cross/issues/1311 # - armv7-linux-androideabi # - aarch64-linux-android # Freebsd execution fails in cross @@ -45,29 +45,29 @@ jobs: # Netbsd execution fails to link in cross # - x86_64-unknown-netbsd steps: - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: recursive - - - name: Install rust stable - uses: dtolnay/rust-toolchain@stable - - - name: Cleanup Docker - continue-on-error: true - run: | - docker kill $(docker ps -q) - - # See https://github.com/cross-rs/cross/issues/1222 - - uses: taiki-e/install-action@cross - - - name: build - # cross tests are currently broken vor armv7 and aarch64 - # see https://github.com/cross-rs/cross/issues/1311. So on - # those platforms we only build but do not run tests. - run: cross build --all --target ${{ matrix.target }} - env: - RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Install rust stable + uses: dtolnay/rust-toolchain@stable + + - name: Cleanup Docker + continue-on-error: true + run: | + docker kill $(docker ps -q) + + # See https://github.com/cross-rs/cross/issues/1222 + - uses: taiki-e/install-action@cross + + - name: build + # cross tests are currently broken vor armv7 and aarch64 + # see https://github.com/cross-rs/cross/issues/1311. So on + # those platforms we only build but do not run tests. 
+ run: cross build --all --target ${{ matrix.target }} + env: + RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} android_build: name: Android Build Only @@ -82,38 +82,38 @@ jobs: - aarch64-linux-android - armv7-linux-androideabi steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set up Rust - uses: dtolnay/rust-toolchain@stable - with: - target: ${{ matrix.target }} - - name: Install rustup target - run: rustup target add ${{ matrix.target }} - - - name: Setup Java - uses: actions/setup-java@v4 - with: - distribution: 'temurin' - java-version: '17' - - - name: Setup Android SDK - uses: android-actions/setup-android@v3 - - - name: Setup Android NDK - uses: arqu/setup-ndk@main - id: setup-ndk - with: - ndk-version: r23 - add-to-path: true - - - name: Build - env: - ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }} - run: | - cargo install --version 3.5.4 cargo-ndk - cargo ndk --target ${{ matrix.target }} build + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Rust + uses: dtolnay/rust-toolchain@stable + with: + target: ${{ matrix.target }} + - name: Install rustup target + run: rustup target add ${{ matrix.target }} + + - name: Setup Java + uses: actions/setup-java@v4 + with: + distribution: "temurin" + java-version: "17" + + - name: Setup Android SDK + uses: android-actions/setup-android@v3 + + - name: Setup Android NDK + uses: arqu/setup-ndk@main + id: setup-ndk + with: + ndk-version: r23 + add-to-path: true + + - name: Build + env: + ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }} + run: | + cargo install --version 3.5.4 cargo-ndk + cargo ndk --target ${{ matrix.target }} build cross_test: name: Cross Test @@ -126,27 +126,26 @@ jobs: target: - i686-unknown-linux-gnu steps: - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: recursive - - - name: Install rust stable - uses: dtolnay/rust-toolchain@stable + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: recursive - - name: Cleanup Docker - continue-on-error: true - run: | - docker kill $(docker ps -q) + - name: Install rust stable + uses: dtolnay/rust-toolchain@stable - # See https://github.com/cross-rs/cross/issues/1222 - - uses: taiki-e/install-action@cross + - name: Cleanup Docker + continue-on-error: true + run: | + docker kill $(docker ps -q) - - name: test - run: cross test --all --target ${{ matrix.target }} -- --test-threads=12 - env: - RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }} + # See https://github.com/cross-rs/cross/issues/1222 + - uses: taiki-e/install-action@cross + - name: test + run: cross test --all --target ${{ matrix.target }} -- --test-threads=12 + env: + RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }} wasm_build: name: Build wasm32 @@ -190,7 +189,7 @@ jobs: # uses: obi1kenobi/cargo-semver-checks-action@v2 uses: n0-computer/cargo-semver-checks-action@feat-baseline with: - package: iroh, iroh-base, iroh-cli, iroh-dns-server, iroh-metrics, iroh-net, iroh-net-bench, iroh-node-util, iroh-router, netwatch, portmapper, iroh-relay, iroh-net-report + package: iroh, iroh-base, iroh-dns-server, iroh-metrics, iroh-net, iroh-net-bench, iroh-node-util, iroh-router, netwatch, portmapper, iroh-relay, iroh-net-report baseline-rev: ${{ env.HEAD_COMMIT_SHA }} use-cache: false @@ -202,13 +201,13 @@ jobs: RUSTC_WRAPPER: "sccache" SCCACHE_GHA_ENABLED: "on" steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - with: - components: rustfmt - - uses: mozilla-actions/sccache-action@v0.0.6 - - uses: 
taiki-e/install-action@cargo-make - - run: cargo make format-check + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - uses: mozilla-actions/sccache-action@v0.0.6 + - uses: taiki-e/install-action@cargo-make + - run: cargo make format-check check_docs: timeout-minutes: 30 @@ -218,17 +217,17 @@ jobs: RUSTC_WRAPPER: "sccache" SCCACHE_GHA_ENABLED: "on" steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: nightly-2024-05-02 - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.6 - - - name: Docs - run: cargo doc --workspace --all-features --no-deps --document-private-items - env: - RUSTDOCFLAGS: --cfg docsrs + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2024-05-02 + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.6 + + - name: Docs + run: cargo doc --workspace --all-features --no-deps --document-private-items + env: + RUSTDOCFLAGS: --cfg docsrs clippy_check: timeout-minutes: 30 @@ -237,23 +236,23 @@ jobs: RUSTC_WRAPPER: "sccache" SCCACHE_GHA_ENABLED: "on" steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - with: - components: clippy - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.6 + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: clippy + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.6 - # TODO: We have a bunch of platform-dependent code so should - # probably run this job on the full platform matrix - - name: clippy check (all features) - run: cargo clippy --workspace --all-features --all-targets --bins --tests --benches + # TODO: We have a bunch of platform-dependent code so should + # probably run this job on the full platform matrix + - name: clippy check (all features) + run: cargo clippy --workspace --all-features --all-targets --bins --tests --benches - - name: clippy check (no features) - run: cargo clippy --workspace --no-default-features --lib --bins --tests + - name: clippy check (no features) + run: cargo clippy --workspace --no-default-features --lib --bins --tests - - name: clippy check (default features) - run: cargo clippy --workspace --all-targets + - name: clippy check (default features) + run: cargo clippy --workspace --all-targets msrv: if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" @@ -264,16 +263,16 @@ jobs: RUSTC_WRAPPER: "sccache" SCCACHE_GHA_ENABLED: "on" steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{ env.MSRV }} - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.6 + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.MSRV }} + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.6 - - name: Check MSRV all features - run: | - cargo +$MSRV check --workspace --all-targets + - name: Check MSRV all features + run: | + cargo +$MSRV check --workspace --all-targets cargo_deny: timeout-minutes: 30 @@ -290,7 +289,7 @@ jobs: netsim-integration-tests: permissions: write-all if: "github.event_name != 'pull_request' || ! 
contains(github.event.pull_request.labels.*.name, 'flaky-test')" - uses: './.github/workflows/netsim_runner.yaml' + uses: "./.github/workflows/netsim_runner.yaml" secrets: inherit with: branch: ${{ github.ref }} @@ -357,6 +356,6 @@ jobs: timeout-minutes: 30 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - run: pip install --user codespell[toml] - - run: codespell --ignore-words-list=ans,atmost,crate,inout,ratatui,ser,stayin,swarmin,worl --skip=CHANGELOG.md + - uses: actions/checkout@v4 + - run: pip install --user codespell[toml] + - run: codespell --ignore-words-list=ans,atmost,crate,inout,ratatui,ser,stayin,swarmin,worl --skip=CHANGELOG.md diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 49349da1ab..fc21b81aca 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -6,15 +6,15 @@ on: workflow_call: inputs: rust-version: - description: 'The version of the rust compiler to run' + description: "The version of the rust compiler to run" type: string - default: 'stable' + default: "stable" flaky: - description: 'Whether to also run flaky tests' + description: "Whether to also run flaky tests" type: boolean default: false git-ref: - description: 'Which git ref to checkout' + description: "Which git ref to checkout" type: string default: ${{ github.ref }} @@ -23,7 +23,7 @@ env: RUSTFLAGS: -Dwarnings RUSTDOCFLAGS: -Dwarnings SCCACHE_CACHE_SIZE: "50G" - CRATES_LIST: "iroh,iroh-node-util,iroh-metrics,iroh-net,iroh-net-bench,iroh-test,iroh-cli,iroh-dns-server,iroh-router,netwatch,portmapper,iroh-relay,iroh-net-report" + CRATES_LIST: "iroh,iroh-node-util,iroh-metrics,iroh-net,iroh-net-bench,iroh-test,netwatch,portmapper,iroh-relay,iroh-net-report" IROH_FORCE_STAGING_RELAYS: "1" jobs: @@ -35,7 +35,7 @@ jobs: fail-fast: false matrix: name: [ubuntu-latest, macOS-arm-latest] - rust: [ '${{ inputs.rust-version }}' ] + rust: ["${{ inputs.rust-version }}"] features: [all, none, default] include: - name: ubuntu-latest @@ -53,91 +53,87 @@ jobs: # not SCCACHE_GHA_ENABLED. RUSTC_WRAPPER: "sccache" steps: - - name: Checkout - uses: actions/checkout@v4 - with: - ref: ${{ inputs.git-ref }} - - - name: Install ${{ matrix.rust }} rust - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{ matrix.rust }} - - - name: Install cargo-nextest - uses: taiki-e/install-action@v2 - with: - tool: nextest - - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.6 - - - name: Select features - run: | - case "${{ matrix.features }}" in - all) - echo "FEATURES=--all-features" >> "$GITHUB_ENV" - ;; - none) - echo "FEATURES=--no-default-features" >> "$GITHUB_ENV" - ;; - default) - echo "FEATURES=" >> "$GITHUB_ENV" - ;; - *) - exit 1 - esac - - - name: check features - if: ${{ ! 
inputs.flaky }} - run: | - for i in ${CRATES_LIST//,/ } - do - echo "Checking $i $FEATURES" - if [ $i = "iroh-cli" ]; then - targets="--bins" - else + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ inputs.git-ref }} + + - name: Install ${{ matrix.rust }} rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + + - name: Install cargo-nextest + uses: taiki-e/install-action@v2 + with: + tool: nextest + + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.6 + + - name: Select features + run: | + case "${{ matrix.features }}" in + all) + echo "FEATURES=--all-features" >> "$GITHUB_ENV" + ;; + none) + echo "FEATURES=--no-default-features" >> "$GITHUB_ENV" + ;; + default) + echo "FEATURES=" >> "$GITHUB_ENV" + ;; + *) + exit 1 + esac + + - name: check features + if: ${{ ! inputs.flaky }} + run: | + for i in ${CRATES_LIST//,/ } + do + echo "Checking $i $FEATURES" targets="--lib --bins" + echo cargo check -p $i $FEATURES $targets + cargo check -p $i $FEATURES $targets + done + env: + RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} + + - name: build tests + run: | + cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --no-run + + - name: list ignored tests + run: | + cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --run-ignored ignored-only + + - name: run tests + run: | + mkdir -p output + cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json + env: + RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} + NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 + + - name: upload results + if: ${{ failure() && inputs.flaky }} + uses: actions/upload-artifact@v4 + with: + name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json + path: output + retention-days: 45 + compression-level: 0 + + - name: doctests + if: ${{ (! inputs.flaky) && matrix.features == 'all' }} + run: | + if [ -n "${{ runner.debug }}" ]; then + export RUST_LOG=TRACE + else + export RUST_LOG=DEBUG fi - echo cargo check -p $i $FEATURES $targets - cargo check -p $i $FEATURES $targets - done - env: - RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} - - - name: build tests - run: | - cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --no-run - - - name: list ignored tests - run: | - cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --run-ignored ignored-only - - - name: run tests - run: | - mkdir -p output - cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json - env: - RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} - NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 - - - name: upload results - if: ${{ failure() && inputs.flaky }} - uses: actions/upload-artifact@v4 - with: - name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json - path: output - retention-days: 45 - compression-level: 0 - - - name: doctests - if: ${{ (! 
inputs.flaky) && matrix.features == 'all' }} - run: | - if [ -n "${{ runner.debug }}" ]; then - export RUST_LOG=TRACE - else - export RUST_LOG=DEBUG - fi - cargo test --workspace --all-features --doc + cargo test --workspace --all-features --doc build_and_test_windows: timeout-minutes: 30 @@ -147,7 +143,7 @@ jobs: fail-fast: false matrix: name: [windows-latest] - rust: [ '${{ inputs.rust-version}}' ] + rust: ["${{ inputs.rust-version}}"] features: [all, none, default] target: - x86_64-pc-windows-msvc @@ -160,72 +156,72 @@ jobs: # not SCCACHE_GHA_ENABLED. RUSTC_WRAPPER: "sccache" steps: - - name: Checkout - uses: actions/checkout@v4 - with: - ref: ${{ inputs.git-ref }} - - - name: Install ${{ matrix.rust }} - run: | - rustup toolchain install ${{ matrix.rust }} - rustup toolchain default ${{ matrix.rust }} - rustup target add ${{ matrix.target }} - rustup set default-host ${{ matrix.target }} - - - name: Install cargo-nextest - shell: powershell - run: | - $tmp = New-TemporaryFile | Rename-Item -NewName { $_ -replace 'tmp$', 'zip' } -PassThru - Invoke-WebRequest -OutFile $tmp https://get.nexte.st/latest/windows - $outputDir = if ($Env:CARGO_HOME) { Join-Path $Env:CARGO_HOME "bin" } else { "~/.cargo/bin" } - $tmp | Expand-Archive -DestinationPath $outputDir -Force - $tmp | Remove-Item - - - name: Select features - run: | - switch ("${{ matrix.features }}") { - "all" { - echo "FEATURES=--all-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append - } - "none" { - echo "FEATURES=--no-default-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append - } - "default" { - echo "FEATURES=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append - } - default { - Exit 1 - } - } - - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.6 + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ inputs.git-ref }} + + - name: Install ${{ matrix.rust }} + run: | + rustup toolchain install ${{ matrix.rust }} + rustup toolchain default ${{ matrix.rust }} + rustup target add ${{ matrix.target }} + rustup set default-host ${{ matrix.target }} + + - name: Install cargo-nextest + shell: powershell + run: | + $tmp = New-TemporaryFile | Rename-Item -NewName { $_ -replace 'tmp$', 'zip' } -PassThru + Invoke-WebRequest -OutFile $tmp https://get.nexte.st/latest/windows + $outputDir = if ($Env:CARGO_HOME) { Join-Path $Env:CARGO_HOME "bin" } else { "~/.cargo/bin" } + $tmp | Expand-Archive -DestinationPath $outputDir -Force + $tmp | Remove-Item + + - name: Select features + run: | + switch ("${{ matrix.features }}") { + "all" { + echo "FEATURES=--all-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append + } + "none" { + echo "FEATURES=--no-default-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append + } + "default" { + echo "FEATURES=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append + } + default { + Exit 1 + } + } + + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.6 - uses: msys2/setup-msys2@v2 with: release: false - - name: build tests - run: | - cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --no-run - - - name: list ignored tests - run: | - cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --run-ignored ignored-only - - - name: tests - run: | - mkdir -p output - cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --target ${{ matrix.target }} --run-ignored ${{ 
inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json - env: - RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} - NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 - - - name: upload results - if: ${{ failure() && inputs.flaky }} - uses: actions/upload-artifact@v4 - with: - name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json - path: output - retention-days: 1 - compression-level: 0 + - name: build tests + run: | + cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --no-run + + - name: list ignored tests + run: | + cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --run-ignored ignored-only + + - name: tests + run: | + mkdir -p output + cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --target ${{ matrix.target }} --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json + env: + RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} + NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 + + - name: upload results + if: ${{ failure() && inputs.flaky }} + uses: actions/upload-artifact@v4 + with: + name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json + path: output + retention-days: 1 + compression-level: 0 diff --git a/Cargo.lock b/Cargo.lock index c886f39d89..4aedb41a9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,15 +236,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "atomic-polyfill" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" -dependencies = [ - "critical-section", -] - [[package]] name = "atomic-waker" version = "1.1.2" @@ -544,27 +535,12 @@ dependencies = [ "serde_json", ] -[[package]] -name = "cassowary" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" - [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" -[[package]] -name = "castaway" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" -dependencies = [ - "rustversion", -] - [[package]] name = "cc" version = "1.1.31" @@ -745,25 +721,12 @@ version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9" dependencies = [ - "crossterm 0.28.1", + "crossterm", "strum 0.26.3", "strum_macros 0.26.4", "unicode-width 0.2.0", ] -[[package]] -name = "compact_str" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" -dependencies = [ - "castaway", - "cfg-if", - "itoa", - "ryu", - "static_assertions", -] - [[package]] name = "concurrent-queue" version = "2.5.0" @@ -860,7 +823,7 @@ dependencies = [ 
"clap", "criterion-plot", "is-terminal", - "itertools 0.10.5", + "itertools", "num-traits", "once_cell", "oorandom", @@ -881,15 +844,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools 0.10.5", + "itertools", ] -[[package]] -name = "critical-section" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" - [[package]] name = "crossbeam-channel" version = "0.5.13" @@ -924,22 +881,6 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" -[[package]] -name = "crossterm" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" -dependencies = [ - "bitflags 2.6.0", - "crossterm_winapi", - "libc", - "mio 0.8.11", - "parking_lot", - "signal-hook", - "signal-hook-mio", - "winapi", -] - [[package]] name = "crossterm" version = "0.28.1" @@ -1171,19 +1112,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "dialoguer" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" -dependencies = [ - "console", - "shell-words", - "tempfile", - "thiserror 1.0.69", - "zeroize", -] - [[package]] name = "diatomic-waker" version = "0.2.3" @@ -1208,15 +1136,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "dirs" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" -dependencies = [ - "dirs-sys", -] - [[package]] name = "dirs-next" version = "2.0.0" @@ -1227,18 +1146,6 @@ dependencies = [ "dirs-sys-next", ] -[[package]] -name = "dirs-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", -] - [[package]] name = "dirs-sys-next" version = "0.1.2" @@ -1911,15 +1818,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hash32" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" -dependencies = [ - "byteorder", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -1965,20 +1863,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "heapless" -version = "0.7.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" -dependencies = [ - "atomic-polyfill", - "hash32", - "rustc_version", - "serde", - "spin", - "stable_deref_trait", -] - [[package]] name = "heck" version = "0.4.1" @@ -2724,8 +2608,6 @@ dependencies = [ "bao-tree", "bytes", "chrono", - "clap", - "console", "derive_more", "futures-buffered", "futures-lite 2.5.0", @@ -2733,7 +2615,6 @@ dependencies = [ "genawaiter", "hashlink", "hex", - "indicatif", "iroh-base", "iroh-io", "iroh-metrics", @@ -2768,64 +2649,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "iroh-cli" -version = "0.28.1" -dependencies = [ - "anyhow", - "async-channel", - "bao-tree", - "bytes", - "clap", 
- "colored", - "comfy-table", - "console", - "crossterm 0.27.0", - "derive_more", - "dialoguer", - "duct", - "futures-buffered", - "futures-lite 2.5.0", - "futures-util", - "hex", - "human-time", - "indicatif", - "iroh", - "iroh-blobs", - "iroh-docs", - "iroh-gossip", - "iroh-metrics", - "iroh-net-report", - "iroh-node-util", - "nix 0.27.1", - "parking_lot", - "pkarr", - "portable-atomic", - "portmapper", - "postcard", - "quic-rpc", - "rand", - "rand_xorshift", - "ratatui", - "regex", - "reqwest", - "rustyline", - "serde", - "shell-words", - "shellexpand", - "strum 0.26.3", - "tempfile", - "testdir", - "thiserror 1.0.69", - "time", - "tokio", - "tokio-util", - "toml", - "tracing", - "url", - "walkdir", -] - [[package]] name = "iroh-dns-server" version = "0.28.0" @@ -2875,90 +2698,6 @@ dependencies = [ "z32", ] -[[package]] -name = "iroh-docs" -version = "0.28.0" -source = "git+https://github.com/n0-computer/iroh-docs?branch=main#d4ab8db452bbefe7aab45ed25621dde6b6ee3f15" -dependencies = [ - "anyhow", - "async-channel", - "bytes", - "clap", - "colored", - "console", - "derive_more", - "dialoguer", - "ed25519-dalek", - "futures-buffered", - "futures-lite 2.5.0", - "futures-util", - "hex", - "indicatif", - "iroh-base", - "iroh-blake3", - "iroh-blobs", - "iroh-gossip", - "iroh-metrics", - "iroh-net", - "iroh-router", - "nested_enum_utils", - "num_enum", - "portable-atomic", - "postcard", - "quic-rpc", - "quic-rpc-derive", - "rand", - "rand_core", - "redb 1.5.1", - "redb 2.2.0", - "self_cell", - "serde", - "serde-error", - "shellexpand", - "strum 0.26.3", - "tempfile", - "thiserror 2.0.3", - "tokio", - "tokio-stream", - "tokio-util", - "tracing", -] - -[[package]] -name = "iroh-gossip" -version = "0.28.1" -source = "git+https://github.com/n0-computer/iroh-gossip?branch=main#7c90c3f351585e7a364bfbf3941d07592b20dec6" -dependencies = [ - "anyhow", - "async-channel", - "bytes", - "clap", - "derive_more", - "ed25519-dalek", - "futures-concurrency", - "futures-lite 2.5.0", - "futures-util", - "hex", - "indexmap 2.6.0", - "iroh-base", - "iroh-blake3", - "iroh-metrics", - "iroh-net", - "iroh-router", - "nested_enum_utils", - "postcard", - "quic-rpc", - "quic-rpc-derive", - "rand", - "rand_core", - "serde", - "serde-error", - "strum 0.26.3", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "iroh-io" version = "0.6.1" @@ -3128,7 +2867,7 @@ dependencies = [ "rustls", "surge-ping", "testresult", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tokio-util", "tracing", @@ -3329,24 +3068,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - [[package]] name = "itoa" version = "1.0.11" @@ -3564,18 +3285,6 @@ dependencies = [ "adler2", ] -[[package]] -name = "mio" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" -dependencies = [ - "libc", - "log", - "wasi", - "windows-sys 0.48.0", -] - [[package]] name = "mio" version = "1.0.2" @@ -3739,17 +3448,6 @@ dependencies = [ "libc", ] -[[package]] -name = "nix" -version = "0.27.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "libc", -] - [[package]] name = "no-std-compat" version = "0.4.1" @@ -3993,12 +3691,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - [[package]] name = "os_pipe" version = "1.2.1" @@ -4373,7 +4065,6 @@ dependencies = [ "cobs", "embedded-io 0.4.0", "embedded-io 0.6.1", - "heapless", "postcard-derive", "serde", ] @@ -4750,26 +4441,6 @@ dependencies = [ "smallvec", ] -[[package]] -name = "ratatui" -version = "0.26.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f44c9e68fd46eda15c646fbb85e1040b657a58cdc8c98db1d97a55930d991eef" -dependencies = [ - "bitflags 2.6.0", - "cassowary", - "compact_str", - "crossterm 0.27.0", - "itertools 0.12.1", - "lru", - "paste", - "stability", - "strum 0.26.3", - "unicode-segmentation", - "unicode-truncate", - "unicode-width 0.1.14", -] - [[package]] name = "raw-cpuid" version = "11.2.0" @@ -5054,7 +4725,7 @@ dependencies = [ "netlink-packet-utils", "netlink-proto", "netlink-sys", - "nix 0.26.4", + "nix", "thiserror 1.0.69", "tokio", ] @@ -5218,7 +4889,7 @@ dependencies = [ "libc", "log", "memchr", - "nix 0.26.4", + "nix", "radix_trie", "scopeguard", "unicode-segmentation", @@ -5512,48 +5183,12 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "shell-words" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - -[[package]] -name = "shellexpand" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" -dependencies = [ - "dirs", -] - [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "signal-hook" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" -dependencies = [ - "libc", - "signal-hook-registry", -] - -[[package]] -name = "signal-hook-mio" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" -dependencies = [ - "libc", - "mio 0.8.11", - "signal-hook", -] - [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -5686,28 +5321,12 @@ dependencies = [ "zeroize", ] -[[package]] -name = "stability" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" -dependencies = [ - "quote", - "syn 2.0.87", -] - [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" -[[package]] -name = "static_assertions" -version = "1.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "str-buf" version = "1.0.6" @@ -6108,7 +5727,7 @@ dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.2", + "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -6193,7 +5812,6 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util", ] [[package]] @@ -6248,7 +5866,6 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ - "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -6526,17 +6143,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" -[[package]] -name = "unicode-truncate" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" -dependencies = [ - "itertools 0.13.0", - "unicode-segmentation", - "unicode-width 0.1.14", -] - [[package]] name = "unicode-width" version = "0.1.14" diff --git a/Cargo.toml b/Cargo.toml index c37cdec555..c66757b070 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,6 @@ members = [ "iroh-net", "iroh-test", "iroh-net/bench", - "iroh-cli", "iroh-relay", "iroh-router", "net-tools/netwatch", @@ -57,6 +56,4 @@ iroh-metrics = { path = "./iroh-metrics" } iroh-test = { path = "./iroh-test" } iroh-router = { path = "./iroh-router" } -iroh-gossip = { git = "https://github.com/n0-computer/iroh-gossip", branch = "main" } -iroh-docs = { git = "https://github.com/n0-computer/iroh-docs", branch = "main" } iroh-blobs = { git = "https://github.com/n0-computer/iroh-blobs", branch = "main" } diff --git a/README.md b/README.md index fb23bda3ff..0697d37013 100644 --- a/README.md +++ b/README.md @@ -48,11 +48,6 @@ Iroh is delivered as a Rust library and a CLI. Run `cargo add iroh`, to add `iroh` to your project. -### CLI - -Check out https://iroh.computer/docs/install to get started. - -The implementation lives in the `iroh-cli` crate. 
### Links diff --git a/deny.toml b/deny.toml index 31feb724cf..845bdf79a5 100644 --- a/deny.toml +++ b/deny.toml @@ -1,45 +1,33 @@ [bans] multiple-versions = "allow" -deny = [ - "aws-lc", - "aws-lc-rs", - "aws-lc-sys", - "native-tls", - "openssl", -] +deny = ["aws-lc", "aws-lc-rs", "aws-lc-sys", "native-tls", "openssl"] [licenses] allow = [ - "Apache-2.0", - "Apache-2.0 WITH LLVM-exception", - "BSD-2-Clause", - "BSD-3-Clause", - "BSL-1.0", # BOSL license - "ISC", - "MIT", - "OpenSSL", - "Unicode-DFS-2016", - "Zlib", - "MPL-2.0", # https://fossa.com/blog/open-source-software-licenses-101-mozilla-public-license-2-0/ - "Unicode-3.0", + "Apache-2.0", + "Apache-2.0 WITH LLVM-exception", + "BSD-2-Clause", + "BSD-3-Clause", + "BSL-1.0", # BOSL license + "ISC", + "MIT", + "OpenSSL", + "Unicode-DFS-2016", + "Zlib", + "MPL-2.0", # https://fossa.com/blog/open-source-software-licenses-101-mozilla-public-license-2-0/ + "Unicode-3.0", ] [[licenses.clarify]] name = "ring" expression = "MIT AND ISC AND OpenSSL" -license-files = [ - { path = "LICENSE", hash = 0xbd0eed23 }, -] +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] [advisories] ignore = [ - "RUSTSEC-2024-0370", # unmaintained, no upgrade available - "RUSTSEC-2024-0384", # unmaintained, no upgrade available + "RUSTSEC-2024-0370", # unmaintained, no upgrade available + "RUSTSEC-2024-0384", # unmaintained, no upgrade available ] [sources] -allow-git = [ - "https://github.com/n0-computer/iroh-blobs.git", - "https://github.com/n0-computer/iroh-gossip.git", - "https://github.com/n0-computer/iroh-docs.git", -] +allow-git = ["https://github.com/n0-computer/iroh-blobs.git"] diff --git a/iroh-cli/Cargo.toml b/iroh-cli/Cargo.toml deleted file mode 100644 index 4bcd5d0541..0000000000 --- a/iroh-cli/Cargo.toml +++ /dev/null @@ -1,82 +0,0 @@ -[package] -name = "iroh-cli" -version = "0.28.1" -edition = "2021" -readme = "README.md" -description = "Bytes. Distributed." -license = "MIT OR Apache-2.0" -authors = ["dignifiedquire ", "n0 team"] -repository = "https://github.com/n0-computer/iroh" -keywords = ["networking", "p2p", "holepunching", "ipfs"] - -# Despite not being in the workspace root this is explicitly here to -# make `cargo run` in the workspace root invoke `iroh`. 
-default-run = "iroh" - -[lints] -workspace = true - -[[bin]] -name = "iroh" -path = "src/main.rs" -doc = false - -[dependencies] -anyhow = "1.0.81" -async-channel = "2.3.1" -bao-tree = "0.13" -bytes = "1.7" -clap = { version = "4", features = ["derive"] } -colored = "2.0.4" -comfy-table = "7.0.1" -console = "0.15.5" -crossterm = "0.27.0" -derive_more = { version = "1.0.0", features = ["display"] } -dialoguer = { version = "0.11.0", default-features = false } -futures-buffered = "0.2.4" -futures-lite = "2.3" -futures-util = { version = "0.3.30", features = ["futures-sink"] } -hex = "0.4.3" -human-time = "0.1.6" -indicatif = { version = "0.17", features = ["tokio"] } -iroh = { version = "0.28.1", path = "../iroh", features = ["metrics"] } -iroh-blobs = { version = "0.28.1", features = ["cli"] } -iroh-docs = { version = "0.28.0", features = ["cli"] } -iroh-gossip = { version = "0.28.1", features = ["cli"] } -iroh-metrics = { version = "0.28.0" } -net-report = { package = "iroh-net-report", path = "../iroh-net-report", version = "0.28" } -iroh-node-util = { path = "../iroh-node-util", features = ["config", "logging", "cli"] } -parking_lot = "0.12.1" -pkarr = { version = "2.2.0", default-features = false } -portable-atomic = "1" -portmapper = { version = "0.1.0", path = "../net-tools/portmapper" } -postcard = "1.0.8" -quic-rpc = { version = "0.15", features = ["flume-transport", "quinn-transport"] } -rand = "0.8.5" -ratatui = "0.26.2" -reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } -rustyline = "12.0.0" -serde = { version = "1.0.197", features = ["derive"] } -shell-words = "1.1.0" -shellexpand = "3.1.0" -strum = { version = "0.26.2", features = ["derive"] } -tempfile = "3.10.1" -thiserror = "1.0.58" -time = { version = "0.3", features = ["formatting"] } -tokio = { version = "1.36.0", features = ["full"] } -tokio-util = { version = "0.7.12", features = ["rt"] } -toml = { version = "0.8.12", features = ["preserve_order"] } -tracing = "0.1.40" - -[dev-dependencies] -duct = "0.13.6" -nix = { version = "0.27", features = ["signal", "process"] } -rand_xorshift = "0.3.0" -regex = "1.10.3" -testdir = "0.9.1" -url = "2.5.0" -walkdir = "2" - -[features] -default = ["metrics"] -metrics = [] diff --git a/iroh-cli/README.md b/iroh-cli/README.md deleted file mode 100644 index 0f0c4c01a8..0000000000 --- a/iroh-cli/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# iroh-cli - -> The CLI for `iroh`. - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this project by you, as defined in the Apache-2.0 license, -shall be dual licensed as above, without any additional terms or conditions. 
diff --git a/iroh-cli/src/commands.rs b/iroh-cli/src/commands.rs deleted file mode 100644 index 741baf3f82..0000000000 --- a/iroh-cli/src/commands.rs +++ /dev/null @@ -1,216 +0,0 @@ -use std::{ - net::SocketAddr, - path::{Path, PathBuf}, -}; - -use anyhow::{ensure, Context, Result}; -use clap::Parser; -use iroh::client::Iroh; -use iroh_blobs::cli::{BlobAddOptions, BlobSource}; -use iroh_docs::cli::ConsoleEnv; - -use self::{rpc::RpcCommands, start::RunType}; -use crate::config::NodeConfig; - -pub(crate) mod console; -pub(crate) mod doctor; -pub(crate) mod rpc; -pub(crate) mod start; -pub(crate) use iroh_blobs::{cli as blobs, cli::tags}; -pub(crate) use iroh_docs::{cli as docs, cli::authors}; -pub(crate) use iroh_gossip::cli as gossip; -pub(crate) use iroh_node_util::cli::net; - -/// iroh is a tool for building distributed apps. -/// -/// For more information, visit: . -#[derive(Parser, Debug, Clone)] -#[clap(version, verbatim_doc_comment)] -pub(crate) struct Cli { - #[clap(subcommand)] - pub(crate) command: Commands, - - /// Path to the configuration file, see https://iroh.computer/docs/reference/config. - #[clap(long)] - pub(crate) config: Option, - - /// Start an iroh node in the background. - #[clap(long, global = true)] - start: bool, - - /// Address to serve metrics on. Disabled by default. - #[clap(long)] - pub(crate) metrics_addr: Option, - - /// Address to serve RPC on. - #[clap(long)] - pub(crate) rpc_addr: Option, - - /// Write metrics in CSV format at 100ms intervals. Disabled by default. - #[clap(long)] - pub(crate) metrics_dump_path: Option, -} - -/// Possible commands to run with the iroh CLI. -#[derive(Parser, Debug, Clone)] -pub(crate) enum Commands { - /// Start an iroh node. - /// - /// A node is a long-running process that serves data and connects to other nodes. - /// The console, doc, author, blob, node, and tag commands require a running node. - /// - /// `start` optionally takes a `--add SOURCE` option, which can be a file or a folder - /// to serve on startup. Data can also be added after startup with commands like - /// `iroh blob add` or by adding content to documents. - /// - /// For general configuration options see . - Start { - /// Optionally add a file or folder to the node. - /// - /// If set to `STDIN`, the data will be read from stdin. - /// - /// When left empty no content is added. - #[clap(long)] - add: Option, - - /// Options when adding data. - #[clap(flatten)] - add_options: BlobAddOptions, - }, - - /// Open the iroh console. - /// - /// The console is a REPL for interacting with a running iroh node. - /// For more info on available commands, see . - /// - /// For general configuration options see . - Console, - - /// Manage the RPC. - #[clap(flatten)] - Rpc(#[clap(subcommand)] RpcCommands), - - /// Diagnostic commands for the relay protocol. - Doctor { - /// Commands for doctor - defined in the mod - #[clap(subcommand)] - command: self::doctor::Commands, - }, -} - -impl Cli { - /// Run the CLI. - pub(crate) async fn run(self, data_dir: &Path) -> Result<()> { - // Initialize the metrics collection. - // - // The metrics are global per process. Subsequent calls do not change the metrics - // collection and will return an error. We ignore this error. This means that if you'd - // spawn multiple Iroh nodes in the same process, the metrics would be shared between the - // nodes. 
- #[cfg(feature = "metrics")] - iroh::metrics::try_init_metrics_collection().ok(); - - match self.command { - Commands::Console => { - let data_dir_owned = data_dir.to_owned(); - if self.start { - let config = Self::load_config(self.config, self.metrics_addr).await?; - start::run_with_command( - &config, - data_dir, - self.rpc_addr, - RunType::SingleCommandNoAbort, - |iroh| async move { - let env = - ConsoleEnv::for_console(data_dir_owned, &iroh.authors()).await?; - console::run(&iroh, &env).await - }, - ) - .await - } else { - iroh_node_util::logging::init_terminal_logging()?; - let iroh = if let Some(addr) = self.rpc_addr { - Iroh::connect_addr(addr).await.context("rpc connect")? - } else { - Iroh::connect_path(data_dir).await.context("rpc connect")? - }; - let env = ConsoleEnv::for_console(data_dir_owned, &iroh.authors()).await?; - console::run(&iroh, &env).await - } - } - Commands::Rpc(command) => { - let data_dir_owned = data_dir.to_owned(); - if self.start { - let config = Self::load_config(self.config, self.metrics_addr).await?; - start::run_with_command( - &config, - data_dir, - self.rpc_addr, - RunType::SingleCommandAbortable, - move |iroh| async move { - let env = ConsoleEnv::for_cli(data_dir_owned, &iroh.authors()).await?; - command.run(&iroh, &env).await - }, - ) - .await - } else { - iroh_node_util::logging::init_terminal_logging()?; - let iroh = if let Some(addr) = self.rpc_addr { - Iroh::connect_addr(addr).await.context("rpc connect")? - } else { - Iroh::connect_path(data_dir).await.context("rpc connect")? - }; - let env = ConsoleEnv::for_cli(data_dir_owned, &iroh.authors()).await?; - command.run(&iroh, &env).await - } - } - Commands::Start { add, add_options } => { - // if adding data on start, exit early if the path doesn't exist - if let Some(BlobSource::Path(ref path)) = add { - ensure!( - path.exists(), - "Cannot provide nonexistent path: {}", - path.display() - ); - } - let config = Self::load_config(self.config, self.metrics_addr).await?; - - let add_command = add.map(|source| blobs::BlobCommands::Add { - source, - options: add_options, - }); - - start::run_with_command( - &config, - data_dir, - self.rpc_addr, - RunType::UntilStopped, - |client| async move { - match add_command { - None => Ok(()), - Some(command) => { - let node_addr = client.net().node_addr().await?; - command.run(&client.blobs(), node_addr).await - } - } - }, - ) - .await - } - Commands::Doctor { command } => { - let config = Self::load_config(self.config, self.metrics_addr).await?; - self::doctor::run(command, &config).await - } - } - } - - /// Loads the configuration file or creates the default one, and sets the given metrics address. - async fn load_config( - config: Option, - metrics_addr: Option, - ) -> Result { - let mut config = NodeConfig::load(config.as_deref()).await?; - config.metrics_addr = metrics_addr; - Ok(config) - } -} diff --git a/iroh-cli/src/commands/console.rs b/iroh-cli/src/commands/console.rs deleted file mode 100644 index feebf58434..0000000000 --- a/iroh-cli/src/commands/console.rs +++ /dev/null @@ -1,145 +0,0 @@ -//! Define commands for the iroh console. 
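// Sketch of the pattern this file uses: the REPL couples blocking rustyline
// (on its own OS thread) to the async command runner with a bounded mpsc
// channel, and each command carries a oneshot sender so the prompt only
// returns once the command has finished. Self-contained version with
// illustrative names (not iroh API):

async fn repl_handshake_sketch() {
    use tokio::sync::{mpsc, oneshot};

    // Capacity 1: the readline thread submits one command at a time.
    let (cmd_tx, mut cmd_rx) = mpsc::channel::<(String, oneshot::Sender<()>)>(1);
    std::thread::spawn(move || {
        let (done_tx, done_rx) = oneshot::channel();
        // blocking_send/blocking_recv keep the readline side fully synchronous.
        cmd_tx.blocking_send(("help".to_string(), done_tx)).ok();
        done_rx.blocking_recv().ok();
    });
    while let Some((cmd, done)) = cmd_rx.recv().await {
        println!("would run: {cmd}");
        done.send(()).ok(); // unblock the prompt
    }
}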
- -use anyhow::Result; -use clap::{Parser, Subcommand}; -use colored::Colorize; -use iroh::{base::base32::fmt_short, client::Iroh}; -use iroh_docs::cli::ConsoleEnv; -use rustyline::{error::ReadlineError, Config, DefaultEditor}; -use tokio::sync::{mpsc, oneshot}; - -use crate::{commands::rpc::RpcCommands, config::ConsolePaths}; - -/// Runs the iroh console -pub async fn run(iroh: &Iroh, env: &ConsoleEnv) -> Result<()> { - println!("{}", "Welcome to the Iroh console!".purple().bold()); - println!("Type `{}` for a list of commands.", "help".bold()); - let mut from_repl = Repl::spawn(env.clone()); - while let Some((cmd, reply)) = from_repl.recv().await { - // allow to abort a running command with Ctrl-C - tokio::select! { - biased; - _ = tokio::signal::ctrl_c() => {}, - res = cmd.run(iroh, env) => { - if let Err(err) = res { - println!("{} {:?}", "Error:".red().bold(), err) - } - } - } - reply.send(()).ok(); - } - Ok(()) -} - -/// All the information for the REPL environment. -pub struct Repl { - env: ConsoleEnv, - cmd_tx: mpsc::Sender<(RpcCommands, oneshot::Sender<()>)>, -} - -impl Repl { - /// Creates a new REPL environment. - pub fn spawn(env: ConsoleEnv) -> mpsc::Receiver<(RpcCommands, oneshot::Sender<()>)> { - let (cmd_tx, cmd_rx) = mpsc::channel(1); - let repl = Repl { env, cmd_tx }; - std::thread::spawn(move || { - if let Err(err) = repl.run() { - println!("> repl crashed: {err}"); - } - }); - cmd_rx - } - - /// Run the REPL environment. - pub fn run(self) -> anyhow::Result<()> { - let mut rl = - DefaultEditor::with_config(Config::builder().check_cursor_position(true).build())?; - let history_path = ConsolePaths::History.with_iroh_data_dir(self.env.iroh_data_dir()); - rl.load_history(&history_path).ok(); - loop { - // prepare a channel to receive a signal from the main thread when a command completed - let (reply_tx, reply_rx) = oneshot::channel(); - let readline = rl.readline(&self.prompt()); - match readline { - Ok(line) if line.is_empty() => continue, - Ok(line) => { - rl.add_history_entry(line.as_str())?; - let cmd = parse_cmd::(&line); - match cmd { - None => continue, - Some(ReplCmd::Exit) => break, - Some(ReplCmd::Rpc(cmd)) => self.cmd_tx.blocking_send((cmd, reply_tx))?, - } - } - Err(ReadlineError::Interrupted) => { - println!("KeyboardInterrupt (press Ctrl-D to exit)"); - continue; - } - Err(ReadlineError::Eof) => break, - Err(ReadlineError::WindowResized) => continue, - Err(err) => return Err(err.into()), - } - // wait for reply from main thread - reply_rx.blocking_recv()?; - } - rl.save_history(&history_path).ok(); - Ok(()) - } - - /// Returns the prompt for the REPL as a `String`. - pub fn prompt(&self) -> String { - let mut pwd = String::new(); - let author = self.env.author(); - pwd.push_str(&format!( - "{}{} ", - "author:".blue(), - fmt_short(author.as_bytes()).blue().bold(), - )); - if let Some(doc) = &self.env.doc(None).ok() { - pwd.push_str(&format!( - "{}{} ", - "doc:".blue(), - fmt_short(doc.as_bytes()).blue().bold(), - )); - } - if !pwd.is_empty() { - pwd.push('\n'); - } - format!("\n{pwd}{}", "> ".blue()) - } -} - -/// The REPL commands. -#[derive(Debug, Parser)] -pub enum ReplCmd { - /// Run an RPC command in the REPL. - #[clap(flatten)] - Rpc(#[clap(subcommand)] RpcCommands), - /// Quit the Iroh console - #[clap(alias = "quit")] - Exit, -} - -/// Tries to convert a `&str`ing into a `clap` [`Subcommand`], and error if it fails. 
-fn try_parse_cmd(s: &str) -> anyhow::Result { - let args = shell_words::split(s)?; - let cmd = clap::Command::new("repl"); - let cmd = C::augment_subcommands(cmd); - let matches = cmd - .multicall(true) - .subcommand_required(true) - .try_get_matches_from(args)?; - let cmd = C::from_arg_matches(&matches)?; - Ok(cmd) -} - -/// Parses a `&str`ing into a `clap` [`Subcommand`]. -fn parse_cmd(s: &str) -> Option { - match try_parse_cmd::(s) { - Ok(cmd) => Some(cmd), - Err(err) => { - println!("{err}"); - None - } - } -} diff --git a/iroh-cli/src/commands/doctor.rs b/iroh-cli/src/commands/doctor.rs deleted file mode 100644 index 3ebad96439..0000000000 --- a/iroh-cli/src/commands/doctor.rs +++ /dev/null @@ -1,1570 +0,0 @@ -//! Tool to get information about the current network environment of a node, -//! and to test connectivity to specific other nodes. - -use std::{ - collections::HashMap, - io, - net::SocketAddr, - num::NonZeroU16, - path::PathBuf, - str::FromStr, - sync::Arc, - time::{Duration, Instant}, -}; - -use anyhow::Context; -use clap::Subcommand; -use console::style; -use crossterm::{ - event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyEventKind}, - execute, - terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, -}; -use derive_more::Display; -use futures_lite::StreamExt; -use indicatif::{HumanBytes, MultiProgress, ProgressBar}; -use iroh::{ - base::ticket::{BlobTicket, Ticket}, - blobs::{ - store::{ReadableStore, Store as _}, - util::progress::{AsyncChannelProgressSender, ProgressSender}, - }, - docs::{Capability, DocTicket}, - net::{ - defaults::DEFAULT_STUN_PORT, - discovery::{dns::DnsDiscovery, pkarr::PkarrPublisher, ConcurrentDiscovery, Discovery}, - dns::default_resolver, - endpoint::{self, Connection, ConnectionTypeStream, RecvStream, RemoteInfo, SendStream}, - key::{PublicKey, SecretKey}, - metrics::MagicsockMetrics, - ticket::NodeTicket, - Endpoint, NodeAddr, NodeId, RelayMap, RelayMode, RelayUrl, - }, - util::{path::IrohPaths, progress::ProgressWriter}, -}; -use iroh_metrics::core::Core; -use iroh_node_util::config::data_root; -use portable_atomic::AtomicU64; -use postcard::experimental::max_size::MaxSize; -use rand::Rng; -use ratatui::{prelude::*, widgets::*}; -use serde::{Deserialize, Serialize}; -use tokio::{io::AsyncWriteExt, sync}; -use tokio_util::task::AbortOnDropHandle; -use tracing::warn; - -use crate::config::NodeConfig; - -/// Options for the secret key usage. -#[derive(Debug, Clone, derive_more::Display)] -pub enum SecretKeyOption { - /// Generate random secret key - Random, - /// Use local secret key - Local, - /// Explicitly specify a secret key - Hex(String), -} - -impl std::str::FromStr for SecretKeyOption { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - let s_lower = s.to_ascii_lowercase(); - Ok(if s_lower == "random" { - SecretKeyOption::Random - } else if s_lower == "local" { - SecretKeyOption::Local - } else { - SecretKeyOption::Hex(s.to_string()) - }) - } -} - -/// Subcommands for the iroh doctor. -#[derive(Subcommand, Debug, Clone)] -pub enum Commands { - /// Report on the current network environment, using either an explicitly provided stun host - /// or the settings from the config file. - Report { - /// Explicitly provided stun host. If provided, this will disable relay and just do stun. - #[clap(long)] - stun_host: Option, - /// The port of the STUN server. 
- #[clap(long, default_value_t = DEFAULT_STUN_PORT)] - stun_port: u16, - }, - /// Wait for incoming requests from iroh doctor connect. - Accept { - /// Our own secret key, in hex. If not specified, the locally configured key will be used. - #[clap(long, default_value_t = SecretKeyOption::Local)] - secret_key: SecretKeyOption, - - /// Number of bytes to send to the remote for each test. - #[clap(long, default_value_t = 1024 * 1024 * 16)] - size: u64, - - /// Number of iterations to run the test for. If not specified, the test will run forever. - #[clap(long)] - iterations: Option, - - /// Use a local relay. - #[clap(long)] - local_relay_server: bool, - - /// Do not allow the node to dial and be dialed by id only. - /// - /// This disables DNS discovery, which would allow the node to dial other nodes by id only. - /// And it disables Pkarr Publishing, which would allow the node to announce its address for dns discovery. - /// - /// Default is `false` - #[clap(long, default_value_t = false)] - disable_discovery: bool, - }, - /// Connect to an iroh doctor accept node. - Connect { - /// Hexadecimal node id of the node to connect to. - dial: PublicKey, - - /// One or more remote endpoints to use when dialing. - #[clap(long)] - remote_endpoint: Vec, - - /// Our own secret key, in hex. If not specified, a random key will be generated. - #[clap(long, default_value_t = SecretKeyOption::Random)] - secret_key: SecretKeyOption, - - /// Use a local relay: - /// - /// Overrides the `relay_url` field. - #[clap(long)] - local_relay_server: bool, - - /// The relay url the peer you are dialing can be found on. - /// - /// If `local_relay_server` is true, this field is ignored. - /// - /// When `None`, or if attempting to dial an unknown url, no hole punching can occur. - /// - /// Default is `None`. - #[clap(long)] - relay_url: Option, - - /// Do not allow the node to dial and be dialed by id only. - /// - /// This disables DNS discovery, which would allow the node to dial other nodes by id only. - /// It also disables Pkarr Publishing, which would allow the node to announce its address for DNS discovery. - /// - /// Default is `false` - #[clap(long, default_value_t = false)] - disable_discovery: bool, - }, - /// Probe the port mapping protocols. - PortMapProbe { - /// Whether to enable UPnP. - #[clap(long)] - enable_upnp: bool, - /// Whether to enable PCP. - #[clap(long)] - enable_pcp: bool, - /// Whether to enable NAT-PMP. - #[clap(long)] - enable_nat_pmp: bool, - }, - /// Attempt to get a port mapping to the given local port. - PortMap { - /// Protocol to use for port mapping. One of ["upnp", "nat_pmp", "pcp"]. - protocol: String, - /// Local port to get a mapping. - local_port: NonZeroU16, - /// How long to wait for an external port to be ready in seconds. - #[clap(long, default_value_t = 10)] - timeout_secs: u64, - }, - /// Get the latencies of the different relay url - /// - /// Tests the latencies of the default relay url and nodes. To test custom urls or nodes, - /// adjust the `Config`. - RelayUrls { - /// How often to execute. - #[clap(long, default_value_t = 5)] - count: usize, - }, - /// Inspect a ticket. - TicketInspect { - ticket: String, - #[clap(long)] - zbase32: bool, - }, - /// Perform a metadata consistency check on a blob store. - BlobConsistencyCheck { - /// Path of the blob store to validate. For iroh, this is the blobs subdirectory - /// in the iroh data directory. But this can also be used for apps that embed - /// just iroh-blobs. 
- path: PathBuf, - /// Try to get the store into a consistent state by removing orphaned data - /// and broken entries. - /// - /// Caution, this might remove data. - #[clap(long)] - repair: bool, - }, - /// Validate the actual content of a blob store. - BlobValidate { - /// Path of the blob store to validate. For iroh, this is the blobs subdirectory - /// in the iroh data directory. But this can also be used for apps that embed - /// just iroh-blobs. - path: PathBuf, - /// Try to get the store into a consistent state by downgrading entries from - /// complete to partial if data is missing etc. - #[clap(long)] - repair: bool, - }, - /// Plot metric counters - Plot { - /// How often to collect samples in milliseconds. - #[clap(long, default_value_t = 500)] - interval: u64, - /// Which metrics to plot. Commas separated list of metric names. - metrics: String, - /// What the plotted time frame should be in seconds. - #[clap(long, default_value_t = 60)] - timeframe: usize, - /// Endpoint to scrape for prometheus metrics - #[clap(long, default_value = "http://localhost:9090")] - scrape_url: String, - /// File to read the metrics from. Takes precedence over scrape_url. - #[clap(long)] - file: Option, - }, -} - -/// Possible streams that can be requested. -#[derive(Debug, Serialize, Deserialize, MaxSize)] -enum TestStreamRequest { - Echo { bytes: u64 }, - Drain { bytes: u64 }, - Send { bytes: u64, block_size: u32 }, -} - -/// Configuration for testing. -#[derive(Debug, Clone, Copy)] -struct TestConfig { - size: u64, - iterations: Option, -} - -/// Updates the progress bar. -fn update_pb( - task: &'static str, - pb: Option, - total_bytes: u64, - mut updates: sync::mpsc::Receiver, -) -> tokio::task::JoinHandle<()> { - if let Some(pb) = pb { - pb.set_message(task); - pb.set_position(0); - pb.set_length(total_bytes); - tokio::spawn(async move { - while let Some(position) = updates.recv().await { - pb.set_position(position); - } - }) - } else { - tokio::spawn(std::future::ready(())) - } -} - -/// Handles a test stream request. 
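// Wire-format note: each test stream starts with a fixed-size header. The
// request is postcard-encoded into a POSTCARD_MAX_SIZE buffer so the peer can
// read_exact() it without any length framing; trailing zero padding is simply
// ignored on decode. Round-trip sketch of that scheme:

fn request_header_roundtrip() -> anyhow::Result<()> {
    use postcard::experimental::max_size::MaxSize;

    let mut buf = [0u8; TestStreamRequest::POSTCARD_MAX_SIZE];
    postcard::to_slice(&TestStreamRequest::Echo { bytes: 1024 }, &mut buf)?;
    let decoded: TestStreamRequest = postcard::from_bytes(&buf)?;
    debug_assert!(matches!(decoded, TestStreamRequest::Echo { bytes: 1024 }));
    Ok(())
}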
-async fn handle_test_request( - mut send: SendStream, - mut recv: RecvStream, - gui: &Gui, -) -> anyhow::Result<()> { - let mut buf = [0u8; TestStreamRequest::POSTCARD_MAX_SIZE]; - recv.read_exact(&mut buf).await?; - let request: TestStreamRequest = postcard::from_bytes(&buf)?; - let pb = Some(gui.pb.clone()); - match request { - TestStreamRequest::Echo { bytes } => { - // copy the stream back - let (mut send, updates) = ProgressWriter::new(&mut send); - let t0 = Instant::now(); - let progress = update_pb("echo", pb, bytes, updates); - tokio::io::copy(&mut recv, &mut send).await?; - let elapsed = t0.elapsed(); - drop(send); - progress.await?; - gui.set_echo(bytes, elapsed); - } - TestStreamRequest::Drain { bytes } => { - // drain the stream - let (mut send, updates) = ProgressWriter::new(tokio::io::sink()); - let progress = update_pb("recv", pb, bytes, updates); - let t0 = Instant::now(); - tokio::io::copy(&mut recv, &mut send).await?; - let elapsed = t0.elapsed(); - drop(send); - progress.await?; - gui.set_recv(bytes, elapsed); - } - TestStreamRequest::Send { bytes, block_size } => { - // send the requested number of bytes, in blocks of the requested size - let (mut send, updates) = ProgressWriter::new(&mut send); - let progress = update_pb("send", pb, bytes, updates); - let t0 = Instant::now(); - send_blocks(&mut send, bytes, block_size).await?; - drop(send); - let elapsed = t0.elapsed(); - progress.await?; - gui.set_send(bytes, elapsed); - } - } - send.finish()?; - Ok(()) -} - -/// Sends the requested number of bytes, in blocks of the requested size. -async fn send_blocks( - mut send: impl tokio::io::AsyncWrite + Unpin, - total_bytes: u64, - block_size: u32, -) -> anyhow::Result<()> { - let buf = vec![0u8; block_size as usize]; - let mut remaining = total_bytes; - while remaining > 0 { - let n = remaining.min(block_size as u64); - send.write_all(&buf[..n as usize]).await?; - remaining -= n; - } - Ok(()) -} - -/// Prints a client report. -async fn report( - stun_host: Option, - stun_port: u16, - config: &NodeConfig, -) -> anyhow::Result<()> { - let port_mapper = portmapper::Client::default(); - let dns_resolver = default_resolver().clone(); - let mut client = net_report::Client::new(Some(port_mapper), dns_resolver)?; - - let dm = match stun_host { - Some(host_name) => { - let url = host_name.parse()?; - // creating a relay map from host name and stun port - RelayMap::default_from_node(url, stun_port) - } - None => config.relay_map()?.unwrap_or_else(RelayMap::empty), - }; - println!("getting report using relay map {dm:#?}"); - - let r = client.get_report(dm, None, None).await?; - println!("{r:#?}"); - Ok(()) -} - -/// Contains all the GUI state. -struct Gui { - #[allow(dead_code)] - mp: MultiProgress, - pb: ProgressBar, - #[allow(dead_code)] - counters: ProgressBar, - send_pb: ProgressBar, - recv_pb: ProgressBar, - echo_pb: ProgressBar, - #[allow(dead_code)] - counter_task: Option>, -} - -impl Gui { - /// Create a new GUI struct. 
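// indicatif note: every line of this dashboard is a hidden ProgressBar added
// to one MultiProgress and styled with the bare "{msg}" template, which turns
// each bar into an addressable text row updated via set_message(). Only `pb`
// renders as a real progress bar; the rest are status lines.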
- fn new(endpoint: Endpoint, node_id: NodeId) -> Self { - let mp = MultiProgress::new(); - mp.set_draw_target(indicatif::ProgressDrawTarget::stderr()); - let counters = mp.add(ProgressBar::hidden()); - let remote_info = mp.add(ProgressBar::hidden()); - let send_pb = mp.add(ProgressBar::hidden()); - let recv_pb = mp.add(ProgressBar::hidden()); - let echo_pb = mp.add(ProgressBar::hidden()); - let style = indicatif::ProgressStyle::default_bar() - .template("{msg}") - .unwrap(); - send_pb.set_style(style.clone()); - recv_pb.set_style(style.clone()); - echo_pb.set_style(style.clone()); - remote_info.set_style(style.clone()); - counters.set_style(style); - let pb = mp.add(indicatif::ProgressBar::hidden()); - pb.enable_steady_tick(Duration::from_millis(100)); - pb.set_style(indicatif::ProgressStyle::default_bar() - .template("{spinner:.green} [{bar:80.cyan/blue}] {msg} {bytes}/{total_bytes} ({bytes_per_sec})").unwrap() - .progress_chars("█▉▊▋▌▍▎▏ ")); - let counters2 = counters.clone(); - let counter_task = tokio::spawn(async move { - loop { - Self::update_counters(&counters2); - Self::update_remote_info(&remote_info, &endpoint, &node_id); - tokio::time::sleep(Duration::from_millis(100)).await; - } - }); - Self { - mp, - pb, - counters, - send_pb, - recv_pb, - echo_pb, - counter_task: Some(AbortOnDropHandle::new(counter_task)), - } - } - - /// Updates the information of the target progress bar. - fn update_remote_info(target: &ProgressBar, endpoint: &Endpoint, node_id: &NodeId) { - let format_latency = |x: Option| { - x.map(|x| format!("{:.6}s", x.as_secs_f64())) - .unwrap_or_else(|| "unknown".to_string()) - }; - let msg = match endpoint.remote_info(*node_id) { - Some(RemoteInfo { - relay_url, - conn_type, - latency, - addrs, - .. - }) => { - let relay_url = relay_url - .map(|x| x.relay_url.to_string()) - .unwrap_or_else(|| "unknown".to_string()); - let latency = format_latency(latency); - let addrs = addrs - .into_iter() - .map(|addr_info| { - format!("{} ({})", addr_info.addr, format_latency(addr_info.latency)) - }) - .collect::>() - .join("; "); - format!( - "relay url: {}, latency: {}, connection type: {}, addrs: [{}]", - relay_url, latency, conn_type, addrs - ) - } - None => "connection info unavailable".to_string(), - }; - target.set_message(msg); - } - - /// Updates the counters for the target progress bar. - fn update_counters(target: &ProgressBar) { - if let Some(core) = Core::get() { - let metrics = core.get_collector::().unwrap(); - let send_ipv4 = HumanBytes(metrics.send_ipv4.get()); - let send_ipv6 = HumanBytes(metrics.send_ipv6.get()); - let send_relay = HumanBytes(metrics.send_relay.get()); - let recv_data_relay = HumanBytes(metrics.recv_data_relay.get()); - let recv_data_ipv4 = HumanBytes(metrics.recv_data_ipv4.get()); - let recv_data_ipv6 = HumanBytes(metrics.recv_data_ipv6.get()); - let text = format!( - r#"Counters - -Relay: - send: {send_relay} - recv: {recv_data_relay} -Ipv4: - send: {send_ipv4} - recv: {recv_data_ipv4} -Ipv6: - send: {send_ipv6} - recv: {recv_data_ipv6} -"#, - ); - target.set_message(text); - } - } - - /// Sets the "send" text and the speed for the progress bar. - fn set_send(&self, bytes: u64, duration: Duration) { - Self::set_bench_speed(&self.send_pb, "send", bytes, duration); - } - - /// Sets the "recv" text and the speed for the progress bar. - fn set_recv(&self, bytes: u64, duration: Duration) { - Self::set_bench_speed(&self.recv_pb, "recv", bytes, duration); - } - - /// Sets the "echo" text and the speed for the progress bar. 
- fn set_echo(&self, bytes: u64, duration: Duration) { - Self::set_bench_speed(&self.echo_pb, "echo", bytes, duration); - } - - /// Sets a text and the speed for the progress bar. - fn set_bench_speed(pb: &ProgressBar, text: &str, bytes: u64, duration: Duration) { - pb.set_message(format!( - "{}: {}/s", - text, - HumanBytes((bytes as f64 / duration.as_secs_f64()) as u64) - )); - } - - /// Clears the [`MultiProgress`] field. - fn clear(&self) { - self.mp.clear().ok(); - } -} - -/// Sends, receives and echoes data in a connection. -async fn active_side( - connection: Connection, - config: &TestConfig, - gui: Option<&Gui>, -) -> anyhow::Result<()> { - let n = config.iterations.unwrap_or(u64::MAX); - if let Some(gui) = gui { - let pb = Some(&gui.pb); - for _ in 0..n { - let d = send_test(&connection, config, pb).await?; - gui.set_send(config.size, d); - let d = recv_test(&connection, config, pb).await?; - gui.set_recv(config.size, d); - let d = echo_test(&connection, config, pb).await?; - gui.set_echo(config.size, d); - } - } else { - let pb = None; - for _ in 0..n { - let _d = send_test(&connection, config, pb).await?; - let _d = recv_test(&connection, config, pb).await?; - let _d = echo_test(&connection, config, pb).await?; - } - } - Ok(()) -} - -/// Sends a test request in a connection. -async fn send_test_request( - send: &mut SendStream, - request: &TestStreamRequest, -) -> anyhow::Result<()> { - let mut buf = [0u8; TestStreamRequest::POSTCARD_MAX_SIZE]; - postcard::to_slice(&request, &mut buf)?; - send.write_all(&buf).await?; - Ok(()) -} - -/// Echoes test a connection. -async fn echo_test( - connection: &Connection, - config: &TestConfig, - pb: Option<&indicatif::ProgressBar>, -) -> anyhow::Result { - let size = config.size; - let (mut send, mut recv) = connection.open_bi().await?; - send_test_request(&mut send, &TestStreamRequest::Echo { bytes: size }).await?; - let (mut sink, updates) = ProgressWriter::new(tokio::io::sink()); - let copying = tokio::spawn(async move { tokio::io::copy(&mut recv, &mut sink).await }); - let progress = update_pb("echo", pb.cloned(), size, updates); - let t0 = Instant::now(); - send_blocks(&mut send, size, 1024 * 1024).await?; - send.finish()?; - let received = copying.await??; - anyhow::ensure!(received == size); - let duration = t0.elapsed(); - progress.await?; - Ok(duration) -} - -/// Sends test a connection. -async fn send_test( - connection: &Connection, - config: &TestConfig, - pb: Option<&indicatif::ProgressBar>, -) -> anyhow::Result { - let size = config.size; - let (mut send, mut recv) = connection.open_bi().await?; - send_test_request(&mut send, &TestStreamRequest::Drain { bytes: size }).await?; - let (mut send_with_progress, updates) = ProgressWriter::new(&mut send); - let copying = - tokio::spawn(async move { tokio::io::copy(&mut recv, &mut tokio::io::sink()).await }); - let progress = update_pb("send", pb.cloned(), size, updates); - let t0 = Instant::now(); - send_blocks(&mut send_with_progress, size, 1024 * 1024).await?; - drop(send_with_progress); - send.finish()?; - drop(send); - let received = copying.await??; - anyhow::ensure!(received == 0); - let duration = t0.elapsed(); - progress.await?; - Ok(duration) -} - -/// Receives test a connection. 
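// All three benchmarks share one shape: open a bi-directional stream, write
// the fixed-size request header, move `size` bytes, and report bytes/elapsed
// via set_bench_speed (16 MiB moved in 0.5 s displays as ~32 MiB/s). Timing
// sketch of the receive half:

async fn timed_drain(mut recv: impl tokio::io::AsyncRead + Unpin) -> anyhow::Result<f64> {
    let t0 = std::time::Instant::now();
    let n = tokio::io::copy(&mut recv, &mut tokio::io::sink()).await?;
    Ok(n as f64 / t0.elapsed().as_secs_f64()) // bytes per second
}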
-async fn recv_test( - connection: &Connection, - config: &TestConfig, - pb: Option<&indicatif::ProgressBar>, -) -> anyhow::Result { - let size = config.size; - let (mut send, mut recv) = connection.open_bi().await?; - let t0 = Instant::now(); - let (mut sink, updates) = ProgressWriter::new(tokio::io::sink()); - send_test_request( - &mut send, - &TestStreamRequest::Send { - bytes: size, - block_size: 1024 * 1024, - }, - ) - .await?; - let copying = tokio::spawn(async move { tokio::io::copy(&mut recv, &mut sink).await }); - let progress = update_pb("recv", pb.cloned(), size, updates); - send.finish()?; - let received = copying.await??; - anyhow::ensure!(received == size); - let duration = t0.elapsed(); - progress.await?; - Ok(duration) -} - -/// Accepts connections and answers requests (echo, drain or send) as passive side. -async fn passive_side(gui: Gui, connection: Connection) -> anyhow::Result<()> { - loop { - match connection.accept_bi().await { - Ok((send, recv)) => { - if let Err(cause) = handle_test_request(send, recv, &gui).await { - eprintln!("Error handling test request {cause}"); - } - } - Err(cause) => { - eprintln!("error accepting bidi stream {cause}"); - break Err(cause.into()); - } - }; - } -} - -/// Configures a relay map with some default values. -fn configure_local_relay_map() -> RelayMap { - let stun_port = DEFAULT_STUN_PORT; - let url = "http://localhost:3340".parse().unwrap(); - RelayMap::default_from_node(url, stun_port) -} - -/// ALPN protocol address. -const DR_RELAY_ALPN: [u8; 11] = *b"n0/drderp/1"; - -/// Creates an iroh net [`Endpoint`] from a [SecreetKey`], a [`RelayMap`] and a [`Discovery`]. -async fn make_endpoint( - secret_key: SecretKey, - relay_map: Option, - discovery: Option>, -) -> anyhow::Result { - tracing::info!( - "public key: {}", - hex::encode(secret_key.public().as_bytes()) - ); - tracing::info!("relay map {:#?}", relay_map); - - let mut transport_config = endpoint::TransportConfig::default(); - transport_config.keep_alive_interval(Some(Duration::from_secs(5))); - transport_config.max_idle_timeout(Some(Duration::from_secs(10).try_into().unwrap())); - - let endpoint = Endpoint::builder() - .secret_key(secret_key) - .alpns(vec![DR_RELAY_ALPN.to_vec()]) - .transport_config(transport_config); - - let endpoint = match discovery { - Some(discovery) => endpoint.discovery(discovery), - None => endpoint, - }; - - let endpoint = match relay_map { - Some(relay_map) => endpoint.relay_mode(RelayMode::Custom(relay_map)), - None => endpoint, - }; - let endpoint = endpoint.bind().await?; - - tokio::time::timeout(Duration::from_secs(10), endpoint.direct_addresses().next()) - .await - .context("wait for relay connection")? - .context("no endpoints")?; - - Ok(endpoint) -} - -/// Connects to a [`NodeId`]. 
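// Dialing sketch: all `connect` below needs is a NodeAddr (node id plus
// optional relay url and direct addresses; discovery fills the gaps when both
// are empty) and the ALPN each side registered. Compressed version, assuming
// the iroh-net 0.28 signatures used in this file:

async fn dial_sketch(endpoint: &Endpoint, node_id: NodeId) -> anyhow::Result<Connection> {
    let addr = NodeAddr::from_parts(node_id, None, vec![]);
    Ok(endpoint.connect(addr, &DR_RELAY_ALPN).await?)
}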
-async fn connect( - node_id: NodeId, - secret_key: SecretKey, - direct_addresses: Vec, - relay_url: Option, - relay_map: Option, - discovery: Option>, -) -> anyhow::Result<()> { - let endpoint = make_endpoint(secret_key, relay_map, discovery).await?; - - tracing::info!("dialing {:?}", node_id); - let node_addr = NodeAddr::from_parts(node_id, relay_url, direct_addresses); - let conn = endpoint.connect(node_addr, &DR_RELAY_ALPN).await; - match conn { - Ok(connection) => { - let maybe_stream = endpoint.conn_type_stream(node_id); - let gui = Gui::new(endpoint, node_id); - if let Ok(stream) = maybe_stream { - log_connection_changes(gui.mp.clone(), node_id, stream); - } - - if let Err(cause) = passive_side(gui, connection).await { - eprintln!("error handling connection: {cause}"); - } - } - Err(cause) => { - eprintln!("unable to connect to {node_id}: {cause}"); - } - } - - Ok(()) -} - -/// Formats a [`SocketAddr`] so that console doesn't escape it. -fn format_addr(addr: SocketAddr) -> String { - if addr.is_ipv6() { - format!("'{addr}'") - } else { - format!("{addr}") - } -} - -/// Accepts the connections. -async fn accept( - secret_key: SecretKey, - config: TestConfig, - relay_map: Option, - discovery: Option>, -) -> anyhow::Result<()> { - let endpoint = make_endpoint(secret_key.clone(), relay_map, discovery).await?; - let endpoints = endpoint - .direct_addresses() - .next() - .await - .context("no endpoints")?; - let remote_addrs = endpoints - .iter() - .map(|endpoint| format!("--remote-endpoint {}", format_addr(endpoint.addr))) - .collect::>() - .join(" "); - println!("Connect to this node using one of the following commands:\n"); - println!( - "\tUsing the relay url and direct connections:\niroh doctor connect {} {}\n", - secret_key.public(), - remote_addrs, - ); - if let Some(relay_url) = endpoint.home_relay() { - println!( - "\tUsing just the relay url:\niroh doctor connect {} --relay-url {}\n", - secret_key.public(), - relay_url, - ); - } - if endpoint.discovery().is_some() { - println!( - "\tUsing just the node id:\niroh doctor connect {}\n", - secret_key.public(), - ); - } - let connections = Arc::new(AtomicU64::default()); - while let Some(incoming) = endpoint.accept().await { - let connecting = match incoming.accept() { - Ok(connecting) => connecting, - Err(err) => { - warn!("incoming connection failed: {err:#}"); - // we can carry on in these cases: - // this can be caused by retransmitted datagrams - continue; - } - }; - let connections = connections.clone(); - let endpoint = endpoint.clone(); - tokio::task::spawn(async move { - let n = connections.fetch_add(1, portable_atomic::Ordering::SeqCst); - match connecting.await { - Ok(connection) => { - if n == 0 { - let Ok(remote_peer_id) = endpoint::get_remote_node_id(&connection) else { - return; - }; - println!("Accepted connection from {}", remote_peer_id); - let t0 = Instant::now(); - let gui = Gui::new(endpoint.clone(), remote_peer_id); - if let Ok(stream) = endpoint.conn_type_stream(remote_peer_id) { - log_connection_changes(gui.mp.clone(), remote_peer_id, stream); - } - let res = active_side(connection, &config, Some(&gui)).await; - gui.clear(); - let dt = t0.elapsed().as_secs_f64(); - if let Err(cause) = res { - eprintln!("Test finished after {dt}s: {cause}",); - } else { - eprintln!("Test finished after {dt}s",); - } - } else { - // silent - active_side(connection, &config, None).await.ok(); - } - } - Err(cause) => { - eprintln!("error accepting connection {cause}"); - } - }; - connections.sub(1, 
portable_atomic::Ordering::SeqCst); - }); - } - - Ok(()) -} - -/// Logs the connection changes to the multiprogress. -fn log_connection_changes(pb: MultiProgress, node_id: NodeId, mut stream: ConnectionTypeStream) { - tokio::spawn(async move { - let start = Instant::now(); - while let Some(conn_type) = stream.next().await { - pb.println(format!( - "Connection with {node_id:#} changed: {conn_type} (after {:?})", - start.elapsed() - )) - .ok(); - } - }); -} - -/// Checks if there's a port mapping in the local port, and if it's ready. -async fn port_map(protocol: &str, local_port: NonZeroU16, timeout: Duration) -> anyhow::Result<()> { - // Create the config that enables exclusively the required protocol - let mut enable_upnp = false; - let mut enable_pcp = false; - let mut enable_nat_pmp = false; - match protocol.to_ascii_lowercase().as_ref() { - "upnp" => enable_upnp = true, - "nat_pmp" => enable_nat_pmp = true, - "pcp" => enable_pcp = true, - other => anyhow::bail!("Unknown port mapping protocol {other}"), - } - let config = portmapper::Config { - enable_upnp, - enable_pcp, - enable_nat_pmp, - }; - let port_mapper = portmapper::Client::new(config); - let mut watcher = port_mapper.watch_external_address(); - port_mapper.update_local_port(local_port); - - // Wait for the mapping to be ready, or timeout waiting for a change. - match tokio::time::timeout(timeout, watcher.changed()).await { - Ok(Ok(_)) => match *watcher.borrow() { - Some(address) => { - println!("Port mapping ready: {address}"); - // Ensure the port mapper remains alive until the end. - drop(port_mapper); - Ok(()) - } - None => anyhow::bail!("No port mapping found"), - }, - Ok(Err(_recv_err)) => anyhow::bail!("Service dropped. This is a bug"), - Err(_) => anyhow::bail!("Timed out waiting for a port mapping"), - } -} - -/// Probes a port map. -async fn port_map_probe(config: portmapper::Config) -> anyhow::Result<()> { - println!("probing port mapping protocols with {config:?}"); - let port_mapper = portmapper::Client::new(config); - let probe_rx = port_mapper.probe(); - let probe = probe_rx.await?.map_err(|e| anyhow::anyhow!(e))?; - println!("{probe}"); - Ok(()) -} - -/// Checks a certain amount (`count`) of the nodes given by the [`NodeConfig`]. -async fn relay_urls(count: usize, config: NodeConfig) -> anyhow::Result<()> { - let key = SecretKey::generate(); - if config.relay_nodes.is_empty() { - println!("No relay nodes specified in the config file."); - } - - let dns_resolver = default_resolver(); - let mut clients = HashMap::new(); - for node in &config.relay_nodes { - let secret_key = key.clone(); - let client = iroh::net::relay::HttpClientBuilder::new(node.url.clone()) - .build(secret_key, dns_resolver.clone()); - - clients.insert(node.url.clone(), client); - } - - let mut success = Vec::new(); - let mut fail = Vec::new(); - - for i in 0..count { - println!("Round {}/{count}", i + 1); - let relay_nodes = config.relay_nodes.clone(); - for node in relay_nodes.into_iter() { - let mut node_details = NodeDetails { - connect: None, - latency: None, - error: None, - host: node.url.clone(), - }; - - let client = clients.get(&node.url).map(|(c, _)| c.clone()).unwrap(); - - if client.is_connected().await? 
{ - client.close_for_reconnect().await?; - } - assert!(!client.is_connected().await?); - - let start = std::time::Instant::now(); - match tokio::time::timeout(Duration::from_secs(2), client.connect()).await { - Err(e) => { - tracing::warn!("connect timeout"); - node_details.error = Some(e.to_string()); - } - Ok(Err(e)) => { - tracing::warn!("connect error"); - node_details.error = Some(e.to_string()); - } - Ok(_) => { - assert!(client.is_connected().await?); - node_details.connect = Some(start.elapsed()); - - match client.ping().await { - Ok(latency) => { - node_details.latency = Some(latency); - } - Err(e) => { - tracing::warn!("ping error: {:?}", e); - node_details.error = Some(e.to_string()); - } - } - } - } - - if node_details.error.is_none() { - success.push(node_details); - } else { - fail.push(node_details); - } - } - } - - // success.sort_by_key(|d| d.latency); - if !success.is_empty() { - println!("Relay Node Latencies:"); - println!(); - } - for node in success { - println!("{node}"); - println!(); - } - if !fail.is_empty() { - println!("Connection Failures:"); - println!(); - } - for node in fail { - println!("{node}"); - println!(); - } - - Ok(()) -} - -/// Information about a node and its connection. -struct NodeDetails { - connect: Option, - latency: Option, - host: RelayUrl, - error: Option, -} - -impl std::fmt::Display for NodeDetails { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.error { - None => { - write!( - f, - "Node {}\nConnect: {:?}\nLatency: {:?}", - self.host, - self.connect.unwrap_or_default(), - self.latency.unwrap_or_default(), - ) - } - Some(ref err) => { - write!(f, "Node {}\nConnection Error: {:?}", self.host, err,) - } - } - } -} - -/// Creates a [`SecretKey`] from a [`SecretKeyOption`]. -fn create_secret_key(secret_key: SecretKeyOption) -> anyhow::Result { - Ok(match secret_key { - SecretKeyOption::Random => SecretKey::generate(), - SecretKeyOption::Hex(hex) => { - let bytes = hex::decode(hex)?; - SecretKey::try_from(&bytes[..])? - } - SecretKeyOption::Local => { - let path = IrohPaths::SecretKey.with_root(data_root("iroh")?); - if path.exists() { - let bytes = std::fs::read(&path)?; - SecretKey::try_from_openssh(bytes)? - } else { - println!( - "Local key not found in {}. Using random key.", - path.display() - ); - SecretKey::generate() - } - } - }) -} - -/// Creates a [`Discovery`] service from a [`SecretKey`]. -fn create_discovery(disable_discovery: bool, secret_key: &SecretKey) -> Option> { - if disable_discovery { - None - } else { - Some(Box::new(ConcurrentDiscovery::from_services(vec![ - // Enable DNS discovery by default - Box::new(DnsDiscovery::n0_dns()), - // Enable pkarr publishing by default - Box::new(PkarrPublisher::n0_dns(secret_key.clone())), - ]))) - } -} - -/// Prints a string in bold. -fn bold(x: T) -> String { - style(x).bold().to_string() -} - -/// Converts a [`NodeId`] public key to a [`zbase32`] string. -fn to_z32(node_id: NodeId) -> String { - pkarr::PublicKey::try_from(node_id.as_bytes()) - .unwrap() - .to_z32() -} - -/// Prints the node's address give a [`NodeAddr`], a prefix (`&str`), -/// and whether or not it is zbase32. 
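// Why zbase32: pkarr publishes node records under DNS names, and z-base-32 is
// its alphabet for turning keys into labels. A 32-byte ed25519 public key
// encodes to a 52-character lowercase label (256 bits / 5 bits per character,
// rounded up), which is what `to_z32` above produces via pkarr::PublicKey.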
-fn print_node_addr(prefix: &str, node_addr: &NodeAddr, zbase32: bool) { - let node = if zbase32 { - to_z32(node_addr.node_id) - } else { - node_addr.node_id.to_string() - }; - println!("{}node-id: {}", prefix, bold(node)); - if let Some(relay_url) = node_addr.relay_url() { - println!("{}relay-url: {}", prefix, bold(relay_url)); - } - for addr in node_addr.direct_addresses() { - println!("{}addr: {}", prefix, bold(addr.to_string())); - } -} - -/// Inspects the ticker by printing its information. -fn inspect_ticket(ticket: &str, zbase32: bool) -> anyhow::Result<()> { - if ticket.starts_with(BlobTicket::KIND) { - let ticket = BlobTicket::from_str(ticket).context("failed parsing blob ticket")?; - println!("BlobTicket"); - println!(" hash: {}", bold(ticket.hash())); - println!(" format: {}", bold(ticket.format())); - println!(" NodeInfo"); - print_node_addr(" ", ticket.node_addr(), zbase32); - } else if ticket.starts_with(DocTicket::KIND) { - let ticket = DocTicket::from_str(ticket).context("failed parsing doc ticket")?; - println!("DocTicket:\n"); - match ticket.capability { - Capability::Read(namespace) => { - println!(" read: {}", bold(namespace)); - } - Capability::Write(secret) => { - println!(" write: {}", bold(secret)); - } - } - for node in &ticket.nodes { - print_node_addr(" ", node, zbase32); - } - } else if ticket.starts_with(NodeTicket::KIND) { - let ticket = NodeTicket::from_str(ticket).context("failed parsing node ticket")?; - println!("NodeTicket"); - print_node_addr(" ", ticket.node_addr(), zbase32); - } - - Ok(()) -} - -/// Runs the doctor commands. -pub async fn run(command: Commands, config: &NodeConfig) -> anyhow::Result<()> { - let data_dir = data_root("iroh")?; - let _guard = - iroh_node_util::logging::init_terminal_and_file_logging(&config.file_logs, &data_dir)?; - let metrics_fut = super::start::start_metrics_server(config.metrics_addr); - let cmd_res = match command { - Commands::Report { - stun_host, - stun_port, - } => report(stun_host, stun_port, config).await, - Commands::Connect { - dial, - secret_key, - local_relay_server, - relay_url, - remote_endpoint, - disable_discovery, - } => { - let (relay_map, relay_url) = if local_relay_server { - let dm = configure_local_relay_map(); - let url = dm.urls().next().unwrap().clone(); - (Some(dm), Some(url)) - } else { - (config.relay_map()?, relay_url) - }; - let secret_key = create_secret_key(secret_key)?; - - let discovery = create_discovery(disable_discovery, &secret_key); - connect( - dial, - secret_key, - remote_endpoint, - relay_url, - relay_map, - discovery, - ) - .await - } - Commands::Accept { - secret_key, - local_relay_server, - size, - iterations, - disable_discovery, - } => { - let relay_map = if local_relay_server { - Some(configure_local_relay_map()) - } else { - config.relay_map()? 
- }; - let secret_key = create_secret_key(secret_key)?; - let config = TestConfig { size, iterations }; - let discovery = create_discovery(disable_discovery, &secret_key); - accept(secret_key, config, relay_map, discovery).await - } - Commands::PortMap { - protocol, - local_port, - timeout_secs, - } => port_map(&protocol, local_port, Duration::from_secs(timeout_secs)).await, - Commands::PortMapProbe { - enable_upnp, - enable_pcp, - enable_nat_pmp, - } => { - let config = portmapper::Config { - enable_upnp, - enable_pcp, - enable_nat_pmp, - }; - - port_map_probe(config).await - } - Commands::RelayUrls { count } => { - let config = NodeConfig::load(None).await?; - relay_urls(count, config).await - } - Commands::TicketInspect { ticket, zbase32 } => inspect_ticket(&ticket, zbase32), - Commands::BlobConsistencyCheck { path, repair } => { - let blob_store = iroh::blobs::store::fs::Store::load(path).await?; - let (send, recv) = async_channel::bounded(1); - let task = tokio::spawn(async move { - while let Ok(msg) = recv.recv().await { - println!("{:?}", msg); - } - }); - blob_store - .consistency_check(repair, AsyncChannelProgressSender::new(send).boxed()) - .await?; - task.await?; - Ok(()) - } - Commands::BlobValidate { path, repair } => { - let blob_store = iroh::blobs::store::fs::Store::load(path).await?; - let (send, recv) = async_channel::bounded(1); - let task = tokio::spawn(async move { - while let Ok(msg) = recv.recv().await { - println!("{:?}", msg); - } - }); - blob_store - .validate(repair, AsyncChannelProgressSender::new(send).boxed()) - .await?; - task.await?; - Ok(()) - } - Commands::Plot { - interval, - metrics, - timeframe, - scrape_url, - file, - } => { - let metrics: Vec = metrics.split(',').map(|s| s.to_string()).collect(); - let interval = Duration::from_millis(interval); - - enable_raw_mode()?; - let mut stdout = io::stdout(); - execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?; - let backend = CrosstermBackend::new(stdout); - let mut terminal = Terminal::new(backend)?; - - let app = PlotterApp::new(metrics, timeframe, scrape_url, file); - let res = run_plotter(&mut terminal, app, interval).await; - disable_raw_mode()?; - execute!( - terminal.backend_mut(), - LeaveAlternateScreen, - DisableMouseCapture - )?; - terminal.show_cursor()?; - - if let Err(err) = res { - println!("{err:?}"); - } - - Ok(()) - } - }; - if let Some(metrics_fut) = metrics_fut { - metrics_fut.abort(); - } - cmd_res -} - -/// Runs the [`PlotterApp`]. -async fn run_plotter( - terminal: &mut Terminal, - mut app: PlotterApp, - tick_rate: Duration, -) -> anyhow::Result<()> { - let mut last_tick = Instant::now(); - loop { - terminal.draw(|f| plotter_draw(f, &mut app))?; - - if crossterm::event::poll(Duration::from_millis(10))? { - if let Event::Key(key) = event::read()? { - if key.kind == KeyEventKind::Press { - if let KeyCode::Char(c) = key.code { - app.on_key(c) - } - } - } - } - if last_tick.elapsed() >= tick_rate { - app.on_tick().await; - last_tick = Instant::now(); - } - if app.should_quit { - return Ok(()); - } - } -} - -/// Converts an area into `n` chunks. -fn area_into_chunks(area: Rect, n: usize, is_horizontal: bool) -> std::rc::Rc<[Rect]> { - let mut constraints = vec![]; - for _ in 0..n { - constraints.push(Constraint::Percentage(100 / n as u16)); - } - let layout = match is_horizontal { - true => Layout::horizontal(constraints), - false => Layout::vertical(constraints), - }; - layout.split(area) -} - -/// Creates a collection of [`Rect`] by splitting an [`Rect`] area into `n` chunks. 
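// Layout rule: fewer than four charts stack vertically; from four on, the
// area splits into two columns and charts are dealt to the left column first.
//   n = 5  ->  left: 5/2 + 5%2 = 3 rows, right: 5/2 = 2 rows
//   n = 8  ->  4 rows per column
// Each chunk gets Constraint::Percentage(100 / n), so integer division leaves
// any rounding remainder for ratatui to distribute.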
-fn generate_layout_chunks(area: Rect, n: usize) -> Vec { - if n < 4 { - let chunks = area_into_chunks(area, n, false); - return chunks.iter().copied().collect(); - } - let main_chunks = area_into_chunks(area, 2, true); - let left_chunks = area_into_chunks(main_chunks[0], n / 2 + n % 2, false); - let right_chunks = area_into_chunks(main_chunks[1], n / 2, false); - let mut chunks = vec![]; - chunks.extend(left_chunks.iter()); - chunks.extend(right_chunks.iter()); - chunks -} - -/// Draws the [`Frame`] given a [`PlotterApp`]. -fn plotter_draw(f: &mut Frame, app: &mut PlotterApp) { - let area = f.size(); - - let metrics_cnt = app.metrics.len(); - let areas = generate_layout_chunks(area, metrics_cnt); - - for (i, metric) in app.metrics.iter().enumerate() { - plot_chart(f, areas[i], app, metric); - } -} - -/// Draws the chart defined in the [`Frame`]. -fn plot_chart(frame: &mut Frame, area: Rect, app: &PlotterApp, metric: &str) { - let elapsed = app.internal_ts.as_secs_f64(); - let data = app.data.get(metric).unwrap().clone(); - let data_y_range = app.data_y_range.get(metric).unwrap(); - - let moved = (elapsed / 15.0).floor() * 15.0 - app.timeframe as f64; - let moved = moved.max(0.0); - let x_start = 0.0 + moved; - let x_end = moved + app.timeframe as f64 + 25.0; - - let y_start = data_y_range.0; - let y_end = data_y_range.1; - - let last_val = data.last(); - let name = match last_val { - Some(val) => { - let val_y = val.1; - format!("{metric}: {val_y:.0}") - } - None => metric.to_string(), - }; - let datasets = vec![Dataset::default() - .name(name) - .marker(symbols::Marker::Dot) - .graph_type(GraphType::Line) - .style(Style::default().fg(Color::Cyan)) - .data(&data)]; - - // TODO(arqu): labels are incorrectly spaced for > 3 labels https://github.com/ratatui-org/ratatui/issues/334 - let x_labels = vec![ - Span::styled( - format!("{:.1}s", x_start), - Style::default().add_modifier(Modifier::BOLD), - ), - Span::raw(format!("{:.1}s", x_start + (x_end - x_start) / 2.0)), - Span::styled( - format!("{:.1}s", x_end), - Style::default().add_modifier(Modifier::BOLD), - ), - ]; - - let mut y_labels = vec![Span::styled( - format!("{:.0}", y_start), - Style::default().add_modifier(Modifier::BOLD), - )]; - - for i in 1..=10 { - y_labels.push(Span::raw(format!( - "{:.0}", - y_start + (y_end - y_start) / 10.0 * i as f64 - ))); - } - - y_labels.push(Span::styled( - format!("{:.0}", y_end), - Style::default().add_modifier(Modifier::BOLD), - )); - - let chart = Chart::new(datasets) - .block( - Block::default() - .borders(Borders::ALL) - .title(format!("Chart: {}", metric)), - ) - .x_axis( - Axis::default() - .title("X Axis") - .style(Style::default().fg(Color::Gray)) - .labels(x_labels) - .bounds([x_start, x_end]), - ) - .y_axis( - Axis::default() - .title("Y Axis") - .style(Style::default().fg(Color::Gray)) - .labels(y_labels) - .bounds([y_start, y_end]), - ); - - frame.render_widget(chart, area); -} - -/// All the information about the plotter app. -struct PlotterApp { - should_quit: bool, - metrics: Vec, - start_ts: Instant, - data: HashMap>, - data_y_range: HashMap, - timeframe: usize, - rng: rand::rngs::ThreadRng, - freeze: bool, - internal_ts: Duration, - scrape_url: String, - file_data: Vec, - file_header: Vec, -} - -impl PlotterApp { - /// Creates a new [`PlotterApp`]. 
- fn new( - metrics: Vec, - timeframe: usize, - scrape_url: String, - file: Option, - ) -> Self { - let data = metrics.iter().map(|m| (m.clone(), vec![])).collect(); - let data_y_range = metrics.iter().map(|m| (m.clone(), (0.0, 0.0))).collect(); - let mut file_data: Vec = file - .map(|f| std::fs::read_to_string(f).unwrap()) - .unwrap_or_default() - .split('\n') - .map(|s| s.to_string()) - .collect(); - let mut file_header = vec![]; - let mut timeframe = timeframe; - if !file_data.is_empty() { - file_header = file_data[0].split(',').map(|s| s.to_string()).collect(); - file_data.remove(0); - - while file_data.last().unwrap().is_empty() { - file_data.pop(); - } - - let first_line: Vec = file_data[0].split(',').map(|s| s.to_string()).collect(); - let last_line: Vec = file_data - .last() - .unwrap() - .split(',') - .map(|s| s.to_string()) - .collect(); - - let start_time: usize = first_line.first().unwrap().parse().unwrap(); - let end_time: usize = last_line.first().unwrap().parse().unwrap(); - - timeframe = (end_time - start_time) / 1000; - } - timeframe = timeframe.clamp(30, 90); - - file_data.reverse(); - Self { - should_quit: false, - metrics, - start_ts: Instant::now(), - data, - data_y_range, - timeframe, - rng: rand::thread_rng(), - freeze: false, - internal_ts: Duration::default(), - scrape_url, - file_data, - file_header, - } - } - - /// Chooses what to do when a key is pressed. - fn on_key(&mut self, c: char) { - match c { - 'q' => { - self.should_quit = true; - } - 'f' => { - self.freeze = !self.freeze; - } - _ => {} - } - } - - /// Chooses what to do on a tick. - async fn on_tick(&mut self) { - if self.freeze { - return; - } - - let metrics_response = match self.file_data.is_empty() { - true => { - let req = reqwest::Client::new().get(&self.scrape_url).send().await; - if req.is_err() { - return; - } - let data = req.unwrap().text().await.unwrap(); - let metrics_response = iroh_metrics::parse_prometheus_metrics(&data); - if metrics_response.is_err() { - return; - } - metrics_response.unwrap() - } - false => { - if self.file_data.len() == 1 { - self.freeze = true; - return; - } - let data = self.file_data.pop().unwrap(); - let r = parse_csv_metrics(&self.file_header, &data); - if let Ok(mr) = r { - mr - } else { - warn!("Failed to parse csv metrics: {:?}", r.err()); - HashMap::new() - } - } - }; - self.internal_ts = self.start_ts.elapsed(); - for metric in &self.metrics { - let val = if metric.eq("random") { - self.rng.gen_range(0..101) as f64 - } else if let Some(v) = metrics_response.get(metric) { - *v - } else { - 0.0 - }; - let e = self.data.entry(metric.clone()).or_default(); - let mut ts = self.internal_ts.as_secs_f64(); - if metrics_response.contains_key("time") { - ts = *metrics_response.get("time").unwrap() / 1000.0; - } - self.internal_ts = Duration::from_secs_f64(ts); - e.push((ts, val)); - let yr = self.data_y_range.get_mut(metric).unwrap(); - if val * 1.1 < yr.0 { - yr.0 = val * 1.2; - } - if val * 1.1 > yr.1 { - yr.1 = val * 1.2; - } - } - } -} - -/// Parses CSV metrics into a [`HashMap`] of `String` -> `f64`. 
-fn parse_csv_metrics(header: &[String], data: &str) -> anyhow::Result> { - let mut metrics = HashMap::new(); - let data = data.split(',').collect::>(); - for (i, h) in header.iter().enumerate() { - let val = match h.as_str() { - "time" => { - let ts = data[i].parse::()?; - ts as f64 - } - _ => data[i].parse::()?, - }; - metrics.insert(h.clone(), val); - } - Ok(metrics) -} diff --git a/iroh-cli/src/commands/rpc.rs b/iroh-cli/src/commands/rpc.rs deleted file mode 100644 index 69c93783b1..0000000000 --- a/iroh-cli/src/commands/rpc.rs +++ /dev/null @@ -1,96 +0,0 @@ -//! Define the subcommands to manage the iroh RPC. - -use anyhow::Result; -use clap::Subcommand; -use iroh::client::Iroh; -use iroh_docs::cli::ConsoleEnv; -use iroh_node_util::cli::node::NodeCommands; - -use super::{ - authors::AuthorCommands, blobs::BlobCommands, docs::DocCommands, gossip::GossipCommands, - net::NetCommands, tags::TagCommands, -}; - -/// Commands to manage the iroh RPC. -#[derive(Subcommand, Debug, Clone)] -#[allow(clippy::large_enum_variant)] -pub enum RpcCommands { - /// Manage iroh's documents. - /// - /// Documents are mutable, syncable key-value stores. - /// For more on docs see https://iroh.computer/docs/layers/documents - /// - /// For general configuration options see . - Docs { - #[clap(subcommand)] - command: DocCommands, - }, - - /// Manage iroh's document authors. - /// - /// Authors are keypairs that identify writers to documents. - /// - /// For general configuration options see . - Authors { - #[clap(subcommand)] - command: AuthorCommands, - }, - /// Manage blobs - /// - /// Blobs are immutable, opaque chunks of arbitrary-sized data. - /// For more on blobs see https://iroh.computer/docs/layers/blobs - /// - /// For general configuration options see . - Blobs { - #[clap(subcommand)] - command: BlobCommands, - }, - /// Manage the iroh network - Net { - #[clap(subcommand)] - command: NetCommands, - }, - /// Manage gossip - /// - /// Gossip is a way to broadcast messages to a group of nodes. - /// - /// For general configuration options see . - Gossip { - #[clap(subcommand)] - command: GossipCommands, - }, - /// Manage tags - /// - /// Tags are local, human-readable names for things iroh should keep. - /// Anything added with explicit commands like `iroh get` or `doc join` - /// will be tagged & kept until the tag is removed. If no tag is given - /// while running an explicit command, iroh will automatically generate - /// a tag. - /// - /// Any data iroh fetches without a tag will be periodically deleted. - /// - /// For general configuration options see . - Tags { - #[clap(subcommand)] - command: TagCommands, - }, - - #[clap(flatten)] - Node(NodeCommands), -} - -impl RpcCommands { - /// Run the RPC command given the iroh client and the console environment. 
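Stepping back to `parse_csv_metrics` above, a quick usage sketch (hypothetical column names and values, test-style):

#[test]
fn parse_csv_metrics_sketch() -> anyhow::Result<()> {
    let header = vec!["time".to_string(), "requests_total".to_string()];
    let metrics = parse_csv_metrics(&header, "120000,42.5")?;
    // the `time` column is parsed as an integer millisecond timestamp, stored as f64
    assert_eq!(metrics.get("time"), Some(&120000.0));
    // every other column is parsed directly as an f64 sample
    assert_eq!(metrics.get("requests_total"), Some(&42.5));
    Ok(())
}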
-    pub async fn run(self, iroh: &Iroh, env: &ConsoleEnv) -> Result<()> {
-        let node_id = || async move { iroh.net().node_addr().await };
-        match self {
-            Self::Net { command } => command.run(&iroh.net()).await,
-            Self::Blobs { command } => command.run(&iroh.blobs(), node_id().await?).await,
-            Self::Docs { command } => command.run(&iroh.docs(), &iroh.blobs(), env).await,
-            Self::Authors { command } => command.run(&iroh.authors(), env).await,
-            Self::Tags { command } => command.run(&iroh.tags()).await,
-            Self::Gossip { command } => command.run(&iroh.gossip()).await,
-            Self::Node(command) => command.run(&iroh.node()).await,
-        }
-    }
-}
diff --git a/iroh-cli/src/commands/start.rs b/iroh-cli/src/commands/start.rs
deleted file mode 100644
index 682b1222fe..0000000000
--- a/iroh-cli/src/commands/start.rs
+++ /dev/null
@@ -1,321 +0,0 @@
-//! Define commands to manage the start of the iroh node.
-
-use std::{
-    future::Future,
-    net::SocketAddr,
-    path::{Path, PathBuf},
-    time::Duration,
-};
-
-use anyhow::Result;
-use colored::Colorize;
-use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
-use iroh::{
-    net::{RelayMap, RelayMode},
-    node::{Node, RpcStatus, DEFAULT_RPC_ADDR},
-};
-use tracing::{info_span, trace, Instrument};
-
-use crate::config::NodeConfig;
-
-/// Whether to stop the node after running a command or run forever until stopped.
-#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub enum RunType {
-    /// Run a single command, and then shut down the node. Allows aborting with Ctrl-C.
-    SingleCommandAbortable,
-    /// Run a single command, and then shut down the node. Does not abort on Ctrl-C
-    /// (expects Ctrl-C to be handled internally).
-    SingleCommandNoAbort,
-    /// Run until manually stopped (through Ctrl-C or the shutdown RPC command).
-    UntilStopped,
-}
-
-/// Error indicating that iroh is already running on some port.
-#[derive(thiserror::Error, Debug)]
-#[error("iroh is already running on port {0}")]
-pub struct AlreadyRunningError(u16);
-
-/// Runs an iroh node with a given command.
-pub async fn run_with_command<F, T>(
-    config: &NodeConfig,
-    iroh_data_root: &Path,
-    rpc_addr: Option<SocketAddr>,
-    run_type: RunType,
-    command: F,
-) -> Result<()>
-where
-    F: FnOnce(iroh::client::Iroh) -> T + Send + 'static,
-    T: Future<Output = Result<()>> + 'static,
-{
-    let _guard =
-        iroh_node_util::logging::init_terminal_and_file_logging(&config.file_logs, iroh_data_root)?;
-    let metrics_fut = start_metrics_server(config.metrics_addr);
-    let metrics_dumper_fut =
-        start_metrics_dumper(config.metrics_dump_path.clone(), Duration::from_millis(100));
-
-    let res = run_with_command_inner(config, iroh_data_root, rpc_addr, run_type, command).await;
-
-    // If `Some`thing was returned, the background task was started and must be
-    // aborted now that the command has finished.
-    if let Some(metrics_fut) = metrics_fut {
-        metrics_fut.abort();
-    }
-    if let Some(metrics_dumper_fut) = metrics_dumper_fut {
-        metrics_dumper_fut.abort();
-    }
-
-    let (clear_rpc, res) = match res {
-        Ok(()) => (true, res),
-        Err(e) => match e.downcast::<AlreadyRunningError>() {
-            // iroh is already running in a different process, do not remove the rpc lockfile
-            Ok(already_running) => (false, Err(already_running.into())),
-            Err(e) => (true, Err(e)),
-        },
-    };
-
-    if clear_rpc {
-        RpcStatus::clear(iroh_data_root).await?;
-    }
-
-    res
-}
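The lockfile handling above hinges on `anyhow::Error::downcast` recognizing the typed error; a minimal standalone sketch of that pattern (stand-in error type, same `thiserror` derive as the original):

use anyhow::Result;

#[derive(thiserror::Error, Debug)]
#[error("iroh is already running on port {0}")]
struct AlreadyRunning(u16);

/// Decide whether the RPC lockfile may be cleared for a finished run.
fn may_clear_lockfile(res: Result<()>) -> (bool, Result<()>) {
    match res {
        Ok(()) => (true, Ok(())),
        // Only keep the lockfile when a *different* live instance still owns it.
        Err(e) => match e.downcast::<AlreadyRunning>() {
            Ok(already_running) => (false, Err(already_running.into())),
            Err(other) => (true, Err(other)),
        },
    }
}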
-/// Runs an iroh node with the given command (private function).
-async fn run_with_command_inner<F, T>(
-    config: &NodeConfig,
-    iroh_data_root: &Path,
-    rpc_addr: Option<SocketAddr>,
-    run_type: RunType,
-    command: F,
-) -> Result<()>
-where
-    F: FnOnce(iroh::client::Iroh) -> T + Send + 'static,
-    T: Future<Output = Result<()>> + 'static,
-{
-    trace!(?config, "using config");
-    let relay_map = config.relay_map()?;
-
-    let spinner = create_spinner("Iroh booting...");
-    let node = start_node(iroh_data_root, rpc_addr, relay_map).await?;
-    drop(spinner);
-
-    eprintln!("{}", welcome_message(&node)?);
-
-    let client = node.client().clone();
-
-    let mut command_task = node.local_pool_handle().spawn(move || {
-        async move {
-            match command(client).await {
-                Err(err) => Err(err),
-                Ok(()) => {
-                    // keep the task open forever if not running in single-command mode
-                    if run_type == RunType::UntilStopped {
-                        futures_lite::future::pending().await
-                    }
-                    Ok(())
-                }
-            }
-        }
-        .instrument(info_span!("command"))
-    });
-
-    tokio::select! {
-        biased;
-        // always abort on ctrl-c
-        _ = tokio::signal::ctrl_c(), if run_type != RunType::SingleCommandNoAbort => {
-            command_task.abort();
-            node.shutdown().await?;
-        }
-        // abort if the command task finishes (will run forever if not in single-command mode)
-        res = &mut command_task => {
-            let _ = node.shutdown().await;
-            res??;
-        }
-    }
-    Ok(())
-}
-
-/// Starts an iroh node.
-pub(crate) async fn start_node(
-    iroh_data_root: &Path,
-    rpc_addr: Option<SocketAddr>,
-    relay_map: Option<RelayMap>,
-) -> Result<Node<iroh::blobs::store::fs::Store>> {
-    let rpc_status = RpcStatus::load(iroh_data_root).await?;
-    match rpc_status {
-        RpcStatus::Running { port, .. } => {
-            return Err(AlreadyRunningError(port).into());
-        }
-        RpcStatus::Stopped => {
-            // all good, we can start the node
-        }
-    }
-
-    let relay_mode = match relay_map {
-        None => RelayMode::Default,
-        Some(relay_map) => RelayMode::Custom(relay_map),
-    };
-
-    let rpc_addr = rpc_addr.unwrap_or(DEFAULT_RPC_ADDR);
-    Node::persistent(iroh_data_root)
-        .await?
-        .relay_mode(relay_mode)
-        .enable_docs()
-        .enable_rpc_with_addr(rpc_addr)
-        .await?
-        .spawn()
-        .await
-}
-
-/// Creates a welcome message for the given [`Node`].
-fn welcome_message(node: &Node<iroh::blobs::store::fs::Store>) -> Result<String> {
-    let msg = format!(
-        "{}\nNode ID: {}\n",
-        "Iroh is running".green(),
-        node.node_id()
-    );
-
-    Ok(msg)
-}
-
-/// Creates a nice spinner.
-fn create_spinner(msg: &'static str) -> ProgressBar {
-    let pb = ProgressBar::new_spinner();
-    pb.enable_steady_tick(Duration::from_millis(80));
-    pb.set_draw_target(ProgressDrawTarget::stderr());
-    pb.set_style(
-        ProgressStyle::with_template("{spinner:.blue} {msg}")
-            .unwrap()
-            .tick_strings(&["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]),
-    );
-    pb.set_message(msg);
-    pb.with_finish(indicatif::ProgressFinish::AndClear)
-}
-
-/// Start an iroh metrics server to serve the OpenMetrics endpoint.
-///
-/// Returns `None` if no metrics address is configured; otherwise returns the
-/// `JoinHandle` with which the server task can be aborted.
-pub fn start_metrics_server(
-    metrics_addr: Option<SocketAddr>,
-) -> Option<tokio::task::JoinHandle<()>> {
-    // doesn't start the server if the address is None
-    if let Some(metrics_addr) = metrics_addr {
-        // metrics are initialized in iroh::node::Node::spawn
-        // here we only start the server
-        return Some(tokio::task::spawn(async move {
-            if let Err(e) = iroh_metrics::metrics::start_metrics_server(metrics_addr).await {
-                eprintln!("Failed to start metrics server: {e}");
-            }
-        }));
-    }
-    tracing::info!("Metrics server not started, no address provided");
-    None
-}
-
-/// Starts an iroh metrics dumper service.
-/// -/// Returns `None` if succeeded; otherwise, returns the `JoinHandle` with which the task can be aborted. -pub fn start_metrics_dumper( - path: Option, - interval: Duration, -) -> Option> { - // doesn't start the dumper if the address is None - Some(tokio::task::spawn(async move { - if let Some(path) = path { - if let Err(e) = iroh_metrics::metrics::start_metrics_dumper(path, interval).await { - eprintln!("Failed to start metrics dumper: {e}"); - } - } - })) -} - -#[cfg(test)] -mod tests { - use anyhow::bail; - use iroh::util::path::IrohPaths; - - use super::*; - - #[tokio::test] - async fn test_run_rpc_lock_file() -> Result<()> { - let data_dir = tempfile::TempDir::with_prefix("rpc-lock-file-")?; - let lock_file_path = data_dir - .path() - .join(IrohPaths::RpcLock.with_root(data_dir.path())); - let data_dir_path = data_dir.path().to_path_buf(); - - let (ready_s, ready_r) = tokio::sync::oneshot::channel(); - let (close_s, close_r) = tokio::sync::oneshot::channel(); - - // run the first start command, using channels to coordinate so we know when the node has fully booted up, and when we need to shut the node down - let start = tokio::spawn(async move { - run_with_command( - &NodeConfig::default(), - &data_dir_path, - None, - RunType::SingleCommandAbortable, - |_| async move { - // inform the test the node is booted up - ready_s.send(()).unwrap(); - - // wait until the test tells us to shut down the node - close_r.await?; - Ok(()) - }, - ) - .await - }); - - // allow ample time for iroh to boot up - if tokio::time::timeout(Duration::from_millis(20000), ready_r) - .await - .is_err() - { - start.abort(); - bail!("First `run_with_command` call never started"); - } - - // ensure the rpc lock file exists - if !lock_file_path.try_exists()? { - start.abort(); - bail!("First `run_with_command` call never made the rpc lockfile"); - } - - // run the second command, this should fail - if run_with_command( - &NodeConfig::default(), - data_dir.path(), - None, - RunType::SingleCommandAbortable, - |_| async move { Ok(()) }, - ) - .await - .is_ok() - { - start.abort(); - bail!("Second `run_with_command` call should return error"); - } - - // ensure the rpc lock file still exists - if !lock_file_path.try_exists()? { - start.abort(); - bail!("Second `run_with_command` removed the rpc lockfile"); - } - - // inform the node it should close - close_s.send(()).unwrap(); - - // wait for the node to close - if tokio::time::timeout(Duration::from_secs(5), start) - .await - .is_err() - { - bail!("First `run_with_command` never closed"); - } - - // ensure the lockfile no longer exists - if lock_file_path.try_exists()? { - bail!("First `run_with_command` closed without removing the rpc lockfile"); - } - Ok(()) - } -} diff --git a/iroh-cli/src/config.rs b/iroh-cli/src/config.rs deleted file mode 100644 index 1c35e6ed2b..0000000000 --- a/iroh-cli/src/config.rs +++ /dev/null @@ -1,279 +0,0 @@ -//! Configuration for the iroh CLI. - -use std::{ - net::SocketAddr, - path::{Path, PathBuf}, - sync::Arc, - time::Duration, -}; - -use anyhow::Result; -use iroh::{ - net::{RelayMap, RelayNode}, - node::GcPolicy, -}; -use iroh_node_util::{config::config_root, logging::env_file_rust_log}; -use serde::Deserialize; - -/// BIN_NAME is the name of the binary. This is used in various places, e.g. for the home directory -/// and for environment variables. 
-pub(crate) const BIN_NAME: &str = "iroh"; - -/// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory -pub(crate) const CONFIG_FILE_NAME: &str = "iroh.config.toml"; - -#[derive(Debug, Clone, Copy, Eq, PartialEq, strum::AsRefStr, strum::EnumString, strum::Display)] -pub(crate) enum ConsolePaths { - #[strum(serialize = "current-author")] - CurrentAuthor, - #[strum(serialize = "history")] - History, -} - -impl ConsolePaths { - fn root(iroh_data_dir: impl AsRef) -> PathBuf { - PathBuf::from(iroh_data_dir.as_ref()).join("console") - } - pub fn with_iroh_data_dir(self, iroh_data_dir: impl AsRef) -> PathBuf { - Self::root(iroh_data_dir).join(self.as_ref()) - } -} - -/// The configuration for an iroh node. -// Please note that this is documented in the `iroh.computer` repository under -// `src/app/docs/reference/config/page.mdx`. Any changes to this need to be updated there. -#[derive(PartialEq, Eq, Debug, Deserialize, Clone)] -#[serde(default, deny_unknown_fields)] -pub(crate) struct NodeConfig { - /// The nodes for relay to use. - pub(crate) relay_nodes: Vec, - /// How often to run garbage collection. - pub(crate) gc_policy: GcPolicyConfig, - /// Bind address on which to serve Prometheus metrics - pub(crate) metrics_addr: Option, - /// Configuration for the logfile. - pub(crate) file_logs: iroh_node_util::logging::FileLogging, - /// Path to dump metrics to in CSV format. - pub(crate) metrics_dump_path: Option, -} - -impl Default for NodeConfig { - fn default() -> Self { - let relay_map = iroh::net::endpoint::default_relay_mode().relay_map(); - let relay_nodes = relay_map - .nodes() - .map(|v| Arc::unwrap_or_clone(v.clone())) - .collect(); - Self { - relay_nodes, - gc_policy: GcPolicyConfig::default(), - metrics_addr: None, - file_logs: Default::default(), - metrics_dump_path: None, - } - } -} - -impl NodeConfig { - /// Creates a config from default config file. - /// - /// If the *file* is `Some` the configuration will be read from it. Otherwise the - /// default config file will be loaded. If that is not present the default config will - /// be used. - pub(crate) async fn load(file: Option<&Path>) -> Result { - let default_config = config_root(BIN_NAME)?.join(CONFIG_FILE_NAME); - - let config_file = match file { - Some(file) => Some(file), - None => { - if default_config.exists() { - Some(default_config.as_ref()) - } else { - None - } - } - }; - let mut config = if let Some(file) = config_file { - let config = tokio::fs::read_to_string(file).await?; - Self::load_toml(&config)? - } else { - Self::default() - }; - - // override from env var - if let Some(env_filter) = env_file_rust_log(BIN_NAME).transpose()? { - config.file_logs.rust_log = env_filter; - } - Ok(config) - } - - fn load_toml(s: &str) -> Result { - let config = toml::from_str(s)?; - Ok(config) - } - - /// Constructs a `RelayMap` based on the current configuration. - pub(crate) fn relay_map(&self) -> Result> { - if self.relay_nodes.is_empty() { - return Ok(None); - } - Some(RelayMap::from_nodes(self.relay_nodes.iter().cloned())).transpose() - } -} - -/// Serde-compatible configuration for [`GcPolicy`]. -/// -/// The [`GcPolicy`] struct is not amenable to TOML serialisation, this covers this gap. 
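For orientation, the `ConsolePaths` helper above resolves like this (a small sketch with a hypothetical data directory):

fn history_path_sketch() {
    use std::path::PathBuf;
    // Everything console-related nests under "<iroh_data_dir>/console";
    // the strum serializations above supply the file names.
    let history = ConsolePaths::History.with_iroh_data_dir("/var/lib/iroh");
    assert_eq!(history, PathBuf::from("/var/lib/iroh/console/history"));
}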
-#[derive(PartialEq, Eq, Debug, Default, Deserialize, Clone)] -#[serde(default, deny_unknown_fields, rename = "gc_policy")] -pub(crate) struct GcPolicyConfig { - enabled: bool, - interval: Option, -} - -impl From for GcPolicy { - fn from(source: GcPolicyConfig) -> Self { - if source.enabled { - match source.interval { - Some(interval) => Self::Interval(Duration::from_secs(interval)), - None => Self::default(), - } - } else { - Self::Disabled - } - } -} - -#[cfg(test)] -mod tests { - use std::{ - net::{Ipv4Addr, Ipv6Addr}, - str::FromStr, - }; - - use iroh_node_util::logging::{EnvFilter, Rotation}; - use url::Url; - - use super::*; - - #[test] - fn test_toml_invalid_field() { - let source = r#" - not_a_field = true - "#; - let res = NodeConfig::load_toml(source); - assert!(res.is_err()); - } - - #[test] - fn test_toml_relay_nodes() { - let source = r#" - [[relay_nodes]] - url = "https://example.org." - stun_only = false - stun_port = 123 - "#; - let config = NodeConfig::load_toml(source).unwrap(); - - let expected = RelayNode { - url: Url::parse("https://example.org./").unwrap().into(), - stun_only: false, - stun_port: 123, - }; - assert_eq!(config.relay_nodes, vec![expected]); - } - - #[test] - fn test_toml_gc_policy() { - let source = r#" - [gc_policy] - enabled = false - "#; - let config = NodeConfig::load_toml(source).unwrap(); - assert_eq!(GcPolicy::from(config.gc_policy), GcPolicy::Disabled); - - // Default interval should be used. - let source = r#" - [gc_policy] - enabled = true - "#; - let config = NodeConfig::load_toml(source).unwrap(); - let gc_policy = GcPolicy::from(config.gc_policy); - assert!(matches!(gc_policy, GcPolicy::Interval(_))); - assert_eq!(gc_policy, GcPolicy::default()); - - let source = r#" - [gc_policy] - enabled = true - interval = 1234 - "#; - let config = NodeConfig::load_toml(source).unwrap(); - assert_eq!( - GcPolicy::from(config.gc_policy), - GcPolicy::Interval(Duration::from_secs(1234)) - ); - - let source = r#" - [gc_policy] - not_a_field = true - "#; - let res = NodeConfig::load_toml(source); - assert!(res.is_err()); - } - - #[test] - fn test_toml_metrics_addr() { - let source = r#" - metrics_addr = "1.2.3.4:1234" - "#; - let config = NodeConfig::load_toml(source).unwrap(); - assert_eq!( - config.metrics_addr, - Some(SocketAddr::new(Ipv4Addr::new(1, 2, 3, 4).into(), 1234)), - ); - - let source = r#" - metrics_addr = "[123:456::789:abc]:1234" - "#; - let config = NodeConfig::load_toml(source).unwrap(); - assert_eq!( - config.metrics_addr, - Some(SocketAddr::new( - Ipv6Addr::new(0x123, 0x456, 0, 0, 0, 0, 0x789, 0xabc).into(), - 1234 - )), - ); - } - - #[test] - fn test_toml_file_logs() { - let source = r#" - [file_logs] - rust_log = "iroh_net=trace" - max_files = 123 - rotation = "daily" - dir = "/var/log/iroh" - "#; - let config = NodeConfig::load_toml(source).unwrap(); - assert_eq!( - config.file_logs.rust_log, - EnvFilter::from_str("iroh_net=trace").unwrap() - ); - assert_eq!(config.file_logs.max_files, 123); - assert_eq!(config.file_logs.rotation, Rotation::Daily); - assert_eq!(config.file_logs.dir, Some(PathBuf::from("/var/log/iroh"))); - - let source = r#" - [file_logs] - rust_log = "info" - "#; - let config = NodeConfig::load_toml(source).unwrap(); - assert_eq!( - config.file_logs.rust_log, - EnvFilter::from_str("info").unwrap() - ); - assert_eq!(config.file_logs.max_files, 4); - assert_eq!(config.file_logs.rotation, Rotation::Hourly); - assert_eq!(config.file_logs.dir, None); - } -} diff --git a/iroh-cli/src/main.rs b/iroh-cli/src/main.rs deleted 
file mode 100644
index 49ea85e6f6..0000000000
--- a/iroh-cli/src/main.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-use std::time::Duration;
-
-use anyhow::Result;
-use clap::Parser;
-use config::BIN_NAME;
-use iroh_node_util::config::data_root;
-
-mod commands;
-mod config;
-
-use crate::commands::Cli;
-
-fn main() -> Result<()> {
-    let rt = tokio::runtime::Builder::new_multi_thread()
-        .thread_name("main-runtime")
-        .worker_threads(2)
-        .enable_all()
-        .build()?;
-    rt.block_on(main_impl())?;
-    // give the runtime some time to finish, but do not wait indefinitely.
-    // there are cases where a runtime thread is blocked doing io,
-    // e.g. reading from stdin.
-    rt.shutdown_timeout(Duration::from_millis(500));
-    Ok(())
-}
-
-async fn main_impl() -> Result<()> {
-    let data_dir = data_root(BIN_NAME)?;
-    let cli = Cli::parse();
-    cli.run(&data_dir).await
-}
diff --git a/iroh-cli/src/progress.rs b/iroh-cli/src/progress.rs
deleted file mode 100644
index 835e5ce53c..0000000000
--- a/iroh-cli/src/progress.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-//! Generic utilities to track progress of data transfers.
-//!
-//! Based on your environment there might also be better choices for this, e.g. very
-//! similar and more advanced functionality is available in the `indicatif` crate for
-//! terminal applications.
-
-use std::{pin::Pin, task::Poll};
-
-use iroh_blobs::util::io::TrackingWriter;
-use tokio::{
-    io::{self, AsyncWrite},
-    sync::mpsc,
-};
-
-/// A writer that tries to send the total number of bytes written after each write.
-///
-/// It sends the total number instead of just an increment so the update is self-contained.
#[derive(Debug)]
-pub struct ProgressWriter<W> {
-    inner: TrackingWriter<W>,
-    sender: mpsc::Sender<u64>,
-}
-
-impl<W> ProgressWriter<W> {
-    /// Create a new `ProgressWriter` from an inner writer.
-    pub fn new(inner: W) -> (Self, mpsc::Receiver<u64>) {
-        let (sender, receiver) = mpsc::channel(1);
-        (
-            Self {
-                inner: TrackingWriter::new(inner),
-                sender,
-            },
-            receiver,
-        )
-    }
-
-    /// Return the inner writer.
-    pub fn into_inner(self) -> W {
-        self.inner.into_parts().0
-    }
-}
-
-impl<W: AsyncWrite + Unpin> AsyncWrite for ProgressWriter<W> {
-    fn poll_write(
-        mut self: Pin<&mut Self>,
-        cx: &mut std::task::Context<'_>,
-        buf: &[u8],
-    ) -> Poll<io::Result<usize>> {
-        let this = &mut *self;
-        let res = Pin::new(&mut this.inner).poll_write(cx, buf);
-        if let Poll::Ready(Ok(_)) = res {
-            this.sender.try_send(this.inner.bytes_written()).ok();
-        }
-        res
-    }
-
-    fn poll_flush(
-        mut self: Pin<&mut Self>,
-        cx: &mut std::task::Context<'_>,
-    ) -> Poll<io::Result<()>> {
-        Pin::new(&mut self.inner).poll_flush(cx)
-    }
-
-    fn poll_shutdown(
-        mut self: Pin<&mut Self>,
-        cx: &mut std::task::Context<'_>,
-    ) -> Poll<io::Result<()>> {
-        Pin::new(&mut self.inner).poll_shutdown(cx)
-    }
-}
diff --git a/iroh-cli/tests/cli.rs b/iroh-cli/tests/cli.rs
deleted file mode 100644
index 08a5566223..0000000000
--- a/iroh-cli/tests/cli.rs
+++ /dev/null
@@ -1,981 +0,0 @@
-#![cfg(any(target_os = "windows", target_os = "macos", target_os = "linux"))]
-use std::{
-    env,
-    io::{BufRead, BufReader, Read},
-    net::SocketAddr,
-    path::{Path, PathBuf},
-    str::FromStr,
-};
-
-use anyhow::{ensure, Context, Result};
-use bao_tree::blake3;
-use duct::{cmd, ReaderHandle};
-use iroh::{base::ticket::BlobTicket, blobs::Hash, util::path::IrohPaths};
-use rand::{RngCore, SeedableRng};
-use regex::Regex;
-use testdir::testdir;
-use walkdir::WalkDir;
-
-fn make_rand_file(size: usize, path: &Path) -> Result<Hash> {
-    // 64 chars makes for easy random sampling
-    const CHARS_LUT: &[u8; 64] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
-                                   abcdefghijklmnopqrstuvwxyz\
-                                   0123456789 .";
-
-    // We do something custom to eke out a little bit more performance
-    // over just a simple `rand::distributions::Alphanumeric.sample_string`.
-    let mut rng = rand_xorshift::XorShiftRng::seed_from_u64(1);
-    let content = std::iter::from_fn(move || Some(rng.next_u32()))
-        .flat_map(u32::to_le_bytes)
-        .map(|num| CHARS_LUT[(num >> (8 - 6)) as usize])
-        .take(size)
-        .collect::<Vec<u8>>();
-
-    let hash = blake3::hash(&content);
-    std::fs::write(path, content)?;
-    Ok(hash.into())
-}
-
-#[test]
-fn cli_provide_one_file_basic() -> Result<()> {
-    let dir = testdir!();
-    let path = dir.join("foo");
-    make_rand_file(1000, &path)?;
-    // provide a path to a file, do not pipe from stdin, do not pipe to stdout
-    test_provide_get_loop(Input::Path(path), Output::Path)
-}
-
-#[test]
-fn cli_provide_one_file_external_outboard() -> Result<()> {
-    let dir = testdir!();
-    let path = dir.join("foo");
-    // The cutoff point at which an outboard is stored externally is 16KiB by default.
-    // Outboards end up approaching ~1/256th the size of the source file.
-    // So if the source file is 16 KiB * 256, we *almost* have a file big enough that
-    // causes its outboard to be stored externally.
-    // We add a bit of margin, just to be safe.
-    let outboard_size_to_file_size = 256;
-    let safety_margin = 20;
-    let file_size = iroh::blobs::store::fs::InlineOptions::default().max_outboard_inlined
-        * (outboard_size_to_file_size + safety_margin);
-    // At current defaults, `file_size` ends up being ~4.5MB
-    make_rand_file(file_size as usize, &path)?;
-    // provide a path to a file, do not pipe from stdin, do not pipe to stdout
-    test_provide_get_loop(Input::Path(path), Output::Path)
-}
-
-/// Test single file download to a path
-#[test]
-fn cli_provide_one_file_single_path() -> Result<()> {
-    let dir = testdir!();
-    let path = dir.join("foo");
-    let hash = make_rand_file(1000, &path)?;
-
-    test_provide_get_loop_single(Input::Path(path), Output::Path, hash)?;
-    Ok(())
-}
-
-/// Test single file download to stdout
-#[test]
-fn cli_provide_one_file_single_stdout() -> Result<()> {
-    let dir = testdir!();
-    let path = dir.join("foo");
-    let hash = make_rand_file(1000, &path)?;
-
-    test_provide_get_loop_single(Input::Path(path.clone()), Output::Stdout, hash)?;
-
-    Ok(())
-}
-
-#[test]
-fn cli_provide_folder() -> Result<()> {
-    let path = testdir!().join("src");
-    std::fs::create_dir(&path)?;
-    let foo_path = path.join("foo");
-    let bar_path = path.join("bar");
-    make_rand_file(1000, &foo_path)?;
-    make_rand_file(10000, &bar_path)?;
-    // provide a path to a folder, do not pipe from stdin, do not pipe to stdout
-    test_provide_get_loop(Input::Path(path), Output::Path)
-}
-
-#[test]
-fn cli_provide_tree() -> Result<()> {
-    let path = testdir!().join("src");
-    std::fs::create_dir(&path)?;
-    let foo_path = path.join("foo");
-    let bar_path = path.join("bar");
-    let file1 = foo_path.join("file1");
-    let file2 = bar_path.join("file2");
-    let file3 = bar_path.join("file3");
-    std::fs::create_dir(&foo_path)?;
-    std::fs::create_dir(&bar_path)?;
-    make_rand_file(1000, &file1)?;
-    make_rand_file(10000, &file2)?;
-    make_rand_file(5000, &file3)?;
-
-    // provide a path to a folder, do not pipe from stdin, do not pipe to stdout
-    test_provide_get_loop(Input::Path(path), Output::Path)
-}
-
-/// Test resumption with collections.
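The resume tests below seed the get-side store in several states through `make_partial`; a compact sketch of that knob (using the `test_support` API the tests import, with a hypothetical `data_dir`):

fn seed_partial_store(data_dir: &std::path::Path) -> anyhow::Result<()> {
    use iroh::blobs::store::fs::test_support::{make_partial, MakePartialResult};

    // Rewrite a copied store so only the 100 KB blob has to be re-fetched:
    // `Remove` drops an entry entirely, `Truncate` keeps a prefix of its data,
    // and `Retain` leaves it untouched.
    make_partial(data_dir, |_hash, size| {
        if size == 100000 {
            MakePartialResult::Truncate(1024 * 32)
        } else {
            MakePartialResult::Retain
        }
    })?;
    Ok(())
}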
-#[test] -fn cli_provide_tree_resume() -> Result<()> { - use iroh::blobs::store::fs::test_support::{make_partial, MakePartialResult}; - - /// Get all matches for match group 1 (an explicitly defined match group) - fn explicit_matches(matches: Vec<(usize, Vec)>) -> Vec { - matches - .iter() - .filter_map(|(_, m)| m.get(1).cloned()) - .collect::>() - } - - let tmp = testdir!(); - let src = tmp.join("src"); - std::fs::create_dir(&src)?; - let src_iroh_data_dir_pre = tmp.join("src_iroh_data_dir_pre"); - let tgt = tmp.join("tgt"); - { - let foo_path = src.join("foo"); - let bar_path = src.join("bar"); - let file1 = foo_path.join("file1"); - let file2 = bar_path.join("file2"); - let file3 = bar_path.join("file3"); - std::fs::create_dir(&foo_path)?; - std::fs::create_dir(&bar_path)?; - make_rand_file(10000, &file1)?; - make_rand_file(100000, &file2)?; - make_rand_file(5000, &file3)?; - } - - let count = count_input_files(&src); - - { - // import the files into an ephemeral iroh to use the generated blobs db in tests - let provider = make_provider_in(&src_iroh_data_dir_pre, Input::Path(src.clone()), false)?; - // small synchronization point: allow iroh to be ready for transfer - #[cfg(target_os = "windows")] - let wait = 10u64; - #[cfg(not(target_os = "windows"))] - let wait = 5u64; - std::thread::sleep(std::time::Duration::from_secs(wait)); - let _ticket = match_provide_output(&provider, count, BlobOrCollection::Collection)?; - } - - // setup the data dir for the iroh instances that will get the blobs - let src_iroh_data_dir = tmp.join("src_iroh_data_dir"); - copy_blob_dirs(&src_iroh_data_dir_pre, &src_iroh_data_dir)?; - // first tests - let empty_dir = tmp.join("get_iroh_data_dir_01"); - // second test - let full_dir = tmp.join("get_iroh_data_dir_02"); - copy_blob_dirs(&src_iroh_data_dir, &full_dir)?; - // third test - let partial_dir_1 = tmp.join("get_iroh_data_dir_03"); - copy_blob_dirs(&src_iroh_data_dir, &partial_dir_1)?; - make_partial(&partial_dir_1, |_hash, size| { - if size == 100000 { - MakePartialResult::Remove - } else { - MakePartialResult::Retain - } - })?; - // fourth test - let partial_dir_2 = tmp.join("get_iroh_data_dir_04"); - copy_blob_dirs(&src_iroh_data_dir, &partial_dir_2)?; - make_partial(&partial_dir_2, |_hash, size| { - if size == 100000 { - MakePartialResult::Truncate(1024 * 32) - } else { - MakePartialResult::Retain - } - })?; - - // start the provider and run the test cases - let provider = make_provider_in(&src_iroh_data_dir, Input::Path(src.clone()), false)?; - let ticket = match_provide_output(&provider, count, BlobOrCollection::Collection)?; - - let run_test = - |name: &'static str, get_folder: PathBuf, transfer_size: &'static str| -> Result<()> { - println!("\n***\n{name}\n***"); - let get_output = run_get_cmd(&get_folder, &ticket, Some(tgt.clone()))?; - let matches = explicit_matches(match_get_stderr(get_output.stderr)?); - assert_eq!(matches, vec![transfer_size], "{name}: wrong transfer size"); - compare_files(&src, &tgt).context("file contents do not match")?; - std::fs::remove_dir_all(&tgt)?; - Ok(()) - }; - - run_test("no data needs full transfer", empty_dir, "112.89 KiB")?; - run_test("full data needs no transfer", full_dir, "0 B")?; - run_test("missing blobs needs transfer", partial_dir_1, "98.04 KiB")?; - run_test("partial blobs needs transfer", partial_dir_2, "65.98 KiB")?; - - drop(provider); - Ok(()) -} - -#[test] -fn cli_provide_file_resume() -> Result<()> { - use iroh::blobs::store::fs::test_support::{make_partial, MakePartialResult}; - - /// Get all 
matches for match group 1 (an explicitly defined match group) - fn explicit_matches(matches: Vec<(usize, Vec)>) -> Vec { - matches - .iter() - .filter_map(|(_, m)| m.get(1).cloned()) - .collect::>() - } - - let tmp = testdir!(); - let src = tmp.join("src"); - let tgt = tmp.join("tgt"); - std::fs::create_dir(&src)?; - let src_iroh_data_dir_pre = tmp.join("src_iroh_data_dir_pre"); - let file = src.join("file"); - let hash = make_rand_file(100000, &file)?; - let count = count_input_files(&src); - - { - // import the files into an ephemeral iroh to use the generated blobs db in tests - let provider = make_provider_in(&src_iroh_data_dir_pre, Input::Path(file.clone()), false)?; - // small synchronization point: allow iroh to be ready for transfer - #[cfg(target_os = "windows")] - let wait = 10u64; - #[cfg(not(target_os = "windows"))] - let wait = 5u64; - std::thread::sleep(std::time::Duration::from_secs(wait)); - let _ticket = match_provide_output(&provider, count, BlobOrCollection::Blob)?; - } - - // setup the data dir for the iroh instances that will get the blobs - let src_iroh_data_dir = tmp.join("src_iroh_data_dir"); - copy_blob_dirs(&src_iroh_data_dir_pre, &src_iroh_data_dir)?; - - // first test: empty - let empty_data_dir = tmp.join("get_iroh_data_dir_01"); - // second test: all data available already - let full_data_dir = tmp.join("get_iroh_data_dir_02"); - copy_blob_dirs(&src_iroh_data_dir, &full_data_dir)?; - // third test: partial files - let partial_data_dir = tmp.join("get_iroh_data_dir_03"); - copy_blob_dirs(&src_iroh_data_dir, &partial_data_dir)?; - make_partial(&partial_data_dir, |_hash, _size| { - MakePartialResult::Truncate(1024 * 32) - })?; - - // start the provider and run the test cases - - let provider = make_provider_in(&src_iroh_data_dir, Input::Path(file.clone()), false)?; - let ticket = match_provide_output(&provider, count, BlobOrCollection::Blob)?; - - let run_test = - |name: &'static str, get_folder: PathBuf, transfer_size: &'static str| -> Result<()> { - println!("\n***\n{name}\n***"); - let get_output = run_get_cmd(&get_folder, &ticket, Some(tgt.clone()))?; - let matches = explicit_matches(match_get_stderr(get_output.stderr)?); - assert_eq!(matches, vec![transfer_size], "{name}: wrong transfer size"); - let current_hash = Hash::new(std::fs::read(&tgt)?); - assert_eq!(current_hash, hash, "{name}: wrong blob contents"); - std::fs::remove_file(&tgt)?; - Ok(()) - }; - - run_test("no data needs full transfer", empty_data_dir, "98.04 KiB")?; - run_test("full folder needs no transfer", full_data_dir, "0 B")?; - run_test("partial data needs transfer", partial_data_dir, "65.98 KiB")?; - Ok(()) -} - -#[test] -fn cli_provide_from_stdin_to_stdout() -> Result<()> { - let dir = testdir!(); - let path = dir.join("foo"); - make_rand_file(1000, &path)?; - // provide a file, pipe content to the provider's stdin, pipe content to the getter's stdout - test_provide_get_loop(Input::Stdin(path), Output::Stdout) -} - -#[cfg(unix)] -#[tokio::test] -async fn cli_provide_persistence() -> anyhow::Result<()> { - use std::time::Duration; - - use iroh::blobs::store::ReadableStore; - use nix::{ - sys::signal::{self, Signal}, - unistd::Pid, - }; - - let dir = testdir!(); - let iroh_data_dir = dir.join("iroh_data_dir"); - - let foo_path = dir.join("foo"); - std::fs::write(&foo_path, b"foo")?; - let bar_path = dir.join("bar"); - std::fs::write(&bar_path, b"bar")?; - - // spawn iroh in provide mode - let iroh_provide = |path: &PathBuf| { - cmd( - iroh_bin(), - ["start", "--add", 
path.to_str().unwrap(), "--wrap"], - ) - .env("IROH_DATA_DIR", &iroh_data_dir) - .env_remove("RUST_LOG") - .stdin_null() - .stderr_to_stdout() - .reader() - }; - // start provide until we got the ticket, then stop with control-c - let provide = |path| { - let mut child = iroh_provide(path)?; - // wait for the provider to start - let _ticket = match_provide_output(&mut child, 1, BlobOrCollection::Collection)?; - println!("got ticket, stopping provider {}", _ticket); - // kill the provider via Control-C - for pid in child.pids() { - signal::kill(Pid::from_raw(pid as i32), Signal::SIGINT).unwrap(); - } - // wait for the provider to stop - loop { - if let Some(_output) = child.try_wait()? { - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - anyhow::Ok(()) - }; - provide(&foo_path)?; - // should have some data now - let db_path = IrohPaths::BaoStoreDir.with_root(&iroh_data_dir); - let db = iroh::blobs::store::fs::Store::load(&db_path).await?; - let blobs: Vec> = db.blobs().await.unwrap().collect::>(); - drop(db); - assert_eq!(blobs.len(), 3); - - provide(&bar_path)?; - // should have more data now - let db = iroh::blobs::store::fs::Store::load(&db_path).await?; - let blobs = db.blobs().await.unwrap().collect::>(); - drop(db); - assert_eq!(blobs.len(), 6); - - Ok(()) -} - -#[ignore = "flaky"] -#[test] -fn cli_provide_addresses() -> Result<()> { - let dir = testdir!(); - let path = dir.join("foo"); - make_rand_file(1000, &path)?; - - let iroh_data_dir = dir.join("iroh-data-dir"); - let mut provider = make_provider_in(&iroh_data_dir, Input::Path(path), true)?; - // wait for the provider to start - let _ticket = match_provide_output(&mut provider, 1, BlobOrCollection::Collection)?; - - // test output - let get_output = cmd(iroh_bin(), ["status"]) - .env_remove("RUST_LOG") - .env("IROH_DATA_DIR", iroh_data_dir) - // .stderr_file(std::io::stderr().as_raw_fd()) // for debug output - .stdout_capture() - .run()?; - let stdout = String::from_utf8(get_output.stdout).unwrap(); - assert!(get_output.status.success()); - assert!(stdout.starts_with("Listening addresses:")); - //parse the output to get the addresses - let addresses = stdout - .split('[') - .nth(1) - .unwrap() - .split(']') - .next() - .unwrap() - .split(',') - .map(|x| x.trim()) - .filter(|x| !x.is_empty()) - .map(|x| SocketAddr::from_str(x).unwrap()) - .collect::>(); - assert!(!addresses.is_empty()); - Ok(()) -} - -#[test] -#[ignore = "flaky"] -fn cli_rpc_lock_restart() -> Result<()> { - let dir = testdir!(); - let iroh_data_dir = dir.join("data-dir"); - - println!("start"); - let mut reader_handle = cmd(iroh_bin(), ["start"]) - .env_remove("RUST_LOG") - .env("IROH_DATA_DIR", &iroh_data_dir) - .stderr_to_stdout() - .reader()?; - - assert_matches_line( - BufReader::new(&mut reader_handle), - [(r"Iroh is running", 1), (r"Node ID: [_\w\d-]*", 1)], - ); - - // check for the lock file - assert!( - IrohPaths::RpcLock.with_root(&iroh_data_dir).exists(), - "missing lock file" - ); - - // kill process - println!("killing process"); - reader_handle.kill()?; - - // File should still be there - assert!( - IrohPaths::RpcLock.with_root(&iroh_data_dir).exists(), - "missing lock file" - ); - - // Restart should work fine - println!("restart"); - let mut reader_handle = cmd(iroh_bin(), ["start"]) - .env_remove("RUST_LOG") - .env("IROH_DATA_DIR", &iroh_data_dir) - .stderr_to_stdout() - .reader()?; - - assert_matches_line( - BufReader::new(&mut reader_handle), - [(r"Iroh is running", 1), (r"Node ID: [_\w\d-]*", 1)], - ); - - println!("double 
start"); - let output = cmd(iroh_bin(), ["start"]) - .env_remove("RUST_LOG") - .env("IROH_DATA_DIR", &iroh_data_dir) - .stderr_capture() - .unchecked() - .run()?; - - let output = std::str::from_utf8(&output.stderr).unwrap(); - println!("{}", output); - assert!(output.contains("iroh is already running on port")); - - Ok(()) -} - -/// Parameter for `test_provide_get_loop`, that determines how we handle the fetched data from the -/// `iroh get` command -#[derive(Debug, PartialEq)] -enum Output { - /// Indicates we should save the content as a file in the given directory, by passing the path - /// to the `--out` argument in `iroh get` - Path, - /// Indicates we should pipe the content to `stdout` of the `iroh get` process - Stdout, - /// Custom output - #[allow(dead_code)] - Custom(PathBuf), -} - -/// Parameter for `test_provide_get_loop`, that determines how we send the data to the `provide` -/// command. -#[derive(Debug, PartialEq, Clone)] -enum Input { - /// Indicates we should pass the content as an argument to the `iroh start` command - Path(PathBuf), - /// Idincates we should pipe the content via `stdin` to the `iroh start` command - /// should point to a file, never to a directory - Stdin(PathBuf), -} - -impl Input { - fn as_path(&self) -> &PathBuf { - match self { - Input::Path(ref p) => p, - Input::Stdin(ref p) => p, - } - } - - fn as_arg(&self) -> String { - match self { - Input::Path(path) => path.to_str().unwrap().to_string(), - Input::Stdin(_) => "STDIN".into(), - } - } - - fn should_wrap(&self) -> bool { - match self { - Input::Path(path) => path.as_path().is_file(), - Input::Stdin(_) => false, - } - } - - fn is_blob_or_collection(&self) -> BlobOrCollection { - match self { - // we currently always create a collection because single files will be wrapped - Input::Path(_) => BlobOrCollection::Collection, - Input::Stdin(_) => BlobOrCollection::Blob, - } - } -} - -fn iroh_bin() -> &'static str { - env!("CARGO_BIN_EXE_iroh") -} - -/// Makes a provider process with its home directory in `iroh_data_dir`. -fn make_provider_in(iroh_data_dir: &Path, input: Input, wrap: bool) -> Result { - let mut args = vec!["start"]; - if wrap { - args.push("--wrap"); - } - args.push("--add"); - let arg = input.as_arg(); - args.push(&arg); - - // spawn a provider & optionally provide from stdin - println!( - "running iroh {:?} in dir: {}", - args, - iroh_data_dir.display() - ); - let res = cmd(iroh_bin(), &args) - .env_remove("RUST_LOG") - .env("IROH_DATA_DIR", iroh_data_dir) - .stderr_to_stdout(); - - let provider = match input { - Input::Stdin(ref p) => res.stdin_path(p), - Input::Path(_) => res.stdin_null(), - } - .reader()?; - - // wrap in `ProvideProcess` to ensure the spawned process is killed on drop - Ok(provider) -} - -/// Count the number of files in the given path, for matching the output text in -/// [match_provide_output] -fn count_input_files(path: impl AsRef) -> usize { - let path = path.as_ref(); - if path.is_dir() { - WalkDir::new(path) - .into_iter() - .filter_map(|x| x.ok().filter(|x| x.file_type().is_file())) - .count() - } else { - 1 - } -} - -/// Translate output into an optional out path -fn to_out_dir(output: Output) -> Option { - match output { - Output::Path => { - let dir = testdir!(); - Some(dir.join("out")) - } - Output::Custom(out) => Some(out), - Output::Stdout => None, - } -} - -/// Create a get command given a ticket and an output mode and run it. -/// -/// The commands STDOUT and STDERR are printed, and the command's result code is checked for -/// success. 
-#[track_caller] -fn run_get_cmd( - iroh_data_dir: &Path, - ticket: &str, - out: Option, -) -> Result { - // create a `get-ticket` cmd & optionally provide out path - let out = out - .map(|ref o| o.to_str().unwrap().to_string()) - .unwrap_or("STDOUT".into()); - let args = vec!["--start", "blobs", "get", ticket, "--out", &out]; - - println!( - "running iroh {:?} in dir: {}", - args, - iroh_data_dir.display() - ); - - let output = cmd(iroh_bin(), &args) - .env_remove("RUST_LOG") - .env("IROH_DATA_DIR", iroh_data_dir) - .stdout_capture() - .stderr_capture() - .unchecked() - .run()?; - - // checking the output first, so you can still view any logging - println!("STDOUT: {}", String::from_utf8_lossy(&output.stdout)); - println!("STDERR: {}", String::from_utf8_lossy(&output.stderr)); - - ensure!( - output.status.success(), - "iroh command failed. See STDERR output above." - ); - - Ok(output) -} - -/// Test the provide and get loop for success, stderr output, and file contents. -/// -/// Can optionally pipe the given `path` content to the provider from stdin & can optionally -/// save the output to an `out` path. -/// -/// Runs the provider as a child process that stays alive until the getter has -/// completed. Then checks the output of the "provide" and "get" processes against expected -/// regex output. Finally, test the content fetched from the "get" process is the same as -/// the "provided" content. -fn test_provide_get_loop(input: Input, output: Output) -> Result<()> { - let num_blobs = count_input_files(input.as_path()); - let wrap = input.should_wrap(); - - let dir = testdir!(); - let iroh_data_dir = dir.join("iroh-data-dir"); - let mut provider = make_provider_in(&iroh_data_dir, input.clone(), wrap)?; - - // test provide output & scrape the ticket from stderr - let ticket = match_provide_output(&mut provider, num_blobs, input.is_blob_or_collection())?; - let out_dir = to_out_dir(output); - let get_iroh_data_dir = dir.join("get-iroh-data-dir"); - let get_output = run_get_cmd(&get_iroh_data_dir, &ticket, out_dir.clone())?; - - drop(provider); - - match_get_stderr(get_output.stderr)?; - assert!(get_output.status.success()); - - // test output - match out_dir { - None => { - let path = input.as_path(); - assert!(!get_output.stdout.is_empty()); - let expect_content = std::fs::read_to_string(path)?; - assert_eq!( - expect_content, - std::string::String::from_utf8_lossy(&get_output.stdout) - ); - } - Some(out) => compare_files(input.as_path(), out)?, - }; - Ok(()) -} - -/// Test the provide and get loop for success, stderr output, and file contents. -/// -/// Can optionally pipe the given `path` content to the provider from stdin & can optionally save the output to an `out` path. -/// -/// Runs the provider as a child process that stays alive until the getter has completed. Then -/// checks the output of the "provide" and "get" processes against expected regex output. Finally, -/// test the content fetched from the "get" process is the same as the "provided" content. 
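The single-blob test below starts from an all-in-one ticket and decomposes it back into dialing parameters; a standalone sketch of that step (assuming `ticket_str` holds a valid ticket scraped from provider output):

use std::str::FromStr;
use iroh::base::ticket::BlobTicket;

fn dial_params(ticket_str: &str) -> anyhow::Result<()> {
    let ticket = BlobTicket::from_str(ticket_str)?;
    let node_id = ticket.node_addr().node_id;          // whom to dial
    let relay_url = ticket.node_addr().relay_url();    // optional relay hint
    let addrs: Vec<String> = ticket
        .node_addr()
        .direct_addresses()
        .map(|a| a.to_string())                        // direct socket addresses to try
        .collect();
    let hash = ticket.hash();                          // what to fetch
    println!("{node_id} {relay_url:?} {addrs:?} {hash}");
    Ok(())
}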
-fn test_provide_get_loop_single(input: Input, output: Output, hash: Hash) -> Result<()> { - let out = match output { - Output::Stdout => "STDOUT".to_string(), - Output::Path => { - let dir = testdir!(); - dir.join("out").display().to_string() - } - Output::Custom(ref out) => out.display().to_string(), - }; - - let num_blobs = if input.as_path().is_dir() { - WalkDir::new(input.as_path()) - .into_iter() - .filter_map(|x| x.ok().filter(|x| x.file_type().is_file())) - .count() - } else { - 1 - }; - - let dir = testdir!(); - let iroh_data_dir = dir.join("iroh-data-dir"); - - let mut provider = make_provider_in(&iroh_data_dir, input.clone(), true)?; - - // test provide output & get all in one ticket from stderr - let ticket = match_provide_output(&mut provider, num_blobs, BlobOrCollection::Collection)?; - let ticket = BlobTicket::from_str(&ticket).unwrap(); - let addrs = ticket - .node_addr() - .direct_addresses() - .map(|x| x.to_string()) - .collect::>(); - let node = ticket.node_addr().node_id.to_string(); - let relay_url = ticket - .node_addr() - .relay_url() - .context("should have relay url in ticket")? - .to_string(); - - // create a `get-ticket` cmd & optionally provide out path - let mut args = vec!["--start", "blobs", "get", "--node", &node]; - for addr in &addrs { - args.push("--address"); - args.push(addr); - } - args.push("--out"); - args.push(&out); - - args.push("--relay-url"); - args.push(&relay_url); - let hash_str = hash.to_string(); - args.push(&hash_str); - let get_iroh_data_dir = dir.join("get-iroh-data-dir"); - let cmd = cmd(iroh_bin(), args) - .env_remove("RUST_LOG") - .env("IROH_DATA_DIR", get_iroh_data_dir) - .stdout_capture() - .stderr_capture() - .unchecked(); - - // test get stderr output - let get_output = cmd.run()?; - println!("{}", std::str::from_utf8(&get_output.stdout).unwrap()); - println!("{}", std::str::from_utf8(&get_output.stderr).unwrap()); - - provider.kill().expect("failed to kill provider"); - assert!(get_output.status.success()); - - // test output - let expect_content = std::fs::read_to_string(input.as_path())?; - match output { - Output::Stdout => { - assert!(!get_output.stdout.is_empty()); - assert_eq!( - expect_content, - std::string::String::from_utf8_lossy(&get_output.stdout) - ); - } - _ => { - let content = std::fs::read_to_string(out)?; - assert_eq!(expect_content, content); - } - }; - Ok(()) -} - -fn compare_files(expect_path: impl AsRef, got_dir_path: impl AsRef) -> Result<()> { - let expect_path = expect_path.as_ref(); - let got_dir_path = got_dir_path.as_ref(); - if expect_path.is_dir() { - let paths = WalkDir::new(expect_path).into_iter().filter(|x| { - x.as_ref() - .ok() - .map(|x| x.file_type().is_file()) - .unwrap_or(false) - }); - for entry in paths { - let entry = entry?; - let file_path = entry.path(); - let rel = file_path.strip_prefix(expect_path)?; - let expected_file_path = got_dir_path.join(rel); - let got = std::fs::read(file_path)?; - let expect = std::fs::read(expected_file_path)?; - assert_eq!(expect, got); - } - } else { - let file_name = expect_path.file_name().unwrap(); - let expect = std::fs::read(expect_path)?; - let got = std::fs::read(got_dir_path.join(file_name))?; - assert_eq!(expect, got); - } - - Ok(()) -} - -/// Looks for regex matches on stderr output for the getter. 
-/// -/// Errors on the first regex mismatch or if the stderr output has fewer lines than expected -fn match_get_stderr(stderr: Vec) -> Result)>> { - let captures = assert_matches_line( - std::io::Cursor::new(stderr), - [ - (r"Iroh is running", 1), - (r"Node ID: [_\w\d-]*", 1), - (r"", 1), - (r"Fetching: [\da-z]{52}", 1), - ( - r"Transferred (\d*.?\d*? ?[BKMGT]i?B?) in \d* (second|minute)s?, \d*.?\d* ?(?:B|KiB|MiB|GiB|TiB)/s", - 1, - ), - ], - ); - Ok(captures) -} - -enum BlobOrCollection { - Blob, - Collection, -} - -/// Asserts provider output, returning the all-in-one ticket. -/// -/// The provider output is asserted to check if it matches expected output. The all-in-one -/// ticket is parsed out and returned as a string. -/// -/// Returns an error on the first regex mismatch or if the stderr output has fewer lines -/// than expected. -fn match_provide_output( - reader: T, - num_blobs: usize, - kind: BlobOrCollection, -) -> Result { - let reader = BufReader::new(reader); - - let blob_or_collection_matcher = match kind { - BlobOrCollection::Collection => (r"Collection: [\da-z]{52}", 1), - BlobOrCollection::Blob => (r"Blob: [\da-z]{52}", 1), - }; - - let mut caps = assert_matches_line( - reader, - [ - (r"Iroh is running", 1), - (r"Node ID: [_\w\d-]*", 1), - (r"", 1), - (r"Adding .*", 1), - (r"- \S*: \d*.?\d*? ?[BKMGT]i?B?", num_blobs as i64), - (r"Total: [_\w\d-]*", 1), - (r"", 1), - blob_or_collection_matcher, - (r"All-in-one ticket: ([_a-zA-Z\d-]*)", 1), - ], - ); - - // return the capture of the all in one ticket, should be the last capture - let (_, mut last) = caps.pop().context("Expected at least one capture.")?; - let ticket = last.pop().context("expected ticket")?; - Ok(ticket) -} - -/// Ensures each line of the first expression matches the regex of each following expression. Each -/// regex expression is followed by the number of consecutive lines it should match. -/// -/// A match number of `-1` indicates that the regex should match at least once. -/// -/// Returns a vec of `String`s of any captures made against the regex on each line. 
-/// -/// # Examples -/// ``` -/// let expr = b"hello world!\nNice to meet you!\n02/23/2023\n02/23/2023\n02/23/2023"; -/// let buf_reader = std::io::BufReader::new(&expr[..]); -/// assert_matches_line( -/// buf_reader, -/// [ -/// (r"hello world!", 1), -/// (r"\S*$", 1), -/// (r"\d{2}/\d{2}/\d{4}", 3), -/// ], -/// ); -/// ``` -fn assert_matches_line(reader: R, expressions: I) -> Vec<(usize, Vec)> -where - I: IntoIterator, -{ - let mut lines = reader.lines().peekable(); - let mut caps = Vec::new(); - - for (ei, (regex_str, num_matches)) in expressions.into_iter().enumerate() { - let rx = Regex::new(regex_str).expect("invalid regex"); - let mut matches = 0; - - loop { - if num_matches > 0 && matches == num_matches as usize { - break; - } - - match lines.peek() { - Some(Ok(line)) => { - println!("|{}", line); - - let mut line_caps = Vec::new(); - if let Some(cap) = rx.captures(line) { - for i in 0..cap.len() { - if let Some(capture_group) = cap.get(i) { - line_caps.push(capture_group.as_str().to_string()); - } - } - - matches += 1; - } else { - break; - } - caps.push((ei, line_caps)); - } - Some(Err(err)) => { - panic!("Error from reader: {err:#}"); - } - None => { - panic!("All lines read but no match found for /{rx}/"); - } - } - - let _ = lines.next(); - } - - if num_matches == -1 { - if matches == 0 { - println!("Expected at least one match for regex: {}", regex_str); - panic!("no matches found"); - } - } else if matches != num_matches as usize { - println!("Expected {} matches for regex: {}", num_matches, regex_str); - panic!("invalid number of matches"); - } - } - - caps -} - -fn copy_dir_all(src: impl AsRef, dst: impl AsRef) -> anyhow::Result { - let src = src.as_ref(); - let dst = dst.as_ref(); - std::fs::create_dir_all(dst)?; - let mut len = 0; - for entry in std::fs::read_dir(src)? 
{ - let entry = entry - .with_context(|| format!("failed to read directory entry in `{}`", src.display()))?; - let ty = entry.file_type().with_context(|| { - format!( - "failed to get file type for file `{}`", - entry.path().display() - ) - })?; - let src = entry.path(); - let dst = dst.join(entry.file_name()); - if ty.is_dir() { - len += copy_dir_all(&src, &dst).with_context(|| { - format!( - "failed to copy directory `{}` to `{}`", - src.display(), - dst.display() - ) - })?; - } else { - println!("copying {} to {}", src.display(), dst.display()); - std::fs::copy(&src, &dst).with_context(|| { - format!( - "failed to copy file `{}` to `{}`", - src.display(), - dst.display() - ) - })?; - len += 1; - } - } - Ok(len) -} - -fn copy_blob_dirs(src: &Path, tgt: &Path) -> Result<()> { - let dir = &IrohPaths::BaoStoreDir; - copy_dir_all(dir.with_root(src), dir.with_root(tgt))?; - Ok(()) -} diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 7dd6baa3fc..3a2634224c 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -74,7 +74,7 @@ url = { version = "2.5.0", features = ["serde"] } serde-error = "0.1.3" [features] -default = ["metrics", "fs-store", "examples", "example-discovery-local-network"] +default = ["metrics", "fs-store"] metrics = ["iroh-metrics"] fs-store = [] test = [] diff --git a/iroh/src/node.rs b/iroh/src/node.rs index 7ce9217088..d5c5020cdd 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -408,175 +408,3 @@ fn node_address_for_storage(info: RemoteInfo) -> Option { }) } } - -#[cfg(test)] -mod tests { - use anyhow::{bail, Context}; - use bytes::Bytes; - use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket}; - use iroh_blobs::{provider::AddProgress, util::SetTagOption, BlobFormat}; - use iroh_net::{key::SecretKey, test_utils::DnsPkarrServer, NodeAddr, RelayMode}; - - use super::*; - use crate::client::blobs::{AddOutcome, WrapOption}; - - #[tokio::test] - async fn test_ticket_multiple_addrs() { - let _guard = iroh_test::logging::setup(); - - let node = Node::memory().spawn().await.unwrap(); - let hash = node - .client() - .blobs() - .add_bytes(Bytes::from_static(b"hello")) - .await - .unwrap() - .hash; - - let _drop_guard = node.cancel_token().drop_guard(); - let mut addr = node.net().node_addr().await.unwrap(); - addr.apply_options(AddrInfoOptions::RelayAndAddresses); - let ticket = BlobTicket::new(addr, hash, BlobFormat::Raw).unwrap(); - println!("addrs: {:?}", ticket.node_addr().info); - assert!(!ticket.node_addr().info.direct_addresses.is_empty()); - } - - #[tokio::test] - async fn test_node_add_blob_stream() -> Result<()> { - let _guard = iroh_test::logging::setup(); - - use std::io::Cursor; - let node = Node::memory().bind_random_port().spawn().await?; - - let _drop_guard = node.cancel_token().drop_guard(); - let client = node.client(); - let input = vec![2u8; 1024 * 256]; // 265kb so actually streaming, chunk size is 64kb - let reader = Cursor::new(input.clone()); - let progress = client - .blobs() - .add_reader(reader, SetTagOption::Auto) - .await?; - let outcome = progress.finish().await?; - let hash = outcome.hash; - let output = client.blobs().read_to_bytes(hash).await?; - assert_eq!(input, output.to_vec()); - Ok(()) - } - - #[tokio::test] - async fn test_node_add_tagged_blob_event() -> Result<()> { - let _guard = iroh_test::logging::setup(); - - let node = Node::memory().bind_random_port().spawn().await?; - - let _drop_guard = node.cancel_token().drop_guard(); - - let _got_hash = tokio::time::timeout(Duration::from_secs(10), async move { - let mut stream = 
node - .blobs() - .add_from_path( - Path::new(env!("CARGO_MANIFEST_DIR")).join("README.md"), - false, - SetTagOption::Auto, - WrapOption::NoWrap, - ) - .await?; - - while let Some(progress) = stream.next().await { - match progress? { - AddProgress::AllDone { hash, .. } => { - return Ok(hash); - } - AddProgress::Abort(e) => { - bail!("Error while adding data: {e}"); - } - _ => {} - } - } - bail!("stream ended without providing data"); - }) - .await - .context("timeout")? - .context("get failed")?; - - Ok(()) - } - - #[tokio::test] - async fn test_download_via_relay() -> Result<()> { - let _guard = iroh_test::logging::setup(); - let (relay_map, relay_url, _guard) = iroh_net::test_utils::run_relay_server().await?; - - let node1 = Node::memory() - .bind_random_port() - .relay_mode(RelayMode::Custom(relay_map.clone())) - .insecure_skip_relay_cert_verify(true) - .spawn() - .await?; - let node2 = Node::memory() - .bind_random_port() - .relay_mode(RelayMode::Custom(relay_map.clone())) - .insecure_skip_relay_cert_verify(true) - .spawn() - .await?; - let AddOutcome { hash, .. } = node1.blobs().add_bytes(b"foo".to_vec()).await?; - - // create a node addr with only a relay URL, no direct addresses - let addr = NodeAddr::new(node1.node_id()).with_relay_url(relay_url); - node2.blobs().download(hash, addr).await?.await?; - assert_eq!( - node2 - .blobs() - .read_to_bytes(hash) - .await - .context("get")? - .as_ref(), - b"foo" - ); - Ok(()) - } - - #[tokio::test] - #[ignore = "flaky"] - async fn test_download_via_relay_with_discovery() -> Result<()> { - let _guard = iroh_test::logging::setup(); - let (relay_map, _relay_url, _guard) = iroh_net::test_utils::run_relay_server().await?; - let dns_pkarr_server = DnsPkarrServer::run().await?; - - let secret1 = SecretKey::generate(); - let node1 = Node::memory() - .secret_key(secret1.clone()) - .bind_random_port() - .relay_mode(RelayMode::Custom(relay_map.clone())) - .insecure_skip_relay_cert_verify(true) - .dns_resolver(dns_pkarr_server.dns_resolver()) - .node_discovery(dns_pkarr_server.discovery(secret1).into()) - .spawn() - .await?; - let secret2 = SecretKey::generate(); - let node2 = Node::memory() - .secret_key(secret2.clone()) - .bind_random_port() - .relay_mode(RelayMode::Custom(relay_map.clone())) - .insecure_skip_relay_cert_verify(true) - .dns_resolver(dns_pkarr_server.dns_resolver()) - .node_discovery(dns_pkarr_server.discovery(secret2).into()) - .spawn() - .await?; - let hash = node1.blobs().add_bytes(b"foo".to_vec()).await?.hash; - - // create a node addr with node id only - let addr = NodeAddr::new(node1.node_id()); - node2.blobs().download(hash, addr).await?.await?; - assert_eq!( - node2 - .blobs() - .read_to_bytes(hash) - .await - .context("get")? 
- .as_ref(), - b"foo" - ); - Ok(()) - } -} From 228722e4cebb25265af45ce255761d5d87609e0b Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 19 Nov 2024 11:22:53 +0100 Subject: [PATCH 07/17] remove examples depending on iroh-blobs --- Cargo.lock | 125 +--------- Cargo.toml | 2 - deny.toml | 3 - iroh/Cargo.toml | 29 +-- iroh/examples/custom-protocol.rs | 318 ------------------------- iroh/examples/hello-world-fetch.rs | 101 -------- iroh/examples/hello-world-provide.rs | 76 ------ iroh/examples/local-swarm-discovery.rs | 284 ---------------------- 8 files changed, 3 insertions(+), 935 deletions(-) delete mode 100644 iroh/examples/custom-protocol.rs delete mode 100644 iroh/examples/hello-world-fetch.rs delete mode 100644 iroh/examples/hello-world-provide.rs delete mode 100644 iroh/examples/local-swarm-discovery.rs diff --git a/Cargo.lock b/Cargo.lock index 4aedb41a9b..f6779670d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,18 +42,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.3" @@ -500,9 +488,6 @@ name = "bytes" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" -dependencies = [ - "serde", -] [[package]] name = "camino" @@ -587,10 +572,8 @@ checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", "serde", - "wasm-bindgen", "windows-targets 0.52.6", ] @@ -1829,9 +1812,6 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash", -] [[package]] name = "hashbrown" @@ -1844,15 +1824,6 @@ dependencies = [ "foldhash", ] -[[package]] -name = "hashlink" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" -dependencies = [ - "hashbrown 0.14.5", -] - [[package]] name = "hdrhistogram" version = "7.5.4" @@ -2509,7 +2480,6 @@ dependencies = [ "bytes", "cc", "clap", - "console", "derive_more", "futures-buffered", "futures-lite 2.5.0", @@ -2518,7 +2488,6 @@ dependencies = [ "indicatif", "iroh", "iroh-base", - "iroh-blobs", "iroh-io", "iroh-metrics", "iroh-net", @@ -2574,7 +2543,7 @@ dependencies = [ "proptest", "rand", "rand_core", - "redb 2.2.0", + "redb", "serde", "serde_json", "serde_test", @@ -2598,57 +2567,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "iroh-blobs" -version = "0.28.1" -source = "git+https://github.com/n0-computer/iroh-blobs?branch=main#cdcb863d7c75cdd7258c8f5001154272df7460c0" -dependencies = [ - "anyhow", - "async-channel", - "bao-tree", - "bytes", - "chrono", - "derive_more", - "futures-buffered", - "futures-lite 2.5.0", - "futures-util", - "genawaiter", - "hashlink", - "hex", - "iroh-base", - "iroh-io", - "iroh-metrics", - "iroh-net", - "iroh-quinn", - "iroh-router", - "nested_enum_utils", - "num_cpus", - "oneshot", - "parking_lot", - "portable-atomic", - "postcard", - "quic-rpc", - "quic-rpc-derive", - "rand", - "range-collections", - "redb 1.5.1", - "redb 2.2.0", - 
"ref-cast", - "reflink-copy", - "self_cell", - "serde", - "serde-error", - "smallvec", - "strum 0.26.3", - "tempfile", - "thiserror 2.0.3", - "tokio", - "tokio-util", - "tracing", - "tracing-futures", - "walkdir", -] - [[package]] name = "iroh-dns-server" version = "0.28.0" @@ -2676,7 +2594,7 @@ dependencies = [ "parking_lot", "pkarr", "rcgen 0.13.1", - "redb 2.2.0", + "redb", "regex", "rustls", "rustls-pemfile", @@ -3667,12 +3585,6 @@ version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" -[[package]] -name = "oneshot" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e296cf87e61c9cfc1a61c3c63a0f7f286ed4554e0e22be84e8a38e1d264a2a29" - [[package]] name = "oorandom" version = "11.1.4" @@ -4495,15 +4407,6 @@ dependencies = [ "yasna", ] -[[package]] -name = "redb" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd7f82ecd6ba647a39dd1a7172b8a1cd9453c0adee6da20cb553d83a9a460fa5" -dependencies = [ - "libc", -] - [[package]] name = "redb" version = "2.2.0" @@ -4553,17 +4456,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "reflink-copy" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17400ed684c3a0615932f00c271ae3eea13e47056a1455821995122348ab6438" -dependencies = [ - "cfg-if", - "rustix", - "windows 0.58.0", -] - [[package]] name = "regex" version = "1.11.1" @@ -5231,9 +5123,6 @@ name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" -dependencies = [ - "serde", -] [[package]] name = "smol_str" @@ -6015,16 +5904,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index c66757b070..2574ed852a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,5 +55,3 @@ iroh-net = { path = "./iroh-net" } iroh-metrics = { path = "./iroh-metrics" } iroh-test = { path = "./iroh-test" } iroh-router = { path = "./iroh-router" } - -iroh-blobs = { git = "https://github.com/n0-computer/iroh-blobs", branch = "main" } diff --git a/deny.toml b/deny.toml index 845bdf79a5..800c849d90 100644 --- a/deny.toml +++ b/deny.toml @@ -28,6 +28,3 @@ ignore = [ "RUSTSEC-2024-0370", # unmaintained, no upgrade available "RUSTSEC-2024-0384", # unmaintained, no upgrade available ] - -[sources] -allow-git = ["https://github.com/n0-computer/iroh-blobs.git"] diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 3a2634224c..153ee24995 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -62,12 +62,6 @@ ref-cast = "1.0.23" # Examples clap = { version = "4", features = ["derive"], optional = true } indicatif = { version = "0.17", features = ["tokio"], optional = true } -console = { version = "0.15.5", optional = true } -iroh-blobs = { version = "0.28", optional = true, features = [ - "rpc", - "downloader", - "net_protocol", -] } # Documentation tests url = { version = "2.5.0", features = ["serde"] } @@ -81,12 +75,7 @@ test = [] discovery-pkarr-dht = ["iroh-net/discovery-pkarr-dht"] test-utils = ["iroh-net/test-utils"] -examples = ["dep:clap", 
"dep:indicatif", "dep:iroh-blobs"] -example-discovery-local-network = [ - "iroh-net/discovery-local-network", - "examples", - "dep:console", -] +examples = ["dep:clap", "dep:indicatif"] [dev-dependencies] anyhow = { version = "1" } @@ -110,22 +99,6 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] } all-features = true rustdoc-args = ["--cfg", "iroh_docsrs"] -[[example]] -name = "hello-world-provide" -required-features = ["examples"] - -[[example]] -name = "hello-world-fetch" -required-features = ["examples"] - [[example]] name = "rpc" required-features = ["examples"] - -[[example]] -name = "custom-protocol" -required-features = ["examples"] - -[[example]] -name = "local-swarm-discovery" -required-features = ["example-discovery-local-network"] diff --git a/iroh/examples/custom-protocol.rs b/iroh/examples/custom-protocol.rs deleted file mode 100644 index 849284ff08..0000000000 --- a/iroh/examples/custom-protocol.rs +++ /dev/null @@ -1,318 +0,0 @@ -//! Example for adding a custom protocol to a iroh node. -//! -//! We are building a very simple custom protocol here, and make our iroh nodes speak this protocol -//! in addition to the built-in protocols (blobs, gossip, docs). -//! -//! Our custom protocol allows querying the blob store of other nodes for text matches. For -//! this, we keep a very primitive index of the UTF-8 text of our blobs. -//! -//! The example is contrived - we only use memory nodes, and our database is a hashmap in a mutex, -//! and our queries just match if the query string appears as-is in a blob. -//! Nevertheless, this shows how powerful systems can be built with custom protocols by also using -//! the existing iroh protocols (blobs in this case). -//! -//! ## Usage -//! -//! In one terminal, run -//! -//! cargo run --example custom-protocol --features=examples -- listen "hello-world" "foo-bar" "hello-moon" -//! -//! This spawns an iroh nodes with three blobs. It will print the node's node id. -//! -//! In another terminal, run -//! -//! cargo run --example custom-protocol --features=examples -- query hello -//! -//! Replace with the node id from above. This will connect to the listening node with our -//! custom protocol and query for the string `hello`. The listening node will return a list of -//! blob hashes that contain `hello`. We will then download all these blobs with iroh-blobs, -//! and then print a list of the hashes with their content. -//! -//! For this example, this will print: -//! -//! moobakc6gao3ufmk: hello moon -//! 25eyd35hbigiqc4n: hello world -//! -//! That's it! Follow along in the code below, we added a bunch of comments to explain things. - -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; - -use anyhow::Result; -use clap::Parser; -use futures_lite::future::Boxed as BoxedFuture; -use iroh::{ - net::{ - endpoint::{get_remote_node_id, Connecting}, - Endpoint, NodeId, - }, - router::ProtocolHandler, -}; -use iroh_base::hash::Hash; -use iroh_blobs::{ - downloader::Downloader, net_protocol::Blobs, rpc::client::blobs::MemClient, - util::local_pool::LocalPool, -}; -use tracing_subscriber::{prelude::*, EnvFilter}; - -#[derive(Debug, Parser)] -pub struct Cli { - #[clap(subcommand)] - command: Command, -} - -#[derive(Debug, Parser)] -pub enum Command { - /// Spawn a node in listening mode. - Listen { - /// Each text string will be imported as a blob and inserted into the search database. - text: Vec, - }, - /// Query a remote node for data and print the results. - Query { - /// The node id of the node we want to query. 
- node_id: NodeId, - /// The text we want to match. - query: String, - }, -} - -/// Each custom protocol is identified by its ALPN string. -/// -/// The ALPN, or application-layer protocol negotiation, is exchanged in the connection handshake, -/// and the connection is aborted unless both nodes pass the same bytestring. -const ALPN: &[u8] = b"iroh-example/text-search/0"; - -#[tokio::main] -async fn main() -> Result<()> { - setup_logging(); - let args = Cli::parse(); - - // Build a in-memory node. For production code, you'd want a persistent node instead usually. - let mut builder = iroh::node::Node::memory().build().await?; - let local_pool = LocalPool::default(); - let store = iroh_blobs::store::mem::Store::new(); - let downloader = Downloader::new( - store.clone(), - builder.endpoint().clone(), - local_pool.handle().clone(), - ); - let blobs = Arc::new(Blobs::new_with_events( - store, - local_pool.handle().clone(), - Default::default(), - downloader, - builder.endpoint().clone(), - )); - let blobs_client = blobs.clone().client(); - builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs); - - // Build our custom protocol handler. The `builder` exposes access to various subsystems in the - // iroh node. In our case, we need a blobs client and the endpoint. - let proto = BlobSearch::new(blobs_client.clone(), builder.endpoint().clone()); - - // Add our protocol, identified by our ALPN, to the node, and spawn the node. - let node = builder.accept(ALPN.to_vec(), proto.clone()).spawn().await?; - - match args.command { - Command::Listen { text } => { - let node_id = node.node_id(); - println!("our node id: {node_id}"); - - // Insert the text strings as blobs and index them. - for text in text.into_iter() { - proto.insert_and_index(text).await?; - } - - // Wait for Ctrl-C to be pressed. - tokio::signal::ctrl_c().await?; - } - Command::Query { node_id, query } => { - // Query the remote node. - // This will send the query over our custom protocol, read hashes on the reply stream, - // and download each hash over iroh-blobs. - let hashes = proto.query_remote(node_id, &query).await?; - - // Print out our query results. - for hash in hashes { - read_and_print(&blobs_client, hash).await?; - } - } - } - - node.shutdown().await?; - - Ok(()) -} - -#[derive(Debug, Clone)] -struct BlobSearch { - blobs: MemClient, - endpoint: Endpoint, - index: Arc>>, -} - -impl ProtocolHandler for BlobSearch { - /// The `accept` method is called for each incoming connection for our ALPN. - /// - /// The returned future runs on a newly spawned tokio task, so it can run as long as - /// the connection lasts. - fn accept(self: Arc, connecting: Connecting) -> BoxedFuture> { - // We have to return a boxed future from the handler. - Box::pin(async move { - // Wait for the connection to be fully established. - let connection = connecting.await?; - // We can get the remote's node id from the connection. - let node_id = get_remote_node_id(&connection)?; - println!("accepted connection from {node_id}"); - - // Our protocol is a simple request-response protocol, so we expect the - // connecting peer to open a single bi-directional stream. - let (mut send, mut recv) = connection.accept_bi().await?; - - // We read the query from the receive stream, while enforcing a max query length. - let query_bytes = recv.read_to_end(64).await?; - - // Now, we can perform the actual query on our local database. 
- let query = String::from_utf8(query_bytes)?; - let hashes = self.query_local(&query); - - // We want to return a list of hashes. We do the simplest thing possible, and just send - // one hash after the other. Because the hashes have a fixed size of 32 bytes, this is - // very easy to parse on the other end. - for hash in hashes { - send.write_all(hash.as_bytes()).await?; - } - - // By calling `finish` on the send stream we signal that we will not send anything - // further, which makes the receive stream on the other end terminate. - send.finish()?; - // By calling stopped we wait until the remote iroh Endpoint has acknowledged - // all data. This does not mean the remote application has received all data - // from the Endpoint. - send.stopped().await?; - Ok(()) - }) - } -} - -impl BlobSearch { - /// Create a new protocol handler. - pub fn new(blobs: MemClient, endpoint: Endpoint) -> Arc { - Arc::new(Self { - blobs, - endpoint, - index: Default::default(), - }) - } - - /// Query a remote node, download all matching blobs and print the results. - pub async fn query_remote(&self, node_id: NodeId, query: &str) -> Result> { - // Establish a connection to our node. - // We use the default node discovery in iroh, so we can connect by node id without - // providing further information. - let conn = self.endpoint.connect(node_id, ALPN).await?; - - // Open a bi-directional in our connection. - let (mut send, mut recv) = conn.open_bi().await?; - - // Send our query. - send.write_all(query.as_bytes()).await?; - - // Finish the send stream, signalling that no further data will be sent. - // This makes the `read_to_end` call on the accepting side terminate. - send.finish()?; - // By calling stopped we wait until the remote iroh Endpoint has acknowledged all - // data. This does not mean the remote application has received all data from the - // Endpoint. - send.stopped().await?; - - // In this example, we simply collect all results into a vector. - // For real protocols, you'd usually want to return a stream of results instead. - let mut out = vec![]; - - // The response is sent as a list of 32-byte long hashes. - // We simply read one after the other into a byte buffer. - let mut hash_bytes = [0u8; 32]; - loop { - // Read 32 bytes from the stream. - match recv.read_exact(&mut hash_bytes).await { - // FinishedEarly means that the remote side did not send further data, - // so in this case we break our loop. - Err(quinn::ReadExactError::FinishedEarly(_)) => break, - // Other errors are connection errors, so we bail. - Err(err) => return Err(err.into()), - Ok(_) => {} - }; - // Upcast the raw bytes to the `Hash` type. - let hash = Hash::from_bytes(hash_bytes); - // Download the content via iroh-blobs. - self.blobs.download(hash, node_id.into()).await?.await?; - // Add the blob to our local database. - self.add_to_index(hash).await?; - out.push(hash); - } - Ok(out) - } - - /// Query the local database. - /// - /// Returns the list of hashes of blobs which contain `query` literally. - pub fn query_local(&self, query: &str) -> Vec { - let db = self.index.lock().unwrap(); - db.iter() - .filter_map(|(text, hash)| text.contains(query).then_some(*hash)) - .collect::>() - } - - /// Insert a text string into the database. - /// - /// This first imports the text as a blob into the iroh blob store, and then inserts a - /// reference to that hash in our (primitive) text database. 
- pub async fn insert_and_index(&self, text: String) -> Result { - let hash = self.blobs.add_bytes(text.into_bytes()).await?.hash; - self.add_to_index(hash).await?; - Ok(hash) - } - - /// Index a blob which is already in our blob store. - /// - /// This only indexes complete blobs that are smaller than 1KiB. - /// - /// Returns `true` if the blob was indexed. - async fn add_to_index(&self, hash: Hash) -> Result { - let mut reader = self.blobs.read(hash).await?; - // Skip blobs larger than 1KiB. - if reader.size() > 1024 * 1024 { - return Ok(false); - } - let bytes = reader.read_to_bytes().await?; - match String::from_utf8(bytes.to_vec()) { - Ok(text) => { - let mut db = self.index.lock().unwrap(); - db.insert(text, hash); - Ok(true) - } - Err(_err) => Ok(false), - } - } -} - -/// Read a blob from the local blob store and print it to STDOUT. -async fn read_and_print(blobs: &MemClient, hash: Hash) -> Result<()> { - let content = blobs.read_to_bytes(hash).await?; - let message = String::from_utf8(content.to_vec())?; - println!("{}: {message}", hash.fmt_short()); - Ok(()) -} - -/// Set the RUST_LOG env var to one of {debug,info,warn} to see logging. -fn setup_logging() { - tracing_subscriber::registry() - .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr)) - .with(EnvFilter::from_default_env()) - .try_init() - .ok(); -} diff --git a/iroh/examples/hello-world-fetch.rs b/iroh/examples/hello-world-fetch.rs deleted file mode 100644 index 3c86be6007..0000000000 --- a/iroh/examples/hello-world-fetch.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! An example that fetches an iroh blob and prints the contents. -//! Will only work with blobs and collections that contain text, and is meant as a companion to the `hello-world-get` examples. -//! -//! This is using an in memory database and a random node id. -//! Run the `provide` example, which will give you instructions on how to run this example. 
-use std::{env, str::FromStr, sync::Arc}; - -use anyhow::{bail, ensure, Context, Result}; -use iroh::base::ticket::BlobTicket; -use iroh_blobs::{ - downloader::Downloader, net_protocol::Blobs, util::local_pool::LocalPool, BlobFormat, -}; -use tracing_subscriber::{prelude::*, EnvFilter}; - -// set the RUST_LOG env var to one of {debug,info,warn} to see logging info -pub fn setup_logging() { - tracing_subscriber::registry() - .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr)) - .with(EnvFilter::from_default_env()) - .try_init() - .ok(); -} - -#[tokio::main] -async fn main() -> Result<()> { - setup_logging(); - println!("\n'Hello World' fetch example!"); - // get the ticket - let args: Vec = env::args().collect(); - - if args.len() != 2 { - bail!("expected one argument [BLOB_TICKET]\n\nGet a ticket by running the follow command in a separate terminal:\n\n`cargo run --example hello-world-provide`"); - } - - // deserialize ticket string into a ticket - let ticket = - BlobTicket::from_str(&args[1]).context("failed parsing blob ticket\n\nGet a ticket by running the follow command in a separate terminal:\n\n`cargo run --example hello-world-provide`")?; - - // create a new node - let mut builder = iroh::node::Node::memory().build().await?; - let local_pool = LocalPool::default(); - let store = iroh_blobs::store::mem::Store::new(); - let downloader = Downloader::new( - store.clone(), - builder.endpoint().clone(), - local_pool.handle().clone(), - ); - let blobs = Arc::new(Blobs::new_with_events( - store, - local_pool.handle().clone(), - Default::default(), - downloader, - builder.endpoint().clone(), - )); - let blobs_client = blobs.clone().client(); - builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs); - let node = builder.spawn().await?; - - println!("fetching hash: {}", ticket.hash()); - println!("node id: {}", node.node_id()); - println!("node listening addresses:"); - let addrs = node.net().node_addr().await?; - for addr in addrs.direct_addresses() { - println!("\t{:?}", addr); - } - println!( - "node relay server url: {:?}", - node.home_relay() - .expect("a default relay url should be provided") - .to_string() - ); - - // If the `BlobFormat` is `Raw`, we have the hash for a single blob, and simply need to read the blob using the `blobs` API on the client to get the content. - ensure!( - ticket.format() == BlobFormat::Raw, - "'Hello World' example expects to fetch a single blob, but the ticket indicates a collection.", - ); - - // `download` returns a stream of `DownloadProgress` events. You can iterate through these updates to get progress - // on the state of your download. - let download_stream = blobs_client - .download(ticket.hash(), ticket.node_addr().clone()) - .await?; - - // You can also just `await` the stream, which will poll the `DownloadProgress` stream for you. - let outcome = download_stream.await.context("unable to download hash")?; - - println!( - "\ndownloaded {} bytes from node {}", - outcome.downloaded_size, - ticket.node_addr().node_id - ); - - // Get the content we have just fetched from the iroh database. - - let bytes = blobs_client.read_to_bytes(ticket.hash()).await?; - let s = std::str::from_utf8(&bytes).context("unable to parse blob as as utf-8 string")?; - println!("{s}"); - - Ok(()) -} diff --git a/iroh/examples/hello-world-provide.rs b/iroh/examples/hello-world-provide.rs deleted file mode 100644 index 53fd92a322..0000000000 --- a/iroh/examples/hello-world-provide.rs +++ /dev/null @@ -1,76 +0,0 @@ -//! 
The smallest possible example to spin up a node and serve a single blob. -//! -//! This is using an in memory database and a random node id. -//! run this example from the project root: -//! $ cargo run --example hello-world-provide -use std::sync::Arc; - -use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket}; -use iroh_blobs::{downloader::Downloader, net_protocol::Blobs, util::local_pool::LocalPool}; -use tracing_subscriber::{prelude::*, EnvFilter}; - -// set the RUST_LOG env var to one of {debug,info,warn} to see logging info -pub fn setup_logging() { - tracing_subscriber::registry() - .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr)) - .with(EnvFilter::from_default_env()) - .try_init() - .ok(); -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - setup_logging(); - println!("'Hello World' provide example!"); - - // create a new node - let mut builder = iroh::node::Node::memory().build().await?; - let local_pool = LocalPool::default(); - let store = iroh_blobs::store::mem::Store::new(); - let downloader = Downloader::new( - store.clone(), - builder.endpoint().clone(), - local_pool.handle().clone(), - ); - let blobs = Arc::new(Blobs::new_with_events( - store, - local_pool.handle().clone(), - Default::default(), - downloader, - builder.endpoint().clone(), - )); - let blobs_client = blobs.clone().client(); - builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs); - let node = builder.spawn().await?; - - // add some data and remember the hash - let res = blobs_client.add_bytes("Hello, world!").await?; - - // create a ticket - let mut addr = node.net().node_addr().await?; - addr.apply_options(AddrInfoOptions::RelayAndAddresses); - let ticket = BlobTicket::new(addr, res.hash, res.format)?; - - // print some info about the node - println!("serving hash: {}", ticket.hash()); - println!("node id: {}", ticket.node_addr().node_id); - println!("node listening addresses:"); - for addr in ticket.node_addr().direct_addresses() { - println!("\t{:?}", addr); - } - println!( - "node relay server url: {:?}", - ticket - .node_addr() - .relay_url() - .expect("a default relay url should be provided") - .to_string() - ); - // print the ticket, containing all the above information - println!("\nin another terminal, run:"); - println!("\t cargo run --example hello-world-fetch {}", ticket); - // block until SIGINT is received (ctrl+c) - tokio::signal::ctrl_c().await?; - node.shutdown().await?; - Ok(()) -} diff --git a/iroh/examples/local-swarm-discovery.rs b/iroh/examples/local-swarm-discovery.rs deleted file mode 100644 index 4dcf121e5e..0000000000 --- a/iroh/examples/local-swarm-discovery.rs +++ /dev/null @@ -1,284 +0,0 @@ -//! Example that runs and iroh node with local node discovery and no relay server -//! -//! Run the follow command to run the "accept" side, that hosts the content: -//! $ cargo run --example local_swarm_discovery --features="discovery-local-network" -- accept [FILE_PATH] -//! Wait for output that looks like the following: -//! $ cargo run --example local_swarm_discovery --features="discovery-local-network" -- connect [NODE_ID] [HASH] -o [FILE_PATH] -//! Run that command on another machine in the same local network, replacing [FILE_PATH] to the path on which you want to save the transferred content. 
-use std::{path::PathBuf, sync::Arc}; - -use anyhow::ensure; -use clap::{Parser, Subcommand}; -use iroh::{ - base::{hash::Hash, key::SecretKey}, - net::{discovery::local_swarm_discovery::LocalSwarmDiscovery, key::PublicKey, NodeAddr}, - node::DiscoveryConfig, -}; -use iroh_blobs::{ - downloader::Downloader, net_protocol::Blobs, rpc::client::blobs::WrapOption, - util::local_pool::LocalPool, -}; -use tracing_subscriber::{prelude::*, EnvFilter}; - -use self::progress::show_download_progress; - -// set the RUST_LOG env var to one of {debug,info,warn} to see logging info -pub fn setup_logging() { - tracing_subscriber::registry() - .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr)) - .with(EnvFilter::from_default_env()) - .try_init() - .ok(); -} - -#[derive(Debug, Parser)] -#[command(version, about)] -pub struct Cli { - #[clap(subcommand)] - command: Commands, -} - -#[derive(Subcommand, Clone, Debug)] -pub enum Commands { - /// Launch an iroh node and provide the content at the given path - Accept { - /// path to the file you want to provide - path: PathBuf, - }, - /// Get the node_id and hash string from a node running accept in the local network - /// Download the content from that node. - Connect { - /// Node ID of a node on the local network - node_id: PublicKey, - /// Hash of content you want to download from the node - hash: Hash, - /// save the content to a file - #[clap(long, short)] - out: Option, - }, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - setup_logging(); - let cli = Cli::parse(); - - let key = SecretKey::generate(); - let discovery = LocalSwarmDiscovery::new(key.public())?; - let cfg = DiscoveryConfig::Custom(Box::new(discovery)); - - println!("Starting iroh node with local node discovery..."); - // create a new node - let mut builder = iroh::node::Node::memory() - .secret_key(key) - .node_discovery(cfg) - .bind_random_port() - .relay_mode(iroh_net::RelayMode::Disabled) - .build() - .await?; - let local_pool = LocalPool::default(); - let store = iroh_blobs::store::mem::Store::new(); - let downloader = Downloader::new( - store.clone(), - builder.endpoint().clone(), - local_pool.handle().clone(), - ); - let blobs = Arc::new(Blobs::new_with_events( - store, - local_pool.handle().clone(), - Default::default(), - downloader, - builder.endpoint().clone(), - )); - let blobs_client = blobs.clone().client(); - builder = builder.accept(iroh_blobs::protocol::ALPN.to_vec(), blobs); - let node = builder.spawn().await?; - - match &cli.command { - Commands::Accept { path } => { - if !path.is_file() { - println!("Content must be a file."); - node.shutdown().await?; - return Ok(()); - } - let absolute = path.canonicalize()?; - println!("Adding {} as {}...", path.display(), absolute.display()); - let stream = blobs_client - .add_from_path( - absolute, - true, - iroh_blobs::util::SetTagOption::Auto, - WrapOption::NoWrap, - ) - .await?; - let outcome = stream.finish().await?; - println!("To fetch the blob:\n\tcargo run --example local_swarm_discovery --features=\"local-swarm-discovery\" -- connect {} {} -o [FILE_PATH]", node.node_id(), outcome.hash); - tokio::signal::ctrl_c().await?; - node.shutdown().await?; - std::process::exit(0); - } - Commands::Connect { node_id, hash, out } => { - println!("NodeID: {}", node.node_id()); - let mut stream = blobs_client - .download(*hash, NodeAddr::new(*node_id)) - .await?; - show_download_progress(*hash, &mut stream).await?; - if let Some(path) = out { - let absolute = std::env::current_dir()?.join(path); - 
ensure!(!absolute.is_dir(), "output must not be a directory"); - tracing::info!( - "exporting {hash} to {} -> {}", - path.display(), - absolute.display() - ); - let stream = blobs_client - .export( - *hash, - absolute, - iroh_blobs::store::ExportFormat::Blob, - iroh_blobs::store::ExportMode::Copy, - ) - .await?; - stream.await?; - } - } - } - Ok(()) -} - -mod progress { - use anyhow::{bail, Result}; - use console::style; - use futures_lite::{Stream, StreamExt}; - use indicatif::{ - HumanBytes, HumanDuration, MultiProgress, ProgressBar, ProgressDrawTarget, ProgressState, - ProgressStyle, - }; - use iroh_blobs::{ - get::{db::DownloadProgress, progress::BlobProgress, Stats}, - Hash, - }; - - pub async fn show_download_progress( - hash: Hash, - mut stream: impl Stream> + Unpin, - ) -> Result<()> { - eprintln!("Fetching: {}", hash); - let mp = MultiProgress::new(); - mp.set_draw_target(ProgressDrawTarget::stderr()); - let op = mp.add(make_overall_progress()); - let ip = mp.add(make_individual_progress()); - op.set_message(format!("{} Connecting ...\n", style("[1/3]").bold().dim())); - let mut seq = false; - while let Some(x) = stream.next().await { - match x? { - DownloadProgress::InitialState(state) => { - if state.connected { - op.set_message(format!("{} Requesting ...\n", style("[2/3]").bold().dim())); - } - if let Some(count) = state.root.child_count { - op.set_message(format!( - "{} Downloading {} blob(s)\n", - style("[3/3]").bold().dim(), - count + 1, - )); - op.set_length(count + 1); - op.reset(); - op.set_position(state.current.map(u64::from).unwrap_or(0)); - seq = true; - } - if let Some(blob) = state.get_current() { - if let Some(size) = blob.size { - ip.set_length(size.value()); - ip.reset(); - match blob.progress { - BlobProgress::Pending => {} - BlobProgress::Progressing(offset) => ip.set_position(offset), - BlobProgress::Done => ip.finish_and_clear(), - } - if !seq { - op.finish_and_clear(); - } - } - } - } - DownloadProgress::FoundLocal { .. } => {} - DownloadProgress::Connected => { - op.set_message(format!("{} Requesting ...\n", style("[2/3]").bold().dim())); - } - DownloadProgress::FoundHashSeq { children, .. } => { - op.set_message(format!( - "{} Downloading {} blob(s)\n", - style("[3/3]").bold().dim(), - children + 1, - )); - op.set_length(children + 1); - op.reset(); - seq = true; - } - DownloadProgress::Found { size, child, .. } => { - if seq { - op.set_position(child.into()); - } else { - op.finish_and_clear(); - } - ip.set_length(size); - ip.reset(); - } - DownloadProgress::Progress { offset, .. } => { - ip.set_position(offset); - } - DownloadProgress::Done { .. } => { - ip.finish_and_clear(); - } - DownloadProgress::AllDone(Stats { - bytes_read, - elapsed, - .. 
- }) => { - op.finish_and_clear(); - eprintln!( - "Transferred {} in {}, {}/s", - HumanBytes(bytes_read), - HumanDuration(elapsed), - HumanBytes((bytes_read as f64 / elapsed.as_secs_f64()) as u64) - ); - break; - } - DownloadProgress::Abort(e) => { - bail!("download aborted: {}", e); - } - } - } - Ok(()) - } - fn make_overall_progress() -> ProgressBar { - let pb = ProgressBar::hidden(); - pb.enable_steady_tick(std::time::Duration::from_millis(100)); - pb.set_style( - ProgressStyle::with_template( - "{msg}{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {pos}/{len}", - ) - .unwrap() - .progress_chars("#>-"), - ); - pb - } - - fn make_individual_progress() -> ProgressBar { - let pb = ProgressBar::hidden(); - pb.enable_steady_tick(std::time::Duration::from_millis(100)); - pb.set_style( - ProgressStyle::with_template("{msg}{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})") - .unwrap() - .with_key( - "eta", - |state: &ProgressState, w: &mut dyn std::fmt::Write| { - write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap() - }, - ) - .progress_chars("#>-"), - ); - pb - } -} From c29c5b1fee2b7ef1cf961daa7900b3e8983e6506 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 19 Nov 2024 12:34:21 +0100 Subject: [PATCH 08/17] cleanup: remove docker --- .dockerignore | 2 - .github/workflows/ci.yml | 54 ---------------- .github/workflows/docker.yaml | 115 ---------------------------------- docker/Dockerfile | 84 ------------------------- docker/Dockerfile.ci | 52 --------------- docker/README.md | 31 --------- 6 files changed, 338 deletions(-) delete mode 100644 .dockerignore delete mode 100644 .github/workflows/docker.yaml delete mode 100644 docker/Dockerfile delete mode 100644 docker/Dockerfile.ci delete mode 100644 docker/README.md diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 540c2ede1c..0000000000 --- a/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -docker -target \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1b0b6e6ea5..c9c20656d3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -298,60 +298,6 @@ jobs: sim_paths: "sims/iroh/iroh.json,sims/integration" pr_number: ${{ github.event.pull_request.number || '' }} - docker_build_and_test: - name: Docker Test - if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" - timeout-minutes: 30 - runs-on: [self-hosted, linux, X64] - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Install rust - uses: dtolnay/rust-toolchain@master - with: - toolchain: stable - - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.6 - - - name: Prep - run: sudo apt-get install musl-tools -y - - - name: Build iroh - run: cargo build --profile=dev-ci --all-features --bin iroh --target x86_64-unknown-linux-musl - - - name: Prep bins - run: | - mkdir -p bins/linux/amd64 - cp target/x86_64-unknown-linux-musl/dev-ci/iroh bins/linux/amd64/iroh - - - name: Cleanup Docker - continue-on-error: true - run: | - docker kill $(docker ps -q) - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build Docker image - uses: docker/build-push-action@v6 - with: - context: . 
- push: false - load: true - tags: n0computer/iroh-test:latest - target: iroh - platforms: linux/amd64 - file: docker/Dockerfile.ci - - - name: Run Docker image & stats test - run: | - docker run -p 9090:9090 -p 4919:4919/udp -Pd n0computer/iroh-test:latest --rpc-addr 0.0.0.0:4919 start - # Give the server time to start - sleep 3 - target/x86_64-unknown-linux-musl/dev-ci/iroh --rpc-addr 127.0.0.1:4919 stats - codespell: timeout-minutes: 30 runs-on: ubuntu-latest diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml deleted file mode 100644 index 0216b7e25a..0000000000 --- a/.github/workflows/docker.yaml +++ /dev/null @@ -1,115 +0,0 @@ -name: Docker - -on: - workflow_dispatch: - inputs: - release_version: - description: "Release version" - required: true - type: string - default: "" - base_hash: - description: "Commit hash from which to build" - required: true - type: string - default: "" - publish: - description: "Publish to Docker Hub" - required: true - type: boolean - default: false - workflow_call: - inputs: - release_version: - description: "Release version" - required: true - type: string - default: "" - base_hash: - description: "Commit hash from which to build" - required: true - type: string - default: "" - publish: - description: "Publish to Docker Hub" - required: true - type: boolean - default: false - -env: - IROH_FORCE_STAGING_RELAYS: "1" - -jobs: - build_and_publish: - timeout-minutes: 30 - name: Docker - runs-on: [self-hosted, linux, X64] - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Prep dirs - run: | - mkdir -p bins/linux/amd64 - mkdir -p bins/linux/arm64 - - - name: Setup awscli on linux - run: | - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" - unzip awscliv2.zip - sudo ./aws/install --update - - - name: Set aws credentials - run: | - echo "AWS_ACCESS_KEY_ID=${{secrets.S3_ACCESS_KEY_ID}}" >> $GITHUB_ENV - echo "AWS_SECRET_ACCESS_KEY=${{secrets.S3_ACCESS_KEY}}" >> $GITHUB_ENV - echo "AWS_DEFAULT_REGION=us-west-2" >> $GITHUB_ENV - - - name: Fetch release binaries - run: | - aws s3 cp s3://vorc/iroh-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh - aws s3 cp s3://vorc/iroh-relay-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh-relay - aws s3 cp s3://vorc/iroh-dns-server-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh-dns-server - - aws s3 cp s3://vorc/iroh-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh - aws s3 cp s3://vorc/iroh-relay-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh-relay - aws s3 cp s3://vorc/iroh-dns-server-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh-dns-server - - - name: Build Docker image (iroh) - uses: docker/build-push-action@v6 - with: - context: . - push: ${{ inputs.publish }} - tags: n0computer/iroh:latest,n0computer/iroh:${{ inputs.release_version }} - target: iroh - platforms: linux/amd64,linux/arm64/v8 - file: docker/Dockerfile.ci - - - name: Build Docker image (iroh-relay) - uses: docker/build-push-action@v6 - with: - context: . 
- push: ${{ inputs.publish }} - tags: n0computer/iroh-relay:latest,n0computer/iroh-relay:${{ inputs.release_version }} - target: iroh-relay - platforms: linux/amd64,linux/arm64/v8 - file: docker/Dockerfile.ci - - - name: Build Docker image (iroh-dns-server) - uses: docker/build-push-action@v6 - with: - context: . - push: ${{ inputs.publish }} - tags: n0computer/iroh-dns-server:latest,n0computer/iroh-dns-server:${{ inputs.release_version }} - target: iroh-dns-server - platforms: linux/amd64,linux/arm64/v8 - file: docker/Dockerfile.ci \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 016f0103b2..0000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,84 +0,0 @@ -FROM rust:alpine AS chef - -RUN update-ca-certificates -RUN apk add --no-cache musl-dev openssl-dev pkgconfig -RUN cargo install cargo-chef -WORKDIR /iroh - -FROM chef AS planner -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - -### Builder image -FROM chef AS rust_builder - -RUN update-ca-certificates -RUN apk add --no-cache musl-dev openssl-dev pkgconfig - -COPY --from=planner /iroh/recipe.json recipe.json -# Build dependencies - this is the caching Docker layer! -RUN cargo chef cook --release --recipe-path recipe.json - -WORKDIR /iroh - -# copy entire workspace -COPY . . - -RUN cargo build --release --all-features - -### Target image -FROM alpine:latest AS iroh - -RUN apk update && apk add ca-certificates && update-ca-certificates - -# Copy our build, changing owndership to distroless-provided "nonroot" user, -# (65532:65532) -COPY --from=rust_builder /iroh/target/release/iroh /iroh - -RUN chmod +x /iroh - -WORKDIR / - -# expose the default ports -# rpc, nat-pmp, metrics, iroh_node_ipv4, iroh_node_ipv6 -EXPOSE 4919/udp 5351 9090 11204/udp 11205/udp -ENTRYPOINT ["/iroh"] -CMD ["start"] - -### Target image -FROM alpine:latest AS iroh-relay - -RUN apk update && apk add ca-certificates && update-ca-certificates - -# Copy our build, changing owndership to distroless-provided "nonroot" user, -# (65532:65532) -COPY --from=rust_builder /iroh/target/release/iroh-relay /iroh-relay - -RUN chmod +x /iroh-relay - -WORKDIR / - -# expose the default ports -# http, https, stun, metrics -EXPOSE 80 443 3478/udp 9090 -ENTRYPOINT ["/iroh-relay"] -CMD [""] - -### Target image -FROM alpine:latest AS iroh-dns-server - -RUN apk update && apk add ca-certificates && update-ca-certificates - -# Copy our build, changing owndership to distroless-provided "nonroot" user, -# (65532:65532) -COPY --from=rust_builder /iroh/target/release/iroh-dns-server /iroh-dns-server - -RUN chmod +x /iroh-dns-server - -WORKDIR / - -# expose the default ports -# dns, metrics -EXPOSE 53/udp 9090 -ENTRYPOINT ["/iroh-dns-server"] -CMD [""] \ No newline at end of file diff --git a/docker/Dockerfile.ci b/docker/Dockerfile.ci deleted file mode 100644 index 29384bad72..0000000000 --- a/docker/Dockerfile.ci +++ /dev/null @@ -1,52 +0,0 @@ -### Base image for iroh-relay and iroh-dns-server -FROM alpine:latest AS base -RUN apk update && apk add ca-certificates && update-ca-certificates - - -### Target image -FROM base AS iroh -ARG TARGETPLATFORM - -COPY bins/${TARGETPLATFORM}/iroh /iroh - -RUN chmod +x /iroh - -WORKDIR / - -# expose the default ports -# rpc, nat-pmp, metrics, iroh_node_ipv4, iroh_node_ipv6 -EXPOSE 4919/udp 5351 9090 11204/udp 11205/udp -ENTRYPOINT ["/iroh"] -CMD ["start"] - -### Target image -FROM base AS iroh-relay -ARG TARGETPLATFORM - -COPY bins/${TARGETPLATFORM}/iroh-relay /iroh-relay - -RUN chmod 
+x /iroh-relay - -WORKDIR / - -# expose the default ports -# http, https, stun, metrics -EXPOSE 80 443 3478/udp 9090 -ENTRYPOINT ["/iroh-relay"] -CMD [""] - -### Target image -FROM base AS iroh-dns-server -ARG TARGETPLATFORM - -COPY bins/${TARGETPLATFORM}/iroh-dns-server /iroh-dns-server - -RUN chmod +x /iroh-dns-server - -WORKDIR / - -# expose the default ports -# dns, metrics -EXPOSE 53/udp 9090 -ENTRYPOINT ["/iroh-dns-server"] -CMD [""] \ No newline at end of file diff --git a/docker/README.md b/docker/README.md deleted file mode 100644 index 0dd0f53239..0000000000 --- a/docker/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Iroh Docker Images - -## Intro - -A set of docker images provided to easily run iroh in a containerized environment. -Features `iroh`, `iroh-relay` and `iroh-dns-server`. - -The provided `Docker` files are intended for CI use but can be also manually built. - -## Building - -- All commands are run from the root folder -- If you're on macOS run `docker buildx build -f docker/Dockerfile --target iroh --platform linux/arm64/v8 --tag n0computer/iroh:latest .` -- If you're on linux run `docker buildx build -f docker/Dockerfile --target iroh --platform linux/amd64 --tag n0computer/iroh:latest .` -- Switch out `--target iroh` for other targets `iroh-relay,iroh-dns-server` - -## Running - -### iroh - -- As is: `docker run -p 9090:9090 -p 4919:4919/udp -Pit n0computer/iroh:latest --rpc-addr 0.0.0.0:4919` -- With parameters: `docker run -p 9090:9090 -p 4919:4919/udp -Pit n0computer/iroh:latest --rpc-addr 0.0.0.0:4919 ` -- Provide a config file: `docker run -v ./docker.iroh.cfg:/iroh.cfg -p 9090:9090 -p 4919:4919/udp -Pit n0computer/iroh:latest --rpc-addr 0.0.0.0:4919 --config iroh.cfg start` - -### iroh-relay - -- Provide a config file: `docker run -v /path/to/iroh-relay.conf:/config/iroh-relay.conf -p 80:80 -p 443:443 -p 3478:3478/udp -p 9090:9090 -it n0computer/iroh-relay:latest --config /config/iroh-relay.conf` - -### iroh-dns-server - -- Provide a config file: `docker run -v /path/to/iroh-dns-server.conf:/config/iroh-dns-server.conf -p 53:53/udp -p 9090:9090 -it n0computer/iroh-dns-server:latest --config /config/iroh-dns-server.conf` \ No newline at end of file From c89c2278c10805ea0e681d396d919b620ab41d57 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 19 Nov 2024 13:14:01 +0100 Subject: [PATCH 09/17] cleanup some docs --- iroh/Cargo.toml | 3 +-- iroh/src/lib.rs | 30 ++---------------------------- 2 files changed, 3 insertions(+), 30 deletions(-) diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 153ee24995..91b2350171 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -68,9 +68,8 @@ url = { version = "2.5.0", features = ["serde"] } serde-error = "0.1.3" [features] -default = ["metrics", "fs-store"] +default = ["metrics"] metrics = ["iroh-metrics"] -fs-store = [] test = [] discovery-pkarr-dht = ["iroh-net/discovery-pkarr-dht"] test-utils = ["iroh-net/test-utils"] diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs index d5662459df..4b21148ea3 100644 --- a/iroh/src/lib.rs +++ b/iroh/src/lib.rs @@ -4,15 +4,11 @@ //! //! ## Example //! -//! Create a new node and add some data to the blobs store. This data will be -//! available over the network. +//! Create a new node. //! //! ```rust //! # async fn run() -> anyhow::Result<()> { -//! let node = iroh::node::Node::memory().spawn().await?; -//! let client = node.client(); -//! let hash = client.blobs().add_bytes(b"some data".to_vec()).await?.hash; -//! println!("hash: {}", hash); +//! 
let _node = iroh::node::Node::memory().spawn().await?; //! # Ok(()) //! # } //! ``` @@ -45,14 +41,6 @@ //! The client provides access to various subsystems: //! - [net](crate::client::net): //! information and control of the iroh network -//! - [blobs](crate::client::blobs): -//! manage and share content-addressed blobs of data -//! - [tags](crate::client::tags): -//! tags to tell iroh what data is important -//! - [gossip](crate::client::gossip): -//! exchange data with other nodes via a gossip protocol -//! - [docs](crate::client::docs): -//! interact with documents and document authors //! //! The subsystem clients can be obtained cheaply from the main iroh client. //! They are also cheaply cloneable and can be shared across threads. @@ -60,30 +48,16 @@ //! So if you have code that only needs to interact with one subsystem, pass //! it just the subsystem client. //! -//! ## Remote nodes -//! -//! To obtain a client to a remote node, you can use -//! [connect](crate::client::Iroh::connect_path) to connect to a node running on -//! the same machine, using the given data directory, or -//! [connect_addr](crate::client::Iroh::connect_addr) to connect to a node at a -//! known address. -//! -//! **Important**: the protocol to a remote node is not stable and will -//! frequently change. So the client and server must be running the same version of iroh. //! //! ## Reexports //! //! The iroh crate re-exports the following crates: //! - [iroh_base] as [`base`] -//! - [iroh_blobs] as [`blobs`] -//! - [iroh_docs] as [`docs`] -//! - [iroh_gossip] as [`gossip`] //! - [iroh_net] as [`net`] //! //! ## Feature Flags //! //! - `metrics`: Enable metrics collection. Enabled by default. -//! - `fs-store`: Enables the disk based storage backend for `iroh-blobs`. Enabled by default. #![cfg_attr(iroh_docsrs, feature(doc_cfg))] #![deny(missing_docs, rustdoc::broken_intra_doc_links)] From ffdfd1d1402cab992b861c43b762e8f029714337 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Thu, 21 Nov 2024 12:37:16 +0100 Subject: [PATCH 10/17] fixup docs --- iroh/src/node/builder.rs | 8 +------- iroh/src/util/path.rs | 12 ------------ 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index ca18703683..b446722cd2 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -47,12 +47,6 @@ pub const DEFAULT_BIND_ADDR_V6: SocketAddrV6 = /// Builder for the [`Node`]. /// -/// You must supply a blob store and a document store. -/// -/// Blob store implementations are available in [`iroh_blobs::store`]. -/// -/// Everything else is optional, with some sensible defaults. -/// /// The default **relay servers** are hosted by [number 0] on the `iroh.network` domain. To /// customise this use the [`Builder::relay_mode`] function. /// @@ -121,7 +115,7 @@ pub enum DiscoveryConfig { /// cargo feature from [iroh-net] is enabled. In this case only the Pkarr/DNS service /// is used, but on the `iroh.test` domain. This domain is not integrated with the /// global DNS network and thus node discovery is effectively disabled. To use node - /// discovery in a test use the [`iroh_net::test_utils::DnsPkarrServer`] in the test and + /// discovery in a test use the `iroh_net::test_utils::DnsPkarrServer` in the test and /// configure it here as a custom discovery mechanism ([`DiscoveryConfig::Custom`]). 
/// /// [number 0]: https://n0.computer diff --git a/iroh/src/util/path.rs b/iroh/src/util/path.rs index f7ee91af40..bfeffe913a 100644 --- a/iroh/src/util/path.rs +++ b/iroh/src/util/path.rs @@ -9,24 +9,12 @@ pub enum IrohPaths { /// Path to the node's secret key for the [`iroh_net::key::PublicKey`]. #[strum(serialize = "keypair")] SecretKey, - /// Path to the node's [file based blob store](iroh_blobs::store::fs::Store). - #[strum(serialize = "blobs")] - BaoStoreDir, - /// Path to the [iroh-docs document database](iroh_docs::store::fs::Store) - #[strum(serialize = "docs.redb")] - DocsDatabase, - /// Path to the console state - #[strum(serialize = "console")] - Console, #[strum(serialize = "peers.postcard")] /// Path to store known peer data. PeerData, #[strum(serialize = "rpc.lock")] /// Path to RPC lock file, containing the RPC port if running. RpcLock, - /// Path to the [`iroh_docs::AuthorId`] of the node's default author - #[strum(serialize = "default-author")] - DefaultAuthor, } impl AsRef for IrohPaths { From 1badc50170b1fdcf96246d19ffb6ca06364039d1 Mon Sep 17 00:00:00 2001 From: Asmir Avdicevic Date: Mon, 25 Nov 2024 12:47:13 +0100 Subject: [PATCH 11/17] docker is back --- .dockerignore | 2 + .github/workflows/docker.yaml | 103 ++++++++++++++++++++++++++++++++++ .github/workflows/release.yml | 4 +- docker/Dockerfile | 65 +++++++++++++++++++++ docker/Dockerfile.ci | 35 ++++++++++++ docker/README.md | 25 +++++++++ 6 files changed, 231 insertions(+), 3 deletions(-) create mode 100644 .dockerignore create mode 100644 .github/workflows/docker.yaml create mode 100644 docker/Dockerfile create mode 100644 docker/Dockerfile.ci create mode 100644 docker/README.md diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..540c2ede1c --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +docker +target \ No newline at end of file diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml new file mode 100644 index 0000000000..0ad4609ead --- /dev/null +++ b/.github/workflows/docker.yaml @@ -0,0 +1,103 @@ +name: Docker + +on: + workflow_dispatch: + inputs: + release_version: + description: "Release version" + required: true + type: string + default: "" + base_hash: + description: "Commit hash from which to build" + required: true + type: string + default: "" + publish: + description: "Publish to Docker Hub" + required: true + type: boolean + default: false + workflow_call: + inputs: + release_version: + description: "Release version" + required: true + type: string + default: "" + base_hash: + description: "Commit hash from which to build" + required: true + type: string + default: "" + publish: + description: "Publish to Docker Hub" + required: true + type: boolean + default: false + +env: + IROH_FORCE_STAGING_RELAYS: "1" + +jobs: + build_and_publish: + timeout-minutes: 30 + name: Docker + runs-on: [self-hosted, linux, X64] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Prep dirs + run: | + mkdir -p bins/linux/amd64 + mkdir -p bins/linux/arm64 + + - name: Setup awscli on linux + run: | + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install --update + + - name: Set aws credentials + run: | + echo "AWS_ACCESS_KEY_ID=${{secrets.S3_ACCESS_KEY_ID}}" 
>> $GITHUB_ENV + echo "AWS_SECRET_ACCESS_KEY=${{secrets.S3_ACCESS_KEY}}" >> $GITHUB_ENV + echo "AWS_DEFAULT_REGION=us-west-2" >> $GITHUB_ENV + + - name: Fetch release binaries + run: | + aws s3 cp s3://vorc/iroh-relay-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh-relay + aws s3 cp s3://vorc/iroh-dns-server-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh-dns-server + + aws s3 cp s3://vorc/iroh-relay-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh-relay + aws s3 cp s3://vorc/iroh-dns-server-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh-dns-server + + - name: Build Docker image (iroh-relay) + uses: docker/build-push-action@v6 + with: + context: . + push: ${{ inputs.publish }} + tags: n0computer/iroh-relay:latest,n0computer/iroh-relay:${{ inputs.release_version }} + target: iroh-relay + platforms: linux/amd64,linux/arm64/v8 + file: docker/Dockerfile.ci + + - name: Build Docker image (iroh-dns-server) + uses: docker/build-push-action@v6 + with: + context: . + push: ${{ inputs.publish }} + tags: n0computer/iroh-dns-server:latest,n0computer/iroh-dns-server:${{ inputs.release_version }} + target: iroh-dns-server + platforms: linux/amd64,linux/arm64/v8 + file: docker/Dockerfile.ci \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index faf2ea2455..1635918175 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -42,7 +42,7 @@ env: RUSTDOCFLAGS: -Dwarnings MSRV: "1.76" SCCACHE_CACHE_SIZE: "50G" - BIN_NAMES: "iroh,iroh-relay,iroh-dns-server" + BIN_NAMES: "iroh-relay,iroh-dns-server" RELEASE_VERSION: ${{ github.event.inputs.release_version }} jobs: @@ -234,14 +234,12 @@ jobs: - name: push release if: matrix.os != 'windows-latest' run: | - aws s3 cp ./target/${{ matrix.cargo_targets }}/optimized-release/iroh s3://vorc/iroh-${RELEASE_OS}-${RELEASE_ARCH}-${GITHUB_SHA::7} --no-progress aws s3 cp ./target/${{ matrix.cargo_targets }}/optimized-release/iroh-relay s3://vorc/iroh-relay-${RELEASE_OS}-${RELEASE_ARCH}-${GITHUB_SHA::7} --no-progress aws s3 cp ./target/${{ matrix.cargo_targets }}/optimized-release/iroh-dns-server s3://vorc/iroh-dns-server-${RELEASE_OS}-${RELEASE_ARCH}-${GITHUB_SHA::7} --no-progress - name: push release latest if: matrix.os != 'windows-latest' && (github.event.inputs.mark_latest == 'true' || github.event_name == 'push') run: | - aws s3 cp ./target/${{ matrix.cargo_targets }}/optimized-release/iroh s3://vorc/iroh-${RELEASE_OS}-${RELEASE_ARCH}-latest --no-progress aws s3 cp ./target/${{ matrix.cargo_targets }}/optimized-release/iroh-relay s3://vorc/iroh-relay-${RELEASE_OS}-${RELEASE_ARCH}-latest --no-progress aws s3 cp ./target/${{ matrix.cargo_targets }}/optimized-release/iroh-dns-server s3://vorc/iroh-dns-server-${RELEASE_OS}-${RELEASE_ARCH}-latest --no-progress diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000000..2a94587782 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,65 @@ +FROM rust:alpine AS chef + +RUN update-ca-certificates +RUN apk add --no-cache musl-dev openssl-dev pkgconfig +RUN cargo install cargo-chef +WORKDIR /iroh + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +### Builder image +FROM chef AS rust_builder + +RUN update-ca-certificates +RUN apk add --no-cache musl-dev openssl-dev pkgconfig + +COPY --from=planner /iroh/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! 
+
+RUN cargo chef cook --release --recipe-path recipe.json
+
+WORKDIR /iroh
+
+# copy entire workspace
+COPY . .
+
+RUN cargo build --release --all-features
+
+### Target image
+FROM alpine:latest AS iroh-relay
+
+RUN apk update && apk add ca-certificates && update-ca-certificates
+
+# Copy the binary we just built from the builder stage
+# into the minimal target image
+COPY --from=rust_builder /iroh/target/release/iroh-relay /iroh-relay
+
+RUN chmod +x /iroh-relay
+
+WORKDIR /
+
+# expose the default ports
+# http, https, stun, metrics
+EXPOSE 80 443 3478/udp 9090
+ENTRYPOINT ["/iroh-relay"]
+CMD [""]
+
+### Target image
+FROM alpine:latest AS iroh-dns-server
+
+RUN apk update && apk add ca-certificates && update-ca-certificates
+
+# Copy the binary we just built from the builder stage
+# into the minimal target image
+COPY --from=rust_builder /iroh/target/release/iroh-dns-server /iroh-dns-server
+
+RUN chmod +x /iroh-dns-server
+
+WORKDIR /
+
+# expose the default ports
+# dns, metrics
+EXPOSE 53/udp 9090
+ENTRYPOINT ["/iroh-dns-server"]
+CMD [""]
\ No newline at end of file
diff --git a/docker/Dockerfile.ci b/docker/Dockerfile.ci
new file mode 100644
index 0000000000..5fa4f45c1e
--- /dev/null
+++ b/docker/Dockerfile.ci
@@ -0,0 +1,35 @@
+### Base image for iroh-relay and iroh-dns-server
+FROM alpine:latest AS base
+RUN apk update && apk add ca-certificates && update-ca-certificates
+
+### Target image
+FROM base AS iroh-relay
+ARG TARGETPLATFORM
+
+COPY bins/${TARGETPLATFORM}/iroh-relay /iroh-relay
+
+RUN chmod +x /iroh-relay
+
+WORKDIR /
+
+# expose the default ports
+# http, https, stun, metrics
+EXPOSE 80 443 3478/udp 9090
+ENTRYPOINT ["/iroh-relay"]
+CMD [""]
+
+### Target image
+FROM base AS iroh-dns-server
+ARG TARGETPLATFORM
+
+COPY bins/${TARGETPLATFORM}/iroh-dns-server /iroh-dns-server
+
+RUN chmod +x /iroh-dns-server
+
+WORKDIR /
+
+# expose the default ports
+# dns, metrics
+EXPOSE 53/udp 9090
+ENTRYPOINT ["/iroh-dns-server"]
+CMD [""]
\ No newline at end of file
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000000..c68c96086d
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,25 @@
+# Iroh Docker Images
+
+## Intro
+
+A set of Docker images for running iroh in a containerized environment.
+It features `iroh-relay` and `iroh-dns-server`.
+
+The provided Dockerfiles are intended for CI use but can also be built manually.
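+
+Note that `docker/Dockerfile` compiles iroh from source inside the image,
+while `docker/Dockerfile.ci` only copies prebuilt binaries from
+`bins/${TARGETPLATFORM}` (the docker workflow fetches these from S3 first).
+A rough sketch of the CI-style build, assuming you have already placed the
+binaries under `bins/linux/amd64` and `bins/linux/arm64` yourself:
+
+    docker buildx build -f docker/Dockerfile.ci --target iroh-relay \
+      --platform linux/amd64,linux/arm64/v8 \
+      --tag n0computer/iroh-relay:latest .
+
+Manual source builds are covered below.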
+
+## Building
+
+- All commands are run from the repository root
+- If you're on macOS, run `docker buildx build -f docker/Dockerfile --target iroh-relay --platform linux/arm64/v8 --tag n0computer/iroh-relay:latest .`
+- If you're on Linux, run `docker buildx build -f docker/Dockerfile --target iroh-relay --platform linux/amd64 --tag n0computer/iroh-relay:latest .`
+- Switch out `--target iroh-relay` for `--target iroh-dns-server` to build the DNS server image
+
+## Running
+
+### iroh-relay
+
+- Provide a config file: `docker run -v /path/to/iroh-relay.conf:/config/iroh-relay.conf -p 80:80 -p 443:443 -p 3478:3478/udp -p 9090:9090 -it n0computer/iroh-relay:latest --config /config/iroh-relay.conf`
+
+### iroh-dns-server
+
+- Provide a config file: `docker run -v /path/to/iroh-dns-server.conf:/config/iroh-dns-server.conf -p 53:53/udp -p 9090:9090 -it n0computer/iroh-dns-server:latest --config /config/iroh-dns-server.conf`
\ No newline at end of file

From 9b3f007522c0a0d17ef618673e02d5052188aa3e Mon Sep 17 00:00:00 2001
From: dignifiedquire
Date: Mon, 25 Nov 2024 16:33:17 +0100
Subject: [PATCH 12/17] skip deprecation

---
 iroh/src/node.rs | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/iroh/src/node.rs b/iroh/src/node.rs
index d5c5020cdd..2911617aef 100644
--- a/iroh/src/node.rs
+++ b/iroh/src/node.rs
@@ -114,14 +114,6 @@ struct NodeInner {
     client: crate::client::Iroh,
 }
 
-/// In memory node.
-#[deprecated]
-pub type MemNode = Node;
-
-/// Persistent node.
-#[deprecated]
-pub type FsNode = Node;
-
 impl Node {
     /// Returns a new builder for the [`Node`], by default configured to run in memory.
     ///

From 87acc7f7f7ea7c33a3291020137fda5f099cf7a5 Mon Sep 17 00:00:00 2001
From: Asmir Avdicevic
Date: Mon, 25 Nov 2024 16:39:59 +0100
Subject: [PATCH 13/17] fix: make netsim run from examples (#2959)

## Description

Moves our netsim tests to run from an example, so they exercise only the lib
part of the code, not the full CLI & blobs stack.

Things left to do:
- [x] rename the example from `new` to something that makes more sense; `netsim` runs it as `iroh-transfer`
- [x] clean up the example code
- [x] add the option to provide either a relay config or at least pass in the relay url as an argument
- [x] continue the CI adjustment and move all invocations of the netsim runner to run from the `iroh_v2` and `integration_v2` sims (we want a less abrupt netsim switchover)
- [x] convert the remaining sims in `chuck/netsim/sims` to the new format
- [ ] after some time, flip the CI invocations back to the regular `iroh` and `integration` sims, which includes doing the same on `netsim` and removing the old ones

## Breaking Changes

## Notes & open questions

## Change checklist

- [ ] Self-review.
- [ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant.
- [ ] Tests if relevant.
- [ ] All breaking changes documented.
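
For local smoke testing, the new example can be exercised roughly like this
(a sketch; `examples` is the cargo feature gating it, and netsim itself runs
the binary copied as `iroh-transfer`):

    cargo run --release --features examples --example transfer -- provide --size 1G
    cargo run --release --features examples --example transfer -- fetch <ticket>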
--- .github/workflows/ci.yml | 8 +- .github/workflows/netsim.yml | 4 +- .github/workflows/netsim_runner.yaml | 2 +- Cargo.lock | 7 + iroh-net/bench/src/lib.rs | 2 +- iroh/Cargo.toml | 7 +- iroh/examples/transfer.rs | 324 +++++++++++++++++++++++++++ 7 files changed, 345 insertions(+), 9 deletions(-) create mode 100644 iroh/examples/transfer.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c9c20656d3..7235d7b95d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -246,13 +246,13 @@ jobs: # TODO: We have a bunch of platform-dependent code so should # probably run this job on the full platform matrix - name: clippy check (all features) - run: cargo clippy --workspace --all-features --all-targets --bins --tests --benches + run: cargo clippy --workspace --all-features --all-targets --lib --bins --tests --benches --examples - name: clippy check (no features) - run: cargo clippy --workspace --no-default-features --lib --bins --tests + run: cargo clippy --workspace --no-default-features --all-targets --lib --bins --tests --benches --examples - name: clippy check (default features) - run: cargo clippy --workspace --all-targets + run: cargo clippy --workspace --all-targets --lib --bins --tests --benches --examples msrv: if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" @@ -295,7 +295,7 @@ jobs: branch: ${{ github.ref }} max_workers: 4 netsim_branch: "main" - sim_paths: "sims/iroh/iroh.json,sims/integration" + sim_paths: "sims/iroh_v2/iroh.json,sims/integration_v2" pr_number: ${{ github.event.pull_request.number || '' }} codespell: diff --git a/.github/workflows/netsim.yml b/.github/workflows/netsim.yml index b7a79cd1d1..cbd08149e4 100644 --- a/.github/workflows/netsim.yml +++ b/.github/workflows/netsim.yml @@ -39,7 +39,7 @@ jobs: branch: "main" max_workers: 1 netsim_branch: "main" - sim_paths: "sims/iroh,sims/integration" + sim_paths: "sims/iroh_v2,sims/integration_v2" pr_number: "" publish_metrics: true build_profile: "optimized-release" @@ -53,7 +53,7 @@ jobs: branch: ${{inputs.branch}} max_workers: 1 netsim_branch: ${{inputs.netsim_branch}} - sim_paths: "sims/iroh" + sim_paths: "sims/iroh_v2" pr_number: ${{inputs.pr_number}} publish_metrics: false build_profile: "optimized-release" diff --git a/.github/workflows/netsim_runner.yaml b/.github/workflows/netsim_runner.yaml index ae7b20d08c..df168f2803 100644 --- a/.github/workflows/netsim_runner.yaml +++ b/.github/workflows/netsim_runner.yaml @@ -133,7 +133,7 @@ jobs: - name: Copy binaries to right location run: | cp target/${{inputs.build_profile}}/examples/* ../chuck/netsim/bins/ - cp target/${{inputs.build_profile}}/iroh ../chuck/netsim/bins/iroh + cp target/${{inputs.build_profile}}/examples/transfer ../chuck/netsim/bins/iroh-transfer cp target/${{inputs.build_profile}}/iroh-relay ../chuck/netsim/bins/iroh-relay cp ../chuck/target/release/chuck ../chuck/netsim/bins/chuck diff --git a/Cargo.lock b/Cargo.lock index f6779670d9..e3fb61707a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2499,6 +2499,7 @@ dependencies = [ "nested_enum_utils", "num_cpus", "parking_lot", + "parse-size", "postcard", "proptest", "quic-rpc", @@ -3686,6 +3687,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "parse-size" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "944553dd59c802559559161f9816429058b869003836120e262e8caec061b7ae" + [[package]] name = "paste" version = "1.0.15" diff --git 
a/iroh-net/bench/src/lib.rs b/iroh-net/bench/src/lib.rs index a591581d26..93e0c91e51 100644 --- a/iroh-net/bench/src/lib.rs +++ b/iroh-net/bench/src/lib.rs @@ -21,7 +21,7 @@ pub mod s2n; pub mod stats; #[derive(Parser, Debug, Clone, Copy)] -#[clap(name = "bulk")] +#[clap(name = "iroh-net-bench")] pub enum Commands { Iroh(Opt), #[cfg(not(any(target_os = "freebsd", target_os = "openbsd", target_os = "netbsd")))] diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 91b2350171..392256c730 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -62,6 +62,7 @@ ref-cast = "1.0.23" # Examples clap = { version = "4", features = ["derive"], optional = true } indicatif = { version = "0.17", features = ["tokio"], optional = true } +parse-size = { version = "=1.0.0", optional = true } # pinned version to avoid bumping msrv to 1.81 # Documentation tests url = { version = "2.5.0", features = ["serde"] } @@ -74,7 +75,7 @@ test = [] discovery-pkarr-dht = ["iroh-net/discovery-pkarr-dht"] test-utils = ["iroh-net/test-utils"] -examples = ["dep:clap", "dep:indicatif"] +examples = ["dep:clap", "dep:indicatif", "dep:parse-size"] [dev-dependencies] anyhow = { version = "1" } @@ -101,3 +102,7 @@ rustdoc-args = ["--cfg", "iroh_docsrs"] [[example]] name = "rpc" required-features = ["examples"] + +[[example]] +name = "transfer" +required-features = ["examples"] diff --git a/iroh/examples/transfer.rs b/iroh/examples/transfer.rs new file mode 100644 index 0000000000..9225155b21 --- /dev/null +++ b/iroh/examples/transfer.rs @@ -0,0 +1,324 @@ +use std::{ + str::FromStr, + time::{Duration, Instant}, +}; + +use anyhow::{Context, Result}; +use bytes::Bytes; +use clap::{Parser, Subcommand}; +use futures_lite::StreamExt; +use indicatif::HumanBytes; +use iroh_net::{ + key::SecretKey, ticket::NodeTicket, Endpoint, NodeAddr, RelayMap, RelayMode, RelayUrl, +}; +use tracing::info; + +// Transfer ALPN that we are using to communicate over the `Endpoint` +const TRANSFER_ALPN: &[u8] = b"n0/iroh/transfer/example/0"; + +#[derive(Parser, Debug)] +#[command(name = "transfer")] +struct Cli { + #[command(subcommand)] + command: Commands, +} + +#[derive(Subcommand, Debug)] +enum Commands { + Provide { + #[clap(long, default_value = "1G", value_parser = parse_byte_size)] + size: u64, + #[clap(long)] + relay_url: Option, + }, + Fetch { + #[arg(index = 1)] + ticket: String, + #[clap(long)] + relay_url: Option, + }, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt::init(); + let cli = Cli::parse(); + + match &cli.command { + Commands::Provide { size, relay_url } => provide(*size, relay_url.clone()).await?, + Commands::Fetch { ticket, relay_url } => fetch(ticket, relay_url.clone()).await?, + } + + Ok(()) +} + +async fn provide(size: u64, relay_url: Option) -> anyhow::Result<()> { + let secret_key = SecretKey::generate(); + let relay_mode = match relay_url { + Some(relay_url) => { + let relay_url = RelayUrl::from_str(&relay_url)?; + let relay_map = RelayMap::from_url(relay_url); + RelayMode::Custom(relay_map) + } + None => RelayMode::Default, + }; + let endpoint = Endpoint::builder() + .secret_key(secret_key) + .alpns(vec![TRANSFER_ALPN.to_vec()]) + .relay_mode(relay_mode) + .bind() + .await?; + + let node_id = endpoint.node_id(); + + for local_endpoint in endpoint + .direct_addresses() + .next() + .await + .context("no endpoints")? 
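+    // `direct_addresses()` yields a stream of locally discovered address sets;
+    // awaiting `.next()` waits for the first set to become available.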
+ { + println!("\t{}", local_endpoint.addr) + } + + let relay_url = endpoint + .home_relay() + .expect("should be connected to a relay server"); + let local_addrs = endpoint + .direct_addresses() + .next() + .await + .context("no endpoints")? + .into_iter() + .map(|endpoint| endpoint.addr) + .collect::>(); + + let node_addr = NodeAddr::from_parts(node_id, Some(relay_url), local_addrs); + let ticket = NodeTicket::new(node_addr); + + println!("NodeTicket: {}", ticket); + + // accept incoming connections, returns a normal QUIC connection + while let Some(incoming) = endpoint.accept().await { + let connecting = match incoming.accept() { + Ok(connecting) => connecting, + Err(err) => { + tracing::warn!("incoming connection failed: {err:#}"); + // we can carry on in these cases: + // this can be caused by retransmitted datagrams + continue; + } + }; + let conn = connecting.await?; + let node_id = iroh_net::endpoint::get_remote_node_id(&conn)?; + info!( + "new connection from {node_id} with ALPN {} (coming from {})", + String::from_utf8_lossy(&TRANSFER_ALPN), + conn.remote_address() + ); + + // spawn a task to handle reading and writing off of the connection + tokio::spawn(async move { + // accept a bi-directional QUIC connection + // use the `quinn` APIs to send and recv content + let (mut send, mut recv) = conn.accept_bi().await?; + tracing::debug!("accepted bi stream, waiting for data..."); + let message = recv.read_to_end(100).await?; + let message = String::from_utf8(message)?; + println!("received: {message}"); + + send_data_on_stream(&mut send, size).await?; + + // We sent the last message, so wait for the client to close the connection once + // it received this message. + let res = tokio::time::timeout(Duration::from_secs(3), async move { + let closed = conn.closed().await; + if !matches!(closed, quinn::ConnectionError::ApplicationClosed(_)) { + println!("node {node_id} disconnected with an error: {closed:#}"); + } + }) + .await; + if res.is_err() { + println!("node {node_id} did not disconnect within 3 seconds"); + } + Ok::<_, anyhow::Error>(()) + }); + } + + // stop with SIGINT (ctrl-c) + Ok(()) +} + +async fn fetch(ticket: &str, relay_url: Option) -> anyhow::Result<()> { + let ticket: NodeTicket = ticket.parse()?; + let secret_key = SecretKey::generate(); + let relay_mode = match relay_url { + Some(relay_url) => { + let relay_url = RelayUrl::from_str(&relay_url)?; + let relay_map = RelayMap::from_url(relay_url); + RelayMode::Custom(relay_map) + } + None => RelayMode::Default, + }; + let endpoint = Endpoint::builder() + .secret_key(secret_key) + .alpns(vec![TRANSFER_ALPN.to_vec()]) + .relay_mode(relay_mode) + .bind() + .await?; + + let start = Instant::now(); + + let me = endpoint.node_id(); + println!("node id: {me}"); + println!("node listening addresses:"); + for local_endpoint in endpoint + .direct_addresses() + .next() + .await + .context("no endpoints")? + { + println!("\t{}", local_endpoint.addr) + } + + let relay_url = endpoint + .home_relay() + .expect("should be connected to a relay server, try calling `endpoint.local_endpoints()` or `endpoint.connect()` first, to ensure the endpoint has actually attempted a connection before checking for the connected relay server"); + println!("node relay server url: {relay_url}\n"); + + // Attempt to connect, over the given ALPN. + // Returns a Quinn connection. + let conn = endpoint + .connect(ticket.node_addr().clone(), TRANSFER_ALPN) + .await?; + info!("connected"); + + // Use the Quinn API to send and recv content. 
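+    // A QUIC bi-directional stream only becomes visible to the peer once data
+    // is written to it, so we send our message before expecting a reply.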
+ let (mut send, mut recv) = conn.open_bi().await?; + + let message = format!("{me} is saying 'hello!'"); + send.write_all(message.as_bytes()).await?; + + // Call `finish` to signal no more data will be sent on this stream. + send.finish()?; + + let (len, time_to_first_byte, chnk) = drain_stream(&mut recv, false).await?; + + // We received the last message: close all connections and allow for the close + // message to be sent. + endpoint.close(0u8.into(), b"bye").await?; + + // Ensure the client has closed the connection + let res = tokio::time::timeout(Duration::from_secs(3), async move { + let closed = conn.closed().await; + if !matches!(closed, quinn::ConnectionError::LocallyClosed) { + println!("node disconnected with an error: {closed:#}"); + } + }) + .await; + if res.is_err() { + println!("node did not disconnect within 3 seconds"); + } + + let duration = start.elapsed(); + println!( + "Received {} in {:.4}s with time to first byte {}s in {} chunks", + HumanBytes(len as u64), + duration.as_secs_f64(), + time_to_first_byte.as_secs_f64(), + chnk + ); + println!( + "Transferred {} in {:.4}, {}/s", + HumanBytes(len as u64), + duration.as_secs_f64(), + HumanBytes((len as f64 / duration.as_secs_f64()) as u64) + ); + + Ok(()) +} + +async fn drain_stream( + stream: &mut iroh_net::endpoint::RecvStream, + read_unordered: bool, +) -> Result<(usize, Duration, u64)> { + let mut read = 0; + + let download_start = Instant::now(); + let mut first_byte = true; + let mut time_to_first_byte = download_start.elapsed(); + + let mut num_chunks: u64 = 0; + + if read_unordered { + while let Some(chunk) = stream.read_chunk(usize::MAX, false).await? { + if first_byte { + time_to_first_byte = download_start.elapsed(); + first_byte = false; + } + read += chunk.bytes.len(); + num_chunks += 1; + } + } else { + // These are 32 buffers, for reading approximately 32kB at once + #[rustfmt::skip] + let mut bufs = [ + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), + ]; + + while let Some(n) = stream.read_chunks(&mut bufs[..]).await? 
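+        // `read_chunks` fills at most `bufs.len()` chunks per call and returns
+        // how many were filled; `None` means the stream is finished.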
{ + if first_byte { + time_to_first_byte = download_start.elapsed(); + first_byte = false; + } + read += bufs.iter().take(n).map(|buf| buf.len()).sum::(); + num_chunks += 1; + } + } + + Ok((read, time_to_first_byte, num_chunks)) +} + +async fn send_data_on_stream( + stream: &mut iroh_net::endpoint::SendStream, + stream_size: u64, +) -> Result<()> { + const DATA: &[u8] = &[0xAB; 1024 * 1024]; + let bytes_data = Bytes::from_static(DATA); + + let full_chunks = stream_size / (DATA.len() as u64); + let remaining = (stream_size % (DATA.len() as u64)) as usize; + + for _ in 0..full_chunks { + stream + .write_chunk(bytes_data.clone()) + .await + .context("failed sending data")?; + } + + if remaining != 0 { + stream + .write_chunk(bytes_data.slice(0..remaining)) + .await + .context("failed sending data")?; + } + + stream.finish().context("failed finishing stream")?; + stream + .stopped() + .await + .context("failed to wait for stream to be stopped")?; + + Ok(()) +} + +fn parse_byte_size(s: &str) -> Result { + let cfg = parse_size::Config::new().with_binary(); + cfg.parse_size(s).map_err(|e| anyhow::anyhow!(e)) +} From bc9d4206f50d13424d925d7dbf806d835b95817c Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Mon, 25 Nov 2024 16:48:08 +0100 Subject: [PATCH 14/17] fixups --- Cargo.lock | 8 ++++---- iroh/examples/transfer.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e3fb61707a..5378414353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4674,9 +4674,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.17" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "log", "once_cell", @@ -6080,9 +6080,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna 1.0.3", diff --git a/iroh/examples/transfer.rs b/iroh/examples/transfer.rs index 9225155b21..5b0b59a1de 100644 --- a/iroh/examples/transfer.rs +++ b/iroh/examples/transfer.rs @@ -112,7 +112,7 @@ async fn provide(size: u64, relay_url: Option) -> anyhow::Result<()> { let node_id = iroh_net::endpoint::get_remote_node_id(&conn)?; info!( "new connection from {node_id} with ALPN {} (coming from {})", - String::from_utf8_lossy(&TRANSFER_ALPN), + String::from_utf8_lossy(TRANSFER_ALPN), conn.remote_address() ); From 10cce5f35e7be8269e35aa69479fe3cbeed829c6 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Mon, 25 Nov 2024 17:34:29 +0100 Subject: [PATCH 15/17] fixup yaml --- .github/workflows/tests.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index fc21b81aca..98efa3dba1 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -23,7 +23,7 @@ env: RUSTFLAGS: -Dwarnings RUSTDOCFLAGS: -Dwarnings SCCACHE_CACHE_SIZE: "50G" - CRATES_LIST: "iroh,iroh-node-util,iroh-metrics,iroh-net,iroh-net-bench,iroh-test,netwatch,portmapper,iroh-relay,iroh-net-report" + CRATES_LIST: 
"iroh,iroh-node-util,iroh-metrics,iroh-net,iroh-net-bench,iroh-test,iroh-dns-server,iroh-router,netwatch,portmapper,iroh-relay,iroh-net-report" IROH_FORCE_STAGING_RELAYS: "1" jobs: @@ -197,9 +197,9 @@ jobs: - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.6 - - uses: msys2/setup-msys2@v2 - with: - release: false + - uses: msys2/setup-msys2@v2 + with: + release: false - name: build tests run: | From dba3d18fb4432bc21f89efcd5ceaac9ed8a1ad87 Mon Sep 17 00:00:00 2001 From: Asmir Avdicevic Date: Mon, 25 Nov 2024 21:28:36 +0100 Subject: [PATCH 16/17] slight refactor of the transfer example --- iroh/examples/transfer.rs | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/iroh/examples/transfer.rs b/iroh/examples/transfer.rs index 5b0b59a1de..1459884d5c 100644 --- a/iroh/examples/transfer.rs +++ b/iroh/examples/transfer.rs @@ -205,19 +205,9 @@ async fn fetch(ticket: &str, relay_url: Option) -> anyhow::Result<()> { // We received the last message: close all connections and allow for the close // message to be sent. - endpoint.close(0u8.into(), b"bye").await?; - - // Ensure the client has closed the connection - let res = tokio::time::timeout(Duration::from_secs(3), async move { - let closed = conn.closed().await; - if !matches!(closed, quinn::ConnectionError::LocallyClosed) { - println!("node disconnected with an error: {closed:#}"); - } - }) - .await; - if res.is_err() { - println!("node did not disconnect within 3 seconds"); - } + tokio::time::timeout(Duration::from_secs(3), async move { + endpoint.close(0u8.into(), b"bye").await?; + })??; let duration = start.elapsed(); println!( From 19345f5b03b2faca961edff538ba2fee0708e0e8 Mon Sep 17 00:00:00 2001 From: Asmir Avdicevic Date: Mon, 25 Nov 2024 21:36:40 +0100 Subject: [PATCH 17/17] eeeh --- iroh/examples/transfer.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/iroh/examples/transfer.rs b/iroh/examples/transfer.rs index 1459884d5c..4a9025cb1c 100644 --- a/iroh/examples/transfer.rs +++ b/iroh/examples/transfer.rs @@ -206,8 +206,12 @@ async fn fetch(ticket: &str, relay_url: Option) -> anyhow::Result<()> { // We received the last message: close all connections and allow for the close // message to be sent. tokio::time::timeout(Duration::from_secs(3), async move { - endpoint.close(0u8.into(), b"bye").await?; - })??; + let res = endpoint.close(0u8.into(), b"bye").await; + if res.is_err() { + println!("failed to close connection: {res:#?}"); + } + }) + .await?; let duration = start.elapsed(); println!(