where
diff --git a/core/src/upgrade/error.rs b/core/src/upgrade/error.rs
index 3d349587c2c..c81ed7cf75b 100644
--- a/core/src/upgrade/error.rs
+++ b/core/src/upgrade/error.rs
@@ -18,9 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use multistream_select::NegotiationError;
use std::fmt;
+use multistream_select::NegotiationError;
+
/// Error that can happen when upgrading a connection or substream to use a protocol.
#[derive(Debug)]
pub enum UpgradeError<E> {
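Every import hunk in this diff follows the same layout, so it is worth spelling out once. The sketch below is a hedged illustration, not new code: it mirrors the `core/src/upgrade/pending.rs` hunk and assumes rustfmt-style grouping (e.g. `group_imports = "StdExternalCrate"` with per-crate merging); the option names are an assumption on my part, not something the diff itself states.

```rust
// Assumed target layout: three import groups separated by blank lines,
// with each crate's imports merged into a single `use` item.
use std::{convert::Infallible, iter}; // 1. `std` / `core`

use futures::future; // 2. external crates

use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; // 3. the local crate
```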
diff --git a/core/src/upgrade/pending.rs b/core/src/upgrade/pending.rs
index 5e3c65422f1..60a9fb9aba1 100644
--- a/core/src/upgrade/pending.rs
+++ b/core/src/upgrade/pending.rs
@@ -19,10 +19,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+use std::{convert::Infallible, iter};
+
use futures::future;
-use std::convert::Infallible;
-use std::iter;
+
+use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that always
/// returns a pending upgrade.
diff --git a/core/src/upgrade/ready.rs b/core/src/upgrade/ready.rs
index 13270aa8b6d..22708d726e7 100644
--- a/core/src/upgrade/ready.rs
+++ b/core/src/upgrade/ready.rs
@@ -19,12 +19,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+use std::{convert::Infallible, iter};
+
use futures::future;
-use std::convert::Infallible;
-use std::iter;
-/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that directly yields the substream.
+use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+
+/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`]
+/// that directly yields the substream.
#[derive(Debug, Copy, Clone)]
pub struct ReadyUpgrade<P> {
protocol_name: P,
diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs
index 037045a2f29..b7fe4a53a7f 100644
--- a/core/src/upgrade/select.rs
+++ b/core/src/upgrade/select.rs
@@ -18,14 +18,18 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::either::EitherFuture;
-use crate::upgrade::{
- InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade,
- UpgradeInfo,
-};
+use std::iter::{Chain, Map};
+
use either::Either;
use futures::future;
-use std::iter::{Chain, Map};
+
+use crate::{
+ either::EitherFuture,
+ upgrade::{
+ InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade,
+ UpgradeInfo,
+ },
+};
/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either
/// sub-upgrade.
diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs
index d8bec6f2b59..b9733e38322 100644
--- a/core/tests/transport_upgrade.rs
+++ b/core/tests/transport_upgrade.rs
@@ -18,18 +18,19 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{io, pin::Pin};
+
use futures::prelude::*;
-use libp2p_core::transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport};
-use libp2p_core::upgrade::{
- self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo,
+use libp2p_core::{
+ transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport},
+ upgrade::{self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
+ Endpoint,
};
-use libp2p_core::Endpoint;
use libp2p_identity as identity;
use libp2p_mplex::MplexConfig;
use libp2p_noise as noise;
use multiaddr::{Multiaddr, Protocol};
use rand::random;
-use std::{io, pin::Pin};
#[derive(Clone)]
struct HelloUpgrade {}
diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs
index def66c4823b..80d7039eccb 100644
--- a/examples/autonat/src/bin/autonat_client.rs
+++ b/examples/autonat/src/bin/autonat_client.rs
@@ -20,15 +20,17 @@
#![doc = include_str!("../../README.md")]
+use std::{error::Error, net::Ipv4Addr, time::Duration};
+
use clap::Parser;
use futures::StreamExt;
-use libp2p::core::multiaddr::Protocol;
-use libp2p::core::Multiaddr;
-use libp2p::swarm::{NetworkBehaviour, SwarmEvent};
-use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId};
-use std::error::Error;
-use std::net::Ipv4Addr;
-use std::time::Duration;
+use libp2p::{
+ autonat,
+ core::{multiaddr::Protocol, Multiaddr},
+ identify, identity, noise,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux, PeerId,
+};
use tracing_subscriber::EnvFilter;
#[derive(Debug, Parser)]
diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs
index 389cc0fa26f..83e456d8fda 100644
--- a/examples/autonat/src/bin/autonat_server.rs
+++ b/examples/autonat/src/bin/autonat_server.rs
@@ -20,14 +20,17 @@
#![doc = include_str!("../../README.md")]
+use std::{error::Error, net::Ipv4Addr, time::Duration};
+
use clap::Parser;
use futures::StreamExt;
-use libp2p::core::{multiaddr::Protocol, Multiaddr};
-use libp2p::swarm::{NetworkBehaviour, SwarmEvent};
-use libp2p::{autonat, identify, identity, noise, tcp, yamux};
-use std::error::Error;
-use std::net::Ipv4Addr;
-use std::time::Duration;
+use libp2p::{
+ autonat,
+ core::{multiaddr::Protocol, Multiaddr},
+ identify, identity, noise,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux,
+};
use tracing_subscriber::EnvFilter;
#[derive(Debug, Parser)]
diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs
index 9499ccbd158..e2d884cb445 100644
--- a/examples/browser-webrtc/src/lib.rs
+++ b/examples/browser-webrtc/src/lib.rs
@@ -1,13 +1,11 @@
#![cfg(target_arch = "wasm32")]
+use std::{io, time::Duration};
+
use futures::StreamExt;
use js_sys::Date;
-use libp2p::core::Multiaddr;
-use libp2p::ping;
-use libp2p::swarm::SwarmEvent;
+use libp2p::{core::Multiaddr, ping, swarm::SwarmEvent};
use libp2p_webrtc_websys as webrtc_websys;
-use std::io;
-use std::time::Duration;
use wasm_bindgen::prelude::*;
use web_sys::{Document, HtmlElement};
diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs
index 7f06b0d0d99..ec6be0c066d 100644
--- a/examples/browser-webrtc/src/main.rs
+++ b/examples/browser-webrtc/src/main.rs
@@ -1,23 +1,27 @@
#![allow(non_upper_case_globals)]
+use std::{
+ net::{Ipv4Addr, SocketAddr},
+ time::Duration,
+};
+
use anyhow::Result;
-use axum::extract::{Path, State};
-use axum::http::header::CONTENT_TYPE;
-use axum::http::StatusCode;
-use axum::response::{Html, IntoResponse};
-use axum::{http::Method, routing::get, Router};
+use axum::{
+ extract::{Path, State},
+ http::{header::CONTENT_TYPE, Method, StatusCode},
+ response::{Html, IntoResponse},
+ routing::get,
+ Router,
+};
use futures::StreamExt;
use libp2p::{
- core::muxing::StreamMuxerBox,
- core::Transport,
+ core::{muxing::StreamMuxerBox, Transport},
multiaddr::{Multiaddr, Protocol},
ping,
swarm::SwarmEvent,
};
use libp2p_webrtc as webrtc;
use rand::thread_rng;
-use std::net::{Ipv4Addr, SocketAddr};
-use std::time::Duration;
use tokio::net::TcpListener;
use tower_http::cors::{Any, CorsLayer};
@@ -127,7 +131,8 @@ struct Libp2pEndpoint(Multiaddr);
/// Serves the index.html file for our client.
///
/// Our server listens on a random UDP port for the WebRTC transport.
-/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address.
+/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__`
+/// placeholder with the actual address.
async fn get_index(
State(Libp2pEndpoint(libp2p_endpoint)): State<Libp2pEndpoint>,
) -> Result<Html<String>, StatusCode> {
diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs
index c785d301c2f..cda1e90bd35 100644
--- a/examples/chat/src/main.rs
+++ b/examples/chat/src/main.rs
@@ -20,12 +20,19 @@
#![doc = include_str!("../README.md")]
+use std::{
+ collections::hash_map::DefaultHasher,
+ error::Error,
+ hash::{Hash, Hasher},
+ time::Duration,
+};
+
use futures::stream::StreamExt;
-use libp2p::{gossipsub, mdns, noise, swarm::NetworkBehaviour, swarm::SwarmEvent, tcp, yamux};
-use std::collections::hash_map::DefaultHasher;
-use std::error::Error;
-use std::hash::{Hash, Hasher};
-use std::time::Duration;
+use libp2p::{
+ gossipsub, mdns, noise,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux,
+};
use tokio::{io, io::AsyncBufReadExt, select};
use tracing_subscriber::EnvFilter;
@@ -61,7 +68,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
// Set a custom gossipsub configuration
let gossipsub_config = gossipsub::ConfigBuilder::default()
.heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space
- .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing)
+ .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message
+ // signing)
.message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated.
.build()
.map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`.
diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs
index 630d4b2b1f3..0ec1f2a321a 100644
--- a/examples/dcutr/src/main.rs
+++ b/examples/dcutr/src/main.rs
@@ -20,6 +20,8 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, str::FromStr, time::Duration};
+
use clap::Parser;
use futures::{executor::block_on, future::FutureExt, stream::StreamExt};
use libp2p::{
@@ -28,8 +30,6 @@ use libp2p::{
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, PeerId,
};
-use std::str::FromStr;
-use std::{error::Error, time::Duration};
use tracing_subscriber::EnvFilter;
#[derive(Debug, Parser)]
diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs
index 6b7947b7eb3..63944f2e9bd 100644
--- a/examples/distributed-key-value-store/src/main.rs
+++ b/examples/distributed-key-value-store/src/main.rs
@@ -20,17 +20,16 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, time::Duration};
+
use futures::stream::StreamExt;
-use libp2p::kad;
-use libp2p::kad::store::MemoryStore;
-use libp2p::kad::Mode;
use libp2p::{
+ kad,
+ kad::{store::MemoryStore, Mode},
mdns, noise,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux,
};
-use std::error::Error;
-use std::time::Duration;
use tokio::{
io::{self, AsyncBufReadExt},
select,
diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs
index 5f6be83dc11..1e3b80a330c 100644
--- a/examples/file-sharing/src/main.rs
+++ b/examples/file-sharing/src/main.rs
@@ -22,15 +22,12 @@
mod network;
-use clap::Parser;
-use tokio::task::spawn;
+use std::{error::Error, io::Write, path::PathBuf};
-use futures::prelude::*;
-use futures::StreamExt;
+use clap::Parser;
+use futures::{prelude::*, StreamExt};
use libp2p::{core::Multiaddr, multiaddr::Protocol};
-use std::error::Error;
-use std::io::Write;
-use std::path::PathBuf;
+use tokio::task::spawn;
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs
index a74afd1c0da..409255ee9ec 100644
--- a/examples/file-sharing/src/network.rs
+++ b/examples/file-sharing/src/network.rs
@@ -1,7 +1,14 @@
-use futures::channel::{mpsc, oneshot};
-use futures::prelude::*;
-use futures::StreamExt;
+use std::{
+ collections::{hash_map, HashMap, HashSet},
+ error::Error,
+ time::Duration,
+};
+use futures::{
+ channel::{mpsc, oneshot},
+ prelude::*,
+ StreamExt,
+};
use libp2p::{
core::Multiaddr,
identity, kad,
@@ -9,19 +16,13 @@ use libp2p::{
noise,
request_response::{self, OutboundRequestId, ProtocolSupport, ResponseChannel},
swarm::{NetworkBehaviour, Swarm, SwarmEvent},
- tcp, yamux, PeerId,
+ tcp, yamux, PeerId, StreamProtocol,
};
-
-use libp2p::StreamProtocol;
use serde::{Deserialize, Serialize};
-use std::collections::{hash_map, HashMap, HashSet};
-use std::error::Error;
-use std::time::Duration;
/// Creates the network components, namely:
///
-/// - The network client to interact with the network layer from anywhere
-/// within your application.
+/// - The network client to interact with the network layer from anywhere within your application.
///
/// - The network event stream, e.g. for incoming requests.
///
diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs
index 22474061da6..55d093c0399 100644
--- a/examples/identify/src/main.rs
+++ b/examples/identify/src/main.rs
@@ -20,9 +20,10 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, time::Duration};
+
use futures::StreamExt;
use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux};
-use std::{error::Error, time::Duration};
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs
index 95921d6fa35..c2df603fcc2 100644
--- a/examples/ipfs-kad/src/main.rs
+++ b/examples/ipfs-kad/src/main.rs
@@ -20,15 +20,21 @@
#![doc = include_str!("../README.md")]
-use std::num::NonZeroUsize;
-use std::ops::Add;
-use std::time::{Duration, Instant};
+use std::{
+ num::NonZeroUsize,
+ ops::Add,
+ time::{Duration, Instant},
+};
use anyhow::{bail, Result};
use clap::Parser;
use futures::StreamExt;
-use libp2p::swarm::{StreamProtocol, SwarmEvent};
-use libp2p::{bytes::BufMut, identity, kad, noise, tcp, yamux, PeerId};
+use libp2p::{
+ bytes::BufMut,
+ identity, kad, noise,
+ swarm::{StreamProtocol, SwarmEvent},
+ tcp, yamux, PeerId,
+};
use tracing_subscriber::EnvFilter;
const BOOTNODES: [&str; 4] = [
diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs
index a57bfd465e0..19d38c767e9 100644
--- a/examples/ipfs-private/src/main.rs
+++ b/examples/ipfs-private/src/main.rs
@@ -20,6 +20,8 @@
#![doc = include_str!("../README.md")]
+use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration};
+
use either::Either;
use futures::prelude::*;
use libp2p::{
@@ -31,7 +33,6 @@ use libp2p::{
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr, Transport,
};
-use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration};
use tokio::{io, io::AsyncBufReadExt, select};
use tracing_subscriber::EnvFilter;
diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs
index 4a9c9785bb3..f1485832d86 100644
--- a/examples/metrics/src/http_service.rs
+++ b/examples/metrics/src/http_service.rs
@@ -18,15 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use axum::extract::State;
-use axum::http::StatusCode;
-use axum::response::IntoResponse;
-use axum::routing::get;
-use axum::Router;
-use prometheus_client::encoding::text::encode;
-use prometheus_client::registry::Registry;
-use std::net::SocketAddr;
-use std::sync::{Arc, Mutex};
+use std::{
+ net::SocketAddr,
+ sync::{Arc, Mutex},
+};
+
+use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router};
+use prometheus_client::{encoding::text::encode, registry::Registry};
use tokio::net::TcpListener;
const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0";
diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs
index 1755c769053..92aa90479fd 100644
--- a/examples/metrics/src/main.rs
+++ b/examples/metrics/src/main.rs
@@ -20,18 +20,20 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, time::Duration};
+
use futures::StreamExt;
-use libp2p::core::Multiaddr;
-use libp2p::metrics::{Metrics, Recorder};
-use libp2p::swarm::{NetworkBehaviour, SwarmEvent};
-use libp2p::{identify, identity, noise, ping, tcp, yamux};
+use libp2p::{
+ core::Multiaddr,
+ identify, identity,
+ metrics::{Metrics, Recorder},
+ noise, ping,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ tcp, yamux,
+};
use opentelemetry::{trace::TracerProvider, KeyValue};
use prometheus_client::registry::Registry;
-use std::error::Error;
-use std::time::Duration;
-use tracing_subscriber::layer::SubscriberExt;
-use tracing_subscriber::util::SubscriberInitExt;
-use tracing_subscriber::{EnvFilter, Layer};
+use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer};
mod http_service;
diff --git a/examples/ping/src/main.rs b/examples/ping/src/main.rs
index 911b0384f89..565ef057c0d 100644
--- a/examples/ping/src/main.rs
+++ b/examples/ping/src/main.rs
@@ -20,9 +20,10 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, time::Duration};
+
use futures::prelude::*;
use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr};
-use std::{error::Error, time::Duration};
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs
index 46a122d0717..b7868418fb0 100644
--- a/examples/relay-server/src/main.rs
+++ b/examples/relay-server/src/main.rs
@@ -21,17 +21,19 @@
#![doc = include_str!("../README.md")]
+use std::{
+ error::Error,
+ net::{Ipv4Addr, Ipv6Addr},
+};
+
use clap::Parser;
use futures::StreamExt;
use libp2p::{
- core::multiaddr::Protocol,
- core::Multiaddr,
+ core::{multiaddr::Protocol, Multiaddr},
identify, identity, noise, ping, relay,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux,
};
-use std::error::Error;
-use std::net::{Ipv4Addr, Ipv6Addr};
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs
index edd3d10a0ce..b133c82d158 100644
--- a/examples/rendezvous/src/bin/rzv-discover.rs
+++ b/examples/rendezvous/src/bin/rzv-discover.rs
@@ -18,6 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{error::Error, time::Duration};
+
use futures::StreamExt;
use libp2p::{
multiaddr::Protocol,
@@ -25,8 +27,6 @@ use libp2p::{
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr,
};
-use std::error::Error;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
const NAMESPACE: &str = "rendezvous";
diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs
index ff637aa6f49..ce4933a29a9 100644
--- a/examples/rendezvous/src/bin/rzv-identify.rs
+++ b/examples/rendezvous/src/bin/rzv-identify.rs
@@ -18,13 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::time::Duration;
+
use futures::StreamExt;
use libp2p::{
identify, noise, ping, rendezvous,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr,
};
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs
index bd848238d4a..8ef2d30c880 100644
--- a/examples/rendezvous/src/bin/rzv-register.rs
+++ b/examples/rendezvous/src/bin/rzv-register.rs
@@ -18,13 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::time::Duration;
+
use futures::StreamExt;
use libp2p::{
noise, ping, rendezvous,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux, Multiaddr,
};
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::main]
@@ -54,8 +55,8 @@ async fn main() {
.with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5)))
.build();
- // In production the external address should be the publicly facing IP address of the rendezvous point.
- // This address is recorded in the registration entry by the rendezvous point.
+ // In production the external address should be the publicly facing IP address of the rendezvous
+ // point. This address is recorded in the registration entry by the rendezvous point.
let external_address = "/ip4/127.0.0.1/tcp/0".parse::<Multiaddr>().unwrap();
swarm.add_external_address(external_address);
diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs
index a15bc1ca2d3..0f26f2c9934 100644
--- a/examples/rendezvous/src/main.rs
+++ b/examples/rendezvous/src/main.rs
@@ -20,14 +20,14 @@
#![doc = include_str!("../README.md")]
+use std::{error::Error, time::Duration};
+
use futures::StreamExt;
use libp2p::{
identify, noise, ping, rendezvous,
swarm::{NetworkBehaviour, SwarmEvent},
tcp, yamux,
};
-use std::error::Error;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/examples/stream/src/main.rs b/examples/stream/src/main.rs
index 872ab8c3b98..71d2d2fcc76 100644
--- a/examples/stream/src/main.rs
+++ b/examples/stream/src/main.rs
@@ -44,12 +44,14 @@ async fn main() -> Result<()> {
// Deal with incoming streams.
// Spawning a dedicated task is just one way of doing this.
// libp2p doesn't care how you handle incoming streams but you _must_ handle them somehow.
- // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application cannot keep up processing them.
+ // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application
+ // cannot keep up processing them.
tokio::spawn(async move {
// This loop handles incoming streams _sequentially_ but that doesn't have to be the case.
// You can also spawn a dedicated task per stream if you want to.
- // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an unbounded buffer.
- // Each task needs memory meaning an aggressive remote peer may force you OOM this way.
+ // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an
+ // unbounded buffer. Each task needs memory meaning an aggressive remote peer may
+ // force you OOM this way.
while let Some((peer, stream)) = incoming_streams.next().await {
match echo(stream).await {
@@ -102,7 +104,8 @@ async fn connection_handler(peer: PeerId, mut control: stream::Control) {
}
Err(error) => {
// Other errors may be temporary.
- // In production, something like an exponential backoff / circuit-breaker may be more appropriate.
+ // In production, something like an exponential backoff / circuit-breaker may be
+ // more appropriate.
tracing::debug!(%peer, %error);
continue;
}
diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs
index fd0764990d1..19de8d773ae 100644
--- a/examples/upnp/src/main.rs
+++ b/examples/upnp/src/main.rs
@@ -20,9 +20,10 @@
#![doc = include_str!("../README.md")]
+use std::error::Error;
+
use futures::prelude::*;
use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr};
-use std::error::Error;
use tracing_subscriber::EnvFilter;
#[tokio::main]
diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs
index 02229e16262..bc5a1bae4f5 100644
--- a/hole-punching-tests/src/main.rs
+++ b/hole-punching-tests/src/main.rs
@@ -18,24 +18,27 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ collections::HashMap,
+ fmt, io,
+ net::{IpAddr, Ipv4Addr},
+ str::FromStr,
+ time::Duration,
+};
+
use anyhow::{Context, Result};
use either::Either;
use futures::stream::StreamExt;
-use libp2p::core::transport::ListenerId;
-use libp2p::swarm::dial_opts::DialOpts;
-use libp2p::swarm::ConnectionId;
use libp2p::{
- core::multiaddr::{Multiaddr, Protocol},
+ core::{
+ multiaddr::{Multiaddr, Protocol},
+ transport::ListenerId,
+ },
dcutr, identify, noise, ping, relay,
- swarm::{NetworkBehaviour, SwarmEvent},
+ swarm::{dial_opts::DialOpts, ConnectionId, NetworkBehaviour, SwarmEvent},
tcp, yamux, Swarm,
};
use redis::AsyncCommands;
-use std::collections::HashMap;
-use std::net::{IpAddr, Ipv4Addr};
-use std::str::FromStr;
-use std::time::Duration;
-use std::{fmt, io};
/// The redis key we push the relay's TCP listen address to.
const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS";
diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs
index 922675097df..11cdaced795 100644
--- a/identity/src/ecdsa.rs
+++ b/identity/src/ecdsa.rs
@@ -20,10 +20,9 @@
//! ECDSA keys with secp256r1 curve support.
-use super::error::DecodingError;
-use core::cmp;
-use core::fmt;
-use core::hash;
+use core::{cmp, fmt, hash};
+use std::convert::Infallible;
+
use p256::{
ecdsa::{
signature::{Signer, Verifier},
@@ -32,9 +31,10 @@ use p256::{
EncodedPoint,
};
use sec1::{DecodeEcPrivateKey, EncodeEcPrivateKey};
-use std::convert::Infallible;
use zeroize::Zeroize;
+use super::error::DecodingError;
+
/// An ECDSA keypair generated using `secp256r1` curve.
#[derive(Clone)]
pub struct Keypair {
@@ -158,7 +158,8 @@ impl PublicKey {
self.0.verify(msg, &sig).is_ok()
}
- /// Try to parse a public key from a byte buffer containing raw components of a key with or without compression.
+ /// Try to parse a public key from a byte buffer containing raw
+ /// components of a key with or without compression.
pub fn try_from_bytes(k: &[u8]) -> Result<PublicKey, DecodingError> {
let enc_pt = EncodedPoint::from_bytes(k)
.map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?;
@@ -168,7 +169,8 @@ impl PublicKey {
.map(PublicKey)
}
- /// Convert a public key into a byte buffer containing raw components of the key without compression.
+ /// Convert a public key into a byte buffer containing
+ /// raw components of the key without compression.
pub fn to_bytes(&self) -> Vec<u8> {
self.0.to_encoded_point(false).as_bytes().to_owned()
}
diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs
index d77c44547d6..5a1a53dd4af 100644
--- a/identity/src/ed25519.rs
+++ b/identity/src/ed25519.rs
@@ -20,13 +20,13 @@
//! Ed25519 keys.
-use super::error::DecodingError;
-use core::cmp;
-use core::fmt;
-use core::hash;
+use core::{cmp, fmt, hash};
+
use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _};
use zeroize::Zeroize;
+use super::error::DecodingError;
+
/// An Ed25519 keypair.
#[derive(Clone)]
pub struct Keypair(ed25519::SigningKey);
@@ -152,7 +152,8 @@ impl PublicKey {
self.0.to_bytes()
}
- /// Try to parse a public key from a byte array containing the actual key as produced by `to_bytes`.
+ /// Try to parse a public key from a byte array containing
+ /// the actual key as produced by `to_bytes`.
pub fn try_from_bytes(k: &[u8]) -> Result<PublicKey, DecodingError> {
let k = <[u8; 32]>::try_from(k)
.map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e))?;
@@ -206,9 +207,10 @@ impl SecretKey {
#[cfg(test)]
mod tests {
- use super::*;
use quickcheck::*;
+ use super::*;
+
fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool {
kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes()
}
diff --git a/identity/src/error.rs b/identity/src/error.rs
index 71cd78fe1ea..6e8c4d02caa 100644
--- a/identity/src/error.rs
+++ b/identity/src/error.rs
@@ -20,8 +20,7 @@
//! Errors during identity key operations.
-use std::error::Error;
-use std::fmt;
+use std::{error::Error, fmt};
use crate::KeyType;
diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs
index f1e8a7c2142..a1bbba00fa9 100644
--- a/identity/src/keypair.rs
+++ b/identity/src/keypair.rs
@@ -24,40 +24,40 @@
feature = "ed25519",
feature = "rsa"
))]
-#[cfg(feature = "ed25519")]
-use crate::ed25519;
+use quick_protobuf::{BytesReader, Writer};
+
+#[cfg(feature = "ecdsa")]
+use crate::ecdsa;
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
feature = "ed25519",
feature = "rsa"
))]
-use crate::error::OtherVariantError;
-use crate::error::{DecodingError, SigningError};
+#[cfg(feature = "ed25519")]
+use crate::ed25519;
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
feature = "ed25519",
feature = "rsa"
))]
-use crate::proto;
+use crate::error::OtherVariantError;
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
feature = "ed25519",
feature = "rsa"
))]
-use quick_protobuf::{BytesReader, Writer};
-
+use crate::proto;
#[cfg(all(feature = "rsa", not(target_arch = "wasm32")))]
use crate::rsa;
-
#[cfg(feature = "secp256k1")]
use crate::secp256k1;
-
-#[cfg(feature = "ecdsa")]
-use crate::ecdsa;
-use crate::KeyType;
+use crate::{
+ error::{DecodingError, SigningError},
+ KeyType,
+};
/// Identity keypair of a node.
///
@@ -75,7 +75,6 @@ use crate::KeyType;
/// let mut bytes = std::fs::read("private.pk8").unwrap();
/// let keypair = Keypair::rsa_from_pkcs8(&mut bytes);
/// ```
-///
#[derive(Debug, Clone)]
pub struct Keypair {
keypair: KeyPairInner,
@@ -341,7 +340,8 @@ impl Keypair {
}
}
- /// Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain.
+ /// Deterministically derive a new secret from this [`Keypair`],
+ /// taking into account the provided domain.
///
/// This works for all key types except RSA where it returns `None`.
///
@@ -352,10 +352,11 @@ impl Keypair {
/// # use libp2p_identity as identity;
/// let key = identity::Keypair::generate_ed25519();
///
- /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519");
+ /// let new_key = key
+ /// .derive_secret(b"my encryption key")
+ /// .expect("can derive secret for ed25519");
/// # }
/// ```
- ///
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
@@ -904,9 +905,10 @@ mod tests {
#[test]
fn public_key_implements_hash() {
- use crate::PublicKey;
use std::hash::Hash;
+ use crate::PublicKey;
+
fn assert_implements_hash<T: Hash>() {}
assert_implements_hash::<PublicKey>();
@@ -914,9 +916,10 @@ mod tests {
#[test]
fn public_key_implements_ord() {
- use crate::PublicKey;
use std::cmp::Ord;
+ use crate::PublicKey;
+
fn assert_implements_ord<T: Ord>() {}
assert_implements_ord::<PublicKey>();
diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs
index 8ae6d99ae32..7f6d1f44eab 100644
--- a/identity/src/peer_id.rs
+++ b/identity/src/peer_id.rs
@@ -18,17 +18,19 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{fmt, str::FromStr};
+
#[cfg(feature = "rand")]
use rand::Rng;
use sha2::Digest as _;
-use std::{fmt, str::FromStr};
use thiserror::Error;
/// Local type-alias for multihash.
///
/// Must be big enough to accommodate for `MAX_INLINE_KEY_LENGTH`.
/// 64 satisfies that and can hold 512 bit hashes which is what the ecosystem typically uses.
-/// Given that this appears in our type-signature, using a "common" number here makes us more compatible.
+/// Given that this appears in our type-signature,
+/// using a "common" number here makes us more compatible.
type Multihash = multihash::Multihash<64>;
#[cfg(feature = "serde")]
diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs
index 5eb78a4af75..b14d8c66d86 100644
--- a/identity/src/rsa.rs
+++ b/identity/src/rsa.rs
@@ -20,15 +20,20 @@
//! RSA keys.
-use super::error::*;
-use asn1_der::typed::{DerDecodable, DerEncodable, DerTypeView, Sequence};
-use asn1_der::{Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking};
-use ring::rand::SystemRandom;
-use ring::signature::KeyPair;
-use ring::signature::{self, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256};
use std::{fmt, sync::Arc};
+
+use asn1_der::{
+ typed::{DerDecodable, DerEncodable, DerTypeView, Sequence},
+ Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking,
+};
+use ring::{
+ rand::SystemRandom,
+ signature::{self, KeyPair, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256},
+};
use zeroize::Zeroize;
+use super::error::*;
+
/// An RSA keypair.
#[derive(Clone)]
pub struct Keypair(Arc<RsaKeyPair>);
@@ -315,9 +320,10 @@ impl DerDecodable<'_> for Asn1SubjectPublicKeyInfo {
#[cfg(test)]
mod tests {
- use super::*;
use quickcheck::*;
+ use super::*;
+
const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8");
const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8");
const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8");
diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs
index a6e9e923268..e884cf1385d 100644
--- a/identity/src/secp256k1.rs
+++ b/identity/src/secp256k1.rs
@@ -20,15 +20,15 @@
//! Secp256k1 keys.
-use super::error::DecodingError;
+use core::{cmp, fmt, hash};
+
use asn1_der::typed::{DerDecodable, Sequence};
-use core::cmp;
-use core::fmt;
-use core::hash;
use libsecp256k1::{Message, Signature};
use sha2::{Digest as ShaDigestTrait, Sha256};
use zeroize::Zeroize;
+use super::error::DecodingError;
+
/// A Secp256k1 keypair.
#[derive(Clone)]
pub struct Keypair {
diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs
index df36f8e5baf..87a508742dc 100644
--- a/interop-tests/src/arch.rs
+++ b/interop-tests/src/arch.rs
@@ -1,7 +1,6 @@
// Native re-exports
#[cfg(not(target_arch = "wasm32"))]
pub(crate) use native::{build_swarm, init_logger, sleep, Instant, RedisClient};
-
// Wasm re-exports
#[cfg(target_arch = "wasm32")]
pub(crate) use wasm::{build_swarm, init_logger, sleep, Instant, RedisClient};
@@ -11,11 +10,13 @@ pub(crate) mod native {
use std::time::Duration;
use anyhow::{bail, Context, Result};
- use futures::future::BoxFuture;
- use futures::FutureExt;
- use libp2p::identity::Keypair;
- use libp2p::swarm::{NetworkBehaviour, Swarm};
- use libp2p::{noise, tcp, tls, yamux};
+ use futures::{future::BoxFuture, FutureExt};
+ use libp2p::{
+ identity::Keypair,
+ noise,
+ swarm::{NetworkBehaviour, Swarm},
+ tcp, tls, yamux,
+ };
use libp2p_mplex as mplex;
use libp2p_webrtc as webrtc;
use redis::AsyncCommands;
@@ -186,15 +187,19 @@ pub(crate) mod native {
#[cfg(target_arch = "wasm32")]
pub(crate) mod wasm {
+ use std::time::Duration;
+
use anyhow::{bail, Context, Result};
use futures::future::{BoxFuture, FutureExt};
- use libp2p::core::upgrade::Version;
- use libp2p::identity::Keypair;
- use libp2p::swarm::{NetworkBehaviour, Swarm};
- use libp2p::{noise, websocket_websys, webtransport_websys, yamux, Transport as _};
+ use libp2p::{
+ core::upgrade::Version,
+ identity::Keypair,
+ noise,
+ swarm::{NetworkBehaviour, Swarm},
+ websocket_websys, webtransport_websys, yamux, Transport as _,
+ };
use libp2p_mplex as mplex;
use libp2p_webrtc_websys as webrtc_websys;
- use std::time::Duration;
use crate::{BlpopRequest, Muxer, SecProtocol, Transport};
diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs
index 0d697a0e2a3..7730b869456 100644
--- a/interop-tests/src/bin/wasm_ping.rs
+++ b/interop-tests/src/bin/wasm_ping.rs
@@ -1,26 +1,27 @@
#![allow(non_upper_case_globals)]
-use std::future::IntoFuture;
-use std::process::Stdio;
-use std::time::Duration;
+use std::{future::IntoFuture, process::Stdio, time::Duration};
use anyhow::{bail, Context, Result};
-use axum::http::{header, Uri};
-use axum::response::{Html, IntoResponse, Response};
-use axum::routing::get;
-use axum::{extract::State, http::StatusCode, routing::post, Json, Router};
+use axum::{
+ extract::State,
+ http::{header, StatusCode, Uri},
+ response::{Html, IntoResponse, Response},
+ routing::{get, post},
+ Json, Router,
+};
+use interop_tests::{BlpopRequest, Report};
use redis::{AsyncCommands, Client};
use thirtyfour::prelude::*;
-use tokio::io::{AsyncBufReadExt, BufReader};
-use tokio::net::TcpListener;
-use tokio::process::Child;
-use tokio::sync::mpsc;
-use tower_http::cors::CorsLayer;
-use tower_http::trace::TraceLayer;
+use tokio::{
+ io::{AsyncBufReadExt, BufReader},
+ net::TcpListener,
+ process::Child,
+ sync::mpsc,
+};
+use tower_http::{cors::CorsLayer, trace::TraceLayer};
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
-use interop_tests::{BlpopRequest, Report};
-
mod config;
const BIND_ADDR: &str = "127.0.0.1:8080";
diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs
index 0154bec51a4..a16dc4b8228 100644
--- a/interop-tests/src/lib.rs
+++ b/interop-tests/src/lib.rs
@@ -1,11 +1,14 @@
-use std::str::FromStr;
-use std::time::Duration;
+use std::{str::FromStr, time::Duration};
use anyhow::{bail, Context, Result};
use futures::{FutureExt, StreamExt};
-use libp2p::identity::Keypair;
-use libp2p::swarm::SwarmEvent;
-use libp2p::{identify, ping, swarm::NetworkBehaviour, Multiaddr};
+use libp2p::{
+ identify,
+ identity::Keypair,
+ ping,
+ swarm::{NetworkBehaviour, SwarmEvent},
+ Multiaddr,
+};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
diff --git a/libp2p/src/bandwidth.rs b/libp2p/src/bandwidth.rs
index 8931c5c4166..ac668e26b3f 100644
--- a/libp2p/src/bandwidth.rs
+++ b/libp2p/src/bandwidth.rs
@@ -20,13 +20,6 @@
#![allow(deprecated)]
-use crate::core::muxing::{StreamMuxer, StreamMuxerEvent};
-
-use futures::{
- io::{IoSlice, IoSliceMut},
- prelude::*,
- ready,
-};
use std::{
convert::TryFrom as _,
io,
@@ -38,6 +31,14 @@ use std::{
task::{Context, Poll},
};
+use futures::{
+ io::{IoSlice, IoSliceMut},
+ prelude::*,
+ ready,
+};
+
+use crate::core::muxing::{StreamMuxer, StreamMuxerEvent};
+
/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened
/// streams.
#[derive(Clone)]
@@ -123,7 +124,7 @@ impl BandwidthSinks {
/// Returns the total number of bytes that have been downloaded on all the streams.
///
/// > **Note**: This method is by design subject to race conditions. The returned value should
- /// > only ever be used for statistics purposes.
+ /// > only ever be used for statistics purposes.
pub fn total_inbound(&self) -> u64 {
self.inbound.load(Ordering::Relaxed)
}
@@ -131,7 +132,7 @@ impl BandwidthSinks {
/// Returns the total number of bytes that have been uploaded on all the streams.
///
/// > **Note**: This method is by design subject to race conditions. The returned value should
- /// > only ever be used for statistics purposes.
+ /// > only ever be used for statistics purposes.
pub fn total_outbound(&self) -> u64 {
self.outbound.load(Ordering::Relaxed)
}
diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs
index de003314cca..99c340a5e3e 100644
--- a/libp2p/src/builder.rs
+++ b/libp2p/src/builder.rs
@@ -33,31 +33,31 @@ mod select_security;
/// # relay: libp2p_relay::client::Behaviour,
/// # }
///
-/// let swarm = SwarmBuilder::with_new_identity()
-/// .with_tokio()
-/// .with_tcp(
-/// Default::default(),
-/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
-/// libp2p_yamux::Config::default,
-/// )?
-/// .with_quic()
-/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())?
-/// .with_dns()?
-/// .with_websocket(
-/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
-/// libp2p_yamux::Config::default,
-/// )
-/// .await?
-/// .with_relay_client(
-/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
-/// libp2p_yamux::Config::default,
-/// )?
-/// .with_behaviour(|_key, relay| MyBehaviour { relay })?
-/// .with_swarm_config(|cfg| {
-/// // Edit cfg here.
-/// cfg
-/// })
-/// .build();
+/// let swarm = SwarmBuilder::with_new_identity()
+/// .with_tokio()
+/// .with_tcp(
+/// Default::default(),
+/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
+/// libp2p_yamux::Config::default,
+/// )?
+/// .with_quic()
+/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())?
+/// .with_dns()?
+/// .with_websocket(
+/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
+/// libp2p_yamux::Config::default,
+/// )
+/// .await?
+/// .with_relay_client(
+/// (libp2p_tls::Config::new, libp2p_noise::Config::new),
+/// libp2p_yamux::Config::default,
+/// )?
+/// .with_behaviour(|_key, relay| MyBehaviour { relay })?
+/// .with_swarm_config(|cfg| {
+/// // Edit cfg here.
+/// cfg
+/// })
+/// .build();
/// #
/// # Ok(())
/// # }
pub struct SwarmBuilder<Provider, Phase> {
#[cfg(test)]
mod tests {
- use crate::SwarmBuilder;
use libp2p_core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport};
use libp2p_identity::PeerId;
use libp2p_swarm::NetworkBehaviour;
+ use crate::SwarmBuilder;
+
#[test]
#[cfg(all(
feature = "tokio",
diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs
index c9679a46767..6e3f41755ca 100644
--- a/libp2p/src/builder/phase.rs
+++ b/libp2p/src/builder/phase.rs
@@ -19,6 +19,8 @@ use bandwidth_metrics::*;
use behaviour::*;
use build::*;
use dns::*;
+use libp2p_core::{muxing::StreamMuxerBox, Transport};
+use libp2p_identity::Keypair;
use other_transport::*;
use provider::*;
use quic::*;
@@ -27,12 +29,9 @@ use swarm::*;
use tcp::*;
use websocket::*;
-use super::select_muxer::SelectMuxerUpgrade;
-use super::select_security::SelectSecurityUpgrade;
-use super::SwarmBuilder;
-
-use libp2p_core::{muxing::StreamMuxerBox, Transport};
-use libp2p_identity::Keypair;
+use super::{
+ select_muxer::SelectMuxerUpgrade, select_security::SelectSecurityUpgrade, SwarmBuilder,
+};
#[allow(unreachable_pub)]
pub trait IntoSecurityUpgrade {
diff --git a/libp2p/src/builder/phase/bandwidth_logging.rs b/libp2p/src/builder/phase/bandwidth_logging.rs
index cee9498fcaa..f24df5f3df5 100644
--- a/libp2p/src/builder/phase/bandwidth_logging.rs
+++ b/libp2p/src/builder/phase/bandwidth_logging.rs
@@ -1,10 +1,9 @@
+use std::{marker::PhantomData, sync::Arc};
+
use super::*;
#[allow(deprecated)]
use crate::bandwidth::BandwidthSinks;
-use crate::transport_ext::TransportExt;
-use crate::SwarmBuilder;
-use std::marker::PhantomData;
-use std::sync::Arc;
+use crate::{transport_ext::TransportExt, SwarmBuilder};
pub struct BandwidthLoggingPhase<T, R> {
pub(crate) relay_behaviour: R,
diff --git a/libp2p/src/builder/phase/bandwidth_metrics.rs b/libp2p/src/builder/phase/bandwidth_metrics.rs
index 52daa731ddd..ddd292c140e 100644
--- a/libp2p/src/builder/phase/bandwidth_metrics.rs
+++ b/libp2p/src/builder/phase/bandwidth_metrics.rs
@@ -1,10 +1,9 @@
+use std::{marker::PhantomData, sync::Arc};
+
use super::*;
#[allow(deprecated)]
use crate::bandwidth::BandwidthSinks;
-use crate::transport_ext::TransportExt;
-use crate::SwarmBuilder;
-use std::marker::PhantomData;
-use std::sync::Arc;
+use crate::{transport_ext::TransportExt, SwarmBuilder};
pub struct BandwidthMetricsPhase<T, R> {
pub(crate) relay_behaviour: R,
diff --git a/libp2p/src/builder/phase/behaviour.rs b/libp2p/src/builder/phase/behaviour.rs
index 939db935c80..22f8c617051 100644
--- a/libp2p/src/builder/phase/behaviour.rs
+++ b/libp2p/src/builder/phase/behaviour.rs
@@ -1,8 +1,9 @@
+use std::{convert::Infallible, marker::PhantomData};
+
+use libp2p_swarm::NetworkBehaviour;
+
use super::*;
use crate::SwarmBuilder;
-use libp2p_swarm::NetworkBehaviour;
-use std::convert::Infallible;
-use std::marker::PhantomData;
pub struct BehaviourPhase<T, R> {
pub(crate) relay_behaviour: R,
diff --git a/libp2p/src/builder/phase/build.rs b/libp2p/src/builder/phase/build.rs
index 80a83994eeb..f9621da756b 100644
--- a/libp2p/src/builder/phase/build.rs
+++ b/libp2p/src/builder/phase/build.rs
@@ -1,9 +1,9 @@
+use libp2p_core::Transport;
+use libp2p_swarm::Swarm;
+
#[allow(unused_imports)]
use super::*;
-
use crate::SwarmBuilder;
-use libp2p_core::Transport;
-use libp2p_swarm::Swarm;
pub struct BuildPhase<T, B> {
pub(crate) behaviour: B,
diff --git a/libp2p/src/builder/phase/dns.rs b/libp2p/src/builder/phase/dns.rs
index 638064d58bb..83653836a34 100644
--- a/libp2p/src/builder/phase/dns.rs
+++ b/libp2p/src/builder/phase/dns.rs
@@ -1,6 +1,7 @@
+use std::marker::PhantomData;
+
use super::*;
use crate::SwarmBuilder;
-use std::marker::PhantomData;
pub struct DnsPhase<T> {
pub(crate) transport: T,
diff --git a/libp2p/src/builder/phase/identity.rs b/libp2p/src/builder/phase/identity.rs
index ceb86819dc7..e2511267cd3 100644
--- a/libp2p/src/builder/phase/identity.rs
+++ b/libp2p/src/builder/phase/identity.rs
@@ -1,6 +1,7 @@
+use std::marker::PhantomData;
+
use super::*;
use crate::SwarmBuilder;
-use std::marker::PhantomData;
pub struct IdentityPhase {}
diff --git a/libp2p/src/builder/phase/other_transport.rs b/libp2p/src/builder/phase/other_transport.rs
index e04621b2e3f..c3b951c8c75 100644
--- a/libp2p/src/builder/phase/other_transport.rs
+++ b/libp2p/src/builder/phase/other_transport.rs
@@ -1,20 +1,19 @@
-use std::convert::Infallible;
-use std::marker::PhantomData;
-use std::sync::Arc;
+use std::{convert::Infallible, marker::PhantomData, sync::Arc};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::Transport;
+use libp2p_core::{
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ Transport,
+};
#[cfg(feature = "relay")]
use libp2p_core::{Negotiated, UpgradeInfo};
#[cfg(feature = "relay")]
use libp2p_identity::PeerId;
+use super::*;
#[allow(deprecated)]
use crate::bandwidth::BandwidthSinks;
use crate::SwarmBuilder;
-use super::*;
-
pub struct OtherTransportPhase<T> {
pub(crate) transport: T,
}
diff --git a/libp2p/src/builder/phase/provider.rs b/libp2p/src/builder/phase/provider.rs
index 2a9154cda74..00a79e14a30 100644
--- a/libp2p/src/builder/phase/provider.rs
+++ b/libp2p/src/builder/phase/provider.rs
@@ -1,13 +1,15 @@
+use std::marker::PhantomData;
+
#[allow(unused_imports)]
use super::*;
use crate::SwarmBuilder;
-use std::marker::PhantomData;
/// Represents the phase where a provider is not yet specified.
-/// This is a marker type used in the type-state pattern to ensure compile-time checks of the builder's state.
+/// This is a marker type used in the type-state pattern to ensure compile-time checks of the
+/// builder's state.
pub enum NoProviderSpecified {}
-// Define enums for each of the possible runtime environments. These are used as markers in the type-state pattern,
-// allowing compile-time checks for the appropriate environment configuration.
+// Define enums for each of the possible runtime environments. These are used as markers in the
+// type-state pattern, allowing compile-time checks for the appropriate environment configuration.
#[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))]
/// Represents the AsyncStd runtime environment.
@@ -26,7 +28,8 @@ pub struct ProviderPhase {}
impl SwarmBuilder<NoProviderSpecified, ProviderPhase> {
/// Configures the SwarmBuilder to use the AsyncStd runtime.
- /// This method is only available when compiling for non-Wasm targets with the `async-std` feature enabled.
+ /// This method is only available when compiling for non-Wasm
+ /// targets with the `async-std` feature enabled.
#[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))]
pub fn with_async_std(self) -> SwarmBuilder<AsyncStd, TcpPhase> {
SwarmBuilder {
@@ -37,7 +40,8 @@ impl SwarmBuilder {
}
/// Configures the SwarmBuilder to use the Tokio runtime.
- /// This method is only available when compiling for non-Wasm targets with the `tokio` feature enabled
+ /// This method is only available when compiling for non-Wasm
+ /// targets with the `tokio` feature enabled
#[cfg(all(not(target_arch = "wasm32"), feature = "tokio"))]
pub fn with_tokio(self) -> SwarmBuilder<Tokio, TcpPhase> {
SwarmBuilder {
diff --git a/libp2p/src/builder/phase/quic.rs b/libp2p/src/builder/phase/quic.rs
index e030e9493bb..1b6329c1095 100644
--- a/libp2p/src/builder/phase/quic.rs
+++ b/libp2p/src/builder/phase/quic.rs
@@ -1,5 +1,5 @@
-use super::*;
-use crate::SwarmBuilder;
+use std::{marker::PhantomData, sync::Arc};
+
#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))]
use libp2p_core::muxing::StreamMuxer;
use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
@@ -8,7 +8,9 @@ use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
all(not(target_arch = "wasm32"), feature = "websocket")
))]
use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo};
-use std::{marker::PhantomData, sync::Arc};
+
+use super::*;
+use crate::SwarmBuilder;
pub struct QuicPhase<T> {
pub(crate) transport: T,
diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs
index f8305f9d246..33dbf1fb54c 100644
--- a/libp2p/src/builder/phase/relay.rs
+++ b/libp2p/src/builder/phase/relay.rs
@@ -10,9 +10,8 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, StreamMuxer, Upgr
#[cfg(feature = "relay")]
use libp2p_identity::PeerId;
-use crate::SwarmBuilder;
-
use super::*;
+use crate::SwarmBuilder;
pub struct RelayPhase<T> {
pub(crate) transport: T,
diff --git a/libp2p/src/builder/phase/tcp.rs b/libp2p/src/builder/phase/tcp.rs
index 4b7cf29b3d2..f38f52441e5 100644
--- a/libp2p/src/builder/phase/tcp.rs
+++ b/libp2p/src/builder/phase/tcp.rs
@@ -1,5 +1,5 @@
-use super::*;
-use crate::SwarmBuilder;
+use std::marker::PhantomData;
+
#[cfg(all(
not(target_arch = "wasm32"),
any(feature = "tcp", feature = "websocket")
@@ -14,7 +14,9 @@ use libp2p_core::Transport;
use libp2p_core::{
upgrade::InboundConnectionUpgrade, upgrade::OutboundConnectionUpgrade, Negotiated, UpgradeInfo,
};
-use std::marker::PhantomData;
+
+use super::*;
+use crate::SwarmBuilder;
pub struct TcpPhase {}
diff --git a/libp2p/src/builder/phase/websocket.rs b/libp2p/src/builder/phase/websocket.rs
index 68a85bb77b7..a23c6eca854 100644
--- a/libp2p/src/builder/phase/websocket.rs
+++ b/libp2p/src/builder/phase/websocket.rs
@@ -1,5 +1,5 @@
-use super::*;
-use crate::SwarmBuilder;
+use std::marker::PhantomData;
+
#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))]
use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox};
use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
@@ -15,7 +15,9 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo};
feature = "relay"
))]
use libp2p_identity::PeerId;
-use std::marker::PhantomData;
+
+use super::*;
+use crate::SwarmBuilder;
pub struct WebsocketPhase<T> {
pub(crate) transport: T,
@@ -126,8 +128,8 @@ impl_websocket_builder!(
impl_websocket_builder!(
"tokio",
super::provider::Tokio,
- // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be consistent
- // with above AsyncStd construction.
+ // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be
+ // consistent with above AsyncStd construction.
futures::future::ready(libp2p_dns::tokio::Transport::system(
libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default())
)),
diff --git a/libp2p/src/builder/select_muxer.rs b/libp2p/src/builder/select_muxer.rs
index c93ba9d9991..93ae0547269 100644
--- a/libp2p/src/builder/select_muxer.rs
+++ b/libp2p/src/builder/select_muxer.rs
@@ -20,12 +20,15 @@
#![allow(unreachable_pub)]
+use std::iter::{Chain, Map};
+
use either::Either;
use futures::future;
-use libp2p_core::either::EitherFuture;
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::UpgradeInfo;
-use std::iter::{Chain, Map};
+use libp2p_core::{
+ either::EitherFuture,
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ UpgradeInfo,
+};
#[derive(Debug, Clone)]
pub struct SelectMuxerUpgrade<A, B>(A, B);
diff --git a/libp2p/src/builder/select_security.rs b/libp2p/src/builder/select_security.rs
index d6c7f8c172f..1ed760feb1b 100644
--- a/libp2p/src/builder/select_security.rs
+++ b/libp2p/src/builder/select_security.rs
@@ -21,13 +21,15 @@
#![allow(unreachable_pub)]
+use std::iter::{Chain, Map};
+
use either::Either;
-use futures::future::MapOk;
-use futures::{future, TryFutureExt};
-use libp2p_core::either::EitherFuture;
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo};
+use futures::{future, future::MapOk, TryFutureExt};
+use libp2p_core::{
+ either::EitherFuture,
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
+};
use libp2p_identity::PeerId;
-use std::iter::{Chain, Map};
/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either
/// sub-upgrade.
diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs
index 58f911e9445..1ec1cc530fc 100644
--- a/libp2p/src/lib.rs
+++ b/libp2p/src/lib.rs
@@ -34,11 +34,6 @@
pub use bytes;
pub use futures;
-#[doc(inline)]
-pub use libp2p_core::multihash;
-#[doc(inline)]
-pub use multiaddr;
-
#[doc(inline)]
pub use libp2p_allow_block_list as allow_block_list;
#[cfg(feature = "autonat")]
@@ -48,6 +43,8 @@ pub use libp2p_autonat as autonat;
pub use libp2p_connection_limits as connection_limits;
#[doc(inline)]
pub use libp2p_core as core;
+#[doc(inline)]
+pub use libp2p_core::multihash;
#[cfg(feature = "dcutr")]
#[doc(inline)]
pub use libp2p_dcutr as dcutr;
@@ -140,6 +137,8 @@ pub use libp2p_webtransport_websys as webtransport_websys;
#[cfg(feature = "yamux")]
#[doc(inline)]
pub use libp2p_yamux as yamux;
+#[doc(inline)]
+pub use multiaddr;
mod builder;
mod transport_ext;
@@ -149,15 +148,18 @@ pub mod bandwidth;
#[cfg(doc)]
pub mod tutorials;
-pub use self::builder::SwarmBuilder;
-pub use self::core::{
- transport::TransportError,
- upgrade::{InboundUpgrade, OutboundUpgrade},
- Transport,
-};
-pub use self::multiaddr::{multiaddr as build_multiaddr, Multiaddr};
-pub use self::swarm::Swarm;
-pub use self::transport_ext::TransportExt;
pub use libp2p_identity as identity;
pub use libp2p_identity::PeerId;
pub use libp2p_swarm::{Stream, StreamProtocol};
+
+pub use self::{
+ builder::SwarmBuilder,
+ core::{
+ transport::TransportError,
+ upgrade::{InboundUpgrade, OutboundUpgrade},
+ Transport,
+ },
+ multiaddr::{multiaddr as build_multiaddr, Multiaddr},
+ swarm::Swarm,
+ transport_ext::TransportExt,
+};
diff --git a/libp2p/src/transport_ext.rs b/libp2p/src/transport_ext.rs
index 4f07484fc1f..0ed5b816903 100644
--- a/libp2p/src/transport_ext.rs
+++ b/libp2p/src/transport_ext.rs
@@ -20,15 +20,19 @@
//! Provides the `TransportExt` trait.
+use std::sync::Arc;
+
+use libp2p_identity::PeerId;
+
#[allow(deprecated)]
use crate::bandwidth::{BandwidthLogging, BandwidthSinks};
-use crate::core::{
- muxing::{StreamMuxer, StreamMuxerBox},
- transport::Boxed,
+use crate::{
+ core::{
+ muxing::{StreamMuxer, StreamMuxerBox},
+ transport::Boxed,
+ },
+ Transport,
};
-use crate::Transport;
-use libp2p_identity::PeerId;
-use std::sync::Arc;
/// Trait automatically implemented on all objects that implement `Transport`. Provides some
/// additional utilities.
@@ -42,23 +46,17 @@ pub trait TransportExt: Transport {
/// # Example
///
/// ```
- /// use libp2p_yamux as yamux;
+ /// use libp2p::{core::upgrade, identity, Transport, TransportExt};
/// use libp2p_noise as noise;
/// use libp2p_tcp as tcp;
- /// use libp2p::{
- /// core::upgrade,
- /// identity,
- /// TransportExt,
- /// Transport,
- /// };
+ /// use libp2p_yamux as yamux;
///
/// let id_keys = identity::Keypair::generate_ed25519();
///
/// let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true))
/// .upgrade(upgrade::Version::V1)
/// .authenticate(
- /// noise::Config::new(&id_keys)
- /// .expect("Signing libp2p-noise static DH keypair failed."),
+ /// noise::Config::new(&id_keys).expect("Signing libp2p-noise static DH keypair failed."),
/// )
/// .multiplex(yamux::Config::default())
/// .boxed();
diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs
index 0963c0ca59e..06a4dad4037 100644
--- a/libp2p/src/tutorials/hole_punching.rs
+++ b/libp2p/src/tutorials/hole_punching.rs
@@ -57,8 +57,8 @@
//! cargo build --bin relay-server-example
//! ```
//!
-//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy
-//! it to your server.
+//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally,
+//! copy it to your server.
//!
//! On your server, start the relay server binary:
//!
@@ -98,7 +98,8 @@
//!
//! ``` bash
//! $ libp2p-lookup direct --address /ip4/111.11.111.111/tcp/4001
-//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") succeeded.
+//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN")
+//! succeeded.
//!
//! Protocol version: "/TODO/0.0.1"
//! Agent version: "rust-libp2p/0.36.0"
@@ -163,12 +164,18 @@
//! [`Multiaddr`](crate::Multiaddr).
//!
//! ``` ignore
-//! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer }
+//! [2022-01-30T12:54:10Z INFO client] Established connection to
+//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address:
+//! "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/
+//! p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X",
+//! role_override: Dialer }
//! ```
//!
-//! 2. The direct connection upgrade, also known as hole punch, succeeding.
-//! Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection.
+//! 2. The direct connection upgrade, also known as hole punch, succeeding. Reported by
+//! [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with
+//! the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection.
//!
//! ``` ignore
-//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) }
+//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id:
+//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) }
//! ```
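A minimal sketch of how these two observations might be matched in the client's event loop; the composed behaviour, its generated `MyBehaviourEvent::Dcutr` variant and the `swarm` handle are assumptions here:

```rust
use libp2p::{dcutr, swarm::SwarmEvent};

loop {
    match swarm.select_next_some().await {
        // Observation 2: dcutr reports the outcome of the hole punch.
        SwarmEvent::Behaviour(MyBehaviourEvent::Dcutr(dcutr::Event {
            remote_peer_id,
            result,
        })) => match result {
            Ok(connection_id) => {
                tracing::info!(%remote_peer_id, ?connection_id, "hole punch succeeded")
            }
            Err(error) => tracing::warn!(%remote_peer_id, %error, "hole punch failed"),
        },
        _ => {}
    }
}
```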
diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs
index 31bf5ba3a14..f35fef8f488 100644
--- a/libp2p/src/tutorials/ping.rs
+++ b/libp2p/src/tutorials/ping.rs
@@ -72,6 +72,7 @@
//!
//! ```rust
//! use std::error::Error;
+//!
//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
@@ -98,8 +99,9 @@
//!
//! ```rust
//! use std::error::Error;
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
@@ -139,12 +141,14 @@
//! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly
//! separate _how_ to send bytes from _what_ bytes and to _whom_ to send.
//!
-//! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end:
+//! With the above in mind, let's extend our example, creating a
+//! [`ping::Behaviour`](crate::ping::Behaviour) at the end:
//!
//! ```rust
//! use std::error::Error;
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
@@ -174,8 +178,9 @@
//!
//! ```rust
//! use std::error::Error;
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
@@ -209,8 +214,9 @@
//!
//! ```rust
//! use std::{error::Error, time::Duration};
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
@@ -226,7 +232,9 @@
//! yamux::Config::default,
//! )?
//! .with_behaviour(|_| ping::Behaviour::default())?
-//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
+//! .with_swarm_config(|cfg| {
+//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))
+//! })
//! .build();
//!
//! Ok(())
@@ -261,8 +269,9 @@
//!
//! ```rust
//! use std::{error::Error, time::Duration};
-//! use tracing_subscriber::EnvFilter;
+//!
//! use libp2p::{noise, ping, tcp, yamux, Multiaddr};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
@@ -278,7 +287,9 @@
//! yamux::Config::default,
//! )?
//! .with_behaviour(|_| ping::Behaviour::default())?
-//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
+//! .with_swarm_config(|cfg| {
+//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))
+//! })
//! .build();
//!
//! // Tell the swarm to listen on all interfaces and a random, OS-assigned
@@ -305,9 +316,10 @@
//!
//! ```no_run
//! use std::{error::Error, time::Duration};
-//! use tracing_subscriber::EnvFilter;
-//! use libp2p::{noise, ping, tcp, yamux, Multiaddr, swarm::SwarmEvent};
+//!
//! use futures::prelude::*;
+//! use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr};
+//! use tracing_subscriber::EnvFilter;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
@@ -323,7 +335,9 @@
//! yamux::Config::default,
//! )?
//! .with_behaviour(|_| ping::Behaviour::default())?
-//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
+//! .with_swarm_config(|cfg| {
+//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))
+//! })
//! .build();
//!
//! // Tell the swarm to listen on all interfaces and a random, OS-assigned
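For completeness, a short sketch of the event loop this tutorial builds toward, printing the listen address and every ping round trip (the `swarm` constructed in the snippets above is assumed to be in scope):

```rust
use futures::prelude::*;
use libp2p::swarm::SwarmEvent;

loop {
    match swarm.select_next_some().await {
        SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
        SwarmEvent::Behaviour(event) => println!("{event:?}"),
        _ => {}
    }
}
```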
diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs
index f93cf4ffefa..ea0d56b5a67 100644
--- a/misc/allow-block-list/src/lib.rs
+++ b/misc/allow-block-list/src/lib.rs
@@ -31,12 +31,12 @@
//! #[derive(NetworkBehaviour)]
//! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
//! struct MyBehaviour {
-//! allowed_peers: allow_block_list::Behaviour,
+//! allowed_peers: allow_block_list::Behaviour,
//! }
//!
//! # fn main() {
//! let behaviour = MyBehaviour {
-//! allowed_peers: allow_block_list::Behaviour::default()
+//! allowed_peers: allow_block_list::Behaviour::default(),
//! };
//! # }
//! ```
@@ -51,27 +51,29 @@
//! #[derive(NetworkBehaviour)]
//! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
//! struct MyBehaviour {
-//! blocked_peers: allow_block_list::Behaviour,
+//! blocked_peers: allow_block_list::Behaviour,
//! }
//!
//! # fn main() {
//! let behaviour = MyBehaviour {
-//! blocked_peers: allow_block_list::Behaviour::default()
+//! blocked_peers: allow_block_list::Behaviour::default(),
//! };
//! # }
//! ```
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use std::{
+ collections::{HashSet, VecDeque},
+ convert::Infallible,
+ fmt,
+ task::{Context, Poll, Waker},
+};
+
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler,
THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::collections::{HashSet, VecDeque};
-use std::convert::Infallible;
-use std::fmt;
-use std::task::{Context, Poll, Waker};
/// A [`NetworkBehaviour`] that can act as an allow or block list.
#[derive(Default, Debug)]
@@ -101,7 +103,8 @@ impl Behaviour {
/// Allow connections to the given peer.
///
- /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set.
+ /// Returns whether the peer was newly inserted. Does nothing if the peer
+ /// was already present in the set.
pub fn allow_peer(&mut self, peer: PeerId) -> bool {
let inserted = self.state.peers.insert(peer);
if inserted {
@@ -116,7 +119,8 @@ impl Behaviour {
///
/// All active connections to this peer will be closed immediately.
///
- /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set.
+ /// Returns whether the peer was present in the set. Does nothing if the peer
+ /// was not present in the set.
pub fn disallow_peer(&mut self, peer: PeerId) -> bool {
let removed = self.state.peers.remove(&peer);
if removed {
@@ -139,7 +143,8 @@ impl Behaviour {
///
/// All active connections to this peer will be closed immediately.
///
- /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set.
+ /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in
+ /// the set.
pub fn block_peer(&mut self, peer: PeerId) -> bool {
let inserted = self.state.peers.insert(peer);
if inserted {
@@ -153,7 +158,8 @@ impl Behaviour {
/// Unblock connections to a given peer.
///
- /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set.
+ /// Returns whether the peer was present in the set. Does nothing if the peer
+ /// was not present in the set.
pub fn unblock_peer(&mut self, peer: PeerId) -> bool {
let removed = self.state.peers.remove(&peer);
if removed {
@@ -294,10 +300,11 @@ where
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_swarm::{dial_opts::DialOpts, DialError, ListenError, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
+ use super::*;
+
#[async_std::test]
async fn cannot_dial_blocked_peer() {
        let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
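A brief sketch of toggling membership at runtime for the block-list variant documented above, assuming a `swarm` whose behaviour has the `blocked_peers` field from the example and a `peer_id` in scope:

```rust
// Returns whether the peer was newly inserted; any active connections
// to it are closed immediately.
let newly_blocked = swarm.behaviour_mut().blocked_peers.block_peer(peer_id);

// Returns whether the peer had been present in the set.
let was_present = swarm.behaviour_mut().blocked_peers.unblock_peer(peer_id);
```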
diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs
index 016a7f2cfd4..c8df5be5653 100644
--- a/misc/connection-limits/src/lib.rs
+++ b/misc/connection-limits/src/lib.rs
@@ -18,6 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ collections::{HashMap, HashSet},
+ convert::Infallible,
+ fmt,
+ task::{Context, Poll},
+};
+
use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
@@ -25,22 +32,22 @@ use libp2p_swarm::{
dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler,
THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::collections::{HashMap, HashSet};
-use std::convert::Infallible;
-use std::fmt;
-use std::task::{Context, Poll};
/// A [`NetworkBehaviour`] that enforces a set of [`ConnectionLimits`].
///
-/// For these limits to take effect, this needs to be composed into the behaviour tree of your application.
+/// For these limits to take effect, this needs to be composed
+/// into the behaviour tree of your application.
///
-/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
-/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted.
-/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
-/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) **this**
-/// behaviour denied the connection.
+/// If a connection is denied due to a limit, either a
+/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
+/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError)
+/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively
+/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
+/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if)
+/// **this** behaviour denied the connection.
///
-/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error.
+/// If you employ multiple [`NetworkBehaviour`]s that manage connections,
+/// it may also be a different error.
///
/// # Example
///
@@ -53,9 +60,9 @@ use std::task::{Context, Poll};
/// #[derive(NetworkBehaviour)]
/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
/// struct MyBehaviour {
-/// identify: identify::Behaviour,
-/// ping: ping::Behaviour,
-/// limits: connection_limits::Behaviour
+/// identify: identify::Behaviour,
+/// ping: ping::Behaviour,
+/// limits: connection_limits::Behaviour,
/// }
/// ```
pub struct Behaviour {
@@ -367,14 +374,16 @@ impl NetworkBehaviour for Behaviour {
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_swarm::{
- behaviour::toggle::Toggle, dial_opts::DialOpts, dial_opts::PeerCondition, DialError,
- ListenError, Swarm, SwarmEvent,
+ behaviour::toggle::Toggle,
+ dial_opts::{DialOpts, PeerCondition},
+ DialError, ListenError, Swarm, SwarmEvent,
};
use libp2p_swarm_test::SwarmExt;
use quickcheck::*;
+ use super::*;
+
#[test]
fn max_outgoing() {
use rand::Rng;
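A minimal sketch of the downcast described in the docs above, assuming `swarm` and `peer_id` exist in scope:

```rust
use libp2p::{connection_limits, swarm::DialError};

if let Err(DialError::Denied { cause }) = swarm.dial(peer_id) {
    // The downcast succeeds only if *this* behaviour denied the connection.
    if let Ok(exceeded) = cause.downcast::<connection_limits::Exceeded>() {
        println!("connection limit exceeded: {exceeded}");
    }
}
```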
diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs
index e6c563b3c32..7d46b1849bd 100644
--- a/misc/keygen/src/config.rs
+++ b/misc/keygen/src/config.rs
@@ -1,10 +1,8 @@
+use std::{error::Error, path::Path};
+
use base64::prelude::*;
+use libp2p_identity::{Keypair, PeerId};
use serde::{Deserialize, Serialize};
-use std::error::Error;
-use std::path::Path;
-
-use libp2p_identity::Keypair;
-use libp2p_identity::PeerId;
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs
index 64d98005369..4c4d3bfbf66 100644
--- a/misc/keygen/src/main.rs
+++ b/misc/keygen/src/main.rs
@@ -1,9 +1,12 @@
+use std::{
+ error::Error,
+ path::PathBuf,
+ str::{self, FromStr},
+ sync::mpsc,
+ thread,
+};
+
use base64::prelude::*;
-use std::error::Error;
-use std::path::PathBuf;
-use std::str::{self, FromStr};
-use std::sync::mpsc;
-use std::thread;
mod config;
diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs
index e2a89977991..0735464a67e 100644
--- a/misc/memory-connection-limits/src/lib.rs
+++ b/misc/memory-connection-limits/src/lib.rs
@@ -18,35 +18,40 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ convert::Infallible,
+ fmt,
+ task::{Context, Poll},
+ time::{Duration, Instant},
+};
+
use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
-use std::convert::Infallible;
-
-use std::{
- fmt,
- task::{Context, Poll},
- time::{Duration, Instant},
-};
use sysinfo::MemoryRefreshKind;
/// A [`NetworkBehaviour`] that enforces a set of memory usage based limits.
///
-/// For these limits to take effect, this needs to be composed into the behaviour tree of your application.
+/// For these limits to take effect, this needs to be composed
+/// into the behaviour tree of your application.
///
-/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
-/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted.
-/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
-/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error if (and only if) **this**
-/// behaviour denied the connection.
+/// If a connection is denied due to a limit, either a
+/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError)
+/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError)
+/// will be emitted. The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively
+/// the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant
+/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error
+/// if (and only if) **this** behaviour denied the connection.
///
-/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error.
+/// If you employ multiple [`NetworkBehaviour`]s that manage connections,
+/// it may also be a different error.
///
/// [Behaviour::with_max_bytes] and [Behaviour::with_max_percentage] are mutually exclusive.
-/// If you need to employ both of them, compose two instances of [Behaviour] into your custom behaviour.
+/// If you need to employ both of them,
+/// compose two instances of [Behaviour] into your custom behaviour.
///
/// # Example
///
@@ -58,8 +63,8 @@ use sysinfo::MemoryRefreshKind;
/// #[derive(NetworkBehaviour)]
/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
/// struct MyBehaviour {
-/// identify: identify::Behaviour,
-/// limits: memory_connection_limits::Behaviour
+/// identify: identify::Behaviour,
+/// limits: memory_connection_limits::Behaviour,
/// }
/// ```
pub struct Behaviour {
@@ -68,7 +73,8 @@ pub struct Behaviour {
last_refreshed: Instant,
}
-/// The maximum duration for which the retrieved memory-stats of the process are allowed to be stale.
+/// The maximum duration for which the retrieved memory-stats
+/// of the process are allowed to be stale.
///
/// Once exceeded, we will retrieve new stats.
const MAX_STALE_DURATION: Duration = Duration::from_millis(100);
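A small sketch of the two mutually exclusive constructors mentioned above; the numeric values are illustrative only:

```rust
// Deny new connections once the process itself uses more than 1 GiB ...
let by_bytes = memory_connection_limits::Behaviour::with_max_bytes(1024 * 1024 * 1024);

// ... or once it uses more than 90 % of the system memory.
let by_percentage = memory_connection_limits::Behaviour::with_max_percentage(0.9);
```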
diff --git a/misc/memory-connection-limits/tests/max_bytes.rs b/misc/memory-connection-limits/tests/max_bytes.rs
index 7f89e2c7a9a..e82ad67d076 100644
--- a/misc/memory-connection-limits/tests/max_bytes.rs
+++ b/misc/memory-connection-limits/tests/max_bytes.rs
@@ -20,14 +20,14 @@
mod util;
+use std::time::Duration;
+
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_memory_connection_limits::*;
-use std::time::Duration;
-use util::*;
-
use libp2p_swarm::{dial_opts::DialOpts, DialError, Swarm};
use libp2p_swarm_test::SwarmExt;
+use util::*;
#[test]
fn max_bytes() {
@@ -69,7 +69,8 @@ fn max_bytes() {
.expect("Unexpected connection limit.");
}
- std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it.
+ std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try
+ // to exceed it.
match network
.dial(
diff --git a/misc/memory-connection-limits/tests/max_percentage.rs b/misc/memory-connection-limits/tests/max_percentage.rs
index bfb1b504af5..51fe783b3c5 100644
--- a/misc/memory-connection-limits/tests/max_percentage.rs
+++ b/misc/memory-connection-limits/tests/max_percentage.rs
@@ -20,18 +20,18 @@
mod util;
+use std::time::Duration;
+
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_memory_connection_limits::*;
-use std::time::Duration;
-use sysinfo::{MemoryRefreshKind, RefreshKind};
-use util::*;
-
use libp2p_swarm::{
dial_opts::{DialOpts, PeerCondition},
DialError, Swarm,
};
use libp2p_swarm_test::SwarmExt;
+use sysinfo::{MemoryRefreshKind, RefreshKind};
+use util::*;
#[test]
fn max_percentage() {
@@ -76,7 +76,9 @@ fn max_percentage() {
.expect("Unexpected connection limit.");
}
- std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it.
+ // Memory stats are only updated every 100ms internally,
+ // ensure they are up-to-date when we try to exceed it.
+ std::thread::sleep(Duration::from_millis(100));
match network
.dial(
diff --git a/misc/memory-connection-limits/tests/util/mod.rs b/misc/memory-connection-limits/tests/util/mod.rs
index 333b0ee135f..205f4d13bc4 100644
--- a/misc/memory-connection-limits/tests/util/mod.rs
+++ b/misc/memory-connection-limits/tests/util/mod.rs
@@ -18,7 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::task::{Context, Poll};
+use std::{
+ convert::Infallible,
+ task::{Context, Poll},
+};
use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
@@ -26,7 +29,6 @@ use libp2p_swarm::{
dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
-use std::convert::Infallible;
#[derive(libp2p_swarm_derive::NetworkBehaviour)]
#[behaviour(prelude = "libp2p_swarm::derive_prelude")]
diff --git a/misc/metrics/src/bandwidth.rs b/misc/metrics/src/bandwidth.rs
index 8a0f54e5b65..b6308ed1b51 100644
--- a/misc/metrics/src/bandwidth.rs
+++ b/misc/metrics/src/bandwidth.rs
@@ -1,4 +1,10 @@
-use crate::protocol_stack;
+use std::{
+ convert::TryFrom as _,
+ io,
+ pin::Pin,
+ task::{Context, Poll},
+};
+
use futures::{
future::{MapOk, TryFutureExt},
io::{IoSlice, IoSliceMut},
@@ -16,12 +22,8 @@ use prometheus_client::{
metrics::{counter::Counter, family::Family},
registry::{Registry, Unit},
};
-use std::{
- convert::TryFrom as _,
- io,
- pin::Pin,
- task::{Context, Poll},
-};
+
+use crate::protocol_stack;
#[derive(Debug, Clone)]
#[pin_project::pin_project]
diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs
index 3e60dca2cab..6a0f27394e9 100644
--- a/misc/metrics/src/dcutr.rs
+++ b/misc/metrics/src/dcutr.rs
@@ -18,10 +18,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::registry::Registry;
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{counter::Counter, family::Family},
+ registry::Registry,
+};
pub(crate) struct Metrics {
events: Family,
diff --git a/misc/metrics/src/gossipsub.rs b/misc/metrics/src/gossipsub.rs
index 2d90b92fbc6..b3e2e11f0b0 100644
--- a/misc/metrics/src/gossipsub.rs
+++ b/misc/metrics/src/gossipsub.rs
@@ -18,8 +18,7 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::registry::Registry;
+use prometheus_client::{metrics::counter::Counter, registry::Registry};
pub(crate) struct Metrics {
messages: Counter,
diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs
index 03ac3f9634e..b16c6a56ccf 100644
--- a/misc/metrics/src/identify.rs
+++ b/misc/metrics/src/identify.rs
@@ -18,17 +18,21 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol_stack;
+use std::{
+ collections::HashMap,
+ sync::{Arc, Mutex},
+};
+
use libp2p_identity::PeerId;
use libp2p_swarm::StreamProtocol;
-use prometheus_client::collector::Collector;
-use prometheus_client::encoding::{DescriptorEncoder, EncodeMetric};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::gauge::ConstGauge;
-use prometheus_client::metrics::MetricType;
-use prometheus_client::registry::Registry;
-use std::collections::HashMap;
-use std::sync::{Arc, Mutex};
+use prometheus_client::{
+ collector::Collector,
+ encoding::{DescriptorEncoder, EncodeMetric},
+ metrics::{counter::Counter, gauge::ConstGauge, MetricType},
+ registry::Registry,
+};
+
+use crate::protocol_stack;
const ALLOWED_PROTOCOLS: &[StreamProtocol] = &[
#[cfg(feature = "dcutr")]
diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs
index bd5a6526737..0a2a8038511 100644
--- a/misc/metrics/src/kad.rs
+++ b/misc/metrics/src/kad.rs
@@ -18,11 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::metrics::histogram::{exponential_buckets, Histogram};
-use prometheus_client::registry::{Registry, Unit};
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{
+ counter::Counter,
+ family::Family,
+ histogram::{exponential_buckets, Histogram},
+ },
+ registry::{Registry, Unit},
+};
pub(crate) struct Metrics {
query_result_get_record_ok: Counter,
diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs
index 74fd15e2181..1fd79e7846f 100644
--- a/misc/metrics/src/lib.rs
+++ b/misc/metrics/src/lib.rs
@@ -67,8 +67,8 @@ impl Metrics {
/// Create a new set of Swarm and protocol [`Metrics`].
///
/// ```
- /// use prometheus_client::registry::Registry;
/// use libp2p_metrics::Metrics;
+ /// use prometheus_client::registry::Registry;
/// let mut registry = Registry::default();
/// let metrics = Metrics::new(&mut registry);
/// ```
diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs
index afdd05134a6..ce653c72ea1 100644
--- a/misc/metrics/src/ping.rs
+++ b/misc/metrics/src/ping.rs
@@ -18,11 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::metrics::histogram::{exponential_buckets, Histogram};
-use prometheus_client::registry::{Registry, Unit};
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{
+ counter::Counter,
+ family::Family,
+ histogram::{exponential_buckets, Histogram},
+ },
+ registry::{Registry, Unit},
+};
#[derive(Clone, Hash, PartialEq, Eq, EncodeLabelSet, Debug)]
struct FailureLabels {
diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs
index 607daf3f1e1..d4c25b6eb3e 100644
--- a/misc/metrics/src/relay.rs
+++ b/misc/metrics/src/relay.rs
@@ -18,10 +18,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::registry::Registry;
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{counter::Counter, family::Family},
+ registry::Registry,
+};
pub(crate) struct Metrics {
events: Family,
diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs
index 51c0a0af253..6e95d082de6 100644
--- a/misc/metrics/src/swarm.rs
+++ b/misc/metrics/src/swarm.rs
@@ -18,18 +18,25 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::collections::HashMap;
-use std::sync::{Arc, Mutex};
+use std::{
+ collections::HashMap,
+ sync::{Arc, Mutex},
+};
-use crate::protocol_stack;
use libp2p_swarm::{ConnectionId, DialError, SwarmEvent};
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::Family;
-use prometheus_client::metrics::histogram::{exponential_buckets, Histogram};
-use prometheus_client::registry::{Registry, Unit};
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{
+ counter::Counter,
+ family::Family,
+ histogram::{exponential_buckets, Histogram},
+ },
+ registry::{Registry, Unit},
+};
use web_time::Instant;
+use crate::protocol_stack;
+
pub(crate) struct Metrics {
connections_incoming: Family,
connections_incoming_error: Family,
diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs
index 83bb4909041..1d13e94910d 100644
--- a/misc/multistream-select/src/dialer_select.rs
+++ b/misc/multistream-select/src/dialer_select.rs
@@ -20,10 +20,6 @@
//! Protocol negotiation strategies for the peer acting as the dialer.
-use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError};
-use crate::{Negotiated, NegotiationError, Version};
-
-use futures::prelude::*;
use std::{
convert::TryFrom as _,
iter, mem,
@@ -31,6 +27,13 @@ use std::{
task::{Context, Poll},
};
+use futures::prelude::*;
+
+use crate::{
+ protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError},
+ Negotiated, NegotiationError, Version,
+};
+
/// Returns a `Future` that negotiates a protocol on the given I/O stream
/// for a peer acting as the _dialer_ (or _initiator_).
///
@@ -84,8 +87,9 @@ enum State {
 impl<R, I> Future for DialerSelectFuture<R, I>
where
- // The Unpin bound here is required because we produce a `Negotiated` as the output.
- // It also makes the implementation considerably easier to write.
+ // The Unpin bound here is required because we produce
+ // a `Negotiated` as the output. It also makes
+ // the implementation considerably easier to write.
R: AsyncRead + AsyncWrite + Unpin,
I: Iterator,
     I::Item: AsRef<str>,
@@ -204,15 +208,19 @@ where
#[cfg(test)]
mod tests {
- use super::*;
- use crate::listener_select_proto;
- use async_std::future::timeout;
- use async_std::net::{TcpListener, TcpStream};
- use quickcheck::{Arbitrary, Gen, GenRange};
use std::time::Duration;
+
+ use async_std::{
+ future::timeout,
+ net::{TcpListener, TcpStream},
+ };
+ use quickcheck::{Arbitrary, Gen, GenRange};
use tracing::metadata::LevelFilter;
use tracing_subscriber::EnvFilter;
+ use super::*;
+ use crate::listener_select_proto;
+
#[test]
fn select_proto_basic() {
async fn run(version: Version) {
@@ -353,8 +361,8 @@ mod tests {
.unwrap();
assert_eq!(proto, "/proto1");
- // client can close the connection even though protocol negotiation is not yet done, i.e.
- // `_server_connection` had been untouched.
+ // client can close the connection even though protocol negotiation is not yet done,
+ // i.e. `_server_connection` had been untouched.
io.close().await.unwrap();
});
diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs
index 3a7988d0548..8062455de46 100644
--- a/misc/multistream-select/src/length_delimited.rs
+++ b/misc/multistream-select/src/length_delimited.rs
@@ -18,8 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use bytes::{Buf as _, BufMut as _, Bytes, BytesMut};
-use futures::{io::IoSlice, prelude::*};
use std::{
convert::TryFrom as _,
io,
@@ -27,6 +25,9 @@ use std::{
task::{Context, Poll},
};
+use bytes::{Buf as _, BufMut as _, Bytes, BytesMut};
+use futures::{io::IoSlice, prelude::*};
+
const MAX_LEN_BYTES: u16 = 2;
const MAX_FRAME_SIZE: u16 = (1 << (MAX_LEN_BYTES * 8 - MAX_LEN_BYTES)) - 1;
const DEFAULT_BUFFER_SIZE: usize = 64;
@@ -383,10 +384,12 @@ where
#[cfg(test)]
mod tests {
- use crate::length_delimited::LengthDelimited;
+ use std::io::ErrorKind;
+
use futures::{io::Cursor, prelude::*};
use quickcheck::*;
- use std::io::ErrorKind;
+
+ use crate::length_delimited::LengthDelimited;
#[test]
fn basic_read() {
diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs
index 5565623f25e..96432de6cb0 100644
--- a/misc/multistream-select/src/lib.rs
+++ b/misc/multistream-select/src/lib.rs
@@ -70,20 +70,21 @@
//!
//! ```no_run
//! use async_std::net::TcpStream;
-//! use multistream_select::{dialer_select_proto, Version};
//! use futures::prelude::*;
+//! use multistream_select::{dialer_select_proto, Version};
//!
//! async_std::task::block_on(async move {
//! let socket = TcpStream::connect("127.0.0.1:10333").await.unwrap();
//!
//! let protos = vec!["/echo/1.0.0", "/echo/2.5.0"];
-//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1).await.unwrap();
+//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1)
+//! .await
+//! .unwrap();
//!
//! println!("Negotiated protocol: {:?}", protocol);
//! // You can now use `_io` to communicate with the remote.
//! });
//! ```
-//!
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
@@ -93,10 +94,12 @@ mod listener_select;
mod negotiated;
mod protocol;
-pub use self::dialer_select::{dialer_select_proto, DialerSelectFuture};
-pub use self::listener_select::{listener_select_proto, ListenerSelectFuture};
-pub use self::negotiated::{Negotiated, NegotiatedComplete, NegotiationError};
-pub use self::protocol::ProtocolError;
+pub use self::{
+ dialer_select::{dialer_select_proto, DialerSelectFuture},
+ listener_select::{listener_select_proto, ListenerSelectFuture},
+ negotiated::{Negotiated, NegotiatedComplete, NegotiationError},
+ protocol::ProtocolError,
+};
/// Supported multistream-select versions.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs
index b4236310a1d..cd5af72a9d0 100644
--- a/misc/multistream-select/src/listener_select.rs
+++ b/misc/multistream-select/src/listener_select.rs
@@ -21,11 +21,6 @@
//! Protocol negotiation strategies for the peer acting as the listener
//! in a multistream-select protocol negotiation.
-use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError};
-use crate::{Negotiated, NegotiationError};
-
-use futures::prelude::*;
-use smallvec::SmallVec;
use std::{
convert::TryFrom as _,
mem,
@@ -33,6 +28,14 @@ use std::{
task::{Context, Poll},
};
+use futures::prelude::*;
+use smallvec::SmallVec;
+
+use crate::{
+ protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError},
+ Negotiated, NegotiationError,
+};
+
/// Returns a `Future` that negotiates a protocol on the given I/O stream
/// for a peer acting as the _listener_ (or _responder_).
///
@@ -109,8 +112,10 @@ enum State {
 impl<R, N> Future for ListenerSelectFuture<R, N>
where
- // The Unpin bound here is required because we produce a `Negotiated` as the output.
- // It also makes the implementation considerably easier to write.
+ // The Unpin bound here is required because
+ // we produce a `Negotiated` as the output.
+ // It also makes the implementation considerably
+ // easier to write.
R: AsyncRead + AsyncWrite + Unpin,
     N: AsRef<str> + Clone,
{
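To mirror the dialer example earlier in this crate's documentation, a hedged sketch of the listener side (the `socket` is assumed to come from an accepted connection):

```rust
use multistream_select::listener_select_proto;

let protos = vec!["/echo/1.0.0", "/echo/2.5.0"];
let (protocol, _io) = listener_select_proto(socket, protos).await.unwrap();
println!("Negotiated protocol: {protocol:?}");
```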
diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs
index a24014a4f5f..6693b3b5636 100644
--- a/misc/multistream-select/src/negotiated.rs
+++ b/misc/multistream-select/src/negotiated.rs
@@ -18,7 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError};
+use std::{
+ error::Error,
+ fmt, io, mem,
+ pin::Pin,
+ task::{Context, Poll},
+};
use futures::{
io::{IoSlice, IoSliceMut},
@@ -26,12 +31,8 @@ use futures::{
ready,
};
use pin_project::pin_project;
-use std::{
- error::Error,
- fmt, io, mem,
- pin::Pin,
- task::{Context, Poll},
-};
+
+use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError};
/// An I/O stream that has settled on an (application-layer) protocol to use.
///
@@ -59,8 +60,10 @@ pub struct NegotiatedComplete {
impl Future for NegotiatedComplete
where
- // `Unpin` is required not because of implementation details but because we produce the
- // `Negotiated` as the output of the future.
+ // `Unpin` is required not because of
+ // implementation details but because we produce
+ // the `Negotiated` as the output of the
+ // future.
TInner: AsyncRead + AsyncWrite + Unpin,
{
     type Output = Result<Negotiated<TInner>, NegotiationError>;
@@ -250,13 +253,13 @@ where
}
// TODO: implement once method is stabilized in the futures crate
- /*unsafe fn initializer(&self) -> Initializer {
- match &self.state {
- State::Completed { io, .. } => io.initializer(),
- State::Expecting { io, .. } => io.inner_ref().initializer(),
- State::Invalid => panic!("Negotiated: Invalid state"),
- }
- }*/
+ // unsafe fn initializer(&self) -> Initializer {
+ // match &self.state {
+ // State::Completed { io, .. } => io.initializer(),
+ // State::Expecting { io, .. } => io.inner_ref().initializer(),
+ // State::Invalid => panic!("Negotiated: Invalid state"),
+ // }
+ // }
fn poll_read_vectored(
mut self: Pin<&mut Self>,
diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs
index 92b6acedaeb..93cd4ac02b5 100644
--- a/misc/multistream-select/src/protocol.rs
+++ b/misc/multistream-select/src/protocol.rs
@@ -25,19 +25,22 @@
//! `Stream` and `Sink` implementations of `MessageIO` and
//! `MessageReader`.
-use crate::length_delimited::{LengthDelimited, LengthDelimitedReader};
-use crate::Version;
-
-use bytes::{BufMut, Bytes, BytesMut};
-use futures::{io::IoSlice, prelude::*, ready};
use std::{
error::Error,
fmt, io,
pin::Pin,
task::{Context, Poll},
};
+
+use bytes::{BufMut, Bytes, BytesMut};
+use futures::{io::IoSlice, prelude::*, ready};
use unsigned_varint as uvi;
+use crate::{
+ length_delimited::{LengthDelimited, LengthDelimitedReader},
+ Version,
+};
+
/// The maximum number of supported protocols that can be processed.
const MAX_PROTOCOLS: usize = 1000;
@@ -461,10 +464,12 @@ impl fmt::Display for ProtocolError {
#[cfg(test)]
mod tests {
- use super::*;
- use quickcheck::*;
use std::iter;
+ use quickcheck::*;
+
+ use super::*;
+
impl Arbitrary for Protocol {
fn arbitrary(g: &mut Gen) -> Protocol {
let n = g.gen_range(1..g.size());
diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs
index c57b7da7db8..d49315a54c3 100644
--- a/misc/quick-protobuf-codec/src/lib.rs
+++ b/misc/quick-protobuf-codec/src/lib.rs
@@ -1,10 +1,10 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+use std::{io, marker::PhantomData};
+
use asynchronous_codec::{Decoder, Encoder};
use bytes::{Buf, BufMut, BytesMut};
use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend};
-use std::io;
-use std::marker::PhantomData;
mod generated;
@@ -182,12 +182,13 @@ impl From for io::Error {
#[cfg(test)]
mod tests {
- use super::*;
+ use std::error::Error;
+
use asynchronous_codec::FramedRead;
- use futures::io::Cursor;
- use futures::{FutureExt, StreamExt};
+ use futures::{io::Cursor, FutureExt, StreamExt};
use quickcheck::{Arbitrary, Gen, QuickCheck};
- use std::error::Error;
+
+ use super::*;
#[test]
fn honors_max_message_length() {
diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs
index 65dafe065d1..a434d3ce17f 100644
--- a/misc/quick-protobuf-codec/tests/large_message.rs
+++ b/misc/quick-protobuf-codec/tests/large_message.rs
@@ -1,7 +1,6 @@
use asynchronous_codec::Encoder;
use bytes::BytesMut;
-use quick_protobuf_codec::proto;
-use quick_protobuf_codec::Codec;
+use quick_protobuf_codec::{proto, Codec};
#[test]
fn encode_large_message() {
diff --git a/misc/quickcheck-ext/src/lib.rs b/misc/quickcheck-ext/src/lib.rs
index 4ada7e73ba1..9c2deec8743 100644
--- a/misc/quickcheck-ext/src/lib.rs
+++ b/misc/quickcheck-ext/src/lib.rs
@@ -1,9 +1,9 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-pub use quickcheck::*;
-
use core::ops::Range;
+
use num_traits::sign::Unsigned;
+pub use quickcheck::*;
pub trait GenRange {
fn gen_range(&mut self, _range: Range) -> T;
diff --git a/misc/rw-stream-sink/src/lib.rs b/misc/rw-stream-sink/src/lib.rs
index f10e683ad33..5fdf1987252 100644
--- a/misc/rw-stream-sink/src/lib.rs
+++ b/misc/rw-stream-sink/src/lib.rs
@@ -27,7 +27,6 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-use futures::{prelude::*, ready};
use std::{
io::{self, Read},
mem,
@@ -35,6 +34,8 @@ use std::{
task::{Context, Poll},
};
+use futures::{prelude::*, ready};
+
 static_assertions::const_assert!(mem::size_of::<usize>() <= mem::size_of::<u64>());
/// Wraps a [`Stream`] and [`Sink`] whose items are buffers.
@@ -115,14 +116,16 @@ where
#[cfg(test)]
mod tests {
- use super::RwStreamSink;
- use async_std::task;
- use futures::{channel::mpsc, prelude::*};
use std::{
pin::Pin,
task::{Context, Poll},
};
+ use async_std::task;
+ use futures::{channel::mpsc, prelude::*};
+
+ use super::RwStreamSink;
+
// This struct merges a stream and a sink and is quite useful for tests.
struct Wrapper(St, Si);
diff --git a/misc/server/src/behaviour.rs b/misc/server/src/behaviour.rs
index 36b18c9798d..230d62a2ef3 100644
--- a/misc/server/src/behaviour.rs
+++ b/misc/server/src/behaviour.rs
@@ -1,13 +1,10 @@
-use libp2p::autonat;
-use libp2p::identify;
-use libp2p::kad;
-use libp2p::ping;
-use libp2p::relay;
-use libp2p::swarm::behaviour::toggle::Toggle;
-use libp2p::swarm::{NetworkBehaviour, StreamProtocol};
-use libp2p::{identity, Multiaddr, PeerId};
-use std::str::FromStr;
-use std::time::Duration;
+use std::{str::FromStr, time::Duration};
+
+use libp2p::{
+ autonat, identify, identity, kad, ping, relay,
+ swarm::{behaviour::toggle::Toggle, NetworkBehaviour, StreamProtocol},
+ Multiaddr, PeerId,
+};
const BOOTNODES: [&str; 4] = [
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
diff --git a/misc/server/src/config.rs b/misc/server/src/config.rs
index c3e3ec529c1..2e4b2746d09 100644
--- a/misc/server/src/config.rs
+++ b/misc/server/src/config.rs
@@ -1,7 +1,7 @@
+use std::{error::Error, path::Path};
+
use libp2p::Multiaddr;
use serde_derive::Deserialize;
-use std::error::Error;
-use std::path::Path;
#[derive(Clone, Deserialize)]
#[serde(rename_all = "PascalCase")]
diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs
index cee1aa96e28..87a8adb94e0 100644
--- a/misc/server/src/http_service.rs
+++ b/misc/server/src/http_service.rs
@@ -18,15 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use axum::extract::State;
-use axum::http::StatusCode;
-use axum::response::IntoResponse;
-use axum::routing::get;
-use axum::Router;
-use prometheus_client::encoding::text::encode;
-use prometheus_client::registry::Registry;
-use std::net::SocketAddr;
-use std::sync::{Arc, Mutex};
+use std::{
+ net::SocketAddr,
+ sync::{Arc, Mutex},
+};
+
+use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router};
+use prometheus_client::{encoding::text::encode, registry::Registry};
use tokio::net::TcpListener;
const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0";
diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs
index 820921beaed..a633a80207e 100644
--- a/misc/server/src/main.rs
+++ b/misc/server/src/main.rs
@@ -1,18 +1,18 @@
+use std::{error::Error, path::PathBuf, str::FromStr};
+
use base64::Engine;
use clap::Parser;
use futures::stream::StreamExt;
-use libp2p::identity;
-use libp2p::identity::PeerId;
-use libp2p::kad;
-use libp2p::metrics::{Metrics, Recorder};
-use libp2p::swarm::SwarmEvent;
-use libp2p::tcp;
-use libp2p::{identify, noise, yamux};
-use prometheus_client::metrics::info::Info;
-use prometheus_client::registry::Registry;
-use std::error::Error;
-use std::path::PathBuf;
-use std::str::FromStr;
+use libp2p::{
+ identify, identity,
+ identity::PeerId,
+ kad,
+ metrics::{Metrics, Recorder},
+ noise,
+ swarm::SwarmEvent,
+ tcp, yamux,
+};
+use prometheus_client::{metrics::info::Info, registry::Registry};
use tracing_subscriber::EnvFilter;
use zeroize::Zeroizing;
diff --git a/misc/webrtc-utils/src/fingerprint.rs b/misc/webrtc-utils/src/fingerprint.rs
index a02c4d1116d..c32d33d5bab 100644
--- a/misc/webrtc-utils/src/fingerprint.rs
+++ b/misc/webrtc-utils/src/fingerprint.rs
@@ -19,9 +19,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::fmt;
+
use libp2p_core::multihash;
use sha2::Digest as _;
-use std::fmt;
pub const SHA256: &str = "sha-256";
const MULTIHASH_SHA256_CODE: u64 = 0x12;
diff --git a/misc/webrtc-utils/src/noise.rs b/misc/webrtc-utils/src/noise.rs
index 9180acfc1ca..705db7f4697 100644
--- a/misc/webrtc-utils/src/noise.rs
+++ b/misc/webrtc-utils/src/noise.rs
@@ -19,16 +19,17 @@
// DEALINGS IN THE SOFTWARE.
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::UpgradeInfo;
+use libp2p_core::{
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ UpgradeInfo,
+};
use libp2p_identity as identity;
use libp2p_identity::PeerId;
use libp2p_noise as noise;
+pub use noise::Error;
use crate::fingerprint::Fingerprint;
-pub use noise::Error;
-
pub async fn inbound(
id_keys: identity::Keypair,
stream: T,
@@ -89,9 +90,10 @@ pub(crate) fn noise_prologue(
#[cfg(test)]
mod tests {
- use super::*;
use hex_literal::hex;
+ use super::*;
+
#[test]
fn noise_prologue_tests() {
let a = Fingerprint::raw(hex!(
diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs
index 0796548f449..96a07f5db95 100644
--- a/misc/webrtc-utils/src/sdp.rs
+++ b/misc/webrtc-utils/src/sdp.rs
@@ -18,13 +18,13 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::fingerprint::Fingerprint;
-use serde::Serialize;
use std::net::{IpAddr, SocketAddr};
+
+use rand::{distributions::Alphanumeric, thread_rng, Rng};
+use serde::Serialize;
use tinytemplate::TinyTemplate;
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
+use crate::fingerprint::Fingerprint;
pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &str) -> String {
let answer = render_description(
@@ -71,7 +71,8 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &
// the answerer is received, which adds additional latency. setup:active allows the answer and
// the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED.
//
-// a=candidate:
+// a=candidate:
+//
//
// A transport address for a candidate that can be used for connectivity checks (RFC8839).
//
diff --git a/misc/webrtc-utils/src/stream.rs b/misc/webrtc-utils/src/stream.rs
index 17f746a92a1..0ec420a103a 100644
--- a/misc/webrtc-utils/src/stream.rs
+++ b/misc/webrtc-utils/src/stream.rs
@@ -19,20 +19,22 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use bytes::Bytes;
-use futures::{channel::oneshot, prelude::*, ready};
-
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
-use crate::proto::{Flag, Message};
+use bytes::Bytes;
+use futures::{channel::oneshot, prelude::*, ready};
+
use crate::{
- stream::drop_listener::GracefullyClosed,
- stream::framed_dc::FramedDc,
- stream::state::{Closing, State},
+ proto::{Flag, Message},
+ stream::{
+ drop_listener::GracefullyClosed,
+ framed_dc::FramedDc,
+ state::{Closing, State},
+ },
};
mod drop_listener;
@@ -69,7 +71,8 @@ impl Stream
where
T: AsyncRead + AsyncWrite + Unpin + Clone,
{
- /// Returns a new [`Stream`] and a [`DropListener`], which will notify the receiver when/if the stream is dropped.
+ /// Returns a new [`Stream`] and a [`DropListener`],
+ /// which will notify the receiver when/if the stream is dropped.
pub fn new(data_channel: T) -> (Self, DropListener) {
let (sender, receiver) = oneshot::channel();
@@ -175,8 +178,9 @@ where
buf: &[u8],
) -> Poll> {
while self.state.read_flags_in_async_write() {
- // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we will poll the
- // underlying I/O resource once more. Is that allowed? How about introducing a state IoReadClosed?
+ // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we
+ // will poll the underlying I/O resource once more. Is that allowed? How
+ // about introducing a state IoReadClosed?
let Self {
read_buffer,
@@ -265,11 +269,12 @@ where
#[cfg(test)]
mod tests {
- use super::*;
- use crate::stream::framed_dc::codec;
use asynchronous_codec::Encoder;
use bytes::BytesMut;
+ use super::*;
+ use crate::stream::framed_dc::codec;
+
#[test]
fn max_data_len() {
// Largest possible message.
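A tiny sketch of the constructor contract documented above; `data_channel` stands for any `AsyncRead + AsyncWrite + Unpin + Clone` transport and is an assumption here:

```rust
let (stream, drop_listener) = Stream::new(data_channel);
// Drive `drop_listener` as a future elsewhere; it resolves once `stream`
// is dropped so the remote can be told to gracefully close the substream.
```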
diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs
index 9745e3d4364..ea3f19d2f57 100644
--- a/misc/webrtc-utils/src/stream/drop_listener.rs
+++ b/misc/webrtc-utils/src/stream/drop_listener.rs
@@ -18,17 +18,22 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use futures::channel::oneshot;
-use futures::channel::oneshot::Canceled;
-use futures::{AsyncRead, AsyncWrite, FutureExt, SinkExt};
+use std::{
+ future::Future,
+ io,
+ pin::Pin,
+ task::{Context, Poll},
+};
-use std::future::Future;
-use std::io;
-use std::pin::Pin;
-use std::task::{Context, Poll};
+use futures::{
+ channel::{oneshot, oneshot::Canceled},
+ AsyncRead, AsyncWrite, FutureExt, SinkExt,
+};
-use crate::proto::{Flag, Message};
-use crate::stream::framed_dc::FramedDc;
+use crate::{
+ proto::{Flag, Message},
+ stream::framed_dc::FramedDc,
+};
#[must_use]
pub struct DropListener {
diff --git a/misc/webrtc-utils/src/stream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs
index 721178fdcd3..a7b9b6214e0 100644
--- a/misc/webrtc-utils/src/stream/framed_dc.rs
+++ b/misc/webrtc-utils/src/stream/framed_dc.rs
@@ -21,8 +21,10 @@
use asynchronous_codec::Framed;
use futures::{AsyncRead, AsyncWrite};
-use crate::proto::Message;
-use crate::stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN};
+use crate::{
+ proto::Message,
+ stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN},
+};
 pub(crate) type FramedDc<T> = Framed<T, quick_protobuf_codec::Codec<Message>>;
 pub(crate) fn new<T>(inner: T) -> FramedDc<T>
diff --git a/misc/webrtc-utils/src/stream/state.rs b/misc/webrtc-utils/src/stream/state.rs
index 082325e4d47..006c1610d00 100644
--- a/misc/webrtc-utils/src/stream/state.rs
+++ b/misc/webrtc-utils/src/stream/state.rs
@@ -18,10 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use bytes::Bytes;
-
use std::io;
+use bytes::Bytes;
+
use crate::proto::Flag;
#[derive(Debug, Copy, Clone)]
@@ -46,8 +46,8 @@ pub(crate) enum State {
/// Represents the state of closing one half (either read or write) of the connection.
///
-/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag respectively
-/// and flushing the underlying connection.
+/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag
+/// respectively and flushing the underlying connection.
#[derive(Debug, Copy, Clone)]
pub(crate) enum Closing {
Requested,
@@ -181,8 +181,8 @@ impl State {
/// Whether we should read from the stream in the [`futures::AsyncWrite`] implementation.
///
- /// This is necessary for read-closed streams because we would otherwise not read any more flags from
- /// the socket.
+ /// This is necessary for read-closed streams because we would otherwise
+ /// not read any more flags from the socket.
pub(crate) fn read_flags_in_async_write(&self) -> bool {
matches!(self, Self::ReadClosed)
}
@@ -324,9 +324,10 @@ impl State {
#[cfg(test)]
mod tests {
- use super::*;
use std::io::ErrorKind;
+ use super::*;
+
#[test]
fn cannot_read_after_receiving_fin() {
let mut open = State::Open;
diff --git a/misc/webrtc-utils/src/transport.rs b/misc/webrtc-utils/src/transport.rs
index 440ad73ed02..60b1934082f 100644
--- a/misc/webrtc-utils/src/transport.rs
+++ b/misc/webrtc-utils/src/transport.rs
@@ -1,7 +1,9 @@
-use crate::fingerprint::Fingerprint;
-use libp2p_core::{multiaddr::Protocol, Multiaddr};
use std::net::{IpAddr, SocketAddr};
+use libp2p_core::{multiaddr::Protocol, Multiaddr};
+
+use crate::fingerprint::Fingerprint;
+
/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing.
pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> {
let mut iter = addr.iter();
@@ -38,9 +40,10 @@ pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerpri
#[cfg(test)]
mod tests {
- use super::*;
use std::net::{Ipv4Addr, Ipv6Addr};
+ use super::*;
+
#[test]
fn parse_valid_address_with_certhash_and_p2p() {
let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS"
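A short sketch of the happy path through the parser, reusing the certhash from the test above:

```rust
let addr: Multiaddr =
    "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w"
        .parse()
        .unwrap();
let (socket_addr, _fingerprint) =
    parse_webrtc_dial_addr(&addr).expect("address with ip, udp port and certhash");
assert_eq!(socket_addr.port(), 39901);
```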
diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs
index 44eafa884ac..b0dd4babff7 100644
--- a/muxers/mplex/benches/split_send_size.rs
+++ b/muxers/mplex/benches/split_send_size.rs
@@ -21,21 +21,23 @@
//! A benchmark for the `split_send_size` configuration option
//! using different transports.
+use std::{pin::Pin, time::Duration};
+
use async_std::task;
use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};
-use futures::future::poll_fn;
-use futures::prelude::*;
-use futures::{channel::oneshot, future::join};
-use libp2p_core::muxing::StreamMuxerExt;
-use libp2p_core::transport::ListenerId;
-use libp2p_core::Endpoint;
-use libp2p_core::{multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, Transport};
+use futures::{
+ channel::oneshot,
+ future::{join, poll_fn},
+ prelude::*,
+};
+use libp2p_core::{
+ multiaddr::multiaddr, muxing, muxing::StreamMuxerExt, transport, transport::ListenerId,
+ upgrade, Endpoint, Multiaddr, Transport,
+};
use libp2p_identity as identity;
use libp2p_identity::PeerId;
use libp2p_mplex as mplex;
use libp2p_plaintext as plaintext;
-use std::pin::Pin;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>;
@@ -120,7 +122,8 @@ fn run(
}
transport::TransportEvent::Incoming { upgrade, .. } => {
let (_peer, mut conn) = upgrade.await.unwrap();
- // Just calling `poll_inbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though.
+ // Just calling `poll_inbound` without `poll` is fine here because mplex makes
+ // progress through all `poll_` functions. It is hacky though.
let mut s = poll_fn(|cx| conn.poll_inbound_unpin(cx))
.await
.expect("unexpected error");
@@ -158,7 +161,8 @@ fn run(
.unwrap()
.await
.unwrap();
- // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though.
+ // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress
+ // through all `poll_` functions. It is hacky though.
let mut stream = poll_fn(|cx| conn.poll_outbound_unpin(cx)).await.unwrap();
let mut off = 0;
loop {
diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs
index 014ee899280..a4a04d1964d 100644
--- a/muxers/mplex/src/codec.rs
+++ b/muxers/mplex/src/codec.rs
@@ -18,14 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use asynchronous_codec::{Decoder, Encoder};
-use bytes::{BufMut, Bytes, BytesMut};
-use libp2p_core::Endpoint;
use std::{
fmt,
hash::{Hash, Hasher},
io, mem,
};
+
+use asynchronous_codec::{Decoder, Encoder};
+use bytes::{BufMut, Bytes, BytesMut};
+use libp2p_core::Endpoint;
use unsigned_varint::{codec, encode};
// Maximum size for a packet: 1MB as per the spec.
diff --git a/muxers/mplex/src/config.rs b/muxers/mplex/src/config.rs
index 3bf5e703a18..45bb05b2240 100644
--- a/muxers/mplex/src/config.rs
+++ b/muxers/mplex/src/config.rs
@@ -18,9 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::codec::MAX_FRAME_SIZE;
use std::cmp;
+use crate::codec::MAX_FRAME_SIZE;
+
pub(crate) const DEFAULT_MPLEX_PROTOCOL_NAME: &str = "/mplex/6.7.0";
/// Configuration for the multiplexer.
diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs
index 50fc0fc1d3f..ac93fd3865e 100644
--- a/muxers/mplex/src/io.rs
+++ b/muxers/mplex/src/io.rs
@@ -18,23 +18,31 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::codec::{Codec, Frame, LocalStreamId, RemoteStreamId};
-use crate::{MaxBufferBehaviour, MplexConfig};
+pub(crate) use std::io::{Error, Result};
+use std::{
+ cmp,
+ collections::VecDeque,
+ fmt, io, mem,
+ sync::Arc,
+ task::{Context, Poll, Waker},
+};
+
use asynchronous_codec::Framed;
use bytes::Bytes;
-use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef};
-use futures::{prelude::*, ready, stream::Fuse};
+use futures::{
+ prelude::*,
+ ready,
+ stream::Fuse,
+ task::{waker_ref, ArcWake, AtomicWaker, WakerRef},
+};
use nohash_hasher::{IntMap, IntSet};
use parking_lot::Mutex;
use smallvec::SmallVec;
-use std::collections::VecDeque;
-use std::{
- cmp, fmt, io, mem,
- sync::Arc,
- task::{Context, Poll, Waker},
-};
-pub(crate) use std::io::{Error, Result};
+use crate::{
+ codec::{Codec, Frame, LocalStreamId, RemoteStreamId},
+ MaxBufferBehaviour, MplexConfig,
+};
/// A connection identifier.
///
/// Randomly generated and mainly intended to improve log output
@@ -302,13 +310,11 @@ where
/// reading and writing immediately. The remote is informed
/// based on the current state of the substream:
///
- /// * If the substream was open, a `Reset` frame is sent at
- /// the next opportunity.
- /// * If the substream was half-closed, i.e. a `Close` frame
- /// has already been sent, nothing further happens.
- /// * If the substream was half-closed by the remote, i.e.
- /// a `Close` frame has already been received, a `Close`
- /// frame is sent at the next opportunity.
+ /// * If the substream was open, a `Reset` frame is sent at the next opportunity.
+ /// * If the substream was half-closed, i.e. a `Close` frame has already been sent, nothing
+ /// further happens.
+ /// * If the substream was half-closed by the remote, i.e. a `Close` frame has already been
+ /// received, a `Close` frame is sent at the next opportunity.
///
/// If the multiplexed stream is closed or encountered
/// an error earlier, or there is no known substream with
@@ -1146,15 +1152,14 @@ const EXTRA_PENDING_FRAMES: usize = 1000;
#[cfg(test)]
mod tests {
- use super::*;
+ use std::{collections::HashSet, num::NonZeroU8, ops::DerefMut, pin::Pin};
+
use async_std::task;
use asynchronous_codec::{Decoder, Encoder};
use bytes::BytesMut;
use quickcheck::*;
- use std::collections::HashSet;
- use std::num::NonZeroU8;
- use std::ops::DerefMut;
- use std::pin::Pin;
+
+ use super::*;
impl Arbitrary for MaxBufferBehaviour {
fn arbitrary(g: &mut Gen) -> MaxBufferBehaviour {
diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs
index 17ca9ad46f6..1ef89dc283a 100644
--- a/muxers/mplex/src/lib.rs
+++ b/muxers/mplex/src/lib.rs
@@ -26,15 +26,22 @@ mod codec;
mod config;
mod io;
-pub use config::{MaxBufferBehaviour, MplexConfig};
+use std::{
+ cmp, iter,
+ pin::Pin,
+ sync::Arc,
+ task::{Context, Poll},
+};
use bytes::Bytes;
use codec::LocalStreamId;
+pub use config::{MaxBufferBehaviour, MplexConfig};
use futures::{prelude::*, ready};
-use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo};
+use libp2p_core::{
+ muxing::{StreamMuxer, StreamMuxerEvent},
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
+};
use parking_lot::Mutex;
-use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll};
impl UpgradeInfo for MplexConfig {
type Info = &'static str;
diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs
index d03bdbdfed7..489d476f158 100644
--- a/muxers/test-harness/src/lib.rs
+++ b/muxers/test-harness/src/lib.rs
@@ -1,15 +1,20 @@
+use std::{
+ fmt,
+ future::Future,
+ mem,
+ pin::Pin,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use futures::{future, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Stream, StreamExt};
+use libp2p_core::{
+ muxing::StreamMuxerExt,
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade},
+ StreamMuxer, UpgradeInfo,
+};
+
use crate::future::{BoxFuture, Either, FutureExt};
-use futures::{future, AsyncRead, AsyncWrite};
-use futures::{AsyncReadExt, Stream};
-use futures::{AsyncWriteExt, StreamExt};
-use libp2p_core::muxing::StreamMuxerExt;
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};
-use libp2p_core::{StreamMuxer, UpgradeInfo};
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use std::time::Duration;
-use std::{fmt, mem};
pub async fn connected_muxers_on_memory_ring_buffer() -> (M, M)
where
@@ -41,7 +46,8 @@ where
.unwrap()
}
-/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can use `read_to_end` to read the entire message.
+/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can
+/// use `read_to_end` to read the entire message.
pub async fn close_implies_flush(alice: A, bob: B)
where
A: StreamMuxer + Unpin,
@@ -99,7 +105,8 @@ where
.await;
}
-/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can be the dialer and listener.
+/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can
+/// be the dialer and listener.
async fn run_commutative(
mut alice: A,
mut bob: B,
@@ -120,7 +127,8 @@ async fn run_commutative(
/// Runs a given protocol between the two parties.
///
/// The first party will open a new substream and the second party will wait for this.
-/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the underlying connection can make progress at all times.
+/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the
+/// underlying connection can make progress at all times.
async fn run(
dialer: &mut A,
listener: &mut B,
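
The `close_implies_flush` doc comment rewrapped above captures a contract every muxer must uphold: closing a stream still delivers all buffered bytes. A rough sketch of the same write-then-close / `read_to_end` pattern, using only an in-memory `futures::io::Cursor` rather than a real muxer or network:

use futures::{executor::block_on, io::Cursor, AsyncReadExt, AsyncWriteExt};

fn main() {
    block_on(async {
        // "Alice": write a message and immediately close the writer.
        let mut writer = Cursor::new(Vec::<u8>::new());
        writer.write_all(b"hello world").await.unwrap();
        writer.close().await.unwrap(); // closing must not lose buffered bytes

        // "Bob": read until EOF and expect the full message to have arrived.
        let mut reader = Cursor::new(writer.into_inner());
        let mut buf = Vec::new();
        reader.read_to_end(&mut buf).await.unwrap();
        assert_eq!(buf, b"hello world".to_vec());
    });
}
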
diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs
index bcfeb62fccf..001eb6b0348 100644
--- a/muxers/yamux/src/lib.rs
+++ b/muxers/yamux/src/lib.rs
@@ -22,17 +22,20 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-use either::Either;
-use futures::{prelude::*, ready};
-use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent};
-use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo};
-use std::collections::VecDeque;
-use std::io::{IoSlice, IoSliceMut};
-use std::task::Waker;
use std::{
- io, iter,
+ collections::VecDeque,
+ io,
+ io::{IoSlice, IoSliceMut},
+ iter,
pin::Pin,
- task::{Context, Poll},
+ task::{Context, Poll, Waker},
+};
+
+use either::Either;
+use futures::{prelude::*, ready};
+use libp2p_core::{
+ muxing::{StreamMuxer, StreamMuxerEvent},
+ upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo},
};
use thiserror::Error;
@@ -40,10 +43,12 @@ use thiserror::Error;
#[derive(Debug)]
pub struct Muxer {
connection: Either, yamux013::Connection>,
- /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote.
+ /// Temporarily buffers inbound streams in case our node is
+ /// performing backpressure on the remote.
///
- /// The only way how yamux can make progress is by calling [`yamux013::Connection::poll_next_inbound`]. However, the
- /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via
+ /// The only way yamux can make progress is by calling
+ /// [`yamux013::Connection::poll_next_inbound`]. However, the [`StreamMuxer`] interface is
+ /// designed to allow a caller to selectively make progress via
/// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general
/// [`StreamMuxer::poll`] is designed to make progress on existing streams etc.
///
@@ -57,7 +62,8 @@ pub struct Muxer {
/// How many streams to buffer before we start resetting them.
///
/// This is equal to the ACK BACKLOG in `rust-yamux`.
-/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset streams because they'll voluntarily stop opening them once they hit the ACK backlog.
+/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset
+/// streams because they'll voluntarily stop opening them once they hit the ACK backlog.
const MAX_BUFFERED_INBOUND_STREAMS: usize = 256;
impl Muxer
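
The comment above explains why yamux buffers inbound streams while driving the connection, capped at `MAX_BUFFERED_INBOUND_STREAMS`. Below is a standalone sketch of that cap-and-reset policy; `InboundStream` and `buffer_inbound` are hypothetical placeholders, not the crate's API.

use std::collections::VecDeque;

const MAX_BUFFERED_INBOUND_STREAMS: usize = 256;

// Hypothetical stand-in for a newly accepted inbound substream.
struct InboundStream(u32);

impl InboundStream {
    fn reset(self) {
        // A real muxer would send a reset frame here; the sketch just drops the stream.
    }
}

// Buffer inbound streams until the caller polls for them; reset anything beyond the cap.
fn buffer_inbound(buffer: &mut VecDeque<InboundStream>, stream: InboundStream) {
    if buffer.len() >= MAX_BUFFERED_INBOUND_STREAMS {
        stream.reset();
    } else {
        buffer.push_back(stream);
    }
}

fn main() {
    let mut buffer = VecDeque::new();
    for id in 0..300u32 {
        buffer_inbound(&mut buffer, InboundStream(id));
    }
    assert_eq!(buffer.len(), MAX_BUFFERED_INBOUND_STREAMS);
}
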
diff --git a/protocols/autonat/src/v1.rs b/protocols/autonat/src/v1.rs
index c60e4805f40..4de601c5df5 100644
--- a/protocols/autonat/src/v1.rs
+++ b/protocols/autonat/src/v1.rs
@@ -29,6 +29,8 @@
pub(crate) mod behaviour;
pub(crate) mod protocol;
+pub use libp2p_request_response::{InboundFailure, OutboundFailure};
+
pub use self::{
behaviour::{
Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, NatStatus,
@@ -36,7 +38,6 @@ pub use self::{
},
protocol::{ResponseError, DEFAULT_PROTOCOL_NAME},
};
-pub use libp2p_request_response::{InboundFailure, OutboundFailure};
pub(crate) mod proto {
#![allow(unreachable_pub)]
diff --git a/protocols/autonat/src/v1/behaviour.rs b/protocols/autonat/src/v1/behaviour.rs
index 7a717baed8d..24ec1b13be7 100644
--- a/protocols/autonat/src/v1/behaviour.rs
+++ b/protocols/autonat/src/v1/behaviour.rs
@@ -21,15 +21,19 @@
mod as_client;
mod as_server;
-use crate::protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError};
-use crate::DEFAULT_PROTOCOL_NAME;
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ iter,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use as_client::AsClient;
pub use as_client::{OutboundProbeError, OutboundProbeEvent};
use as_server::AsServer;
pub use as_server::{InboundProbeError, InboundProbeEvent};
use futures_timer::Delay;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr};
+use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_request_response::{
self as request_response, InboundRequestId, OutboundRequestId, ProtocolSupport, ResponseChannel,
@@ -39,14 +43,13 @@ use libp2p_swarm::{
ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- iter,
- task::{Context, Poll},
- time::Duration,
-};
use web_time::Instant;
+use crate::{
+ protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError},
+ DEFAULT_PROTOCOL_NAME,
+};
+
/// Config for the [`Behaviour`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
@@ -148,17 +151,18 @@ pub enum Event {
/// [`NetworkBehaviour`] for AutoNAT.
///
-/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a firewall, or
-/// publicly reachable.
-/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of fixed servers and
-/// connected peers. Upon receiving a dial-back request, the remote tries to dial the included addresses. When a
-/// first address was successfully dialed, a status Ok will be send back together with the dialed address. If no address
-/// can be reached a dial-error is send back.
+/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/or a
+/// firewall, or publicly reachable.
+/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of
+/// fixed servers and connected peers. Upon receiving a dial-back request, the remote tries to dial
+/// the included addresses. When a first address is successfully dialed, a status Ok is sent back
+/// together with the dialed address. If no address can be reached, a dial-error is sent back.
/// Based on the received response, the sender assumes themselves to be public or private.
-/// The status is retried in a frequency of [`Config::retry_interval`] or [`Config::retry_interval`], depending on whether
-/// enough confidence in the assumed NAT status was reached or not.
-/// The confidence increases each time a probe confirms the assumed status, and decreases if a different status is reported.
-/// If the confidence is 0, the status is flipped and the Behaviour will report the new status in an `OutEvent`.
+/// The status is retried at a frequency of [`Config::retry_interval`] or
+/// [`Config::refresh_interval`], depending on whether enough confidence in the assumed NAT status
+/// was reached or not. The confidence increases each time a probe confirms the assumed status, and
+/// decreases if a different status is reported. If the confidence is 0, the status is flipped and
+/// the Behaviour will report the new status in an `OutEvent`.
pub struct Behaviour {
// Local peer id
local_peer_id: PeerId,
@@ -195,11 +199,12 @@ pub struct Behaviour {
ongoing_outbound: HashMap,
// Connected peers with the observed address of each connection.
- // If the endpoint of a connection is relayed or not global (in case of Config::only_global_ips),
- // the observed address is `None`.
+ // If the endpoint of a connection is relayed or not global (in case of
+ // Config::only_global_ips), the observed address is `None`.
connected: HashMap>>,
- // Used servers in recent outbound probes that are throttled through Config::throttle_server_period.
+ // Used servers in recent outbound probes that are throttled through
+ // Config::throttle_server_period.
throttled_servers: Vec<(PeerId, Instant)>,
// Recent probes done for clients
@@ -264,8 +269,8 @@ impl Behaviour {
}
/// Add a peer to the list over servers that may be used for probes.
- /// These peers are used for dial-request even if they are currently not connection, in which case a connection will be
- /// establish before sending the dial-request.
+ /// These peers are used for dial-requests even if they are currently not connected, in which
+ /// case a connection will be established before sending the dial-request.
pub fn add_server(&mut self, peer: PeerId, address: Option) {
self.servers.insert(peer);
if let Some(addr) = address {
@@ -564,7 +569,8 @@ impl NetworkBehaviour for Behaviour {
type Action = ToSwarm<::ToSwarm, THandlerInEvent>;
-// Trait implemented for `AsClient` and `AsServer` to handle events from the inner [`request_response::Behaviour`] Protocol.
+// Trait implemented for `AsClient` and `AsServer` to handle events from the inner
+// [`request_response::Behaviour`] protocol.
trait HandleInnerEvent {
fn handle_event(
&mut self,
@@ -671,7 +677,8 @@ impl GlobalIp for std::net::Ipv6Addr {
// Variation of unstable method [`std::net::Ipv6Addr::multicast_scope`] that instead of the
// `Ipv6MulticastScope` just returns if the scope is global or not.
- // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, Ipv6MulticastScope::Global))`.
+ // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope,
+ // Ipv6MulticastScope::Global))`.
fn is_multicast_scope_global(addr: &std::net::Ipv6Addr) -> Option {
match addr.segments()[0] & 0x000f {
14 => Some(true), // Global multicast scope.
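
The rewrapped behaviour docs above describe how probe results adjust the assumed NAT status. A minimal sketch of that confidence rule, assuming a maximum confidence of 3 (matching `MAX_CONFIDENCE` in the client tests further below); all types here are illustrative, not the crate's own.

const MAX_CONFIDENCE: usize = 3;

#[derive(Clone, Copy, Debug, PartialEq)]
enum NatStatus {
    Public,
    Private,
}

struct Assumption {
    status: NatStatus,
    confidence: usize,
}

impl Assumption {
    // A confirming probe raises confidence (up to the cap), a contradicting probe
    // lowers it, and once confidence is 0 a contradicting probe flips the status.
    fn record_probe(&mut self, observed: NatStatus) {
        if observed == self.status {
            self.confidence = (self.confidence + 1).min(MAX_CONFIDENCE);
        } else if self.confidence > 0 {
            self.confidence -= 1;
        } else {
            self.status = observed;
        }
    }
}

fn main() {
    let mut assumed = Assumption { status: NatStatus::Public, confidence: 1 };
    assumed.record_probe(NatStatus::Private); // confidence drops to 0
    assumed.record_probe(NatStatus::Private); // status flips
    assert_eq!(assumed.status, NatStatus::Private);
}
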
diff --git a/protocols/autonat/src/v1/behaviour/as_client.rs b/protocols/autonat/src/v1/behaviour/as_client.rs
index 385dee50ee1..3377964373c 100644
--- a/protocols/autonat/src/v1/behaviour/as_client.rs
+++ b/protocols/autonat/src/v1/behaviour/as_client.rs
@@ -18,12 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::ResponseError;
-
-use super::{
- Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus,
- ProbeId,
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ task::{Context, Poll},
+ time::Duration,
};
+
use futures::FutureExt;
use futures_timer::Delay;
use libp2p_core::Multiaddr;
@@ -31,13 +31,14 @@ use libp2p_identity::PeerId;
use libp2p_request_response::{self as request_response, OutboundFailure, OutboundRequestId};
use libp2p_swarm::{ConnectionId, ListenAddresses, ToSwarm};
use rand::{seq::SliceRandom, thread_rng};
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- task::{Context, Poll},
- time::Duration,
-};
use web_time::Instant;
+use super::{
+ Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus,
+ ProbeId,
+};
+use crate::ResponseError;
+
/// Outbound probe failed or was aborted.
#[derive(Debug)]
pub enum OutboundProbeError {
diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs
index 01148add6e8..663f94122c7 100644
--- a/protocols/autonat/src/v1/behaviour/as_server.rs
+++ b/protocols/autonat/src/v1/behaviour/as_server.rs
@@ -17,10 +17,11 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use super::{
- Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId,
- ResponseError,
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ num::NonZeroU8,
};
+
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_request_response::{
@@ -30,12 +31,13 @@ use libp2p_swarm::{
dial_opts::{DialOpts, PeerCondition},
ConnectionId, DialError, ToSwarm,
};
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- num::NonZeroU8,
-};
use web_time::Instant;
+use super::{
+ Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId,
+ ResponseError,
+};
+
/// Inbound probe failed.
#[derive(Debug)]
pub enum InboundProbeError {
@@ -379,10 +381,10 @@ impl AsServer<'_> {
#[cfg(test)]
mod test {
- use super::*;
-
use std::net::Ipv4Addr;
+ use super::*;
+
fn random_ip<'a>() -> Protocol<'a> {
Protocol::Ip4(Ipv4Addr::new(
rand::random(),
diff --git a/protocols/autonat/src/v1/protocol.rs b/protocols/autonat/src/v1/protocol.rs
index 2ce538fddf4..6aa0c99167b 100644
--- a/protocols/autonat/src/v1/protocol.rs
+++ b/protocols/autonat/src/v1/protocol.rs
@@ -18,16 +18,20 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
+use std::io;
+
use async_trait::async_trait;
use asynchronous_codec::{FramedRead, FramedWrite};
-use futures::io::{AsyncRead, AsyncWrite};
-use futures::{SinkExt, StreamExt};
+use futures::{
+ io::{AsyncRead, AsyncWrite},
+ SinkExt, StreamExt,
+};
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_request_response::{self as request_response};
use libp2p_swarm::StreamProtocol;
-use std::io;
+
+use crate::proto;
/// The protocol name used for negotiating with multistream-select.
pub const DEFAULT_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/libp2p/autonat/1.0.0");
diff --git a/protocols/autonat/src/v2.rs b/protocols/autonat/src/v2.rs
index 48e9f25f890..94decf50a55 100644
--- a/protocols/autonat/src/v2.rs
+++ b/protocols/autonat/src/v2.rs
@@ -4,17 +4,17 @@
//!
//! The new version fixes the issues of the first version:
//! - The server now always dials back over a newly allocated port. This greatly reduces the risk of
-//! false positives that often occurred in the first version, when the clinet-server connection
-//! occurred over a hole-punched port.
+//! false positives that often occurred in the first version, when the client-server connection
+//! occurred over a hole-punched port.
//! - The server protects against DoS attacks by requiring the client to send more data to the
-//! server then the dial back puts on the client, thus making the protocol unatractive for an
-//! attacker.
+//! server than the dial back puts on the client, thus making the protocol unattractive for an
+//! attacker.
//!
//! The protocol is separated into two parts:
//! - The client part, which is implemented in the `client` module. (The client is the party that
-//! wants to check if it is reachable from the outside.)
+//! wants to check if it is reachable from the outside.)
//! - The server part, which is implemented in the `server` module. (The server is the party
-//! performing reachability checks on behalf of the client.)
+//! performing reachability checks on behalf of the client.)
//!
//! The two can be used together.
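
The module docs above note that the v2 server makes amplification unattractive by requiring the client to upload more data than the dial-back costs. A rough, standalone sketch of producing that filler data in fixed-size chunks; the chunk size and function name are assumptions for illustration, not the crate's wire format.

const CHUNK_SIZE: usize = 4096;

// Produce `num_bytes` of filler, split into fixed-size chunks.
fn dial_data_chunks(num_bytes: usize) -> impl Iterator<Item = Vec<u8>> {
    let full_chunks = num_bytes / CHUNK_SIZE;
    let remainder = num_bytes % CHUNK_SIZE;
    (0..full_chunks)
        .map(|_| vec![0u8; CHUNK_SIZE])
        .chain((remainder > 0).then(|| vec![0u8; remainder]))
}

fn main() {
    // The client only sends the filler; the server only dials back once it has received it,
    // so an attacker spends at least as much bandwidth as the dial-back costs.
    let total: usize = dial_data_chunks(10_000).map(|chunk| chunk.len()).sum();
    assert_eq!(total, 10_000);
}
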
diff --git a/protocols/autonat/src/v2/client.rs b/protocols/autonat/src/v2/client.rs
index d3272512f35..11ddb792839 100644
--- a/protocols/autonat/src/v2/client.rs
+++ b/protocols/autonat/src/v2/client.rs
@@ -1,5 +1,4 @@
mod behaviour;
mod handler;
-pub use behaviour::Event;
-pub use behaviour::{Behaviour, Config};
+pub use behaviour::{Behaviour, Config, Event};
diff --git a/protocols/autonat/src/v2/client/behaviour.rs b/protocols/autonat/src/v2/client/behaviour.rs
index 97509c05443..8e238fc9be4 100644
--- a/protocols/autonat/src/v2/client/behaviour.rs
+++ b/protocols/autonat/src/v2/client/behaviour.rs
@@ -1,5 +1,6 @@
use std::{
collections::{HashMap, VecDeque},
+ fmt::{Debug, Display, Formatter},
task::{Context, Poll},
time::Duration,
};
@@ -15,14 +16,12 @@ use libp2p_swarm::{
};
use rand::prelude::*;
use rand_core::OsRng;
-use std::fmt::{Debug, Display, Formatter};
-
-use crate::v2::{protocol::DialRequest, Nonce};
use super::handler::{
dial_back::{self, IncomingNonce},
dial_request,
};
+use crate::v2::{protocol::DialRequest, Nonce};
#[derive(Debug, Clone, Copy)]
pub struct Config {
@@ -281,10 +280,12 @@ where
}
}
- /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested candidates.
+ /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested
+ /// candidates.
///
/// In the current implementation, we only send a single address to each AutoNAT server.
- /// This spreads our candidates out across all servers we are connected to which should give us pretty fast feedback on all of them.
+ /// This spreads our candidates out across all servers we are connected to, which should give us
+ /// pretty fast feedback on all of them.
fn issue_dial_requests_for_untested_candidates(&mut self) {
for addr in self.untested_candidates() {
let Some((conn_id, peer_id)) = self.random_autonat_server() else {
@@ -311,7 +312,8 @@ where
/// Returns all untested candidates, sorted by the frequency they were reported at.
///
- /// More frequently reported candidates are considered to more likely be external addresses and thus tested first.
+ /// More frequently reported candidates are considered more likely to be external addresses and
+ /// thus tested first.
fn untested_candidates(&self) -> impl Iterator- {
let mut entries = self
.address_candidates
@@ -333,7 +335,8 @@ where
.map(|(addr, _)| addr)
}
- /// Chooses an active connection to one of our peers that reported support for the [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol.
+ /// Chooses an active connection to one of our peers that reported support for the
+ /// [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol.
fn random_autonat_server(&mut self) -> Option<(ConnectionId, PeerId)> {
let (conn_id, info) = self
.peer_info
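
The doc comments rewrapped above say that untested address candidates are tried most-frequently-reported first, with a single address sent to each server. A small sketch of that ordering, with plain `String`s standing in for `Multiaddr`s:

use std::{cmp::Reverse, collections::HashMap};

// Order candidates so that more frequently reported addresses are tested first.
fn untested_candidates(address_candidates: &HashMap<String, usize>) -> Vec<String> {
    let mut entries: Vec<_> = address_candidates.iter().collect();
    entries.sort_unstable_by_key(|(_, num_reports)| Reverse(**num_reports));
    entries.into_iter().map(|(addr, _)| addr.clone()).collect()
}

fn main() {
    let mut candidates = HashMap::new();
    candidates.insert("/ip4/1.2.3.4/tcp/4001".to_string(), 1);
    candidates.insert("/ip4/1.2.3.4/udp/4001/quic-v1".to_string(), 5);
    assert_eq!(untested_candidates(&candidates)[0], "/ip4/1.2.3.4/udp/4001/quic-v1");
}
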
diff --git a/protocols/autonat/src/v2/client/handler/dial_back.rs b/protocols/autonat/src/v2/client/handler/dial_back.rs
index b3b3a59c02d..ef544a4c77a 100644
--- a/protocols/autonat/src/v2/client/handler/dial_back.rs
+++ b/protocols/autonat/src/v2/client/handler/dial_back.rs
@@ -1,4 +1,5 @@
use std::{
+ convert::Infallible,
io,
task::{Context, Poll},
time::Duration,
@@ -11,7 +12,6 @@ use libp2p_swarm::{
handler::{ConnectionEvent, FullyNegotiatedInbound, ListenUpgradeError},
ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol,
};
-use std::convert::Infallible;
use crate::v2::{protocol, Nonce, DIAL_BACK_PROTOCOL};
@@ -83,7 +83,7 @@ impl ConnectionHandler for Handler {
tracing::warn!("Dial back request dropped, too many requests in flight");
}
}
- // TODO: remove when Rust 1.82 is MSRVprotocols/autonat/src/v2/client/handler/dial_back.rs
+ // TODO: remove when Rust 1.82 is MSRV
#[allow(unreachable_patterns)]
ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, .. }) => {
libp2p_core::util::unreachable(error);
diff --git a/protocols/autonat/src/v2/client/handler/dial_request.rs b/protocols/autonat/src/v2/client/handler/dial_request.rs
index 0f303167523..fff83ad9453 100644
--- a/protocols/autonat/src/v2/client/handler/dial_request.rs
+++ b/protocols/autonat/src/v2/client/handler/dial_request.rs
@@ -1,10 +1,18 @@
+use std::{
+ collections::VecDeque,
+ convert::Infallible,
+ io,
+ iter::{once, repeat},
+ task::{Context, Poll},
+ time::Duration,
+};
+
use futures::{channel::oneshot, AsyncWrite};
use futures_bounded::FuturesMap;
use libp2p_core::{
upgrade::{DeniedUpgrade, ReadyUpgrade},
Multiaddr,
};
-
use libp2p_swarm::{
handler::{
ConnectionEvent, DialUpgradeError, FullyNegotiatedOutbound, OutboundUpgradeSend,
@@ -13,14 +21,6 @@ use libp2p_swarm::{
ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError,
SubstreamProtocol,
};
-use std::{
- collections::VecDeque,
- convert::Infallible,
- io,
- iter::{once, repeat},
- task::{Context, Poll},
- time::Duration,
-};
use crate::v2::{
generated::structs::{mod_DialResponse::ResponseStatus, DialStatus},
@@ -261,7 +261,9 @@ async fn start_stream_handle(
Ok(_) => {}
Err(err) => {
if err.kind() == io::ErrorKind::ConnectionReset {
- // The AutoNAT server may have already closed the stream (this is normal because the probe is finished), in this case we have this error:
+ // The AutoNAT server may have already closed the stream
+ // (this is normal because the probe is finished),
+ // in this case we have this error:
// Err(Custom { kind: ConnectionReset, error: Stopped(0) })
// so we silently ignore this error
} else {
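
The comment reflowed above explains that a `ConnectionReset` arriving after the probe finished is expected and silently ignored. A compact sketch of that error-handling decision (the function name is hypothetical):

use std::io;

// A reset after the probe finished is expected and swallowed; everything else is surfaced.
fn interpret_close_result(res: io::Result<()>) -> io::Result<()> {
    match res {
        Err(e) if e.kind() == io::ErrorKind::ConnectionReset => Ok(()),
        other => other,
    }
}

fn main() {
    let reset = Err(io::Error::new(io::ErrorKind::ConnectionReset, "Stopped(0)"));
    assert!(interpret_close_result(reset).is_ok());
    assert!(interpret_close_result(Err(io::Error::new(io::ErrorKind::Other, "boom"))).is_err());
}
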
diff --git a/protocols/autonat/src/v2/protocol.rs b/protocols/autonat/src/v2/protocol.rs
index 4077fd65f5d..70f9f8c37af 100644
--- a/protocols/autonat/src/v2/protocol.rs
+++ b/protocols/autonat/src/v2/protocol.rs
@@ -1,13 +1,10 @@
// change to quick-protobuf-codec
-use std::io;
-use std::io::ErrorKind;
+use std::{io, io::ErrorKind};
use asynchronous_codec::{Framed, FramedRead, FramedWrite};
-
use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt};
use libp2p_core::Multiaddr;
-
use quick_protobuf_codec::Codec;
use rand::Rng;
@@ -103,7 +100,10 @@ impl From for proto::Message {
);
proto::Message {
msg: proto::mod_Message::OneOfmsg::dialDataResponse(proto::DialDataResponse {
- data: vec![0; val.data_count], // One could use Cow::Borrowed here, but it will require a modification of the generated code and that will fail the CI
+ // One could use Cow::Borrowed here, but it will
+ // require a modification of the generated code
+ // and that will fail the CI
+ data: vec![0; val.data_count],
}),
}
}
diff --git a/protocols/autonat/src/v2/server.rs b/protocols/autonat/src/v2/server.rs
index 25819307784..cd9b1e46b18 100644
--- a/protocols/autonat/src/v2/server.rs
+++ b/protocols/autonat/src/v2/server.rs
@@ -1,5 +1,4 @@
mod behaviour;
mod handler;
-pub use behaviour::Behaviour;
-pub use behaviour::Event;
+pub use behaviour::{Behaviour, Event};
diff --git a/protocols/autonat/src/v2/server/behaviour.rs b/protocols/autonat/src/v2/server/behaviour.rs
index 027cfff7c13..125955cb53a 100644
--- a/protocols/autonat/src/v2/server/behaviour.rs
+++ b/protocols/autonat/src/v2/server/behaviour.rs
@@ -4,20 +4,19 @@ use std::{
task::{Context, Poll},
};
-use crate::v2::server::handler::dial_request::DialBackStatus;
use either::Either;
use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::dial_opts::PeerCondition;
use libp2p_swarm::{
- dial_opts::DialOpts, dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure,
- FromSwarm, NetworkBehaviour, ToSwarm,
+ dial_opts::{DialOpts, PeerCondition},
+ dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, FromSwarm,
+ NetworkBehaviour, ToSwarm,
};
use rand_core::{OsRng, RngCore};
use crate::v2::server::handler::{
dial_back,
- dial_request::{self, DialBackCommand},
+ dial_request::{self, DialBackCommand, DialBackStatus},
Handler,
};
diff --git a/protocols/autonat/src/v2/server/handler/dial_back.rs b/protocols/autonat/src/v2/server/handler/dial_back.rs
index 3cacd4ff32b..61593da318d 100644
--- a/protocols/autonat/src/v2/server/handler/dial_back.rs
+++ b/protocols/autonat/src/v2/server/handler/dial_back.rs
@@ -14,13 +14,12 @@ use libp2p_swarm::{
SubstreamProtocol,
};
+use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes};
use crate::v2::{
protocol::{dial_back, recv_dial_back_response},
DIAL_BACK_PROTOCOL,
};
-use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes};
-
pub(crate) type ToBehaviour = io::Result<()>;
pub struct Handler {
diff --git a/protocols/autonat/tests/autonatv2.rs b/protocols/autonat/tests/autonatv2.rs
index f22a2e51470..49866a9adb5 100644
--- a/protocols/autonat/tests/autonatv2.rs
+++ b/protocols/autonat/tests/autonatv2.rs
@@ -1,15 +1,15 @@
-use libp2p_autonat::v2::client::{self, Config};
-use libp2p_autonat::v2::server;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::TransportError;
-use libp2p_core::Multiaddr;
+use std::{sync::Arc, time::Duration};
+
+use libp2p_autonat::v2::{
+ client::{self, Config},
+ server,
+};
+use libp2p_core::{multiaddr::Protocol, transport::TransportError, Multiaddr};
use libp2p_swarm::{
DialError, FromSwarm, NetworkBehaviour, NewExternalAddrCandidate, Swarm, SwarmEvent,
};
use libp2p_swarm_test::SwarmExt;
use rand_core::OsRng;
-use std::sync::Arc;
-use std::time::Duration;
use tokio::sync::oneshot;
use tracing_subscriber::EnvFilter;
diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs
index f5c18e3f34e..49c6c483514 100644
--- a/protocols/autonat/tests/test_client.rs
+++ b/protocols/autonat/tests/test_client.rs
@@ -18,6 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::time::Duration;
+
use libp2p_autonat::{
Behaviour, Config, Event, NatStatus, OutboundProbeError, OutboundProbeEvent, ResponseError,
};
@@ -25,7 +27,6 @@ use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::time::Duration;
use tokio::task::JoinHandle;
const MAX_CONFIDENCE: usize = 3;
@@ -116,7 +117,8 @@ async fn test_auto_probe() {
// It can happen that the server observed the established connection and
// returned a response before the inbound established connection was reported at the client.
- // In this (rare) case the `ConnectionEstablished` event occurs after the `OutboundProbeEvent::Response`.
+ // In this (rare) case the `ConnectionEstablished` event
+ // occurs after the `OutboundProbeEvent::Response`.
if !had_connection_event {
match client.next_swarm_event().await {
SwarmEvent::ConnectionEstablished {
diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs
index d43d14198d4..944c4301b20 100644
--- a/protocols/autonat/tests/test_server.rs
+++ b/protocols/autonat/tests/test_server.rs
@@ -18,15 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{num::NonZeroU32, time::Duration};
+
use libp2p_autonat::{
Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, ResponseError,
};
use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::DialError;
-use libp2p_swarm::{Swarm, SwarmEvent};
+use libp2p_swarm::{DialError, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::{num::NonZeroU32, time::Duration};
#[tokio::test]
async fn test_dial_back() {
@@ -340,7 +340,8 @@ async fn test_global_ips_config() {
client.listen().await;
tokio::spawn(client.loop_on_next());
- // Expect the probe to be refused as both peers run on the same machine and thus in the same local network.
+ // Expect the probe to be refused as both peers run
+ // on the same machine and thus in the same local network.
match server.next_behaviour_event().await {
Event::InboundProbe(InboundProbeEvent::Error { error, .. }) => assert!(matches!(
error,
diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs
index 7d0366c98bc..989635c02ba 100644
--- a/protocols/dcutr/src/behaviour.rs
+++ b/protocols/dcutr/src/behaviour.rs
@@ -20,27 +20,29 @@
//! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node.
-use crate::{handler, protocol};
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ convert::Infallible,
+ num::NonZeroUsize,
+ task::{Context, Poll},
+};
+
use either::Either;
-use libp2p_core::connection::ConnectedPoint;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use libp2p_core::{
+ connection::ConnectedPoint, multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr,
+};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::{ConnectionClosed, DialFailure, FromSwarm};
-use libp2p_swarm::dial_opts::{self, DialOpts};
use libp2p_swarm::{
- dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler,
- THandlerOutEvent,
+ behaviour::{ConnectionClosed, DialFailure, FromSwarm},
+ dial_opts::{self, DialOpts},
+ dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour,
+ NewExternalAddrCandidate, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm};
use lru::LruCache;
-use std::collections::{HashMap, HashSet, VecDeque};
-use std::convert::Infallible;
-use std::num::NonZeroUsize;
-use std::task::{Context, Poll};
use thiserror::Error;
+use crate::{handler, protocol};
+
pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3;
/// The events produced by the [`Behaviour`].
@@ -184,7 +186,8 @@ impl NetworkBehaviour for Behaviour {
handler::relayed::Handler::new(connected_point, self.observed_addresses());
handler.on_behaviour_event(handler::relayed::Command::Connect);
- return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound.
+ // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound.
+ return Ok(Either::Left(handler));
}
self.direct_connections
.entry(peer)
@@ -217,7 +220,8 @@ impl NetworkBehaviour for Behaviour {
port_use,
},
self.observed_addresses(),
- ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound.
+ ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one
+ // outbound.
}
self.direct_connections
@@ -255,7 +259,8 @@ impl NetworkBehaviour for Behaviour {
Either::Left(_) => connection_id,
Either::Right(_) => match self.direct_to_relayed_connections.get(&connection_id) {
None => {
- // If the connection ID is unknown to us, it means we didn't create it so ignore any event coming from it.
+ // If the connection ID is unknown to us, it means we didn't create it so ignore
+ // any event coming from it.
return;
}
Some(relayed_connection_id) => *relayed_connection_id,
@@ -347,8 +352,9 @@ impl NetworkBehaviour for Behaviour {
///
/// We use an [`LruCache`] to favor addresses that are reported more often.
/// When attempting a hole-punch, we will try more frequent addresses first.
-/// Most of these addresses will come from observations by other nodes (via e.g. the identify protocol).
-/// More common observations mean a more likely stable port-mapping and thus a higher chance of a successful hole-punch.
+/// Most of these addresses will come from observations by other nodes (via e.g. the identify
+/// protocol). More common observations mean a more likely stable port-mapping and thus a higher
+/// chance of a successful hole-punch.
struct Candidates {
inner: LruCache,
me: PeerId,
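
The `Candidates` docs above explain that more frequently observed addresses are tried first when hole-punching. A std-only sketch of that idea; the real type wraps an `LruCache`, whereas this placeholder uses a `VecDeque` with move-to-front on re-observation.

use std::collections::VecDeque;

struct Candidates {
    inner: VecDeque<String>,
    capacity: usize,
}

impl Candidates {
    // Re-observed addresses move to the front; the least recently reported address is
    // evicted when the capacity is exceeded, so frequently observed addresses survive.
    fn add(&mut self, addr: String) {
        if let Some(pos) = self.inner.iter().position(|a| *a == addr) {
            self.inner.remove(pos);
        } else if self.inner.len() == self.capacity {
            self.inner.pop_back();
        }
        self.inner.push_front(addr);
    }

    // Hole-punch attempts would try candidates in this order.
    fn iter(&self) -> impl Iterator<Item = &String> {
        self.inner.iter()
    }
}

fn main() {
    let mut candidates = Candidates { inner: VecDeque::new(), capacity: 2 };
    candidates.add("/ip4/1.1.1.1/tcp/1".into());
    candidates.add("/ip4/2.2.2.2/tcp/2".into());
    candidates.add("/ip4/1.1.1.1/tcp/1".into()); // re-observed, moves to the front
    assert_eq!(candidates.iter().next().unwrap(), "/ip4/1.1.1.1/tcp/1");
}
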
diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs
index ad12a196cb9..0d6e1b5e889 100644
--- a/protocols/dcutr/src/handler/relayed.rs
+++ b/protocols/dcutr/src/handler/relayed.rs
@@ -20,26 +20,31 @@
//! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection.
-use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS;
-use crate::{protocol, PROTOCOL_NAME};
+use std::{
+ collections::VecDeque,
+ io,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use either::Either;
use futures::future;
-use libp2p_core::multiaddr::Multiaddr;
-use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade};
-use libp2p_core::ConnectedPoint;
-use libp2p_swarm::handler::{
- ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
- ListenUpgradeError,
+use libp2p_core::{
+ multiaddr::Multiaddr,
+ upgrade::{DeniedUpgrade, ReadyUpgrade},
+ ConnectedPoint,
};
use libp2p_swarm::{
+ handler::{
+ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
+ ListenUpgradeError,
+ },
ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError,
SubstreamProtocol,
};
use protocol::{inbound, outbound};
-use std::collections::VecDeque;
-use std::io;
-use std::task::{Context, Poll};
-use std::time::Duration;
+
+use crate::{behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS, protocol, PROTOCOL_NAME};
#[derive(Debug)]
pub enum Command {
@@ -114,8 +119,8 @@ impl Handler {
}
self.attempts += 1;
}
- // A connection listener denies all incoming substreams, thus none can ever be fully negotiated.
- // TODO: remove when Rust 1.82 is MSRV
+ // A connection listener denies all incoming substreams, thus none can ever be fully
+ // negotiated. TODO: remove when Rust 1.82 is MSRV
#[allow(unreachable_patterns)]
future::Either::Right(output) => libp2p_core::util::unreachable(output),
}
diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs
index 005d8394f5e..c5209930ca2 100644
--- a/protocols/dcutr/src/protocol/inbound.rs
+++ b/protocols/dcutr/src/protocol/inbound.rs
@@ -18,14 +18,16 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
+use std::io;
+
use asynchronous_codec::Framed;
use futures::prelude::*;
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_swarm::Stream;
-use std::io;
use thiserror::Error;
+use crate::proto;
+
pub(crate) async fn handshake(
stream: Stream,
candidates: Vec,
diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs
index 8639ff4f053..cdd3d5fbf0b 100644
--- a/protocols/dcutr/src/protocol/outbound.rs
+++ b/protocols/dcutr/src/protocol/outbound.rs
@@ -18,17 +18,18 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
-use crate::PROTOCOL_NAME;
+use std::io;
+
use asynchronous_codec::Framed;
use futures::prelude::*;
use futures_timer::Delay;
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_swarm::Stream;
-use std::io;
use thiserror::Error;
use web_time::Instant;
+use crate::{proto, PROTOCOL_NAME};
+
pub(crate) async fn handshake(
stream: Stream,
candidates: Vec,
diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs
index 36f168fb04a..a35c9a50cfe 100644
--- a/protocols/dcutr/tests/lib.rs
+++ b/protocols/dcutr/tests/lib.rs
@@ -18,9 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use libp2p_core::multiaddr::{Multiaddr, Protocol};
-use libp2p_core::transport::upgrade::Version;
-use libp2p_core::transport::{MemoryTransport, Transport};
+use std::time::Duration;
+
+use libp2p_core::{
+ multiaddr::{Multiaddr, Protocol},
+ transport::{upgrade::Version, MemoryTransport, Transport},
+};
use libp2p_dcutr as dcutr;
use libp2p_identify as identify;
use libp2p_identity as identity;
@@ -29,7 +32,6 @@ use libp2p_plaintext as plaintext;
use libp2p_relay as relay;
use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::test]
diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs
index 1a70d2213b2..477172b42c0 100644
--- a/protocols/floodsub/src/layer.rs
+++ b/protocols/floodsub/src/layer.rs
@@ -18,27 +18,36 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol::{
- FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription,
- FloodsubSubscriptionAction,
+use std::{
+ collections::{
+ hash_map::{DefaultHasher, HashMap},
+ VecDeque,
+ },
+ iter,
+ task::{Context, Poll},
};
-use crate::topic::Topic;
-use crate::FloodsubConfig;
+
use bytes::Bytes;
use cuckoofilter::{CuckooError, CuckooFilter};
use fnv::FnvHashSet;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm};
use libp2p_swarm::{
- dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour,
- NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
+ behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm},
+ dial_opts::DialOpts,
+ CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler,
+ OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
use smallvec::SmallVec;
-use std::collections::hash_map::{DefaultHasher, HashMap};
-use std::task::{Context, Poll};
-use std::{collections::VecDeque, iter};
+
+use crate::{
+ protocol::{
+ FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription,
+ FloodsubSubscriptionAction,
+ },
+ topic::Topic,
+ FloodsubConfig,
+};
/// Network behaviour that handles the floodsub protocol.
pub struct Floodsub {
@@ -192,7 +201,8 @@ impl Floodsub {
self.publish_many_inner(topic, data, true)
}
- /// Publishes a message with multiple topics to the network, even if we're not subscribed to any of the topics.
+ /// Publishes a message with multiple topics to the network, even if we're not subscribed to any
+ /// of the topics.
pub fn publish_many_any(
&mut self,
topic: impl IntoIterator
- >,
diff --git a/protocols/floodsub/src/lib.rs b/protocols/floodsub/src/lib.rs
index 94766d5fdca..d43b0c88788 100644
--- a/protocols/floodsub/src/lib.rs
+++ b/protocols/floodsub/src/lib.rs
@@ -35,9 +35,11 @@ mod proto {
pub(crate) use self::floodsub::pb::{mod_RPC::SubOpts, Message, RPC};
}
-pub use self::layer::{Floodsub, FloodsubEvent};
-pub use self::protocol::{FloodsubMessage, FloodsubRpc};
-pub use self::topic::Topic;
+pub use self::{
+ layer::{Floodsub, FloodsubEvent},
+ protocol::{FloodsubMessage, FloodsubRpc},
+ topic::Topic,
+};
/// Configuration options for the Floodsub protocol.
#[derive(Debug, Clone)]
diff --git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs
index edc842be8ce..69cfcbd9dc7 100644
--- a/protocols/floodsub/src/protocol.rs
+++ b/protocols/floodsub/src/protocol.rs
@@ -18,19 +18,19 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
-use crate::topic::Topic;
+use std::{io, iter, pin::Pin};
+
use asynchronous_codec::Framed;
use bytes::Bytes;
use futures::{
io::{AsyncRead, AsyncWrite},
- Future,
+ Future, SinkExt, StreamExt,
};
-use futures::{SinkExt, StreamExt};
use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
use libp2p_identity::PeerId;
use libp2p_swarm::StreamProtocol;
-use std::{io, iter, pin::Pin};
+
+use crate::{proto, topic::Topic};
const MAX_MESSAGE_LEN_BYTES: usize = 2048;
diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs
index c955ee59c65..ee600d22098 100644
--- a/protocols/gossipsub/src/backoff.rs
+++ b/protocols/gossipsub/src/backoff.rs
@@ -19,15 +19,19 @@
// DEALINGS IN THE SOFTWARE.
//! Data structure for efficiently storing known back-off's when pruning peers.
-use crate::topic::TopicHash;
-use libp2p_identity::PeerId;
-use std::collections::{
- hash_map::{Entry, HashMap},
- HashSet,
+use std::{
+ collections::{
+ hash_map::{Entry, HashMap},
+ HashSet,
+ },
+ time::Duration,
};
-use std::time::Duration;
+
+use libp2p_identity::PeerId;
use web_time::Instant;
+use crate::topic::TopicHash;
+
#[derive(Copy, Clone)]
struct HeartbeatIndex(usize);
@@ -68,8 +72,8 @@ impl BackoffStorage {
}
}
- /// Updates the backoff for a peer (if there is already a more restrictive backoff then this call
- /// doesn't change anything).
+ /// Updates the backoff for a peer (if there is already a more restrictive backoff then this
+ /// call doesn't change anything).
pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) {
let instant = Instant::now() + time;
let insert_into_backoffs_by_heartbeat =
@@ -155,7 +159,7 @@ impl BackoffStorage {
None => false,
};
if !keep {
- //remove from backoffs
+ // remove from backoffs
if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) {
if m.get_mut().remove(peer).is_some() && m.get().is_empty() {
m.remove();
diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index ae808d97261..bb3eaaa9b5a 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -19,11 +19,10 @@
// DEALINGS IN THE SOFTWARE.
use std::{
- cmp::{max, Ordering},
- collections::HashSet,
- collections::VecDeque,
- collections::{BTreeSet, HashMap},
+ cmp::{max, Ordering, Ordering::Equal},
+ collections::{BTreeSet, HashMap, HashSet, VecDeque},
fmt,
+ fmt::Debug,
net::IpAddr,
task::{Context, Poll},
time::Duration,
@@ -31,52 +30,44 @@ use std::{
use futures::FutureExt;
use futures_timer::Delay;
-use prometheus_client::registry::Registry;
-use rand::{seq::SliceRandom, thread_rng};
-
use libp2p_core::{
- multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, transport::PortUse, Endpoint, Multiaddr,
+ multiaddr::Protocol::{Ip4, Ip6},
+ transport::PortUse,
+ Endpoint, Multiaddr,
};
-use libp2p_identity::Keypair;
-use libp2p_identity::PeerId;
+use libp2p_identity::{Keypair, PeerId};
use libp2p_swarm::{
behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm},
dial_opts::DialOpts,
ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
+use prometheus_client::registry::Registry;
+use quick_protobuf::{MessageWrite, Writer};
+use rand::{seq::SliceRandom, thread_rng};
use web_time::{Instant, SystemTime};
-use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason};
-use crate::protocol::SIGNING_PREFIX;
-use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter};
-use crate::time_cache::DuplicateCache;
-use crate::topic::{Hasher, Topic, TopicHash};
-use crate::transform::{DataTransform, IdentityTransform};
-use crate::types::{
- ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription,
- SubscriptionAction,
-};
-use crate::types::{PeerConnections, PeerKind, RpcOut};
-use crate::{backoff::BackoffStorage, FailedMessages};
use crate::{
+ backoff::BackoffStorage,
config::{Config, ValidationMode},
- types::Graft,
-};
-use crate::{gossip_promises::GossipPromises, types::Prune};
-use crate::{
+ gossip_promises::GossipPromises,
handler::{Handler, HandlerEvent, HandlerIn},
- types::IWant,
-};
-use crate::{mcache::MessageCache, types::IHave};
-use crate::{
+ mcache::MessageCache,
metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty},
+ peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason},
+ protocol::SIGNING_PREFIX,
rpc::Sender,
+ rpc_proto::proto,
+ subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter},
+ time_cache::DuplicateCache,
+ topic::{Hasher, Topic, TopicHash},
+ transform::{DataTransform, IdentityTransform},
+ types::{
+ ControlAction, Graft, IHave, IWant, Message, MessageAcceptance, MessageId, PeerConnections,
+ PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, SubscriptionAction,
+ },
+ FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError,
};
-use crate::{rpc_proto::proto, TopicScoreParams};
-use crate::{PublishError, SubscriptionError, ValidationError};
-use quick_protobuf::{MessageWrite, Writer};
-use std::{cmp::Ordering::Equal, fmt::Debug};
#[cfg(test)]
mod tests;
@@ -221,8 +212,9 @@ impl From for PublishConfig {
let public_key = keypair.public();
let key_enc = public_key.encode_protobuf();
let key = if key_enc.len() <= 42 {
- // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it
- // specifically in the [`rpc_proto::proto::Message::key`] field.
+ // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we
+ // don't include it specifically in the
+ // [`rpc_proto::proto::Message::key`] field.
None
} else {
// Include the protobuf encoding of the public key in the message.
@@ -289,7 +281,7 @@ pub struct Behaviour {
/// The last publish time for fanout topics.
fanout_last_pub: HashMap,
- ///Storage for backoffs
+ /// Storage for backoffs
backoffs: BackoffStorage,
/// Message cache for the last few heartbeats.
@@ -1415,7 +1407,7 @@ where
+ self.config.graft_flood_threshold())
- self.config.prune_backoff();
if flood_cutoff > now {
- //extra penalty
+ // extra penalty
peer_score.add_penalty(peer_id, 1);
}
}
@@ -1436,15 +1428,16 @@ where
topic=%topic_hash,
"GRAFT: ignoring peer with negative score"
);
- // we do send them PRUNE however, because it's a matter of protocol correctness
+ // we do send them PRUNE however, because it's a matter of protocol
+ // correctness
to_prune_topics.insert(topic_hash.clone());
// but we won't PX to them
do_px = false;
continue;
}
- // check mesh upper bound and only allow graft if the upper bound is not reached or
- // if it is an outbound peer
+ // check mesh upper bound and only allow graft if the upper bound is not reached
+ // or if it is an outbound peer
if peers.len() >= self.config.mesh_n_high()
&& !self.outbound_peers.contains(peer_id)
{
@@ -1572,7 +1565,7 @@ where
self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune);
if self.mesh.contains_key(&topic_hash) {
- //connect to px peers
+ // connect to px peers
if !px.is_empty() {
// we ignore PX from peers with insufficient score
if below_threshold {
@@ -1604,7 +1597,7 @@ where
let n = self.config.prune_peers();
// Ignore peerInfo with no ID
//
- //TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a
+ // TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a
// signed peer record?
px.retain(|p| p.peer_id.is_some());
if px.len() > n {
@@ -2867,8 +2860,8 @@ where
.expect("Previously established connection to peer must be present");
peer.connections.remove(index);
- // If there are more connections and this peer is in a mesh, inform the first connection
- // handler.
+ // If there are more connections and this peer is in a mesh, inform the first
+ // connection handler.
if !peer.connections.is_empty() {
for topic in &peer.topics {
if let Some(mesh_peers) = self.mesh.get(topic) {
@@ -3162,7 +3155,8 @@ where
}
// Handle control messages
- // group some control messages, this minimises SendEvents (code is simplified to handle each event at a time however)
+ // group some control messages, this minimises SendEvents (code is simplified to
+ // handle each event at a time however)
let mut ihave_msgs = vec![];
let mut graft_msgs = vec![];
let mut prune_msgs = vec![];
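
The reflowed comment in the `PublishConfig` hunk above explains when the signing key is omitted from published messages: if the encoded public key is short enough (at most 42 bytes, per the constant in that hunk) it is already recoverable from the inlined peer ID. A sketch of that decision with plain `Vec<u8>` placeholders:

// A key short enough to be inlined in the source peer ID is not repeated in the
// message's `key` field; longer keys are attached explicitly.
fn message_key_field(encoded_public_key: Vec<u8>) -> Option<Vec<u8>> {
    if encoded_public_key.len() <= 42 {
        None
    } else {
        Some(encoded_public_key)
    }
}

fn main() {
    assert!(message_key_field(vec![0u8; 36]).is_none()); // e.g. an inlineable Ed25519 key
    assert!(message_key_field(vec![0u8; 300]).is_some()); // e.g. a long RSA key
}
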
diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs
index 9567150382a..eaa983d214d 100644
--- a/protocols/gossipsub/src/behaviour/tests.rs
+++ b/protocols/gossipsub/src/behaviour/tests.rs
@@ -20,16 +20,17 @@
// Collection of tests for the gossipsub network behaviour
-use super::*;
-use crate::rpc::Receiver;
-use crate::subscription_filter::WhitelistSubscriptionFilter;
-use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic};
+use std::{future, net::Ipv4Addr, thread::sleep};
+
use byteorder::{BigEndian, ByteOrder};
use libp2p_core::ConnectedPoint;
use rand::Rng;
-use std::future;
-use std::net::Ipv4Addr;
-use std::thread::sleep;
+
+use super::*;
+use crate::{
+ config::ConfigBuilder, rpc::Receiver, subscription_filter::WhitelistSubscriptionFilter,
+ types::Rpc, IdentTopic as Topic,
+};
#[derive(Default, Debug)]
struct InjectNodes
@@ -311,7 +312,8 @@ fn proto_to_message(rpc: &proto::RPC) -> Rpc {
messages.push(RawMessage {
source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()),
data: message.data.unwrap_or_default(),
- sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application
+ sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), /* don't inform the
+ * application */
topic: TopicHash::from_raw(message.topic),
signature: message.signature, // don't inform the application
key: None,
@@ -677,7 +679,7 @@ fn test_publish_without_flood_publishing() {
// - Send publish message to all peers
// - Insert message into gs.mcache and gs.received
- //turn off flood publish to test old behaviour
+ // turn off flood publish to test old behaviour
let config = ConfigBuilder::default()
.flood_publish(false)
.build()
@@ -757,7 +759,7 @@ fn test_fanout() {
// - Send publish message to fanout peers
// - Insert message into gs.mcache and gs.received
- //turn off flood publish to test fanout behaviour
+ // turn off flood publish to test fanout behaviour
let config = ConfigBuilder::default()
.flood_publish(false)
.build()
@@ -1447,10 +1449,10 @@ fn test_explicit_peer_gets_connected() {
.to_subscribe(true)
.create_network();
- //create new peer
+ // create new peer
let peer = PeerId::random();
- //add peer as explicit peer
+ // add peer as explicit peer
gs.add_explicit_peer(&peer);
let num_events = gs
@@ -1483,17 +1485,17 @@ fn test_explicit_peer_reconnects() {
let peer = others.first().unwrap();
- //add peer as explicit peer
+ // add peer as explicit peer
gs.add_explicit_peer(peer);
flush_events(&mut gs, receivers);
- //disconnect peer
+ // disconnect peer
disconnect_peer(&mut gs, peer);
gs.heartbeat();
- //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2`
+ // check that no reconnect after first heartbeat since `explicit_peer_ticks == 2`
assert_eq!(
gs.events
.iter()
@@ -1508,7 +1510,7 @@ fn test_explicit_peer_reconnects() {
gs.heartbeat();
- //check that there is a reconnect after second heartbeat
+ // check that there is a reconnect after second heartbeat
assert!(
gs.events
.iter()
@@ -1536,11 +1538,11 @@ fn test_handle_graft_explicit_peer() {
gs.handle_graft(peer, topic_hashes.clone());
- //peer got not added to mesh
+ // peer did not get added to the mesh
assert!(gs.mesh[&topic_hashes[0]].is_empty());
assert!(gs.mesh[&topic_hashes[1]].is_empty());
- //check prunes
+ // check prunes
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == peer
&& match m {
@@ -1566,13 +1568,13 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() {
.explicit(1)
.create_network();
- //only peer 1 is in the mesh not peer 0 (which is an explicit peer)
+ // only peer 1 is in the mesh not peer 0 (which is an explicit peer)
assert_eq!(
gs.mesh[&topic_hashes[0]],
vec![peers[1]].into_iter().collect()
);
- //assert that graft gets created to non-explicit peer
+ // assert that graft gets created to non-explicit peer
let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. })
});
@@ -1581,7 +1583,7 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() {
"No graft message got created to non-explicit peer"
);
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1603,10 +1605,10 @@ fn do_not_graft_explicit_peer() {
gs.heartbeat();
- //mesh stays empty
+ // mesh stays empty
assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new());
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &others[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1663,7 +1665,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() {
.explicit(1)
.create_network();
- //create new topic, both peers subscribing to it but we do not subscribe to it
+ // create new topic, both peers subscribing to it but we do not subscribe to it
let topic = Topic::new(String::from("t"));
let topic_hash = topic.hash();
for peer in peers.iter().take(2) {
@@ -1676,13 +1678,13 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() {
);
}
- //subscribe now to topic
+ // subscribe now to topic
gs.subscribe(&topic).unwrap();
- //only peer 1 is in the mesh not peer 0 (which is an explicit peer)
+ // only peer 1 is in the mesh not peer 0 (which is an explicit peer)
assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect());
- //assert that graft gets created to non-explicit peer
+ // assert that graft gets created to non-explicit peer
let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. })
});
@@ -1691,7 +1693,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() {
"No graft message got created to non-explicit peer"
);
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1711,7 +1713,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() {
.explicit(1)
.create_network();
- //create new topic, both peers subscribing to it but we do not subscribe to it
+ // create new topic, both peers subscribing to it but we do not subscribe to it
let topic = Topic::new(String::from("t"));
let topic_hash = topic.hash();
for peer in peers.iter().take(2) {
@@ -1724,16 +1726,16 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() {
);
}
- //we send a message for this topic => this will initialize the fanout
+ // we send a message for this topic => this will initialize the fanout
gs.publish(topic.clone(), vec![1, 2, 3]).unwrap();
- //subscribe now to topic
+ // subscribe now to topic
gs.subscribe(&topic).unwrap();
- //only peer 1 is in the mesh not peer 0 (which is an explicit peer)
+ // only peer 1 is in the mesh not peer 0 (which is an explicit peer)
assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect());
- //assert that graft gets created to non-explicit peer
+ // assert that graft gets created to non-explicit peer
let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. })
});
@@ -1742,7 +1744,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() {
"No graft message got created to non-explicit peer"
);
- //assert that no graft gets created to explicit peer
+ // assert that no graft gets created to explicit peer
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. })
});
@@ -1774,15 +1776,15 @@ fn no_gossip_gets_sent_to_explicit_peers() {
validated: true,
};
- //forward the message
+ // forward the message
gs.handle_received_message(message, &local_id);
- //simulate multiple gossip calls (for randomness)
+ // simulate multiple gossip calls (for randomness)
for _ in 0..3 {
gs.emit_gossip();
}
- //assert that no gossip gets sent to explicit peer
+ // assert that no gossip gets sent to explicit peer
let receiver = receivers.remove(&peers[0]).unwrap();
let mut gossips = 0;
let non_priority = receiver.non_priority.get_ref();
@@ -1835,7 +1837,7 @@ fn test_mesh_subtraction() {
// Adds mesh_low peers and PRUNE 2 giving us a deficit.
let n = config.mesh_n_high() + 10;
- //make all outbound connections so that we allow grafting to all
+ // make all outbound connections so that we allow grafting to all
let (mut gs, peers, _receivers, topics) = inject_nodes1()
.peer_no(n)
.topics(vec!["test".into()])
@@ -1866,10 +1868,10 @@ fn test_connect_to_px_peers_on_handle_prune() {
.to_subscribe(true)
.create_network();
- //handle prune from single peer with px peers
+ // handle prune from single peer with px peers
let mut px = Vec::new();
- //propose more px peers than config.prune_peers()
+ // propose more px peers than config.prune_peers()
for _ in 0..config.prune_peers() + 5 {
px.push(PeerInfo {
peer_id: Some(PeerId::random()),
@@ -1885,7 +1887,7 @@ fn test_connect_to_px_peers_on_handle_prune() {
)],
);
- //Check DialPeer events for px peers
+ // Check DialPeer events for px peers
let dials: Vec<_> = gs
.events
.iter()
@@ -1903,7 +1905,7 @@ fn test_connect_to_px_peers_on_handle_prune() {
// No duplicates
assert_eq!(dials_set.len(), config.prune_peers());
- //all dial peers must be in px
+ // all dial peers must be in px
assert!(dials_set.is_subset(
&px.iter()
.map(|i| *i.peer_id.as_ref().unwrap())
@@ -1915,14 +1917,14 @@ fn test_connect_to_px_peers_on_handle_prune() {
fn test_send_px_and_backoff_in_prune() {
let config: Config = Config::default();
- //build mesh with enough peers for px
+ // build mesh with enough peers for px
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(config.prune_peers() + 1)
.topics(vec!["test".into()])
.to_subscribe(true)
.create_network();
- //send prune to peer
+ // send prune to peer
gs.send_graft_prune(
HashMap::new(),
vec![(peers[0], vec![topics[0].clone()])]
@@ -1931,7 +1933,7 @@ fn test_send_px_and_backoff_in_prune() {
HashSet::new(),
);
- //check prune message
+ // check prune message
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0]
&& match m {
@@ -1957,14 +1959,14 @@ fn test_send_px_and_backoff_in_prune() {
fn test_prune_backoffed_peer_on_graft() {
let config: Config = Config::default();
- //build mesh with enough peers for px
+ // build mesh with enough peers for px
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(config.prune_peers() + 1)
.topics(vec!["test".into()])
.to_subscribe(true)
.create_network();
- //remove peer from mesh and send prune to peer => this adds a backoff for this peer
+ // remove peer from mesh and send prune to peer => this adds a backoff for this peer
gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]);
gs.send_graft_prune(
HashMap::new(),
@@ -1974,13 +1976,13 @@ fn test_prune_backoffed_peer_on_graft() {
HashSet::new(),
);
- //ignore all messages until now
+ // ignore all messages until now
let receivers = flush_events(&mut gs, receivers);
- //handle graft
+ // handle graft
gs.handle_graft(&peers[0], vec![topics[0].clone()]);
- //check prune message
+ // check prune message
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0]
&& match m {
@@ -2007,7 +2009,7 @@ fn test_do_not_graft_within_backoff_period() {
.heartbeat_interval(Duration::from_millis(100))
.build()
.unwrap();
- //only one peer => mesh too small and will try to regraft as early as possible
+ // only one peer => mesh too small and will try to regraft as early as possible
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2015,22 +2017,22 @@ fn test_do_not_graft_within_backoff_period() {
.gs_config(config)
.create_network();
- //handle prune from peer with backoff of one second
+ // handle prune from peer with backoff of one second
gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]);
- //forget all events until now
+ // forget all events until now
let receivers = flush_events(&mut gs, receivers);
- //call heartbeat
+ // call heartbeat
gs.heartbeat();
- //Sleep for one second and apply 10 regular heartbeats (interval = 100ms).
+ // Sleep for one second and apply 10 regular heartbeats (interval = 100ms).
for _ in 0..10 {
sleep(Duration::from_millis(100));
gs.heartbeat();
}
- //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
+ // Check that no graft got created (we have backoff_slack = 1, therefore one more heartbeat
// is needed).
let (control_msgs, receivers) =
count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
@@ -2039,11 +2041,11 @@ fn test_do_not_graft_within_backoff_period() {
"Graft message created too early within backoff period"
);
- //Heartbeat one more time this should graft now
+ // Heartbeat one more time, this should graft now
sleep(Duration::from_millis(100));
gs.heartbeat();
- //check that graft got created
+ // check that graft got created
let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
assert!(
control_msgs > 0,
@@ -2053,14 +2055,14 @@ fn test_do_not_graft_within_backoff_period() {
#[test]
fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() {
- //set default backoff period to 1 second
+ // set the default backoff period (prune_backoff) to 90ms, just below the heartbeat interval
let config = ConfigBuilder::default()
.prune_backoff(Duration::from_millis(90))
.backoff_slack(1)
.heartbeat_interval(Duration::from_millis(100))
.build()
.unwrap();
- //only one peer => mesh too small and will try to regraft as early as possible
+ // only one peer => mesh too small and will try to regraft as early as possible
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2068,20 +2070,20 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without
.gs_config(config)
.create_network();
- //handle prune from peer without a specified backoff
+ // handle prune from peer without a specified backoff
gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]);
- //forget all events until now
+ // forget all events until now
let receivers = flush_events(&mut gs, receivers);
- //call heartbeat
+ // call heartbeat
gs.heartbeat();
- //Apply one more heartbeat
+ // Apply one more heartbeat
sleep(Duration::from_millis(100));
gs.heartbeat();
- //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat
+ // Check that no graft got created (we have backoff_slack = 1, therefore one more heartbeat
// is needed).
let (control_msgs, receivers) =
count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
@@ -2090,11 +2092,11 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without
"Graft message created too early within backoff period"
);
- //Heartbeat one more time this should graft now
+ // Heartbeat one more time, this should graft now
sleep(Duration::from_millis(100));
gs.heartbeat();
- //check that graft got created
+ // check that graft got created
let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. }));
assert!(
control_msgs > 0,
@@ -2181,7 +2183,7 @@ fn test_flood_publish() {
.to_subscribe(true)
.create_network();
- //publish message
+ // publish message
let publish_data = vec![0; 42];
gs.publish(Topic::new(topic), publish_data).unwrap();
@@ -2228,15 +2230,15 @@ fn test_flood_publish() {
fn test_gossip_to_at_least_gossip_lazy_peers() {
let config: Config = Config::default();
- //add more peers than in mesh to test gossipping
- //by default only mesh_n_low peers will get added to mesh
+ // add more peers than in the mesh to test gossiping
+ // by default only mesh_n_low peers will get added to mesh
let (mut gs, _, receivers, topic_hashes) = inject_nodes1()
.peer_no(config.mesh_n_low() + config.gossip_lazy() + 1)
.topics(vec!["topic".into()])
.to_subscribe(true)
.create_network();
- //receive message
+ // receive message
let raw_message = RawMessage {
source: Some(PeerId::random()),
data: vec![],
@@ -2248,7 +2250,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() {
};
gs.handle_received_message(raw_message.clone(), &PeerId::random());
- //emit gossip
+ // emit gossip
gs.emit_gossip();
// Transform the inbound message
@@ -2256,7 +2258,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() {
let msg_id = gs.config.message_id(message);
- //check that exactly config.gossip_lazy() many gossip messages were sent.
+ // check that exactly config.gossip_lazy() many gossip messages were sent.
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action {
RpcOut::IHave(IHave {
topic_hash,
@@ -2271,7 +2273,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() {
fn test_gossip_to_at_most_gossip_factor_peers() {
let config: Config = Config::default();
- //add a lot of peers
+ // add a lot of peers
let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize;
let (mut gs, _, receivers, topic_hashes) = inject_nodes1()
.peer_no(m)
@@ -2279,7 +2281,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() {
.to_subscribe(true)
.create_network();
- //receive message
+ // receive message
let raw_message = RawMessage {
source: Some(PeerId::random()),
data: vec![],
@@ -2291,14 +2293,14 @@ fn test_gossip_to_at_most_gossip_factor_peers() {
};
gs.handle_received_message(raw_message.clone(), &PeerId::random());
- //emit gossip
+ // emit gossip
gs.emit_gossip();
// Transform the inbound message
let message = &gs.data_transform.inbound_transform(raw_message).unwrap();
let msg_id = gs.config.message_id(message);
- //check that exactly config.gossip_lazy() many gossip messages were sent.
+ // check that exactly gossip_factor * (number of non-mesh peers) many gossip messages were sent.
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action {
RpcOut::IHave(IHave {
topic_hash,
@@ -2316,7 +2318,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() {
fn test_accept_only_outbound_peer_grafts_when_mesh_full() {
let config: Config = Config::default();
- //enough peers to fill the mesh
+ // enough peers to fill the mesh
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -2328,30 +2330,30 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() {
gs.handle_graft(&peer, topics.clone());
}
- //assert current mesh size
+ // assert current mesh size
assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high());
- //create an outbound and an inbound peer
+ // create an outbound and an inbound peer
let (inbound, _in_reciver) = add_peer(&mut gs, &topics, false, false);
let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false);
- //send grafts
+ // send grafts
gs.handle_graft(&inbound, vec![topics[0].clone()]);
gs.handle_graft(&outbound, vec![topics[0].clone()]);
- //assert mesh size
+ // assert mesh size
assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1);
- //inbound is not in mesh
+ // inbound is not in mesh
assert!(!gs.mesh[&topics[0]].contains(&inbound));
- //outbound is in mesh
+ // outbound is in mesh
assert!(gs.mesh[&topics[0]].contains(&outbound));
}
#[test]
fn test_do_not_remove_too_many_outbound_peers() {
- //use an extreme case to catch errors with high probability
+ // use an extreme case to catch errors with high probability
let m = 50;
let n = 2 * m;
let config = ConfigBuilder::default()
@@ -2362,7 +2364,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
.build()
.unwrap();
- //fill the mesh with inbound connections
+ // fill the mesh with inbound connections
let (mut gs, peers, _receivers, topics) = inject_nodes1()
.peer_no(n)
.topics(vec!["test".into()])
@@ -2375,7 +2377,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
gs.handle_graft(&peer, topics.clone());
}
- //create m outbound connections and graft (we will accept the graft)
+ // create m outbound connections and graft (we will accept the graft)
let mut outbound = HashSet::new();
for _ in 0..m {
let (peer, _) = add_peer(&mut gs, &topics, true, false);
@@ -2383,7 +2385,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
gs.handle_graft(&peer, topics.clone());
}
- //mesh is overly full
+ // mesh is overly full
assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m);
// run a heartbeat
@@ -2392,7 +2394,7 @@ fn test_do_not_remove_too_many_outbound_peers() {
// Peers should be removed to reach n
assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n);
- //all outbound peers are still in the mesh
+ // all outbound peers are still in the mesh
assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p)));
}
@@ -2412,7 +2414,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() {
gs.handle_graft(&peer, topics.clone());
}
- //create config.mesh_outbound_min() many outbound connections without grafting
+ // create config.mesh_outbound_min() many outbound connections without grafting
let mut peers = vec![];
for _ in 0..config.mesh_outbound_min() {
peers.push(add_peer(&mut gs, &topics, true, false));
@@ -2435,7 +2437,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() {
fn test_prune_negative_scored_peers() {
let config = Config::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2449,16 +2451,16 @@ fn test_prune_negative_scored_peers() {
)))
.create_network();
- //add penalty to peer
+ // add penalty to peer
gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1);
- //execute heartbeat
+ // execute heartbeat
gs.heartbeat();
- //peer should not be in mesh anymore
+ // peer should not be in mesh anymore
assert!(gs.mesh[&topics[0]].is_empty());
- //check prune message
+ // check prune message
let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
peer_id == &peers[0]
&& match m {
@@ -2481,7 +2483,7 @@ fn test_prune_negative_scored_peers() {
#[test]
fn test_dont_graft_to_negative_scored_peers() {
let config = Config::default();
- //init full mesh
+ // init full mesh
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -2493,34 +2495,34 @@ fn test_dont_graft_to_negative_scored_peers() {
)))
.create_network();
- //add two additional peers that will not be part of the mesh
+ // add two additional peers that will not be part of the mesh
let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false);
let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false);
- //reduce score of p1 to negative
+ // reduce score of p1 to negative
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1);
- //handle prunes of all other peers
+ // handle prunes of all other peers
for p in peers {
gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]);
}
- //heartbeat
+ // heartbeat
gs.heartbeat();
- //assert that mesh only contains p2
+ // assert that mesh only contains p2
assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1);
assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2));
}
-///Note that in this test also without a penalty the px would be ignored because of the
+/// Note that in this test the px would be ignored even without a penalty because of the
/// acceptPXThreshold, but the spec still explicitly states the rule that px from negative
/// peers should get ignored, therefore we test it here.
#[test]
fn test_ignore_px_from_negative_scored_peer() {
let config = Config::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -2532,10 +2534,10 @@ fn test_ignore_px_from_negative_scored_peer() {
)))
.create_network();
- //penalize peer
+ // penalize peer
gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1);
- //handle prune from single peer with px peers
+ // handle prune from single peer with px peers
let px = vec![PeerInfo {
peer_id: Some(PeerId::random()),
}];
@@ -2549,7 +2551,7 @@ fn test_ignore_px_from_negative_scored_peer() {
)],
);
- //assert no dials
+ // assert no dials
assert_eq!(
gs.events
.iter()
@@ -2760,7 +2762,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() {
collected_messages
});
- //the message got sent to p2
+ // the message got sent to p2
assert!(sent_messages
.iter()
.map(|(peer_id, msg)| (
@@ -2768,7 +2770,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() {
gs.data_transform.inbound_transform(msg.clone()).unwrap()
))
.any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id));
- //the message got not sent to p1
+ // the message did not get sent to p1
assert!(sent_messages
.iter()
.map(|(peer_id, msg)| (
@@ -2786,7 +2788,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() {
gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight,
..PeerScoreThresholds::default()
};
- //build full mesh
+ // build full mesh
let (mut gs, peers, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -2802,21 +2804,21 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() {
gs.handle_graft(&peer, topics.clone());
}
- //add two additional peers that will not be part of the mesh
+ // add two additional peers that will not be part of the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //reduce score of p1 below peer_score_thresholds.gossip_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.gossip_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold
+ // reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
- //message that other peers have
+ // message that other peers have
let raw_message = RawMessage {
source: Some(PeerId::random()),
data: vec![],
@@ -2863,31 +2865,31 @@ fn test_do_not_publish_to_peer_below_publish_threshold() {
..PeerScoreThresholds::default()
};
- //build mesh with no peers and no subscribed topics
+ // build mesh with no peers and no subscribed topics
let (mut gs, _, mut receivers, _) = inject_nodes1()
.gs_config(config)
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //create a new topic for which we are not subscribed
+ // create a new topic for which we are not subscribed
let topic = Topic::new("test");
let topics = vec![topic.hash()];
- //add two additional peers that will be added to the mesh
+ // add two additional peers that will be added to the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //reduce score of p1 below peer_score_thresholds.publish_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.publish_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
+ // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
- //a heartbeat will remove the peers from the mesh
+ // a heartbeat will remove the peers from the mesh
gs.heartbeat();
// publish on topic
@@ -2907,7 +2909,7 @@ fn test_do_not_publish_to_peer_below_publish_threshold() {
collected_publish
});
- //assert only published to p2
+ // assert only published to p2
assert_eq!(publishes.len(), 1);
assert_eq!(publishes[0].0, p2);
}
@@ -2921,28 +2923,28 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() {
publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight,
..PeerScoreThresholds::default()
};
- //build mesh with no peers
+ // build mesh with no peers
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.topics(vec!["test".into()])
.gs_config(config)
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //add two additional peers that will be added to the mesh
+ // add two additional peers that will be added to the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //reduce score of p1 below peer_score_thresholds.publish_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.publish_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
+ // reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
- //a heartbeat will remove the peers from the mesh
+ // a heartbeat will remove the peers from the mesh
gs.heartbeat();
// publish on topic
@@ -2962,7 +2964,7 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() {
collected_publish
});
- //assert only published to p2
+ // assert only published to p2
assert_eq!(publishes.len(), 1);
assert!(publishes[0].0 == p2);
}
@@ -2978,23 +2980,23 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
..PeerScoreThresholds::default()
};
- //build mesh with no peers
+ // build mesh with no peers
let (mut gs, _, _, topics) = inject_nodes1()
.topics(vec!["test".into()])
.gs_config(config.clone())
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //add two additional peers that will be added to the mesh
+ // add two additional peers that will be added to the mesh
let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false);
let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false);
- //reduce score of p1 below peer_score_thresholds.graylist_threshold
- //note that penalties get squared so two penalties means a score of
+ // reduce score of p1 below peer_score_thresholds.graylist_threshold
+ // note that penalties get squared so two penalties means a score of
// 4 * peer_score_params.behaviour_penalty_weight.
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2);
- //reduce score of p2 below publish_threshold but not below graylist_threshold
+ // reduce score of p2 below publish_threshold but not below graylist_threshold
gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
let raw_message1 = RawMessage {
@@ -3053,10 +3055,10 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
message_ids: vec![config.message_id(message2)],
});
- //clear events
+ // clear events
gs.events.clear();
- //receive from p1
+ // receive from p1
gs.on_connection_handler_event(
p1,
ConnectionId::new_unchecked(0),
@@ -3070,7 +3072,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
},
);
- //only the subscription event gets processed, the rest is dropped
+ // only the subscription event gets processed, the rest is dropped
assert_eq!(gs.events.len(), 1);
assert!(matches!(
gs.events[0],
@@ -3082,7 +3084,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
message_ids: vec![config.message_id(message4)],
});
- //receive from p2
+ // receive from p2
gs.on_connection_handler_event(
p2,
ConnectionId::new_unchecked(0),
@@ -3096,7 +3098,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() {
},
);
- //events got processed
+ // events got processed
assert!(gs.events.len() > 1);
}
@@ -3145,7 +3147,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() {
0
);
- //handle prune from peer peers[1] with px peers
+ // handle prune from peer peers[1] with px peers
let px = vec![PeerInfo {
peer_id: Some(PeerId::random()),
}];
@@ -3158,7 +3160,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() {
)],
);
- //assert there are dials now
+ // assert there are dials now
assert!(
gs.events
.iter()
@@ -3178,7 +3180,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() {
.build()
.unwrap();
- //build mesh with more peers than mesh can hold
+ // build mesh with more peers than mesh can hold
let n = config.mesh_n_high() + 1;
let (mut gs, peers, _receivers, topics) = inject_nodes1()
.peer_no(n)
@@ -3198,21 +3200,21 @@ fn test_keep_best_scoring_peers_on_oversubscription() {
gs.handle_graft(peer, topics.clone());
}
- //assign scores to peers equalling their index
+ // assign scores to peers equalling their index
- //set random positive scores
+ // set random positive scores
for (index, peer) in peers.iter().enumerate() {
gs.set_application_score(peer, index as f64);
}
assert_eq!(gs.mesh[&topics[0]].len(), n);
- //heartbeat to prune some peers
+ // heartbeat to prune some peers
gs.heartbeat();
assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n());
- //mesh contains retain_scores best peers
+ // mesh contains retain_scores best peers
assert!(gs.mesh[&topics[0]].is_superset(
&peers[(n - config.retain_scores())..]
.iter()
@@ -3239,7 +3241,7 @@ fn test_scoring_p1() {
.insert(topic_hash, topic_params.clone());
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, _) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3250,9 +3252,9 @@ fn test_scoring_p1() {
.scoring(Some((peer_score_params, peer_score_thresholds)))
.create_network();
- //sleep for 2 times the mesh_quantum
+ // sleep for 2 times the mesh_quantum
sleep(topic_params.time_in_mesh_quantum * 2);
- //refresh scores
+ // refresh scores
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0])
@@ -3265,9 +3267,9 @@ fn test_scoring_p1() {
"score should be less than 3 * time_in_mesh_weight * topic_weight"
);
- //sleep again for 2 times the mesh_quantum
+ // sleep again for 2 times the mesh_quantum
sleep(topic_params.time_in_mesh_quantum * 2);
- //refresh scores
+ // refresh scores
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0])
@@ -3275,9 +3277,9 @@ fn test_scoring_p1() {
"score should be at least 4 * time_in_mesh_weight * topic_weight"
);
- //sleep for enough periods to reach maximum
+ // sleep for enough periods to reach maximum
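+ // (the time-in-mesh contribution is capped at time_in_mesh_cap quanta, so the P1
+ // score stops increasing after this point)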
sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32);
- //refresh scores
+ // refresh scores
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
@@ -3309,7 +3311,7 @@ fn test_scoring_p2() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
+ time_in_mesh_weight: 0.0, // deactivate time in mesh
first_message_deliveries_weight: 2.0,
first_message_deliveries_cap: 10.0,
first_message_deliveries_decay: 0.9,
@@ -3321,7 +3323,7 @@ fn test_scoring_p2() {
.insert(topic_hash, topic_params.clone());
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(2)
.topics(vec!["test".into()])
@@ -3338,9 +3340,9 @@ fn test_scoring_p2() {
};
let m1 = random_message(&mut seq, &topics);
- //peer 0 delivers message first
+ // peer 0 delivers message first
deliver_message(&mut gs, 0, m1.clone());
- //peer 1 delivers message second
+ // peer 1 delivers message second
deliver_message(&mut gs, 1, m1);
assert_eq!(
@@ -3355,7 +3357,7 @@ fn test_scoring_p2() {
"there should be no score for second message deliveries * topic_weight"
);
- //peer 2 delivers two new messages
+ // peer 1 delivers two new messages
deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
assert_eq!(
@@ -3364,7 +3366,7 @@ fn test_scoring_p2() {
"score should be exactly 2 * first_message_deliveries_weight * topic_weight"
);
- //test decaying
+ // test decaying
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert_eq!(
@@ -3385,7 +3387,7 @@ fn test_scoring_p2() {
first_message_deliveries_weight * topic_weight"
);
- //test cap
+ // test cap
for _ in 0..topic_params.first_message_deliveries_cap as u64 {
deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
}
@@ -3407,8 +3409,8 @@ fn test_scoring_p3() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+ time_in_mesh_weight: 0.0, // deactivate time in mesh
+ first_message_deliveries_weight: 0.0, // deactivate first time deliveries
mesh_message_deliveries_weight: -2.0,
mesh_message_deliveries_decay: 0.9,
mesh_message_deliveries_cap: 10.0,
@@ -3421,7 +3423,7 @@ fn test_scoring_p3() {
peer_score_params.topics.insert(topic_hash, topic_params);
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(2)
.topics(vec!["test".into()])
@@ -3439,35 +3441,35 @@ fn test_scoring_p3() {
let mut expected_message_deliveries = 0.0;
- //messages used to test window
+ // messages used to test window
let m1 = random_message(&mut seq, &topics);
let m2 = random_message(&mut seq, &topics);
- //peer 1 delivers m1
+ // peer 1 delivers m1
deliver_message(&mut gs, 1, m1.clone());
- //peer 0 delivers two message
+ // peer 0 delivers two messages
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
expected_message_deliveries += 2.0;
sleep(Duration::from_millis(60));
- //peer 1 delivers m2
+ // peer 1 delivers m2
deliver_message(&mut gs, 1, m2.clone());
sleep(Duration::from_millis(70));
- //peer 0 delivers m1 and m2 only m2 gets counted
+ // peer 0 delivers m1 and m2, only m2 gets counted
deliver_message(&mut gs, 0, m1);
deliver_message(&mut gs, 0, m2);
expected_message_deliveries += 1.0;
sleep(Duration::from_millis(900));
- //message deliveries penalties get activated, peer 0 has only delivered 3 messages and
+ // message delivery penalties get activated; peer 0 has only delivered 3 messages and
// therefore gets a penalty
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
@@ -3483,10 +3485,10 @@ fn test_scoring_p3() {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //apply 10 decays
+ // apply 10 decays
for _ in 0..10 {
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
}
assert_eq!(
@@ -3505,8 +3507,8 @@ fn test_scoring_p3b() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+ time_in_mesh_weight: 0.0, // deactivate time in mesh
+ first_message_deliveries_weight: 0.0, // deactivate first time deliveries
mesh_message_deliveries_weight: -2.0,
mesh_message_deliveries_decay: 0.9,
mesh_message_deliveries_cap: 10.0,
@@ -3522,7 +3524,7 @@ fn test_scoring_p3b() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3540,49 +3542,49 @@ fn test_scoring_p3b() {
let mut expected_message_deliveries = 0.0;
- //add some positive score
+ // add some positive score
gs.peer_score
.as_mut()
.unwrap()
.0
.set_application_score(&peers[0], 100.0);
- //peer 0 delivers two message
+ // peer 0 delivers two messages
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
expected_message_deliveries += 2.0;
sleep(Duration::from_millis(1050));
- //activation kicks in
+ // activation kicks in
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
- //prune peer
+ // prune peer
gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]);
- //wait backoff
+ // wait backoff
sleep(Duration::from_millis(130));
- //regraft peer
+ // regraft peer
gs.handle_graft(&peers[0], topics.clone());
- //the score should now consider p3b
+ // the score should now consider p3b
let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2);
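+ // P3b: the peer left the mesh while below the mesh message deliveries threshold (the
+ // 5f64 above), so the squared deficit is kept as a mesh failure penalty; the assertion
+ // below weights it with -3.0 and the topic weight 0.7 on top of the 100.0 app score.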
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
100.0 + expected_b3 * -3.0 * 0.7
);
- //we can also add a new p3 to the score
+ // we can also add a new p3 to the score
- //peer 0 delivers one message
+ // peer 0 delivers one message
deliver_message(&mut gs, 0, random_message(&mut seq, &topics));
expected_message_deliveries += 1.0;
sleep(Duration::from_millis(1050));
gs.peer_score.as_mut().unwrap().0.refresh_scores();
- expected_message_deliveries *= 0.9; //decay
+ expected_message_deliveries *= 0.9; // decay
expected_b3 *= 0.95;
assert_eq!(
@@ -3601,10 +3603,14 @@ fn test_scoring_p4_valid_message() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3614,7 +3620,7 @@ fn test_scoring_p4_valid_message() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3630,7 +3636,7 @@ fn test_scoring_p4_valid_message() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers valid message
+ // peer 0 delivers valid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -3639,7 +3645,7 @@ fn test_scoring_p4_valid_message() {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //message m1 gets validated
+ // message m1 gets validated
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3659,10 +3665,14 @@ fn test_scoring_p4_invalid_signature() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3672,7 +3682,7 @@ fn test_scoring_p4_invalid_signature() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3685,7 +3695,7 @@ fn test_scoring_p4_invalid_signature() {
let mut seq = 0;
- //peer 0 delivers message with invalid signature
+ // peer 0 delivers message with invalid signature
let m = random_message(&mut seq, &topics);
gs.on_connection_handler_event(
@@ -3717,10 +3727,14 @@ fn test_scoring_p4_message_from_self() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3730,7 +3744,7 @@ fn test_scoring_p4_message_from_self() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3746,7 +3760,7 @@ fn test_scoring_p4_message_from_self() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message from self
+ // peer 0 delivers invalid message from self
let mut m = random_message(&mut seq, &topics);
m.source = Some(*gs.publish_config.get_own_id().unwrap());
@@ -3767,10 +3781,14 @@ fn test_scoring_p4_ignored_message() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3780,7 +3798,7 @@ fn test_scoring_p4_ignored_message() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3796,7 +3814,7 @@ fn test_scoring_p4_ignored_message() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers ignored message
+ // peer 0 delivers ignored message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -3805,7 +3823,7 @@ fn test_scoring_p4_ignored_message() {
// Transform the inbound message
let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
- //message m1 gets ignored
+ // message m1 gets ignored
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3825,10 +3843,14 @@ fn test_scoring_p4_application_invalidated_message() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3838,7 +3860,7 @@ fn test_scoring_p4_application_invalidated_message() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3854,7 +3876,7 @@ fn test_scoring_p4_application_invalidated_message() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message
+ // peer 0 delivers invalid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -3863,7 +3885,7 @@ fn test_scoring_p4_application_invalidated_message() {
// Transform the inbound message
let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
- //message m1 gets rejected
+ // message m1 gets rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3886,10 +3908,14 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3899,7 +3925,7 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with two peers
+ // build mesh with two peers
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(2)
.topics(vec!["test".into()])
@@ -3915,20 +3941,20 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message
+ // peer 0 delivers invalid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
// Transform the inbound message
let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap();
- //peer 1 delivers same message
+ // peer 1 delivers same message
deliver_message(&mut gs, 1, m1);
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0);
- //message m1 gets rejected
+ // message m1 gets rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -3955,10 +3981,14 @@ fn test_scoring_p4_three_application_invalid_messages() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -3968,7 +3998,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -3984,7 +4014,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers two invalid message
+ // peer 0 delivers three invalid messages
let m1 = random_message(&mut seq, &topics);
let m2 = random_message(&mut seq, &topics);
let m3 = random_message(&mut seq, &topics);
@@ -4002,7 +4032,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //messages gets rejected
+ // messages get rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -4021,7 +4051,7 @@ fn test_scoring_p4_three_application_invalid_messages() {
MessageAcceptance::Reject,
);
- //number of invalid messages gets squared
+ // number of invalid messages gets squared
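+ // 3 rejected deliveries => 3^2 = 9, times invalid_message_deliveries_weight (-2.0)
+ // and topic_weight (0.7)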
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
9.0 * -2.0 * 0.7
@@ -4038,10 +4068,14 @@ fn test_scoring_p4_decay() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let topic_params = TopicScoreParams {
- time_in_mesh_weight: 0.0, //deactivate time in mesh
- first_message_deliveries_weight: 0.0, //deactivate first time deliveries
- mesh_message_deliveries_weight: 0.0, //deactivate message deliveries
- mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties
+ // deactivate time in mesh
+ time_in_mesh_weight: 0.0,
+ // deactivate first time deliveries
+ first_message_deliveries_weight: 0.0,
+ // deactivate message deliveries
+ mesh_message_deliveries_weight: 0.0,
+ // deactivate mesh failure penalties
+ mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -2.0,
invalid_message_deliveries_decay: 0.9,
topic_weight: 0.7,
@@ -4051,7 +4085,7 @@ fn test_scoring_p4_decay() {
peer_score_params.app_specific_weight = 1.0;
let peer_score_thresholds = PeerScoreThresholds::default();
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -4067,7 +4101,7 @@ fn test_scoring_p4_decay() {
gs.handle_received_message(msg, &peers[index]);
};
- //peer 0 delivers invalid message
+ // peer 0 delivers invalid message
let m1 = random_message(&mut seq, &topics);
deliver_message(&mut gs, 0, m1.clone());
@@ -4075,7 +4109,7 @@ fn test_scoring_p4_decay() {
let message1 = &gs.data_transform.inbound_transform(m1).unwrap();
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0);
- //message m1 gets rejected
+ // message m1 gets rejected
gs.report_message_validation_result(
&config.message_id(message1),
&peers[0],
@@ -4087,7 +4121,7 @@ fn test_scoring_p4_decay() {
-2.0 * 0.7
);
- //we decay
+ // we decay
gs.peer_score.as_mut().unwrap().0.refresh_scores();
// the number of invalids gets decayed to 0.9 and then squared in the score
@@ -4104,7 +4138,7 @@ fn test_scoring_p5() {
..PeerScoreParams::default()
};
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, _, _) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
@@ -4141,7 +4175,7 @@ fn test_scoring_p6() {
.scoring(Some((peer_score_params, PeerScoreThresholds::default())))
.create_network();
- //create 5 peers with the same ip
+ // create 5 peers with the same ip
let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3));
let peers = vec![
add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0,
@@ -4151,7 +4185,7 @@ fn test_scoring_p6() {
add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0,
];
- //create 4 other peers with other ip
+ // create 4 other peers with other ip
let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4));
let others = vec![
add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0,
@@ -4160,12 +4194,12 @@ fn test_scoring_p6() {
add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0,
];
- //no penalties yet
+ // no penalties yet
for peer in peers.iter().chain(others.iter()) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0);
}
- //add additional connection for 3 others with addr
+ // add additional connection for 3 others with addr
for id in others.iter().take(3) {
gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id: *id,
@@ -4180,14 +4214,14 @@ fn test_scoring_p6() {
}));
}
- //penalties apply squared
+ // penalties apply squared
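+ // (P6 ip colocation: the surplus of peers sharing an IP over the threshold set
+ // earlier in this test is squared, so the expected score below is 3^2 = 9 times
+ // the colocation weight of -2.0)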
for peer in peers.iter().chain(others.iter().take(3)) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0);
}
- //fourth other peer still no penalty
+ // fourth other peer still no penalty
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0);
- //add additional connection for 3 of the peers to addr2
+ // add additional connection for 3 of the peers to addr2
for peer in peers.iter().take(3) {
gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id: *peer,
@@ -4202,7 +4236,7 @@ fn test_scoring_p6() {
}));
}
- //double penalties for the first three of each
+ // double penalties for the first three of each
for peer in peers.iter().take(3).chain(others.iter().take(3)) {
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(peer),
@@ -4210,7 +4244,7 @@ fn test_scoring_p6() {
);
}
- //single penalties for the rest
+ // single penalties for the rest
for peer in peers.iter().skip(3) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0);
}
@@ -4219,7 +4253,7 @@ fn test_scoring_p6() {
4.0 * -2.0
);
- //two times same ip doesn't count twice
+ // two times same ip doesn't count twice
gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id: peers[0],
connection_id: ConnectionId::new_unchecked(0),
@@ -4232,8 +4266,8 @@ fn test_scoring_p6() {
other_established: 2,
}));
- //nothing changed
- //double penalties for the first three of each
+ // nothing changed
+ // double penalties for the first three of each
for peer in peers.iter().take(3).chain(others.iter().take(3)) {
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(peer),
@@ -4241,7 +4275,7 @@ fn test_scoring_p6() {
);
}
- //single penalties for the rest
+ // single penalties for the rest
for peer in peers.iter().skip(3) {
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0);
}
@@ -4274,7 +4308,7 @@ fn test_scoring_p7_grafts_before_backoff() {
.scoring(Some((peer_score_params, PeerScoreThresholds::default())))
.create_network();
- //remove peers from mesh and send prune to them => this adds a backoff for the peers
+ // remove peers from mesh and send prune to them => this adds a backoff for the peers
for peer in peers.iter().take(2) {
gs.mesh.get_mut(&topics[0]).unwrap().remove(peer);
gs.send_graft_prune(
@@ -4284,31 +4318,31 @@ fn test_scoring_p7_grafts_before_backoff() {
);
}
- //wait 50 millisecs
+ // wait 50 millisecs
sleep(Duration::from_millis(50));
- //first peer tries to graft
+ // first peer tries to graft
gs.handle_graft(&peers[0], vec![topics[0].clone()]);
- //double behaviour penalty for first peer (squared)
+ // double behaviour penalty for first peer (squared)
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
4.0 * -2.0
);
- //wait 100 millisecs
+ // wait 100 millisecs
sleep(Duration::from_millis(100));
- //second peer tries to graft
+ // second peer tries to graft
gs.handle_graft(&peers[1], vec![topics[0].clone()]);
- //single behaviour penalty for second peer
+ // single behaviour penalty for second peer
assert_eq!(
gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
1.0 * -2.0
);
- //test decay
+ // test decay
gs.peer_score.as_mut().unwrap().0.refresh_scores();
assert_eq!(
@@ -4327,7 +4361,7 @@ fn test_opportunistic_grafting() {
.mesh_n_low(3)
.mesh_n(5)
.mesh_n_high(7)
- .mesh_outbound_min(0) //deactivate outbound handling
+ .mesh_outbound_min(0) // deactivate outbound handling
.opportunistic_graft_ticks(2)
.opportunistic_graft_peers(2)
.build()
@@ -4351,30 +4385,30 @@ fn test_opportunistic_grafting() {
.scoring(Some((peer_score_params, thresholds)))
.create_network();
- //fill mesh with 5 peers
+ // fill mesh with 5 peers
for peer in &peers {
gs.handle_graft(peer, topics.clone());
}
- //add additional 5 peers
+ // add additional 5 peers
let others: Vec<_> = (0..5)
.map(|_| add_peer(&mut gs, &topics, false, false))
.collect();
- //currently mesh equals peers
+ // currently mesh equals peers
assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect());
- //give others high scores (but the first two have not high enough scores)
+ // set scores for the peers in the mesh
for (i, peer) in peers.iter().enumerate().take(5) {
gs.set_application_score(peer, 0.0 + i as f64);
}
- //set scores for peers in the mesh
+ // give the others high scores (but the first two are not high enough)
for (i, (peer, _receiver)) in others.iter().enumerate().take(5) {
gs.set_application_score(peer, 0.0 + i as f64);
}
- //this gives a median of exactly 2.0 => should not apply opportunistic grafting
+ // this gives a median of exactly 2.0 => should not apply opportunistic grafting
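+ // (two heartbeats because opportunistic_graft_ticks is set to 2 in the config above)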
gs.heartbeat();
gs.heartbeat();
@@ -4384,10 +4418,10 @@ fn test_opportunistic_grafting() {
"should not apply opportunistic grafting"
);
- //reduce middle score to 1.0 giving a median of 1.0
+ // reduce middle score to 1.0 giving a median of 1.0
gs.set_application_score(&peers[2], 1.0);
- //opportunistic grafting after two heartbeats
+ // opportunistic grafting after two heartbeats
gs.heartbeat();
assert_eq!(
@@ -4417,17 +4451,17 @@ fn test_opportunistic_grafting() {
#[test]
fn test_ignore_graft_from_unknown_topic() {
- //build gossipsub without subscribing to any topics
+ // build gossipsub without subscribing to any topics
let (mut gs, peers, receivers, _) = inject_nodes1()
.peer_no(1)
.topics(vec![])
.to_subscribe(false)
.create_network();
- //handle an incoming graft for some topic
+ // handle an incoming graft for some topic
gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]);
- //assert that no prune got created
+ // assert that no prune got created
let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. }));
assert_eq!(
control_msgs, 0,
@@ -4438,18 +4472,18 @@ fn test_ignore_graft_from_unknown_topic() {
#[test]
fn test_ignore_too_many_iwants_from_same_peer_for_same_message() {
let config = Config::default();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
.to_subscribe(false)
.create_network();
- //add another peer not in the mesh
+ // add another peer not in the mesh
let (peer, receiver) = add_peer(&mut gs, &topics, false, false);
receivers.insert(peer, receiver);
- //receive a message
+ // receive a message
let mut seq = 0;
let m1 = random_message(&mut seq, &topics);
@@ -4460,10 +4494,10 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() {
gs.handle_received_message(m1, &PeerId::random());
- //clear events
+ // clear events
let receivers = flush_events(&mut gs, receivers);
- //the first gossip_retransimission many iwants return the valid message, all others are
+ // the first gossip_retransimission many iwants return the valid message, all others are
// ignored.
for _ in 0..(2 * config.gossip_retransimission() + 10) {
gs.handle_iwant(&peer, vec![id.clone()]);
@@ -4490,7 +4524,7 @@ fn test_ignore_too_many_ihaves() {
.max_ihave_messages(10)
.build()
.unwrap();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -4498,15 +4532,15 @@ fn test_ignore_too_many_ihaves() {
.gs_config(config.clone())
.create_network();
- //add another peer not in the mesh
+ // add another peer not in the mesh
let (peer, receiver) = add_peer(&mut gs, &topics, false, false);
receivers.insert(peer, receiver);
- //peer has 20 messages
+ // peer has 20 messages
let mut seq = 0;
let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect();
- //peer sends us one ihave for each message in order
+ // peer sends us one ihave for each message in order
for raw_message in &messages {
// Transform the inbound message
let message = &gs
@@ -4527,7 +4561,7 @@ fn test_ignore_too_many_ihaves() {
.map(|m| config.message_id(&m))
.collect();
- //we send iwant only for the first 10 messages
+ // we send iwant only for the first 10 messages
let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| {
p == &peer
&& matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))
@@ -4537,7 +4571,7 @@ fn test_ignore_too_many_ihaves() {
"exactly the first ten ihaves should be processed and one iwant for each created"
);
- //after a heartbeat everything is forgotten
+ // after a heartbeat everything is forgotten
gs.heartbeat();
for raw_message in messages[10..].iter() {
@@ -4553,7 +4587,7 @@ fn test_ignore_too_many_ihaves() {
);
}
- //we sent iwant for all 10 messages
+ // we sent iwant for all 10 messages
let (control_msgs, _) = count_control_msgs(receivers, |p, action| {
p == &peer
&& matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1)
@@ -4568,7 +4602,7 @@ fn test_ignore_too_many_messages_in_ihave() {
.max_ihave_length(10)
.build()
.unwrap();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, _, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -4576,11 +4610,11 @@ fn test_ignore_too_many_messages_in_ihave() {
.gs_config(config.clone())
.create_network();
- //add another peer not in the mesh
+ // add another peer not in the mesh
let (peer, receiver) = add_peer(&mut gs, &topics, false, false);
receivers.insert(peer, receiver);
- //peer has 20 messages
+ // peer has 20 messages
let mut seq = 0;
let message_ids: Vec<_> = (0..20)
.map(|_| random_message(&mut seq, &topics))
@@ -4588,7 +4622,7 @@ fn test_ignore_too_many_messages_in_ihave() {
.map(|msg| config.message_id(&msg))
.collect();
- //peer sends us three ihaves
+ // peer sends us three ihaves
gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]);
gs.handle_ihave(
&peer,
@@ -4601,7 +4635,7 @@ fn test_ignore_too_many_messages_in_ihave() {
let first_twelve: HashSet<_> = message_ids.iter().take(12).collect();
- //we send iwant only for the first 10 messages
+ // we send iwant only for the first 10 messages
let mut sum = 0;
let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc {
RpcOut::IWant(IWant { message_ids }) => {
@@ -4620,14 +4654,14 @@ fn test_ignore_too_many_messages_in_ihave() {
assert_eq!(sum, 10, "exactly the first ten ihaves should be processed");
- //after a heartbeat everything is forgotten
+ // after a heartbeat everything is forgotten
gs.heartbeat();
gs.handle_ihave(
&peer,
vec![(topics[0].clone(), message_ids[10..20].to_vec())],
);
- //we sent 10 iwant messages ids via a IWANT rpc.
+ // we sent 10 iwant message ids via an IWANT rpc.
let mut sum = 0;
let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc {
RpcOut::IWant(IWant { message_ids }) => {
@@ -4649,7 +4683,7 @@ fn test_limit_number_of_message_ids_inside_ihave() {
.max_ihave_length(100)
.build()
.unwrap();
- //build gossipsub with full mesh
+ // build gossipsub with full mesh
let (mut gs, peers, mut receivers, topics) = inject_nodes1()
.peer_no(config.mesh_n_high())
.topics(vec!["test".into()])
@@ -4657,24 +4691,24 @@ fn test_limit_number_of_message_ids_inside_ihave() {
.gs_config(config)
.create_network();
- //graft to all peers to really fill the mesh with all the peers
+ // graft to all peers to really fill the mesh with all the peers
for peer in peers {
gs.handle_graft(&peer, topics.clone());
}
- //add two other peers not in the mesh
+ // add two other peers not in the mesh
let (p1, receiver1) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p1, receiver1);
let (p2, receiver2) = add_peer(&mut gs, &topics, false, false);
receivers.insert(p2, receiver2);
- //receive 200 messages from another peer
+ // receive 200 messages from another peer
let mut seq = 0;
for _ in 0..200 {
gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random());
}
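The randomness asserted below comes from sampling the recently seen message ids rather than taking the first N of them; a minimal sketch of that selection (illustrative, using the `rand` crate's `choose_multiple`, not the crate's internal code) looks like this.

```rust
use rand::seq::SliceRandom;

// Illustrative: pick at most `max_ihave_length` ids uniformly at random from
// the recently seen message ids when emitting IHAVE gossip.
fn sample_ihave_ids(recent_ids: &[String], max_ihave_length: usize) -> Vec<String> {
    let mut rng = rand::thread_rng();
    recent_ids
        .choose_multiple(&mut rng, max_ihave_length)
        .cloned()
        .collect()
}
```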
- //emit gossip
+ // emit gossip
gs.emit_gossip();
// both peers should have gotten 100 random ihave messages, to assert the randomness, we
@@ -4727,12 +4761,10 @@ fn test_limit_number_of_message_ids_inside_ihave() {
#[test]
fn test_iwant_penalties() {
- /*
- use tracing_subscriber::EnvFilter;
- let _ = tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .try_init();
- */
+ // use tracing_subscriber::EnvFilter;
+ // let _ = tracing_subscriber::fmt()
+ // .with_env_filter(EnvFilter::from_default_env())
+ // .try_init();
let config = ConfigBuilder::default()
.iwant_followup_time(Duration::from_secs(4))
.build()
@@ -4862,7 +4894,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() {
.gs_config(config)
.create_network();
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let (p1, receiver1) = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -4877,10 +4909,10 @@ fn test_publish_to_floodsub_peers_without_flood_publish() {
add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
receivers.insert(p2, receiver2);
- //p1 and p2 are not in the mesh
+ // p1 and p2 are not in the mesh
assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2));
- //publish a message
+ // publish a message
let publish_data = vec![0; 42];
gs.publish(Topic::new("test"), publish_data).unwrap();
@@ -4921,7 +4953,7 @@ fn test_do_not_use_floodsub_in_fanout() {
let topic = Topic::new("test");
let topics = vec![topic.hash()];
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let (p1, receiver1) = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -4936,7 +4968,7 @@ fn test_do_not_use_floodsub_in_fanout() {
add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
receivers.insert(p2, receiver2);
- //publish a message
+ // publish a message
let publish_data = vec![0; 42];
gs.publish(Topic::new("test"), publish_data).unwrap();
@@ -4977,7 +5009,7 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() {
let topic = Topic::new("test");
let topics = vec![topic.hash()];
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let _p1 = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5004,7 +5036,7 @@ fn test_dont_send_px_to_old_gossipsub_peers() {
.to_subscribe(false)
.create_network();
- //add an old gossipsub peer
+ // add an old gossipsub peer
let (p1, _receiver1) = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5014,14 +5046,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() {
Some(PeerKind::Gossipsub),
);
- //prune the peer
+ // prune the peer
gs.send_graft_prune(
HashMap::new(),
vec![(p1, topics.clone())].into_iter().collect(),
HashSet::new(),
);
- //check that prune does not contain px
+ // check that prune does not contain px
let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m {
RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(),
_ => false,
@@ -5031,14 +5063,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() {
#[test]
fn test_dont_send_floodsub_peers_in_px() {
- //build mesh with one peer
+ // build mesh with one peer
let (mut gs, peers, receivers, topics) = inject_nodes1()
.peer_no(1)
.topics(vec!["test".into()])
.to_subscribe(true)
.create_network();
- //add two floodsub peers
+ // add two floodsub peers
let _p1 = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5049,14 +5081,14 @@ fn test_dont_send_floodsub_peers_in_px() {
);
let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
- //prune only mesh node
+ // prune only mesh node
gs.send_graft_prune(
HashMap::new(),
vec![(peers[0], topics.clone())].into_iter().collect(),
HashSet::new(),
);
- //check that px in prune message is empty
+ // check that px in prune message is empty
let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m {
RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(),
_ => false,
@@ -5072,7 +5104,7 @@ fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() {
.to_subscribe(false)
.create_network();
- //add two floodsub peer, one explicit, one implicit
+ // add two floodsub peer, one explicit, one implicit
let _p1 = add_peer_with_addr_and_kind(
&mut gs,
&topics,
@@ -5139,7 +5171,7 @@ fn test_subscribe_to_invalid_topic() {
#[test]
fn test_subscribe_and_graft_with_negative_score() {
- //simulate a communication between two gossipsub instances
+ // simulate a communication between two gossipsub instances
let (mut gs1, _, _, topic_hashes) = inject_nodes1()
.topics(vec!["test".into()])
.scoring(Some((
@@ -5157,12 +5189,12 @@ fn test_subscribe_and_graft_with_negative_score() {
let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false);
let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false);
- //add penalty to peer p2
+ // add penalty to peer p2
gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2);
- //subscribe to topic in gs2
+ // subscribe to topic in gs2
gs2.subscribe(&topic).unwrap();
let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>,
@@ -5191,17 +5223,17 @@ fn test_subscribe_and_graft_with_negative_score() {
new_receivers
};
- //forward the subscribe message
+ // forward the subscribe message
let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers);
- //heartbeats on both
+ // heartbeats on both
gs1.heartbeat();
gs2.heartbeat();
- //forward messages again
+ // forward messages again
forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers);
- //nobody got penalized
+ // nobody got penalized
assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score);
}
diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs
index 6e7861bae10..d53908ad267 100644
--- a/protocols/gossipsub/src/config.rs
+++ b/protocols/gossipsub/src/config.rs
@@ -18,22 +18,22 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::borrow::Cow;
-use std::sync::Arc;
-use std::time::Duration;
-
-use crate::error::ConfigBuilderError;
-use crate::protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL};
-use crate::types::{Message, MessageId, PeerKind};
+use std::{borrow::Cow, sync::Arc, time::Duration};
use libp2p_identity::PeerId;
use libp2p_swarm::StreamProtocol;
+use crate::{
+ error::ConfigBuilderError,
+ protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL},
+ types::{Message, MessageId, PeerKind},
+};
+
/// The types of message validation that can be employed by gossipsub.
#[derive(Debug, Clone)]
pub enum ValidationMode {
- /// This is the default setting. This requires the message author to be a valid [`PeerId`] and to
- /// be present as well as the sequence number. All messages must have valid signatures.
+ /// This is the default setting. This requires the message author to be a valid [`PeerId`] and
+ /// to be present as well as the sequence number. All messages must have valid signatures.
///
/// NOTE: This setting will reject messages from nodes using
/// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have
@@ -134,8 +134,8 @@ impl Config {
/// Affects how peers are selected when pruning a mesh due to over subscription.
///
- /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are
- /// chosen randomly (D_score in the spec, default is 4).
+ /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder
+ /// are chosen randomly (D_score in the spec, default is 4).
pub fn retain_scores(&self) -> usize {
self.retain_scores
}
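A short, hedged example of setting these knobs through the builder (the method names match the public `ConfigBuilder` API; the concrete values are illustrative, not recommendations):

```rust
use libp2p_gossipsub::{Config, ConfigBuilder, ValidationMode};

// Sketch: keep four high-scoring peers when pruning an over-subscribed mesh
// and require signed, fully-attributed messages.
fn example_config() -> Config {
    ConfigBuilder::default()
        .retain_scores(4) // D_score
        .validation_mode(ValidationMode::Strict)
        .build()
        .expect("valid config")
}
```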
@@ -423,7 +423,9 @@ impl Default for ConfigBuilder {
}),
allow_self_origin: false,
do_px: false,
- prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented.
+ // NOTE: Increasing this currently has little effect until Signed
+ // records are implemented.
+ prune_peers: 0,
prune_backoff: Duration::from_secs(60),
unsubscribe_backoff: Duration::from_secs(10),
backoff_slack: 1,
@@ -457,7 +459,8 @@ impl From for ConfigBuilder {
}
impl ConfigBuilder {
- /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and `/meshsub/1.0.0`).
+ /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and
+ /// `/meshsub/1.0.0`).
pub fn protocol_id_prefix(
&mut self,
protocol_id_prefix: impl Into>,
@@ -547,8 +550,8 @@ impl ConfigBuilder {
/// Affects how peers are selected when pruning a mesh due to over subscription.
///
- /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are
- /// chosen randomly (D_score in the spec, default is 4).
+ /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the
+ /// remainder are chosen randomly (D_score in the spec, default is 4).
pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self {
self.config.retain_scores = retain_scores;
self
@@ -902,12 +905,15 @@ impl std::fmt::Debug for Config {
#[cfg(test)]
mod test {
- use super::*;
- use crate::topic::IdentityHash;
- use crate::Topic;
+ use std::{
+ collections::hash_map::DefaultHasher,
+ hash::{Hash, Hasher},
+ };
+
use libp2p_core::UpgradeInfo;
- use std::collections::hash_map::DefaultHasher;
- use std::hash::{Hash, Hasher};
+
+ use super::*;
+ use crate::{topic::IdentityHash, Topic};
#[test]
fn create_config_with_message_id_as_plain_function() {
diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs
index 047d50f2338..eae4c51214e 100644
--- a/protocols/gossipsub/src/error.rs
+++ b/protocols/gossipsub/src/error.rs
@@ -36,8 +36,8 @@ pub enum PublishError {
MessageTooLarge,
/// The compression algorithm failed.
TransformFailed(std::io::Error),
- /// Messages could not be sent because the queues for all peers were full. The usize represents the
- /// number of peers that were attempted.
+ /// Messages could not be sent because the queues for all peers were full. The usize represents
+ /// the number of peers that were attempted.
AllQueuesFull(usize),
}
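At the call site this variant can be matched like any other publish failure; a hedged sketch, assuming a configured `libp2p_gossipsub::Behaviour` with default type parameters:

```rust
use libp2p_gossipsub::{Behaviour, IdentTopic, PublishError};

// Sketch: report how many peers were attempted when every queue was full.
fn publish_or_log(gossipsub: &mut Behaviour, topic: IdentTopic, data: Vec<u8>) {
    match gossipsub.publish(topic, data) {
        Ok(_message_id) => {}
        Err(PublishError::AllQueuesFull(attempted_peers)) => {
            eprintln!("all {attempted_peers} peer queues were full, message dropped");
        }
        Err(other) => eprintln!("publish failed: {other}"),
    }
}
```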
diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs
index bdf58b74fc2..b64811bb062 100644
--- a/protocols/gossipsub/src/gossip_promises.rs
+++ b/protocols/gossipsub/src/gossip_promises.rs
@@ -18,13 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::peer_score::RejectReason;
-use crate::MessageId;
-use crate::ValidationError;
-use libp2p_identity::PeerId;
use std::collections::HashMap;
+
+use libp2p_identity::PeerId;
use web_time::Instant;
+use crate::{peer_score::RejectReason, MessageId, ValidationError};
+
/// Tracks recently sent `IWANT` messages and checks if peers respond to them.
#[derive(Default)]
pub(crate) struct GossipPromises {
diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs
index 5f9669c02c2..2936182c3f8 100644
--- a/protocols/gossipsub/src/handler.rs
+++ b/protocols/gossipsub/src/handler.rs
@@ -18,27 +18,31 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol::{GossipsubCodec, ProtocolConfig};
-use crate::rpc::Receiver;
-use crate::rpc_proto::proto;
-use crate::types::{PeerKind, RawMessage, Rpc, RpcOut};
-use crate::ValidationError;
-use asynchronous_codec::Framed;
-use futures::future::Either;
-use futures::prelude::*;
-use futures::StreamExt;
-use libp2p_core::upgrade::DeniedUpgrade;
-use libp2p_swarm::handler::{
- ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
- FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol,
-};
-use libp2p_swarm::Stream;
use std::{
pin::Pin,
task::{Context, Poll},
};
+
+use asynchronous_codec::Framed;
+use futures::{future::Either, prelude::*, StreamExt};
+use libp2p_core::upgrade::DeniedUpgrade;
+use libp2p_swarm::{
+ handler::{
+ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
+ FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol,
+ },
+ Stream,
+};
use web_time::Instant;
+use crate::{
+ protocol::{GossipsubCodec, ProtocolConfig},
+ rpc::Receiver,
+ rpc_proto::proto,
+ types::{PeerKind, RawMessage, Rpc, RpcOut},
+ ValidationError,
+};
+
/// The event emitted by the Handler. This informs the behaviour of various events created
/// by the handler.
#[derive(Debug)]
@@ -111,7 +115,6 @@ pub struct EnabledHandler {
peer_kind: Option,
/// Keeps track on whether we have sent the peer kind to the behaviour.
- //
// NOTE: Use this flag rather than checking the substream count each poll.
peer_kind_sent: bool,
diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs
index f6a51da4a51..87db1b771d1 100644
--- a/protocols/gossipsub/src/lib.rs
+++ b/protocols/gossipsub/src/lib.rs
@@ -43,22 +43,23 @@
//! implementations, due to undefined elements in the current specification.
//!
//! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter.
-//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this
-//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64
+//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and
+//! this is the default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64
//! encoded) by setting the `hash_topics` configuration parameter to true.
//!
//! - **Sequence Numbers** - A message on the gossipsub network is identified by the source
-//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in
-//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned
-//! integers. When messages are signed, they are monotonically increasing integers starting from a
-//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random.
-//! NOTE: These numbers are sequential in the current go implementation.
+//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence
+//! numbers in this implementation are sent as raw bytes across the wire. They are 64-bit
+//! big-endian unsigned integers. When messages are signed, they are monotonically increasing
+//! integers starting from a random value and wrapping around u64::MAX. When messages are
+//! unsigned, they are chosen at random. NOTE: These numbers are sequential in the current go
+//! implementation.
//!
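As an aside, the raw-bytes wire encoding described in the bullet above amounts to nothing more than a big-endian `u64`; for illustration:

```rust
// A sequence number on the wire is just its 8 big-endian bytes.
fn seqno_to_wire(seqno: u64) -> [u8; 8] {
    seqno.to_be_bytes()
}
```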
//! # Peer Discovery
//!
//! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which
-//! peers in a p2p network exchange information about each other among other reasons to become resistant
-//! against the failure or replacement of the
+//! peers in a p2p network exchange information about each other among other reasons to become
+//! resistant against the failure or replacement of the
//! [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network.
//!
//! Peer
@@ -111,22 +112,24 @@ mod topic;
mod transform;
mod types;
-pub use self::behaviour::{Behaviour, Event, MessageAuthenticity};
-pub use self::config::{Config, ConfigBuilder, ValidationMode, Version};
-pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError};
-pub use self::metrics::Config as MetricsConfig;
-pub use self::peer_score::{
- score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
- TopicScoreParams,
+pub use self::{
+ behaviour::{Behaviour, Event, MessageAuthenticity},
+ config::{Config, ConfigBuilder, ValidationMode, Version},
+ error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError},
+ metrics::Config as MetricsConfig,
+ peer_score::{
+ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams,
+ PeerScoreThresholds, TopicScoreParams,
+ },
+ subscription_filter::{
+ AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters,
+ MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter,
+ WhitelistSubscriptionFilter,
+ },
+ topic::{Hasher, Topic, TopicHash},
+ transform::{DataTransform, IdentityTransform},
+ types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage},
};
-pub use self::subscription_filter::{
- AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters,
- MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter,
- WhitelistSubscriptionFilter,
-};
-pub use self::topic::{Hasher, Topic, TopicHash};
-pub use self::transform::{DataTransform, IdentityTransform};
-pub use self::types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage};
#[deprecated(note = "Will be removed from the public API.")]
pub type Rpc = self::types::Rpc;
diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs
index aa65e3b7f1d..8ed71ea07f2 100644
--- a/protocols/gossipsub/src/mcache.rs
+++ b/protocols/gossipsub/src/mcache.rs
@@ -18,14 +18,17 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::topic::TopicHash;
-use crate::types::{MessageId, RawMessage};
-use libp2p_identity::PeerId;
-use std::collections::hash_map::Entry;
-use std::fmt::Debug;
use std::{
- collections::{HashMap, HashSet},
+ collections::{hash_map::Entry, HashMap, HashSet},
fmt,
+ fmt::Debug,
+};
+
+use libp2p_identity::PeerId;
+
+use crate::{
+ topic::TopicHash,
+ types::{MessageId, RawMessage},
};
/// CacheEntry stored in the history.
@@ -210,7 +213,7 @@ impl MessageCache {
&mut self,
message_id: &MessageId,
) -> Option<(RawMessage, HashSet)> {
- //We only remove the message from msgs and iwant_count and keep the message_id in the
+ // We only remove the message from msgs and iwant_count and keep the message_id in the
history vector. The id in the history vector will simply be ignored on popping.
self.iwant_counts.remove(message_id);
diff --git a/protocols/gossipsub/src/metrics.rs b/protocols/gossipsub/src/metrics.rs
index 40af1af2cac..2519da64b73 100644
--- a/protocols/gossipsub/src/metrics.rs
+++ b/protocols/gossipsub/src/metrics.rs
@@ -23,15 +23,21 @@
use std::collections::HashMap;
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::counter::Counter;
-use prometheus_client::metrics::family::{Family, MetricConstructor};
-use prometheus_client::metrics::gauge::Gauge;
-use prometheus_client::metrics::histogram::{linear_buckets, Histogram};
-use prometheus_client::registry::Registry;
-
-use crate::topic::TopicHash;
-use crate::types::{MessageAcceptance, PeerKind};
+use prometheus_client::{
+ encoding::{EncodeLabelSet, EncodeLabelValue},
+ metrics::{
+ counter::Counter,
+ family::{Family, MetricConstructor},
+ gauge::Gauge,
+ histogram::{linear_buckets, Histogram},
+ },
+ registry::Registry,
+};
+
+use crate::{
+ topic::TopicHash,
+ types::{MessageAcceptance, PeerKind},
+};
// Default value that limits for how many topics do we store metrics.
const DEFAULT_MAX_TOPICS: usize = 300;
@@ -100,7 +106,7 @@ type EverSubscribed = bool;
/// A collection of metrics used throughout the Gossipsub behaviour.
pub(crate) struct Metrics {
- /* Configuration parameters */
+ // Configuration parameters
/// Maximum number of topics for which we store metrics. This helps keep the metrics bounded.
max_topics: usize,
/// Maximum number of topics for which we store metrics, where the topic in not one to which we
@@ -108,11 +114,11 @@ pub(crate) struct Metrics {
/// from received messages and not explicit application subscriptions.
max_never_subscribed_topics: usize,
- /* Auxiliary variables */
+ // Auxiliary variables
/// Information needed to decide if a topic is allowed or not.
topic_info: HashMap,
- /* Metrics per known topic */
+ // Metrics per known topic
/// Status of our subscription to this topic. This metric allows analyzing other topic metrics
/// filtered by our current subscription status.
topic_subscription_status: Family,
@@ -134,7 +140,7 @@ pub(crate) struct Metrics {
/// The number of messages that timed out and could not be sent.
timedout_messages_dropped: Family,
- /* Metrics regarding mesh state */
+ // Metrics regarding mesh state
/// Number of peers in our mesh. This metric should be updated with the count of peers for a
/// topic in the mesh regardless of inclusion and churn events.
mesh_peer_counts: Family,
@@ -143,7 +149,7 @@ pub(crate) struct Metrics {
/// Number of times we remove peers in a topic mesh for different reasons.
mesh_peer_churn_events: Family,
- /* Metrics regarding messages sent/received */
+ // Metrics regarding messages sent/received
/// Number of gossip messages sent to each topic.
topic_msg_sent_counts: Family,
/// Bytes from gossip messages sent to each topic.
@@ -158,13 +164,13 @@ pub(crate) struct Metrics {
/// Bytes received from gossip messages for each topic.
topic_msg_recv_bytes: Family,
- /* Metrics related to scoring */
+ // Metrics related to scoring
/// Histogram of the scores for each mesh topic.
score_per_mesh: Family,
/// A counter of the kind of penalties being applied to peers.
scoring_penalties: Family,
- /* General Metrics */
+ // General Metrics
/// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based
/// on which protocol they support. This metric keeps track of the number of peers that are
/// connected of each type.
@@ -172,7 +178,7 @@ pub(crate) struct Metrics {
/// The time it takes to complete one iteration of the heartbeat.
heartbeat_duration: Histogram,
- /* Performance metrics */
+ // Performance metrics
/// When the user validates a message, it tries to re propagate it to its mesh peers. If the
/// message expires from the memcache before it can be validated, we count this a cache miss
/// and it is an indicator that the memcache size should be increased.
@@ -414,7 +420,7 @@ impl Metrics {
}
}
- /* Mesh related methods */
+ // Mesh related methods
/// Registers the subscription to a topic if the configured limits allow it.
/// Sets the registered number of peers in the mesh to 0.
diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs
index e8d1a6e5f97..33573ebeacc 100644
--- a/protocols/gossipsub/src/peer_score.rs
+++ b/protocols/gossipsub/src/peer_score.rs
@@ -18,25 +18,31 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-//!
//! Manages and stores the Scoring logic of a particular peer on the gossipsub behaviour.
-use crate::metrics::{Metrics, Penalty};
-use crate::time_cache::TimeCache;
-use crate::{MessageId, TopicHash};
+use std::{
+ collections::{hash_map, HashMap, HashSet},
+ net::IpAddr,
+ time::Duration,
+};
+
use libp2p_identity::PeerId;
-use std::collections::{hash_map, HashMap, HashSet};
-use std::net::IpAddr;
-use std::time::Duration;
use web_time::Instant;
+use crate::{
+ metrics::{Metrics, Penalty},
+ time_cache::TimeCache,
+ MessageId, TopicHash,
+};
+
mod params;
-use crate::ValidationError;
pub use params::{
score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
TopicScoreParams,
};
+use crate::ValidationError;
+
#[cfg(test)]
mod tests;
@@ -96,8 +102,9 @@ impl Default for PeerStats {
}
impl PeerStats {
- /// Returns a mutable reference to topic stats if they exist, otherwise if the supplied parameters score the
- /// topic, inserts the default stats and returns a reference to those. If neither apply, returns None.
+ /// Returns a mutable reference to topic stats if they exist, otherwise if the supplied
+ /// parameters score the topic, inserts the default stats and returns a reference to those.
+ /// If neither apply, returns None.
pub(crate) fn stats_or_default_mut(
&mut self,
topic_hash: TopicHash,
@@ -285,12 +292,14 @@ impl PeerScore {
}
// P3b:
- // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts.
+ // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so
+ // this detracts.
let p3b = topic_stats.mesh_failure_penalty;
topic_score += p3b * topic_params.mesh_failure_penalty_weight;
// P4: invalid messages
- // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts.
+ // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so
+ // this detracts.
let p4 =
topic_stats.invalid_message_deliveries * topic_stats.invalid_message_deliveries;
topic_score += p4 * topic_params.invalid_message_deliveries_weight;
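A worked sketch of the two terms accumulated here (both weights are validated to be negative, so each term can only lower the topic score; the helper below is illustrative, not the crate's code):

```rust
// P3b is the mesh-failure penalty counter times its weight; P4 squares the
// invalid-message counter before applying its weight.
fn p3b_and_p4(
    mesh_failure_penalty: f64,
    mesh_failure_penalty_weight: f64,
    invalid_message_deliveries: f64,
    invalid_message_deliveries_weight: f64,
) -> f64 {
    let p3b = mesh_failure_penalty * mesh_failure_penalty_weight;
    let p4 = invalid_message_deliveries
        * invalid_message_deliveries
        * invalid_message_deliveries_weight;
    p3b + p4
}

// e.g. two mesh failures and three invalid messages, both weighted -1.0,
// contribute 2.0 * -1.0 + 9.0 * -1.0 = -11.0 to the topic score.
```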
@@ -391,8 +400,8 @@ impl PeerScore {
}
// we don't decay retained scores, as the peer is not active.
- // this way the peer cannot reset a negative score by simply disconnecting and reconnecting,
- // unless the retention period has elapsed.
+ // this way the peer cannot reset a negative score by simply disconnecting and
+ // reconnecting, unless the retention period has elapsed.
// similarly, a well behaved peer does not lose its score by getting disconnected.
return true;
}
@@ -638,7 +647,8 @@ impl PeerScore {
}
}
- /// Similar to `reject_message` except does not require the message id or reason for an invalid message.
+ /// Similar to `reject_message` except does not require the message id or reason for an invalid
+ /// message.
pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) {
tracing::debug!(
peer=%from,
@@ -679,8 +689,8 @@ impl PeerScore {
}
if let RejectReason::ValidationIgnored = reason {
- // we were explicitly instructed by the validator to ignore the message but not penalize
- // the peer
+ // we were explicitly instructed by the validator to ignore the message but not
+ // penalize the peer
record.status = DeliveryStatus::Ignored;
record.peers.clear();
return;
@@ -882,13 +892,14 @@ impl PeerScore {
.get(topic_hash)
.expect("Topic must exist if there are known topic_stats");
- // check against the mesh delivery window -- if the validated time is passed as 0, then
- // the message was received before we finished validation and thus falls within the mesh
+ // check against the mesh delivery window -- if the validated time is passed as
+ // 0, then the message was received before we finished
+ // validation and thus falls within the mesh
// delivery window.
let mut falls_in_mesh_deliver_window = true;
if let Some(validated_time) = validated_time {
if let Some(now) = &now {
- //should always be true
+ // should always be true
let window_time = validated_time
.checked_add(topic_params.mesh_message_deliveries_window)
.unwrap_or(*now);
diff --git a/protocols/gossipsub/src/peer_score/params.rs b/protocols/gossipsub/src/peer_score/params.rs
index ae70991f7fb..cc48df8f61b 100644
--- a/protocols/gossipsub/src/peer_score/params.rs
+++ b/protocols/gossipsub/src/peer_score/params.rs
@@ -18,10 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ collections::{HashMap, HashSet},
+ net::IpAddr,
+ time::Duration,
+};
+
use crate::TopicHash;
-use std::collections::{HashMap, HashSet};
-use std::net::IpAddr;
-use std::time::Duration;
/// The default number of seconds for a decay interval.
const DEFAULT_DECAY_INTERVAL: u64 = 1;
@@ -117,12 +120,13 @@ pub struct PeerScoreParams {
/// P6: IP-colocation factor.
/// The parameter has an associated counter which counts the number of peers with the same IP.
- /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold, then the value
- /// is the square of the difference, ie `(peers_in_same_ip - ip_colocation_threshold)^2`.
- /// If the number of peers in the same IP is less than the threshold, then the value is 0.
- /// The weight of the parameter MUST be negative, unless you want to disable for testing.
- /// Note: In order to simulate many IPs in a manageable manner when testing, you can set the weight to 0
- /// thus disabling the IP colocation penalty.
+ /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold`, then the
+ /// value is the square of the difference, ie `(peers_in_same_ip -
+ /// ip_colocation_threshold)^2`. If the number of peers in the same IP is less than the
+ /// threshold, then the value is 0. The weight of the parameter MUST be negative, unless
+ /// you want to disable for testing. Note: In order to simulate many IPs in a manageable
+ /// manner when testing, you can set the weight to 0 thus disabling the IP
+ /// colocation penalty.
pub ip_colocation_factor_weight: f64,
pub ip_colocation_factor_threshold: f64,
pub ip_colocation_factor_whitelist: HashSet,
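A worked sketch of the P6 value just described (illustrative helper, not the crate's code): nothing below the threshold, squared surplus above it, scaled by the negative weight.

```rust
// P6: IP-colocation penalty.
fn p6_value(peers_in_same_ip: f64, threshold: f64, weight: f64) -> f64 {
    if peers_in_same_ip <= threshold {
        return 0.0; // at or below the threshold: no penalty
    }
    let surplus = peers_in_same_ip - threshold;
    surplus * surplus * weight // weight MUST be negative, so this detracts
}

// e.g. 6 peers behind one IP, threshold 4, weight -3.0 => (6 - 4)^2 * -3.0 = -12.0
```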
@@ -239,16 +243,16 @@ pub struct TopicScoreParams {
/// P1: time in the mesh
/// This is the time the peer has been grafted in the mesh.
- /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap`
- /// The weight of the parameter must be positive (or zero to disable).
+ /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by
+ /// `time_in_mesh_cap` The weight of the parameter must be positive (or zero to disable).
pub time_in_mesh_weight: f64,
pub time_in_mesh_quantum: Duration,
pub time_in_mesh_cap: f64,
/// P2: first message deliveries
/// This is the number of message deliveries in the topic.
- /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, and capped
- /// by `first_message_deliveries_cap`.
+ /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`,
+ /// and capped by `first_message_deliveries_cap`.
/// The weight of the parameter MUST be positive (or zero to disable).
pub first_message_deliveries_weight: f64,
pub first_message_deliveries_decay: f64,
@@ -264,8 +268,8 @@ pub struct TopicScoreParams {
/// before we have forwarded it to them.
/// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`.
/// If the counter exceeds the threshold, its value is 0.
- /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of
- /// the deficit, ie (`message_deliveries_threshold - counter)^2`
+ /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square
+ /// of the deficit, ie (`message_deliveries_threshold - counter)^2`
/// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh.
/// The weight of the parameter MUST be negative (or zero to disable).
pub mesh_message_deliveries_weight: f64,
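The P1 and P3 formulas described above can be sketched as small helpers (illustrative, not the crate's code); P3's squared deficit is the same expression the decay test in the next file checks.

```rust
use std::time::Duration;

// P1: time in the mesh, measured in quanta and capped.
fn p1_time_in_mesh(time_in_mesh: Duration, quantum: Duration, cap: f64) -> f64 {
    (time_in_mesh.as_secs_f64() / quantum.as_secs_f64()).min(cap)
}

// P3: squared deficit below the mesh delivery threshold (0 when the counter
// meets the threshold); later multiplied by a negative weight.
fn p3_delivery_deficit(deliveries: f64, threshold: f64) -> f64 {
    if deliveries >= threshold {
        return 0.0;
    }
    let deficit = threshold - deliveries;
    deficit * deficit
}
```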
diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs
index 064e277eed7..9e20cea2dde 100644
--- a/protocols/gossipsub/src/peer_score/tests.rs
+++ b/protocols/gossipsub/src/peer_score/tests.rs
@@ -20,9 +20,7 @@
/// A collection of unit tests mostly ported from the go implementation.
use super::*;
-
-use crate::types::RawMessage;
-use crate::{IdentTopic as Topic, Message};
+use crate::{types::RawMessage, IdentTopic as Topic, Message};
// estimates a value within variance
fn within_variance(value: f64, expected: f64, variance: f64) -> bool {
@@ -447,7 +445,8 @@ fn test_score_mesh_message_deliveries_decay() {
}
let score_a = peer_score.score(&peer_id_a);
- // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared.
+ // the penalty is the difference between the threshold and the (decayed)
+ // mesh deliveries, squared.
let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count;
let penalty = deficit * deficit;
let expected =
diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs
index 8d33fe51a90..e4272737342 100644
--- a/protocols/gossipsub/src/protocol.rs
+++ b/protocols/gossipsub/src/protocol.rs
@@ -18,15 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::config::ValidationMode;
-use crate::handler::HandlerEvent;
-use crate::rpc_proto::proto;
-use crate::topic::TopicHash;
-use crate::types::{
- ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc,
- Subscription, SubscriptionAction,
-};
-use crate::ValidationError;
+use std::{convert::Infallible, pin::Pin};
+
use asynchronous_codec::{Decoder, Encoder, Framed};
use byteorder::{BigEndian, ByteOrder};
use bytes::BytesMut;
@@ -35,8 +28,18 @@ use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
use libp2p_identity::{PeerId, PublicKey};
use libp2p_swarm::StreamProtocol;
use quick_protobuf::Writer;
-use std::convert::Infallible;
-use std::pin::Pin;
+
+use crate::{
+ config::ValidationMode,
+ handler::HandlerEvent,
+ rpc_proto::proto,
+ topic::TopicHash,
+ types::{
+ ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc,
+ Subscription, SubscriptionAction,
+ },
+ ValidationError,
+};
pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:";
@@ -136,7 +139,7 @@ where
}
}
-/* Gossip codec for the framing */
+// Gossip codec for the framing
pub struct GossipsubCodec {
/// Determines the level of validation performed on incoming messages.
@@ -506,13 +509,14 @@ impl Decoder for GossipsubCodec {
#[cfg(test)]
mod tests {
- use super::*;
- use crate::config::Config;
- use crate::{Behaviour, ConfigBuilder, MessageAuthenticity};
- use crate::{IdentTopic as Topic, Version};
use libp2p_identity::Keypair;
use quickcheck::*;
+ use super::*;
+ use crate::{
+ config::Config, Behaviour, ConfigBuilder, IdentTopic as Topic, MessageAuthenticity, Version,
+ };
+
#[derive(Clone, Debug)]
struct Message(RawMessage);
diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs
index c90e46a85da..b5f05c7b2e5 100644
--- a/protocols/gossipsub/src/rpc.rs
+++ b/protocols/gossipsub/src/rpc.rs
@@ -18,7 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use futures::{stream::Peekable, Stream, StreamExt};
use std::{
future::Future,
pin::Pin,
@@ -29,6 +28,8 @@ use std::{
task::{Context, Poll},
};
+use futures::{stream::Peekable, Stream, StreamExt};
+
use crate::types::RpcOut;
/// `RpcOut` sender that is priority aware.
diff --git a/protocols/gossipsub/src/rpc_proto.rs b/protocols/gossipsub/src/rpc_proto.rs
index 94c7aafbc3e..2f6832a01a1 100644
--- a/protocols/gossipsub/src/rpc_proto.rs
+++ b/protocols/gossipsub/src/rpc_proto.rs
@@ -26,12 +26,12 @@ pub(crate) mod proto {
#[cfg(test)]
mod test {
- use crate::rpc_proto::proto::compat;
- use crate::IdentTopic as Topic;
use libp2p_identity::PeerId;
use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer};
use rand::Rng;
+ use crate::{rpc_proto::proto::compat, IdentTopic as Topic};
+
#[test]
fn test_multi_topic_message_compatibility() {
let topic1 = Topic::new("t1").hash();
diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs
index 02bb9b4eab6..c051b6c333b 100644
--- a/protocols/gossipsub/src/subscription_filter.rs
+++ b/protocols/gossipsub/src/subscription_filter.rs
@@ -18,10 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::types::Subscription;
-use crate::TopicHash;
use std::collections::{BTreeSet, HashMap, HashSet};
+use crate::{types::Subscription, TopicHash};
+
pub trait TopicSubscriptionFilter {
/// Returns true iff the topic is of interest and we can subscribe to it.
fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool;
@@ -82,7 +82,7 @@ pub trait TopicSubscriptionFilter {
}
}
-//some useful implementers
+// some useful implementers
/// Allows all subscriptions
#[derive(Default, Clone)]
@@ -199,7 +199,7 @@ where
}
}
-///A subscription filter that filters topics based on a regular expression.
+/// A subscription filter that filters topics based on a regular expression.
pub struct RegexSubscriptionFilter(pub regex::Regex);
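A hedged usage sketch (the filter trait and `TopicHash::from_raw` follow the public API; the pattern itself is illustrative, and the `regex` crate is required):

```rust
use libp2p_gossipsub::{RegexSubscriptionFilter, TopicHash, TopicSubscriptionFilter};

// Only allow subscriptions to topics whose raw name matches the pattern.
fn regex_filter_example() {
    let mut filter = RegexSubscriptionFilter(regex::Regex::new("^mesh_.*").unwrap());
    assert!(filter.can_subscribe(&TopicHash::from_raw("mesh_blocks")));
    assert!(!filter.can_subscribe(&TopicHash::from_raw("other_topic")));
}
```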
impl TopicSubscriptionFilter for RegexSubscriptionFilter {
diff --git a/protocols/gossipsub/src/time_cache.rs b/protocols/gossipsub/src/time_cache.rs
index a3e5c01ac4c..ace02606e88 100644
--- a/protocols/gossipsub/src/time_cache.rs
+++ b/protocols/gossipsub/src/time_cache.rs
@@ -20,13 +20,18 @@
//! This implements a time-based LRU cache for checking gossipsub message duplicates.
-use fnv::FnvHashMap;
-use std::collections::hash_map::{
- self,
- Entry::{Occupied, Vacant},
+use std::{
+ collections::{
+ hash_map::{
+ self,
+ Entry::{Occupied, Vacant},
+ },
+ VecDeque,
+ },
+ time::Duration,
};
-use std::collections::VecDeque;
-use std::time::Duration;
+
+use fnv::FnvHashMap;
use web_time::Instant;
struct ExpiringElement {
@@ -206,7 +211,7 @@ mod test {
cache.insert("t");
assert!(!cache.insert("t"));
cache.insert("e");
- //assert!(!cache.insert("t"));
+ // assert!(!cache.insert("t"));
assert!(!cache.insert("e"));
// sleep until cache expiry
std::thread::sleep(Duration::from_millis(101));
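For orientation, the behaviour the test above relies on can be pictured with a minimal TTL set (purely illustrative; the crate's implementation additionally keeps an expiry queue so stale entries are actually evicted):

```rust
use std::{
    collections::HashMap,
    time::{Duration, Instant},
};

// Minimal illustration: `insert` returns false for a key seen within the TTL
// and true again once the TTL has elapsed.
struct TtlSet {
    ttl: Duration,
    seen: HashMap<String, Instant>,
}

impl TtlSet {
    fn insert(&mut self, key: &str) -> bool {
        let now = Instant::now();
        match self.seen.get(key) {
            Some(added) if now.duration_since(*added) < self.ttl => false, // duplicate
            _ => {
                self.seen.insert(key.to_owned(), now);
                true
            }
        }
    }
}
```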
diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs
index a73496b53f2..4793c23a8e1 100644
--- a/protocols/gossipsub/src/topic.rs
+++ b/protocols/gossipsub/src/topic.rs
@@ -18,12 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::rpc_proto::proto;
+use std::fmt;
+
use base64::prelude::*;
use prometheus_client::encoding::EncodeLabelSet;
use quick_protobuf::Writer;
use sha2::{Digest, Sha256};
-use std::fmt;
+
+use crate::rpc_proto::proto;
/// A generic trait that can be extended for various hashing types for a topic.
pub trait Hasher {
diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs
index bb1916fefd0..bcb1f279ae5 100644
--- a/protocols/gossipsub/src/types.rs
+++ b/protocols/gossipsub/src/types.rs
@@ -19,20 +19,18 @@
// DEALINGS IN THE SOFTWARE.
//! A collection of types using the Gossipsub system.
-use crate::rpc::Sender;
-use crate::TopicHash;
+use std::{collections::BTreeSet, fmt, fmt::Debug};
+
use futures_timer::Delay;
use libp2p_identity::PeerId;
use libp2p_swarm::ConnectionId;
use prometheus_client::encoding::EncodeLabelValue;
use quick_protobuf::MessageWrite;
-use std::fmt::Debug;
-use std::{collections::BTreeSet, fmt};
-
-use crate::rpc_proto::proto;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
+use crate::{rpc::Sender, rpc_proto::proto, TopicHash};
+
/// Messages that have expired while attempting to be sent to a peer.
#[derive(Clone, Debug, Default)]
pub struct FailedMessages {
@@ -42,7 +40,8 @@ pub struct FailedMessages {
pub forward: usize,
/// The number of messages that were failed to be sent to the priority queue as it was full.
pub priority: usize,
- /// The number of messages that were failed to be sent to the non-priority queue as it was full.
+ /// The number of messages that were failed to be sent to the non-priority queue as it was
+ /// full.
pub non_priority: usize,
/// The number of messages that timed out and could not be sent.
pub timeout: usize,
@@ -230,9 +229,9 @@ pub enum SubscriptionAction {
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct PeerInfo {
pub(crate) peer_id: Option,
- //TODO add this when RFC: Signed Address Records got added to the spec (see pull request
+ // TODO add this when RFC: Signed Address Records got added to the spec (see pull request
// https://github.com/libp2p/specs/pull/217)
- //pub signed_peer_record: ?,
+ // pub signed_peer_record: ?,
}
/// A Control message received by the gossipsub system.
@@ -240,7 +239,8 @@ pub(crate) struct PeerInfo {
pub enum ControlAction {
/// Node broadcasts known messages per topic - IHave control message.
IHave(IHave),
- /// The node requests specific message ids (peer_id + sequence _number) - IWant control message.
+ /// The node requests specific message ids (peer_id + sequence number) - IWant control
+ /// message.
IWant(IWant),
/// The node has been added to the mesh - Graft control message.
Graft(Graft),
diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs
index 3b6261afa54..85038665b4d 100644
--- a/protocols/gossipsub/tests/smoke.rs
+++ b/protocols/gossipsub/tests/smoke.rs
@@ -18,15 +18,18 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use futures::stream::{FuturesUnordered, SelectAll};
-use futures::StreamExt;
+use std::{task::Poll, time::Duration};
+
+use futures::{
+ stream::{FuturesUnordered, SelectAll},
+ StreamExt,
+};
use libp2p_gossipsub as gossipsub;
use libp2p_gossipsub::{MessageAuthenticity, ValidationMode};
use libp2p_swarm::Swarm;
use libp2p_swarm_test::SwarmExt as _;
use quickcheck::{QuickCheck, TestResult};
use rand::{seq::SliceRandom, SeedableRng};
-use std::{task::Poll, time::Duration};
use tokio::{runtime::Runtime, time};
use tracing_subscriber::EnvFilter;
diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs
index b69f2014d81..0cd27d90717 100644
--- a/protocols/identify/src/behaviour.rs
+++ b/protocols/identify/src/behaviour.rs
@@ -18,28 +18,27 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::handler::{self, Handler, InEvent};
-use crate::protocol::{Info, UpgradeError};
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{multiaddr, ConnectedPoint, Endpoint, Multiaddr};
-use libp2p_identity::PeerId;
-use libp2p_identity::PublicKey;
-use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm};
+use std::{
+ collections::{hash_map::Entry, HashMap, HashSet, VecDeque},
+ num::NonZeroUsize,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use libp2p_core::{
+ multiaddr, multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr,
+};
+use libp2p_identity::{PeerId, PublicKey};
use libp2p_swarm::{
- ConnectionDenied, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour,
- NotifyHandler, PeerAddresses, StreamUpgradeError, THandlerInEvent, ToSwarm,
- _address_translation,
+ behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm},
+ ConnectionDenied, ConnectionId, DialError, ExternalAddresses, ListenAddresses,
+ NetworkBehaviour, NotifyHandler, PeerAddresses, StreamUpgradeError, THandler, THandlerInEvent,
+ THandlerOutEvent, ToSwarm, _address_translation,
};
-use libp2p_swarm::{ConnectionId, THandler, THandlerOutEvent};
-use std::collections::hash_map::Entry;
-use std::num::NonZeroUsize;
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- task::Context,
- task::Poll,
- time::Duration,
+use crate::{
+ handler::{self, Handler, InEvent},
+ protocol::{Info, UpgradeError},
};
/// Whether an [`Multiaddr`] is a valid for the QUIC transport.
@@ -323,7 +322,8 @@ impl Behaviour {
.contains(&connection_id)
{
// Apply address translation to the candidate address.
- // For TCP without port-reuse, the observed address contains an ephemeral port which needs to be replaced by the port of a listen address.
+ // For TCP without port-reuse, the observed address contains an ephemeral port which
+ // needs to be replaced by the port of a listen address.
let translated_addresses = {
let mut addrs: Vec<_> = self
.listen_addresses
@@ -398,7 +398,8 @@ impl NetworkBehaviour for Behaviour {
) -> Result, ConnectionDenied> {
// Contrary to inbound events, outbound events are full-p2p qualified
// so we remove /p2p/ in order to be homogeneous
- // this will avoid Autonatv2 to probe twice the same address (fully-p2p-qualified + not fully-p2p-qualified)
+ // this will avoid Autonatv2 to probe twice the same address (fully-p2p-qualified + not
+ // fully-p2p-qualified)
let mut addr = addr.clone();
if matches!(addr.iter().last(), Some(multiaddr::Protocol::P2p(_))) {
addr.pop();
@@ -415,7 +416,9 @@ impl NetworkBehaviour for Behaviour {
self.config.local_public_key.clone(),
self.config.protocol_version.clone(),
self.config.agent_version.clone(),
- addr.clone(), // TODO: This is weird? That is the public address we dialed, shouldn't need to tell the other party?
+ // TODO: This is weird? That is the public address we dialed,
+ // shouldn't need to tell the other party?
+ addr.clone(),
self.all_addresses(),
))
}
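The /p2p/ stripping performed above can be shown as a standalone sketch (assuming the current multiaddr API where `Protocol::P2p` carries a `PeerId`, and `PeerId::random()` from libp2p-identity's rand feature):

```rust
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_identity::PeerId;

// Drop a trailing /p2p/<peer id> so fully-qualified and bare addresses compare equal.
fn strip_peer_id(mut addr: Multiaddr) -> Multiaddr {
    if matches!(addr.iter().last(), Some(Protocol::P2p(_))) {
        addr.pop();
    }
    addr
}

fn demo() {
    let bare: Multiaddr = "/ip4/127.0.0.1/tcp/4001".parse().unwrap();
    let qualified = bare.clone().with(Protocol::P2p(PeerId::random()));
    assert_eq!(strip_peer_id(qualified), bare);
}
```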
diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs
index dd073d50ed6..cda49f992b8 100644
--- a/protocols/identify/src/handler.rs
+++ b/protocols/identify/src/handler.rs
@@ -18,29 +18,38 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::protocol::{Info, PushInfo, UpgradeError};
-use crate::{protocol, PROTOCOL_NAME, PUSH_PROTOCOL_NAME};
+use std::{
+ collections::HashSet,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use either::Either;
use futures::prelude::*;
use futures_bounded::Timeout;
use futures_timer::Delay;
-use libp2p_core::upgrade::{ReadyUpgrade, SelectUpgrade};
-use libp2p_core::Multiaddr;
-use libp2p_identity::PeerId;
-use libp2p_identity::PublicKey;
-use libp2p_swarm::handler::{
- ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
- ProtocolSupport,
+use libp2p_core::{
+ upgrade::{ReadyUpgrade, SelectUpgrade},
+ Multiaddr,
};
+use libp2p_identity::{PeerId, PublicKey};
use libp2p_swarm::{
+ handler::{
+ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
+ ProtocolSupport,
+ },
ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError,
SubstreamProtocol, SupportedProtocols,
};
use smallvec::SmallVec;
-use std::collections::HashSet;
-use std::{task::Context, task::Poll, time::Duration};
use tracing::Level;
+use crate::{
+ protocol,
+ protocol::{Info, PushInfo, UpgradeError},
+ PROTOCOL_NAME, PUSH_PROTOCOL_NAME,
+};
+
const STREAM_TIMEOUT: Duration = Duration::from_secs(60);
const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10;
diff --git a/protocols/identify/src/lib.rs b/protocols/identify/src/lib.rs
index 7d28e5b5cc7..868ace87aeb 100644
--- a/protocols/identify/src/lib.rs
+++ b/protocols/identify/src/lib.rs
@@ -28,10 +28,10 @@
//!
//! # Important Discrepancies
//!
-//! - **Using Identify with other protocols** Unlike some other libp2p implementations,
-//! rust-libp2p does not treat Identify as a core protocol. This means that other protocols cannot
-//! rely upon the existence of Identify, and need to be manually hooked up to Identify in order to
-//! make use of its capabilities.
+//! - **Using Identify with other protocols** Unlike some other libp2p implementations, rust-libp2p
+//! does not treat Identify as a core protocol. This means that other protocols cannot rely upon
+//! the existence of Identify, and need to be manually hooked up to Identify in order to make use
+//! of its capabilities.
//!
//! # Usage
//!
@@ -41,8 +41,10 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-pub use self::behaviour::{Behaviour, Config, Event};
-pub use self::protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME};
+pub use self::{
+ behaviour::{Behaviour, Config, Event},
+ protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME},
+};
mod behaviour;
mod handler;
diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs
index f4dfd544dd1..33aeedb7c4f 100644
--- a/protocols/identify/src/protocol.rs
+++ b/protocols/identify/src/protocol.rs
@@ -18,16 +18,18 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
+use std::io;
+
use asynchronous_codec::{FramedRead, FramedWrite};
use futures::prelude::*;
use libp2p_core::{multiaddr, Multiaddr};
use libp2p_identity as identity;
use libp2p_identity::PublicKey;
use libp2p_swarm::StreamProtocol;
-use std::io;
use thiserror::Error;
+use crate::proto;
+
const MAX_MESSAGE_SIZE_BYTES: usize = 4096;
pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/1.0.0");
@@ -77,7 +79,8 @@ impl Info {
}
/// Identify push information of a peer sent in protocol messages.
-/// Note that missing fields should be ignored, as peers may choose to send partial updates containing only the fields whose values have changed.
+/// Note that missing fields should be ignored, as peers may choose to send partial updates
+/// containing only the fields whose values have changed.
#[derive(Debug, Clone)]
pub struct PushInfo {
pub public_key: Option,
@@ -264,9 +267,10 @@ pub enum UpgradeError {
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_identity as identity;
+ use super::*;
+
#[test]
fn skip_invalid_multiaddr() {
let valid_multiaddr: Multiaddr = "/ip6/2001:db8::/tcp/1234".parse().unwrap();
diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs
index d624005408e..dd48b314173 100644
--- a/protocols/identify/tests/smoke.rs
+++ b/protocols/identify/tests/smoke.rs
@@ -1,10 +1,13 @@
+use std::{
+ collections::HashSet,
+ iter,
+ time::{Duration, Instant},
+};
+
use futures::StreamExt;
use libp2p_identify as identify;
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
-use std::collections::HashSet;
-use std::iter;
-use std::time::{Duration, Instant};
use tracing_subscriber::EnvFilter;
#[async_std::test]
@@ -34,8 +37,7 @@ async fn periodic_identify() {
let (swarm2_memory_listen, swarm2_tcp_listen_addr) = swarm2.listen().await;
swarm2.connect(&mut swarm1).await;
- use identify::Event::Received;
- use identify::Event::Sent;
+ use identify::Event::{Received, Sent};
match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await {
(
@@ -67,7 +69,8 @@ async fn periodic_identify() {
assert_eq!(s2_info.agent_version, "b");
assert!(!s2_info.protocols.is_empty());
- // Cannot assert observed address of dialer because memory transport uses ephemeral, outgoing ports.
+ // Cannot assert observed address of dialer because memory transport uses ephemeral,
+ // outgoing ports.
// assert_eq!(
// s2_info.observed_addr,
// swarm2_memory_listen.with(Protocol::P2p(swarm2_peer_id.into()))
diff --git a/protocols/kad/src/addresses.rs b/protocols/kad/src/addresses.rs
index 0b3dc71e649..c2168be661e 100644
--- a/protocols/kad/src/addresses.rs
+++ b/protocols/kad/src/addresses.rs
@@ -18,9 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::fmt;
+
use libp2p_core::Multiaddr;
use smallvec::SmallVec;
-use std::fmt;
/// A non-empty list of (unique) addresses of a peer in the routing table.
/// Every address must be a fully-qualified /p2p address.
diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs
index f577971167f..988a16dc41f 100644
--- a/protocols/kad/src/behaviour.rs
+++ b/protocols/kad/src/behaviour.rs
@@ -22,41 +22,46 @@
mod test;
-use crate::addresses::Addresses;
-use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId};
-use crate::kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus};
-use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig};
-use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState};
-use crate::record::{
- self,
- store::{self, RecordStore},
- ProviderRecord, Record,
+use std::{
+ collections::{BTreeMap, HashMap, HashSet, VecDeque},
+ fmt,
+ num::NonZeroUsize,
+ task::{Context, Poll, Waker},
+ time::Duration,
+ vec,
};
-use crate::{bootstrap, K_VALUE};
-use crate::{jobs::*, protocol};
+
use fnv::FnvHashSet;
use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::{
- AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm,
-};
use libp2p_swarm::{
+ behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm},
dial_opts::{self, DialOpts},
ConnectionDenied, ConnectionHandler, ConnectionId, DialError, ExternalAddresses,
ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
};
-use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
-use std::fmt;
-use std::num::NonZeroUsize;
-use std::task::{Context, Poll, Waker};
-use std::time::Duration;
-use std::vec;
use thiserror::Error;
use tracing::Level;
use web_time::Instant;
pub use crate::query::QueryStats;
+use crate::{
+ addresses::Addresses,
+ bootstrap,
+ handler::{Handler, HandlerEvent, HandlerIn, RequestId},
+ jobs::*,
+ kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus},
+ protocol,
+ protocol::{ConnectionType, KadPeer, ProtocolConfig},
+ query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState},
+ record::{
+ self,
+ store::{self, RecordStore},
+ ProviderRecord, Record,
+ },
+ K_VALUE,
+};
/// `Behaviour` is a `NetworkBehaviour` that implements the libp2p
/// Kademlia protocol.
@@ -157,8 +162,9 @@ pub enum StoreInserts {
/// the record is forwarded immediately to the [`RecordStore`].
Unfiltered,
/// Whenever a (provider) record is received, an event is emitted.
- /// Provider records generate a [`InboundRequest::AddProvider`] under [`Event::InboundRequest`],
- /// normal records generate a [`InboundRequest::PutRecord`] under [`Event::InboundRequest`].
+ /// Provider records generate an [`InboundRequest::AddProvider`] under
+ /// [`Event::InboundRequest`], normal records generate an [`InboundRequest::PutRecord`]
+ /// under [`Event::InboundRequest`].
///
/// When deemed valid, a (provider) record needs to be explicitly stored in
/// the [`RecordStore`] via [`RecordStore::put`] or [`RecordStore::add_provider`],
@@ -205,9 +211,10 @@ pub enum Caching {
/// [`GetRecordOk::FinishedWithNoAdditionalRecord`] is always empty.
Disabled,
/// Up to `max_peers` peers not returning a record that are closest to the key
- /// being looked up are tracked and returned in [`GetRecordOk::FinishedWithNoAdditionalRecord`].
- /// The write-back operation must be performed explicitly, if
- /// desired and after choosing a record from the results, via [`Behaviour::put_record_to`].
+ /// being looked up are tracked and returned in
+ /// [`GetRecordOk::FinishedWithNoAdditionalRecord`]. The write-back operation must be
+ /// performed explicitly, if desired and after choosing a record from the results, via
+ /// [`Behaviour::put_record_to`].
Enabled { max_peers: u16 },
}
@@ -442,16 +449,17 @@ impl Config {
self
}
- /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a new peer is inserted in the routing table.
- /// This prevent cascading bootstrap requests when multiple peers are inserted into the routing table "at the same time".
- /// This also allows to wait a little bit for other potential peers to be inserted into the routing table before
- /// triggering a bootstrap, giving more context to the future bootstrap request.
+ /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a new peer is inserted
+ /// in the routing table. This prevents cascading bootstrap requests when multiple peers are
+ /// inserted into the routing table "at the same time". It also allows waiting a little for
+ /// other potential peers to be inserted into the routing table before triggering a bootstrap,
+ /// giving more context to the future bootstrap request.
///
/// * Default to `500` ms.
- /// * Set to `Some(Duration::ZERO)` to never wait before triggering a bootstrap request when a new peer
- /// is inserted in the routing table.
- /// * Set to `None` to disable automatic bootstrap (no bootstrap request will be triggered when a new
- /// peer is inserted in the routing table).
+ /// * Set to `Some(Duration::ZERO)` to never wait before triggering a bootstrap request when a
+ /// new peer is inserted in the routing table.
+ /// * Set to `None` to disable automatic bootstrap (no bootstrap request will be triggered when
+ /// a new peer is inserted in the routing table).
#[cfg(test)]
pub(crate) fn set_automatic_bootstrap_throttle(
&mut self,
@@ -573,15 +581,13 @@ where
///
/// Explicitly adding addresses of peers serves two purposes:
///
- /// 1. In order for a node to join the DHT, it must know about at least
- /// one other node of the DHT.
+ /// 1. In order for a node to join the DHT, it must know about at least one other node of the
+ /// DHT.
///
- /// 2. When a remote peer initiates a connection and that peer is not
- /// yet in the routing table, the `Kademlia` behaviour must be
- /// informed of an address on which that peer is listening for
- /// connections before it can be added to the routing table
- /// from where it can subsequently be discovered by all peers
- /// in the DHT.
+ /// 2. When a remote peer initiates a connection and that peer is not yet in the routing
+ /// table, the `Kademlia` behaviour must be informed of an address on which that peer is
+ /// listening for connections before it can be added to the routing table from where it can
+ /// subsequently be discovered by all peers in the DHT.
///
/// If the routing table has been updated as a result of this operation,
/// a [`Event::RoutingUpdated`] event is emitted.
@@ -983,7 +989,8 @@ where
///
/// > **Note**: Bootstrap does not require to be called manually. It is periodically
/// > invoked at regular intervals based on the configured `periodic_bootstrap_interval` (see
- /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is also automatically invoked
+ /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is also
+ /// > automatically invoked
/// > when a new peer is inserted in the routing table.
/// > This parameter is used to call [`Behaviour::bootstrap`] periodically and automatically
/// > to ensure a healthy routing table.
@@ -1107,10 +1114,12 @@ where
/// Set the [`Mode`] in which we should operate.
///
- /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`].
+ /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we
+ /// have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`].
///
- /// Setting a mode via this function disables this automatic behaviour and unconditionally operates in the specified mode.
- /// To reactivate the automatic configuration, pass [`None`] instead.
+ /// Setting a mode via this function disables this automatic behaviour and unconditionally
+ /// operates in the specified mode. To reactivate the automatic configuration, pass [`None`]
+ /// instead.
pub fn set_mode(&mut self, mode: Option<Mode>) {
match mode {
Some(mode) => {
@@ -1191,8 +1200,8 @@ where
"Previous match arm handled empty list"
);
- // Previously, server-mode, now also server-mode because > 1 external address. Don't log anything to avoid spam.
-
+ // Previously, server-mode, now also server-mode because > 1 external address.
+ // Don't log anything to avoid spam.
Mode::Server
}
};
@@ -2157,7 +2166,8 @@ where
}
}
- /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer.
+ /// Preloads a new [`Handler`] with requests that are waiting
+ /// to be sent to the newly connected peer.
fn preload_new_handler(
&mut self,
handler: &mut Handler,
@@ -2755,7 +2765,6 @@ pub struct PeerRecord {
#[allow(clippy::large_enum_variant)]
pub enum Event {
/// An inbound request has been received and handled.
- //
// Note on the difference between 'request' and 'query': A request is a
// single request-response style exchange with a single remote peer. A query
// is made of multiple requests across multiple remote peers.
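The doc comments rewrapped above (StoreInserts, Caching, Mode) describe configuration points on the Kademlia behaviour. A minimal sketch of how they could be wired together, assuming the public libp2p_kad API (`Config::new`, `set_record_filtering`, `set_caching`, `Behaviour::with_config`, `set_mode`) matches these names; the values are illustrative:

use libp2p_identity::PeerId;
use libp2p_kad::{store::MemoryStore, Behaviour, Caching, Config, Mode, StoreInserts, PROTOCOL_NAME};

fn kad(local_peer_id: PeerId) -> Behaviour<MemoryStore> {
    let mut cfg = Config::new(PROTOCOL_NAME);
    // Emit `InboundRequest` events instead of writing incoming records straight to the store.
    cfg.set_record_filtering(StoreInserts::FilterBoth);
    // Track up to 10 closest peers without the record, for an explicit write-back later.
    cfg.set_caching(Caching::Enabled { max_peers: 10 });
    let mut behaviour = Behaviour::with_config(local_peer_id, MemoryStore::new(local_peer_id), cfg);
    // Skip the automatic client/server switching and always operate as a DHT server.
    behaviour.set_mode(Some(Mode::Server));
    behaviour
}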
diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs
index 7409168ac2a..82749ffb5fd 100644
--- a/protocols/kad/src/behaviour/test.rs
+++ b/protocols/kad/src/behaviour/test.rs
@@ -20,10 +20,6 @@
#![cfg(test)]
-use super::*;
-
-use crate::record::{store::MemoryStore, Key};
-use crate::{K_VALUE, PROTOCOL_NAME, SHA_256_MH};
use futures::{executor::block_on, future::poll_fn, prelude::*};
use futures_timer::Delay;
use libp2p_core::{
@@ -39,6 +35,12 @@ use libp2p_yamux as yamux;
use quickcheck::*;
use rand::{random, rngs::StdRng, thread_rng, Rng, SeedableRng};
+use super::*;
+use crate::{
+ record::{store::MemoryStore, Key},
+ K_VALUE, PROTOCOL_NAME, SHA_256_MH,
+};
+
type TestSwarm = Swarm<Behaviour<MemoryStore>>;
fn build_node() -> (Multiaddr, TestSwarm) {
@@ -164,7 +166,8 @@ fn bootstrap() {
let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2);
let mut cfg = Config::new(PROTOCOL_NAME);
- // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically.
+ // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from
+ // triggering automatically.
cfg.set_periodic_bootstrap_interval(None);
cfg.set_automatic_bootstrap_throttle(None);
if rng.gen() {
@@ -246,7 +249,8 @@ fn query_iter() {
fn run(rng: &mut impl Rng) {
let num_total = rng.gen_range(2..20);
let mut config = Config::new(PROTOCOL_NAME);
- // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically.
+ // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from
+ // triggering automatically.
config.set_periodic_bootstrap_interval(None);
config.set_automatic_bootstrap_throttle(None);
let mut swarms = build_connected_nodes_with_config(num_total, 1, config)
@@ -561,7 +565,8 @@ fn put_record() {
let mut config = Config::new(PROTOCOL_NAME);
config.set_replication_factor(replication_factor);
- // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically.
+ // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from
+ // triggering automatically.
config.set_periodic_bootstrap_interval(None);
config.set_automatic_bootstrap_throttle(None);
if rng.gen() {
@@ -933,7 +938,8 @@ fn add_provider() {
let mut config = Config::new(PROTOCOL_NAME);
config.set_replication_factor(replication_factor);
- // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically.
+ // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from
+ // triggering automatically.
config.set_periodic_bootstrap_interval(None);
config.set_automatic_bootstrap_throttle(None);
if rng.gen() {
@@ -1161,7 +1167,8 @@ fn disjoint_query_does_not_finish_before_all_paths_did() {
config.disjoint_query_paths(true);
// I.e. setting the amount disjoint paths to be explored to 2.
config.set_parallelism(NonZeroUsize::new(2).unwrap());
- // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically.
+ // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering
+ // automatically.
config.set_periodic_bootstrap_interval(None);
config.set_automatic_bootstrap_throttle(None);
diff --git a/protocols/kad/src/bootstrap.rs b/protocols/kad/src/bootstrap.rs
index 40acdfd88ee..d6576a3ef54 100644
--- a/protocols/kad/src/bootstrap.rs
+++ b/protocols/kad/src/bootstrap.rs
@@ -1,7 +1,9 @@
-use futures::FutureExt;
-use std::task::{Context, Poll, Waker};
-use std::time::Duration;
+use std::{
+ task::{Context, Poll, Waker},
+ time::Duration,
+};
+use futures::FutureExt;
use futures_timer::Delay;
/// Default value chosen at ``.
@@ -9,18 +11,18 @@ pub(crate) const DEFAULT_AUTOMATIC_THROTTLE: Duration = Duration::from_millis(50
#[derive(Debug)]
pub(crate) struct Status {
- /// If the user did not disable periodic bootstrap (by providing `None` for `periodic_interval`)
- /// this is the periodic interval and the delay of the current period. When `Delay` finishes,
- /// a bootstrap will be triggered and the `Delay` will be reset.
+ /// If the user did not disable periodic bootstrap (by providing `None` for
+ /// `periodic_interval`) this is the periodic interval and the delay of the current period.
+ /// When `Delay` finishes, a bootstrap will be triggered and the `Delay` will be reset.
interval_and_delay: Option<(Duration, Delay)>,
/// Configured duration to wait before triggering a bootstrap when a new peer
/// is inserted in the routing table. `None` if automatic bootstrap is disabled.
automatic_throttle: Option<Duration>,
/// Timer that will be set (if automatic bootstrap is not disabled) when a new peer is inserted
- /// in the routing table. When it finishes, it will trigger a bootstrap and will be set to `None`
- /// again. If an other new peer is inserted in the routing table before this timer finishes,
- /// the timer is reset.
+ /// in the routing table. When it finishes, it will trigger a bootstrap and will be set to
+ /// `None` again. If another new peer is inserted in the routing table before this timer
+ /// finishes, the timer is reset.
throttle_timer: Option<ThrottleTimer>,
/// Number of bootstrap requests currently in progress. We ensure neither periodic bootstrap
@@ -108,16 +110,19 @@ impl Status {
// A `throttle_timer` has been registered. It means one or more peers have been
// inserted into the routing table and that a bootstrap request should be triggered.
// However, to not risk cascading bootstrap requests, we wait a little time to ensure
- // the user will not add more peers in the routing table in the next "throttle_timer" remaining.
+ // the user will not add more peers to the routing table before the "throttle_timer"
+ // expires.
if throttle_delay.poll_unpin(cx).is_ready() {
// The `throttle_timer` is finished, triggering bootstrap right now.
// The call to `on_started` will reset `throttle_delay`.
return Poll::Ready(());
}
- // The `throttle_timer` is not finished but the periodic interval for triggering bootstrap might be reached.
+ // The `throttle_timer` is not finished but the periodic interval for triggering
+ // bootstrap might be reached.
} else {
- // No new peer has recently been inserted into the routing table or automatic bootstrap is disabled.
+ // No new peer has recently been inserted into the routing table or automatic bootstrap
+ // is disabled.
}
// Checking if the user has enabled the periodic bootstrap feature.
@@ -131,7 +136,8 @@ impl Status {
// The user disabled periodic bootstrap.
}
- // Registering the `waker` so that we can wake up when calling `on_new_peer_in_routing_table`.
+ // Registering the `waker` so that we can wake up when calling
+ // `on_new_peer_in_routing_table`.
self.waker = Some(cx.waker().clone());
Poll::Pending
}
@@ -175,9 +181,10 @@ impl futures::Future for ThrottleTimer {
#[cfg(test)]
mod tests {
- use super::*;
use web_time::Instant;
+ use super::*;
+
const MS_5: Duration = Duration::from_millis(5);
const MS_100: Duration = Duration::from_millis(100);
@@ -296,7 +303,8 @@ mod tests {
let elapsed = Instant::now().duration_since(start);
- assert!(elapsed > (i * MS_100 - Duration::from_millis(10))); // Subtract 10ms to avoid flakes.
+ // Subtract 10ms to avoid flakes.
+ assert!(elapsed > (i * MS_100 - Duration::from_millis(10)));
}
}
@@ -308,7 +316,8 @@ mod tests {
status.trigger();
for _ in 0..10 {
Delay::new(MS_100 / 2).await;
- status.trigger(); // should reset throttle_timer
+ // should reset throttle_timer
+ status.trigger();
}
assert!(
status.next().now_or_never().is_none(),
@@ -330,9 +339,12 @@ mod tests {
) {
let mut status = Status::new(Some(MS_100), None);
- status.on_started(); // first manually triggering
- status.on_started(); // second manually triggering
- status.on_finish(); // one finishes
+ // first manually triggering
+ status.on_started();
+ // second manually triggering
+ status.on_started();
+ // one finishes
+ status.on_finish();
assert!(
async_std::future::timeout(10 * MS_100, status.next())
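The `Status` documented above combines a periodic interval with a resettable throttle delay. A self-contained sketch of that resettable-delay idea using `futures_timer::Delay` directly; this illustrates the pattern only and is not the crate's internal type:

use std::{
    task::{Context, Poll},
    time::Duration,
};

use futures::FutureExt;
use futures_timer::Delay;

/// Resettable throttle: every new peer pushes the trigger back by `interval`.
struct Throttle {
    interval: Duration,
    delay: Option<Delay>,
}

impl Throttle {
    fn new(interval: Duration) -> Self {
        Self { interval, delay: None }
    }

    /// Called whenever a peer is inserted into the routing table.
    fn on_new_peer(&mut self) {
        // Restart the countdown; rapid insertions keep deferring the trigger.
        self.delay = Some(Delay::new(self.interval));
    }

    /// Returns `Poll::Ready(())` once the quiet period has elapsed.
    fn poll_trigger(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        match self.delay.as_mut() {
            Some(delay) if delay.poll_unpin(cx).is_ready() => {
                self.delay = None;
                Poll::Ready(())
            }
            _ => Poll::Pending,
        }
    }
}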
diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs
index 384ebc3f2b1..6b4e944e2b0 100644
--- a/protocols/kad/src/handler.rs
+++ b/protocols/kad/src/handler.rs
@@ -18,27 +18,33 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::behaviour::Mode;
-use crate::protocol::{
- KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig,
+use std::{
+ collections::VecDeque,
+ error, fmt, io,
+ marker::PhantomData,
+ pin::Pin,
+ task::{Context, Poll, Waker},
+ time::Duration,
};
-use crate::record::{self, Record};
-use crate::QueryId;
+
use either::Either;
-use futures::channel::oneshot;
-use futures::prelude::*;
-use futures::stream::SelectAll;
+use futures::{channel::oneshot, prelude::*, stream::SelectAll};
use libp2p_core::{upgrade, ConnectedPoint};
use libp2p_identity::PeerId;
-use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound};
use libp2p_swarm::{
+ handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound},
ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol,
SupportedProtocols,
};
-use std::collections::VecDeque;
-use std::task::Waker;
-use std::time::Duration;
-use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll};
+
+use crate::{
+ behaviour::Mode,
+ protocol::{
+ KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig,
+ },
+ record::{self, Record},
+ QueryId,
+};
const MAX_NUM_STREAMS: usize = 32;
@@ -550,7 +556,8 @@ impl Handler {
});
}
- /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol handshake using a [`oneshot::channel`].
+ /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol
+ /// handshake using a [`oneshot::channel`].
fn queue_new_stream(&mut self, id: QueryId, msg: KadRequestMsg) {
let (sender, receiver) = oneshot::channel();
@@ -1060,10 +1067,11 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEven
#[cfg(test)]
mod tests {
- use super::*;
use quickcheck::{Arbitrary, Gen};
use tracing_subscriber::EnvFilter;
+ use super::*;
+
impl Arbitrary for ProtocolStatus {
fn arbitrary(g: &mut Gen) -> Self {
Self {
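`queue_new_stream` pairs each queued `KadRequestMsg` with a `oneshot::channel` so the eventual response can be routed back to the query that asked for it. A tiny standalone example of that pairing with the `futures` oneshot channel (placeholder payload, illustrative only):

use futures::{channel::oneshot, executor::block_on};

fn main() {
    // The requester keeps the receiver; whoever opens the substream later resolves the sender.
    let (sender, receiver) = oneshot::channel::<&'static str>();
    sender.send("negotiated substream").expect("receiver still alive");
    assert_eq!(block_on(receiver).unwrap(), "negotiated substream");
}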
diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs
index fa558878a38..56b3e080d96 100644
--- a/protocols/kad/src/jobs.rs
+++ b/protocols/kad/src/jobs.rs
@@ -25,12 +25,11 @@
//! To ensure persistence of records in the DHT, a Kademlia node
//! must periodically (re-)publish and (re-)replicate its records:
//!
-//! 1. (Re-)publishing: The original publisher or provider of a record
-//! must regularly re-publish in order to prolong the expiration.
+//! 1. (Re-)publishing: The original publisher or provider of a record must regularly re-publish
+//! in order to prolong the expiration.
//!
-//! 2. (Re-)replication: Every node storing a replica of a record must
-//! regularly re-replicate it to the closest nodes to the key in
-//! order to ensure the record is present at these nodes.
+//! 2. (Re-)replication: Every node storing a replica of a record must regularly re-replicate it
+//! to the closest nodes to the key in order to ensure the record is present at these nodes.
//!
//! Re-publishing primarily ensures persistence of the record beyond its
//! initial TTL, for as long as the publisher stores (or provides) the record,
@@ -41,11 +40,10 @@
//!
//! This module implements two periodic jobs:
//!
-//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of
-//! regular (value-)records.
+//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of regular (value-)records.
//!
-//! * [`AddProviderJob`]: For (re-)publication of provider records.
-//! Provider records currently have no separate replication mechanism.
+//! * [`AddProviderJob`]: For (re-)publication of provider records. Provider records currently
+//! have no separate replication mechanism.
//!
//! A periodic job is driven like a `Future` or `Stream` by `poll`ing it.
//! Once a job starts running it emits records to send to the `k` closest
@@ -61,17 +59,21 @@
//! > to the size of all stored records. As a job runs, the records are moved
//! > out of the job to the consumer, where they can be dropped after being sent.
-use crate::record::{self, store::RecordStore, ProviderRecord, Record};
+use std::{
+ collections::HashSet,
+ pin::Pin,
+ task::{Context, Poll},
+ time::Duration,
+ vec,
+};
+
use futures::prelude::*;
use futures_timer::Delay;
use libp2p_identity::PeerId;
-use std::collections::HashSet;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use std::time::Duration;
-use std::vec;
use web_time::Instant;
+use crate::record::{self, store::RecordStore, ProviderRecord, Record};
+
/// The maximum number of queries towards which background jobs
/// are allowed to start new queries on an invocation of
/// `Behaviour::poll`.
@@ -335,12 +337,13 @@ impl AddProviderJob {
#[cfg(test)]
mod tests {
- use super::*;
- use crate::record::store::MemoryStore;
use futures::{executor::block_on, future::poll_fn};
use quickcheck::*;
use rand::Rng;
+ use super::*;
+ use crate::record::store::MemoryStore;
+
fn rand_put_record_job() -> PutRecordJob {
let mut rng = rand::thread_rng();
let id = PeerId::random();
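The job intervals described in this module are driven from the behaviour's `Config`. A sketch of tuning them, assuming setters named `set_replication_interval`, `set_publication_interval` and `set_provider_publication_interval`, where passing `None` is assumed to disable the corresponding job; the durations are illustrative:

use std::time::Duration;

use libp2p_kad::{Config, PROTOCOL_NAME};

fn configured() -> Config {
    let mut cfg = Config::new(PROTOCOL_NAME);
    // Re-replicate value records every hour and re-publish them once a day.
    cfg.set_replication_interval(Some(Duration::from_secs(60 * 60)));
    cfg.set_publication_interval(Some(Duration::from_secs(24 * 60 * 60)));
    // Re-publish provider records twice a day.
    cfg.set_provider_publication_interval(Some(Duration::from_secs(12 * 60 * 60)));
    cfg
}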
diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs
index 99d534fa669..1c6d8857c9c 100644
--- a/protocols/kad/src/kbucket.rs
+++ b/protocols/kad/src/kbucket.rs
@@ -72,13 +72,11 @@ mod entry;
#[allow(clippy::assign_op_pattern)]
mod key;
-pub use bucket::NodeStatus;
-pub use entry::*;
+use std::{collections::VecDeque, num::NonZeroUsize, time::Duration};
use bucket::KBucket;
-use std::collections::VecDeque;
-use std::num::NonZeroUsize;
-use std::time::Duration;
+pub use bucket::NodeStatus;
+pub use entry::*;
use web_time::Instant;
/// Maximum number of k-buckets.
@@ -561,10 +559,11 @@ where
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_identity::PeerId;
use quickcheck::*;
+ use super::*;
+
type TestTable = KBucketsTable<KeyBytes, ()>;
impl Arbitrary for TestTable {
diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs
index ec2b7756c43..244525238ec 100644
--- a/protocols/kad/src/kbucket/bucket.rs
+++ b/protocols/kad/src/kbucket/bucket.rs
@@ -311,19 +311,18 @@ where
///
/// The status of the node to insert determines the result as follows:
///
- /// * `NodeStatus::Connected`: If the bucket is full and either all nodes are connected
- /// or there is already a pending node, insertion fails with `InsertResult::Full`.
- /// If the bucket is full but at least one node is disconnected and there is no pending
- /// node, the new node is inserted as pending, yielding `InsertResult::Pending`.
- /// Otherwise the bucket has free slots and the new node is added to the end of the
- /// bucket as the most-recently connected node.
+ /// * `NodeStatus::Connected`: If the bucket is full and either all nodes are connected or
+ /// there is already a pending node, insertion fails with `InsertResult::Full`. If the
+ /// bucket is full but at least one node is disconnected and there is no pending node, the
+ /// new node is inserted as pending, yielding `InsertResult::Pending`. Otherwise the bucket
+ /// has free slots and the new node is added to the end of the bucket as the most-recently
+ /// connected node.
///
/// * `NodeStatus::Disconnected`: If the bucket is full, insertion fails with
- /// `InsertResult::Full`. Otherwise the bucket has free slots and the new node
- /// is inserted at the position preceding the first connected node,
- /// i.e. as the most-recently disconnected node. If there are no connected nodes,
- /// the new node is added as the last element of the bucket.
- ///
+ /// `InsertResult::Full`. Otherwise the bucket has free slots and the new node is inserted
+ /// at the position preceding the first connected node, i.e. as the most-recently
+ /// disconnected node. If there are no connected nodes, the new node is added as the last
+ /// element of the bucket.
pub(crate) fn insert(
&mut self,
node: Node,
@@ -443,10 +442,11 @@ where
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_identity::PeerId;
use quickcheck::*;
+ use super::*;
+
impl Arbitrary for KBucket<Key<PeerId>, ()> {
fn arbitrary(g: &mut Gen) -> KBucket<Key<PeerId>, ()> {
let timeout = Duration::from_secs(g.gen_range(1..g.size()) as u64);
diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket/entry.rs
index 808db08d858..bdf8b9b5a18 100644
--- a/protocols/kad/src/kbucket/entry.rs
+++ b/protocols/kad/src/kbucket/entry.rs
@@ -23,7 +23,6 @@
pub(crate) use super::bucket::{AppliedPending, InsertResult, Node, K_VALUE};
pub use super::key::*;
-
use super::*;
/// An immutable by-reference view of a bucket entry.
diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs
index f35849c6b26..367dfa807d3 100644
--- a/protocols/kad/src/kbucket/key.rs
+++ b/protocols/kad/src/kbucket/key.rs
@@ -18,15 +18,21 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::record;
+use std::{
+ borrow::Borrow,
+ hash::{Hash, Hasher},
+};
+
use libp2p_core::multihash::Multihash;
use libp2p_identity::PeerId;
-use sha2::digest::generic_array::{typenum::U32, GenericArray};
-use sha2::{Digest, Sha256};
-use std::borrow::Borrow;
-use std::hash::{Hash, Hasher};
+use sha2::{
+ digest::generic_array::{typenum::U32, GenericArray},
+ Digest, Sha256,
+};
use uint::*;
+use crate::record;
+
construct_uint! {
/// 256-bit unsigned integer.
pub(super) struct U256(4);
@@ -200,9 +206,10 @@ impl Distance {
#[cfg(test)]
mod tests {
+ use quickcheck::*;
+
use super::*;
use crate::SHA_256_MH;
- use quickcheck::*;
impl Arbitrary for Key<PeerId> {
fn arbitrary(_: &mut Gen) -> Key<PeerId> {
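`Key` wraps the SHA-256 digest of its preimage and `Distance` is the XOR metric between two such keys. A quick sketch using the publicly re-exported `KBucketKey`; the peer IDs are random and purely illustrative:

use libp2p_identity::PeerId;
use libp2p_kad::KBucketKey;

fn main() {
    let a = KBucketKey::from(PeerId::random());
    let b = KBucketKey::from(PeerId::random());
    // XOR distance over the SHA-256 hashes of the two peer IDs.
    let d = a.distance(&b);
    println!("distance = {d:?}");
}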
diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs
index 060bfc518e4..8ab45665c9b 100644
--- a/protocols/kad/src/lib.rs
+++ b/protocols/kad/src/lib.rs
@@ -54,36 +54,34 @@ mod proto {
};
}
+use std::num::NonZeroUsize;
+
pub use addresses::Addresses;
pub use behaviour::{
AddProviderContext, AddProviderError, AddProviderOk, AddProviderPhase, AddProviderResult,
- BootstrapError, BootstrapOk, BootstrapResult, GetClosestPeersError, GetClosestPeersOk,
- GetClosestPeersResult, GetProvidersError, GetProvidersOk, GetProvidersResult, GetRecordError,
- GetRecordOk, GetRecordResult, InboundRequest, Mode, NoKnownPeers, PeerInfo, PeerRecord,
- PutRecordContext, PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo,
- QueryMut, QueryRef, QueryResult, QueryStats, RoutingUpdate,
-};
-pub use behaviour::{
- Behaviour, BucketInserts, Caching, Config, Event, ProgressStep, Quorum, StoreInserts,
+ Behaviour, BootstrapError, BootstrapOk, BootstrapResult, BucketInserts, Caching, Config, Event,
+ GetClosestPeersError, GetClosestPeersOk, GetClosestPeersResult, GetProvidersError,
+ GetProvidersOk, GetProvidersResult, GetRecordError, GetRecordOk, GetRecordResult,
+ InboundRequest, Mode, NoKnownPeers, PeerInfo, PeerRecord, ProgressStep, PutRecordContext,
+ PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo, QueryMut, QueryRef,
+ QueryResult, QueryStats, Quorum, RoutingUpdate, StoreInserts,
};
pub use kbucket::{
Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus,
};
+use libp2p_swarm::StreamProtocol;
pub use protocol::{ConnectionType, KadPeer};
pub use query::QueryId;
pub use record::{store, Key as RecordKey, ProviderRecord, Record};
-use libp2p_swarm::StreamProtocol;
-use std::num::NonZeroUsize;
-
/// The `k` parameter of the Kademlia specification.
///
/// This parameter determines:
///
/// 1) The (fixed) maximum number of nodes in a bucket.
-/// 2) The (default) replication factor, which in turn determines:
-/// a) The number of closer peers returned in response to a request.
-/// b) The number of closest peers to a key to search for in an iterative query.
+/// 2) The (default) replication factor, which in turn determines:
+///    a) The number of closer peers returned in response to a request.
+///    b) The number of closest peers to a key to search for in an iterative query.
///
/// The choice of (1) is fixed to this constant. The replication factor is configurable
/// but should generally be no greater than `K_VALUE`. All nodes in a Kademlia
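Since the replication factor should stay at or below `K_VALUE`, a configuration sketch might look as follows, assuming the public `Config::set_replication_factor` setter; the chosen factor is illustrative:

use std::num::NonZeroUsize;

use libp2p_kad::{Config, K_VALUE, PROTOCOL_NAME};

fn main() {
    let mut cfg = Config::new(PROTOCOL_NAME);
    // Keep the replication factor at or below the bucket size `K_VALUE`.
    let factor = NonZeroUsize::new(K_VALUE.get() / 2).expect("non-zero");
    cfg.set_replication_factor(factor);
}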
diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs
index 9d2ef56f5d8..9d0d69b670e 100644
--- a/protocols/kad/src/protocol.rs
+++ b/protocols/kad/src/protocol.rs
@@ -26,21 +26,25 @@
//! to poll the underlying transport for incoming messages, and the `Sink` component
//! is used to send messages to remote peers.
-use crate::proto;
-use crate::record::{self, Record};
+use std::{io, iter, marker::PhantomData, time::Duration};
+
use asynchronous_codec::{Decoder, Encoder, Framed};
use bytes::BytesMut;
use futures::prelude::*;
-use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
-use libp2p_core::Multiaddr;
+use libp2p_core::{
+ upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo},
+ Multiaddr,
+};
use libp2p_identity::PeerId;
use libp2p_swarm::StreamProtocol;
-use std::marker::PhantomData;
-use std::time::Duration;
-use std::{io, iter};
use tracing::debug;
use web_time::Instant;
+use crate::{
+ proto,
+ record::{self, Record},
+};
+
/// The protocol name used for negotiating with multistream-select.
pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0");
/// The default maximum size for a varint length-delimited packet.
@@ -667,92 +671,92 @@ mod tests {
assert_eq!(peer.multiaddrs, vec![valid_multiaddr])
}
- /*// TODO: restore
- use self::libp2p_tcp::TcpTransport;
- use self::tokio::runtime::current_thread::Runtime;
- use futures::{Future, Sink, Stream};
- use libp2p_core::{PeerId, PublicKey, Transport};
- use multihash::{encode, Hash};
- use protocol::{ConnectionType, KadPeer, ProtocolConfig};
- use std::sync::mpsc;
- use std::thread;
-
- #[test]
- fn correct_transfer() {
- // We open a server and a client, send a message between the two, and check that they were
- // successfully received.
-
- test_one(KadMsg::Ping);
- test_one(KadMsg::FindNodeReq {
- key: PeerId::random(),
- });
- test_one(KadMsg::FindNodeRes {
- closer_peers: vec![KadPeer {
- node_id: PeerId::random(),
- multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()],
- connection_ty: ConnectionType::Connected,
- }],
- });
- test_one(KadMsg::GetProvidersReq {
- key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(),
- });
- test_one(KadMsg::GetProvidersRes {
- closer_peers: vec![KadPeer {
- node_id: PeerId::random(),
- multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()],
- connection_ty: ConnectionType::Connected,
- }],
- provider_peers: vec![KadPeer {
- node_id: PeerId::random(),
- multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()],
- connection_ty: ConnectionType::NotConnected,
- }],
- });
- test_one(KadMsg::AddProvider {
- key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(),
- provider_peer: KadPeer {
- node_id: PeerId::random(),
- multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()],
- connection_ty: ConnectionType::Connected,
- },
- });
- // TODO: all messages
-
- fn test_one(msg_server: KadMsg) {
- let msg_client = msg_server.clone();
- let (tx, rx) = mpsc::channel();
-
- let bg_thread = thread::spawn(move || {
- let transport = TcpTransport::default().with_upgrade(ProtocolConfig);
-
- let (listener, addr) = transport
- .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap())
- .unwrap();
- tx.send(addr).unwrap();
-
- let future = listener
- .into_future()
- .map_err(|(err, _)| err)
- .and_then(|(client, _)| client.unwrap().0)
- .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, _)| v))
- .map(|recv_msg| {
- assert_eq!(recv_msg.unwrap(), msg_server);
- ()
- });
- let mut rt = Runtime::new().unwrap();
- let _ = rt.block_on(future).unwrap();
- });
-
- let transport = TcpTransport::default().with_upgrade(ProtocolConfig);
-
- let future = transport
- .dial(rx.recv().unwrap())
- .unwrap()
- .and_then(|proto| proto.send(msg_client))
- .map(|_| ());
- let mut rt = Runtime::new().unwrap();
- let _ = rt.block_on(future).unwrap();
- bg_thread.join().unwrap();
- }
- }*/
+ // // TODO: restore
+ // use self::libp2p_tcp::TcpTransport;
+ // use self::tokio::runtime::current_thread::Runtime;
+ // use futures::{Future, Sink, Stream};
+ // use libp2p_core::{PeerId, PublicKey, Transport};
+ // use multihash::{encode, Hash};
+ // use protocol::{ConnectionType, KadPeer, ProtocolConfig};
+ // use std::sync::mpsc;
+ // use std::thread;
+ //
+ // #[test]
+ // fn correct_transfer() {
+ // We open a server and a client, send a message between the two, and check that they were
+ // successfully received.
+ //
+ // test_one(KadMsg::Ping);
+ // test_one(KadMsg::FindNodeReq {
+ // key: PeerId::random(),
+ // });
+ // test_one(KadMsg::FindNodeRes {
+ // closer_peers: vec![KadPeer {
+ // node_id: PeerId::random(),
+ // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()],
+ // connection_ty: ConnectionType::Connected,
+ // }],
+ // });
+ // test_one(KadMsg::GetProvidersReq {
+ // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(),
+ // });
+ // test_one(KadMsg::GetProvidersRes {
+ // closer_peers: vec![KadPeer {
+ // node_id: PeerId::random(),
+ // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()],
+ // connection_ty: ConnectionType::Connected,
+ // }],
+ // provider_peers: vec![KadPeer {
+ // node_id: PeerId::random(),
+ // multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()],
+ // connection_ty: ConnectionType::NotConnected,
+ // }],
+ // });
+ // test_one(KadMsg::AddProvider {
+ // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(),
+ // provider_peer: KadPeer {
+ // node_id: PeerId::random(),
+ // multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()],
+ // connection_ty: ConnectionType::Connected,
+ // },
+ // });
+ // TODO: all messages
+ //
+ // fn test_one(msg_server: KadMsg) {
+ // let msg_client = msg_server.clone();
+ // let (tx, rx) = mpsc::channel();
+ //
+ // let bg_thread = thread::spawn(move || {
+ // let transport = TcpTransport::default().with_upgrade(ProtocolConfig);
+ //
+ // let (listener, addr) = transport
+ // .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap())
+ // .unwrap();
+ // tx.send(addr).unwrap();
+ //
+ // let future = listener
+ // .into_future()
+ // .map_err(|(err, _)| err)
+ // .and_then(|(client, _)| client.unwrap().0)
+ // .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, _)| v))
+ // .map(|recv_msg| {
+ // assert_eq!(recv_msg.unwrap(), msg_server);
+ // ()
+ // });
+ // let mut rt = Runtime::new().unwrap();
+ // let _ = rt.block_on(future).unwrap();
+ // });
+ //
+ // let transport = TcpTransport::default().with_upgrade(ProtocolConfig);
+ //
+ // let future = transport
+ // .dial(rx.recv().unwrap())
+ // .unwrap()
+ // .and_then(|proto| proto.send(msg_client))
+ // .map(|_| ());
+ // let mut rt = Runtime::new().unwrap();
+ // let _ = rt.block_on(future).unwrap();
+ // bg_thread.join().unwrap();
+ // }
+ // }
}
diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs
index 1a895d9627c..69257f73b26 100644
--- a/protocols/kad/src/query.rs
+++ b/protocols/kad/src/query.rs
@@ -20,24 +20,27 @@
mod peers;
-use libp2p_core::Multiaddr;
-use peers::closest::{
- disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig,
-};
-use peers::fixed::FixedPeersIter;
-use peers::PeersIterState;
-use smallvec::SmallVec;
+use std::{num::NonZeroUsize, time::Duration};
-use crate::behaviour::PeerInfo;
-use crate::handler::HandlerIn;
-use crate::kbucket::{Key, KeyBytes};
-use crate::{QueryInfo, ALPHA_VALUE, K_VALUE};
use either::Either;
use fnv::FnvHashMap;
+use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
-use std::{num::NonZeroUsize, time::Duration};
+use peers::{
+ closest::{disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig},
+ fixed::FixedPeersIter,
+ PeersIterState,
+};
+use smallvec::SmallVec;
use web_time::Instant;
+use crate::{
+ behaviour::PeerInfo,
+ handler::HandlerIn,
+ kbucket::{Key, KeyBytes},
+ QueryInfo, ALPHA_VALUE, K_VALUE,
+};
+
/// A `QueryPool` provides an aggregate state machine for driving `Query`s to completion.
///
/// Internally, a `Query` is in turn driven by an underlying `QueryPeerIter`
diff --git a/protocols/kad/src/query/peers.rs b/protocols/kad/src/query/peers.rs
index 11b8f974de9..fe8ada51e44 100644
--- a/protocols/kad/src/query/peers.rs
+++ b/protocols/kad/src/query/peers.rs
@@ -23,13 +23,11 @@
//! Using a peer iterator in a query involves performing the following steps
//! repeatedly and in an alternating fashion:
//!
-//! 1. Calling `next` to observe the next state of the iterator and determine
-//! what to do, which is to either issue new requests to peers or continue
-//! waiting for responses.
+//! 1. Calling `next` to observe the next state of the iterator and determine what to do, which is
+//! to either issue new requests to peers or continue waiting for responses.
//!
-//! 2. When responses are received or requests fail, providing input to the
-//! iterator via the `on_success` and `on_failure` callbacks,
-//! respectively, followed by repeating step (1).
+//! 2. When responses are received or requests fail, providing input to the iterator via the
+//! `on_success` and `on_failure` callbacks, respectively, followed by repeating step (1).
//!
//! When a call to `next` returns [`Finished`], no more peers can be obtained
//! from the iterator and the results can be obtained from `into_result`.
@@ -40,9 +38,10 @@
pub(crate) mod closest;
pub(crate) mod fixed;
-use libp2p_identity::PeerId;
use std::borrow::Cow;
+use libp2p_identity::PeerId;
+
/// The state of a peer iterator.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PeersIterState<'a> {
diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs
index 2505ee2e9b2..2d1f91f050c 100644
--- a/protocols/kad/src/query/peers/closest.rs
+++ b/protocols/kad/src/query/peers/closest.rs
@@ -18,14 +18,20 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use super::*;
+use std::{
+ collections::btree_map::{BTreeMap, Entry},
+ num::NonZeroUsize,
+ time::Duration,
+};
-use crate::kbucket::{Distance, Key, KeyBytes};
-use crate::{ALPHA_VALUE, K_VALUE};
-use std::collections::btree_map::{BTreeMap, Entry};
-use std::{num::NonZeroUsize, time::Duration};
use web_time::Instant;
+use super::*;
+use crate::{
+ kbucket::{Distance, Key, KeyBytes},
+ ALPHA_VALUE, K_VALUE,
+};
+
pub(crate) mod disjoint;
/// A peer iterator for a dynamically changing list of peers, sorted by increasing
/// distance to a chosen target.
@@ -494,12 +500,14 @@ enum PeerState {
#[cfg(test)]
mod tests {
- use super::*;
- use crate::SHA_256_MH;
+ use std::iter;
+
use libp2p_core::multihash::Multihash;
use quickcheck::*;
use rand::{rngs::StdRng, Rng, SeedableRng};
- use std::iter;
+
+ use super::*;
+ use crate::SHA_256_MH;
fn random_peers(n: usize, g: &mut R) -> Vec {
(0..n)
diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs
index cafe87b6ef4..70ded360c7e 100644
--- a/protocols/kad/src/query/peers/closest/disjoint.rs
+++ b/protocols/kad/src/query/peers/closest/disjoint.rs
@@ -18,13 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use super::*;
use std::{
collections::HashMap,
iter::{Cycle, Map, Peekable},
ops::{Index, IndexMut, Range},
};
+use super::*;
+
/// Wraps around a set of [`ClosestPeersIter`], enforcing a disjoint discovery
/// path per configured parallelism according to the S/Kademlia paper.
pub(crate) struct ClosestDisjointPeersIter {
@@ -373,7 +374,6 @@ enum ResponseState {
/// Iterator combining the result of multiple [`ClosestPeersIter`] into a single
/// deduplicated ordered iterator.
-//
// Note: This operates under the assumption that `I` is ordered.
#[derive(Clone, Debug)]
struct ResultIter
@@ -433,13 +433,13 @@ impl>> Iterator for ResultIter {
#[cfg(test)]
mod tests {
- use super::*;
+ use std::{collections::HashSet, iter};
- use crate::SHA_256_MH;
use libp2p_core::multihash::Multihash;
use quickcheck::*;
- use std::collections::HashSet;
- use std::iter;
+
+ use super::*;
+ use crate::SHA_256_MH;
impl Arbitrary for ResultIter>> {
fn arbitrary(g: &mut Gen) -> Self {
diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs
index 2d0b312454d..41cb3559f1b 100644
--- a/protocols/kad/src/query/peers/fixed.rs
+++ b/protocols/kad/src/query/peers/fixed.rs
@@ -18,10 +18,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use super::*;
+use std::{collections::hash_map::Entry, num::NonZeroUsize, vec};
use fnv::FnvHashMap;
-use std::{collections::hash_map::Entry, num::NonZeroUsize, vec};
+
+use super::*;
/// A peer iterator for a fixed set of peers.
pub(crate) struct FixedPeersIter {
diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record.rs
index b8a644acdd6..fea17f826a4 100644
--- a/protocols/kad/src/record.rs
+++ b/protocols/kad/src/record.rs
@@ -22,13 +22,16 @@
pub mod store;
+use std::{
+ borrow::Borrow,
+ hash::{Hash, Hasher},
+};
+
use bytes::Bytes;
use libp2p_core::{multihash::Multihash, Multiaddr};
use libp2p_identity::PeerId;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
-use std::borrow::Borrow;
-use std::hash::{Hash, Hasher};
use web_time::Instant;
/// The (opaque) key of a record.
@@ -160,10 +163,12 @@ impl ProviderRecord {
#[cfg(test)]
mod tests {
+ use std::time::Duration;
+
+ use quickcheck::*;
+
use super::*;
use crate::SHA_256_MH;
- use quickcheck::*;
- use std::time::Duration;
impl Arbitrary for Key {
fn arbitrary(g: &mut Gen) -> Key {
diff --git a/protocols/kad/src/record/store.rs b/protocols/kad/src/record/store.rs
index 5c25bc8b2fa..ee40f568bb3 100644
--- a/protocols/kad/src/record/store.rs
+++ b/protocols/kad/src/record/store.rs
@@ -20,12 +20,13 @@
mod memory;
+use std::borrow::Cow;
+
pub use memory::{MemoryStore, MemoryStoreConfig};
use thiserror::Error;
use super::*;
use crate::K_VALUE;
-use std::borrow::Cow;
/// The result of an operation on a `RecordStore`.
pub type Result = std::result::Result;
@@ -50,20 +51,16 @@ pub enum Error {
///
/// There are two types of records managed by a `RecordStore`:
///
-/// 1. Regular (value-)records. These records store an arbitrary value
-/// associated with a key which is distributed to the closest nodes
-/// to the key in the Kademlia DHT as per the standard Kademlia "push-model".
-/// These records are subject to re-replication and re-publication as
+/// 1. Regular (value-)records. These records store an arbitrary value associated with a key which
+/// is distributed to the closest nodes to the key in the Kademlia DHT as per the standard
+/// Kademlia "push-model". These records are subject to re-replication and re-publication as
/// per the standard Kademlia protocol.
///
-/// 2. Provider records. These records associate the ID of a peer with a key
-/// who can supposedly provide the associated value. These records are
-/// mere "pointers" to the data which may be followed by contacting these
-/// providers to obtain the value. These records are specific to the
-/// libp2p Kademlia specification and realise a "pull-model" for distributed
-/// content. Just like a regular record, a provider record is distributed
-/// to the closest nodes to the key.
-///
+/// 2. Provider records. These records associate a key with the ID of a peer that can supposedly
+///    provide the associated value. These records are mere "pointers" to the data which may be
+/// followed by contacting these providers to obtain the value. These records are specific to
+/// the libp2p Kademlia specification and realise a "pull-model" for distributed content. Just
+/// like a regular record, a provider record is distributed to the closest nodes to the key.
pub trait RecordStore {
type RecordsIter<'a>: Iterator
- >
where
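A short sketch of the two record types against the bundled `MemoryStore`, assuming the re-exported `Record`, `ProviderRecord`, `RecordKey` and `store::{MemoryStore, RecordStore}` items; keys and values are placeholders:

use libp2p_identity::PeerId;
use libp2p_kad::{
    store::{MemoryStore, RecordStore},
    ProviderRecord, Record, RecordKey,
};

fn main() {
    let local = PeerId::random();
    let mut store = MemoryStore::new(local);

    // 1. A regular value record: the value itself travels to the nodes closest to the key.
    let key = RecordKey::new(&b"some-key".to_vec());
    store
        .put(Record::new(key.clone(), b"some-value".to_vec()))
        .expect("store below capacity");

    // 2. A provider record: only a pointer ("ask this peer") is stored under the key.
    store
        .add_provider(ProviderRecord::new(key, local, Vec::new()))
        .expect("store below capacity");
}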
diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs
index 3fb6d2be3e8..28f6a55044f 100644
--- a/protocols/kad/src/record/store/memory.rs
+++ b/protocols/kad/src/record/store/memory.rs
@@ -18,12 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use super::*;
+use std::{
+ collections::{hash_map, hash_set, HashMap, HashSet},
+ iter,
+};
-use crate::kbucket;
use smallvec::SmallVec;
-use std::collections::{hash_map, hash_set, HashMap, HashSet};
-use std::iter;
+
+use super::*;
+use crate::kbucket;
/// In-memory implementation of a `RecordStore`.
pub struct MemoryStore {
@@ -208,11 +211,12 @@ impl RecordStore for MemoryStore {
#[cfg(test)]
mod tests {
- use super::*;
- use crate::SHA_256_MH;
use quickcheck::*;
use rand::Rng;
+ use super::*;
+ use crate::SHA_256_MH;
+
fn random_multihash() -> Multihash<64> {
Multihash::wrap(SHA_256_MH, &rand::thread_rng().gen::<[u8; 32]>()).unwrap()
}
diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs
index 2c8d11beac7..3275c525890 100644
--- a/protocols/kad/tests/client_mode.rs
+++ b/protocols/kad/tests/client_mode.rs
@@ -1,7 +1,6 @@
use libp2p_identify as identify;
use libp2p_identity as identity;
-use libp2p_kad::store::MemoryStore;
-use libp2p_kad::{Behaviour, Config, Event, Mode};
+use libp2p_kad::{store::MemoryStore, Behaviour, Config, Event, Mode};
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
use tracing_subscriber::EnvFilter;
@@ -104,7 +103,9 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti
// Server learns its external address (this could be through AutoNAT or some other mechanism).
server.add_external_address(memory_addr);
- // The server reconfigured its connection to the client to be in server mode, pushes that information to client which as a result updates its routing table and triggers a mode change to Mode::Server.
+ // The server reconfigured its connection to the client to be in server mode and
+ // pushes that information to the client, which as a result updates its routing
+ // table and triggers a mode change to Mode::Server.
match libp2p_swarm_test::drive(&mut client, &mut server).await {
(
[Identify(identify::Event::Received { .. }), Kad(RoutingUpdated { peer: peer1, .. })],
diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs
index cecd27bf78b..b6dde8f4487 100644
--- a/protocols/mdns/src/behaviour.rs
+++ b/protocols/mdns/src/behaviour.rs
@@ -22,25 +22,34 @@ mod iface;
mod socket;
mod timer;
-use self::iface::InterfaceState;
-use crate::behaviour::{socket::AsyncSocket, timer::Builder};
-use crate::Config;
-use futures::channel::mpsc;
-use futures::{Stream, StreamExt};
+use std::{
+ cmp,
+ collections::hash_map::{Entry, HashMap},
+ fmt,
+ future::Future,
+ io,
+ net::IpAddr,
+ pin::Pin,
+ sync::{Arc, RwLock},
+ task::{Context, Poll},
+ time::Instant,
+};
+
+use futures::{channel::mpsc, Stream, StreamExt};
use if_watch::IfEvent;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::FromSwarm;
use libp2p_swarm::{
- dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler,
- THandlerInEvent, THandlerOutEvent, ToSwarm,
+ behaviour::FromSwarm, dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour,
+ THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
use smallvec::SmallVec;
-use std::collections::hash_map::{Entry, HashMap};
-use std::future::Future;
-use std::sync::{Arc, RwLock};
-use std::{cmp, fmt, io, net::IpAddr, pin::Pin, task::Context, task::Poll, time::Instant};
+
+use self::iface::InterfaceState;
+use crate::{
+ behaviour::{socket::AsyncSocket, timer::Builder},
+ Config,
+};
/// An abstraction to allow for compatibility with various async runtimes.
pub trait Provider: 'static {
@@ -68,11 +77,13 @@ pub trait Abort {
/// The type of a [`Behaviour`] using the `async-io` implementation.
#[cfg(feature = "async-io")]
pub mod async_io {
- use super::Provider;
- use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort};
+ use std::future::Future;
+
use async_std::task::JoinHandle;
use if_watch::smol::IfWatcher;
- use std::future::Future;
+
+ use super::Provider;
+ use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort};
#[doc(hidden)]
pub enum AsyncIo {}
@@ -104,12 +115,14 @@ pub mod async_io {
/// The type of a [`Behaviour`] using the `tokio` implementation.
#[cfg(feature = "tokio")]
pub mod tokio {
- use super::Provider;
- use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort};
- use if_watch::tokio::IfWatcher;
use std::future::Future;
+
+ use if_watch::tokio::IfWatcher;
use tokio::task::JoinHandle;
+ use super::Provider;
+ use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort};
+
#[doc(hidden)]
pub enum Tokio {}
@@ -170,7 +183,8 @@ where
/// The current set of listen addresses.
///
/// This is shared across all interface tasks using an [`RwLock`].
- /// The [`Behaviour`] updates this upon new [`FromSwarm`] events where as [`InterfaceState`]s read from it to answer inbound mDNS queries.
+ /// The [`Behaviour`] updates this upon new [`FromSwarm`]
+ /// events, whereas [`InterfaceState`]s read from it to answer inbound mDNS queries.
listen_addresses: Arc<RwLock<ListenAddresses>>,
local_peer_id: PeerId,
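Each runtime module above exposes its own `Behaviour` alias over the shared generic implementation. A minimal construction sketch for the tokio flavour, assuming the `tokio` feature and a `Behaviour::new(Config, PeerId)` constructor returning `io::Result`:

use libp2p_identity::PeerId;
use libp2p_mdns::{tokio::Behaviour, Config};

fn mdns(local_peer_id: PeerId) -> std::io::Result<Behaviour> {
    // Default configuration; query interval, record TTL and IPv6 support can be tweaked on `Config`.
    Behaviour::new(Config::default(), local_peer_id)
}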
diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs
index 9302065cde2..873bb8a307b 100644
--- a/protocols/mdns/src/behaviour/iface.rs
+++ b/protocols/mdns/src/behaviour/iface.rs
@@ -21,27 +21,32 @@
mod dns;
mod query;
-use self::dns::{build_query, build_query_response, build_service_discovery_response};
-use self::query::MdnsPacket;
-use crate::behaviour::{socket::AsyncSocket, timer::Builder};
-use crate::Config;
-use futures::channel::mpsc;
-use futures::{SinkExt, StreamExt};
-use libp2p_core::Multiaddr;
-use libp2p_identity::PeerId;
-use libp2p_swarm::ListenAddresses;
-use socket2::{Domain, Socket, Type};
-use std::future::Future;
-use std::sync::{Arc, RwLock};
use std::{
collections::VecDeque,
+ future::Future,
io,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket},
pin::Pin,
+ sync::{Arc, RwLock},
task::{Context, Poll},
time::{Duration, Instant},
};
+use futures::{channel::mpsc, SinkExt, StreamExt};
+use libp2p_core::Multiaddr;
+use libp2p_identity::PeerId;
+use libp2p_swarm::ListenAddresses;
+use socket2::{Domain, Socket, Type};
+
+use self::{
+ dns::{build_query, build_query_response, build_service_discovery_response},
+ query::MdnsPacket,
+};
+use crate::{
+ behaviour::{socket::AsyncSocket, timer::Builder},
+ Config,
+};
+
/// Initial interval for starting probe
const INITIAL_TIMEOUT_INTERVAL: Duration = Duration::from_millis(500);
diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs
index 39dbf08c731..35cba44f4af 100644
--- a/protocols/mdns/src/behaviour/iface/dns.rs
+++ b/protocols/mdns/src/behaviour/iface/dns.rs
@@ -20,12 +20,13 @@
//! (M)DNS encoding and decoding on top of the `dns_parser` library.
-use crate::{META_QUERY_SERVICE, SERVICE_NAME};
+use std::{borrow::Cow, cmp, error, fmt, str, time::Duration};
+
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
-use std::{borrow::Cow, cmp, error, fmt, str, time::Duration};
+use rand::{distributions::Alphanumeric, thread_rng, Rng};
+
+use crate::{META_QUERY_SERVICE, SERVICE_NAME};
/// DNS TXT records can have up to 255 characters as a single string value.
///
@@ -293,7 +294,6 @@ fn generate_peer_name() -> Vec {
/// Panics if `name` has a zero-length component or a component that is too long.
/// This is fine considering that this function is not public and is only called in a controlled
/// environment.
-///
fn append_qname(out: &mut Vec, name: &[u8]) {
debug_assert!(name.is_ascii());
@@ -394,10 +394,11 @@ impl error::Error for MdnsResponseError {}
#[cfg(test)]
mod tests {
- use super::*;
use hickory_proto::op::Message;
use libp2p_identity as identity;
+ use super::*;
+
#[test]
fn build_query_correct() {
let query = build_query();
diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs
index 70b84816d0f..7762ac5d214 100644
--- a/protocols/mdns/src/behaviour/iface/query.rs
+++ b/protocols/mdns/src/behaviour/iface/query.rs
@@ -18,18 +18,23 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use super::dns;
-use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN};
+use std::{
+ fmt,
+ net::SocketAddr,
+ str,
+ time::{Duration, Instant},
+};
+
use hickory_proto::{
op::Message,
rr::{Name, RData},
};
use libp2p_core::multiaddr::{Multiaddr, Protocol};
+use libp2p_identity::PeerId;
use libp2p_swarm::_address_translation;
-use libp2p_identity::PeerId;
-use std::time::Instant;
-use std::{fmt, net::SocketAddr, str, time::Duration};
+use super::dns;
+use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN};
/// A valid mDNS packet received by the service.
#[derive(Debug)]
@@ -69,7 +74,8 @@ impl MdnsPacket {
.iter()
.any(|q| q.name().to_utf8() == META_QUERY_SERVICE_FQDN)
{
- // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE?
+ // TODO: what if multiple questions,
+ // one with SERVICE_NAME and one with META_QUERY_SERVICE?
return Ok(Some(MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery {
from,
query_id: packet.header().id(),
@@ -307,8 +313,7 @@ impl fmt::Debug for MdnsPeer {
#[cfg(test)]
mod tests {
- use super::super::dns::build_query_response;
- use super::*;
+ use super::{super::dns::build_query_response, *};
#[test]
fn test_create_mdns_peer() {
diff --git a/protocols/mdns/src/behaviour/socket.rs b/protocols/mdns/src/behaviour/socket.rs
index ebaad17e45f..cf11450fb4b 100644
--- a/protocols/mdns/src/behaviour/socket.rs
+++ b/protocols/mdns/src/behaviour/socket.rs
@@ -24,7 +24,8 @@ use std::{
task::{Context, Poll},
};
-/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async mode
+/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async
+/// mode
#[allow(unreachable_pub)] // Users should not depend on this.
pub trait AsyncSocket: Unpin + Send + 'static {
/// Create the async socket from the [`std::net::UdpSocket`]
@@ -32,7 +33,8 @@ pub trait AsyncSocket: Unpin + Send + 'static {
where
Self: Sized;
- /// Attempts to receive a single packet on the socket from the remote address to which it is connected.
+ /// Attempts to receive a single packet on the socket
+ /// from the remote address to which it is connected.
fn poll_read(
&mut self,
_cx: &mut Context,
@@ -50,10 +52,11 @@ pub trait AsyncSocket: Unpin + Send + 'static {
#[cfg(feature = "async-io")]
pub(crate) mod asio {
- use super::*;
use async_io::Async;
use futures::FutureExt;
+ use super::*;
+
/// AsyncIo UdpSocket
pub(crate) type AsyncUdpSocket = Async;
impl AsyncSocket for AsyncUdpSocket {
@@ -92,9 +95,10 @@ pub(crate) mod asio {
#[cfg(feature = "tokio")]
pub(crate) mod tokio {
- use super::*;
use ::tokio::{io::ReadBuf, net::UdpSocket as TkUdpSocket};
+ use super::*;
+
/// Tokio ASync Socket`
pub(crate) type TokioUdpSocket = TkUdpSocket;
impl AsyncSocket for TokioUdpSocket {
diff --git a/protocols/mdns/src/behaviour/timer.rs b/protocols/mdns/src/behaviour/timer.rs
index 5e284654676..5fdb1beffae 100644
--- a/protocols/mdns/src/behaviour/timer.rs
+++ b/protocols/mdns/src/behaviour/timer.rs
@@ -42,14 +42,16 @@ pub trait Builder: Send + Unpin + 'static {
#[cfg(feature = "async-io")]
pub(crate) mod asio {
- use super::*;
- use async_io::Timer as AsioTimer;
- use futures::Stream;
use std::{
pin::Pin,
task::{Context, Poll},
};
+ use async_io::Timer as AsioTimer;
+ use futures::Stream;
+
+ use super::*;
+
/// Async Timer
pub(crate) type AsyncTimer = Timer;
impl Builder for AsyncTimer {
@@ -83,14 +85,16 @@ pub(crate) mod asio {
#[cfg(feature = "tokio")]
pub(crate) mod tokio {
- use super::*;
- use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior};
- use futures::Stream;
use std::{
pin::Pin,
task::{Context, Poll},
};
+ use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior};
+ use futures::Stream;
+
+ use super::*;
+
/// Tokio wrapper
pub(crate) type TokioTimer = Timer;
impl Builder for TokioTimer {
diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs
index 4823d740272..a0086a0e2d5 100644
--- a/protocols/mdns/src/lib.rs
+++ b/protocols/mdns/src/lib.rs
@@ -31,21 +31,20 @@
//! This crate provides a `Mdns` and `TokioMdns`, depending on the enabled features, which
//! implements the `NetworkBehaviour` trait. This struct will automatically discover other
//! libp2p nodes on the local network.
-//!
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-use std::net::{Ipv4Addr, Ipv6Addr};
-use std::time::Duration;
+use std::{
+ net::{Ipv4Addr, Ipv6Addr},
+ time::Duration,
+};
mod behaviour;
-pub use crate::behaviour::{Behaviour, Event};
-
#[cfg(feature = "async-io")]
pub use crate::behaviour::async_io;
-
#[cfg(feature = "tokio")]
pub use crate::behaviour::tokio;
+pub use crate::behaviour::{Behaviour, Event};
/// The DNS service name for all libp2p peers used to query for addresses.
const SERVICE_NAME: &[u8] = b"_p2p._udp.local";
diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs
index 549f70978af..df08b39af07 100644
--- a/protocols/mdns/tests/use-async-std.rs
+++ b/protocols/mdns/tests/use-async-std.rs
@@ -18,12 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.use futures::StreamExt;
+use std::time::Duration;
+
use futures::future::Either;
-use libp2p_mdns::Event;
-use libp2p_mdns::{async_io::Behaviour, Config};
+use libp2p_mdns::{async_io::Behaviour, Config, Event};
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[async_std::test]
diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs
index cf0d9f4bed4..0ec90a52b90 100644
--- a/protocols/mdns/tests/use-tokio.rs
+++ b/protocols/mdns/tests/use-tokio.rs
@@ -17,11 +17,12 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.use futures::StreamExt;
+use std::time::Duration;
+
use futures::future::Either;
use libp2p_mdns::{tokio::Behaviour, Config, Event};
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt as _;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::test]
diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs
index 9a4cfb8bcac..506455f081a 100644
--- a/protocols/perf/src/bin/perf.rs
+++ b/protocols/perf/src/bin/perf.rs
@@ -23,12 +23,13 @@ use std::{net::SocketAddr, str::FromStr};
use anyhow::{bail, Result};
use clap::Parser;
use futures::StreamExt;
-use libp2p::core::{multiaddr::Protocol, upgrade, Multiaddr};
-use libp2p::identity::PeerId;
-use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent};
-use libp2p::SwarmBuilder;
-use libp2p_perf::{client, server};
-use libp2p_perf::{Final, Intermediate, Run, RunParams, RunUpdate};
+use libp2p::{
+ core::{multiaddr::Protocol, upgrade, Multiaddr},
+ identity::PeerId,
+ swarm::{NetworkBehaviour, Swarm, SwarmEvent},
+ SwarmBuilder,
+};
+use libp2p_perf::{client, server, Final, Intermediate, Run, RunParams, RunUpdate};
use serde::{Deserialize, Serialize};
use tracing_subscriber::EnvFilter;
use web_time::{Duration, Instant};
diff --git a/protocols/perf/src/client.rs b/protocols/perf/src/client.rs
index 9f984a5bba1..7699bc85c17 100644
--- a/protocols/perf/src/client.rs
+++ b/protocols/perf/src/client.rs
@@ -21,11 +21,13 @@
mod behaviour;
mod handler;
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::{
+ convert::Infallible,
+ sync::atomic::{AtomicUsize, Ordering},
+};
pub use behaviour::{Behaviour, Event};
use libp2p_swarm::StreamUpgradeError;
-use std::convert::Infallible;
static NEXT_RUN_ID: AtomicUsize = AtomicUsize::new(1);
diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs
index 1b181557acc..86c85d61da9 100644
--- a/protocols/perf/src/client/behaviour.rs
+++ b/protocols/perf/src/client/behaviour.rs
@@ -32,10 +32,8 @@ use libp2p_swarm::{
NetworkBehaviour, NotifyHandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use crate::RunParams;
-use crate::{client::handler::Handler, RunUpdate};
-
use super::{RunError, RunId};
+use crate::{client::handler::Handler, RunParams, RunUpdate};
#[derive(Debug)]
pub struct Event {
diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs
index 85e864949f8..fc427d8134c 100644
--- a/protocols/perf/src/client/handler.rs
+++ b/protocols/perf/src/client/handler.rs
@@ -36,8 +36,10 @@ use libp2p_swarm::{
ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol,
};
-use crate::client::{RunError, RunId};
-use crate::{RunParams, RunUpdate};
+use crate::{
+ client::{RunError, RunId},
+ RunParams, RunUpdate,
+};
#[derive(Debug)]
pub struct Command {
diff --git a/protocols/perf/src/protocol.rs b/protocols/perf/src/protocol.rs
index f995bbe2d3b..d07c90fa951 100644
--- a/protocols/perf/src/protocol.rs
+++ b/protocols/perf/src/protocol.rs
@@ -18,14 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use futures_timer::Delay;
use std::time::Duration;
-use web_time::Instant;
use futures::{
future::{select, Either},
AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, Stream, StreamExt,
};
+use futures_timer::Delay;
+use web_time::Instant;
use crate::{Final, Intermediate, Run, RunDuration, RunParams, RunUpdate};
diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs
index 5408029e85d..22466bfe56a 100644
--- a/protocols/perf/src/server/behaviour.rs
+++ b/protocols/perf/src/server/behaviour.rs
@@ -31,8 +31,7 @@ use libp2p_swarm::{
ConnectionId, FromSwarm, NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use crate::server::handler::Handler;
-use crate::Run;
+use crate::{server::handler::Handler, Run};
#[derive(Debug)]
pub struct Event {
diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs
index c1363ae2380..a78485cd9b5 100644
--- a/protocols/perf/src/server/handler.rs
+++ b/protocols/perf/src/server/handler.rs
@@ -18,7 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::task::{Context, Poll};
+use std::{
+ convert::Infallible,
+ task::{Context, Poll},
+};
use futures::FutureExt;
use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade};
@@ -29,7 +32,6 @@ use libp2p_swarm::{
},
ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol,
};
-use std::convert::Infallible;
use tracing::error;
use crate::Run;
diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs
index 961716e934a..c7d65c64500 100644
--- a/protocols/ping/src/handler.rs
+++ b/protocols/ping/src/handler.rs
@@ -18,27 +18,29 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::{protocol, PROTOCOL_NAME};
-use futures::future::{BoxFuture, Either};
-use futures::prelude::*;
-use futures_timer::Delay;
-use libp2p_core::upgrade::ReadyUpgrade;
-use libp2p_swarm::handler::{
- ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
-};
-use libp2p_swarm::{
- ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError,
- SubstreamProtocol,
-};
-use std::collections::VecDeque;
-use std::convert::Infallible;
use std::{
+ collections::VecDeque,
+ convert::Infallible,
error::Error,
fmt, io,
task::{Context, Poll},
time::Duration,
};
+use futures::{
+ future::{BoxFuture, Either},
+ prelude::*,
+};
+use futures_timer::Delay;
+use libp2p_core::upgrade::ReadyUpgrade;
+use libp2p_swarm::{
+ handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound},
+ ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError,
+ SubstreamProtocol,
+};
+
+use crate::{protocol, PROTOCOL_NAME};
+
/// The configuration for outbound pings.
#[derive(Debug, Clone)]
pub struct Config {
@@ -57,8 +59,7 @@ impl Config {
/// These settings have the following effect:
///
/// * A ping is sent every 15 seconds on a healthy connection.
- /// * Every ping sent must yield a response within 20 seconds in order to
- /// be successful.
+ /// * Every ping sent must yield a response within 20 seconds in order to be successful.
pub fn new() -> Self {
Self {
timeout: Duration::from_secs(20),
diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs
index 82f240cab6b..d48bcbc98ab 100644
--- a/protocols/ping/src/lib.rs
+++ b/protocols/ping/src/lib.rs
@@ -27,9 +27,11 @@
//! # Usage
//!
//! The [`Behaviour`] struct implements the [`NetworkBehaviour`] trait.
-//! It will respond to inbound ping requests and periodically send outbound ping requests on every established connection.
+//! It will respond to inbound ping requests and periodically send outbound ping requests on every
+//! established connection.
//!
-//! It is up to the user to implement a health-check / connection management policy based on the ping protocol.
+//! It is up to the user to implement a health-check / connection management policy based on the
+//! ping protocol.
//!
//! For example:
//!
@@ -39,8 +41,10 @@
//!
//! Users should inspect emitted [`Event`]s and call APIs on [`Swarm`]:
//!
-//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific connection
-//! - [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all connections to a peer
+//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific
+//! connection
+//! - [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all
+//! connections to a peer
//!
//! [`Swarm`]: libp2p_swarm::Swarm
//! [`Transport`]: libp2p_core::Transport
@@ -50,22 +54,22 @@
mod handler;
mod protocol;
+use std::{
+ collections::VecDeque,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use handler::Handler;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+pub use handler::{Config, Failure};
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler,
THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::time::Duration;
-use std::{
- collections::VecDeque,
- task::{Context, Poll},
-};
pub use self::protocol::PROTOCOL_NAME;
-pub use handler::{Config, Failure};
/// A [`NetworkBehaviour`] that responds to inbound pings and
/// periodically sends outbound pings on every established connection.
diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs
index 101c219aac4..5e84f55e090 100644
--- a/protocols/ping/src/protocol.rs
+++ b/protocols/ping/src/protocol.rs
@@ -18,10 +18,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{io, time::Duration};
+
use futures::prelude::*;
use libp2p_swarm::StreamProtocol;
use rand::{distributions, prelude::*};
-use std::{io, time::Duration};
use web_time::Instant;
pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0");
@@ -40,10 +41,10 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0"
/// Successful pings report the round-trip time.
///
/// > **Note**: The round-trip time of a ping may be subject to delays induced
-/// > by the underlying transport, e.g. in the case of TCP there is
-/// > Nagle's algorithm, delayed acks and similar configuration options
-/// > which can affect latencies especially on otherwise low-volume
-/// > connections.
+/// > by the underlying transport, e.g. in the case of TCP there is
+/// > Nagle's algorithm, delayed acks and similar configuration options
+/// > which can affect latencies especially on otherwise low-volume
+/// > connections.
const PING_SIZE: usize = 32;
/// Sends a ping and waits for the pong.
@@ -81,7 +82,6 @@ where
#[cfg(test)]
mod tests {
- use super::*;
use futures::StreamExt;
use libp2p_core::{
multiaddr::multiaddr,
@@ -89,6 +89,8 @@ mod tests {
Endpoint,
};
+ use super::*;
+
#[tokio::test]
async fn ping_pong() {
let mem_addr = multiaddr![Memory(thread_rng().gen::<u64>())];
diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs
index 0752b1fced9..210f9435e4a 100644
--- a/protocols/ping/tests/ping.rs
+++ b/protocols/ping/tests/ping.rs
@@ -20,12 +20,12 @@
//! Integration tests for the `Ping` network behaviour.
+use std::{num::NonZeroU8, time::Duration};
+
use libp2p_ping as ping;
-use libp2p_swarm::dummy;
-use libp2p_swarm::{Swarm, SwarmEvent};
+use libp2p_swarm::{dummy, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
use quickcheck::*;
-use std::{num::NonZeroU8, time::Duration};
#[tokio::test]
async fn ping_pong() {
diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs
index e854ed2a1ff..968642b3f1f 100644
--- a/protocols/relay/src/behaviour.rs
+++ b/protocols/relay/src/behaviour.rs
@@ -22,27 +22,31 @@
pub(crate) mod handler;
pub(crate) mod rate_limiter;
-use crate::behaviour::handler::Handler;
-use crate::multiaddr_ext::MultiaddrExt;
-use crate::proto;
-use crate::protocol::{inbound_hop, outbound_stop};
+use std::{
+ collections::{hash_map, HashMap, HashSet, VecDeque},
+ num::NonZeroU32,
+ ops::Add,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use either::Either;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr};
+use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::{ConnectionClosed, FromSwarm};
use libp2p_swarm::{
+ behaviour::{ConnectionClosed, FromSwarm},
dummy, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, NotifyHandler,
THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::collections::{hash_map, HashMap, HashSet, VecDeque};
-use std::num::NonZeroU32;
-use std::ops::Add;
-use std::task::{Context, Poll};
-use std::time::Duration;
use web_time::Instant;
+use crate::{
+ behaviour::handler::Handler,
+ multiaddr_ext::MultiaddrExt,
+ proto,
+ protocol::{inbound_hop, outbound_stop},
+};
+
/// Configuration for the relay [`Behaviour`].
///
/// # Panics
@@ -120,12 +124,14 @@ impl std::fmt::Debug for Config {
impl Default for Config {
fn default() -> Self {
let reservation_rate_limiters = vec![
- // For each peer ID one reservation every 2 minutes with up to 30 reservations per hour.
+ // For each peer ID one reservation every 2 minutes with up
+ // to 30 reservations per hour.
rate_limiter::new_per_peer(rate_limiter::GenericRateLimiterConfig {
limit: NonZeroU32::new(30).expect("30 > 0"),
interval: Duration::from_secs(60 * 2),
}),
- // For each IP address one reservation every minute with up to 60 reservations per hour.
+ // For each IP address one reservation every minute with up
+ // to 60 reservations per hour.
rate_limiter::new_per_ip(rate_limiter::GenericRateLimiterConfig {
limit: NonZeroU32::new(60).expect("60 > 0"),
interval: Duration::from_secs(60),
@@ -386,7 +392,8 @@ impl NetworkBehaviour for Behaviour {
);
let action = if
- // Deny if it is a new reservation and exceeds `max_reservations_per_peer`.
+ // Deny if it is a new reservation and exceeds
+ // `max_reservations_per_peer`.
(!renewed
&& self
.reservations
diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs
index 92e45720f3f..0a4fe11c00a 100644
--- a/protocols/relay/src/behaviour/handler.rs
+++ b/protocols/relay/src/behaviour/handler.rs
@@ -18,32 +18,38 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::behaviour::CircuitId;
-use crate::copy_future::CopyFuture;
-use crate::protocol::{inbound_hop, outbound_stop};
-use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME};
+use std::{
+ collections::{HashMap, VecDeque},
+ fmt, io,
+ task::{Context, Poll},
+ time::Duration,
+};
+
use bytes::Bytes;
use either::Either;
-use futures::future::{BoxFuture, FutureExt, TryFutureExt};
-use futures::io::AsyncWriteExt;
-use futures::stream::{FuturesUnordered, StreamExt};
+use futures::{
+ future::{BoxFuture, FutureExt, TryFutureExt},
+ io::AsyncWriteExt,
+ stream::{FuturesUnordered, StreamExt},
+};
use futures_timer::Delay;
-use libp2p_core::upgrade::ReadyUpgrade;
-use libp2p_core::{ConnectedPoint, Multiaddr};
+use libp2p_core::{upgrade::ReadyUpgrade, ConnectedPoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::handler::{
- ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
-};
use libp2p_swarm::{
+ handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound},
ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamProtocol,
StreamUpgradeError, SubstreamProtocol,
};
-use std::collections::{HashMap, VecDeque};
-use std::task::{Context, Poll};
-use std::time::Duration;
-use std::{fmt, io};
use web_time::Instant;
+use crate::{
+ behaviour::CircuitId,
+ copy_future::CopyFuture,
+ proto,
+ protocol::{inbound_hop, outbound_stop},
+ HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME,
+};
+
const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10;
const STREAM_TIMEOUT: Duration = Duration::from_secs(60);
diff --git a/protocols/relay/src/behaviour/rate_limiter.rs b/protocols/relay/src/behaviour/rate_limiter.rs
index 45b701c1b50..4b97c3d5090 100644
--- a/protocols/relay/src/behaviour/rate_limiter.rs
+++ b/protocols/relay/src/behaviour/rate_limiter.rs
@@ -18,18 +18,20 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{
+ collections::{HashMap, VecDeque},
+ hash::Hash,
+ net::IpAddr,
+ num::NonZeroU32,
+ time::Duration,
+};
+
use libp2p_core::multiaddr::{Multiaddr, Protocol};
use libp2p_identity::PeerId;
-use std::collections::{HashMap, VecDeque};
-use std::hash::Hash;
-use std::net::IpAddr;
-use std::num::NonZeroU32;
-use std::time::Duration;
use web_time::Instant;
/// Allows rate limiting access to some resource based on the [`PeerId`] and
/// [`Multiaddr`] of a remote peer.
-//
// See [`new_per_peer`] and [`new_per_ip`] for precast implementations. Use
// [`GenericRateLimiter`] to build your own, e.g. based on the autonomous system
// number of a peers IP address.
@@ -170,9 +172,10 @@ impl GenericRateLimiter {
#[cfg(test)]
mod tests {
- use super::*;
use quickcheck::{QuickCheck, TestResult};
+ use super::*;
+
#[test]
fn first() {
let id = 1;
diff --git a/protocols/relay/src/copy_future.rs b/protocols/relay/src/copy_future.rs
index c0039c29534..ae7ef22d648 100644
--- a/protocols/relay/src/copy_future.rs
+++ b/protocols/relay/src/copy_future.rs
@@ -24,16 +24,19 @@
//!
//! Inspired by [`futures::io::Copy`].
-use futures::future::Future;
-use futures::future::FutureExt;
-use futures::io::{AsyncBufRead, BufReader};
-use futures::io::{AsyncRead, AsyncWrite};
-use futures::ready;
+use std::{
+ io,
+ pin::Pin,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use futures::{
+ future::{Future, FutureExt},
+ io::{AsyncBufRead, AsyncRead, AsyncWrite, BufReader},
+ ready,
+};
use futures_timer::Delay;
-use std::io;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use std::time::Duration;
pub(crate) struct CopyFuture
{
src: BufReader,
@@ -161,12 +164,13 @@ fn forward_data(
#[cfg(test)]
mod tests {
- use super::*;
- use futures::executor::block_on;
- use futures::io::BufWriter;
- use quickcheck::QuickCheck;
use std::io::ErrorKind;
+ use futures::{executor::block_on, io::BufWriter};
+ use quickcheck::QuickCheck;
+
+ use super::*;
+
#[test]
fn quickcheck() {
struct Connection {
@@ -356,13 +360,14 @@ mod tests {
}
}
- // The source has two reads available, handing them out on `AsyncRead::poll_read` one by one.
+ // The source has two reads available, handing them out
+ // on `AsyncRead::poll_read` one by one.
let mut source = BufReader::new(NeverEndingSource { read: vec![1, 2] });
// The destination is wrapped by a `BufWriter` with a capacity of `3`, i.e. one larger than
// the available reads of the source. Without an explicit `AsyncWrite::poll_flush` the two
- // reads would thus never make it to the destination, but instead be stuck in the buffer of
- // the `BufWrite`.
+ // reads would thus never make it to the destination,
+ // but instead be stuck in the buffer of the `BufWrite`.
let mut destination = BufWriter::with_capacity(
3,
RecordingDestination {
diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs
index eca3578d599..dba07015765 100644
--- a/protocols/relay/src/lib.rs
+++ b/protocols/relay/src/lib.rs
@@ -32,10 +32,10 @@ mod protocol;
mod proto {
#![allow(unreachable_pub)]
include!("generated/mod.rs");
- pub(crate) use self::message_v2::pb::mod_HopMessage::Type as HopMessageType;
pub use self::message_v2::pb::mod_StopMessage::Type as StopMessageType;
pub(crate) use self::message_v2::pb::{
- HopMessage, Limit, Peer, Reservation, Status, StopMessage,
+ mod_HopMessage::Type as HopMessageType, HopMessage, Limit, Peer, Reservation, Status,
+ StopMessage,
};
}
diff --git a/protocols/relay/src/multiaddr_ext.rs b/protocols/relay/src/multiaddr_ext.rs
index 6991a8b9ded..7c06eb7eab0 100644
--- a/protocols/relay/src/multiaddr_ext.rs
+++ b/protocols/relay/src/multiaddr_ext.rs
@@ -1,5 +1,4 @@
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::Multiaddr;
+use libp2p_core::{multiaddr::Protocol, Multiaddr};
pub(crate) trait MultiaddrExt {
fn is_relayed(&self) -> bool;
diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs
index fc9d28e66ed..7ac9b716700 100644
--- a/protocols/relay/src/priv_client.rs
+++ b/protocols/relay/src/priv_client.rs
@@ -23,33 +23,39 @@
pub(crate) mod handler;
pub(crate) mod transport;
-use crate::multiaddr_ext::MultiaddrExt;
-use crate::priv_client::handler::Handler;
-use crate::protocol::{self, inbound_stop};
+use std::{
+ collections::{hash_map, HashMap, VecDeque},
+ convert::Infallible,
+ io::{Error, ErrorKind, IoSlice},
+ pin::Pin,
+ task::{Context, Poll},
+};
+
use bytes::Bytes;
use either::Either;
-use futures::channel::mpsc::Receiver;
-use futures::future::{BoxFuture, FutureExt};
-use futures::io::{AsyncRead, AsyncWrite};
-use futures::ready;
-use futures::stream::StreamExt;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use futures::{
+ channel::mpsc::Receiver,
+ future::{BoxFuture, FutureExt},
+ io::{AsyncRead, AsyncWrite},
+ ready,
+ stream::StreamExt,
+};
+use libp2p_core::{multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm};
-use libp2p_swarm::dial_opts::DialOpts;
use libp2p_swarm::{
+ behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm},
+ dial_opts::DialOpts,
dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, NetworkBehaviour,
NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::collections::{hash_map, HashMap, VecDeque};
-use std::convert::Infallible;
-use std::io::{Error, ErrorKind, IoSlice};
-use std::pin::Pin;
-use std::task::{Context, Poll};
use transport::Transport;
+use crate::{
+ multiaddr_ext::MultiaddrExt,
+ priv_client::handler::Handler,
+ protocol::{self, inbound_stop},
+};
+
/// The events produced by the client `Behaviour`.
#[derive(Debug)]
pub enum Event {
@@ -89,7 +95,8 @@ pub struct Behaviour {
/// Stores the address of a pending or confirmed reservation.
///
- /// This is indexed by the [`ConnectionId`] to a relay server and the address is the `/p2p-circuit` address we reserved on it.
+ /// This is indexed by the [`ConnectionId`] to a relay server and the address is the
+ /// `/p2p-circuit` address we reserved on it.
reservation_addresses: HashMap,
/// Queue of actions to return when polled.
diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs
index 77b7f94ae60..8f60b689ec8 100644
--- a/protocols/relay/src/priv_client/handler.rs
+++ b/protocols/relay/src/priv_client/handler.rs
@@ -18,29 +18,35 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::client::Connection;
-use crate::priv_client::transport;
-use crate::priv_client::transport::ToListenerMsg;
-use crate::protocol::{self, inbound_stop, outbound_hop};
-use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME};
-use futures::channel::mpsc::Sender;
-use futures::channel::{mpsc, oneshot};
-use futures::future::FutureExt;
+use std::{
+ collections::VecDeque,
+ convert::Infallible,
+ fmt, io,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use futures::{
+ channel::{mpsc, mpsc::Sender, oneshot},
+ future::FutureExt,
+};
use futures_timer::Delay;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::upgrade::ReadyUpgrade;
-use libp2p_core::Multiaddr;
+use libp2p_core::{multiaddr::Protocol, upgrade::ReadyUpgrade, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound};
use libp2p_swarm::{
+ handler::{ConnectionEvent, FullyNegotiatedInbound},
ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError,
SubstreamProtocol,
};
-use std::collections::VecDeque;
-use std::convert::Infallible;
-use std::task::{Context, Poll};
-use std::time::Duration;
-use std::{fmt, io};
+
+use crate::{
+ client::Connection,
+ priv_client,
+ priv_client::{transport, transport::ToListenerMsg},
+ proto,
+ protocol::{self, inbound_stop, outbound_hop},
+ HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME,
+};
/// The maximum number of circuits being denied concurrently.
///
diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs
index ec1e8ca5fb8..ed9faa946db 100644
--- a/protocols/relay/src/priv_client/transport.rs
+++ b/protocols/relay/src/priv_client/transport.rs
@@ -19,25 +19,35 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::multiaddr_ext::MultiaddrExt;
-use crate::priv_client::Connection;
-use crate::protocol::outbound_hop;
-use crate::protocol::outbound_hop::{ConnectError, ReserveError};
-use crate::RequestId;
-use futures::channel::mpsc;
-use futures::channel::oneshot;
-use futures::future::{ready, BoxFuture, FutureExt, Ready};
-use futures::sink::SinkExt;
-use futures::stream::SelectAll;
-use futures::stream::{Stream, StreamExt};
-use libp2p_core::multiaddr::{Multiaddr, Protocol};
-use libp2p_core::transport::{DialOpts, ListenerId, TransportError, TransportEvent};
+use std::{
+ collections::VecDeque,
+ pin::Pin,
+ task::{Context, Poll, Waker},
+};
+
+use futures::{
+ channel::{mpsc, oneshot},
+ future::{ready, BoxFuture, FutureExt, Ready},
+ sink::SinkExt,
+ stream::{SelectAll, Stream, StreamExt},
+};
+use libp2p_core::{
+ multiaddr::{Multiaddr, Protocol},
+ transport::{DialOpts, ListenerId, TransportError, TransportEvent},
+};
use libp2p_identity::PeerId;
-use std::collections::VecDeque;
-use std::pin::Pin;
-use std::task::{Context, Poll, Waker};
use thiserror::Error;
+use crate::{
+ multiaddr_ext::MultiaddrExt,
+ priv_client::Connection,
+ protocol::{
+ outbound_hop,
+ outbound_hop::{ConnectError, ReserveError},
+ },
+ RequestId,
+};
+
/// A [`Transport`] enabling client relay capabilities.
///
/// Note: The transport only handles listening and dialing on relayed [`Multiaddr`], and depends on
@@ -49,7 +59,8 @@ use thiserror::Error;
/// 1. Establish relayed connections by dialing `/p2p-circuit` addresses.
///
/// ```
-/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, transport::{DialOpts, PortUse}, connection::Endpoint};
+/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport,
+/// # transport::{DialOpts, PortUse}, connection::Endpoint};
/// # use libp2p_core::transport::memory::MemoryTransport;
/// # use libp2p_core::transport::choice::OrTransport;
/// # use libp2p_relay as relay;
@@ -307,8 +318,9 @@ pub(crate) struct Listener {
queued_events: VecDeque<<Self as Stream>::Item>,
/// Channel for messages from the behaviour [`Handler`][super::handler::Handler].
from_behaviour: mpsc::Receiver,
- /// The listener can be closed either manually with [`Transport::remove_listener`](libp2p_core::Transport) or if
- /// the sender side of the `from_behaviour` channel is dropped.
+ /// The listener can be closed either manually with
+ /// [`Transport::remove_listener`](libp2p_core::Transport) or if the sender side of the
+ /// `from_behaviour` channel is dropped.
is_closed: bool,
waker: Option<Waker>,
}
@@ -344,7 +356,8 @@ impl Stream for Listener {
}
if self.is_closed {
- // Terminate the stream if the listener closed and all remaining events have been reported.
+ // Terminate the stream if the listener closed and
+ // all remaining events have been reported.
self.waker = None;
return Poll::Ready(None);
}
diff --git a/protocols/relay/src/protocol.rs b/protocols/relay/src/protocol.rs
index b94151259cd..b1adeedaaf5 100644
--- a/protocols/relay/src/protocol.rs
+++ b/protocols/relay/src/protocol.rs
@@ -18,10 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
-use libp2p_swarm::StreamProtocol;
use std::time::Duration;
+use libp2p_swarm::StreamProtocol;
+
+use crate::proto;
+
pub(crate) mod inbound_hop;
pub(crate) mod inbound_stop;
pub(crate) mod outbound_hop;
diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs
index 401c6258176..01280d70897 100644
--- a/protocols/relay/src/protocol/inbound_hop.rs
+++ b/protocols/relay/src/protocol/inbound_hop.rs
@@ -19,21 +19,18 @@
// DEALINGS IN THE SOFTWARE.
use std::time::Duration;
-use web_time::SystemTime;
use asynchronous_codec::{Framed, FramedParts};
use bytes::Bytes;
use either::Either;
use futures::prelude::*;
-use thiserror::Error;
-
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_swarm::Stream;
+use thiserror::Error;
+use web_time::SystemTime;
-use crate::proto;
-use crate::proto::message_v2::pb::mod_HopMessage::Type;
-use crate::protocol::MAX_MESSAGE_SIZE;
+use crate::{proto, proto::message_v2::pb::mod_HopMessage::Type, protocol::MAX_MESSAGE_SIZE};
#[derive(Debug, Error)]
pub enum Error {
diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs
index b698a5ff769..8994c2cff73 100644
--- a/protocols/relay/src/protocol/inbound_stop.rs
+++ b/protocols/relay/src/protocol/inbound_stop.rs
@@ -18,16 +18,20 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::proto;
-use crate::protocol::{self, MAX_MESSAGE_SIZE};
+use std::io;
+
use asynchronous_codec::{Framed, FramedParts};
use bytes::Bytes;
use futures::prelude::*;
use libp2p_identity::PeerId;
use libp2p_swarm::Stream;
-use std::io;
use thiserror::Error;
+use crate::{
+ proto,
+ protocol::{self, MAX_MESSAGE_SIZE},
+};
+
pub(crate) async fn handle_open_circuit(io: Stream) -> Result {
let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE));
diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs
index b349f8848be..216c6d115bf 100644
--- a/protocols/relay/src/protocol/outbound_hop.rs
+++ b/protocols/relay/src/protocol/outbound_hop.rs
@@ -18,22 +18,23 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::io;
-use std::time::Duration;
+use std::{io, time::Duration};
use asynchronous_codec::{Framed, FramedParts};
use bytes::Bytes;
use futures::prelude::*;
use futures_timer::Delay;
-use thiserror::Error;
-use web_time::SystemTime;
-
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
use libp2p_swarm::Stream;
+use thiserror::Error;
+use web_time::SystemTime;
-use crate::protocol::{Limit, MAX_MESSAGE_SIZE};
-use crate::{proto, HOP_PROTOCOL_NAME};
+use crate::{
+ proto,
+ protocol::{Limit, MAX_MESSAGE_SIZE},
+ HOP_PROTOCOL_NAME,
+};
#[derive(Debug, Error)]
pub enum ConnectError {
diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs
index 525ebc10821..272aa24eef6 100644
--- a/protocols/relay/src/protocol/outbound_stop.rs
+++ b/protocols/relay/src/protocol/outbound_stop.rs
@@ -18,19 +18,16 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use std::io;
-use std::time::Duration;
+use std::{io, time::Duration};
use asynchronous_codec::{Framed, FramedParts};
use bytes::Bytes;
use futures::prelude::*;
-use thiserror::Error;
-
use libp2p_identity::PeerId;
use libp2p_swarm::Stream;
+use thiserror::Error;
-use crate::protocol::MAX_MESSAGE_SIZE;
-use crate::{proto, STOP_PROTOCOL_NAME};
+use crate::{proto, protocol::MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME};
#[derive(Debug, Error)]
pub enum Error {
diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs
index 2b28d5a50cd..125f0dbb4ad 100644
--- a/protocols/relay/tests/lib.rs
+++ b/protocols/relay/tests/lib.rs
@@ -18,26 +18,28 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use futures::executor::LocalPool;
-use futures::future::FutureExt;
-use futures::io::{AsyncRead, AsyncWrite};
-use futures::stream::StreamExt;
-use futures::task::Spawn;
-use libp2p_core::multiaddr::{Multiaddr, Protocol};
-use libp2p_core::muxing::StreamMuxerBox;
-use libp2p_core::transport::choice::OrTransport;
-use libp2p_core::transport::{Boxed, MemoryTransport, Transport};
-use libp2p_core::upgrade;
+use std::{error::Error, time::Duration};
+
+use futures::{
+ executor::LocalPool,
+ future::FutureExt,
+ io::{AsyncRead, AsyncWrite},
+ stream::StreamExt,
+ task::Spawn,
+};
+use libp2p_core::{
+ multiaddr::{Multiaddr, Protocol},
+ muxing::StreamMuxerBox,
+ transport::{choice::OrTransport, Boxed, MemoryTransport, Transport},
+ upgrade,
+};
use libp2p_identity as identity;
use libp2p_identity::PeerId;
use libp2p_ping as ping;
use libp2p_plaintext as plaintext;
use libp2p_relay as relay;
-use libp2p_swarm::dial_opts::DialOpts;
-use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent};
+use libp2p_swarm::{dial_opts::DialOpts, Config, DialError, NetworkBehaviour, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
-use std::error::Error;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[test]
diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs
index a794252ff0b..019b23c092b 100644
--- a/protocols/rendezvous/src/client.rs
+++ b/protocols/rendezvous/src/client.rs
@@ -18,24 +18,28 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::codec::Message::*;
-use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl};
-use futures::future::BoxFuture;
-use futures::future::FutureExt;
-use futures::stream::FuturesUnordered;
-use futures::stream::StreamExt;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr, PeerRecord};
+use std::{
+ collections::HashMap,
+ iter,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use futures::{
+ future::{BoxFuture, FutureExt},
+ stream::{FuturesUnordered, StreamExt},
+};
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr, PeerRecord};
use libp2p_identity::{Keypair, PeerId, SigningError};
use libp2p_request_response::{OutboundRequestId, ProtocolSupport};
use libp2p_swarm::{
ConnectionDenied, ConnectionId, ExternalAddresses, FromSwarm, NetworkBehaviour, THandler,
THandlerInEvent, THandlerOutEvent, ToSwarm,
};
-use std::collections::HashMap;
-use std::iter;
-use std::task::{Context, Poll};
-use std::time::Duration;
+
+use crate::codec::{
+ Cookie, ErrorCode, Message, Message::*, Namespace, NewRegistration, Registration, Ttl,
+};
pub struct Behaviour {
inner: libp2p_request_response::Behaviour,
@@ -47,12 +51,14 @@ pub struct Behaviour {
/// Hold addresses of all peers that we have discovered so far.
///
- /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`].
+ /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by
+ /// returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`].
discovered_peers: HashMap<(PeerId, Namespace), Vec<Multiaddr>>,
registered_namespaces: HashMap<(PeerId, Namespace), Ttl>,
- /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` otherwise we have a memory leak.
+ /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers`
+ /// otherwise we have a memory leak.
expiring_registrations: FuturesUnordered>,
external_addresses: ExternalAddresses,
@@ -81,8 +87,9 @@ impl Behaviour {
/// Register our external addresses in the given namespace with the given rendezvous peer.
///
- /// External addresses are either manually added via [`libp2p_swarm::Swarm::add_external_address`] or reported
- /// by other [`NetworkBehaviour`]s via [`ToSwarm::ExternalAddrConfirmed`].
+ /// External addresses are either manually added via
+ /// [`libp2p_swarm::Swarm::add_external_address`] or reported by other [`NetworkBehaviour`]s
+ /// via [`ToSwarm::ExternalAddrConfirmed`].
pub fn register(
&mut self,
namespace: Namespace,
diff --git a/protocols/rendezvous/src/codec.rs b/protocols/rendezvous/src/codec.rs
index cad3688e00b..60f9f14f332 100644
--- a/protocols/rendezvous/src/codec.rs
+++ b/protocols/rendezvous/src/codec.rs
@@ -18,16 +18,17 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::DEFAULT_TTL;
+use std::{fmt, io};
+
use async_trait::async_trait;
-use asynchronous_codec::{BytesMut, Decoder, Encoder};
-use asynchronous_codec::{FramedRead, FramedWrite};
+use asynchronous_codec::{BytesMut, Decoder, Encoder, FramedRead, FramedWrite};
use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt};
use libp2p_core::{peer_record, signed_envelope, PeerRecord, SignedEnvelope};
use libp2p_swarm::StreamProtocol;
use quick_protobuf_codec::Codec as ProtobufCodec;
use rand::RngCore;
-use std::{fmt, io};
+
+use crate::DEFAULT_TTL;
pub type Ttl = u64;
pub(crate) type Limit = u64;
@@ -54,7 +55,9 @@ pub struct Namespace(String);
impl Namespace {
/// Creates a new [`Namespace`] from a static string.
///
- /// This will panic if the namespace is too long. We accepting panicking in this case because we are enforcing a `static lifetime which means this value can only be a constant in the program and hence we hope the developer checked that it is of an acceptable length.
+ /// This will panic if the namespace is too long. We accept panicking in this case because we
+ /// are enforcing a `'static` lifetime, which means this value can only be a constant in the
+ /// program, and hence we hope the developer checked that it is of an acceptable length.
pub fn from_static(value: &'static str) -> Self {
if value.len() > crate::MAX_NAMESPACE {
panic!("Namespace '{value}' is too long!")
@@ -109,7 +112,8 @@ pub struct Cookie {
impl Cookie {
/// Construct a new [`Cookie`] for a given namespace.
///
- /// This cookie will only be valid for subsequent DISCOVER requests targeting the same namespace.
+ /// This cookie will only be valid for subsequent DISCOVER requests targeting the same
+ /// namespace.
pub fn for_namespace(namespace: Namespace) -> Self {
Self {
id: rand::thread_rng().next_u64(),
diff --git a/protocols/rendezvous/src/lib.rs b/protocols/rendezvous/src/lib.rs
index 7c607085f20..221178728af 100644
--- a/protocols/rendezvous/src/lib.rs
+++ b/protocols/rendezvous/src/lib.rs
@@ -22,9 +22,10 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl};
use libp2p_swarm::StreamProtocol;
+pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl};
+
mod codec;
/// If unspecified, rendezvous nodes should assume a TTL of 2h.
diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs
index 45a525d9573..8aafcfb48e3 100644
--- a/protocols/rendezvous/src/server.rs
+++ b/protocols/rendezvous/src/server.rs
@@ -18,25 +18,27 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl};
-use crate::{MAX_TTL, MIN_TTL};
+use std::{
+ collections::{HashMap, HashSet},
+ iter,
+ task::{ready, Context, Poll},
+ time::Duration,
+};
+
use bimap::BiMap;
-use futures::future::BoxFuture;
-use futures::stream::FuturesUnordered;
-use futures::{FutureExt, StreamExt};
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt};
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_request_response::ProtocolSupport;
-use libp2p_swarm::behaviour::FromSwarm;
use libp2p_swarm::{
- ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent,
- ToSwarm,
+ behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler,
+ THandlerInEvent, THandlerOutEvent, ToSwarm,
+};
+
+use crate::{
+ codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl},
+ MAX_TTL, MIN_TTL,
};
-use std::collections::{HashMap, HashSet};
-use std::iter;
-use std::task::{ready, Context, Poll};
-use std::time::Duration;
pub struct Behaviour {
inner: libp2p_request_response::Behaviour,
@@ -534,10 +536,9 @@ pub struct CookieNamespaceMismatch;
#[cfg(test)]
mod tests {
- use web_time::SystemTime;
-
use libp2p_core::PeerRecord;
use libp2p_identity as identity;
+ use web_time::SystemTime;
use super::*;
@@ -792,7 +793,8 @@ mod tests {
.unwrap_err();
}
- /// Polls [`Registrations`] for at most `seconds` and panics if doesn't return an event within that time.
+ /// Polls [`Registrations`] for at most `seconds` and panics if it doesn't
+ /// return an event within that time.
async fn next_event_in_at_most(&mut self, seconds: u64) -> ExpiredRegistration {
tokio::time::timeout(Duration::from_secs(seconds), self.next_event())
.await
diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs
index d9200780ece..2305c2ef412 100644
--- a/protocols/rendezvous/tests/rendezvous.rs
+++ b/protocols/rendezvous/tests/rendezvous.rs
@@ -18,16 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use futures::stream::FuturesUnordered;
-use futures::StreamExt;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::Multiaddr;
+use std::time::Duration;
+
+use futures::{stream::FuturesUnordered, StreamExt};
+use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_identity as identity;
use libp2p_rendezvous as rendezvous;
use libp2p_rendezvous::client::RegisterError;
use libp2p_swarm::{DialError, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
-use std::time::Duration;
use tracing_subscriber::EnvFilter;
#[tokio::test]
@@ -471,9 +470,11 @@ async fn new_combined_node() -> Swarm {
}
async fn new_impersonating_client() -> Swarm {
- // In reality, if Eve were to try and fake someones identity, she would obviously only know the public key.
- // Due to the type-safe API of the `Rendezvous` behaviour and `PeerRecord`, we actually cannot construct a bad `PeerRecord` (i.e. one that is claims to be someone else).
- // As such, the best we can do is hand eve a completely different keypair from what she is using to authenticate her connection.
+ // In reality, if Eve were to try and fake someone's identity, she would obviously only know the
+ // public key. Due to the type-safe API of the `Rendezvous` behaviour and `PeerRecord`, we
+ // actually cannot construct a bad `PeerRecord` (i.e. one that claims to be someone else).
+ // As such, the best we can do is hand Eve a completely different keypair from what she is using
+ // to authenticate her connection.
let someone_else = identity::Keypair::generate_ed25519();
let mut eve = Swarm::new_ephemeral(move |_| rendezvous::client::Behaviour::new(someone_else));
eve.listen().with_memory_addr_external().await;
diff --git a/protocols/request-response/src/cbor.rs b/protocols/request-response/src/cbor.rs
index a27d069e758..744d94cb961 100644
--- a/protocols/request-response/src/cbor.rs
+++ b/protocols/request-response/src/cbor.rs
@@ -37,19 +37,23 @@
/// }
///
/// let behaviour = cbor::Behaviour::<GreetRequest, GreetResponse>::new(
-/// [(StreamProtocol::new("/my-cbor-protocol"), ProtocolSupport::Full)],
-/// request_response::Config::default()
+/// [(
+/// StreamProtocol::new("/my-cbor-protocol"),
+/// ProtocolSupport::Full,
+/// )],
+/// request_response::Config::default(),
/// );
/// ```
pub type Behaviour<Req, Resp> = crate::Behaviour<codec::Codec<Req, Resp>>;
mod codec {
+ use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData};
+
use async_trait::async_trait;
use cbor4ii::core::error::DecodeError;
use futures::prelude::*;
use libp2p_swarm::StreamProtocol;
use serde::{de::DeserializeOwned, Serialize};
- use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData};
/// Max request size in bytes
const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024;
@@ -168,13 +172,13 @@ mod codec {
#[cfg(test)]
mod tests {
- use crate::cbor::codec::Codec;
- use crate::Codec as _;
use futures::AsyncWriteExt;
use futures_ringbuf::Endpoint;
use libp2p_swarm::StreamProtocol;
use serde::{Deserialize, Serialize};
+ use crate::{cbor::codec::Codec, Codec as _};
+
#[async_std::test]
async fn test_codec() {
let expected_request = TestRequest {
diff --git a/protocols/request-response/src/codec.rs b/protocols/request-response/src/codec.rs
index d26b729acae..d396a75ad7b 100644
--- a/protocols/request-response/src/codec.rs
+++ b/protocols/request-response/src/codec.rs
@@ -18,9 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::io;
+
use async_trait::async_trait;
use futures::prelude::*;
-use std::io;
/// A `Codec` defines the request and response types
/// for a request-response [`Behaviour`](crate::Behaviour) protocol or
diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs
index dbd7a0708ce..133cff87f40 100644
--- a/protocols/request-response/src/handler.rs
+++ b/protocols/request-response/src/handler.rs
@@ -20,23 +20,6 @@
pub(crate) mod protocol;
-pub use protocol::ProtocolSupport;
-
-use crate::codec::Codec;
-use crate::handler::protocol::Protocol;
-use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD};
-
-use futures::channel::mpsc;
-use futures::{channel::oneshot, prelude::*};
-use libp2p_swarm::handler::{
- ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
- ListenUpgradeError,
-};
-use libp2p_swarm::{
- handler::{ConnectionHandler, ConnectionHandlerEvent, StreamUpgradeError},
- SubstreamProtocol,
-};
-use smallvec::SmallVec;
use std::{
collections::VecDeque,
fmt, io,
@@ -48,6 +31,25 @@ use std::{
time::Duration,
};
+use futures::{
+ channel::{mpsc, oneshot},
+ prelude::*,
+};
+use libp2p_swarm::{
+ handler::{
+ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
+ FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, StreamUpgradeError,
+ },
+ SubstreamProtocol,
+};
+pub use protocol::ProtocolSupport;
+use smallvec::SmallVec;
+
+use crate::{
+ codec::Codec, handler::protocol::Protocol, InboundRequestId, OutboundRequestId,
+ EMPTY_QUEUE_SHRINK_THRESHOLD,
+};
+
/// A connection handler for a request response [`Behaviour`](super::Behaviour) protocol.
pub struct Handler
where
diff --git a/protocols/request-response/src/json.rs b/protocols/request-response/src/json.rs
index 85e78e7ddda..9bd5b8c6df9 100644
--- a/protocols/request-response/src/json.rs
+++ b/protocols/request-response/src/json.rs
@@ -18,7 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-/// A request-response behaviour using [`serde_json`] for serializing and deserializing the messages.
+/// A request-response behaviour using [`serde_json`] for serializing and deserializing the
+/// messages.
///
/// # Example
///
@@ -36,18 +37,22 @@
/// }
///
/// let behaviour = json::Behaviour::<GreetRequest, GreetResponse>::new(
-/// [(StreamProtocol::new("/my-json-protocol"), ProtocolSupport::Full)],
-/// request_response::Config::default()
+/// [(
+/// StreamProtocol::new("/my-json-protocol"),
+/// ProtocolSupport::Full,
+/// )],
+/// request_response::Config::default(),
/// );
/// ```
pub type Behaviour<Req, Resp> = crate::Behaviour<codec::Codec<Req, Resp>>;
mod codec {
+ use std::{io, marker::PhantomData};
+
use async_trait::async_trait;
use futures::prelude::*;
use libp2p_swarm::StreamProtocol;
use serde::{de::DeserializeOwned, Serialize};
- use std::{io, marker::PhantomData};
/// Max request size in bytes
const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024;
@@ -140,12 +145,13 @@ mod codec {
#[cfg(test)]
mod tests {
- use crate::Codec;
use futures::AsyncWriteExt;
use futures_ringbuf::Endpoint;
use libp2p_swarm::StreamProtocol;
use serde::{Deserialize, Serialize};
+ use crate::Codec;
+
#[async_std::test]
async fn test_codec() {
let expected_request = TestRequest {
diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs
index e627f5668ff..052e1e87e2b 100644
--- a/protocols/request-response/src/lib.rs
+++ b/protocols/request-response/src/lib.rs
@@ -73,12 +73,18 @@ mod handler;
#[cfg(feature = "json")]
pub mod json;
-pub use codec::Codec;
-pub use handler::ProtocolSupport;
+use std::{
+ collections::{HashMap, HashSet, VecDeque},
+ fmt, io,
+ sync::{atomic::AtomicU64, Arc},
+ task::{Context, Poll},
+ time::Duration,
+};
-use crate::handler::OutboundMessage;
+pub use codec::Codec;
use futures::channel::oneshot;
use handler::Handler;
+pub use handler::ProtocolSupport;
use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
use libp2p_swarm::{
@@ -88,13 +94,8 @@ use libp2p_swarm::{
PeerAddresses, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
use smallvec::SmallVec;
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- fmt, io,
- sync::{atomic::AtomicU64, Arc},
- task::{Context, Poll},
- time::Duration,
-};
+
+use crate::handler::OutboundMessage;
/// An inbound request or response.
#[derive(Debug)]
@@ -353,8 +354,8 @@ where
/// Pending events to return from `poll`.
pending_events:
VecDeque, OutboundMessage>>,
- /// The currently connected peers, their pending outbound and inbound responses and their known,
- /// reachable addresses, if any.
+ /// The currently connected peers, their pending outbound and inbound responses and their
+ /// known, reachable addresses, if any.
connected: HashMap>,
/// Externally managed addresses via `add_address` and `remove_address`.
addresses: PeerAddresses,
@@ -367,7 +368,8 @@ impl Behaviour
where
TCodec: Codec + Default + Clone + Send + 'static,
{
- /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to construct the codec.
+ /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to
+ /// construct the codec.
pub fn new(protocols: I, cfg: Config) -> Self
where
I: IntoIterator- ,
@@ -693,7 +695,8 @@ where
}
}
- /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer.
+ /// Preloads a new [`Handler`] with requests that are
+ /// waiting to be sent to the newly connected peer.
fn preload_new_handler(
&mut self,
handler: &mut Handler,
diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs
index 19f323e169f..d1f26378a77 100644
--- a/protocols/request-response/tests/error_reporting.rs
+++ b/protocols/request-response/tests/error_reporting.rs
@@ -1,3 +1,5 @@
+use std::{io, iter, pin::pin, time::Duration};
+
use anyhow::{bail, Result};
use async_std::task::sleep;
use async_trait::async_trait;
@@ -10,9 +12,6 @@ use libp2p_swarm_test::SwarmExt;
use request_response::{
Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId, ResponseChannel,
};
-use std::pin::pin;
-use std::time::Duration;
-use std::{io, iter};
use tracing_subscriber::EnvFilter;
#[async_std::test]
diff --git a/protocols/request-response/tests/peer_address.rs b/protocols/request-response/tests/peer_address.rs
index 0ed7ffe5551..603e2d09dc0 100644
--- a/protocols/request-response/tests/peer_address.rs
+++ b/protocols/request-response/tests/peer_address.rs
@@ -1,10 +1,11 @@
+use std::iter;
+
use libp2p_core::ConnectedPoint;
use libp2p_request_response as request_response;
use libp2p_request_response::ProtocolSupport;
use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
use serde::{Deserialize, Serialize};
-use std::iter;
use tracing_subscriber::EnvFilter;
#[async_std::test]
diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs
index 827afae249c..e53fe99d6cf 100644
--- a/protocols/request-response/tests/ping.rs
+++ b/protocols/request-response/tests/ping.rs
@@ -20,6 +20,8 @@
//! Integration tests for the `Behaviour`.
+use std::{io, iter};
+
use futures::prelude::*;
use libp2p_identity::PeerId;
use libp2p_request_response as request_response;
@@ -28,7 +30,6 @@ use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent};
use libp2p_swarm_test::SwarmExt;
use rand::Rng;
use serde::{Deserialize, Serialize};
-use std::{io, iter};
use tracing_subscriber::EnvFilter;
#[async_std::test]
diff --git a/protocols/stream/src/control.rs b/protocols/stream/src/control.rs
index 036d285b2a3..2149c6bca48 100644
--- a/protocols/stream/src/control.rs
+++ b/protocols/stream/src/control.rs
@@ -6,9 +6,6 @@ use std::{
task::{Context, Poll},
};
-use crate::AlreadyRegistered;
-use crate::{handler::NewStream, shared::Shared};
-
use futures::{
channel::{mpsc, oneshot},
SinkExt as _, StreamExt as _,
@@ -16,6 +13,8 @@ use futures::{
use libp2p_identity::PeerId;
use libp2p_swarm::{Stream, StreamProtocol};
+use crate::{handler::NewStream, shared::Shared, AlreadyRegistered};
+
/// A (remote) control for opening new streams and registration of inbound protocols.
///
/// A [`Control`] can be cloned and thus allows for concurrent access.
@@ -31,13 +30,15 @@ impl Control {
/// Attempt to open a new stream for the given protocol and peer.
///
- /// In case we are currently not connected to the peer, we will attempt to make a new connection.
+ /// In case we are currently not connected to the peer,
+ /// we will attempt to make a new connection.
///
/// ## Backpressure
///
/// [`Control`]s support backpressure similarly to bounded channels:
/// Each [`Control`] has a guaranteed slot for internal messages.
- /// A single control will always open one stream at a time which is enforced by requiring `&mut self`.
+ /// A single control will always open one stream at a
+ /// time which is enforced by requiring `&mut self`.
///
/// This backpressure mechanism breaks if you clone [`Control`]s excessively.
pub async fn open_stream(
diff --git a/protocols/stream/src/handler.rs b/protocols/stream/src/handler.rs
index b7ec516d3b1..d626f48fb09 100644
--- a/protocols/stream/src/handler.rs
+++ b/protocols/stream/src/handler.rs
@@ -162,7 +162,8 @@ impl ConnectionHandler for Handler {
}
}
-/// Message from a [`Control`](crate::Control) to a [`ConnectionHandler`] to negotiate a new outbound stream.
+/// Message from a [`Control`](crate::Control) to
+/// a [`ConnectionHandler`] to negotiate a new outbound stream.
#[derive(Debug)]
pub(crate) struct NewStream {
pub(crate) protocol: StreamProtocol,
diff --git a/protocols/stream/src/shared.rs b/protocols/stream/src/shared.rs
index 48aa6613d83..62d7b3cfe68 100644
--- a/protocols/stream/src/shared.rs
+++ b/protocols/stream/src/shared.rs
@@ -12,9 +12,11 @@ use rand::seq::IteratorRandom as _;
use crate::{handler::NewStream, AlreadyRegistered, IncomingStreams};
pub(crate) struct Shared {
- /// Tracks the supported inbound protocols created via [`Control::accept`](crate::Control::accept).
+ /// Tracks the supported inbound protocols created via
+ /// [`Control::accept`](crate::Control::accept).
///
- /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the [`mpsc::Receiver`] in [`IncomingStreams`].
+ /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the
+ /// [`mpsc::Receiver`] in [`IncomingStreams`].
supported_inbound_protocols: HashMap>,
connections: HashMap,
@@ -25,7 +27,8 @@ pub(crate) struct Shared {
/// Sender for peers we want to dial.
///
- /// We manage this through a channel to avoid locks as part of [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll).
+ /// We manage this through a channel to avoid locks as part of
+ /// [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll).
dial_sender: mpsc::Sender,
}
diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs
index ee985042b68..cea8efb1e3f 100644
--- a/protocols/upnp/src/behaviour.rs
+++ b/protocols/upnp/src/behaviour.rs
@@ -32,7 +32,6 @@ use std::{
time::Duration,
};
-use crate::tokio::{is_addr_global, Gateway};
use futures::{channel::oneshot, Future, StreamExt};
use futures_timer::Delay;
use igd_next::PortMappingProtocol;
@@ -46,6 +45,8 @@ use libp2p_swarm::{
NetworkBehaviour, NewListenAddr, ToSwarm,
};
+use crate::tokio::{is_addr_global, Gateway};
+
/// The duration in seconds of a port mapping on the gateway.
const MAPPING_DURATION: u32 = 3600;
@@ -286,8 +287,9 @@ impl NetworkBehaviour for Behaviour {
match &mut self.state {
GatewayState::Searching(_) => {
- // As the gateway is not yet available we add the mapping with `MappingState::Inactive`
- // so that when and if it becomes available we map it.
+ // As the gateway is not yet available we add the mapping with
+ // `MappingState::Inactive` so that when and if it
+ // becomes available we map it.
self.mappings.insert(
Mapping {
listener_id,
diff --git a/protocols/upnp/src/lib.rs b/protocols/upnp/src/lib.rs
index 8a74d7e8f63..d7a746f78df 100644
--- a/protocols/upnp/src/lib.rs
+++ b/protocols/upnp/src/lib.rs
@@ -24,7 +24,6 @@
//! implements the [`libp2p_swarm::NetworkBehaviour`] trait.
//! This struct will automatically try to map the ports externally to internal
//! addresses on the gateway.
-//!
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
diff --git a/protocols/upnp/src/tokio.rs b/protocols/upnp/src/tokio.rs
index b2cad6fa5a7..67ef52f9608 100644
--- a/protocols/upnp/src/tokio.rs
+++ b/protocols/upnp/src/tokio.rs
@@ -20,7 +20,6 @@
use std::{error::Error, net::IpAddr};
-use crate::behaviour::{GatewayEvent, GatewayRequest};
use futures::{
channel::{mpsc, oneshot},
SinkExt, StreamExt,
@@ -28,8 +27,9 @@ use futures::{
use igd_next::SearchOptions;
pub use crate::behaviour::Behaviour;
+use crate::behaviour::{GatewayEvent, GatewayRequest};
-//TODO: remove when `IpAddr::is_global` stabilizes.
+// TODO: remove when `IpAddr::is_global` stabilizes.
pub(crate) fn is_addr_global(addr: IpAddr) -> bool {
match addr {
IpAddr::V4(ip) => {
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 00000000000..1e61bc16abf
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,10 @@
+# Imports
+reorder_imports = true
+imports_granularity = "Crate"
+group_imports = "StdExternalCrate"
+
+# Docs
+wrap_comments = true
+comment_width = 100
+normalize_comments = true
+format_code_in_doc_comments = true
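
Taken together, these settings produce the import layout seen throughout this diff: one merged `use` per crate, grouped std → external crates → local crate. A small illustration (the item names are placeholders, not taken from any file in this diff):

    // Group 1: std
    use std::{collections::HashMap, task::Poll};

    // Group 2: external crates (one merged `use` per crate)
    use futures::stream::StreamExt;
    use libp2p_identity::PeerId;

    // Group 3: the current crate comes last
    use crate::handler::NewStream;
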
diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs
index 258c0b976c8..41b909f329f 100644
--- a/swarm-derive/src/lib.rs
+++ b/swarm-derive/src/lib.rs
@@ -23,12 +23,12 @@
mod syn_ext;
-use crate::syn_ext::RequireStrLit;
use heck::ToUpperCamelCase;
use proc_macro::TokenStream;
use quote::quote;
-use syn::punctuated::Punctuated;
-use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Meta, Token};
+use syn::{parse_macro_input, punctuated::Punctuated, Data, DataStruct, DeriveInput, Meta, Token};
+
+use crate::syn_ext::RequireStrLit;
/// Generates a delegating `NetworkBehaviour` implementation for the struct this is used for. See
/// the trait documentation for better description.
diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs
index bcab6e5b700..0edf02473e6 100644
--- a/swarm-test/src/lib.rs
+++ b/swarm-test/src/lib.rs
@@ -18,27 +18,32 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
+use std::{fmt::Debug, future::IntoFuture, time::Duration};
+
use async_trait::async_trait;
-use futures::future::{BoxFuture, Either};
-use futures::{FutureExt, StreamExt};
+use futures::{
+ future::{BoxFuture, Either},
+ FutureExt, StreamExt,
+};
use libp2p_core::{multiaddr::Protocol, Multiaddr};
use libp2p_identity::PeerId;
-use libp2p_swarm::dial_opts::PeerCondition;
-use libp2p_swarm::{dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent};
-use std::fmt::Debug;
-use std::future::IntoFuture;
-use std::time::Duration;
+use libp2p_swarm::{
+ dial_opts::{DialOpts, PeerCondition},
+ NetworkBehaviour, Swarm, SwarmEvent,
+};
-/// An extension trait for [`Swarm`] that makes it easier to set up a network of [`Swarm`]s for tests.
+/// An extension trait for [`Swarm`] that makes it
+/// easier to set up a network of [`Swarm`]s for tests.
#[async_trait]
pub trait SwarmExt {
type NB: NetworkBehaviour;
/// Create a new [`Swarm`] with an ephemeral identity and the `async-std` runtime.
///
- /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and
- /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test
- /// and may change at any time.
+ /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a
+ /// [`libp2p_plaintext::Config`] authentication layer and [`libp2p_yamux::Config`] as the
+ /// multiplexer. However, these details should not be relied
+ /// upon by the test and may change at any time.
#[cfg(feature = "async-std")]
fn new_ephemeral(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self
where
@@ -46,19 +51,22 @@ pub trait SwarmExt {
/// Create a new [`Swarm`] with an ephemeral identity and the `tokio` runtime.
///
- /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and
- /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test
- /// and may change at any time.
+ /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a
+ /// [`libp2p_plaintext::Config`] authentication layer and [`libp2p_yamux::Config`] as the
+ /// multiplexer. However, these details should not be relied
+ /// upon by the test and may change at any time.
#[cfg(feature = "tokio")]
fn new_ephemeral_tokio(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self
where
Self: Sized;
- /// Establishes a connection to the given [`Swarm`], polling both of them until the connection is established.
+ /// Establishes a connection to the given [`Swarm`], polling both of them until the connection
+ /// is established.
///
/// This will take addresses from the `other` [`Swarm`] via [`Swarm::external_addresses`].
/// By default, this iterator will not yield any addresses.
- /// To add listen addresses as external addresses, use [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`].
+ /// To add listen addresses as external addresses, use
+ /// [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`].
async fn connect(&mut self, other: &mut Swarm<T>)
where
T: NetworkBehaviour + Send,
@@ -66,10 +74,12 @@ pub trait SwarmExt {
/// Dial the provided address and wait until a connection has been established.
///
- /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but that is not always possible.
- /// This function only abstracts away the "dial and wait for `ConnectionEstablished` event" part.
+ /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but that is not always
+ /// possible. This function only abstracts away the "dial and wait for
+ /// `ConnectionEstablished` event" part.
///
- /// Because we don't have access to the other [`Swarm`], we can't guarantee that it makes progress.
+ /// Because we don't have access to the other [`Swarm`],
+ /// we can't guarantee that it makes progress.
async fn dial_and_wait(&mut self, addr: Multiaddr) -> PeerId;
/// Wait for specified condition to return `Some`.
@@ -78,7 +88,8 @@ pub trait SwarmExt {
P: Fn(SwarmEvent<<Self::NB as NetworkBehaviour>::ToSwarm>) -> Option<E>,
P: Send;
- /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections.
+ /// Listens for incoming connections, polling the [`Swarm`] until the
+ /// transport is ready to accept connections.
///
/// The first address is for the memory transport, the second one for the TCP transport.
fn listen(&mut self) -> ListenFuture<&mut Self>;
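
The helpers documented above are typically combined as below. This is a hedged sketch assuming the `async-std` feature of `libp2p-swarm-test` and a `ping::Behaviour`; the builder method names are taken from the doc comments in this hunk, the rest is an assumption.

    use libp2p_ping as ping;
    use libp2p_swarm::Swarm;
    use libp2p_swarm_test::SwarmExt as _;

    async fn connect_two_swarms() {
        let mut alice = Swarm::new_ephemeral(|_| ping::Behaviour::default());
        let mut bob = Swarm::new_ephemeral(|_| ping::Behaviour::default());

        // Publish the memory listen address as an external address so that
        // `connect` can discover it via `Swarm::external_addresses`.
        alice.listen().with_memory_addr_external().await;
        bob.connect(&mut alice).await;
    }
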
@@ -102,17 +113,19 @@ pub trait SwarmExt {
///
/// ## Number of events
///
-/// The number of events is configured via const generics based on the array size of the return type.
-/// This allows the compiler to infer how many events you are expecting based on how you use this function.
-/// For example, if you expect the first [`Swarm`] to emit 2 events, you should assign the first variable of the returned tuple value to an array of size 2.
-/// This works especially well if you directly pattern-match on the return value.
+/// The number of events is configured via const generics based on the array size of the return
+/// type. This allows the compiler to infer how many events you are expecting based on how you use
+/// this function. For example, if you expect the first [`Swarm`] to emit 2 events, you should
+/// assign the first variable of the returned tuple value to an array of size 2. This works
+/// especially well if you directly pattern-match on the return value.
///
/// ## Type of event
///
/// This function utilizes the [`TryIntoOutput`] trait.
/// Similar as to the number of expected events, the type of event is inferred based on your usage.
/// If you match against a [`SwarmEvent`], the first [`SwarmEvent`] will be returned.
-/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event.
+/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not
+/// [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event.
///
/// You can implement the [`TryIntoOutput`] for any other type to further customize this behaviour.
///
@@ -120,13 +133,16 @@ pub trait SwarmExt {
///
/// This function is similar to joining two futures with two crucial differences:
/// 1. As described above, it allows you to obtain more than a single event.
-/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they already has emitted all expected events**.
+/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they have already
+///    emitted all expected events**.
///
/// Especially (2) is crucial for our usage of this function.
/// If a [`Swarm`] is not polled, nothing within it makes progress.
-/// This can "starve" the other swarm which for example may wait for another message to be sent on a connection.
+/// This can "starve" the other swarm which for example may wait for another message to be sent on a
+/// connection.
///
-/// Using [`drive`] instead of [`futures::future::join`] ensures that a [`Swarm`] continues to be polled, even after it emitted its events.
+/// Using [`drive`] instead of [`futures::future::join`] ensures that a [`Swarm`] continues to be
+/// polled, even after it emitted its events.
pub async fn drive<
TBehaviour1,
const NUM_EVENTS_SWARM_1: usize,
@@ -231,7 +247,12 @@ where
behaviour_fn(identity),
peer_id,
libp2p_swarm::Config::with_async_std_executor()
- .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures.,
+ // Some tests need
+ // connections to be kept
+ // alive beyond what the
+ // individual behaviour
+ // configures.,
+ .with_idle_connection_timeout(Duration::from_secs(5)),
)
}
@@ -259,7 +280,11 @@ where
behaviour_fn(identity),
peer_id,
libp2p_swarm::Config::with_tokio_executor()
- .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures.,
+ .with_idle_connection_timeout(Duration::from_secs(5)), /* Some tests need
+ * connections to be kept
+ * alive beyond what the
+ * individual behaviour
+ * configures., */
)
}
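
Returning to the `drive` helper documented earlier in this file: the const-generic array sizes in the caller's pattern determine how many events are collected from each swarm. A hedged, concrete sketch using `ping` events; the signature details are inferred from the doc comment above, not from this diff.

    use libp2p_ping as ping;
    use libp2p_swarm::Swarm;
    use libp2p_swarm_test::drive;

    // Wait for exactly one behaviour event from each side; non-behaviour
    // `SwarmEvent`s are skipped because we match against `ping::Event`.
    async fn one_ping_each(alice: &mut Swarm<ping::Behaviour>, bob: &mut Swarm<ping::Behaviour>) {
        let ([alice_event], [bob_event]): ([ping::Event; 1], [ping::Event; 1]) =
            drive(alice, bob).await;

        assert!(alice_event.result.is_ok());
        assert!(bob_event.result.is_ok());
    }
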
@@ -385,20 +410,24 @@ pub struct ListenFuture
{
}
impl ListenFuture {
- /// Adds the memory address we are starting to listen on as an external address using [`Swarm::add_external_address`].
+ /// Adds the memory address we are starting to listen on as an external address using
+ /// [`Swarm::add_external_address`].
///
- /// This is typically "safe" for tests because within a process, memory addresses are "globally" reachable.
- /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default.
+ /// This is typically "safe" for tests because within a process, memory addresses are "globally"
+ /// reachable. However, some tests depend on which addresses are external and need this to
+ /// be configurable so it is not a good default.
pub fn with_memory_addr_external(mut self) -> Self {
self.add_memory_external = true;
self
}
- /// Adds the TCP address we are starting to listen on as an external address using [`Swarm::add_external_address`].
+ /// Adds the TCP address we are starting to listen on as an external address using
+ /// [`Swarm::add_external_address`].
///
- /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for other [`Swarm`]s.
- /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default.
+ /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for
+ /// other [`Swarm`]s. However, some tests depend on which addresses are external and need
+ /// this to be configurable so it is not a good default.
pub fn with_tcp_addr_external(mut self) -> Self {
self.add_tcp_external = true;
diff --git a/swarm/benches/connection_handler.rs b/swarm/benches/connection_handler.rs
index 09340421f83..a5e47528308 100644
--- a/swarm/benches/connection_handler.rs
+++ b/swarm/benches/connection_handler.rs
@@ -1,3 +1,5 @@
+use std::{convert::Infallible, sync::atomic::AtomicUsize};
+
use async_std::stream::StreamExt;
use criterion::{criterion_group, criterion_main, Criterion};
use libp2p_core::{
@@ -5,7 +7,6 @@ use libp2p_core::{
};
use libp2p_identity::PeerId;
use libp2p_swarm::{ConnectionHandler, NetworkBehaviour, StreamProtocol};
-use std::{convert::Infallible, sync::atomic::AtomicUsize};
use web_time::Duration;
macro_rules! gen_behaviour {
@@ -82,7 +83,7 @@ benchmarks! {
SpinningBehaviour20::bench().name(m).poll_count(500).protocols_per_behaviour(100),
];
}
-//fn main() {}
+// fn main() {}
trait BigBehaviour: Sized {
fn behaviours(&mut self) -> &mut [SpinningBehaviour];
diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs
index 35aed12fba5..8c8c5998f67 100644
--- a/swarm/src/behaviour.rs
+++ b/swarm/src/behaviour.rs
@@ -24,23 +24,22 @@ mod listen_addresses;
mod peer_addresses;
pub mod toggle;
-pub use external_addresses::ExternalAddresses;
-pub use listen_addresses::ListenAddresses;
-pub use peer_addresses::PeerAddresses;
+use std::task::{Context, Poll};
-use crate::connection::ConnectionId;
-use crate::dial_opts::DialOpts;
-use crate::listen_opts::ListenOpts;
-use crate::{
- ConnectionDenied, ConnectionError, ConnectionHandler, DialError, ListenError, THandler,
- THandlerInEvent, THandlerOutEvent,
-};
+pub use external_addresses::ExternalAddresses;
use libp2p_core::{
transport::{ListenerId, PortUse},
ConnectedPoint, Endpoint, Multiaddr,
};
use libp2p_identity::PeerId;
-use std::{task::Context, task::Poll};
+pub use listen_addresses::ListenAddresses;
+pub use peer_addresses::PeerAddresses;
+
+use crate::{
+ connection::ConnectionId, dial_opts::DialOpts, listen_opts::ListenOpts, ConnectionDenied,
+ ConnectionError, ConnectionHandler, DialError, ListenError, THandler, THandlerInEvent,
+ THandlerOutEvent,
+};
/// A [`NetworkBehaviour`] defines the behaviour of the local node on the network.
///
@@ -101,25 +100,25 @@ use std::{task::Context, task::Poll};
/// #[behaviour(to_swarm = "Event")]
/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
/// struct MyBehaviour {
-/// identify: identify::Behaviour,
-/// ping: ping::Behaviour,
+/// identify: identify::Behaviour,
+/// ping: ping::Behaviour,
/// }
///
/// enum Event {
-/// Identify(identify::Event),
-/// Ping(ping::Event),
+/// Identify(identify::Event),
+/// Ping(ping::Event),
/// }
///
/// impl From<identify::Event> for Event {
-/// fn from(event: identify::Event) -> Self {
-/// Self::Identify(event)
-/// }
+/// fn from(event: identify::Event) -> Self {
+/// Self::Identify(event)
+/// }
/// }
///
/// impl From<ping::Event> for Event {
-/// fn from(event: ping::Event) -> Self {
-/// Self::Ping(event)
-/// }
+/// fn from(event: ping::Event) -> Self {
+/// Self::Ping(event)
+/// }
/// }
/// ```
pub trait NetworkBehaviour: 'static {
@@ -131,8 +130,8 @@ pub trait NetworkBehaviour: 'static {
/// Callback that is invoked for every new inbound connection.
///
- /// At this point in the connection lifecycle, only the remote's and our local address are known.
- /// We have also already allocated a [`ConnectionId`].
+ /// At this point in the connection lifecycle, only the remote's and our local address are
+ /// known. We have also already allocated a [`ConnectionId`].
///
/// Any error returned from this function will immediately abort the dial attempt.
fn handle_pending_inbound_connection(
@@ -148,9 +147,10 @@ pub trait NetworkBehaviour: 'static {
///
/// This is invoked once another peer has successfully dialed us.
///
- /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial.
- /// In order to actually use this connection, this function must return a [`ConnectionHandler`].
- /// Returning an error will immediately close the connection.
+ /// At this point, we have verified their [`PeerId`] and we know which particular [`Multiaddr`]
+ /// succeeded in the dial. In order to actually use this connection, this function must
+ /// return a [`ConnectionHandler`]. Returning an error will immediately close the
+ /// connection.
///
/// Note when any composed behaviour returns an error the connection will be closed and a
/// [`FromSwarm::ListenFailure`] event will be emitted.
@@ -168,10 +168,14 @@ pub trait NetworkBehaviour: 'static {
///
/// - The [`PeerId`], if known. Remember that we can dial without a [`PeerId`].
/// - All addresses passed to [`DialOpts`] are passed in here too.
- /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. Typically, this is set to [`Endpoint::Dialer`] except if we are attempting a hole-punch.
- /// - The [`ConnectionId`] identifying the future connection resulting from this dial, if successful.
+ /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. Typically, this is set
+ /// to [`Endpoint::Dialer`] except if we are attempting a hole-punch.
+ /// - The [`ConnectionId`] identifying the future connection resulting from this dial, if
+ /// successful.
///
- /// Note that the addresses returned from this function are only used for dialing if [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour) is set.
+ /// Note that the addresses returned from this function are only used for dialing if
+ /// [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour)
+ /// is set.
///
/// Any error returned from this function will immediately abort the dial attempt.
fn handle_pending_outbound_connection(
@@ -187,9 +191,10 @@ pub trait NetworkBehaviour: 'static {
/// Callback that is invoked for every established outbound connection.
///
/// This is invoked once we have successfully dialed a peer.
- /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial.
- /// In order to actually use this connection, this function must return a [`ConnectionHandler`].
- /// Returning an error will immediately close the connection.
+ /// At this point, we have verified their [`PeerId`] and we know which particular [`Multiaddr`]
+ /// succeeded in the dial. In order to actually use this connection, this function must
+ /// return a [`ConnectionHandler`]. Returning an error will immediately close the
+ /// connection.
///
/// Note when any composed behaviour returns an error the connection will be closed and a
/// [`FromSwarm::DialFailure`] event will be emitted.
@@ -240,8 +245,9 @@ pub enum ToSwarm {
/// On failure, [`NetworkBehaviour::on_swarm_event`] with `DialFailure` is invoked.
///
/// [`DialOpts`] provides access to the [`ConnectionId`] via [`DialOpts::connection_id`].
- /// This [`ConnectionId`] will be used throughout the connection's lifecycle to associate events with it.
- /// This allows a [`NetworkBehaviour`] to identify a connection that resulted out of its own dial request.
+ /// This [`ConnectionId`] will be used throughout the connection's lifecycle to associate
+ /// events with it. This allows a [`NetworkBehaviour`] to identify a connection that
+ /// resulted out of its own dial request.
Dial { opts: DialOpts },
/// Instructs the [`Swarm`](crate::Swarm) to listen on the provided address.
@@ -253,8 +259,8 @@ pub enum ToSwarm {
/// Instructs the `Swarm` to send an event to the handler dedicated to a
/// connection with a peer.
///
- /// If the `Swarm` is connected to the peer, the message is delivered to the [`ConnectionHandler`]
- /// instance identified by the peer ID and connection ID.
+ /// If the `Swarm` is connected to the peer, the message is delivered to the
+ /// [`ConnectionHandler`] instance identified by the peer ID and connection ID.
///
/// If the specified connection no longer exists, the event is silently dropped.
///
@@ -278,11 +284,12 @@ pub enum ToSwarm {
///
/// The emphasis on a **new** candidate is important.
/// Protocols MUST take care to only emit a candidate once per "source".
- /// For example, the observed address of a TCP connection does not change throughout its lifetime.
- /// Thus, only one candidate should be emitted per connection.
+ /// For example, the observed address of a TCP connection does not change throughout its
+ /// lifetime. Thus, only one candidate should be emitted per connection.
///
- /// This makes the report frequency of an address a meaningful data-point for consumers of this event.
- /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`].
+ /// This makes the report frequency of an address a meaningful data-point for consumers of this
+ /// event. This address will be shared with all [`NetworkBehaviour`]s via
+ /// [`FromSwarm::NewExternalAddrCandidate`].
///
/// This address could come from a variety of sources:
/// - A protocol such as identify obtained it from a remote.
@@ -290,25 +297,32 @@ pub enum ToSwarm {
/// - We made an educated guess based on one of our listen addresses.
NewExternalAddrCandidate(Multiaddr),
- /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be externally reachable.
+ /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be
+ /// externally reachable.
///
- /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if we are indeed externally reachable on this address.
- /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`].
+ /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if
+ /// we are indeed externally reachable on this address. This address will be shared with
+ /// all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`].
ExternalAddrConfirmed(Multiaddr),
- /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under the provided address.
+ /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under
+ /// the provided address.
///
/// This expires an address that was earlier confirmed via [`ToSwarm::ExternalAddrConfirmed`].
- /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`].
+ /// This address will be shared with all [`NetworkBehaviour`]s via
+ /// [`FromSwarm::ExternalAddrExpired`].
ExternalAddrExpired(Multiaddr),
- /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given peer.
+ /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given
+ /// peer.
///
- /// Closing a connection via [`ToSwarm::CloseConnection`] will poll [`ConnectionHandler::poll_close`] to completion.
- /// In most cases, stopping to "use" a connection is enough to have it closed.
- /// The keep-alive algorithm will close a connection automatically once all [`ConnectionHandler`]s are idle.
+ /// Closing a connection via [`ToSwarm::CloseConnection`] will poll
+ /// [`ConnectionHandler::poll_close`] to completion. In most cases, stopping to "use" a
+ /// connection is enough to have it closed. The keep-alive algorithm will close a
+ /// connection automatically once all [`ConnectionHandler`]s are idle.
///
- /// Use this command if you want to close a connection _despite_ it still being in use by one or more handlers.
+ /// Use this command if you want to close a connection _despite_ it still being in use by one
+ /// or more handlers.
CloseConnection {
/// The peer to disconnect.
peer_id: PeerId,
@@ -316,7 +330,8 @@ pub enum ToSwarm {
connection: CloseConnection,
},
- /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) and through that to other [`NetworkBehaviour`]s.
+ /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) and through that
+ /// to other [`NetworkBehaviour`]s.
NewExternalAddrOfPeer { peer_id: PeerId, address: Multiaddr },
}
@@ -440,8 +455,8 @@ pub enum FromSwarm<'a> {
/// Informs the behaviour that an error
/// happened on an incoming connection during its initial handshake.
///
- /// This can include, for example, an error during the handshake of the encryption layer, or the
- /// connection unexpectedly closed.
+ /// This can include, for example, an error during the handshake of the encryption layer, or
+ /// the connection unexpectedly closed.
ListenFailure(ListenFailure<'a>),
/// Informs the behaviour that a new listener was created.
NewListener(NewListener),
@@ -455,11 +470,13 @@ pub enum FromSwarm<'a> {
ListenerError(ListenerError<'a>),
/// Informs the behaviour that a listener closed.
ListenerClosed(ListenerClosed<'a>),
- /// Informs the behaviour that we have discovered a new candidate for an external address for us.
+ /// Informs the behaviour that we have discovered a new candidate for an external address for
+ /// us.
NewExternalAddrCandidate(NewExternalAddrCandidate<'a>),
/// Informs the behaviour that an external address of the local node was confirmed.
ExternalAddrConfirmed(ExternalAddrConfirmed<'a>),
- /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer confirmed.
+ /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer
+ /// confirmed.
ExternalAddrExpired(ExternalAddrExpired<'a>),
/// Informs the behaviour that we have discovered a new external address for a remote peer.
NewExternalAddrOfPeer(NewExternalAddrOfPeer<'a>),
@@ -559,7 +576,8 @@ pub struct ListenerClosed<'a> {
pub reason: Result<(), &'a std::io::Error>,
}
-/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address for us.
+/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address
+/// for us.
#[derive(Debug, Clone, Copy)]
pub struct NewExternalAddrCandidate<'a> {
pub addr: &'a Multiaddr,
@@ -577,7 +595,8 @@ pub struct ExternalAddrExpired<'a> {
pub addr: &'a Multiaddr,
}
-/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer was detected.
+/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer
+/// was detected.
#[derive(Clone, Copy, Debug)]
pub struct NewExternalAddrOfPeer<'a> {
pub peer_id: PeerId,
diff --git a/swarm/src/behaviour/either.rs b/swarm/src/behaviour/either.rs
index 7a51303e74d..b9a86e1b9d8 100644
--- a/swarm/src/behaviour/either.rs
+++ b/swarm/src/behaviour/either.rs
@@ -18,14 +18,17 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::behaviour::{self, NetworkBehaviour, ToSwarm};
-use crate::connection::ConnectionId;
-use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent};
+use std::task::{Context, Poll};
+
use either::Either;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{Endpoint, Multiaddr};
+use libp2p_core::{transport::PortUse, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use std::{task::Context, task::Poll};
+
+use crate::{
+ behaviour::{self, NetworkBehaviour, ToSwarm},
+ connection::ConnectionId,
+ ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent,
+};
/// Implementation of [`NetworkBehaviour`] that can be either of two implementations.
impl NetworkBehaviour for Either
diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs
index 579f46fe486..ba2dd3eb890 100644
--- a/swarm/src/behaviour/external_addresses.rs
+++ b/swarm/src/behaviour/external_addresses.rs
@@ -1,6 +1,7 @@
-use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm};
use libp2p_core::Multiaddr;
+use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm};
+
/// The maximum number of local external addresses. When reached any
/// further externally reported addresses are ignored. The behaviour always
/// tracks all its listen addresses.
@@ -78,17 +79,20 @@ impl ExternalAddresses {
}
fn push_front(&mut self, addr: &Multiaddr) {
- self.addresses.insert(0, addr.clone()); // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so this isn't very expensive.
+ // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so
+ // this isn't very expensive.
+ self.addresses.insert(0, addr.clone());
}
}
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_core::multiaddr::Protocol;
use once_cell::sync::Lazy;
use rand::Rng;
+ use super::*;
+
#[test]
fn new_external_addr_returns_correct_changed_value() {
let mut addresses = ExternalAddresses::default();
diff --git a/swarm/src/behaviour/listen_addresses.rs b/swarm/src/behaviour/listen_addresses.rs
index 6076f5e7923..0c685d798c7 100644
--- a/swarm/src/behaviour/listen_addresses.rs
+++ b/swarm/src/behaviour/listen_addresses.rs
@@ -1,7 +1,9 @@
-use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr};
-use libp2p_core::Multiaddr;
use std::collections::HashSet;
+use libp2p_core::Multiaddr;
+
+use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr};
+
/// Utility struct for tracking the addresses a [`Swarm`](crate::Swarm) is listening on.
#[derive(Debug, Default, Clone)]
pub struct ListenAddresses {
@@ -32,10 +34,11 @@ impl ListenAddresses {
#[cfg(test)]
mod tests {
- use super::*;
use libp2p_core::{multiaddr::Protocol, transport::ListenerId};
use once_cell::sync::Lazy;
+ use super::*;
+
#[test]
fn new_listen_addr_returns_correct_changed_value() {
let mut addresses = ListenAddresses::default();
diff --git a/swarm/src/behaviour/peer_addresses.rs b/swarm/src/behaviour/peer_addresses.rs
index 1eeead56ca1..5aeae7741d5 100644
--- a/swarm/src/behaviour/peer_addresses.rs
+++ b/swarm/src/behaviour/peer_addresses.rs
@@ -1,12 +1,10 @@
-use crate::behaviour::FromSwarm;
-use crate::{DialError, DialFailure, NewExternalAddrOfPeer};
+use std::num::NonZeroUsize;
use libp2p_core::Multiaddr;
use libp2p_identity::PeerId;
-
use lru::LruCache;
-use std::num::NonZeroUsize;
+use crate::{behaviour::FromSwarm, DialError, DialFailure, NewExternalAddrOfPeer};
/// Struct for tracking peers' external addresses of the [`Swarm`](crate::Swarm).
#[derive(Debug)]
@@ -46,7 +44,6 @@ impl PeerAddresses {
/// Appends address to the existing set if peer addresses already exist.
/// Creates a new cache entry for peer_id if no addresses are present.
/// Returns true if the newly added address was not previously in the cache.
- ///
pub fn add(&mut self, peer: PeerId, address: Multiaddr) -> bool {
match prepare_addr(&peer, &address) {
Ok(address) => {
@@ -98,17 +95,17 @@ impl Default for PeerAddresses {
#[cfg(test)]
mod tests {
- use super::*;
use std::io;
- use crate::ConnectionId;
use libp2p_core::{
multiaddr::Protocol,
transport::{memory::MemoryTransportError, TransportError},
};
-
use once_cell::sync::Lazy;
+ use super::*;
+ use crate::ConnectionId;
+
#[test]
fn new_peer_addr_returns_correct_changed_value() {
let mut cache = PeerAddresses::default();
diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs
index 3dde364bf19..e70e6cf9896 100644
--- a/swarm/src/behaviour/toggle.rs
+++ b/swarm/src/behaviour/toggle.rs
@@ -18,22 +18,24 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::behaviour::FromSwarm;
-use crate::connection::ConnectionId;
-use crate::handler::{
- AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
- FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol,
-};
-use crate::upgrade::SendWrapper;
-use crate::{
- ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
-};
+use std::task::{Context, Poll};
+
use either::Either;
use futures::future;
-use libp2p_core::transport::PortUse;
-use libp2p_core::{upgrade::DeniedUpgrade, Endpoint, Multiaddr};
+use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr};
use libp2p_identity::PeerId;
-use std::{task::Context, task::Poll};
+
+use crate::{
+ behaviour::FromSwarm,
+ connection::ConnectionId,
+ handler::{
+ AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent,
+ DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError,
+ SubstreamProtocol,
+ },
+ upgrade::SendWrapper,
+ ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
+};
/// Implementation of `NetworkBehaviour` that can be either in the disabled or enabled state.
///
diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs
index 78c007fd71d..32cae54a5ef 100644
--- a/swarm/src/connection.rs
+++ b/swarm/src/connection.rs
@@ -23,42 +23,47 @@ mod error;
pub(crate) mod pool;
mod supported_protocols;
+use std::{
+ collections::{HashMap, HashSet},
+ fmt,
+ fmt::{Display, Formatter},
+ future::Future,
+ io, mem,
+ pin::Pin,
+ sync::atomic::{AtomicUsize, Ordering},
+ task::{Context, Poll, Waker},
+ time::Duration,
+};
+
pub use error::ConnectionError;
pub(crate) use error::{
PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError,
};
-use libp2p_core::transport::PortUse;
+use futures::{future::BoxFuture, stream, stream::FuturesUnordered, FutureExt, StreamExt};
+use futures_timer::Delay;
+use libp2p_core::{
+ connection::ConnectedPoint,
+ multiaddr::Multiaddr,
+ muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox},
+ transport::PortUse,
+ upgrade,
+ upgrade::{NegotiationError, ProtocolError},
+ Endpoint,
+};
+use libp2p_identity::PeerId;
pub use supported_protocols::SupportedProtocols;
+use web_time::Instant;
-use crate::handler::{
- AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError, FullyNegotiatedInbound,
- FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, ProtocolsChange, UpgradeInfoSend,
-};
-use crate::stream::ActiveStreamCounter;
-use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend};
use crate::{
+ handler::{
+ AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError,
+ FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport,
+ ProtocolsChange, UpgradeInfoSend,
+ },
+ stream::ActiveStreamCounter,
+ upgrade::{InboundUpgradeSend, OutboundUpgradeSend},
ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol,
};
-use futures::future::BoxFuture;
-use futures::stream::FuturesUnordered;
-use futures::StreamExt;
-use futures::{stream, FutureExt};
-use futures_timer::Delay;
-use libp2p_core::connection::ConnectedPoint;
-use libp2p_core::multiaddr::Multiaddr;
-use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox};
-use libp2p_core::upgrade;
-use libp2p_core::upgrade::{NegotiationError, ProtocolError};
-use libp2p_core::Endpoint;
-use libp2p_identity::PeerId;
-use std::collections::{HashMap, HashSet};
-use std::fmt::{Display, Formatter};
-use std::future::Future;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::task::Waker;
-use std::time::Duration;
-use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll};
-use web_time::Instant;
static NEXT_CONNECTION_ID: AtomicUsize = AtomicUsize::new(1);
@@ -72,7 +77,8 @@ impl ConnectionId {
/// [`Swarm`](crate::Swarm) enforces that [`ConnectionId`]s are unique and not reused.
/// This constructor does not, hence the _unchecked_.
///
- /// It is primarily meant for allowing manual tests of [`NetworkBehaviour`](crate::NetworkBehaviour)s.
+ /// It is primarily meant for allowing manual tests of
+ /// [`NetworkBehaviour`](crate::NetworkBehaviour)s.
pub fn new_unchecked(id: usize) -> Self {
Self(id)
}
@@ -147,8 +153,8 @@ where
max_negotiating_inbound_streams: usize,
/// Contains all upgrades that are waiting for a new outbound substream.
///
- /// The upgrade timeout is already ticking here so this may fail in case the remote is not quick
- /// enough in providing us with a new stream.
+ /// The upgrade timeout is already ticking here so this may fail in case the remote is not
+ /// quick enough in providing us with a new stream.
requested_substreams: FuturesUnordered<
SubstreamRequested,
>,
@@ -223,7 +229,8 @@ where
self.handler.on_behaviour_event(event);
}
- /// Begins an orderly shutdown of the connection, returning a stream of final events and a `Future` that resolves when connection shutdown is complete.
+ /// Begins an orderly shutdown of the connection, returning a stream of final events and a
+ /// `Future` that resolves when connection shutdown is complete.
pub(crate) fn close(
self,
) -> (
@@ -320,7 +327,8 @@ where
}
}
- // In case the [`ConnectionHandler`] can not make any more progress, poll the negotiating outbound streams.
+ // In case the [`ConnectionHandler`] can not make any more progress, poll the
+ // negotiating outbound streams.
match negotiating_out.poll_next_unpin(cx) {
Poll::Pending | Poll::Ready(None) => {}
Poll::Ready(Some((info, Ok(protocol)))) => {
@@ -368,7 +376,8 @@ where
}
// Check if the connection (and handler) should be shut down.
- // As long as we're still negotiating substreams or have any active streams shutdown is always postponed.
+ // As long as we're still negotiating substreams or have
+ // any active streams shutdown is always postponed.
if negotiating_in.is_empty()
&& negotiating_out.is_empty()
&& requested_substreams.is_empty()
@@ -419,7 +428,9 @@ where
stream_counter.clone(),
));
- continue; // Go back to the top, handler can potentially make progress again.
+ // Go back to the top,
+ // handler can potentially make progress again.
+ continue;
}
}
}
@@ -436,7 +447,9 @@ where
stream_counter.clone(),
));
- continue; // Go back to the top, handler can potentially make progress again.
+ // Go back to the top,
+ // handler can potentially make progress again.
+ continue;
}
}
}
@@ -451,10 +464,12 @@ where
for change in changes {
handler.on_connection_event(ConnectionEvent::LocalProtocolsChange(change));
}
- continue; // Go back to the top, handler can potentially make progress again.
+ // Go back to the top, handler can potentially make progress again.
+ continue;
}
- return Poll::Pending; // Nothing can make progress, return `Pending`.
+ // Nothing can make progress, return `Pending`.
+ return Poll::Pending;
}
}
@@ -482,7 +497,8 @@ fn compute_new_shutdown(
) -> Option {
match (current_shutdown, handler_keep_alive) {
(_, false) if idle_timeout == Duration::ZERO => Some(Shutdown::Asap),
- (Shutdown::Later(_), false) => None, // Do nothing, i.e. let the shutdown timer continue to tick.
+ // Do nothing, i.e. let the shutdown timer continue to tick.
+ (Shutdown::Later(_), false) => None,
(_, false) => {
let now = Instant::now();
let safe_keep_alive = checked_add_fraction(now, idle_timeout);
@@ -493,10 +509,12 @@ fn compute_new_shutdown(
}
}
-/// Repeatedly halves and adds the [`Duration`] to the [`Instant`] until [`Instant::checked_add`] succeeds.
+/// Repeatedly halves and adds the [`Duration`]
+/// to the [`Instant`] until [`Instant::checked_add`] succeeds.
///
-/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can represent.
-/// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work.
+/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can
+/// represent. The [`Duration`] computed by this function may not be the longest possible that
+/// we can add to `now` but it will work.
fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration {
while start.checked_add(duration).is_none() {
tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration");
@@ -767,19 +785,23 @@ impl> std::hash::Hash for AsStrHashEq {
#[cfg(test)]
mod tests {
- use super::*;
- use crate::dummy;
- use futures::future;
- use futures::AsyncRead;
- use futures::AsyncWrite;
- use libp2p_core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo};
- use libp2p_core::StreamMuxer;
+ use std::{
+ convert::Infallible,
+ sync::{Arc, Weak},
+ time::Instant,
+ };
+
+ use futures::{future, AsyncRead, AsyncWrite};
+ use libp2p_core::{
+ upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo},
+ StreamMuxer,
+ };
use quickcheck::*;
- use std::convert::Infallible;
- use std::sync::{Arc, Weak};
- use std::time::Instant;
use tracing_subscriber::EnvFilter;
+ use super::*;
+ use crate::dummy;
+
#[test]
fn max_negotiating_inbound_streams() {
let _ = tracing_subscriber::fmt()
@@ -906,7 +928,8 @@ mod tests {
);
assert!(connection.handler.remote_removed.is_empty());
- // Third, stop listening on a protocol it never advertised (we can't control what handlers do so this needs to be handled gracefully).
+ // Third, stop listening on a protocol it never advertised (we can't control what handlers
+ // do so this needs to be handled gracefully).
connection.handler.remote_removes_support_for(&["/baz"]);
let _ = connection.poll_noop_waker();
diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs
index 33aa81c19a9..39e5a88fca6 100644
--- a/swarm/src/connection/error.rs
+++ b/swarm/src/connection/error.rs
@@ -18,11 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::transport::TransportError;
-use crate::Multiaddr;
-use crate::{ConnectedPoint, PeerId};
use std::{fmt, io};
+use crate::{transport::TransportError, ConnectedPoint, Multiaddr, PeerId};
+
/// Errors that can occur in the context of an established `Connection`.
#[derive(Debug)]
pub enum ConnectionError {
diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs
index 7964ecbfa69..f42fd1f305c 100644
--- a/swarm/src/connection/pool.rs
+++ b/swarm/src/connection/pool.rs
@@ -18,41 +18,41 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::connection::{Connection, ConnectionId, PendingPoint};
-use crate::{
- connection::{
- Connected, ConnectionError, IncomingInfo, PendingConnectionError,
- PendingInboundConnectionError, PendingOutboundConnectionError,
- },
- transport::TransportError,
- ConnectedPoint, ConnectionHandler, Executor, Multiaddr, PeerId,
+use std::{
+ collections::HashMap,
+ convert::Infallible,
+ fmt,
+ num::{NonZeroU8, NonZeroUsize},
+ pin::Pin,
+ task::{Context, Poll, Waker},
};
+
use concurrent_dial::ConcurrentDial;
use fnv::FnvHashMap;
-use futures::prelude::*;
-use futures::stream::SelectAll;
use futures::{
channel::{mpsc, oneshot},
future::{poll_fn, BoxFuture, Either},
+ prelude::*,
ready,
- stream::FuturesUnordered,
+ stream::{FuturesUnordered, SelectAll},
};
-use libp2p_core::connection::Endpoint;
-use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt};
-use libp2p_core::transport::PortUse;
-use std::convert::Infallible;
-use std::task::Waker;
-use std::{
- collections::HashMap,
- fmt,
- num::{NonZeroU8, NonZeroUsize},
- pin::Pin,
- task::Context,
- task::Poll,
+use libp2p_core::{
+ connection::Endpoint,
+ muxing::{StreamMuxerBox, StreamMuxerExt},
+ transport::PortUse,
};
use tracing::Instrument;
use web_time::{Duration, Instant};
+use crate::{
+ connection::{
+ Connected, Connection, ConnectionError, ConnectionId, IncomingInfo, PendingConnectionError,
+ PendingInboundConnectionError, PendingOutboundConnectionError, PendingPoint,
+ },
+ transport::TransportError,
+ ConnectedPoint, ConnectionHandler, Executor, Multiaddr, PeerId,
+};
+
mod concurrent_dial;
mod task;
@@ -115,7 +115,8 @@ where
/// See [`Connection::max_negotiating_inbound_streams`].
max_negotiating_inbound_streams: usize,
- /// How many [`task::EstablishedConnectionEvent`]s can be buffered before the connection is back-pressured.
+ /// How many [`task::EstablishedConnectionEvent`]s can be buffered before the connection is
+ /// back-pressured.
per_connection_event_buffer_size: usize,
/// The executor to use for running connection tasks. Can either be a global executor
@@ -247,13 +248,11 @@ pub(crate) enum PoolEvent {
///
/// A connection may close if
///
- /// * it encounters an error, which includes the connection being
- /// closed by the remote. In this case `error` is `Some`.
- /// * it was actively closed by [`EstablishedConnection::start_close`],
- /// i.e. a successful, orderly close.
- /// * it was actively closed by [`Pool::disconnect`], i.e.
- /// dropped without an orderly close.
- ///
+ /// * it encounters an error, which includes the connection being closed by the remote. In
+ /// this case `error` is `Some`.
+ /// * it was actively closed by [`EstablishedConnection::start_close`], i.e. a successful,
+ /// orderly close.
+ /// * it was actively closed by [`Pool::disconnect`], i.e. dropped without an orderly close.
ConnectionClosed {
id: ConnectionId,
/// Information about the connection that errored.
diff --git a/swarm/src/connection/pool/concurrent_dial.rs b/swarm/src/connection/pool/concurrent_dial.rs
index 57e4b078098..99f0b385884 100644
--- a/swarm/src/connection/pool/concurrent_dial.rs
+++ b/swarm/src/connection/pool/concurrent_dial.rs
@@ -18,7 +18,12 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::{transport::TransportError, Multiaddr};
+use std::{
+ num::NonZeroU8,
+ pin::Pin,
+ task::{Context, Poll},
+};
+
use futures::{
future::{BoxFuture, Future},
ready,
@@ -26,11 +31,8 @@ use futures::{
};
use libp2p_core::muxing::StreamMuxerBox;
use libp2p_identity::PeerId;
-use std::{
- num::NonZeroU8,
- pin::Pin,
- task::{Context, Poll},
-};
+
+use crate::{transport::TransportError, Multiaddr};
type Dial = BoxFuture<
'static,
diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs
index 3b808a30fd1..3a82e5c11d1 100644
--- a/swarm/src/connection/pool/task.rs
+++ b/swarm/src/connection/pool/task.rs
@@ -21,6 +21,15 @@
//! Async functions driving pending and established connections in the form of a task.
+use std::{convert::Infallible, pin::Pin};
+
+use futures::{
+ channel::{mpsc, oneshot},
+ future::{poll_fn, Either, Future},
+ SinkExt, StreamExt,
+};
+use libp2p_core::muxing::StreamMuxerBox;
+
use super::concurrent_dial::ConcurrentDial;
use crate::{
connection::{
@@ -30,14 +39,6 @@ use crate::{
transport::TransportError,
ConnectionHandler, Multiaddr, PeerId,
};
-use futures::{
- channel::{mpsc, oneshot},
- future::{poll_fn, Either, Future},
- SinkExt, StreamExt,
-};
-use libp2p_core::muxing::StreamMuxerBox;
-use std::convert::Infallible;
-use std::pin::Pin;
/// Commands that can be sent to a task driving an established connection.
#[derive(Debug)]
diff --git a/swarm/src/connection/supported_protocols.rs b/swarm/src/connection/supported_protocols.rs
index 124ec93d669..c167bf88649 100644
--- a/swarm/src/connection/supported_protocols.rs
+++ b/swarm/src/connection/supported_protocols.rs
@@ -1,7 +1,7 @@
-use crate::handler::ProtocolsChange;
-use crate::StreamProtocol;
use std::collections::HashSet;
+use crate::{handler::ProtocolsChange, StreamProtocol};
+
#[derive(Default, Clone, Debug)]
pub struct SupportedProtocols {
protocols: HashSet<StreamProtocol>,
diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs
index 4f5b621327c..cdaaeb358b2 100644
--- a/swarm/src/dial_opts.rs
+++ b/swarm/src/dial_opts.rs
@@ -19,14 +19,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::ConnectionId;
-use libp2p_core::connection::Endpoint;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::transport::PortUse;
-use libp2p_core::Multiaddr;
-use libp2p_identity::PeerId;
use std::num::NonZeroU8;
+use libp2p_core::{connection::Endpoint, multiaddr::Protocol, transport::PortUse, Multiaddr};
+use libp2p_identity::PeerId;
+
+use crate::ConnectionId;
+
macro_rules! fn_override_role {
() => {
/// Override role of local node on connection. I.e. execute the dial _as a
@@ -130,7 +129,8 @@ impl DialOpts {
/// Get the [`ConnectionId`] of this dial attempt.
///
/// All future events of this dial will be associated with this ID.
- /// See [`DialFailure`](crate::DialFailure) and [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished).
+ /// See [`DialFailure`](crate::DialFailure) and
+ /// [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished).
pub fn connection_id(&self) -> ConnectionId {
self.connection_id
}
@@ -324,8 +324,8 @@ impl WithoutPeerIdWithAddress {
/// # use libp2p_identity::PeerId;
/// #
/// DialOpts::peer_id(PeerId::random())
-/// .condition(PeerCondition::Disconnected)
-/// .build();
+/// .condition(PeerCondition::Disconnected)
+/// .build();
/// ```
#[derive(Debug, Copy, Clone, Default)]
pub enum PeerCondition {
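
The corrected doc example above shows the peer-id entry point; for completeness, here is a hedged sketch of both builder paths. `unknown_peer_id` and its `address` method are assumptions inferred from the `WithoutPeerIdWithAddress` type visible in this hunk, not confirmed by the diff itself.

    use libp2p_core::Multiaddr;
    use libp2p_identity::PeerId;
    use libp2p_swarm::dial_opts::{DialOpts, PeerCondition};

    fn build_dials(peer: PeerId, addr: Multiaddr) -> (DialOpts, DialOpts) {
        // Dial a known peer, but only if we are currently disconnected.
        let by_peer = DialOpts::peer_id(peer)
            .condition(PeerCondition::Disconnected)
            .build();

        // Dial a bare address without knowing the peer's identity upfront.
        let by_addr = DialOpts::unknown_peer_id().address(addr).build();

        (by_peer, by_addr)
    }
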
diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs
index b87ef32c8f7..5452c382cd4 100644
--- a/swarm/src/dummy.rs
+++ b/swarm/src/dummy.rs
@@ -1,19 +1,18 @@
-use crate::behaviour::{FromSwarm, NetworkBehaviour, ToSwarm};
-use crate::connection::ConnectionId;
-use crate::handler::{
- ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound,
+use std::{
+ convert::Infallible,
+ task::{Context, Poll},
};
+
+use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr};
+use libp2p_identity::PeerId;
+
use crate::{
+ behaviour::{FromSwarm, NetworkBehaviour, ToSwarm},
+ connection::ConnectionId,
+ handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound},
ConnectionDenied, ConnectionHandlerEvent, StreamUpgradeError, SubstreamProtocol, THandler,
THandlerInEvent, THandlerOutEvent,
};
-use libp2p_core::transport::PortUse;
-use libp2p_core::upgrade::DeniedUpgrade;
-use libp2p_core::Endpoint;
-use libp2p_core::Multiaddr;
-use libp2p_identity::PeerId;
-use std::convert::Infallible;
-use std::task::{Context, Poll};
/// Implementation of [`NetworkBehaviour`] that doesn't do anything.
pub struct Behaviour;
@@ -61,7 +60,8 @@ impl NetworkBehaviour for Behaviour {
fn on_swarm_event(&mut self, _event: FromSwarm) {}
}
-/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep the connection alive.
+/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep
+/// the connection alive.
#[derive(Clone)]
pub struct ConnectionHandler;
diff --git a/swarm/src/executor.rs b/swarm/src/executor.rs
index a2abbbde6ef..db5ed6b2da4 100644
--- a/swarm/src/executor.rs
+++ b/swarm/src/executor.rs
@@ -1,14 +1,15 @@
//! Provides executors for spawning background tasks.
-use futures::executor::ThreadPool;
use std::{future::Future, pin::Pin};
+use futures::executor::ThreadPool;
+
/// Implemented on objects that can run a `Future` in the background.
///
/// > **Note**: While it may be tempting to implement this trait on types such as
-/// > [`futures::stream::FuturesUnordered`], please note that passing an `Executor` is
-/// > optional, and that `FuturesUnordered` (or a similar struct) will automatically
-/// > be used as fallback by libp2p. The `Executor` trait should therefore only be
-/// > about running `Future`s on a separate task.
+/// > [`futures::stream::FuturesUnordered`], please note that passing an `Executor` is
+/// > optional, and that `FuturesUnordered` (or a similar struct) will automatically
+/// > be used as fallback by libp2p. The `Executor` trait should therefore only be
+/// > about running `Future`s on a separate task.
pub trait Executor {
/// Run the given future in the background until it ends.
#[track_caller]
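
As a concrete illustration of the note above, a background-task executor is a thin wrapper around a runtime's spawn function. A hedged sketch for Tokio; the `exec` signature is inferred from `libp2p_swarm::Executor` and should be checked against the crate docs.

    use std::{future::Future, pin::Pin};

    struct TokioExecutor;

    impl libp2p_swarm::Executor for TokioExecutor {
        fn exec(&self, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
            // Run the future on a separate task instead of polling it inline,
            // which is exactly the role the trait describes.
            tokio::spawn(future);
        }
    }
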
diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs
index 9e31592d68d..3d0407b4f70 100644
--- a/swarm/src/handler.rs
+++ b/swarm/src/handler.rs
@@ -34,9 +34,9 @@
//! used protocol(s) determined by the associated types of the handlers.
//!
//! > **Note**: A [`ConnectionHandler`] handles one or more protocols in the context of a single
-//! > connection with a remote. In order to handle a protocol that requires knowledge of
-//! > the network as a whole, see the
-//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait.
+//! > connection with a remote. In order to handle a protocol that requires knowledge of
+//! > the network as a whole, see the
+//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait.
pub mod either;
mod map_in;
@@ -46,8 +46,15 @@ mod one_shot;
mod pending;
mod select;
-use crate::connection::AsStrHashEq;
-pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend};
+use core::slice;
+use std::{
+ collections::{HashMap, HashSet},
+ error, fmt, io,
+ task::{Context, Poll},
+ time::Duration,
+};
+
+use libp2p_core::Multiaddr;
pub use map_in::MapInEvent;
pub use map_out::MapOutEvent;
pub use one_shot::{OneShotHandler, OneShotHandlerConfig};
@@ -55,11 +62,8 @@ pub use pending::PendingConnectionHandler;
pub use select::ConnectionHandlerSelect;
use smallvec::SmallVec;
-use crate::StreamProtocol;
-use core::slice;
-use libp2p_core::Multiaddr;
-use std::collections::{HashMap, HashSet};
-use std::{error, fmt, io, task::Context, task::Poll, time::Duration};
+pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend};
+use crate::{connection::AsStrHashEq, StreamProtocol};
/// A handler for a set of protocols used on a connection with a remote.
///
@@ -71,17 +75,17 @@ use std::{error, fmt, io, task::Context, task::Poll, time::Duration};
/// Communication with a remote over a set of protocols is initiated in one of two ways:
///
/// 1. Dialing by initiating a new outbound substream. In order to do so,
-/// [`ConnectionHandler::poll()`] must return an [`ConnectionHandlerEvent::OutboundSubstreamRequest`],
-/// providing an instance of [`libp2p_core::upgrade::OutboundUpgrade`] that is used to negotiate the
-/// protocol(s). Upon success, [`ConnectionHandler::on_connection_event`] is called with
+/// [`ConnectionHandler::poll()`] must return an
+/// [`ConnectionHandlerEvent::OutboundSubstreamRequest`], providing an instance of
+/// [`libp2p_core::upgrade::OutboundUpgrade`] that is used to negotiate the protocol(s). Upon
+/// success, [`ConnectionHandler::on_connection_event`] is called with
/// [`ConnectionEvent::FullyNegotiatedOutbound`] translating the final output of the upgrade.
///
-/// 2. Listening by accepting a new inbound substream. When a new inbound substream
-/// is created on a connection, [`ConnectionHandler::listen_protocol`] is called
-/// to obtain an instance of [`libp2p_core::upgrade::InboundUpgrade`] that is used to
-/// negotiate the protocol(s). Upon success,
-/// [`ConnectionHandler::on_connection_event`] is called with [`ConnectionEvent::FullyNegotiatedInbound`]
-/// translating the final output of the upgrade.
+/// 2. Listening by accepting a new inbound substream. When a new inbound substream is created on
+/// a connection, [`ConnectionHandler::listen_protocol`] is called to obtain an instance of
+/// [`libp2p_core::upgrade::InboundUpgrade`] that is used to negotiate the protocol(s). Upon
+/// success, [`ConnectionHandler::on_connection_event`] is called with
+/// [`ConnectionEvent::FullyNegotiatedInbound`] translating the final output of the upgrade.
///
///
/// # Connection Keep-Alive
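(Aside, not part of this change: to make the dialing flow in the rewrapped list above concrete, the fragment below sketches how a handler's `poll` might emit an outbound substream request. It is a hypothetical illustration; the `pending_open` field and the `/my-proto/1.0.0` protocol name are made up.)

```rust
// Hypothetical fragment from a `ConnectionHandler::poll` implementation.
// `self.pending_open` is an assumed bool field tracking a queued request.
if self.pending_open {
    self.pending_open = false;
    return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest {
        // On success, the negotiated stream comes back to the handler via
        // `ConnectionEvent::FullyNegotiatedOutbound`.
        protocol: SubstreamProtocol::new(
            ReadyUpgrade::new(StreamProtocol::new("/my-proto/1.0.0")),
            (),
        ),
    });
}
Poll::Pending
```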
@@ -95,9 +99,13 @@ use std::{error, fmt, io, task::Context, task::Poll, time::Duration};
/// When a connection is closed gracefully, the substreams used by the handler may still
/// continue reading data until the remote closes its side of the connection.
pub trait ConnectionHandler: Send + 'static {
- /// A type representing the message(s) a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`] via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler)
+ /// A type representing the message(s) a
+ /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`]
+ /// via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler)
type FromBehaviour: fmt::Debug + Send + 'static;
- /// A type representing message(s) a [`ConnectionHandler`] can send to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via [`ConnectionHandlerEvent::NotifyBehaviour`].
+ /// A type representing message(s) a [`ConnectionHandler`] can send to a
+ /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via
+ /// [`ConnectionHandlerEvent::NotifyBehaviour`].
type ToBehaviour: fmt::Debug + Send + 'static;
/// The inbound upgrade for the protocol(s) used by the handler.
type InboundProtocol: InboundUpgradeSend;
@@ -112,9 +120,9 @@ pub trait ConnectionHandler: Send + 'static {
/// substreams to negotiate the desired protocols.
///
/// > **Note**: The returned `InboundUpgrade` should always accept all the generally
- /// > supported protocols, even if in a specific context a particular one is
- /// > not supported, (eg. when only allowing one substream at a time for a protocol).
- /// > This allows a remote to put the list of supported protocols in a cache.
+ /// > supported protocols, even if in a specific context a particular one is
+ /// > not supported (e.g. when only allowing one substream at a time for a protocol).
+ /// > This allows a remote to put the list of supported protocols in a cache.
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo>;
/// Returns whether the connection should be kept alive.
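(Aside, not part of this change: on the listening side, the note above asks `listen_protocol` to always advertise every generally supported protocol. A hypothetical sketch of such an implementation, assuming the handler uses `ReadyUpgrade` with a made-up protocol name and `()` as its open info:)

```rust
// Hypothetical `listen_protocol` that advertises the full protocol set
// regardless of per-connection state, so a remote can cache the list.
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo> {
    SubstreamProtocol::new(
        ReadyUpgrade::new(StreamProtocol::new("/my-proto/1.0.0")),
        (),
    )
}
```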
@@ -127,15 +135,21 @@ pub trait ConnectionHandler: Send + 'static {
/// - We are negotiating inbound or outbound streams.
/// - There are active [`Stream`](crate::Stream)s on the connection.
///
- /// The combination of the above means that _most_ protocols will not need to override this method.
- /// This method is only invoked when all of the above are `false`, i.e. when the connection is entirely idle.
+ /// The combination of the above means that _most_ protocols will not need to override this
+ /// method. This method is only invoked when all of the above are `false`, i.e. when the
+ /// connection is entirely idle.
///
/// ## Exceptions
///
- /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) need to keep a connection alive beyond these circumstances and can thus override this method.
- /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** want to keep a connection alive despite an active streams.
+ /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md)
+ /// need to keep a connection alive beyond these circumstances and can thus override this
+ /// method.
+ /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't**
+///   want to keep a connection alive despite active streams.
///
- /// In that case, protocol authors can use [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt-out a particular stream from the keep-alive algorithm.
+ /// In that case, protocol authors can use
+/// [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt out a
+ /// particular stream from the keep-alive algorithm.
fn connection_keep_alive(&self) -> bool {
false
}
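(Aside, not part of this change: as an example of the exceptions listed above, a handler may override the default `false` shown here. The snippet is a hypothetical override in the spirit of circuit-relay v2; the `active_reservations` field is made up.)

```rust
// Hypothetical override: keep the connection alive while this handler still
// tracks reservations, even when no streams are currently open.
fn connection_keep_alive(&self) -> bool {
    !self.active_reservations.is_empty()
}
```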
@@ -160,7 +174,8 @@ pub trait ConnectionHandler: Send + 'static {
/// To signal completion, [`Poll::Ready(None)`] should be returned.
///
/// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour.
- /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to [`ConnectionHandler::poll_close`].
+ /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to
+ /// [`ConnectionHandler::poll_close`].
fn poll_close(&mut self, _: &mut Context<'_>) -> Poll