hotshot_libp2p_networking/network/mod.rs

// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
// This file is part of the HotShot repository.

// You should have received a copy of the MIT License
// along with the HotShot repository. If not, see <https://mit-license.org/>.

/// networking behaviours wrapping libp2p's behaviours
pub mod behaviours;
/// the swarm and network definition (internal)
mod def;
/// functionality of a libp2p network node
mod node;
/// Alternative Libp2p transport implementations
pub mod transport;

/// Forked `cbor` codec with altered request/response sizes
pub mod cbor;

use std::{collections::HashSet, fmt::Debug, sync::Arc};

use async_lock::RwLock;
use bimap::BiMap;
use futures::channel::oneshot::Sender;
use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType};
use libp2p::{
    build_multiaddr,
    core::{muxing::StreamMuxerBox, transport::Boxed},
    dns::tokio::Transport as DnsTransport,
    gossipsub::Event as GossipEvent,
    identify::Event as IdentifyEvent,
    identity::Keypair,
    quic,
    request_response::ResponseChannel,
    Multiaddr, Transport,
};
use libp2p_identity::PeerId;
use parking_lot::Mutex;
use quic::tokio::Transport as QuicTransport;
use tracing::instrument;
use transport::ConsensusKeyAuthentication;
pub use self::{
    def::NetworkDef,
    node::{
        spawn_network_node, GossipConfig, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilder,
        NetworkNodeConfigBuilderError, NetworkNodeHandle, NetworkNodeReceiver,
        RequestResponseConfig, DEFAULT_REPLICATION_FACTOR,
    },
};

/// Actions to send from the client to the swarm
#[derive(Debug)]
pub enum ClientRequest {
    /// Start the Kademlia bootstrap process
    BeginBootstrap,
    /// kill the swarm
    Shutdown,
    /// broadcast a serialized message to the given topic
    GossipMsg(String, Vec<u8>),
    /// subscribe to a topic
    Subscribe(String, Option<Sender<()>>),
    /// unsubscribe from a topic
    Unsubscribe(String, Option<Sender<()>>),
    /// client request to send a direct serialized message
    DirectRequest {
        /// peer id
        pid: PeerId,
        /// msg contents
        contents: Vec<u8>,
        /// number of retries
        retry_count: u8,
    },
    /// client request to send a direct reply to a message
    DirectResponse(ResponseChannel<Vec<u8>>, Vec<u8>),
    /// prune a peer
    Prune(PeerId),
    /// add vec of known peers or addresses
    AddKnownPeers(Vec<(PeerId, Multiaddr)>),
    /// Ignore peers. Only here for debugging purposes.
    /// Allows us to have nodes that are never pruned
    IgnorePeers(Vec<PeerId>),
    /// Put(Key, Value) into DHT
    /// relay success back on channel
    PutDHT {
        /// Key to publish under
        key: Vec<u8>,
        /// Value to publish
        value: Vec<u8>,
        /// Channel to notify caller of result of publishing
        notify: Sender<()>,
    },
    /// Get(Key, Chan)
    GetDHT {
        /// Key to search for
        key: Vec<u8>,
        /// Channels to notify caller of value (or failure to find value)
        notify: Vec<Sender<Vec<u8>>>,
        /// number of retries to make
        retry_count: u8,
    },
    /// Request the number of connected peers
    GetConnectedPeerNum(Sender<usize>),
    /// Request the set of connected peers
    GetConnectedPeers(Sender<HashSet<PeerId>>),
    /// Print the routing table to stderr; debugging only
    GetRoutingTable(Sender<()>),
    /// Get address of peer
    LookupPeer(PeerId, Sender<()>),
}
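
// A hedged usage sketch (illustrative only, not part of the HotShot API
// surface): how a client builds `ClientRequest`s and the oneshot channels it
// uses to await their results. In practice requests are sent over the channel
// returned by `spawn_network_node`; the topic name below is hypothetical.
#[cfg(test)]
mod client_request_sketch {
    use futures::channel::oneshot;

    use super::ClientRequest;

    #[test]
    fn construct_requests() {
        // Publish a record to the DHT; `notify` fires when the put completes.
        let (notify, _notify_rx) = oneshot::channel();
        let put = ClientRequest::PutDHT {
            key: b"record-key".to_vec(),
            value: b"record-value".to_vec(),
            notify,
        };
        assert!(matches!(put, ClientRequest::PutDHT { .. }));

        // Subscribe to a gossip topic; the optional sender reports when the
        // subscription is in place.
        let (subscribed, _subscribed_rx) = oneshot::channel();
        let sub = ClientRequest::Subscribe("example-topic".to_string(), Some(subscribed));
        assert!(matches!(sub, ClientRequest::Subscribe(..)));
    }
}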

/// events generated by the swarm that we wish
/// to relay to the client
#[derive(Debug)]
pub enum NetworkEvent {
    /// Received a broadcast
    GossipMsg(Vec<u8>),
    /// Received a direct message from a node
    DirectRequest(Vec<u8>, PeerId, ResponseChannel<Vec<u8>>),
    /// Received a direct response from a node (that hopefully was initiated by this node)
    DirectResponse(Vec<u8>, PeerId),
    /// Report that Kademlia has successfully bootstrapped into the network
    IsBootstrapped,
    /// The number of connected peers has possibly changed
    ConnectedPeersUpdate(usize),
}
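
// A hedged sketch of how a client might dispatch `NetworkEvent`s pulled from
// a `NetworkNodeReceiver`; the string labels stand in for real handling logic.
#[cfg(test)]
mod network_event_sketch {
    use super::NetworkEvent;

    /// Route a single event to a (placeholder) handler label.
    fn dispatch(event: &NetworkEvent) -> &'static str {
        match event {
            NetworkEvent::GossipMsg(_bytes) => "deserialize and handle the broadcast",
            // A reply would be sent back with
            // `ClientRequest::DirectResponse(channel, response_bytes)`.
            NetworkEvent::DirectRequest(_bytes, _peer, _channel) => "answer the direct request",
            NetworkEvent::DirectResponse(_bytes, _peer) => "match to an outstanding request",
            NetworkEvent::IsBootstrapped => "the DHT is ready for puts and gets",
            NetworkEvent::ConnectedPeersUpdate(_count) => "record the new peer count",
        }
    }

    #[test]
    fn bootstrap_event_is_routed() {
        assert_eq!(
            dispatch(&NetworkEvent::IsBootstrapped),
            "the DHT is ready for puts and gets"
        );
    }
}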

/// internal representation of the network events
/// only used for event processing before relaying to client
#[derive(Debug)]
pub enum NetworkEventInternal {
    /// a DHT event
    DHTEvent(libp2p::kad::Event),
    /// an identify event. Boxed because this event is much larger than the
    /// others, so we store it on the heap.
    IdentifyEvent(Box<IdentifyEvent>),
    /// a gossip event
    GossipEvent(Box<GossipEvent>),
    /// a direct message event
    DMEvent(libp2p::request_response::Event<Vec<u8>, Vec<u8>>),
    /// an autonat event
    AutonatEvent(libp2p::autonat::Event),
}

/// Bind all interfaces on port `port`
/// NOTE: we may want something more general in the future.
#[must_use]
pub fn gen_multiaddr(port: u16) -> Multiaddr {
    build_multiaddr!(Ip4([0, 0, 0, 0]), Udp(port), QuicV1)
}
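
// A small usage sketch: `gen_multiaddr` yields a QUIC listen address on all
// IPv4 interfaces, e.g. `/ip4/0.0.0.0/udp/9000/quic-v1` for port 9000.
#[cfg(test)]
mod gen_multiaddr_sketch {
    use super::gen_multiaddr;

    #[test]
    fn binds_all_interfaces_over_quic() {
        assert_eq!(gen_multiaddr(9000).to_string(), "/ip4/0.0.0.0/udp/9000/quic-v1");
    }
}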

/// `BoxedTransport` is a type alias for a boxed tuple containing a `PeerId` and a `StreamMuxerBox`.
///
/// This type is used to represent a transport in the libp2p network framework. The `PeerId` is a
/// unique identifier for each peer in the network, and the `StreamMuxerBox` is a type of
/// multiplexer that can handle multiple substreams over a single connection.
type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;

/// Generates an authenticated transport checked against the stake table.
/// If the stake table or authentication message is not provided, the transport will
/// not participate in stake table authentication.
///
/// # Errors
/// If we could not create a DNS transport
#[instrument(skip(identity))]
pub async fn gen_transport<T: NodeType>(
    identity: Keypair,
    stake_table: Option<Arc<RwLock<T::Membership>>>,
    auth_message: Option<Vec<u8>>,
    consensus_key_to_pid_map: Arc<Mutex<BiMap<T::SignatureKey, PeerId>>>,
) -> Result<BoxedTransport, NetworkError> {
    // Create the initial `Quic` transport
    let transport = {
        let mut config = quic::Config::new(&identity);
        config.handshake_timeout = std::time::Duration::from_secs(20);
        QuicTransport::new(config)
    };

    // Require authentication against the stake table
    let transport: ConsensusKeyAuthentication<_, T::SignatureKey, _> =
        ConsensusKeyAuthentication::new(transport, auth_message, consensus_key_to_pid_map);

    // Support DNS resolution
    let transport = DnsTransport::system(transport)
        .map_err(|e| NetworkError::ConfigError(format!("failed to build DNS transport: {e}")))?;

    // Box the transport, erasing the concrete muxer and connection types
    Ok(transport
        .map(|(peer_id, connection), _| (peer_id, StreamMuxerBox::new(connection)))
        .boxed())
}
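
// A hedged convenience sketch (not part of the upstream API): building a
// transport with stake-table authentication disabled by passing `None` for
// both the stake table and the authentication message.
#[allow(dead_code)]
async fn gen_unauthenticated_transport<T: NodeType>(
    identity: Keypair,
) -> Result<BoxedTransport, NetworkError> {
    // An empty consensus-key map; presumably populated by the transport as
    // peers authenticate.
    let key_map = Arc::new(Mutex::new(BiMap::new()));
    gen_transport::<T>(identity, None, None, key_map).await
}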
191}