hotshot_libp2p_networking/network/mod.rs

// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
// This file is part of the HotShot repository.

// You should have received a copy of the MIT License
// along with the HotShot repository. If not, see <https://mit-license.org/>.

/// networking behaviours wrapping libp2p's behaviours
pub mod behaviours;
/// defines the swarm and network definition (internal)
mod def;
/// functionality of a libp2p network node
mod node;
/// Alternative Libp2p transport implementations
pub mod transport;

/// Forked `cbor` codec with altered request/response sizes
pub mod cbor;

use std::{collections::HashSet, fmt::Debug, sync::Arc};

use bimap::BiMap;
use futures::channel::oneshot::Sender;
use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType};
use libp2p::{
    build_multiaddr,
    core::{muxing::StreamMuxerBox, transport::Boxed},
    dns::tokio::Transport as DnsTransport,
    gossipsub::Event as GossipEvent,
    identify::Event as IdentifyEvent,
    identity::Keypair,
    quic,
    request_response::ResponseChannel,
    Multiaddr, Transport,
};
use libp2p_identity::PeerId;
use parking_lot::Mutex;
use quic::tokio::Transport as QuicTransport;
use tracing::instrument;
use transport::ConsensusKeyAuthentication;

pub use self::{
    def::NetworkDef,
    node::{
        spawn_network_node, GossipConfig, NetworkNode, NetworkNodeConfig, NetworkNodeConfigBuilder,
        NetworkNodeConfigBuilderError, NetworkNodeHandle, NetworkNodeReceiver,
        RequestResponseConfig, DEFAULT_REPLICATION_FACTOR,
    },
};

/// Actions to send from the client to the swarm
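///
/// # Example (sketch)
///
/// A minimal sketch of building a request: publish a record into the DHT and
/// wait on a oneshot channel for completion. How the request is handed to the
/// swarm task (e.g. through a [`NetworkNodeHandle`]) is assumed here.
///
/// ```ignore
/// use futures::channel::oneshot;
///
/// let (notify, done) = oneshot::channel();
/// let request = ClientRequest::PutDHT {
///     key: b"record-key".to_vec(),
///     value: b"record-value".to_vec(),
///     notify,
/// };
/// // send `request` to the swarm task; `done` resolves once the record is published
/// ```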
#[derive(Debug)]
pub enum ClientRequest {
    /// Start the bootstrap process to kademlia
    BeginBootstrap,
    /// kill the swarm
    Shutdown,
    /// broadcast a serialized message
    GossipMsg(String, Vec<u8>),
    /// subscribe to a topic
    Subscribe(String, Option<Sender<()>>),
    /// unsubscribe from a topic
    Unsubscribe(String, Option<Sender<()>>),
    /// client request to send a direct serialized message
    DirectRequest {
        /// peer id
        pid: PeerId,
        /// msg contents
        contents: Vec<u8>,
        /// number of retries
        retry_count: u8,
    },
    /// client request to send a direct reply to a message
    DirectResponse(ResponseChannel<Vec<u8>>, Vec<u8>),
    /// prune a peer
    Prune(PeerId),
    /// add vec of known peers or addresses
    AddKnownPeers(Vec<(PeerId, Multiaddr)>),
    /// Ignore peers. Only here for debugging purposes.
    /// Allows us to have nodes that are never pruned
    IgnorePeers(Vec<PeerId>),
    /// Put(Key, Value) into DHT
    /// relay success back on channel
    PutDHT {
        /// Key to publish under
        key: Vec<u8>,
        /// Value to publish
        value: Vec<u8>,
        /// Channel to notify caller of result of publishing
        notify: Sender<()>,
    },
    /// Get(Key, Chan)
    GetDHT {
        /// Key to search for
        key: Vec<u8>,
        /// Channel to notify caller of value (or failure to find value)
        notify: Vec<Sender<Vec<u8>>>,
        /// number of retries to make
        retry_count: u8,
    },
    /// Request the number of connected peers
    GetConnectedPeerNum(Sender<usize>),
    /// Request the set of connected peers
    GetConnectedPeers(Sender<HashSet<PeerId>>),
    /// Print the routing table to stderr, debugging only
    GetRoutingTable(Sender<()>),
    /// Get address of peer
    LookupPeer(PeerId, Sender<()>),
}

/// events generated by the swarm that we wish
/// to relay to the client
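///
/// # Example (sketch)
///
/// A minimal handler over a single event; where `event` comes from (e.g. a
/// [`NetworkNodeReceiver`]) is assumed here.
///
/// ```ignore
/// match event {
///     NetworkEvent::GossipMsg(payload) => println!("gossip: {} bytes", payload.len()),
///     NetworkEvent::DirectRequest(payload, peer, _chan) => {
///         println!("direct request from {peer}: {} bytes", payload.len());
///     },
///     NetworkEvent::DirectResponse(payload, peer) => {
///         println!("direct response from {peer}: {} bytes", payload.len());
///     },
///     NetworkEvent::IsBootstrapped => println!("kademlia bootstrapped"),
///     NetworkEvent::ConnectedPeersUpdate(n) => println!("connected peers: {n}"),
/// }
/// ```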
#[derive(Debug)]
pub enum NetworkEvent {
    /// Recv-ed a broadcast
    GossipMsg(Vec<u8>),
    /// Recv-ed a direct message from a node
    DirectRequest(Vec<u8>, PeerId, ResponseChannel<Vec<u8>>),
    /// Recv-ed a direct response from a node (that hopefully was initiated by this node)
    DirectResponse(Vec<u8>, PeerId),
    /// Report that kademlia has successfully bootstrapped into the network
    IsBootstrapped,
    /// The number of connected peers has possibly changed
    ConnectedPeersUpdate(usize),
}

#[derive(Debug)]
/// internal representation of the network events
/// only used for event processing before relaying to client
pub enum NetworkEventInternal {
    /// a DHT event
    DHTEvent(libp2p::kad::Event),
    /// an identify event. Is boxed because this event is much larger than the other ones so we want
    /// to store it on the heap.
    IdentifyEvent(Box<IdentifyEvent>),
    /// a gossip event
    GossipEvent(Box<GossipEvent>),
    /// a direct message event
    DMEvent(libp2p::request_response::Event<Vec<u8>, Vec<u8>>),
    /// an autonat event
    AutonatEvent(libp2p::autonat::Event),
}

/// Bind all interfaces on port `port`
/// NOTE: we may want something more general in the future.
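///
/// # Example (sketch)
///
/// ```ignore
/// // listen on every interface, UDP port 9000, over QUIC v1
/// // (roughly "/ip4/0.0.0.0/udp/9000/quic-v1")
/// let listen_addr = gen_multiaddr(9000);
/// ```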
#[must_use]
pub fn gen_multiaddr(port: u16) -> Multiaddr {
    build_multiaddr!(Ip4([0, 0, 0, 0]), Udp(port), QuicV1)
}

/// `BoxedTransport` is a type alias for a boxed transport whose output is a tuple of a `PeerId` and a `StreamMuxerBox`.
///
/// This type is used to represent a transport in the libp2p network framework. The `PeerId` is a unique identifier for each peer in the network, and the `StreamMuxerBox` is a type of multiplexer that can handle multiple substreams over a single connection.
type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;

/// Generates an authenticated transport checked against the stake table.
/// If the stake table or authentication message is not provided, the transport will
/// not participate in stake table authentication.
///
/// # Errors
/// If we could not create a DNS transport
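///
/// # Example (sketch)
///
/// A minimal sketch of wiring the transport up; `MyTypes` stands in for a
/// concrete [`NodeType`] implementation and is hypothetical here.
///
/// ```ignore
/// use std::sync::Arc;
///
/// use bimap::BiMap;
/// use libp2p::identity::Keypair;
/// use parking_lot::Mutex;
///
/// let identity = Keypair::generate_ed25519();
/// // `None` means the transport will skip stake-table authentication
/// let auth_message = None;
/// let key_map = Arc::new(Mutex::new(BiMap::new()));
///
/// let transport = gen_transport::<MyTypes>(identity, auth_message, key_map).await?;
/// ```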
#[instrument(skip(identity))]
pub async fn gen_transport<T: NodeType>(
    identity: Keypair,
    auth_message: Option<Vec<u8>>,
    consensus_key_to_pid_map: Arc<Mutex<BiMap<T::SignatureKey, PeerId>>>,
) -> Result<BoxedTransport, NetworkError> {
    // Create the initial `Quic` transport
    let transport = {
        let mut config = quic::Config::new(&identity);
        config.handshake_timeout = std::time::Duration::from_secs(20);
        QuicTransport::new(config)
    };

    // Require authentication against the stake table
    let transport: ConsensusKeyAuthentication<_, T::SignatureKey, _> =
        ConsensusKeyAuthentication::new(transport, auth_message, consensus_key_to_pid_map);

    // Support DNS resolution
    let transport = DnsTransport::system(transport)
        .map_err(|e| NetworkError::ConfigError(format!("failed to build DNS transport: {e}")))?;

    Ok(transport
        .map(|(peer_id, connection), _| (peer_id, StreamMuxerBox::new(connection)))
        .boxed())
}