// hotshot_libp2p_networking/network/mod.rs
1// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
2// This file is part of the HotShot repository.
3
4// You should have received a copy of the MIT License
5// along with the HotShot repository. If not, see <https://mit-license.org/>.
6
7/// networking behaviours wrapping libp2p's behaviours
8pub mod behaviours;
9/// defines the swarm and network definition (internal)
10mod def;
11/// functionality of a libp2p network node
12mod node;
13/// Alternative Libp2p transport implementations
14pub mod transport;
15
16/// Forked `cbor` codec with altered request/response sizes
17pub mod cbor;
18
19use std::{collections::HashSet, fmt::Debug, sync::Arc};
20
21use bimap::BiMap;
22use futures::channel::oneshot::Sender;
23use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType};
24use libp2p::{
25 Multiaddr, Transport, build_multiaddr,
26 core::{muxing::StreamMuxerBox, transport::Boxed},
27 dns::tokio::Transport as DnsTransport,
28 gossipsub::Event as GossipEvent,
29 identify::Event as IdentifyEvent,
30 identity::Keypair,
31 quic,
32 request_response::ResponseChannel,
33};
34use libp2p_identity::PeerId;
35use parking_lot::Mutex;
36use quic::tokio::Transport as QuicTransport;
37use tracing::instrument;
38use transport::ConsensusKeyAuthentication;
39
40pub use self::{
41 def::NetworkDef,
42 node::{
43 DEFAULT_REPLICATION_FACTOR, GossipConfig, NetworkNode, NetworkNodeConfig,
44 NetworkNodeConfigBuilder, NetworkNodeConfigBuilderError, NetworkNodeHandle,
45 NetworkNodeReceiver, RequestResponseConfig, spawn_network_node,
46 },
47};
48
/// Actions to send from the client to the swarm
#[derive(Debug)]
pub enum ClientRequest {
    /// Start the bootstrap process to kademlia
    BeginBootstrap,
    /// kill the swarm
    Shutdown,
    /// broadcast a serialized message to a gossipsub topic: (topic, payload)
    GossipMsg(String, Vec<u8>),
    /// subscribe to a topic; the optional channel is notified on completion
    Subscribe(String, Option<Sender<()>>),
    /// unsubscribe from a topic; the optional channel is notified on completion
    Unsubscribe(String, Option<Sender<()>>),
    /// client request to send a direct serialized message
    DirectRequest {
        /// peer id of the recipient
        pid: PeerId,
        /// msg contents
        contents: Vec<u8>,
        /// number of times to retry on failure
        retry_count: u8,
    },
    /// client request to send a direct reply to a message,
    /// using the response channel of the original request
    DirectResponse(ResponseChannel<Vec<u8>>, Vec<u8>),
    /// prune a peer (disconnect and stop tracking it)
    Prune(PeerId),
    /// add vec of known peers or addresses
    AddKnownPeers(Vec<(PeerId, Multiaddr)>),
    /// Ignore peers. Only here for debugging purposes.
    /// Allows us to have nodes that are never pruned
    IgnorePeers(Vec<PeerId>),
    /// Put(Key, Value) into DHT
    /// relay success back on channel
    PutDHT {
        /// Key to publish under
        key: Vec<u8>,
        /// Value to publish under the key
        value: Vec<u8>,
        /// Channel to notify caller of result of publishing
        notify: Sender<()>,
    },
    /// Get(Key, Chan)
    GetDHT {
        /// Key to search for
        key: Vec<u8>,
        /// Channels to notify callers of the value (or failure to find it);
        /// one sender per waiting caller
        notify: Vec<Sender<Vec<u8>>>,
        /// number of retries to make
        retry_count: u8,
    },
    /// Request the number of connected peers
    GetConnectedPeerNum(Sender<usize>),
    /// Request the set of connected peers
    GetConnectedPeers(Sender<HashSet<PeerId>>),
    /// Print the routing table to stderr, debugging only
    GetRoutingTable(Sender<()>),
    /// Get address of peer; the channel is notified when the lookup finishes
    LookupPeer(PeerId, Sender<()>),
}
108
/// events generated by the swarm that we wish
/// to relay to the client
#[derive(Debug)]
pub enum NetworkEvent {
    /// Recv-ed a broadcast (gossip) message payload
    GossipMsg(Vec<u8>),
    /// Recv-ed a direct message from a node: (payload, sender, channel to reply on)
    DirectRequest(Vec<u8>, PeerId, ResponseChannel<Vec<u8>>),
    /// Recv-ed a direct response from a node (that hopefully was initiated by this node)
    DirectResponse(Vec<u8>, PeerId),
    /// Report that kademlia has successfully bootstrapped into the network
    IsBootstrapped,
    /// The number of connected peers has possibly changed; carries the new count
    ConnectedPeersUpdate(usize),
}
124
#[derive(Debug)]
/// internal representation of the network events
/// only used for event processing before relaying to client
pub enum NetworkEventInternal {
    /// a DHT (kademlia) event
    DHTEvent(libp2p::kad::Event),
    /// an identify event. Is boxed because this event is much larger than the other ones so we want
    /// to store it on the heap.
    IdentifyEvent(Box<IdentifyEvent>),
    /// a gossipsub event (boxed for the same size reason as `IdentifyEvent`)
    GossipEvent(Box<GossipEvent>),
    /// a direct message (request/response) event with raw byte payloads
    DMEvent(libp2p::request_response::Event<Vec<u8>, Vec<u8>>),
    /// an autonat (NAT status detection) event
    AutonatEvent(libp2p::autonat::Event),
}
141
/// Bind all interfaces (0.0.0.0) on UDP port `port`, speaking QUIC v1.
/// NOTE we may want something more general in the future.
#[must_use]
pub fn gen_multiaddr(port: u16) -> Multiaddr {
    build_multiaddr!(Ip4([0, 0, 0, 0]), Udp(port), QuicV1)
}
148
/// `BoxedTransport` is a type alias for a boxed transport yielding a tuple of a
/// `PeerId` and a `StreamMuxerBox`.
///
/// This type is used to represent a transport in the libp2p network framework.
/// The `PeerId` is a unique identifier for each peer in the network, and the
/// `StreamMuxerBox` is a type of multiplexer that can handle multiple substreams
/// over a single connection.
type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;
153
154/// Generates an authenticated transport checked against the stake table.
155/// If the stake table or authentication message is not provided, the transport will
156/// not participate in stake table authentication.
157///
158/// # Errors
159/// If we could not create a DNS transport
160#[instrument(skip(identity))]
161pub async fn gen_transport<T: NodeType>(
162 identity: Keypair,
163 auth_message: Option<Vec<u8>>,
164 consensus_key_to_pid_map: Arc<Mutex<BiMap<T::SignatureKey, PeerId>>>,
165) -> Result<BoxedTransport, NetworkError> {
166 // Create the initial `Quic` transport
167 let transport = {
168 let mut config = quic::Config::new(&identity);
169 config.handshake_timeout = std::time::Duration::from_secs(20);
170 QuicTransport::new(config)
171 };
172
173 // Require authentication against the stake table
174 let transport: ConsensusKeyAuthentication<_, T::SignatureKey, _> =
175 ConsensusKeyAuthentication::new(transport, auth_message, consensus_key_to_pid_map);
176
177 // Support DNS resolution
178 let transport = { DnsTransport::system(transport) }
179 .map_err(|e| NetworkError::ConfigError(format!("failed to build DNS transport: {e}")))?;
180
181 Ok(transport
182 .map(|(peer_id, connection), _| (peer_id, StreamMuxerBox::new(connection)))
183 .boxed())
184}