;
117 | }
118 |
119 | /// A `NetworkBehaviour` for mDNS. Automatically discovers peers on the local network and adds
120 | /// them to the topology.
121 | #[derive(Debug)]
122 | pub struct Behaviour<P>
123 | where
124 | P: Provider,
125 | {
126 | /// InterfaceState config.
127 | config: Config,
128 |
129 | /// Iface watcher.
130 | if_watch: P::Watcher,
131 |
132 | /// Handles to tasks running the mDNS queries.
133 | if_tasks: HashMap<IpAddr, P::TaskHandle>,
134 |
135 | query_response_receiver: mpsc::Receiver<(PeerId, Multiaddr, Instant)>,
136 | query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>,
137 |
138 | /// List of nodes that we have discovered, the address, and when their TTL expires.
139 | ///
140 | /// Each combination of `PeerId` and `Multiaddr` can only appear once, but the same `PeerId`
141 | /// can appear multiple times.
142 | discovered_nodes: SmallVec<[(PeerId, Multiaddr, Instant); 8]>,
143 |
144 | /// Future that fires when the TTL of at least one node in `discovered_nodes` expires.
145 | ///
146 | /// `None` if `discovered_nodes` is empty.
147 | closest_expiration: Option<P::Timer>,
148 |
149 | /// The current set of listen addresses.
150 | ///
151 | /// This is shared across all interface tasks using an [`RwLock`].
152 | /// The [`Behaviour`] updates this upon new [`FromSwarm`]
153 | /// events, whereas [`InterfaceState`]s read from it to answer inbound mDNS queries.
154 | listen_addresses: Arc<RwLock<ListenAddresses>>,
155 |
156 | local_peer_id: PeerId,
157 |
158 | /// Pending behaviour events to be emitted.
159 | pending_events: VecDeque<ToSwarm<Event, Infallible>>,
160 | }
161 |
162 | impl<P> Behaviour<P>
163 | where
164 | P: Provider,
165 | {
166 | /// Builds a new `Mdns` behaviour.
167 | pub fn new(config: Config, local_peer_id: PeerId) -> io::Result<Self> {
168 | let (tx, rx) = mpsc::channel(10); // Chosen arbitrarily.
169 |
170 | Ok(Self {
171 | config,
172 | if_watch: P::new_watcher()?,
173 | if_tasks: Default::default(),
174 | query_response_receiver: rx,
175 | query_response_sender: tx,
176 | discovered_nodes: Default::default(),
177 | closest_expiration: Default::default(),
178 | listen_addresses: Default::default(),
179 | local_peer_id,
180 | pending_events: Default::default(),
181 | })
182 | }
183 |
184 | /// Returns true if the given `PeerId` is in the list of nodes discovered through mDNS.
185 | #[deprecated(note = "Use `discovered_nodes` iterator instead.")]
186 | pub fn has_node(&self, peer_id: &PeerId) -> bool {
187 | self.discovered_nodes().any(|p| p == peer_id)
188 | }
189 |
190 | /// Returns the list of nodes that we have discovered through mDNS and that are not expired.
191 | pub fn discovered_nodes(&self) -> impl ExactSizeIterator<Item = &PeerId> {
192 | self.discovered_nodes.iter().map(|(p, _, _)| p)
193 | }
194 |
195 | /// Expires a node before its TTL.
196 | #[deprecated(note = "Unused API. Will be removed in the next release.")]
197 | pub fn expire_node(&mut self, peer_id: &PeerId) {
198 | let now = Instant::now();
199 | for (peer, _addr, expires) in &mut self.discovered_nodes {
200 | if peer == peer_id {
201 | *expires = now;
202 | }
203 | }
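// Arm an immediate expiration timer so the entries expired above are cleaned up on the next poll.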
204 | self.closest_expiration = Some(P::Timer::at(now));
205 | }
206 | }
207 |
208 | impl<P> NetworkBehaviour for Behaviour<P>
209 | where
210 | P: Provider,
211 | {
212 | type ConnectionHandler = dummy::ConnectionHandler;
213 | type ToSwarm = Event;
214 |
215 | fn handle_established_inbound_connection(
216 | &mut self,
217 | _: ConnectionId,
218 | _: PeerId,
219 | _: &Multiaddr,
220 | _: &Multiaddr,
221 | ) -> Result<THandler<Self>, ConnectionDenied> {
222 | Ok(dummy::ConnectionHandler)
223 | }
224 |
225 | fn handle_pending_outbound_connection(
226 | &mut self,
227 | _connection_id: ConnectionId,
228 | maybe_peer: Option<PeerId>,
229 | _addresses: &[Multiaddr],
230 | _effective_role: Endpoint,
231 | ) -> Result<Vec<Multiaddr>, ConnectionDenied> {
232 | let Some(peer_id) = maybe_peer else {
233 | return Ok(vec![]);
234 | };
235 |
236 | Ok(self
237 | .discovered_nodes
238 | .iter()
239 | .filter(|(peer, _, _)| peer == &peer_id)
240 | .map(|(_, addr, _)| addr.clone())
241 | .collect())
242 | }
243 |
244 | fn handle_established_outbound_connection(
245 | &mut self,
246 | _: ConnectionId,
247 | _: PeerId,
248 | _: &Multiaddr,
249 | _: Endpoint,
250 | _: PortUse,
251 | ) -> Result<THandler<Self>, ConnectionDenied> {
252 | Ok(dummy::ConnectionHandler)
253 | }
254 |
255 | fn on_connection_handler_event(
256 | &mut self,
257 | _: PeerId,
258 | _: ConnectionId,
259 | ev: THandlerOutEvent<Self>,
260 | ) {
261 | libp2p_core::util::unreachable(ev)
262 | }
263 |
264 | fn on_swarm_event(&mut self, event: FromSwarm) {
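// `unwrap_or_else(|e| e.into_inner())` recovers the guard from a poisoned lock instead of panicking.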
265 | self.listen_addresses
266 | .write()
267 | .unwrap_or_else(|e| e.into_inner())
268 | .on_swarm_event(&event);
269 | }
270 |
271 | #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))]
272 | fn poll(
273 | &mut self,
274 | cx: &mut Context<'_>,
275 | ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
276 | if self.config.disabled {
277 | self.pending_events.clear();
278 | return Poll::Pending;
279 | }
280 |
281 | loop {
282 | // Check for pending events and emit them.
283 | if let Some(event) = self.pending_events.pop_front() {
284 | return Poll::Ready(event);
285 | }
286 |
287 | // Poll ifwatch.
288 | while let Poll::Ready(Some(event)) = Pin::new(&mut self.if_watch).poll_next(cx) {
289 | match event {
290 | Ok(IfEvent::Up(inet)) => {
291 | let addr = inet.addr();
292 | if addr.is_loopback() {
293 | continue;
294 | }
295 | if addr.is_ipv4() && self.config.enable_ipv6
296 | || addr.is_ipv6() && !self.config.enable_ipv6
297 | {
298 | continue;
299 | }
300 | if let Entry::Vacant(e) = self.if_tasks.entry(addr) {
301 | match InterfaceState::<P::Socket, P::Timer>::new(
302 | addr,
303 | self.config.clone(),
304 | self.local_peer_id,
305 | self.listen_addresses.clone(),
306 | self.query_response_sender.clone(),
307 | ) {
308 | Ok(iface_state) => {
309 | e.insert(P::spawn(iface_state));
310 | }
311 | Err(err) => {
312 | tracing::error!("failed to create `InterfaceState`: {}", err)
313 | }
314 | }
315 | }
316 | }
317 | Ok(IfEvent::Down(inet)) => {
318 | if let Some(handle) = self.if_tasks.remove(&inet.addr()) {
319 | tracing::info!(instance=%inet.addr(), "dropping instance");
320 |
321 | handle.abort();
322 | }
323 | }
324 | Err(err) => tracing::error!("if watch returned an error: {}", err),
325 | }
326 | }
327 | // Emit discovered event.
328 | let mut discovered = Vec::new();
329 |
330 | while let Poll::Ready(Some((peer, addr, expiration))) =
331 | self.query_response_receiver.poll_next_unpin(cx)
332 | {
333 | if let Some((_, _, cur_expires)) = self
334 | .discovered_nodes
335 | .iter_mut()
336 | .find(|(p, a, _)| *p == peer && *a == addr)
337 | {
338 | *cur_expires = cmp::max(*cur_expires, expiration);
339 | } else {
340 | tracing::info!(%peer, address=%addr, "discovered peer on address");
341 | self.discovered_nodes.push((peer, addr.clone(), expiration));
342 | discovered.push((peer, addr.clone()));
343 |
344 | self.pending_events
345 | .push_back(ToSwarm::NewExternalAddrOfPeer {
346 | peer_id: peer,
347 | address: addr,
348 | });
349 | }
350 | }
351 |
352 | if !discovered.is_empty() {
353 | let event = Event::Discovered(discovered);
354 | // Push to the front of the queue so that the behavior event is reported before
355 | // the individual discovered addresses.
356 | self.pending_events
357 | .push_front(ToSwarm::GenerateEvent(event));
358 | continue;
359 | }
360 | // Emit expired event.
361 | let now = Instant::now();
362 | let mut closest_expiration = None;
363 | let mut expired = Vec::new();
364 | self.discovered_nodes.retain(|(peer, addr, expiration)| {
365 | if *expiration <= now {
366 | tracing::info!(%peer, address=%addr, "expired peer on address");
367 | expired.push((*peer, addr.clone()));
368 | return false;
369 | }
370 | closest_expiration =
371 | Some(closest_expiration.unwrap_or(*expiration).min(*expiration));
372 | true
373 | });
374 | if !expired.is_empty() {
375 | let event = Event::Expired(expired);
376 | self.pending_events.push_back(ToSwarm::GenerateEvent(event));
377 | continue;
378 | }
379 | if let Some(closest_expiration) = closest_expiration {
380 | let mut timer = P::Timer::at(closest_expiration);
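// Poll the fresh timer once so it registers the current waker before it is stored.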
381 | let _ = Pin::new(&mut timer).poll_next(cx);
382 |
383 | self.closest_expiration = Some(timer);
384 | }
385 |
386 | return Poll::Pending;
387 | }
388 | }
389 | }
390 |
391 | /// Event that can be produced by the `Mdns` behaviour.
392 | #[derive(Debug, Clone)]
393 | pub enum Event {
394 | /// Discovered nodes through mDNS.
395 | Discovered(Vec<(PeerId, Multiaddr)>),
396 |
397 | /// The given combinations of `PeerId` and `Multiaddr` have expired.
398 | ///
399 | /// Each discovered record has a time-to-live. When this TTL expires and the address hasn't
400 | /// been refreshed, we remove it from the list and emit it as an `Expired` event.
401 | Expired(Vec<(PeerId, Multiaddr)>),
402 | }
403 |
--------------------------------------------------------------------------------
/lib/mdns/src/behaviour/iface.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Parity Technologies (UK) Ltd.
2 | //
3 | // Permission is hereby granted, free of charge, to any person obtaining a
4 | // copy of this software and associated documentation files (the "Software"),
5 | // to deal in the Software without restriction, including without limitation
6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 | // and/or sell copies of the Software, and to permit persons to whom the
8 | // Software is furnished to do so, subject to the following conditions:
9 | //
10 | // The above copyright notice and this permission notice shall be included in
11 | // all copies or substantial portions of the Software.
12 | //
13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 | // DEALINGS IN THE SOFTWARE.
20 |
21 | mod dns;
22 | mod query;
23 |
24 | use std::{
25 | collections::VecDeque,
26 | future::Future,
27 | io,
28 | net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket},
29 | pin::Pin,
30 | sync::{Arc, RwLock},
31 | task::{Context, Poll},
32 | time::{Duration, Instant},
33 | };
34 |
35 | use futures::{channel::mpsc, SinkExt, StreamExt};
36 | use libp2p_core::Multiaddr;
37 | use libp2p_identity::PeerId;
38 | use libp2p_swarm::ListenAddresses;
39 | use socket2::{Domain, Socket, Type};
40 |
41 | use self::{
42 | dns::{build_query, build_query_response, build_service_discovery_response},
43 | query::MdnsPacket,
44 | };
45 | use crate::{
46 | behaviour::{socket::AsyncSocket, timer::Builder},
47 | Config,
48 | };
49 | use uuid::Uuid;
50 |
51 | /// Initial interval for starting probe
52 | const INITIAL_TIMEOUT_INTERVAL: Duration = Duration::from_millis(500);
53 |
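/// Probe back-off state: while `Probing`, queries are sent at an interval that doubles on every
/// tick (starting from `INITIAL_TIMEOUT_INTERVAL`) until it reaches the configured query interval
/// or a valid response arrives, at which point the state becomes `Finished`.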
54 | #[derive(Debug, Clone)]
55 | enum ProbeState {
56 | Probing(Duration),
57 | Finished(Duration),
58 | }
59 |
60 | impl Default for ProbeState {
61 | fn default() -> Self {
62 | ProbeState::Probing(INITIAL_TIMEOUT_INTERVAL)
63 | }
64 | }
65 |
66 | impl ProbeState {
67 | fn interval(&self) -> &Duration {
68 | match self {
69 | ProbeState::Probing(query_interval) => query_interval,
70 | ProbeState::Finished(query_interval) => query_interval,
71 | }
72 | }
73 | }
74 |
75 | /// An mDNS instance for a networking interface. To discover all peers when having multiple
76 | /// interfaces, an [`InterfaceState`] is required for each interface.
77 | #[derive(Debug)]
78 | pub(crate) struct InterfaceState<U, T> {
79 | /// Address this instance is bound to.
80 | addr: IpAddr,
81 | /// Receive socket.
82 | recv_socket: U,
83 | /// Send socket.
84 | send_socket: U,
85 |
86 | listen_addresses: Arc<RwLock<ListenAddresses>>,
87 |
88 | query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>,
89 |
90 | /// Buffer used for receiving data from the main socket.
91 | /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000
92 | /// bytes, if it can be ensured that all participating devices can handle such large packets.
93 | /// For computers with several interfaces and IP addresses, responses can easily reach sizes in
94 | /// the range of 3000 bytes, so 4096 seems sensible for now. For more information see
95 | /// [rfc6762](https://tools.ietf.org/html/rfc6762#page-46).
96 | recv_buffer: [u8; 4096],
97 | /// Buffers pending to send on the main socket.
98 | send_buffer: VecDeque<Vec<u8>>,
99 | /// Discovery interval.
100 | query_interval: Duration,
101 | /// Discovery timer.
102 | timeout: T,
103 | /// Multicast address.
104 | multicast_addr: IpAddr,
105 | /// Discovered addresses.
106 | discovered: VecDeque<(PeerId, Multiaddr, Instant)>,
107 | /// TTL
108 | ttl: Duration,
109 | probe_state: ProbeState,
110 | local_peer_id: PeerId,
111 | service_name: Vec<u8>,
112 | service_name_fqdn: String,
113 | }
114 |
115 | impl<U, T> InterfaceState<U, T>
116 | where
117 | U: AsyncSocket,
118 | T: Builder + futures::Stream,
119 | {
120 | /// Builds a new [`InterfaceState`].
121 | pub(crate) fn new(
122 | addr: IpAddr,
123 | config: Config,
124 | local_peer_id: PeerId,
125 | listen_addresses: Arc<RwLock<ListenAddresses>>,
126 | query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>,
127 | ) -> io::Result<Self> {
128 | tracing::info!(address=%addr, "creating instance on iface address");
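// Bind the receive socket to the wildcard address on port 5353 with SO_REUSEADDR (and
// SO_REUSEPORT on Unix) so it can coexist with other mDNS responders on the host, and join
// the mDNS multicast group on this interface.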
129 | let recv_socket = match addr {
130 | IpAddr::V4(addr) => {
131 | let socket = Socket::new(Domain::IPV4, Type::DGRAM, Some(socket2::Protocol::UDP))?;
132 | socket.set_reuse_address(true)?;
133 | #[cfg(unix)]
134 | socket.set_reuse_port(true)?;
135 | socket.bind(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 5353).into())?;
136 | socket.set_multicast_loop_v4(true)?;
137 | socket.set_multicast_ttl_v4(255)?;
138 | socket.join_multicast_v4(&crate::IPV4_MDNS_MULTICAST_ADDRESS, &addr)?;
139 | U::from_std(UdpSocket::from(socket))?
140 | }
141 | IpAddr::V6(_) => {
142 | let socket = Socket::new(Domain::IPV6, Type::DGRAM, Some(socket2::Protocol::UDP))?;
143 | socket.set_reuse_address(true)?;
144 | #[cfg(unix)]
145 | socket.set_reuse_port(true)?;
146 | socket.bind(&SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 5353).into())?;
147 | socket.set_multicast_loop_v6(true)?;
148 | // TODO: find interface matching addr.
149 | socket.join_multicast_v6(&crate::IPV6_MDNS_MULTICAST_ADDRESS, 0)?;
150 | U::from_std(UdpSocket::from(socket))?
151 | }
152 | };
153 | let bind_addr = match addr {
154 | IpAddr::V4(_) => SocketAddr::new(addr, 0),
155 | IpAddr::V6(_addr) => {
156 | // TODO: if-watch should return the scope_id of an address
157 | // as a workaround we bind to unspecified, which means that
158 | // this probably won't work when using multiple interfaces.
159 | // SocketAddr::V6(SocketAddrV6::new(addr, 0, 0, scope_id))
160 | SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0)
161 | }
162 | };
163 | let send_socket = U::from_std(UdpSocket::bind(bind_addr)?)?;
164 |
165 | // Randomize the timer to prevent all instances from converging and firing at the same time.
166 | let query_interval = {
167 | use rand::Rng;
168 | let mut rng = rand::thread_rng();
169 | let jitter = rng.gen_range(0..100);
170 | config.query_interval + Duration::from_millis(jitter)
171 | };
172 | let multicast_addr = match addr {
173 | IpAddr::V4(_) => IpAddr::V4(crate::IPV4_MDNS_MULTICAST_ADDRESS),
174 | IpAddr::V6(_) => IpAddr::V6(crate::IPV6_MDNS_MULTICAST_ADDRESS),
175 | };
176 |
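// When a service fingerprint is configured, derive a private service name from it (a UUIDv5 of
// the fingerprint) so that only peers sharing the same fingerprint discover each other;
// otherwise fall back to the default service name.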
177 | let service_name = match config.service_fingerprint.as_deref() {
178 | Some(seed) => {
179 | let uuid = Uuid::new_v5(&Uuid::NAMESPACE_DNS, seed);
180 | format!("_p2pclip-{}._udp.local", uuid.simple())
181 | }
182 | None => crate::DEFAULT_SERVICE_NAME.to_string(),
183 | };
184 |
185 | Ok(Self {
186 | addr,
187 | recv_socket,
188 | send_socket,
189 | listen_addresses,
190 | query_response_sender,
191 | recv_buffer: [0; 4096],
192 | send_buffer: Default::default(),
193 | discovered: Default::default(),
194 | query_interval,
195 | timeout: T::interval_at(Instant::now(), INITIAL_TIMEOUT_INTERVAL),
196 | multicast_addr,
197 | ttl: config.ttl,
198 | probe_state: Default::default(),
199 | local_peer_id,
200 | service_name: service_name.as_bytes().to_vec(),
201 | service_name_fqdn: format!("{service_name}."),
202 | })
203 | }
204 |
205 | pub(crate) fn reset_timer(&mut self) {
206 | tracing::trace!(address=%self.addr, probe_state=?self.probe_state, "reset timer");
207 | let interval = *self.probe_state.interval();
208 | self.timeout = T::interval(interval);
209 | }
210 |
211 | fn mdns_socket(&self) -> SocketAddr {
212 | SocketAddr::new(self.multicast_addr, 5353)
213 | }
214 | }
215 |
216 | impl<U, T> Future for InterfaceState<U, T>
217 | where
218 | U: AsyncSocket,
219 | T: Builder + futures::Stream,
220 | {
221 | type Output = ();
222 |
223 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
224 | let this = self.get_mut();
225 |
226 | loop {
227 | // 1st priority: Low latency: Create packet ASAP after timeout.
228 | if this.timeout.poll_next_unpin(cx).is_ready() {
229 | tracing::trace!(address=%this.addr, "sending query on iface");
230 | this.send_buffer
231 | .push_back(build_query(&this.service_name));
232 | tracing::trace!(address=%this.addr, probe_state=?this.probe_state, "tick");
233 |
234 | // Stop probing when the probe interval reaches the configured query interval
235 | if let ProbeState::Probing(interval) = this.probe_state {
236 | let interval = interval * 2;
237 | this.probe_state = if interval >= this.query_interval {
238 | ProbeState::Finished(this.query_interval)
239 | } else {
240 | ProbeState::Probing(interval)
241 | };
242 | }
243 |
244 | this.reset_timer();
245 | }
246 |
247 | // 2nd priority: Keep local buffers small: Send packets to remote.
248 | if let Some(packet) = this.send_buffer.pop_front() {
249 | match this.send_socket.poll_write(cx, &packet, this.mdns_socket()) {
250 | Poll::Ready(Ok(_)) => {
251 | tracing::trace!(address=%this.addr, "sent packet on iface address");
252 | continue;
253 | }
254 | Poll::Ready(Err(err)) => {
255 | tracing::error!(address=%this.addr, "error sending packet on iface address {}", err);
256 | continue;
257 | }
258 | Poll::Pending => {
259 | this.send_buffer.push_front(packet);
260 | }
261 | }
262 | }
263 |
264 | // 3rd priority: Keep local buffers small: Return discovered addresses.
265 | if this.query_response_sender.poll_ready_unpin(cx).is_ready() {
266 | if let Some(discovered) = this.discovered.pop_front() {
267 | match this.query_response_sender.try_send(discovered) {
268 | Ok(()) => {}
269 | Err(e) if e.is_disconnected() => {
270 | return Poll::Ready(());
271 | }
272 | Err(e) => {
273 | this.discovered.push_front(e.into_inner());
274 | }
275 | }
276 |
277 | continue;
278 | }
279 | }
280 |
281 | // 4th priority: Remote work: Answer incoming requests.
282 | match this
283 | .recv_socket
284 | .poll_read(cx, &mut this.recv_buffer)
285 | .map_ok(|(len, from)| {
286 | MdnsPacket::new_from_bytes(
287 | &this.recv_buffer[..len],
288 | from,
289 | &this.service_name_fqdn,
290 | )
291 | })
292 | {
293 | Poll::Ready(Ok(Ok(Some(MdnsPacket::Query(query))))) => {
294 | tracing::trace!(
295 | address=%this.addr,
296 | remote_address=%query.remote_addr(),
297 | "received query from remote address on address"
298 | );
299 |
300 | this.send_buffer.extend(build_query_response(
301 | query.query_id(),
302 | this.local_peer_id,
303 | this.listen_addresses
304 | .read()
305 | .unwrap_or_else(|e| e.into_inner())
306 | .iter(),
307 | this.ttl,
308 | &this.service_name,
309 | ));
310 | continue;
311 | }
312 | Poll::Ready(Ok(Ok(Some(MdnsPacket::Response(response))))) => {
313 | tracing::trace!(
314 | address=%this.addr,
315 | remote_address=%response.remote_addr(),
316 | "received response from remote address on address"
317 | );
318 |
319 | this.discovered
320 | .extend(response.extract_discovered(Instant::now(), this.local_peer_id));
321 |
322 | // Stop probing when we have a valid response
323 | if !this.discovered.is_empty() {
324 | this.probe_state = ProbeState::Finished(this.query_interval);
325 | this.reset_timer();
326 | }
327 | continue;
328 | }
329 | Poll::Ready(Ok(Ok(Some(MdnsPacket::ServiceDiscovery(disc))))) => {
330 | tracing::trace!(
331 | address=%this.addr,
332 | remote_address=%disc.remote_addr(),
333 | "received service discovery from remote address on address"
334 | );
335 |
336 | this.send_buffer
337 | .push_back(build_service_discovery_response(
338 | disc.query_id(),
339 | this.ttl,
340 | &this.service_name,
341 | ));
342 | continue;
343 | }
344 | Poll::Ready(Err(err)) if err.kind() == std::io::ErrorKind::WouldBlock => {
345 | // No more bytes available on the socket to read
346 | continue;
347 | }
348 | Poll::Ready(Err(err)) => {
349 | tracing::error!("failed reading datagram: {}", err);
350 | return Poll::Ready(());
351 | }
352 | Poll::Ready(Ok(Err(err))) => {
353 | tracing::debug!("Parsing mdns packet failed: {:?}", err);
354 | continue;
355 | }
356 | Poll::Ready(Ok(Ok(None))) => continue,
357 | Poll::Pending => {}
358 | }
359 |
360 | return Poll::Pending;
361 | }
362 | }
363 | }
364 |
--------------------------------------------------------------------------------
/lib/tls/src/certificate.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2021 Parity Technologies (UK) Ltd.
2 | //
3 | // Permission is hereby granted, free of charge, to any person obtaining a
4 | // copy of this software and associated documentation files (the "Software"),
5 | // to deal in the Software without restriction, including without limitation
6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 | // and/or sell copies of the Software, and to permit persons to whom the
8 | // Software is furnished to do so, subject to the following conditions:
9 | //
10 | // The above copyright notice and this permission notice shall be included in
11 | // all copies or substantial portions of the Software.
12 | //
13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 | // DEALINGS IN THE SOFTWARE.
20 |
21 | //! X.509 certificate handling for libp2p
22 | //!
23 | //! This module handles generation, signing, and verification of certificates.
24 |
25 | use libp2p_identity as identity;
26 | use libp2p_identity::PeerId;
27 | use x509_parser::{prelude::*, signature_algorithm::SignatureAlgorithm};
28 | use ring::{hmac, aead};
29 | use std::sync::Arc;
30 |
31 | /// The libp2p Public Key Extension is a X.509 extension
32 | /// with the Object Identifier 1.3.6.1.4.1.53594.1.1,
33 | /// allocated by IANA to the libp2p project at Protocol Labs.
34 | const P2P_EXT_OID: [u64; 9] = [1, 3, 6, 1, 4, 1, 53594, 1, 1];
35 |
36 | /// The peer signs the concatenation of the string `libp2p-tls-handshake:`
37 | /// and the public key that it used to generate the certificate carrying
38 | /// the libp2p Public Key Extension, using its private host key.
39 | /// This signature provides cryptographic proof that the peer was
40 | /// in possession of the private host key at the time the certificate was signed.
41 | const P2P_SIGNING_PREFIX: [u8; 21] = *b"libp2p-tls-handshake:";
42 |
43 | // Certificates MUST use the NamedCurve encoding for elliptic curve parameters.
44 | // Similarly, hash functions with an output length less than 256 bits MUST NOT be used.
45 | static P2P_SIGNATURE_ALGORITHM: &rcgen::SignatureAlgorithm = &rcgen::PKCS_ECDSA_P256_SHA256;
46 |
47 | #[derive(Debug)]
48 | pub struct AlwaysResolvesCert(Arc<rustls::sign::CertifiedKey>);
49 |
50 | impl AlwaysResolvesCert {
51 | pub fn new(
52 | cert: rustls::pki_types::CertificateDer<'static>,
53 | key: &rustls::pki_types::PrivateKeyDer<'_>,
54 | ) -> Result<Self, rustls::Error> {
55 | let certified_key = rustls::sign::CertifiedKey::new(
56 | vec![cert],
57 | rustls::crypto::ring::sign::any_ecdsa_type(key)?,
58 | );
59 | Ok(Self(Arc::new(certified_key)))
60 | }
61 | }
62 |
63 | impl rustls::client::ResolvesClientCert for AlwaysResolvesCert {
64 | fn resolve(
65 | &self,
66 | _root_hint_subjects: &[&[u8]],
67 | _sigschemes: &[rustls::SignatureScheme],
68 | ) -> Option<Arc<rustls::sign::CertifiedKey>> {
69 | Some(Arc::clone(&self.0))
70 | }
71 |
72 | fn has_certs(&self) -> bool {
73 | true
74 | }
75 | }
76 |
77 | impl rustls::server::ResolvesServerCert for AlwaysResolvesCert {
78 | fn resolve(
79 | &self,
80 | _client_hello: rustls::server::ClientHello<'_>,
81 | ) -> Option<Arc<rustls::sign::CertifiedKey>> {
82 | Some(Arc::clone(&self.0))
83 | }
84 | }
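// Usage sketch (an assumption about wiring outside this excerpt, not taken from it): the
// certificate/key pair returned by `generate()` below is typically wrapped in an
// `AlwaysResolvesCert` and installed as the rustls client/server certificate resolver.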
85 |
86 | /// Generates a self-signed TLS certificate that includes a libp2p-specific
87 | /// certificate extension containing the public key of the given keypair.
88 | pub fn generate(
89 | identity_keypair: &identity::Keypair,
90 | pre_shared_key: Option<String>,
91 | ) -> Result<
92 | (
93 | rustls::pki_types::CertificateDer<'static>,
94 | rustls::pki_types::PrivateKeyDer<'static>,
95 | ),
96 | GenError,
97 | > {
98 | // Keypair used to sign the certificate.
99 | // SHOULD NOT be related to the host's key.
100 | // Endpoints MAY generate a new key and certificate
101 | // for every connection attempt, or they MAY reuse the same key
102 | // and certificate for multiple connections.
103 | let certificate_keypair = rcgen::KeyPair::generate_for(P2P_SIGNATURE_ALGORITHM)?;
104 | let rustls_key = rustls::pki_types::PrivateKeyDer::from(
105 | rustls::pki_types::PrivatePkcs8KeyDer::from(certificate_keypair.serialize_der()),
106 | );
107 |
108 | let certificate = {
109 | let mut params = rcgen::CertificateParams::default();
110 | params.distinguished_name = rcgen::DistinguishedName::new();
111 | params.custom_extensions.push(make_libp2p_extension(
112 | identity_keypair,
113 | &certificate_keypair,
114 | pre_shared_key,
115 | )?);
116 | params.self_signed(&certificate_keypair)?
117 | };
118 |
119 | Ok((certificate.into(), rustls_key))
120 | }
121 |
122 | /// Attempts to parse the provided bytes as a [`P2pCertificate`].
123 | ///
124 | /// For this to succeed, the certificate must contain the specified extension and the signature must
125 | /// match the embedded public key.
126 | pub fn parse<'a>(certificate: &'a rustls::pki_types::CertificateDer<'a>, pre_shared_key: Option<String>) -> Result<P2pCertificate<'a>, ParseError> {
127 | let certificate = parse_unverified(certificate.as_ref(), pre_shared_key)?;
128 |
129 | certificate.verify()?;
130 |
131 | Ok(certificate)
132 | }
133 |
134 | /// Encrypt the signature in the libp2p Public Key Extension with PSK and public key
135 | fn encrypt_ext_signature(raw_signature: Vec<u8>, psk: &str, public_key: &[u8]) -> Result<Vec<u8>, ring::error::Unspecified> {
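// Derive a ChaCha20-Poly1305 key from the PSK via HMAC-SHA256 keyed with the host's public
// key, then seal the signature in place under a fixed nonce, appending the Poly1305 tag.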
136 | let mut out = raw_signature.clone();
137 | let s_key = hmac::Key::new(hmac::HMAC_SHA256, public_key);
138 | let psk_key = hmac::sign(&s_key, psk.as_ref());
139 | let u_key = aead::UnboundKey::new(&aead::CHACHA20_POLY1305, psk_key.as_ref())?;
140 | let nonce = aead::Nonce::assume_unique_for_key(*b"p2pclipboard");
141 | let cipher = aead::LessSafeKey::new(u_key);
142 | cipher.seal_in_place_append_tag(nonce, aead::Aad::empty(), &mut out)?;
143 | Ok(out)
144 | }
145 |
146 | /// Decrypt the signature in the libp2p Public Key Extension with PSK and public key
147 | fn decrypt_ext_signature(raw_signature: Vec<u8>, psk: &str, public_key: &[u8]) -> Result<Vec<u8>, ring::error::Unspecified> {
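// Mirror of `encrypt_ext_signature`: derive the same key from the PSK and public key, then
// open the sealed signature (verifying the tag) and return the recovered plaintext.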
148 | let mut buf = raw_signature.clone();
149 | let s_key = hmac::Key::new(hmac::HMAC_SHA256, public_key);
150 | let psk_key = hmac::sign(&s_key, psk.as_ref());
151 | let u_key = aead::UnboundKey::new(&aead::CHACHA20_POLY1305, psk_key.as_ref())?;
152 | let nonce = aead::Nonce::assume_unique_for_key(*b"p2pclipboard");
153 | let cipher = aead::LessSafeKey::new(u_key);
154 | Ok(Vec::from(cipher.open_in_place(nonce, aead::Aad::empty(), &mut buf)?))
155 | }
156 |
157 | /// An X.509 certificate with a libp2p-specific extension
158 | /// is used to secure libp2p connections.
159 | #[derive(Debug)]
160 | pub struct P2pCertificate<'a> {
161 | certificate: X509Certificate<'a>,
162 | /// This is a specific libp2p Public Key Extension with two values:
163 | /// * the public host key
164 | /// * a signature performed using the private host key
165 | extension: P2pExtension,
166 | }
167 |
168 | /// The contents of the specific libp2p extension, containing the public host key
169 | /// and a signature performed using the private host key.
170 | #[derive(Debug)]
171 | pub struct P2pExtension {
172 | public_key: identity::PublicKey,
173 | /// This signature provides cryptographic proof that the peer was
174 | /// in possession of the private host key at the time the certificate was signed.
175 | signature: Vec<u8>,
176 | }
177 |
178 | #[derive(Debug, thiserror::Error)]
179 | #[error(transparent)]
180 | pub struct GenError(#[from] rcgen::Error);
181 |
182 | #[derive(Debug, thiserror::Error)]
183 | #[error(transparent)]
184 | pub struct ParseError(#[from] pub(crate) webpki::Error);
185 |
186 | #[derive(Debug, thiserror::Error)]
187 | #[error(transparent)]
188 | pub struct VerificationError(#[from] pub(crate) webpki::Error);
189 |
190 | /// Internal function that only parses but does not verify the certificate.
191 | ///
192 | /// Useful for testing but unsuitable for production.
193 | fn parse_unverified(der_input: &[u8], pre_shared_key: Option<String>) -> Result<P2pCertificate, webpki::Error> {
194 | let x509 = X509Certificate::from_der(der_input)
195 | .map(|(_rest_input, x509)| x509)
196 | .map_err(|_| webpki::Error::BadDer)?;
197 |
198 | let p2p_ext_oid = der_parser::oid::Oid::from(&P2P_EXT_OID)
199 | .expect("This is a valid OID of p2p extension; qed");
200 |
201 | let mut libp2p_extension = None;
202 |
203 | for ext in x509.extensions() {
204 | let oid = &ext.oid;
205 | if oid == &p2p_ext_oid && libp2p_extension.is_some() {
206 | // The extension was already parsed
207 | return Err(webpki::Error::BadDer);
208 | }
209 |
210 | if oid == &p2p_ext_oid {
211 | // The public host key and the signature are ASN.1-encoded
212 | // into the SignedKey data structure, which is carried
213 | // in the libp2p Public Key Extension.
214 | // SignedKey ::= SEQUENCE {
215 | // publicKey OCTET STRING,
216 | // signature OCTET STRING
217 | // }
218 | let (public_key, signature): (Vec<u8>, Vec<u8>) =
219 | yasna::decode_der(ext.value).map_err(|_| webpki::Error::ExtensionValueInvalid)?;
220 | // The publicKey field of SignedKey contains the public host key
221 | // of the endpoint, encoded using the following protobuf:
222 | // enum KeyType {
223 | // RSA = 0;
224 | // Ed25519 = 1;
225 | // Secp256k1 = 2;
226 | // ECDSA = 3;
227 | // }
228 | // message PublicKey {
229 | // required KeyType Type = 1;
230 | // required bytes Data = 2;
231 | // }
232 | let signature = match pre_shared_key {
233 | Some(ref psk) => decrypt_ext_signature(signature, psk, &public_key)
234 | .map_err(|_| webpki::Error::ExtensionValueInvalid)?,
235 | None => signature
236 | };
237 | let public_key = identity::PublicKey::try_decode_protobuf(&public_key)
238 | .map_err(|_| webpki::Error::UnknownIssuer)?;
239 |
240 | let ext = P2pExtension {
241 | public_key,
242 | signature,
243 | };
244 | libp2p_extension = Some(ext);
245 | continue;
246 | }
247 |
248 | if ext.critical {
249 | // Endpoints MUST abort the connection attempt if the certificate
250 | // contains critical extensions that the endpoint does not understand.
251 | return Err(webpki::Error::UnsupportedCriticalExtension);
252 | }
253 |
254 | // Implementations MUST ignore non-critical extensions with unknown OIDs.
255 | }
256 |
257 | // The certificate MUST contain the libp2p Public Key Extension.
258 | // If this extension is missing, endpoints MUST abort the connection attempt.
259 | let extension = libp2p_extension.ok_or(webpki::Error::BadDer)?;
260 |
261 | let certificate = P2pCertificate {
262 | certificate: x509,
263 | extension,
264 | };
265 |
266 | Ok(certificate)
267 | }
268 |
269 | fn make_libp2p_extension(
270 | identity_keypair: &identity::Keypair,
271 | certificate_keypair: &rcgen::KeyPair,
272 | pre_shared_key: Option<String>,
273 | ) -> Result<rcgen::CustomExtension, rcgen::Error> {
274 | let serialized_pubkey = identity_keypair.public().encode_protobuf();
275 | // The peer signs the concatenation of the string `libp2p-tls-handshake:`
276 | // and the public key that it used to generate the certificate carrying
277 | // the libp2p Public Key Extension, using its private host key.
278 | let signature = {
279 | let mut msg = vec![];
280 | msg.extend(P2P_SIGNING_PREFIX);
281 | msg.extend(certificate_keypair.public_key_der());
282 |
283 | let raw = identity_keypair
284 | .sign(&msg)
285 | .map_err(|_| rcgen::Error::RingUnspecified)?;
286 | match pre_shared_key {
287 | Some(psk) => encrypt_ext_signature(raw, &psk, &serialized_pubkey)
288 | .map_err(|_| rcgen::Error::RingKeyRejected("PSK encryption failed".into()))?,
289 | None => raw
290 | }
291 | };
292 |
293 | // The public host key and the signature are ASN.1-encoded
294 | // into the SignedKey data structure, which is carried
295 | // in the libp2p Public Key Extension.
296 | // SignedKey ::= SEQUENCE {
297 | // publicKey OCTET STRING,
298 | // signature OCTET STRING
299 | // }
300 | let extension_content = {
301 | yasna::encode_der(&(serialized_pubkey, signature))
302 | };
303 |
304 | // This extension MAY be marked critical.
305 | let mut ext = rcgen::CustomExtension::from_oid_content(&P2P_EXT_OID, extension_content);
306 | ext.set_criticality(true);
307 |
308 | Ok(ext)
309 | }
310 |
311 | impl P2pCertificate<'_> {
312 | /// The [`PeerId`] of the remote peer.
313 | pub fn peer_id(&self) -> PeerId {
314 | self.extension.public_key.to_peer_id()
315 | }
316 |
317 | /// Verify the `signature` of the `message` signed by the private key corresponding to the
318 | /// public key stored in the certificate.
319 | pub fn verify_signature(
320 | &self,
321 | signature_scheme: rustls::SignatureScheme,
322 | message: &[u8],
323 | signature: &[u8],
324 | ) -> Result<(), VerificationError> {
325 | let pk = self.public_key(signature_scheme)?;
326 | pk.verify(message, signature)
327 | .map_err(|_| webpki::Error::InvalidSignatureForPublicKey)?;
328 |
329 | Ok(())
330 | }
331 |
332 | /// Get a [`ring::signature::UnparsedPublicKey`] for this `signature_scheme`.
333 | /// Return `Error` if the `signature_scheme` does not match the public key signature
334 | /// and hashing algorithm or if the `signature_scheme` is not supported.
335 | fn public_key(
336 | &self,
337 | signature_scheme: rustls::SignatureScheme,
338 | ) -> Result<ring::signature::UnparsedPublicKey<&[u8]>, webpki::Error> {
339 | use ring::signature;
340 | use rustls::SignatureScheme::*;
341 |
342 | let current_signature_scheme = self.signature_scheme()?;
343 | if signature_scheme != current_signature_scheme {
344 | // This certificate was signed with a different signature scheme
345 | return Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKey);
346 | }
347 |
348 | let verification_algorithm: &dyn signature::VerificationAlgorithm = match signature_scheme {
349 | RSA_PKCS1_SHA256 => &signature::RSA_PKCS1_2048_8192_SHA256,
350 | RSA_PKCS1_SHA384 => &signature::RSA_PKCS1_2048_8192_SHA384,
351 | RSA_PKCS1_SHA512 => &signature::RSA_PKCS1_2048_8192_SHA512,
352 | ECDSA_NISTP256_SHA256 => &signature::ECDSA_P256_SHA256_ASN1,
353 | ECDSA_NISTP384_SHA384 => &signature::ECDSA_P384_SHA384_ASN1,
354 | ECDSA_NISTP521_SHA512 => {
355 | // See https://github.com/briansmith/ring/issues/824
356 | return Err(webpki::Error::UnsupportedSignatureAlgorithm);
357 | }
358 | RSA_PSS_SHA256 => &signature::RSA_PSS_2048_8192_SHA256,
359 | RSA_PSS_SHA384 => &signature::RSA_PSS_2048_8192_SHA384,
360 | RSA_PSS_SHA512 => &signature::RSA_PSS_2048_8192_SHA512,
361 | ED25519 => &signature::ED25519,
362 | ED448 => {
363 | // See https://github.com/briansmith/ring/issues/463
364 | return Err(webpki::Error::UnsupportedSignatureAlgorithm);
365 | }
366 | // Similarly, hash functions with an output length less than 256 bits
367 | // MUST NOT be used, due to the possibility of collision attacks.
368 | // In particular, MD5 and SHA1 MUST NOT be used.
369 | RSA_PKCS1_SHA1 => return Err(webpki::Error::UnsupportedSignatureAlgorithm),
370 | ECDSA_SHA1_Legacy => return Err(webpki::Error::UnsupportedSignatureAlgorithm),
371 | _ => return Err(webpki::Error::UnsupportedSignatureAlgorithm),
372 | };
373 | let spki = &self.certificate.tbs_certificate.subject_pki;
374 | let key = signature::UnparsedPublicKey::new(
375 | verification_algorithm,
376 | spki.subject_public_key.as_ref(),
377 | );
378 |
379 | Ok(key)
380 | }
381 |
382 | /// This method validates the certificate according to libp2p TLS 1.3 specs.
383 | /// The certificate MUST:
384 | /// 1. be valid at the time it is received by the peer;
385 | /// 2. use the NamedCurve encoding;
386 | /// 3. use hash functions with an output length not less than 256 bits;
387 | /// 4. be self signed;
388 | /// 5. contain a valid signature in the specific libp2p extension.
389 | fn verify(&self) -> Result<(), webpki::Error> {
390 | use webpki::Error;
391 | // The certificate MUST have NotBefore and NotAfter fields set
392 | // such that the certificate is valid at the time it is received by the peer.
393 | if !self.certificate.validity().is_valid() {
394 | return Err(Error::InvalidCertValidity);
395 | }
396 |
397 | // Certificates MUST use the NamedCurve encoding for elliptic curve parameters.
398 | // Similarly, hash functions with an output length less than 256 bits
399 | // MUST NOT be used, due to the possibility of collision attacks.
400 | // In particular, MD5 and SHA1 MUST NOT be used.
401 | // Endpoints MUST abort the connection attempt if it is not used.
402 | let signature_scheme = self.signature_scheme()?;
403 | // Endpoints MUST abort the connection attempt if the certificate’s
404 | // self-signature is not valid.
405 | let raw_certificate = self.certificate.tbs_certificate.as_ref();
406 | let signature = self.certificate.signature_value.as_ref();
407 | // check if self signed
408 | self.verify_signature(signature_scheme, raw_certificate, signature)
409 | .map_err(|_| Error::SignatureAlgorithmMismatch)?;
410 |
411 | let subject_pki = self.certificate.public_key().raw;
412 |
413 | // The peer signs the concatenation of the string `libp2p-tls-handshake:`
414 | // and the public key that it used to generate the certificate carrying
415 | // the libp2p Public Key Extension, using its private host key.
416 | let mut msg = vec![];
417 | msg.extend(P2P_SIGNING_PREFIX);
418 | msg.extend(subject_pki);
419 |
420 | // This signature provides cryptographic proof that the peer was in possession
421 | // of the private host key at the time the certificate was signed.
422 | // Peers MUST verify the signature, and abort the connection attempt
423 | // if signature verification fails.
424 | let user_owns_sk = self
425 | .extension
426 | .public_key
427 | .verify(&msg, &self.extension.signature);
428 | if !user_owns_sk {
429 | return Err(Error::UnknownIssuer);
430 | }
431 |
432 | Ok(())
433 | }
434 |
435 | /// Return the signature scheme corresponding to [`AlgorithmIdentifier`]s
436 | /// of `subject_pki` and `signature_algorithm`
437 | /// according to <https://www.rfc-editor.org/rfc/rfc8446#section-4.2.3>.
438 | fn signature_scheme(&self) -> Result<rustls::SignatureScheme, webpki::Error> {
439 | // Certificates MUST use the NamedCurve encoding for elliptic curve parameters.
440 | // Endpoints MUST abort the connection attempt if it is not used.
441 | use oid_registry::*;
442 | use rustls::SignatureScheme::*;
443 |
444 | let signature_algorithm = &self.certificate.signature_algorithm;
445 | let pki_algorithm = &self.certificate.tbs_certificate.subject_pki.algorithm;
446 |
447 | if pki_algorithm.algorithm == OID_PKCS1_RSAENCRYPTION {
448 | if signature_algorithm.algorithm == OID_PKCS1_SHA256WITHRSA {
449 | return Ok(RSA_PKCS1_SHA256);
450 | }
451 | if signature_algorithm.algorithm == OID_PKCS1_SHA384WITHRSA {
452 | return Ok(RSA_PKCS1_SHA384);
453 | }
454 | if signature_algorithm.algorithm == OID_PKCS1_SHA512WITHRSA {
455 | return Ok(RSA_PKCS1_SHA512);
456 | }
457 | if signature_algorithm.algorithm == OID_PKCS1_RSASSAPSS {
458 | // According to https://datatracker.ietf.org/doc/html/rfc4055#section-3.1:
459 | // Inside of params there should be a sequence of:
460 | // - Hash Algorithm
461 | // - Mask Algorithm
462 | // - Salt Length
463 | // - Trailer Field
464 |
465 | // We are interested in Hash Algorithm only
466 |
467 | if let Ok(SignatureAlgorithm::RSASSA_PSS(params)) =
468 | SignatureAlgorithm::try_from(signature_algorithm)
469 | {
470 | let hash_oid = params.hash_algorithm_oid();
471 | if hash_oid == &OID_NIST_HASH_SHA256 {
472 | return Ok(RSA_PSS_SHA256);
473 | }
474 | if hash_oid == &OID_NIST_HASH_SHA384 {
475 | return Ok(RSA_PSS_SHA384);
476 | }
477 | if hash_oid == &OID_NIST_HASH_SHA512 {
478 | return Ok(RSA_PSS_SHA512);
479 | }
480 | }
481 |
482 | // Default hash algo is SHA-1, however:
483 | // In particular, MD5 and SHA1 MUST NOT be used.
484 | return Err(webpki::Error::UnsupportedSignatureAlgorithm);
485 | }
486 | }
487 |
488 | if pki_algorithm.algorithm == OID_KEY_TYPE_EC_PUBLIC_KEY {
489 | let signature_param = pki_algorithm
490 | .parameters
491 | .as_ref()
492 | .ok_or(webpki::Error::BadDer)?
493 | .as_oid()
494 | .map_err(|_| webpki::Error::BadDer)?;
495 | if signature_param == OID_EC_P256
496 | && signature_algorithm.algorithm == OID_SIG_ECDSA_WITH_SHA256
497 | {
498 | return Ok(ECDSA_NISTP256_SHA256);
499 | }
500 | if signature_param == OID_NIST_EC_P384
501 | && signature_algorithm.algorithm == OID_SIG_ECDSA_WITH_SHA384
502 | {
503 | return Ok(ECDSA_NISTP384_SHA384);
504 | }
505 | if signature_param == OID_NIST_EC_P521
506 | && signature_algorithm.algorithm == OID_SIG_ECDSA_WITH_SHA512
507 | {
508 | return Ok(ECDSA_NISTP521_SHA512);
509 | }
510 | return Err(webpki::Error::UnsupportedSignatureAlgorithm);
511 | }
512 |
513 | if signature_algorithm.algorithm == OID_SIG_ED25519 {
514 | return Ok(ED25519);
515 | }
516 | if signature_algorithm.algorithm == OID_SIG_ED448 {
517 | return Ok(ED448);
518 | }
519 |
520 | Err(webpki::Error::UnsupportedSignatureAlgorithm)
521 | }
522 |
523 | }
--------------------------------------------------------------------------------
/src/network.rs:
--------------------------------------------------------------------------------
1 | use ed25519_dalek::{pkcs8::DecodePrivateKey, SigningKey};
2 | use futures::prelude::*;
3 | use hex_literal::hex;
4 | use libp2p::gossipsub::{Behaviour, PublishError};
5 | use libp2p::kad::QueryResult;
6 | use libp2p::swarm::ConnectionError;
7 | use libp2p::{
8 | gossipsub::{self, IdentTopic, MessageAuthenticity, MessageId, ValidationMode},
9 | identify,
10 | identity::{Keypair, PublicKey},
11 | kad::{self, store::MemoryStore},
12 | multiaddr::Protocol,
13 | swarm::{self, NetworkBehaviour, SwarmEvent},
14 | tcp, yamux, Multiaddr, PeerId, Swarm,
15 | };
16 | use libp2p_mdns as mdns;
17 | use libp2p_tls as tls;
18 | use log::{debug, error, info, warn};
19 | use machine_uid;
20 | use std::collections::{hash_map::DefaultHasher, HashMap, HashSet, VecDeque};
21 | use std::hash::{Hash, Hasher};
22 | use std::{
23 | error::Error,
24 | net::{Ipv4Addr, SocketAddrV4},
25 | time::{Duration, SystemTime},
26 | };
27 | use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
28 | use tokio::sync::{mpsc, oneshot};
29 |
30 | #[derive(NetworkBehaviour)]
31 | struct P2pClipboardBehaviour {
32 | gossipsub: Behaviour,
33 | kademlia: kad::Behaviour<MemoryStore>,
34 | identify: identify::Behaviour,
35 | mdns: mdns::tokio::Behaviour,
36 | }
37 |
38 | #[derive(Clone, Eq, Hash, PartialEq, Debug)]
39 | struct PeerEndpointCache {
40 | peer_id: PeerId,
41 | address: Multiaddr,
42 | }
43 |
44 | #[derive(Clone, Eq, Hash, PartialEq, Debug)]
45 | struct ConnectionRetryTask {
46 | target: PeerEndpointCache,
47 | retry_count: usize,
48 | }
49 |
50 | #[derive(Default, Clone)]
51 | struct CompressionTransform;
52 |
53 | // Not used directly; we derive per-machine keys from this key using the machine ID.
54 | // We have to do this to have a stable Peer ID when no key is specified by the user.
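// The seed is the hex encoding of a PKCS#8 PEM ed25519 private key; the per-machine key is
// derived from it further down via `Keypair::derive_secret` with the machine ID as input.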
55 | const ID_SEED: [u8; 118] = hex!("2d2d2d2d2d424547494e2050524956415445204b45592d2d2d2d2d0a4d43344341514177425159444b3256774243494549444c3968565958485271304f48386f774a72363169416a45385a52614263363254373761723564397339670a2d2d2d2d2d454e442050524956415445204b45592d2d2d2d2d");
56 |
57 | impl gossipsub::DataTransform for CompressionTransform {
58 | fn inbound_transform(
59 | &self,
60 | raw_message: gossipsub::RawMessage,
61 | ) -> Result<gossipsub::Message, std::io::Error> {
62 | let buf: Vec<u8> = zstd::decode_all(&*raw_message.data)?;
63 | Ok(gossipsub::Message {
64 | source: raw_message.source,
65 | data: buf,
66 | sequence_number: raw_message.sequence_number,
67 | topic: raw_message.topic,
68 | })
69 | }
70 |
71 | fn outbound_transform(
72 | &self,
73 | _topic: &gossipsub::TopicHash,
74 | data: Vec<u8>,
75 | ) -> Result<Vec<u8>, std::io::Error> {
76 | let compressed_bytes = zstd::encode_all(&*data, 0)?;
77 | debug!("Compressed size {}", compressed_bytes.len());
78 | Ok(compressed_bytes)
79 | }
80 | }
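// This transform is presumably installed on the gossipsub behaviour in
// `create_gossipsub_behavior` (defined outside this excerpt), so payloads are zstd-compressed
// before publishing and decompressed on receipt.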
81 |
82 | async fn retry_waiting_thread(
83 | mut rx: UnboundedReceiver<ConnectionRetryTask>,
84 | callback: UnboundedSender<PeerEndpointCache>,
85 | mut shutdown: oneshot::Receiver<()>,
86 | ) {
87 | async fn delayed_callback(
88 | callback: UnboundedSender<PeerEndpointCache>,
89 | delay: Duration,
90 | task: PeerEndpointCache,
91 | ) {
92 | tokio::time::sleep(delay).await;
93 | let _ = callback.send(task);
94 | }
95 | loop {
96 | tokio::select! {
97 | Some(task) = rx.recv() => {
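// Quadratic back-off: wait retry_count^2 + 1 seconds before handing the endpoint back to the
// caller for another dial attempt (the +1 also covers the zero/overflow case).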
98 | let seconds = task.retry_count.checked_pow(2).unwrap_or(0) + 1;
99 | let payload = task.target;
100 | tokio::spawn(delayed_callback(callback.clone(), Duration::from_secs(seconds as u64), payload));
101 | },
102 | _ = &mut shutdown => {
103 | debug!("Connection retry waiting thread shutdown received");
104 | return;
105 | },
106 | }
107 | }
108 | }
109 |
110 | pub async fn start_network(
111 | rx: mpsc::Receiver<String>,
112 | tx: mpsc::Sender<String>,
113 | connect_arg: Option<Vec<String>>,
114 | key_arg: Option<String>,
115 | listen_arg: Option<String>,
116 | psk: Option<String>,
117 | disable_mdns: bool,
118 | ) -> Result<(), Box<dyn Error>> {
119 | let id_keys = match key_arg {
120 | Some(arg) => {
121 | let pem = std::fs::read_to_string(arg)?;
122 | let mut verifying_key_bytes = SigningKey::from_pkcs8_pem(&pem)?.to_bytes();
123 | Keypair::ed25519_from_bytes(&mut verifying_key_bytes)?
124 | }
125 | None => {
126 | let id: String = machine_uid::get()?;
127 | let mut key_bytes =
128 | SigningKey::from_pkcs8_pem(std::str::from_utf8(&ID_SEED)?)?.to_bytes();
129 | let key = Keypair::ed25519_from_bytes(&mut key_bytes)?;
130 | let mut new_key = key
131 | .derive_secret(id.as_ref())
132 | .expect("can derive secret for ed25519");
133 | Keypair::ed25519_from_bytes(&mut new_key)?
134 | }
135 | };
136 | let peer_id = PeerId::from(id_keys.public());
137 | info!("Local peer id: {}", peer_id.to_base58());
138 |
139 | // Create a Gossipsub topic
140 | let gossipsub_topic = IdentTopic::new("p2p_clipboard");
141 |
142 | // get optional boot address and peerId
143 | let (boot_addr, boot_peer_id) = match connect_arg {
144 | Some(arg) => {
145 | // Clap should already guarantee length == 2, just for sanity
146 | if arg.len() == 2 {
147 | let peer_id = arg[1].clone().parse::<PeerId>();
148 | let addr_input = arg[0].clone();
149 | let sock_addr = parse_ipv4_with_port(Some(addr_input));
150 | let multiaddr = match sock_addr {
151 | Ok((ip, port)) => Ok(format!("/ip4/{}/tcp/{}", ip, port)
152 | .parse::<Multiaddr>()
153 | .unwrap()),
154 | Err(_) => Err(()),
155 | }
156 | .unwrap_or_else(|_| {
157 | error!("Connect address is not a valid socket address");
158 | std::process::exit(1);
159 | });
160 | (Some(multiaddr), peer_id.ok())
161 | } else {
162 | (None, None)
163 | }
164 | }
165 | None => (None, None),
166 | };
167 |
168 | // Create a Swarm to manage peers and events.
169 | let mut swarm: Swarm<P2pClipboardBehaviour> = {
170 | let mut chat_behaviour = P2pClipboardBehaviour {
171 | gossipsub: create_gossipsub_behavior(id_keys.clone()),
172 | kademlia: create_kademlia_behavior(peer_id),
173 | identify: create_identify_behavior(id_keys.public()),
174 | mdns: create_mdns_behavior(peer_id, psk.clone(), disable_mdns),
175 | };
176 |
177 | // subscribes to our topic
178 | chat_behaviour
179 | .gossipsub
180 | .subscribe(&gossipsub_topic)
181 | .unwrap();
182 |
183 | libp2p::SwarmBuilder::with_existing_identity(id_keys)
184 | .with_tokio()
185 | .with_tcp(
186 | tcp::Config::default(),
187 | tls::Config::new_with_psk(psk),
188 | yamux::Config::default,
189 | )?
190 | .with_behaviour(|_key| chat_behaviour)?
191 | .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
192 | .build()
193 | };
194 |
195 | swarm
196 | .behaviour_mut()
197 | .kademlia
198 | .set_mode(Some(kad::Mode::Server));
199 |
200 | let multiaddr = match listen_arg {
201 | Some(socket_addr_string) => {
202 | if let Ok((ip, port)) = parse_ipv4_with_port(Some(socket_addr_string)) {
203 | Ok(format!("/ip4/{}/tcp/{}", ip, port))
204 | } else {
205 | Err(())
206 | }
207 | }
208 | // Listen on all interfaces and whatever port the OS assigns
209 | None => Ok("/ip4/0.0.0.0/tcp/0".to_string()),
210 | }
211 | .unwrap_or_else(|_| {
212 | error!("Listen address is not a valid socket address");
213 | std::process::exit(1);
214 | });
215 |
216 | let _ = swarm.listen_on(multiaddr.parse()?).unwrap_or_else(|_| {
217 | error!("Cannot listen on specified address");
218 | std::process::exit(1);
219 | });
220 |
221 | // FIXME: Can we swap the boot_node to some fallback node if it is temporarily unavailable?
222 | let boot_node = {
223 | // Reach out to another node if specified
224 | if let Some(boot_addr) = boot_addr {
225 | debug!("Will dial {}", &boot_addr);
226 | swarm
227 | .behaviour_mut()
228 | .kademlia
229 | .add_address(&boot_peer_id.unwrap(), boot_addr.clone());
230 | let _ = swarm.dial(boot_addr.clone());
231 | let _ = swarm.disconnect_peer_id(boot_peer_id.unwrap());
232 | Some(PeerEndpointCache {
233 | peer_id: boot_peer_id.unwrap(),
234 | address: boot_addr.clone(),
235 | })
236 | } else {
237 | None
238 | }
239 | };
240 | let (retry_queue_tx, retry_queue_rx) = mpsc::unbounded_channel::<ConnectionRetryTask>();
241 | let (retry_callback_queue_tx, retry_callback_queue_rx) =
242 | mpsc::unbounded_channel::<PeerEndpointCache>();
243 | let (shutdown_channel_tx, shutdown_channel_rx) = oneshot::channel::<()>();
244 | let _retry_handle = tokio::spawn(retry_waiting_thread(
245 | retry_queue_rx,
246 | retry_callback_queue_tx,
247 | shutdown_channel_rx,
248 | ));
249 | let swarm_handle = tokio::spawn(run(
250 | swarm,
251 | gossipsub_topic,
252 | rx,
253 | tx,
254 | boot_node,
255 | retry_queue_tx,
256 | retry_callback_queue_rx,
257 | ));
258 | swarm_handle.await?;
259 | let _ = shutdown_channel_tx.send(());
260 | Ok(())
261 | }
262 |
263 | async fn run(
264 | mut swarm: Swarm<P2pClipboardBehaviour>,
265 | gossipsub_topic: IdentTopic,
266 | mut rx: mpsc::Receiver<String>,
267 | tx: mpsc::Sender<String>,
268 | boot_node: Option<PeerEndpointCache>,
269 | retry_queue_tx: UnboundedSender<ConnectionRetryTask>,
270 | mut retry_callback_queue_rx: UnboundedReceiver<PeerEndpointCache>,
271 | ) {
272 | // We have to cache all endpoints so that we can reconnect to the p2p network when our IP changes.
273 | let mut endpoint_cache: VecDeque<PeerEndpointCache> = VecDeque::new();
274 | let mut unique_endpoints: HashSet<PeerEndpointCache> = HashSet::new();
275 | let mut current_listen_addresses: HashSet<Multiaddr> = HashSet::new();
276 | // We also need to cache the announced addresses for each node, because they can differ from the addresses we actually connect to.
277 | let mut announced_identities: HashMap<PeerId, Vec<Multiaddr>> = HashMap::new();
278 | let mut failing_connections: HashMap<PeerEndpointCache, ConnectionRetryTask> = HashMap::new();
279 |
280 | let mut t = SystemTime::now();
281 | let mut sleep;
282 | loop {
283 | let to_publish = {
284 | sleep = Box::pin(tokio::time::sleep(Duration::from_secs(30)).fuse());
285 | tokio::select! {
286 | Some(message) = rx.recv() => {
287 | debug!("Received local clipboard: {}", message.clone());
288 | Some((gossipsub_topic.clone(), message.clone()))
289 | },
290 | event = swarm.select_next_some() => match event {
291 | SwarmEvent::Behaviour(P2pClipboardBehaviourEvent::Gossipsub(ref gossip_event)) => {
292 | if let gossipsub::Event::Message {
293 | propagation_source: peer_id,
294 | message_id: id,
295 | message,
296 | } = gossip_event
297 | {
298 | debug!("Got message: {} with id: {} from peer: {:?}",
299 | String::from_utf8_lossy(&message.data),
300 | id,
301 | peer_id);
302 | tx.send(String::from_utf8_lossy(&message.data).parse().unwrap()).await.expect("Panic when sending to channel");
303 | }
304 | None
305 | }
306 | SwarmEvent::Behaviour(P2pClipboardBehaviourEvent::Identify(ref identify_event)) => {
307 | match identify_event {
308 | identify::Event::Received {
309 | connection_id: _,
310 | peer_id,
311 | info:
312 | identify::Info {
313 | listen_addrs,
314 | ..
315 | },
316 | } => {
317 | // We will receive identify info for 3 reasons:
318 | // 1. A new peer wants to give us its info for negotiation
319 | // 2. An existing peer periodically pings us with this info to show it is still alive
320 | // 3. An existing peer's network config has changed and it wants to tell us its new address
321 | // FIXME: In some cases peers could be behind some kind of NAT, so their addresses can change without the change being announced.
322 | let old_addrs = announced_identities.insert(
323 | *peer_id,
324 | listen_addrs.clone()
325 | );
326 | if let Some(old_vec) = old_addrs {
327 | let new: HashSet<Multiaddr> = listen_addrs.iter().cloned().collect();
328 | let old: HashSet<Multiaddr> = old_vec.iter().cloned().collect();
329 | // Addresses announced in the old set but not in the new one are removed from the routing table
330 | let changes = old.difference(&new);
331 | for addr in changes {
332 | debug!("Removing expired addr {addr} trough identify");
333 | swarm.behaviour_mut().kademlia.remove_address(&peer_id, addr);
334 | }
335 | }
336 | for addr in listen_addrs {
337 | debug!("received addr {addr} trough identify");
338 | if !is_multiaddr_local(addr) {
339 | swarm.behaviour_mut().kademlia.add_address(&peer_id, addr.clone());
340 | }
341 | }
342 | }
343 | _ => {
344 | debug!("got other identify event");
345 | }
346 | }
347 | None
348 | }
349 | SwarmEvent::Behaviour(P2pClipboardBehaviourEvent::Kademlia(ref kad_event)) => {
350 | match kad_event {
351 | kad::Event::RoutingUpdated {peer, ..} => {
352 | debug!("Routing updated for {:#?}", peer);
353 | },
354 | kad::Event::OutboundQueryProgressed {
355 | result: QueryResult::GetClosestPeers(result),
356 | ..
357 | } => {
358 | match result {
359 | Ok(kad::GetClosestPeersOk { key: _, peers }) => {
360 | if !peers.is_empty() {
361 | debug!("Query finished with closest peers: {:#?}", peers);
362 | for peer in peers {
363 | debug!("Got peer {:?}", peer);
364 | }
365 | } else {
366 | error!("Query finished with no closest peers.")
367 | }
368 | }
369 | Err(kad::GetClosestPeersError::Timeout { peers, .. }) => {
370 | if !peers.is_empty() {
371 | error!("Query timed out with closest peers: {:#?}", peers);
372 | for peer in peers {
373 | debug!("Got peer {:?}", peer);
374 | }
375 | } else {
376 | error!("Query timed out with no closest peers.");
377 | }
378 | }
379 | };
380 | }
381 | _ => {}
382 | }
383 | None
384 | }
385 | SwarmEvent::NewListenAddr { ref address, .. } => {
386 | info!("Local node is listening on {address}");
387 | let non_local_addr_count = current_listen_addresses
388 | .iter()
389 | .filter(|&addr| !is_multiaddr_local(addr))
390 | .count();
391 | current_listen_addresses.insert(address.clone());
392 | let mut peers_to_push: Vec<PeerId> = Vec::new();
393 | if let Some(boot_node_clone) = boot_node.as_ref() {
394 | peers_to_push.push(boot_node_clone.peer_id.clone());
395 | }
396 | swarm.behaviour_mut().identify.push(peers_to_push.clone());
397 | let connected_peers = swarm.connected_peers();
398 | let connected_peers_count = connected_peers.count();
399 | debug!("Connected to {connected_peers_count} peers");
400 | if boot_node.is_none() {
401 | info!("No boot node specified. Waiting for connection.");
402 | } else if connected_peers_count == 0 || non_local_addr_count == 0 {
403 | debug!("No connected peers or recovered from no network, we need to manually re-dial to the boot node");
404 | if let Some(real_boot_node) = boot_node.as_ref() {
405 | let _ = swarm.dial(real_boot_node.address.clone());
406 | }
407 | }
408 | let _ = swarm.behaviour_mut().kademlia.bootstrap();
409 | None
410 | }
411 | SwarmEvent::ExpiredListenAddr { ref address, .. } => {
412 | warn!("Local node no longer listening on {address}");
413 | current_listen_addresses.remove(address);
414 | let non_local_addr_count = current_listen_addresses
415 | .iter()
416 | .filter(|&addr| !is_multiaddr_local(addr))
417 | .count();
418 | let mut peers_to_push: Vec<PeerId> = Vec::new();
419 | if let Some(boot_node_clone) = boot_node.as_ref() {
420 | peers_to_push.push(boot_node_clone.peer_id.clone());
421 | }
422 | swarm.behaviour_mut().identify.push(peers_to_push.clone());
423 | if non_local_addr_count > 0 {
424 | if let Some(real_boot_node) = boot_node.as_ref() {
425 | // When the main address is torn down, the network usually needs some time to recover,
426 | // so we send the boot node to the retry queue directly.
427 | let retry_task = match failing_connections.get(real_boot_node) {
428 | Some(task) => task.clone(),
429 | None => ConnectionRetryTask {
430 | target: real_boot_node.clone(),
431 | retry_count: 0,
432 | }
433 | };
434 | failing_connections.insert(real_boot_node.clone(), retry_task.clone());
435 | let _ = retry_queue_tx.send(retry_task);
436 | }
437 | }
438 | None
439 | }
440 | SwarmEvent::ConnectionEstablished {
441 | ref peer_id,
442 | ref endpoint,
443 | ..
444 | } => {
445 | // We only care about the IP and transport port, ignoring the /p2p suffix.
446 | let real_address = get_non_p2p_multiaddr(endpoint.get_remote_address().clone());
447 | let cache = PeerEndpointCache {
448 | peer_id: peer_id.clone(),
449 | address: real_address.clone(),
450 | };
451 | debug!("Adding endpoint {real_address} to cache");
452 | if !unique_endpoints.insert(cache.clone()) {
453 | // The item is already present in the set (duplicate)
454 | debug!("endpoint {real_address} already in cache, reordering");
455 | endpoint_cache.retain(|existing_item| existing_item != &cache);
456 | } else {
457 | info!("Connected to peer {}", peer_id);
458 | }
459 | failing_connections.retain(|c, _| c.peer_id != *peer_id);
460 | endpoint_cache.push_front(cache);
461 | None
462 | }
463 | SwarmEvent::ConnectionClosed { ref peer_id, ref cause, ref endpoint, ref num_established, .. } => {
464 | if *num_established == 0 {
465 | warn!("Peer {} has disconnected", peer_id);
466 | // When the last connection has closed, we should drop that peer from our cache.
467 | // Ideally, all entries related to the peer should already have been removed.
468 | // However, rare edge cases that do not correctly trigger the event handlers can still occur.
469 | // Remove these entries explicitly if that happens.
470 | unique_endpoints.retain(|x| x.peer_id != *peer_id);
471 | endpoint_cache.retain(|x| x.peer_id != *peer_id);
472 | }
473 | if let Some(connection_error) = cause.as_ref().clone() {
474 | unique_endpoints.retain(|x| x.address != *endpoint.get_remote_address());
475 | endpoint_cache.retain(|x| x.address != *endpoint.get_remote_address());
476 | match connection_error {
477 | ConnectionError::IO(_io_error) => {
478 | // Handle IO error
479 | // An IO error usually means a network problem occurred on the local side, so we try to reconnect.
480 | let addr = endpoint.get_remote_address();
481 | if endpoint.is_dialer() {
482 | let failed_connection = PeerEndpointCache {
483 | address: addr.clone(),
484 | peer_id: peer_id.clone(),
485 | };
486 | let retry_task = match failing_connections.get(&failed_connection) {
487 | Some(task) => task.clone(),
488 | None => ConnectionRetryTask {
489 | target: failed_connection.clone(),
490 | retry_count: 0,
491 | }
492 | };
493 | failing_connections.insert(failed_connection, retry_task.clone());
494 | let _ = retry_queue_tx.send(retry_task);
495 | }
496 | }
497 | _ => {}
498 | }
499 | }
500 | None
501 | }
502 | SwarmEvent::OutgoingConnectionError {
503 | ref peer_id,
504 | ref error,
505 | connection_id,
506 | } => {
507 | debug!("OutgoingConnectionError to {peer_id:?} on {connection_id:?} - {error:?}");
508 | // We need to decide if this was a critical error and the peer should be removed from the routing table.
509 | // A peer that successfully connected and then disconnected is handled by the SwarmEvent::ConnectionClosed handler.
510 | // An error reaching this handler usually means we could not connect to that peer with the given address in the first place.
511 | let should_clean_peer = match error {
512 | swarm::DialError::Transport(errors) => {
513 | // Most transport errors originate on the local end, so the remote peer should not be removed.
514 | // Even when the problem really is on the remote end it is hard to tell, because with multiple
515 | // local IP addresses, some of them may fail to reach the remote endpoint while another
516 | // address can still connect to that peer.
517 | // To mitigate that, only remove that specific endpoint instead of everything about that peer.
518 | debug!("Dial errors len : {:?}", errors.len());
519 | let mut non_recoverable = false;
520 | for (addr, err) in errors {
521 | debug!("OutgoingTransport error : {err:?}");
522 | match err {
523 | libp2p::TransportError::MultiaddrNotSupported(addr) => {
524 | error!("Multiaddr not supported : {addr:?}");
525 | // If we can't dial a peer on a given address, we should remove that address from the routing table.
526 | // We should not hit this in production, since all nodes use the selected multiaddr protocols,
527 | // but it could occur during development, so it is handled here for sanity.
528 | non_recoverable = true
529 | }
530 | libp2p::TransportError::Other(err) => {
531 | let should_hold_and_retry = ["NetworkUnreachable", "Timeout"];
532 | if let Some(inner) = err.get_ref() {
533 | let error_msg = format!("{inner:?}");
534 | debug!("Problematic error encountered: {inner:?}");
535 | if let Some(peer) = peer_id {
536 | let failed_connection = PeerEndpointCache {
537 | address: addr.clone(),
538 | peer_id: peer.clone(),
539 | };
540 | // This is not the best way to match an error, but the errors we get here are deeply nested, e.g.:
541 | // `Other(Custom { kind: Other, error: Timeout })`
542 | // `Other(Left(Left(Os { code: 51, kind: NetworkUnreachable, message: "Network is unreachable" })`
543 | // `Other(Left(Left(Os { code: 61, kind: ConnectionRefused, message: "Connection refused" })`
544 | // which makes matching a specific error type very hard, so we match on the formatted message instead.
545 | if should_hold_and_retry.iter().any(|err| error_msg.contains(err)) {
546 | let retry_task = match failing_connections.get(&failed_connection) {
547 | Some(task) => task.clone(),
548 | None => ConnectionRetryTask {
549 | target: failed_connection.clone(),
550 | retry_count: 0,
551 | }
552 | };
553 | failing_connections.insert(failed_connection, retry_task.clone());
554 | let _ = retry_queue_tx.send(retry_task);
555 | } else {
556 | // If we are not going to retry, clean up state for this endpoint.
557 | unique_endpoints.retain(|endpoint| endpoint.address != *addr);
558 | endpoint_cache.retain(|endpoint| endpoint.address != *addr);
559 | failing_connections.remove(&failed_connection);
560 | swarm.behaviour_mut().kademlia.remove_address(peer, addr);
561 | }
562 | }
563 | };
564 | }
565 | }
566 | }
567 | non_recoverable
568 | }
569 | swarm::DialError::NoAddresses => {
570 | // We cannot dial peers without addresses
571 | error!("OutgoingConnectionError: No address provided");
572 | true
573 | }
574 | swarm::DialError::Aborted => {
575 | error!("OutgoingConnectionError: Aborted");
576 | false
577 | }
578 | swarm::DialError::DialPeerConditionFalse(_) => {
579 | error!("OutgoingConnectionError: DialPeerConditionFalse");
580 | false
581 | }
582 | swarm::DialError::LocalPeerId { .. } => {
583 | error!("OutgoingConnectionError: LocalPeerId: We are dialing ourselves");
584 | true
585 | }
586 | swarm::DialError::WrongPeerId { obtained, address } => {
587 | error!("OutgoingConnectionError: WrongPeerId: obtained: {obtained:?}, address: {address:?}");
588 | true
589 | }
590 | swarm::DialError::Denied { cause } => {
591 | error!("OutgoingConnectionError: Denied: {cause:?}");
592 | true
593 | }
594 | };
595 |
596 | if should_clean_peer {
597 | if let Some(dead_peer) = peer_id
598 | {
599 | warn!("Cleaning out dead peer {dead_peer:?}");
600 | unique_endpoints.retain(|endpoint| endpoint.peer_id != *dead_peer);
601 | endpoint_cache.retain(|endpoint| endpoint.peer_id != *dead_peer);
602 | swarm.behaviour_mut().kademlia.remove_peer(dead_peer);
603 | }
604 | }
605 | None
606 | }
607 | SwarmEvent::Behaviour(P2pClipboardBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => {
608 | for (peer_id, addr) in list {
609 | debug!("mDNS discovered a new peer: {peer_id}");
610 | let _ = swarm.dial(addr.clone());
611 | }
612 | None
613 | },
614 | SwarmEvent::Behaviour(P2pClipboardBehaviourEvent::Mdns(mdns::Event::Expired(list))) => {
615 | for (peer_id, addr) in list {
616 | debug!("mDNS expired a peer: {peer_id}");
617 | // Most of the time this address will already have been removed.
618 | swarm.behaviour_mut().kademlia.remove_address(&peer_id, &addr);
619 | }
620 | None
621 | },
622 | _ => {None}
623 | },
624 | Some(failing_connection) = retry_callback_queue_rx.recv() => {
625 | if let Some(retry_task) = failing_connections.get_mut(&failing_connection) {
626 | // Perform some checks to ensure that retrying the connection is reasonable.
627 | let is_network_ok = {
628 | let non_local_listener_count = current_listen_addresses
629 | .iter()
630 | .filter(|&addr| !is_multiaddr_local(addr))
631 | .count();
632 | if swarm.connected_peers().count() > 0 {
633 | // If we already have other peers connected, we have at least one working network interface
634 | true
635 | } else if non_local_listener_count > 0 {
636 | // If we have a non-local listening address, let's hope that address will work.
637 | let link_local_listener_count = current_listen_addresses
638 | .iter()
639 | .filter(|&addr| is_multiaddr_link_local(addr))
640 | .count();
641 | // Special cases for link-local addresses.
642 | // Some OS services use tun interfaces with link-local addresses, which may confuse us.
643 | // Users could also have unconfigured interfaces.
644 | if is_multiaddr_link_local(&retry_task.target.address) {
645 | if link_local_listener_count == 0 {
646 | debug!("Connecting to link-local address {}, but we don't have any link-local addresses.", retry_task.target.address);
647 | false
648 | } else {
649 | true
650 | }
651 | } else {
652 | if link_local_listener_count < non_local_listener_count {
653 | true
654 | } else {
655 | debug!("Connecting to address {}, but all we have are link-local addresses.", retry_task.target.address);
656 | false
657 | }
658 | }
659 | } else {
660 | false
661 | }
662 | };
663 | let is_task_ok = retry_task.retry_count <= 3;
664 | let already_connected = swarm.is_connected(&retry_task.target.peer_id);
665 | if !is_network_ok {
666 | debug!("We don't have working network connections yet, waiting.");
667 | let _ = retry_queue_tx.send(retry_task.clone());
668 | } else if !is_task_ok {
669 | error!("Connect to {} with {} failed too many times, give up.", retry_task.target.peer_id, retry_task.target.address);
670 | failing_connections.remove(&failing_connection);
671 | } else if already_connected {
672 | debug!("Already connected to {}, stop retrying.", retry_task.target.peer_id);
673 | failing_connections.remove(&failing_connection);
674 | } else {
675 | retry_task.retry_count += 1;
676 | let _ = swarm.dial(retry_task.target.address.clone());
677 | }
678 | }
679 | None
680 | },
681 | _ = &mut sleep => {
682 | debug!("Long idle detected, doing periodic jobs");
683 | let stale_peers: Vec<_> = swarm.behaviour_mut().gossipsub.all_peers()
684 | .filter(|(_, topics)| topics.is_empty())
685 | .map(|(peer, _)| peer.clone())
686 | .collect();
687 | for peer in stale_peers {
688 | // This is a strange upstream bug. Sometimes a peer may appear connected but without any topic subscriptions.
689 | // If this happens we want to drop the connection and wait for the remote peer to reconnect later.
690 | let _ = swarm.disconnect_peer_id(peer);
691 | }
692 | if let Some(boot) = boot_node.as_ref() {
693 | // We started with a boot node, so we want to make sure that we do have at least one peer.
694 | // Although the boot node may be offline, we want to connect to it if we don't have any other peers, in case it is started later.
695 | let all_peers = swarm.connected_peers();
696 | let should_redial_boot_node = all_peers.count() < 1;
697 | if should_redial_boot_node {
698 | let _ = swarm.dial(boot.address.clone());
699 | }
700 | }
701 | // Look up ourselves to improve awareness in the network.
702 | let self_id = *swarm.local_peer_id();
703 | swarm.behaviour_mut().kademlia.get_closest_peers(self_id);
704 | None
705 | }
706 | }
707 | };
708 | let d = SystemTime::now()
709 | .duration_since(t)
710 | .unwrap_or_else(|_| Duration::from_secs(0));
711 | t = SystemTime::now();
712 | if d > Duration::from_secs(60) {
713 | // The timer is updated after each loop iteration, and periodic tasks fire on a 30-second timeout.
714 | // If a single iteration takes longer than twice that timeout, our execution was probably suspended midway,
715 | // most commonly because the host OS entered a power-saving state.
716 | // Most of the underlying connections will be broken after such a suspension,
717 | // so the easiest way to recover is to break out of the loop and restart the swarm.
718 | warn!("Handler took longer than expected, restarting swarm");
719 | return;
720 | }
721 | if let Some((topic, line)) = to_publish {
722 | if let Err(err) = swarm
723 | .behaviour_mut()
724 | .gossipsub
725 | .publish(topic.clone(), line.as_bytes())
726 | {
727 | match err {
728 | PublishError::Duplicate => {}
729 | _ => {
730 | error!("Error publishing message: {}", err);
731 | }
732 | }
733 | }
734 | }
735 | }
736 | }
737 |
738 | fn create_gossipsub_behavior(id_keys: Keypair) -> Behaviour<CompressionTransform> {
739 | // Hash the message data and use the hash as the message ID.
740 | // Duplicate messages are ignored and not re-published because they produce the same hash.
741 | let message_id_fn = |message: &gossipsub::Message| {
742 | let mut s = DefaultHasher::new();
743 | message.data.hash(&mut s);
744 | MessageId::from(s.finish().to_string())
745 | };
746 |
747 | let gossipsub_config = gossipsub::ConfigBuilder::default()
748 | .heartbeat_interval(Duration::from_secs(10))
749 | .validation_mode(ValidationMode::Strict)
750 | .message_id_fn(message_id_fn)
751 | .do_px()
752 | .build()
753 | .expect("Valid config");
754 | Behaviour::new_with_transform(
755 | MessageAuthenticity::Signed(id_keys),
756 | gossipsub_config,
757 | CompressionTransform,
758 | )
759 | .expect("Correct configuration")
760 | }
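// A minimal sketch (hypothetical test module, not part of the original source) illustrating why the
// content-based message ID above deduplicates publishes: hashing identical payloads with std's
// DefaultHasher always yields the same ID, so gossipsub treats the second copy as a duplicate.
#[cfg(test)]
mod message_id_determinism_tests {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Mirrors the hashing done by message_id_fn in create_gossipsub_behavior.
    fn content_id(data: &[u8]) -> String {
        let mut s = DefaultHasher::new();
        data.hash(&mut s);
        s.finish().to_string()
    }

    #[test]
    fn identical_payloads_share_an_id() {
        assert_eq!(content_id(b"clipboard text"), content_id(b"clipboard text"));
        assert_ne!(content_id(b"clipboard text"), content_id(b"other text"));
    }
}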
761 |
762 | fn create_kademlia_behavior(local_peer_id: PeerId) -> kad::Behaviour<MemoryStore> {
763 | let mut cfg = kad::Config::default();
764 | cfg.set_query_timeout(Duration::from_secs(5 * 60));
765 | let store = MemoryStore::new(local_peer_id);
766 | kad::Behaviour::with_config(local_peer_id, store, cfg)
767 | }
768 |
769 | fn create_identify_behavior(local_public_key: PublicKey) -> identify::Behaviour {
770 | identify::Behaviour::new(identify::Config::new(
771 | "/p2pclipboard/1.0.0".into(),
772 | local_public_key,
773 | ))
774 | }
775 |
776 | fn create_mdns_behavior(
777 | local_peer_id: PeerId,
778 | pre_shared_key: Option<String>,
779 | disable_mdns: bool,
780 | ) -> mdns::tokio::Behaviour {
781 | let mut mdns_config = mdns::Config::default();
782 | let fingerprint = match pre_shared_key {
783 | Some(psk) => {
784 | let mut seed_key_bytes =
785 | SigningKey::from_pkcs8_pem(std::str::from_utf8(&ID_SEED).unwrap())
786 | .unwrap()
787 | .to_bytes();
788 | let seed_key = Keypair::ed25519_from_bytes(&mut seed_key_bytes).unwrap();
789 | Some(Vec::from(
790 | seed_key
791 | .derive_secret(psk.as_ref())
792 | .expect("can derive secret for ed25519"),
793 | ))
794 | }
795 | None => None,
796 | };
797 | mdns_config.service_fingerprint = fingerprint;
798 | mdns_config.disabled = disable_mdns;
799 | mdns::tokio::Behaviour::new(mdns_config, local_peer_id).expect("mdns correct")
800 | }
801 |
802 | fn parse_ipv4_with_port(input: Option<String>) -> Result<(Ipv4Addr, u16), &'static str> {
803 | if let Some(input_str) = input {
804 | let parts: Vec<&str> = input_str.split(':').collect();
805 |
806 | if parts.len() == 2 {
807 | let socket_address: Result<SocketAddrV4, _> = input_str.parse();
808 | match socket_address {
809 | Ok(socket) => Ok((*socket.ip(), socket.port())),
810 | Err(_) => Err("Invalid input format"),
811 | }
812 | } else if parts.len() == 1 {
813 | let ip_addr: Result<Ipv4Addr, _> = parts[0].parse();
814 | match ip_addr {
815 | Ok(ip) => Ok((ip, 0)),
816 | _ => Err("Invalid IP address or port number"),
817 | }
818 | } else {
819 | Err("Invalid input format")
820 | }
821 | } else {
822 | Err("Input is None")
823 | }
824 | }
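// A minimal sketch of tests for parse_ipv4_with_port (hypothetical, not part of the original
// source). It assumes the input parameter is Option<String>, as reconstructed in the signature above.
#[cfg(test)]
mod parse_ipv4_with_port_tests {
    use super::*;

    #[test]
    fn parses_ip_with_port() {
        assert_eq!(
            parse_ipv4_with_port(Some("192.168.1.10:8080".to_string())),
            Ok((Ipv4Addr::new(192, 168, 1, 10), 8080))
        );
    }

    #[test]
    fn parses_bare_ip_with_port_zero() {
        assert_eq!(
            parse_ipv4_with_port(Some("10.0.0.1".to_string())),
            Ok((Ipv4Addr::new(10, 0, 0, 1), 0))
        );
    }

    #[test]
    fn rejects_missing_or_malformed_input() {
        assert!(parse_ipv4_with_port(None).is_err());
        assert!(parse_ipv4_with_port(Some("not-an-ip:port".to_string())).is_err());
    }
}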
825 |
826 | fn get_non_p2p_multiaddr(mut origin_addr: Multiaddr) -> Multiaddr {
827 | while origin_addr.iter().count() > 2 {
828 | let _ = origin_addr.pop();
829 | }
830 | return origin_addr;
831 | }
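// A minimal sketch of a test for get_non_p2p_multiaddr (hypothetical, not part of the original
// source): trimming to the first two protocols keeps only the IP and transport components and
// drops any trailing suffix such as /p2p/<peer-id>.
#[cfg(test)]
mod non_p2p_multiaddr_tests {
    use super::*;

    #[test]
    fn strips_everything_after_ip_and_transport() {
        // The /ws suffix stands in for the /p2p/<peer-id> suffix seen at runtime;
        // any protocol past the first two is removed.
        let with_suffix: Multiaddr = "/ip4/192.168.1.2/tcp/4001/ws".parse().unwrap();
        let bare: Multiaddr = "/ip4/192.168.1.2/tcp/4001".parse().unwrap();
        assert_eq!(get_non_p2p_multiaddr(with_suffix), bare);
        // An address that already has only two components is returned unchanged.
        assert_eq!(get_non_p2p_multiaddr(bare.clone()), bare);
    }
}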
832 |
833 | fn is_multiaddr_link_local(addr: &Multiaddr) -> bool {
834 | if let Some(Protocol::Ip4(ip)) = addr.iter().next() {
835 | return ip.is_link_local();
836 | }
837 | return false;
838 | }
839 |
840 | fn is_multiaddr_local(addr: &Multiaddr) -> bool {
841 | addr.iter().next() == Some(Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1)))
842 | }
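// A minimal sketch of tests for the two address predicates above (hypothetical, not part of the
// original source): is_multiaddr_local matches only the 127.0.0.1 loopback address, while
// is_multiaddr_link_local matches the 169.254.0.0/16 range.
#[cfg(test)]
mod multiaddr_predicate_tests {
    use super::*;

    #[test]
    fn loopback_and_link_local_detection() {
        let local: Multiaddr = "/ip4/127.0.0.1/tcp/4001".parse().unwrap();
        let link_local: Multiaddr = "/ip4/169.254.10.2/tcp/4001".parse().unwrap();
        let public: Multiaddr = "/ip4/8.8.8.8/tcp/4001".parse().unwrap();

        assert!(is_multiaddr_local(&local));
        assert!(!is_multiaddr_local(&public));

        assert!(is_multiaddr_link_local(&link_local));
        assert!(!is_multiaddr_link_local(&local));
    }
}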
843 |
--------------------------------------------------------------------------------