diff --git a/examples/get_metadata/src/main.rs b/examples/get_metadata/src/main.rs index 156affe02..8a4efeb49 100644 --- a/examples/get_metadata/src/main.rs +++ b/examples/get_metadata/src/main.rs @@ -21,8 +21,9 @@ use peer::protocols::{NullProtocol, PeerExtensionProtocol, PeerWireProtocol}; use peer::{IPeerManagerMessage, OPeerManagerMessage, PeerInfo, PeerManagerBuilder, PeerProtocolCodec}; use pendulum::future::TimerBuilder; use pendulum::HashedWheelBuilder; +use select::discovery::error::DiscoveryError; use select::discovery::{IDiscoveryMessage, ODiscoveryMessage, UtMetadataModule}; -use select::{ControlMessage, IExtendedMessage, IUberMessage, OExtendedMessage, OUberMessage, UberModuleBuilder}; +use select::{ControlMessage, DiscoveryTrait, IExtendedMessage, IUberMessage, OExtendedMessage, OUberMessage, UberModuleBuilder}; use tokio_core::reactor::Core; // Legacy Handshaker, when bip_dht is migrated, it will accept S directly @@ -69,6 +70,7 @@ where fn metadata(&mut self, _data: ()) {} } +#[allow(clippy::too_many_lines)] fn main() { let matches = clap_app!(myapp => (version: "1.0") @@ -100,7 +102,7 @@ fn main() { // Set a low handshake timeout so we don't wait on peers that aren't listening on tcp HandshakerConfig::default().with_connect_timeout(Duration::from_millis(500)), ) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap() .into_parts(); // Create a peer manager that will hold our peers and heartbeat/send messages to them @@ -109,7 +111,7 @@ fn main() { // Hook up a future that feeds incoming (handshaken) peers over to the peer manager core.handle().spawn( handshaker_recv - .map_err(|_| ()) + .map_err(|()| ()) .map(|complete_msg| { // Our handshaker finished handshaking some peer, get // the peer info as well as the peer itself (socket) @@ -141,11 +143,22 @@ fn main() { ); // Create our UtMetadata selection module - let (uber_send, uber_recv) = UberModuleBuilder::new() - 
.with_extended_builder(Some(ExtendedMessageBuilder::new())) - .with_discovery_module(UtMetadataModule::new()) - .build() - .split(); + let (uber_send, uber_recv) = { + let mut this = UberModuleBuilder::new().with_extended_builder(Some(ExtendedMessageBuilder::new())); + let module = UtMetadataModule::new(); + this.discovery.push(Box::new(module) + as Box< + dyn DiscoveryTrait< + SinkItem = IDiscoveryMessage, + SinkError = Box, + Item = ODiscoveryMessage, + Error = Box, + >, + >); + this + } + .build() + .split(); // Tell the uber module we want to download metainfo for the given hash let uber_send = core @@ -159,7 +172,7 @@ fn main() { let timer = TimerBuilder::default().build(HashedWheelBuilder::default().build()); let timer_recv = timer.sleep_stream(Duration::from_millis(100)).unwrap().map(Either::B); - let merged_recv = peer_manager_recv.map(Either::A).map_err(|_| ()).select(timer_recv); + let merged_recv = peer_manager_recv.map(Either::A).map_err(|()| ()).select(timer_recv); // Hook up a future that receives messages from the peer manager core.handle().spawn(future::loop_fn( @@ -183,23 +196,23 @@ fn main() { info, message, ))), Either::A(OPeerManagerMessage::PeerAdded(info)) => { - println!("Connected To Peer: {:?}", info); + println!("Connected To Peer: {info:?}"); Some(IUberMessage::Control(ControlMessage::PeerConnected(info))) } Either::A(OPeerManagerMessage::PeerRemoved(info)) => { - println!("We Removed Peer {:?} From The Peer Manager", info); + println!("We Removed Peer {info:?} From The Peer Manager"); Some(IUberMessage::Control(ControlMessage::PeerDisconnected(info))) } Either::A(OPeerManagerMessage::PeerDisconnect(info)) => { - println!("Peer {:?} Disconnected From Us", info); + println!("Peer {info:?} Disconnected From Us"); Some(IUberMessage::Control(ControlMessage::PeerDisconnected(info))) } Either::A(OPeerManagerMessage::PeerError(info, error)) => { - println!("Peer {:?} Disconnected With Error: {:?}", info, error); + println!("Peer {info:?} 
Disconnected With Error: {error:?}"); Some(IUberMessage::Control(ControlMessage::PeerDisconnected(info))) } - Either::B(_) => Some(IUberMessage::Control(ControlMessage::Tick(Duration::from_millis(100)))), - _ => None, + Either::B(()) => Some(IUberMessage::Control(ControlMessage::Tick(Duration::from_millis(100)))), + Either::A(_) => None, }; match opt_message { diff --git a/examples/simple_torrent/src/main.rs b/examples/simple_torrent/src/main.rs index 7a0669faa..8a4e23da4 100644 --- a/examples/simple_torrent/src/main.rs +++ b/examples/simple_torrent/src/main.rs @@ -67,6 +67,7 @@ impl Handshaker for LegacyHandshaker where S: Sink + (version: "1.0") (author: "Andrew ") (about: "Simple torrent downloading") @@ -130,7 +133,7 @@ fn main() { // block when we reach our max peers). Setting these to low // values so we don't have more than 2 unused tcp connections. .with_config(HandshakerConfig::default().with_wait_buffer_size(0).with_done_buffer_size(0)) - .build::(TcpTransport, core.handle()) // Will handshake over TCP (could swap this for UTP in the future) + .build::(TcpTransport, &core.handle()) // Will handshake over TCP (could swap this for UTP in the future) .unwrap() .into_parts(); // Create a peer manager that will hold our peers and heartbeat/send messages to them @@ -146,7 +149,7 @@ fn main() { let map_peer_manager_send = peer_manager_send.clone().sink_map_err(|_| ()); core.handle().spawn( handshaker_recv - .map_err(|_| ()) + .map_err(|()| ()) .map(|complete_msg| { // Our handshaker finished handshaking some peer, get // the peer info as well as the peer itself (socket) @@ -173,7 +176,7 @@ fn main() { // Map out the errors for these sinks so they match let map_select_send = select_send.clone().sink_map_err(|_| ()); - let map_disk_manager_send = disk_manager_send.clone().sink_map_err(|_| ()); + let map_disk_manager_send = disk_manager_send.clone().sink_map_err(|()| ()); // Hook up a future that receives messages from the peer manager, and forwards request to the 
disk manager or selection manager (using loop fn // here because we need to be able to access state, like request_map and a different future combinator wouldn't let us keep it around to access) @@ -241,15 +244,15 @@ fn main() { OPeerManagerMessage::PeerAdded(info) => Some(Either::A(SelectState::NewPeer(info))), OPeerManagerMessage::SentMessage(_, _) => None, OPeerManagerMessage::PeerRemoved(info) => { - println!("We Removed Peer {:?} From The Peer Manager", info); + println!("We Removed Peer {info:?} From The Peer Manager"); Some(Either::A(SelectState::RemovedPeer(info))) } OPeerManagerMessage::PeerDisconnect(info) => { - println!("Peer {:?} Disconnected From Us", info); + println!("Peer {info:?} Disconnected From Us"); Some(Either::A(SelectState::RemovedPeer(info))) } OPeerManagerMessage::PeerError(info, error) => { - println!("Peer {:?} Disconnected With Error: {:?}", info, error); + println!("Peer {info:?} Disconnected With Error: {error:?}"); Some(Either::A(SelectState::RemovedPeer(info))) } }; @@ -305,6 +308,7 @@ fn main() { let peer_info = peer_list.remove(1); // Pack up our block into a peer wire protocol message and send it off to the peer + #[allow(clippy::cast_possible_truncation)] let piece = PieceMessage::new(metadata.piece_index() as u32, metadata.block_offset() as u32, block.freeze()); let pwp_message = PeerWireProtocolMessage::Piece(piece); @@ -360,13 +364,13 @@ fn main() { match opt_item.unwrap() { // Disk manager identified a good piece already downloaded SelectState::GoodPiece(index) => { - piece_requests.retain(|req| req.piece_index() != index as u32); + piece_requests.retain(|req| u64::from(req.piece_index()) != index); Loop::Continue((select_recv, piece_requests, cur_pieces + 1)) } // Disk manager is finished identifying good pieces, torrent has been added SelectState::TorrentAdded => Loop::Break((select_recv, piece_requests, cur_pieces)), // Shouldn't be receiving any other messages... 
- message => panic!("Unexpected Message Received In Selection Receiver: {:?}", message), + message => panic!("Unexpected Message Received In Selection Receiver: {message:?}"), } }) .map_err(|_| ()) @@ -467,14 +471,14 @@ fn main() { vec![IPeerManagerMessage::SendMessage( peer, 0, - PeerWireProtocolMessage::Have(HaveMessage::new(piece as u32)), + PeerWireProtocolMessage::Have(HaveMessage::new(piece.try_into().unwrap())), )] } else { vec![] } } // Decided not to handle these two cases here - SelectState::RemovedPeer(info) => panic!("Peer {:?} Got Disconnected", info), + SelectState::RemovedPeer(info) => panic!("Peer {info:?} Got Disconnected"), SelectState::BadPiece(_) => panic!("Peer Gave Us Bad Piece"), _ => vec![], }; @@ -507,11 +511,11 @@ fn main() { Box::new( map_peer_manager_send .send_all(stream::iter_result(send_messages.into_iter().map(Ok::<_, ()>))) - .map_err(|_| ()) + .map_err(|()| ()) .and_then(|(map_peer_manager_send, _)| { map_peer_manager_send.send_all(stream::iter_result(next_piece_requests)) }) - .map_err(|_| ()) + .map_err(|()| ()) .map(move |(map_peer_manager_send, _)| { Loop::Continue(( select_recv, @@ -567,6 +571,7 @@ fn generate_requests(info: &Info, block_size: usize) -> Vec { for block_index in 0..whole_blocks { let block_offset = block_index * block_size as u64; + #[allow(clippy::cast_possible_truncation)] requests.push(RequestMessage::new(piece_index as u32, block_offset as u32, block_size)); } @@ -576,9 +581,9 @@ fn generate_requests(info: &Info, block_size: usize) -> Vec { let block_offset = whole_blocks * block_size as u64; requests.push(RequestMessage::new( - piece_index as u32, - block_offset as u32, - partial_block_length as usize, + piece_index.try_into().unwrap(), + block_offset.try_into().unwrap(), + partial_block_length.try_into().unwrap(), )); } diff --git a/packages/bencode/src/access/dict.rs b/packages/bencode/src/access/dict.rs index 596d9535e..a3e56d1bb 100644 --- a/packages/bencode/src/access/dict.rs +++ 
b/packages/bencode/src/access/dict.rs @@ -21,7 +21,7 @@ pub trait BDictAccess { impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { fn to_list(&self) -> Vec<(&&'a [u8], &V)> { - self.iter().map(|(k, v)| (k, v)).collect() + self.iter().collect() } fn lookup(&self, key: &[u8]) -> Option<&V> { @@ -43,7 +43,7 @@ impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { impl<'a, V> BDictAccess, V> for BTreeMap, V> { fn to_list(&self) -> Vec<(&Cow<'a, [u8]>, &V)> { - self.iter().map(|(k, v)| (k, v)).collect() + self.iter().collect() } fn lookup(&self, key: &[u8]) -> Option<&V> { diff --git a/packages/bencode/src/reference/decode_opt.rs b/packages/bencode/src/reference/decode_opt.rs index ac94d0311..e8d9a8337 100644 --- a/packages/bencode/src/reference/decode_opt.rs +++ b/packages/bencode/src/reference/decode_opt.rs @@ -41,7 +41,7 @@ impl BDecodeOpt { /// /// It may be useful to disable this if for example, the input bencode is prepended to /// some payload and you would like to disassociate it. In this case, to find where the - /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call len(). + /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call `len()`. #[must_use] pub fn enforce_full_decode(&self) -> bool { self.enforce_full_decode diff --git a/packages/dht/examples/debug.rs b/packages/dht/examples/debug.rs index 6d769331b..87c00814e 100644 --- a/packages/dht/examples/debug.rs +++ b/packages/dht/examples/debug.rs @@ -57,7 +57,7 @@ impl HandshakerTrait for SimpleHandshaker { } /// Send the given Metadata back to the client. 
- fn metadata(&mut self, _: Self::MetadataEnvelope) {} + fn metadata(&mut self, (): Self::MetadataEnvelope) {} } fn main() { @@ -80,7 +80,7 @@ fn main() { let events = dht.events(); thread::spawn(move || { for event in events { - println!("\nReceived Dht Event {:?}", event); + println!("\nReceived Dht Event {event:?}"); } }); diff --git a/packages/dht/src/builder.rs b/packages/dht/src/builder.rs index fa2a62519..a584d2ab3 100644 --- a/packages/dht/src/builder.rs +++ b/packages/dht/src/builder.rs @@ -96,6 +96,7 @@ impl Drop for MainlineDht { // ----------------------------------------------------------------------------// /// Stores information for initializing a DHT. +#[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug)] pub struct DhtBuilder { nodes: HashSet, @@ -193,6 +194,10 @@ impl DhtBuilder { } /// Start a mainline DHT with the current configuration. + /// + /// # Errors + /// + /// It would return error if unable to build from the handshaker. pub fn start_mainline(self, handshaker: H) -> io::Result where H: HandshakerTrait + 'static, diff --git a/packages/dht/src/message/announce_peer.rs b/packages/dht/src/message/announce_peer.rs index 7c939def2..e4cfa5f3c 100644 --- a/packages/dht/src/message/announce_peer.rs +++ b/packages/dht/src/message/announce_peer.rs @@ -19,6 +19,7 @@ pub enum ConnectPort { Explicit(u16), } +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct AnnouncePeerRequest<'a> { trans_id: &'a [u8], @@ -46,6 +47,11 @@ impl<'a> AnnouncePeerRequest<'a> { } } + /// Generate a `AnnouncePeerRequest` from parts + /// + /// # Errors + /// + /// This function will return an error unable to get bytes unable do lookup. 
pub fn from_parts(rqst_root: &'a dyn BDictAccess, trans_id: &'a [u8]) -> DhtResult> where B: BRefAccess, @@ -67,7 +73,8 @@ impl<'a> AnnouncePeerRequest<'a> { Some(Some(n)) if n != 0 => ConnectPort::Implied, _ => { // If we hit this, the port either was not provided or it was of the wrong bencode type - let port_number = port? as u16; + #[allow(clippy::cast_possible_truncation)] + let port_number = (port?).unsigned_abs() as u16; ConnectPort::Explicit(port_number) } }; @@ -126,6 +133,7 @@ impl<'a> AnnouncePeerRequest<'a> { } } +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct AnnouncePeerResponse<'a> { trans_id: &'a [u8], @@ -138,6 +146,11 @@ impl<'a> AnnouncePeerResponse<'a> { AnnouncePeerResponse { trans_id, node_id } } + /// Generate a `AnnouncePeerResponse` from parts + /// + /// # Errors + /// + /// This function will return an error unable to get bytes or unable to validate the node id. pub fn from_parts(rqst_root: &dyn BDictAccess, trans_id: &'a [u8]) -> DhtResult> where B: BRefAccess, diff --git a/packages/dht/src/message/compact_info.rs b/packages/dht/src/message/compact_info.rs index 05a5339b6..8a55d3217 100644 --- a/packages/dht/src/message/compact_info.rs +++ b/packages/dht/src/message/compact_info.rs @@ -5,7 +5,7 @@ use std::net::{Ipv4Addr, SocketAddrV4}; use bencode::{BListAccess, BRefAccess}; use util::bt::{self, NodeId}; -use util::error::{LengthError, LengthErrorKind, LengthResult}; +use util::error::{Error, LengthErrorKind, LengthResult}; use util::sha::ShaHash; // TODO: Update this module to accept data sources as both a slice of bytes and probably @@ -22,9 +22,14 @@ pub struct CompactNodeInfo<'a> { } impl<'a> CompactNodeInfo<'a> { + /// Make a new `CompactNodeInfo` from bytes + /// + /// # Errors + /// + /// This function will return an error if the byte array is the wrong length. 
pub fn new(nodes: &'a [u8]) -> LengthResult> { if nodes.len() % BYTES_PER_COMPACT_NODE_INFO != 0 { - Err(LengthError::new( + Err(Error::new( LengthErrorKind::LengthMultipleExpected, BYTES_PER_COMPACT_NODE_INFO, )) @@ -57,6 +62,7 @@ pub struct CompactNodeInfoIter<'a> { pos: usize, } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for CompactNodeInfoIter<'a> { type Item = (NodeId, SocketAddrV4); @@ -90,6 +96,12 @@ where { /// Creates a new `CompactValueInfo` container for the given values. /// + /// # Errors + /// + /// This function will return an error if the byte array is the wrong length. + /// + /// # Panics + /// /// It is VERY important that the values have been checked to contain only /// bencoded bytes and not other types as that will result in a panic. pub fn new(values: &'a dyn BListAccess) -> LengthResult> { @@ -98,7 +110,7 @@ where let compact_value = node.bytes().unwrap(); if compact_value.len() != BYTES_PER_COMPACT_IP { - return Err(LengthError::with_index( + return Err(Error::with_index( LengthErrorKind::LengthExpected, BYTES_PER_COMPACT_IP, index, @@ -175,9 +187,7 @@ fn parts_from_compact_info(compact_info: &[u8]) -> (NodeId, SocketAddrV4) { } fn socket_v4_from_bytes_be(bytes: &[u8]) -> LengthResult { - if bytes.len() != BYTES_PER_COMPACT_IP { - Err(LengthError::new(LengthErrorKind::LengthExpected, BYTES_PER_COMPACT_IP)) - } else { + if bytes.len() == BYTES_PER_COMPACT_IP { let (oc_one, oc_two, oc_three, oc_four) = (bytes[0], bytes[1], bytes[2], bytes[3]); let mut port = 0u16; @@ -188,6 +198,8 @@ fn socket_v4_from_bytes_be(bytes: &[u8]) -> LengthResult { let ip = Ipv4Addr::new(oc_one, oc_two, oc_three, oc_four); Ok(SocketAddrV4::new(ip, port)) + } else { + Err(Error::new(LengthErrorKind::LengthExpected, BYTES_PER_COMPACT_IP)) } } @@ -253,6 +265,7 @@ mod tests { #[test] fn positive_compact_values_one() { + #[allow(clippy::cast_possible_truncation)] let bytes = [127, 0, 0, 1, (6881 >> 8) as u8, (6881 & 0x00FF) as u8]; let bencode_values = 
ben_list!(ben_bytes!(&bytes[..])); let compact_value: CompactValueInfo<'_, BencodeMut<'_>> = CompactValueInfo::new(bencode_values.list().unwrap()).unwrap(); @@ -265,7 +278,9 @@ mod tests { #[test] fn positive_compact_values_many() { + #[allow(clippy::cast_possible_truncation)] let bytes_one = [127, 0, 0, 1, (6881 >> 8) as u8, (6881 & 0x00FF) as u8]; + #[allow(clippy::cast_possible_truncation)] let bytes_two = [10, 0, 0, 1, (6889 >> 8) as u8, (6889 & 0x00FF) as u8]; let bencode_values = ben_list!(ben_bytes!(&bytes_one[..]), ben_bytes!(&bytes_two[..])); let compact_value: CompactValueInfo<'_, BencodeMut<'_>> = CompactValueInfo::new(bencode_values.list().unwrap()).unwrap(); diff --git a/packages/dht/src/message/error.rs b/packages/dht/src/message/error.rs index 305025d23..5f252e564 100644 --- a/packages/dht/src/message/error.rs +++ b/packages/dht/src/message/error.rs @@ -17,6 +17,7 @@ const SERVER_ERROR_CODE: u8 = 202; const PROTOCOL_ERROR_CODE: u8 = 203; const METHOD_UNKNOWN_CODE: u8 = 204; +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum ErrorCode { GenericError, @@ -56,7 +57,7 @@ impl From for u8 { struct ErrorValidate; impl ErrorValidate { - fn extract_error_args(&self, args: &dyn BListAccess) -> DhtResult<(u8, String)> + fn extract_error_args(self, args: &dyn BListAccess) -> DhtResult<(u8, String)> where B: BRefAccess, { @@ -69,8 +70,9 @@ impl ErrorValidate { let code = self.convert_int(&args[0], format!("{ERROR_ARGS_KEY}[0]"))?; let message = String::from(self.convert_str(&args[1], &format!("{ERROR_ARGS_KEY}[1]"))?); - let code2 = code; + let code = code.unsigned_abs(); + #[allow(clippy::cast_possible_truncation)] Ok((code as u8, message)) } } @@ -87,6 +89,7 @@ impl BConvertExt for ErrorValidate {} // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct ErrorMessage<'a> { trans_id: 
Cow<'a, [u8]>, @@ -110,6 +113,11 @@ impl<'a> ErrorMessage<'a> { } } + /// Generate a `BRefAccess` from parts. + /// + /// # Errors + /// + /// This function will return an error if unable to lookup the error. pub fn from_parts(root: &dyn BDictAccess, trans_id: &'a [u8]) -> DhtResult> where B: BRefAccess, diff --git a/packages/dht/src/message/find_node.rs b/packages/dht/src/message/find_node.rs index 4b7fcfd91..e5f5b65f3 100644 --- a/packages/dht/src/message/find_node.rs +++ b/packages/dht/src/message/find_node.rs @@ -7,6 +7,7 @@ use crate::message::compact_info::CompactNodeInfo; use crate::message::request::{self, RequestValidate}; use crate::message::response::ResponseValidate; +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct FindNodeRequest<'a> { trans_id: &'a [u8], @@ -28,6 +29,10 @@ impl<'a> FindNodeRequest<'a> { /// /// The `target_key` argument is provided for cases where, due to forward compatibility, /// the target key we are interested in could fall under the target key or another key. + /// + /// # Errors + /// + /// It will return an error if unable to lookup and validate the node parts. pub fn from_parts( rqst_root: &dyn BDictAccess, trans_id: &'a [u8], @@ -78,6 +83,7 @@ impl<'a> FindNodeRequest<'a> { } } +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct FindNodeResponse<'a> { trans_id: &'a [u8], @@ -86,6 +92,11 @@ pub struct FindNodeResponse<'a> { } impl<'a> FindNodeResponse<'a> { + /// Create a new `FindNodeResponse`. + /// + /// # Errors + /// + /// This function will return an error if unable to validate the nodes. pub fn new(trans_id: &'a [u8], node_id: NodeId, nodes: &'a [u8]) -> DhtResult> { let validate = ResponseValidate::new(trans_id); let compact_nodes = validate.validate_nodes(nodes)?; @@ -97,6 +108,11 @@ impl<'a> FindNodeResponse<'a> { }) } + /// Create a new `FindNodeResponse` from parts.
+ /// + /// # Errors + /// + /// This function will return an error if unable to lookup and validate the node. pub fn from_parts(rsp_root: &'a dyn BDictAccess, trans_id: &'a [u8]) -> DhtResult> where B: BRefAccess, diff --git a/packages/dht/src/message/get_peers.rs b/packages/dht/src/message/get_peers.rs index 80126ba1c..152f3090f 100644 --- a/packages/dht/src/message/get_peers.rs +++ b/packages/dht/src/message/get_peers.rs @@ -12,6 +12,7 @@ use crate::message::compact_info::{CompactNodeInfo, CompactValueInfo}; use crate::message::request::{self, RequestValidate}; use crate::message::response::{self, ResponseValidate}; +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct GetPeersRequest<'a> { trans_id: &'a [u8], @@ -29,6 +30,11 @@ impl<'a> GetPeersRequest<'a> { } } + /// Create a `GetPeersRequest` from parts + /// + /// # Errors + /// + /// This function will return an error if unable to lookup, convert, and validate node. pub fn from_parts(rqst_root: &dyn BDictAccess, trans_id: &'a [u8]) -> DhtResult> where B: BRefAccess, @@ -86,6 +92,7 @@ where Both(CompactNodeInfo<'a>, CompactValueInfo<'a, B::BType>), } +#[allow(clippy::module_name_repetitions)] #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct GetPeersResponse<'a, B> where @@ -120,6 +127,11 @@ where } } + /// Create a `GetPeersResponse` from parts. + /// + /// # Errors + /// + /// This function will return an error if unable to lookup, convert, and validate the nodes. pub fn from_parts( rsp_root: &'a dyn BDictAccess, trans_id: &'a [u8], @@ -182,6 +194,11 @@ where } impl<'a> GetPeersResponse<'a, BencodeMut<'a>> { + /// Returns the encoding of this [`GetPeersResponse`]. + /// + /// # Panics + /// + /// Panics if unable to get the bencoded list.
#[must_use] pub fn encode(&self) -> Vec { let mut response_args = BTreeMap::new(); diff --git a/packages/dht/src/message/mod.rs b/packages/dht/src/message/mod.rs index 50de32fea..3f20d8f3b 100644 --- a/packages/dht/src/message/mod.rs +++ b/packages/dht/src/message/mod.rs @@ -56,6 +56,7 @@ impl BConvert for MessageValidate { impl BConvertExt for MessageValidate {} // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum MessageType<'a, B> where @@ -72,6 +73,11 @@ where B: BRefAccess + Clone, B::BType: PartialEq + Eq + core::hash::Hash + Debug, { + /// Create a new `MessageType` + /// + /// # Errors + /// + /// This function will return an error if unable to lookup, convert and crate type. pub fn new(message: &'a B::BType, trans_mapper: T) -> DhtResult> where T: Fn(&[u8]) -> ExpectedResponse, @@ -90,7 +96,7 @@ where } RESPONSE_TYPE_KEY => { let rsp_type = trans_mapper(trans_id); - let rsp_message = ResponseType::from_parts(msg_root, trans_id, rsp_type)?; + let rsp_message = ResponseType::from_parts(msg_root, trans_id, &rsp_type)?; Ok(MessageType::Response(rsp_message)) } ERROR_TYPE_KEY => { diff --git a/packages/dht/src/message/ping.rs b/packages/dht/src/message/ping.rs index 942fd227f..0bb1ec6a7 100644 --- a/packages/dht/src/message/ping.rs +++ b/packages/dht/src/message/ping.rs @@ -8,6 +8,7 @@ use crate::error::DhtResult; use crate::message; use crate::message::request::{self, RequestValidate}; +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct PingRequest<'a> { trans_id: &'a [u8], @@ -20,6 +21,11 @@ impl<'a> PingRequest<'a> { PingRequest { trans_id, node_id } } + /// Create a `PingRequest` from parts. + /// + /// # Errors + /// + /// This function will return an error if unable to lookup, convert, and validate nodes. 
pub fn from_parts(rqst_root: &dyn BDictAccess, trans_id: &'a [u8]) -> DhtResult> where B: BRefAccess, @@ -57,6 +63,7 @@ impl<'a> PingRequest<'a> { } } +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct PingResponse<'a> { trans_id: &'a [u8], @@ -70,6 +77,11 @@ impl<'a> PingResponse<'a> { PingResponse { trans_id, node_id } } + /// Create a new `PingResponse` from parts. + /// + /// # Errors + /// + /// This function will return an error if unable to generate the ping request from the root. pub fn from_parts(rsp_root: &dyn BDictAccess, trans_id: &'a [u8]) -> DhtResult> where B: BRefAccess, diff --git a/packages/dht/src/message/request.rs b/packages/dht/src/message/request.rs index 6d4e72594..61e36a584 100644 --- a/packages/dht/src/message/request.rs +++ b/packages/dht/src/message/request.rs @@ -22,6 +22,7 @@ pub const ANNOUNCE_PEER_TYPE_KEY: &str = "announce_peer"; // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] pub struct RequestValidate<'a> { trans_id: &'a [u8], } @@ -32,6 +33,11 @@ impl<'a> RequestValidate<'a> { RequestValidate { trans_id } } + /// Validate and deserialize bytes into a `NodeId` + /// + /// # Errors + /// + /// This function will return an error if to generate the `NodeId`. pub fn validate_node_id(&self, node_id: &[u8]) -> DhtResult { NodeId::from_hash(node_id).map_err(|_| { let error_msg = ErrorMessage::new( @@ -44,6 +50,11 @@ impl<'a> RequestValidate<'a> { }) } + /// Validate and deserialize bytes into a `InfoHash` + /// + /// # Errors + /// + /// This function will return an error if to generate the `InfoHash`. 
pub fn validate_info_hash(&self, info_hash: &[u8]) -> DhtResult { InfoHash::from_hash(info_hash).map_err(|_| { let error_msg = ErrorMessage::new( @@ -68,6 +79,7 @@ impl<'a> BConvertExt for RequestValidate<'a> {} // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum RequestType<'a> { Ping(PingRequest<'a>), @@ -78,6 +90,11 @@ pub enum RequestType<'a> { } impl<'a> RequestType<'a> { + /// Creates a new `RequestType` from parts. + /// + /// # Errors + /// + /// This function will return an error if unable to lookup, convert, and generate correct type. pub fn from_parts(root: &'a dyn BDictAccess, trans_id: &'a [u8], rqst_type: &str) -> DhtResult> where B: BRefAccess, diff --git a/packages/dht/src/message/response.rs b/packages/dht/src/message/response.rs index 358fcf428..fb5a28250 100644 --- a/packages/dht/src/message/response.rs +++ b/packages/dht/src/message/response.rs @@ -15,6 +15,7 @@ pub const RESPONSE_ARGS_KEY: &str = "r"; // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] pub struct ResponseValidate<'a> { trans_id: &'a [u8], } @@ -25,6 +26,11 @@ impl<'a> ResponseValidate<'a> { ResponseValidate { trans_id } } + /// Validate and deserialize bytes into a `NodeId` + /// + /// # Errors + /// + /// This function will return an error if to generate the `NodeId`. pub fn validate_node_id(&self, node_id: &[u8]) -> DhtResult { NodeId::from_hash(node_id).map_err(|_| { DhtError::from_kind(DhtErrorKind::InvalidResponse { @@ -37,7 +43,11 @@ impl<'a> ResponseValidate<'a> { }) } - /// Validate the given nodes string which should be IPv4 compact + /// Validate and deserialize bytes into a `CompactNodeInfo` + /// + /// # Errors + /// + /// This function will return an error if to generate the `CompactNodeInfo`. 
pub fn validate_nodes<'b>(&self, nodes: &'b [u8]) -> DhtResult> { CompactNodeInfo::new(nodes).map_err(|_| { DhtError::from_kind(DhtErrorKind::InvalidResponse { @@ -51,6 +61,11 @@ impl<'a> ResponseValidate<'a> { }) } + /// Validate and deserialize bytes into a `CompactValueInfo` + /// + /// # Errors + /// + /// This function will return an error if to generate the `CompactValueInfo`. pub fn validate_values<'b, B>(&self, values: &'b dyn BListAccess) -> DhtResult> where B: BRefAccess + Clone, @@ -87,6 +102,7 @@ impl<'a> BConvertExt for ResponseValidate<'a> {} // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] #[allow(unused)] pub enum ExpectedResponse { Ping, @@ -98,6 +114,7 @@ pub enum ExpectedResponse { None, } +#[allow(clippy::module_name_repetitions)] #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum ResponseType<'a, B> where @@ -116,10 +133,15 @@ where B: BRefAccess + Clone, B::BType: PartialEq + Eq + core::hash::Hash + Debug, { + /// Creates a new `ResponseType` from parts. + /// + /// # Errors + /// + /// This function will return an error if unable to lookup, convert, and generate correct type. pub fn from_parts( root: &'a dyn BDictAccess, trans_id: &'a [u8], - rsp_type: ExpectedResponse, + rsp_type: &ExpectedResponse, ) -> DhtResult> where B: BRefAccess, diff --git a/packages/dht/src/router.rs b/packages/dht/src/router.rs index 9891444d5..69d8a9b15 100644 --- a/packages/dht/src/router.rs +++ b/packages/dht/src/router.rs @@ -16,9 +16,9 @@ const TRANSMISSION_DHT: (&str, u16) = ("dht.transmissionbt.com", 6881); pub enum Router { /// Bootstrap server maintained by uTorrent. uTorrent, - /// Bootstrap server maintained by BitTorrent. + /// Bootstrap server maintained by `BitTorrent`. BitTorrent, - /// Bootstrap server maintained by BitComet. + /// Bootstrap server maintained by `BitComet`. BitComet, /// Bootstrap server maintained by Transmission. 
Transmission, @@ -39,6 +39,11 @@ impl Router { // } // } + /// Returns the [`SocketAddrV4`] of this [`Router`]. + /// + /// # Errors + /// + /// This function will return an error if unable to find any ipv4 address. pub fn ipv4_addr(&self) -> io::Result { let mut addrs = self.socket_addrs()?; @@ -47,6 +52,11 @@ impl Router { .ok_or(Error::new(ErrorKind::Other, "No IPv4 Addresses Found For Host")) } + /// Returns the [`SocketAddrV6`] of this [`Router`]. + /// + /// # Errors + /// + /// This function will return an error if unable to find any ipv6 address. pub fn ipv6_addr(&self) -> io::Result { let mut addrs = self.socket_addrs()?; @@ -55,6 +65,11 @@ impl Router { .ok_or(Error::new(ErrorKind::Other, "No IPv6 Addresses Found For Host")) } + /// Returns the [`SocketAddr`] of this [`Router`]. + /// + /// # Errors + /// + /// This function will return an error if unable to find a socket address. pub fn socket_addr(&self) -> io::Result { let mut addrs = self.socket_addrs()?; diff --git a/packages/dht/src/routing/bucket.rs b/packages/dht/src/routing/bucket.rs index 086835571..a7acece11 100644 --- a/packages/dht/src/routing/bucket.rs +++ b/packages/dht/src/routing/bucket.rs @@ -110,6 +110,7 @@ impl<'a> GoodNodes<'a> { } } +#[allow(clippy::trivially_copy_pass_by_ref)] fn good_nodes_filter(node: &&Node) -> bool { node.status() == NodeStatus::Good } @@ -136,6 +137,7 @@ impl<'a> PingableNodes<'a> { } } +#[allow(clippy::trivially_copy_pass_by_ref)] fn pingable_nodes_filter(node: &&Node) -> bool { // Function is moderately expensive let status = node.status(); @@ -174,6 +176,7 @@ mod tests { let mut bucket = Bucket::new(); let dummy_addr = bip_test::dummy_socket_addr_v4(); + #[allow(clippy::cast_possible_truncation)] let dummy_ids = bip_test::dummy_block_node_ids(super::MAX_BUCKET_SIZE as u8); for &id in dummy_ids.iter().take(super::MAX_BUCKET_SIZE) { let node = Node::as_questionable(id, dummy_addr); @@ -189,6 +192,7 @@ mod tests { let mut bucket = Bucket::new(); let dummy_addr
= bip_test::dummy_socket_addr_v4(); + #[allow(clippy::cast_possible_truncation)] let dummy_ids = bip_test::dummy_block_node_ids(super::MAX_BUCKET_SIZE as u8); for &id in dummy_ids.iter().take(super::MAX_BUCKET_SIZE) { let node = Node::as_good(id, dummy_addr); @@ -204,6 +208,7 @@ mod tests { let mut bucket = Bucket::new(); let dummy_addr = bip_test::dummy_socket_addr_v4(); + #[allow(clippy::cast_possible_truncation)] let dummy_ids = bip_test::dummy_block_node_ids(super::MAX_BUCKET_SIZE as u8); for &id in dummy_ids.iter().take(super::MAX_BUCKET_SIZE) { let node = Node::as_questionable(id, dummy_addr); @@ -226,6 +231,7 @@ mod tests { let mut bucket = Bucket::new(); let dummy_addr = bip_test::dummy_socket_addr_v4(); + #[allow(clippy::cast_possible_truncation)] let dummy_ids = bip_test::dummy_block_node_ids((super::MAX_BUCKET_SIZE as u8) + 1); for &id in dummy_ids.iter().take(super::MAX_BUCKET_SIZE) { let node = Node::as_good(id, dummy_addr); @@ -254,6 +260,7 @@ mod tests { let mut bucket = Bucket::new(); let dummy_addr = bip_test::dummy_socket_addr_v4(); + #[allow(clippy::cast_possible_truncation)] let dummy_ids = bip_test::dummy_block_node_ids((super::MAX_BUCKET_SIZE as u8) + 1); for &id in dummy_ids.iter().take(super::MAX_BUCKET_SIZE) { let node = Node::as_questionable(id, dummy_addr); diff --git a/packages/dht/src/routing/node.rs b/packages/dht/src/routing/node.rs index bb3a1e2b1..cef493d04 100644 --- a/packages/dht/src/routing/node.rs +++ b/packages/dht/src/routing/node.rs @@ -29,6 +29,7 @@ const MAX_REFRESH_REQUESTS: usize = 2; /// Status of the node. /// Ordering of the enumerations is important, variants higher /// up are considered to be less than those further down. 
+#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Ord, PartialOrd)] pub enum NodeStatus { Bad, @@ -130,14 +131,17 @@ impl Node { *dst = *src; } } - _ => panic!("bip_dht: Cannot encode a SocketAddrV6..."), + SocketAddr::V6(_) => panic!("bip_dht: Cannot encode a SocketAddrV6..."), } } // Copy the port over let port = self.addr.port(); encoded[24] = (port >> 8) as u8; - encoded[25] = port as u8; + + #[allow(clippy::cast_possible_truncation)] + let port = port as u8; + encoded[25] = port; encoded } @@ -274,6 +278,7 @@ mod tests { let encoded_node = node.encode(); + #[allow(clippy::cast_possible_truncation)] let port_bytes = [(port >> 8) as u8, port as u8]; for (expected, actual) in node_id .iter() diff --git a/packages/dht/src/routing/table.rs b/packages/dht/src/routing/table.rs index dc4643e84..cdf02a0cf 100644 --- a/packages/dht/src/routing/table.rs +++ b/packages/dht/src/routing/table.rs @@ -16,6 +16,7 @@ pub const MAX_BUCKETS: usize = sha::SHA_HASH_LEN * 8; /// Routing table containing a table of routing nodes as well /// as the id of the local node participating in the dht. +#[allow(clippy::module_name_repetitions)] pub struct RoutingTable { // Important: Our node id will always fall within the range // of the last bucket in the buckets array. @@ -61,8 +62,7 @@ impl RoutingTable { } else { // Grab the assorted bucket (if it exists) self.buckets().find(|c| match *c { - BucketContents::Empty => false, - BucketContents::Sorted(_) => false, + BucketContents::Sorted(_) | BucketContents::Empty => false, BucketContents::Assorted(_) => true, }) }; @@ -76,7 +76,7 @@ impl RoutingTable { } /// Add the node to the `RoutingTable` if there is space for it. 
- pub fn add_node(&mut self, node: Node) { + pub fn add_node(&mut self, node: &Node) { // Doing some checks and calculations here, outside of the recursion if node.status() == NodeStatus::Bad { return; @@ -90,7 +90,7 @@ impl RoutingTable { } /// Recursively tries to place the node into some bucket. - fn bucket_node(&mut self, node: Node, num_same_bits: usize) { + fn bucket_node(&mut self, node: &Node, num_same_bits: usize) { let bucket_index = bucket_placement(num_same_bits, self.buckets.len()); // Try to place in correct bucket @@ -98,7 +98,7 @@ impl RoutingTable { // Bucket was full, try to split it if self.split_bucket(bucket_index) { // Bucket split successfully, try to add again - self.bucket_node(node.clone(), num_same_bits); + self.bucket_node(node, num_same_bits); } } } @@ -114,9 +114,8 @@ impl RoutingTable { // Implementation is easier if we just remove the whole bucket, pretty // cheap to copy and we can manipulate the new buckets while they are // in the RoutingTable already. 
- let split_bucket = match self.buckets.pop() { - Some(bucket) => bucket, - None => panic!("No buckets present in RoutingTable, implementation error..."), + let Some(split_bucket) = self.buckets.pop() else { + panic!("No buckets present in RoutingTable, implementation error...") }; // Push two more buckets to distribute nodes between @@ -124,7 +123,7 @@ impl RoutingTable { self.buckets.push(Bucket::new()); for node in split_bucket.iter() { - self.add_node(node.clone()); + self.add_node(node); } true @@ -213,6 +212,7 @@ impl<'a> Buckets<'a> { } } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for Buckets<'a> { type Item = BucketContents<'a>; @@ -367,6 +367,7 @@ fn good_node_filter(iter: Iter<'_, Node>) -> GoodNodes<'_> { } /// Shakes fist at iterator making me take a double reference (could avoid it by mapping, but oh well) +#[allow(clippy::trivially_copy_pass_by_ref)] fn is_good_node(node: &&Node) -> bool { let status = node.status(); @@ -467,11 +468,12 @@ mod tests { // Trigger a bucket overflow and since the ids are placed in the last bucket, all of // the buckets will be recursively created and inserted into the list of all buckets. 
+ #[allow(clippy::cast_possible_truncation)] let block_addrs = bip_test::dummy_block_socket_addrs((bucket::MAX_BUCKET_SIZE + 1) as u16); for &addr in block_addrs.iter().take(bucket::MAX_BUCKET_SIZE + 1) { let node = Node::as_good(node_id.into(), addr); - table.add_node(node); + table.add_node(&node); } } @@ -503,11 +505,12 @@ mod tests { // Flip first bit so we are placed in the first bucket node_id[0] |= 128; + #[allow(clippy::cast_possible_truncation)] let block_addrs = bip_test::dummy_block_socket_addrs((bucket::MAX_BUCKET_SIZE + 1) as u16); for &addr in block_addrs.iter().take(bucket::MAX_BUCKET_SIZE + 1) { let node = Node::as_good(node_id.into(), addr); - table.add_node(node); + table.add_node(&node); } // First bucket should be sorted @@ -515,7 +518,7 @@ mod tests { for bucket in table.buckets().take(1) { match bucket { BucketContents::Sorted(b) => { - assert_eq!(b.pingable_nodes().count(), bucket::MAX_BUCKET_SIZE) + assert_eq!(b.pingable_nodes().count(), bucket::MAX_BUCKET_SIZE); } _ => panic!("Expected BucketContents::Sorted"), } @@ -551,11 +554,12 @@ mod tests { // Flip last bit so we are placed in the last bucket node_id[bt::NODE_ID_LEN - 1] = 0; + #[allow(clippy::cast_possible_truncation)] let block_addrs = bip_test::dummy_block_socket_addrs((bucket::MAX_BUCKET_SIZE + 1) as u16); for &addr in block_addrs.iter().take(bucket::MAX_BUCKET_SIZE + 1) { let node = Node::as_good(node_id.into(), addr); - table.add_node(node); + table.add_node(&node); } // First buckets should be sorted (although they are all empty) @@ -572,7 +576,7 @@ mod tests { for bucket in table.buckets().skip(table::MAX_BUCKETS - 1).take(1) { match bucket { BucketContents::Sorted(b) => { - assert_eq!(b.pingable_nodes().count(), bucket::MAX_BUCKET_SIZE) + assert_eq!(b.pingable_nodes().count(), bucket::MAX_BUCKET_SIZE); } _ => panic!("Expected BucketContents::Sorted"), } @@ -587,12 +591,13 @@ mod tests { let table_id = [1u8; bt::NODE_ID_LEN]; let mut table = RoutingTable::new(table_id.into()); 
+ #[allow(clippy::cast_possible_truncation)] let block_addrs = bip_test::dummy_block_socket_addrs(bucket::MAX_BUCKET_SIZE as u16); for bit_flip_index in 0..table::MAX_BUCKETS { for &addr in &block_addrs { let bucket_node_id = flip_id_bit_at_index(table_id.into(), bit_flip_index); - table.add_node(Node::as_good(bucket_node_id, addr)); + table.add_node(&Node::as_good(bucket_node_id, addr)); } } @@ -600,7 +605,7 @@ mod tests { for bucket in table.buckets() { match bucket { BucketContents::Sorted(b) => { - assert_eq!(b.pingable_nodes().count(), bucket::MAX_BUCKET_SIZE) + assert_eq!(b.pingable_nodes().count(), bucket::MAX_BUCKET_SIZE); } _ => panic!("Expected BucketContents::Sorted"), } @@ -615,7 +620,7 @@ mod tests { assert_eq!(table.closest_nodes(table_id.into()).count(), 0); let node = Node::as_good(table_id.into(), bip_test::dummy_socket_addr_v4()); - table.add_node(node); + table.add_node(&node); assert_eq!(table.closest_nodes(table_id.into()).count(), 0); } diff --git a/packages/dht/src/security.rs b/packages/dht/src/security.rs index 849a1eb65..72306083d 100644 --- a/packages/dht/src/security.rs +++ b/packages/dht/src/security.rs @@ -37,7 +37,10 @@ fn generate_compliant_id(masked_ip_be: u64, num_octets: usize, rand: u8) -> [u8; let mut node_id = [0u8; bt::NODE_ID_LEN]; node_id[0] = (crc32c_result >> 24) as u8; - node_id[1] = (crc32c_result >> 16) as u8; + + #[allow(clippy::cast_possible_truncation)] + let crc32c_result_1 = (crc32c_result >> 16) as u8; + node_id[1] = crc32c_result_1; node_id[2] = (((crc32c_result >> 8) & 0xF8) as u8) | (rand::random::() & 0x07); for byte in &mut node_id[3..19] { *byte = rand::random::(); @@ -107,6 +110,7 @@ fn is_compliant_addr(masked_ip_be: u64, num_octets: usize, id: NodeId) -> bool { /// /// We don't have to check the last byte of the node id since we used that byte to generate /// the `crc32c_result`. 
+#[allow(clippy::cast_possible_truncation)] fn is_compliant_id(crc32c_result: u32, id_bytes: [u8; bt::NODE_ID_LEN]) -> bool { let mut is_compliant = true; is_compliant = is_compliant && (id_bytes[0] == ((crc32c_result >> 24) as u8)); diff --git a/packages/dht/src/storage.rs b/packages/dht/src/storage.rs index 744fd405f..61601f9de 100644 --- a/packages/dht/src/storage.rs +++ b/packages/dht/src/storage.rs @@ -8,6 +8,7 @@ use util::bt::InfoHash; const MAX_ITEMS_STORED: usize = 500; /// Manages storage and expiration of contact information for a number of `InfoHash`(s). +#[allow(clippy::module_name_repetitions)] pub struct AnnounceStorage { storage: HashMap>, expires: Vec, @@ -55,7 +56,7 @@ impl AnnounceStorage { where F: FnMut(SocketAddr), { - self.find(info_hash, item_func, Utc::now()) + self.find(info_hash, item_func, Utc::now()); } fn find(&mut self, info_hash: &InfoHash, mut item_func: F, curr_time: DateTime) @@ -99,8 +100,7 @@ impl AnnounceStorage { Some(false) } (false, false) => None, - (true, false) => Some(true), - (true, true) => Some(true), + (true, false | true) => Some(true), } } @@ -224,6 +224,7 @@ mod tests { fn positive_add_and_retrieve_contacts() { let mut announce_store = AnnounceStorage::new(); let info_hash = [0u8; bt::INFO_HASH_LEN].into(); + #[allow(clippy::cast_possible_truncation)] let sock_addrs = bip_test::dummy_block_socket_addrs(storage::MAX_ITEMS_STORED as u16); for sock_addr in &sock_addrs { @@ -243,6 +244,7 @@ mod tests { fn positive_renew_contacts() { let mut announce_store = AnnounceStorage::new(); let info_hash = [0u8; bt::INFO_HASH_LEN].into(); + #[allow(clippy::cast_possible_truncation)] let sock_addrs = bip_test::dummy_block_socket_addrs((storage::MAX_ITEMS_STORED + 1) as u16); for sock_addr in sock_addrs.iter().take(storage::MAX_ITEMS_STORED) { @@ -269,6 +271,7 @@ mod tests { fn positive_full_storage_expire_one_infohash() { let mut announce_store = AnnounceStorage::new(); let info_hash = [0u8; bt::INFO_HASH_LEN].into(); + 
#[allow(clippy::cast_possible_truncation)] let sock_addrs = bip_test::dummy_block_socket_addrs((storage::MAX_ITEMS_STORED + 1) as u16); // Fill up the announce storage completely @@ -299,6 +302,7 @@ mod tests { let mut announce_store = AnnounceStorage::new(); let info_hash_one = [0u8; bt::INFO_HASH_LEN].into(); let info_hash_two = [1u8; bt::INFO_HASH_LEN].into(); + #[allow(clippy::cast_possible_truncation)] let sock_addrs = bip_test::dummy_block_socket_addrs((storage::MAX_ITEMS_STORED + 1) as u16); // Fill up first info hash diff --git a/packages/dht/src/token.rs b/packages/dht/src/token.rs index b069cef76..c34aa0abe 100644 --- a/packages/dht/src/token.rs +++ b/packages/dht/src/token.rs @@ -2,7 +2,7 @@ use std::net::{Ipv4Addr, Ipv6Addr}; use chrono::{DateTime, Duration, Utc}; use util::convert; -use util::error::{LengthError, LengthErrorKind, LengthResult}; +use util::error::{Error, LengthErrorKind, LengthResult}; use util::net::IpAddr; use util::sha::{self, ShaHash}; @@ -33,15 +33,15 @@ pub struct Token { impl Token { pub fn new(bytes: &[u8]) -> LengthResult { - if bytes.len() != sha::SHA_HASH_LEN { - Err(LengthError::new(LengthErrorKind::LengthExpected, sha::SHA_HASH_LEN)) - } else { + if bytes.len() == sha::SHA_HASH_LEN { let mut token = [0u8; sha::SHA_HASH_LEN]; for (src, dst) in bytes.iter().zip(token.iter_mut()) { *dst = *src; } Ok(Token::from(token)) + } else { + Err(Error::new(LengthErrorKind::LengthExpected, sha::SHA_HASH_LEN)) } } } @@ -66,6 +66,7 @@ impl AsRef<[u8]> for Token { // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone)] pub struct TokenStore { curr_secret: u32, @@ -249,7 +250,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "assertion failed: store.checkin(v4_addr, valid_token)")] fn negative_reject_expired_v4_token() { let mut store = TokenStore::new(); let v4_addr = bip_test::dummy_ipv4_addr(); @@ -264,7 +265,7 @@ mod tests { 
} #[test] - #[should_panic] + #[should_panic(expected = "assertion failed: store.checkin(v6_addr, valid_token)")] fn negative_reject_expired_v6_token() { let mut store = TokenStore::new(); let v6_addr = bip_test::dummy_ipv6_addr(); diff --git a/packages/dht/src/transaction.rs b/packages/dht/src/transaction.rs index 782a41e3e..abf1e85eb 100644 --- a/packages/dht/src/transaction.rs +++ b/packages/dht/src/transaction.rs @@ -188,6 +188,7 @@ fn generate_mids(next_alloc: u64) -> (u64, [u64; MESSAGE_ID_PREALLOC_LEN]) { // ----------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct TransactionID { trans_id: u64, diff --git a/packages/dht/src/worker/bootstrap.rs b/packages/dht/src/worker/bootstrap.rs index 94270af5f..ff2bdeae6 100644 --- a/packages/dht/src/worker/bootstrap.rs +++ b/packages/dht/src/worker/bootstrap.rs @@ -20,6 +20,7 @@ const BOOTSTRAP_NODE_TIMEOUT: u64 = 500; const BOOTSTRAP_PINGS_PER_BUCKET: usize = 8; +#[allow(clippy::module_name_repetitions)] #[derive(Debug, PartialEq, Eq)] pub enum BootstrapStatus { /// Bootstrap has been finished. 
@@ -32,6 +33,7 @@ pub enum BootstrapStatus { Failed, } +#[allow(clippy::module_name_repetitions)] pub struct TableBootstrap { table_id: NodeId, id_generator: MIDGenerator, @@ -78,9 +80,7 @@ impl TableBootstrap { (BOOTSTRAP_INITIAL_TIMEOUT, ScheduledTaskCheck::BootstrapTimeout(trans_id)), BOOTSTRAP_INITIAL_TIMEOUT, ); - let timeout = if let Ok(t) = res_timeout { - t - } else { + let Ok(timeout) = res_timeout else { error!("bip_dht: Failed to set a timeout for the start of a table bootstrap..."); return BootstrapStatus::Failed; }; @@ -197,8 +197,7 @@ impl TableBootstrap { let percent_25_bucket = if let Some(bucket) = buckets.next() { match bucket { BucketContents::Empty => dummy_bucket.iter(), - BucketContents::Sorted(b) => b.iter(), - BucketContents::Assorted(b) => b.iter(), + BucketContents::Sorted(b) | BucketContents::Assorted(b) => b.iter(), } } else { dummy_bucket.iter() @@ -206,8 +205,7 @@ impl TableBootstrap { let percent_50_bucket = if let Some(bucket) = buckets.next() { match bucket { BucketContents::Empty => dummy_bucket.iter(), - BucketContents::Sorted(b) => b.iter(), - BucketContents::Assorted(b) => b.iter(), + BucketContents::Sorted(b) | BucketContents::Assorted(b) => b.iter(), } } else { dummy_bucket.iter() @@ -215,8 +213,7 @@ impl TableBootstrap { let percent_100_bucket = if let Some(bucket) = buckets.next() { match bucket { BucketContents::Empty => dummy_bucket.iter(), - BucketContents::Sorted(b) => b.iter(), - BucketContents::Assorted(b) => b.iter(), + BucketContents::Sorted(b) | BucketContents::Assorted(b) => b.iter(), } } else { dummy_bucket.iter() @@ -261,9 +258,7 @@ impl TableBootstrap { (BOOTSTRAP_NODE_TIMEOUT, ScheduledTaskCheck::BootstrapTimeout(trans_id)), BOOTSTRAP_NODE_TIMEOUT, ); - let timeout = if let Ok(t) = res_timeout { - t - } else { + let Ok(timeout) = res_timeout else { error!("bip_dht: Failed to set a timeout for the start of a table bootstrap..."); return BootstrapStatus::Failed; }; diff --git a/packages/dht/src/worker/handler.rs 
b/packages/dht/src/worker/handler.rs index 5eb4babd2..0ba9d901e 100644 --- a/packages/dht/src/worker/handler.rs +++ b/packages/dht/src/worker/handler.rs @@ -38,6 +38,7 @@ const MAX_BOOTSTRAP_ATTEMPTS: usize = 3; const BOOTSTRAP_GOOD_NODE_THRESHOLD: usize = 10; /// Spawns a DHT handler that maintains our routing table and executes our actions on the DHT. +#[allow(clippy::module_name_repetitions)] pub fn create_dht_handler( table: RoutingTable, out: SyncSender<(Vec, SocketAddr)>, @@ -102,6 +103,7 @@ enum PostBootstrapAction { } /// Storage for our `EventLoop` to invoke actions upon. +#[allow(clippy::module_name_repetitions)] pub struct DhtHandler { detached: DetachedDhtHandler, table_actions: HashMap, @@ -323,6 +325,7 @@ where // ----------------------------------------------------------------------------// +#[allow(clippy::too_many_lines)] fn handle_incoming(handler: &mut DhtHandler, event_loop: &mut EventLoop>, buffer: &[u8], addr: SocketAddr) where H: HandshakerTrait, @@ -330,9 +333,7 @@ where let (work_storage, table_actions) = (&mut handler.detached, &mut handler.table_actions); // Parse the buffer as a bencoded message - let bencode = if let Ok(b) = BencodeRef::decode(buffer, BDecodeOpt::default()) { - b - } else { + let Ok(bencode) = BencodeRef::decode(buffer, BDecodeOpt::default()) else { warn!("bip_dht: Received invalid bencode data..."); return; }; @@ -341,17 +342,14 @@ where // Check to make sure we issued the transaction id (or that it is still valid) let message = MessageType::>::new(&bencode, |trans| { // Check if we can interpret the response transaction id as one of ours. 
- let trans_id = if let Some(t) = TransactionID::from_bytes(trans) { - t - } else { + let Some(trans_id) = TransactionID::from_bytes(trans) else { return ExpectedResponse::None; }; // Match the response action id with our current actions match table_actions.get(&trans_id.action_id()) { Some(&TableAction::Lookup(_)) => ExpectedResponse::GetPeers, - Some(&TableAction::Refresh(_)) => ExpectedResponse::FindNode, - Some(&TableAction::Bootstrap(_, _)) => ExpectedResponse::FindNode, + Some(&TableAction::Refresh(_) | &TableAction::Bootstrap(_, _)) => ExpectedResponse::FindNode, None => ExpectedResponse::None, } }); @@ -374,7 +372,7 @@ where // Node requested from us, mark it in the routing table if let Some(n) = work_storage.routing_table.find_node(&node) { - n.remote_request() + n.remote_request(); } let ping_rsp = PingResponse::new(p.transaction_id(), work_storage.routing_table.node_id()); @@ -391,7 +389,7 @@ where // Node requested from us, mark it in the routing table if let Some(n) = work_storage.routing_table.find_node(&node) { - n.remote_request() + n.remote_request(); } // Grab the closest nodes @@ -415,7 +413,7 @@ where // Node requested from us, mark it in the routing table if let Some(n) = work_storage.routing_table.find_node(&node) { - n.remote_request() + n.remote_request(); } // TODO: Move socket address serialization code into bip_util @@ -459,13 +457,13 @@ where // Wrap up the nodes/values we are going to be giving them let token = work_storage.token_store.checkout(IpAddr::from_socket_addr(addr)); - let compact_info_type = if !contact_info_bencode.is_empty() { + let compact_info_type = if contact_info_bencode.is_empty() { + CompactInfoType::Nodes(CompactNodeInfo::new(&closest_nodes_bytes).unwrap()) + } else { CompactInfoType::>::Both( CompactNodeInfo::new(&closest_nodes_bytes).unwrap(), CompactValueInfo::new(&contact_info_bencode).unwrap(), ) - } else { - CompactInfoType::Nodes(CompactNodeInfo::new(&closest_nodes_bytes).unwrap()) }; let get_peers_rsp = 
GetPeersResponse::>::new( @@ -487,7 +485,7 @@ where // Node requested from us, mark it in the routing table if let Some(n) = work_storage.routing_table.find_node(&node) { - n.remote_request() + n.remote_request(); } // Validate the token @@ -549,18 +547,18 @@ where for (id, v4_addr) in f.nodes() { let sock_addr = SocketAddr::V4(v4_addr); - work_storage.routing_table.add_node(Node::as_questionable(id, sock_addr)); + work_storage.routing_table.add_node(&Node::as_questionable(id, sock_addr)); } let bootstrap_complete = { let opt_bootstrap = match table_actions.get_mut(&trans_id.action_id()) { Some(&mut TableAction::Refresh(_)) => { - work_storage.routing_table.add_node(node); + work_storage.routing_table.add_node(&node); None } Some(&mut TableAction::Bootstrap(ref mut bootstrap, ref mut attempts)) => { if !bootstrap.is_router(&node.addr()) { - work_storage.routing_table.add_node(node); + work_storage.routing_table.add_node(&node); } Some((bootstrap, attempts)) } @@ -626,7 +624,7 @@ where let trans_id = TransactionID::from_bytes(g.transaction_id()).unwrap(); let node = Node::as_good(g.node_id(), addr); - work_storage.routing_table.add_node(node.clone()); + work_storage.routing_table.add_node(&node); let opt_lookup = { match table_actions.get_mut(&trans_id.action_id()) { @@ -734,9 +732,9 @@ fn handle_start_bootstrap( BootstrapStatus::Completed => { // Check if our bootstrap was actually good if should_rebootstrap(&work_storage.routing_table) { - let (bootstrap, attempts) = match table_actions.get_mut(&action_id) { - Some(&mut TableAction::Bootstrap(ref mut bootstrap, ref mut attempts)) => (bootstrap, attempts), - _ => panic!("bip_dht: Bug, in DhtHandler..."), + let Some(&mut TableAction::Bootstrap(ref mut bootstrap, ref mut attempts)) = table_actions.get_mut(&action_id) + else { + panic!("bip_dht: Bug, in DhtHandler...") }; attempt_rebootstrap(bootstrap, attempts, work_storage, event_loop) == Some(false) @@ -834,8 +832,7 @@ fn handle_check_table_refresh( }; match 
opt_refresh_status { - None => (), - Some(RefreshStatus::Refreshing) => (), + Some(RefreshStatus::Refreshing) | None => (), Some(RefreshStatus::Failed) => shutdown_event_loop(event_loop, ShutdownCause::Unspecified), } } @@ -880,9 +877,8 @@ fn handle_check_bootstrap_timeout( }; match opt_bootstrap_info { - None => false, Some((BootstrapStatus::Idle, _, _)) => true, - Some((BootstrapStatus::Bootstrapping, _, _)) => false, + Some((BootstrapStatus::Bootstrapping, _, _)) | None => false, Some((BootstrapStatus::Failed, _, _)) => { shutdown_event_loop(event_loop, ShutdownCause::Unspecified); false @@ -938,10 +934,9 @@ where }; match opt_lookup_info { - None => (), - Some((LookupStatus::Searching, _)) => (), + Some((LookupStatus::Searching, _)) | None => (), Some((LookupStatus::Completed, info_hash)) => { - broadcast_dht_event(&mut work_storage.event_notifiers, DhtEvent::LookupCompleted(info_hash)) + broadcast_dht_event(&mut work_storage.event_notifiers, DhtEvent::LookupCompleted(info_hash)); } Some((LookupStatus::Failed, _)) => shutdown_event_loop(event_loop, ShutdownCause::Unspecified), Some((LookupStatus::Values(v), info_hash)) => { @@ -994,10 +989,9 @@ where }; match opt_lookup_info { - None => (), - Some((LookupStatus::Searching, _)) => (), + Some((LookupStatus::Searching, _)) | None => (), Some((LookupStatus::Completed, info_hash)) => { - broadcast_dht_event(&mut work_storage.event_notifiers, DhtEvent::LookupCompleted(info_hash)) + broadcast_dht_event(&mut work_storage.event_notifiers, DhtEvent::LookupCompleted(info_hash)); } Some((LookupStatus::Failed, _)) => shutdown_event_loop(event_loop, ShutdownCause::Unspecified), Some((LookupStatus::Values(v), info_hash)) => { diff --git a/packages/dht/src/worker/lookup.rs b/packages/dht/src/worker/lookup.rs index 288e2d427..f79152d78 100644 --- a/packages/dht/src/worker/lookup.rs +++ b/packages/dht/src/worker/lookup.rs @@ -36,6 +36,7 @@ const ANNOUNCE_PICK_NUM: usize = 8; // # Announces type Distance = ShaHash; type 
DistanceToBeat = ShaHash; +#[allow(clippy::module_name_repetitions)] #[derive(Debug, PartialEq, Eq)] pub enum LookupStatus { Searching, @@ -44,6 +45,7 @@ pub enum LookupStatus { Failed, } +#[allow(clippy::module_name_repetitions)] pub struct TableLookup { table_id: NodeId, target_id: InfoHash, @@ -111,10 +113,10 @@ impl TableLookup { }; // Call start_request_round with the list of initial_nodes (return even if the search completed...for now :D) - if table_lookup.start_request_round(initial_pick_nodes_filtered, table, out, event_loop) != LookupStatus::Failed { - Some(table_lookup) - } else { + if table_lookup.start_request_round(initial_pick_nodes_filtered, table, out, event_loop) == LookupStatus::Failed { None + } else { + Some(table_lookup) } } @@ -137,9 +139,7 @@ impl TableLookup { B::BType: PartialEq + Eq + core::hash::Hash + Debug, { // Process the message transaction id - let (dist_to_beat, timeout) = if let Some(lookup) = self.active_lookups.remove(trans_id) { - lookup - } else { + let Some((dist_to_beat, timeout)) = self.active_lookups.remove(trans_id) else { warn!( "bip_dht: Received expired/unsolicited node response for an active table \ lookup..." 
@@ -317,7 +317,7 @@ impl TableLookup { if !fatal_error { // We requested from the node, mark it down if the node is in our routing table if let Some(n) = table.find_node(node) { - n.local_request() + n.local_request(); } } } @@ -361,9 +361,7 @@ impl TableLookup { // Try to start a timeout for the node let res_timeout = event_loop.timeout_ms((0, ScheduledTaskCheck::LookupTimeout(trans_id)), LOOKUP_TIMEOUT_MS); - let timeout = if let Ok(t) = res_timeout { - t - } else { + let Ok(timeout) = res_timeout else { error!("bip_dht: Failed to set a timeout for a table lookup..."); return LookupStatus::Failed; }; @@ -383,7 +381,7 @@ impl TableLookup { // Update the node in the routing table if let Some(n) = table.find_node(node) { - n.local_request() + n.local_request(); } messages_sent += 1; @@ -414,9 +412,7 @@ impl TableLookup { (0, ScheduledTaskCheck::LookupEndGame(self.id_generator.generate())), ENDGAME_TIMEOUT_MS, ); - let timeout = if let Ok(t) = res_timeout { - t - } else { + let Ok(timeout) = res_timeout else { error!("bip_dht: Failed to set a timeout for table lookup endgame..."); return LookupStatus::Failed; }; @@ -443,7 +439,7 @@ impl TableLookup { // Mark that we requested from the node in the RoutingTable if let Some(n) = table.find_node(node) { - n.local_request() + n.local_request(); } // Mark that we requested from the node @@ -500,12 +496,7 @@ fn insert_closest_nodes(nodes: &mut [(Node, bool)], target_id: InfoHash, new_nod let new_distance = target_id ^ new_node.id(); for &mut (ref mut old_node, ref mut used) in &mut *nodes { - if !*used { - // Slot was not in use, go ahead and place the node - *old_node = new_node; - *used = true; - return; - } else { + if *used { // Slot is in use, see if our node is closer to the target let old_distance = target_id ^ old_node.id(); @@ -513,6 +504,11 @@ fn insert_closest_nodes(nodes: &mut [(Node, bool)], target_id: InfoHash, new_nod *old_node = new_node; return; } + } else { + // Slot was not in use, go ahead and place the 
node + *old_node = new_node; + *used = true; + return; } } } diff --git a/packages/dht/src/worker/messenger.rs b/packages/dht/src/worker/messenger.rs index 1e88dacf5..dd237c895 100644 --- a/packages/dht/src/worker/messenger.rs +++ b/packages/dht/src/worker/messenger.rs @@ -9,6 +9,7 @@ use crate::worker::OneshotTask; const OUTGOING_MESSAGE_CAPACITY: usize = 4096; +#[allow(clippy::module_name_repetitions)] pub fn create_outgoing_messenger(socket: UdpSocket) -> SyncSender<(Vec, SocketAddr)> { let (send, recv) = mpsc::sync_channel::<(Vec, SocketAddr)>(OUTGOING_MESSAGE_CAPACITY); @@ -43,6 +44,7 @@ fn send_bytes(socket: &UdpSocket, bytes: &[u8], addr: SocketAddr) { } } +#[allow(clippy::module_name_repetitions)] pub fn create_incoming_messenger(socket: UdpSocket, send: Sender) { thread::spawn(move || { let mut channel_is_open = true; @@ -50,13 +52,12 @@ pub fn create_incoming_messenger(socket: UdpSocket, send: Sender) { while channel_is_open { let mut buffer = vec![0u8; 1500]; - match socket.recv_from(&mut buffer) { - Ok((size, addr)) => { - buffer.truncate(size); - channel_is_open = send_message(&send, buffer, addr); - } - Err(_) => warn!("bip_dht: Incoming messenger failed to receive bytes..."), - } + if let Ok((size, addr)) = socket.recv_from(&mut buffer) { + buffer.truncate(size); + channel_is_open = send_message(&send, buffer, addr); + } else { + warn!("bip_dht: Incoming messenger failed to receive bytes..."); + }; } info!("bip_dht: Incoming messenger received a channel hangup, exiting thread..."); diff --git a/packages/dht/src/worker/mod.rs b/packages/dht/src/worker/mod.rs index a1538015c..a99f338ae 100644 --- a/packages/dht/src/worker/mod.rs +++ b/packages/dht/src/worker/mod.rs @@ -20,11 +20,11 @@ pub mod refresh; pub enum OneshotTask { /// Process an incoming message from a remote node. Incoming(Vec, SocketAddr), - /// Register a sender to send DhtEvents to. + /// Register a sender to send `DhtEvents` to. 
RegisterSender(mpsc::Sender), /// Load a new bootstrap operation into worker storage. StartBootstrap(Vec, Vec), - /// Start a lookup for the given InfoHash. + /// Start a lookup for the given `InfoHash`. StartLookup(InfoHash, bool), /// Gracefully shutdown the DHT and associated workers. Shutdown(ShutdownCause), @@ -48,7 +48,7 @@ pub enum ScheduledTaskCheck { pub enum DhtEvent { /// DHT completed the bootstrap. BootstrapCompleted, - /// Lookup operation for the given InfoHash completed. + /// Lookup operation for the given `InfoHash` completed. LookupCompleted(InfoHash), /// DHT is shutting down for some reason. ShuttingDown(ShutdownCause), diff --git a/packages/dht/src/worker/refresh.rs b/packages/dht/src/worker/refresh.rs index 14705345f..0bae2945c 100644 --- a/packages/dht/src/worker/refresh.rs +++ b/packages/dht/src/worker/refresh.rs @@ -15,6 +15,7 @@ use crate::worker::ScheduledTaskCheck; const REFRESH_INTERVAL_TIMEOUT: u64 = 6000; +#[allow(clippy::module_name_repetitions)] pub enum RefreshStatus { /// Refresh is in progress. 
Refreshing, @@ -22,6 +23,7 @@ pub enum RefreshStatus { Failed, } +#[allow(clippy::module_name_repetitions)] pub struct TableRefresh { id_generator: MIDGenerator, curr_refresh_bucket: usize, diff --git a/packages/disk/benches/disk_benchmark.rs b/packages/disk/benches/disk_benchmark.rs index e3dfa5650..327ce1b0e 100644 --- a/packages/disk/benches/disk_benchmark.rs +++ b/packages/disk/benches/disk_benchmark.rs @@ -1,5 +1,4 @@ use std::fs; -use std::ops::DerefMut; use std::sync::{Arc, Mutex}; use bytes::BytesMut; @@ -104,11 +103,10 @@ where } } - for res_message in block_recv.lock().unwrap().deref_mut() { + for res_message in &mut *block_recv.lock().unwrap() { match res_message.unwrap() { ODiskMessage::BlockProcessed(_) => blocks_sent -= 1, - ODiskMessage::FoundGoodPiece(_, _) => (), - ODiskMessage::FoundBadPiece(_, _) => (), + ODiskMessage::FoundGoodPiece(_, _) | ODiskMessage::FoundBadPiece(_, _) => (), _ => panic!("Unexpected Message Received In process_blocks"), } diff --git a/packages/disk/src/disk/builder.rs b/packages/disk/src/disk/builder.rs index b833a9240..324b66df2 100644 --- a/packages/disk/src/disk/builder.rs +++ b/packages/disk/src/disk/builder.rs @@ -7,6 +7,7 @@ const DEFAULT_PENDING_SIZE: usize = 10; const DEFAULT_COMPLETED_SIZE: usize = 10; /// `DiskManagerBuilder` for building `DiskManager`s with different settings. +#[allow(clippy::module_name_repetitions)] pub struct DiskManagerBuilder { builder: Builder, pending_size: usize, diff --git a/packages/disk/src/disk/fs/cache/file_handle.rs b/packages/disk/src/disk/fs/cache/file_handle.rs index 1e54d9b5f..b5428d24a 100644 --- a/packages/disk/src/disk/fs/cache/file_handle.rs +++ b/packages/disk/src/disk/fs/cache/file_handle.rs @@ -11,6 +11,7 @@ use crate::disk::fs::FileSystem; /// This is especially useful for consumer computers that have anti-virus software /// installed, which will significantly increase the cost for opening any files /// (with windows built in anti virus, I saw 20x slow downs). 
+#[allow(clippy::module_name_repetitions)] pub struct FileHandleCache where F: FileSystem, diff --git a/packages/disk/src/disk/fs/mod.rs b/packages/disk/src/disk/fs/mod.rs index 3e1f230f4..46260a8fa 100644 --- a/packages/disk/src/disk/fs/mod.rs +++ b/packages/disk/src/disk/fs/mod.rs @@ -14,27 +14,47 @@ pub trait FileSystem { /// Open a file, create it if it does not exist. /// /// Intermediate directories will be created if necessary. + /// + /// # Errors + /// + /// It would return an IO error if there is an problem. fn open_file

(&self, path: P) -> io::Result where P: AsRef + Send + 'static; /// Sync the file. + /// + /// # Errors + /// + /// It would return an IO error if there is an problem. fn sync_file

(&self, path: P) -> io::Result<()> where P: AsRef + Send + 'static; /// Get the size of the file in bytes. + /// + /// # Errors + /// + /// It would return an IO error if there is an problem. fn file_size(&self, file: &Self::File) -> io::Result; /// Read the contents of the file at the given offset. /// /// On success, return the number of bytes read. + /// + /// # Errors + /// + /// It would return an IO error if there is an problem. fn read_file(&self, file: &mut Self::File, offset: u64, buffer: &mut [u8]) -> io::Result; /// Write the contents of the file at the given offset. /// /// On success, return the number of bytes written. If offset is /// past the current size of the file, zeroes will be filled in. + /// + /// # Errors + /// + /// It would return an IO error if there is an problem. fn write_file(&self, file: &mut Self::File, offset: u64, buffer: &[u8]) -> io::Result; } diff --git a/packages/disk/src/disk/fs/native.rs b/packages/disk/src/disk/fs/native.rs index fbaf4f3df..fc53638f4 100644 --- a/packages/disk/src/disk/fs/native.rs +++ b/packages/disk/src/disk/fs/native.rs @@ -8,6 +8,7 @@ use crate::disk::fs::FileSystem; // TODO: This should be sanitizing paths passed into it so they don't escape the base directory!!! /// File that exists on disk. +#[allow(clippy::module_name_repetitions)] pub struct NativeFile { file: File, } @@ -20,6 +21,7 @@ impl NativeFile { } /// File system that maps to the OS file system. 
+#[allow(clippy::module_name_repetitions)] pub struct NativeFileSystem { current_dir: PathBuf, } @@ -84,7 +86,12 @@ where Some(parent_dir) => { fs::create_dir_all(parent_dir)?; - OpenOptions::new().read(true).write(true).create(true).open(&path) + OpenOptions::new() + .read(true) + .write(true) + .create(true) + .truncate(false) + .open(&path) } None => Err(io::Error::new(io::ErrorKind::InvalidInput, "File Path Has No Paren't")), } diff --git a/packages/disk/src/disk/manager.rs b/packages/disk/src/disk/manager.rs index 96c9cd2ce..17c46b377 100644 --- a/packages/disk/src/disk/manager.rs +++ b/packages/disk/src/disk/manager.rs @@ -14,6 +14,7 @@ use crate::disk::tasks::context::DiskManagerContext; use crate::disk::{tasks, IDiskMessage, ODiskMessage}; /// `DiskManager` object which handles the storage of `Blocks` to the `FileSystem`. +#[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct DiskManager { sink: DiskManagerSink, diff --git a/packages/disk/src/disk/tasks/context.rs b/packages/disk/src/disk/tasks/context.rs index a147aeb70..21a3389b9 100644 --- a/packages/disk/src/disk/tasks/context.rs +++ b/packages/disk/src/disk/tasks/context.rs @@ -9,6 +9,7 @@ use util::bt::InfoHash; use crate::disk::tasks::helpers::piece_checker::PieceCheckerState; use crate::disk::ODiskMessage; +#[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct DiskManagerContext { torrents: Arc>>>, diff --git a/packages/disk/src/disk/tasks/helpers/piece_accessor.rs b/packages/disk/src/disk/tasks/helpers/piece_accessor.rs index 7b9a319ed..f81b78b83 100644 --- a/packages/disk/src/disk/tasks/helpers/piece_accessor.rs +++ b/packages/disk/src/disk/tasks/helpers/piece_accessor.rs @@ -66,6 +66,7 @@ where let actual_bytes_to_access = cmp::min(total_max_bytes_to_access, bytes_to_access); let offset = total_file_size - bytes_to_access; + #[allow(clippy::cast_possible_truncation)] let (begin, end) = ( total_bytes_accessed as usize, (total_bytes_accessed + actual_bytes_to_access) 
as usize, diff --git a/packages/disk/src/disk/tasks/helpers/piece_checker.rs b/packages/disk/src/disk/tasks/helpers/piece_checker.rs index 5f21e048d..8c1637c89 100644 --- a/packages/disk/src/disk/tasks/helpers/piece_checker.rs +++ b/packages/disk/src/disk/tasks/helpers/piece_checker.rs @@ -31,7 +31,7 @@ where let mut piece_checker = PieceChecker::with_state(fs, info_dict, &mut checker_state); piece_checker.validate_files_sizes()?; - piece_checker.fill_checker_state()?; + piece_checker.fill_checker_state(); piece_checker.calculate_diff()?; } @@ -52,25 +52,26 @@ where pub fn calculate_diff(self) -> io::Result<()> { let piece_length = self.info_dict.piece_length(); // TODO: Use Block Allocator - let mut piece_buffer = vec![0u8; piece_length as usize]; + let mut piece_buffer = vec![0u8; piece_length.try_into().unwrap()]; let info_dict = self.info_dict; let piece_accessor = PieceAccessor::new(&self.fs, self.info_dict); - self.checker_state.run_with_whole_pieces(piece_length as usize, |message| { - piece_accessor.read_piece(&mut piece_buffer[..message.block_length()], message)?; + self.checker_state + .run_with_whole_pieces(piece_length.try_into().unwrap(), |message| { + piece_accessor.read_piece(&mut piece_buffer[..message.block_length()], message)?; - let calculated_hash = InfoHash::from_bytes(&piece_buffer[..message.block_length()]); - let expected_hash = InfoHash::from_hash( - info_dict - .pieces() - .nth(message.piece_index() as usize) - .expect("bip_peer: Piece Checker Failed To Retrieve Expected Hash"), - ) - .expect("bip_peer: Wrong Length Of Expected Hash Received"); + let calculated_hash = InfoHash::from_bytes(&piece_buffer[..message.block_length()]); + let expected_hash = InfoHash::from_hash( + info_dict + .pieces() + .nth(message.piece_index().try_into().unwrap()) + .expect("bip_peer: Piece Checker Failed To Retrieve Expected Hash"), + ) + .expect("bip_peer: Wrong Length Of Expected Hash Received"); - Ok(calculated_hash == expected_hash) - })?; + 
Ok(calculated_hash == expected_hash) + })?; Ok(()) } @@ -79,7 +80,7 @@ where /// /// This is done once when a torrent file is added to see if we have any good pieces that /// the caller can use to skip (if the torrent was partially downloaded before). - fn fill_checker_state(&mut self) -> io::Result<()> { + fn fill_checker_state(&mut self) { let piece_length = self.info_dict.piece_length(); let total_bytes: u64 = self.info_dict.files().map(metainfo::File::length).sum(); @@ -87,16 +88,17 @@ where let last_piece_size = last_piece_size(self.info_dict); for piece_index in 0..full_pieces { - self.checker_state - .add_pending_block(BlockMetadata::with_default_hash(piece_index, 0, piece_length as usize)); + self.checker_state.add_pending_block(BlockMetadata::with_default_hash( + piece_index, + 0, + piece_length.try_into().unwrap(), + )); } if last_piece_size != 0 { self.checker_state .add_pending_block(BlockMetadata::with_default_hash(full_pieces, 0, last_piece_size)); } - - Ok(()) } /// Validates the file sizes for the given torrent file and block allocates them if they do not exist. @@ -145,12 +147,13 @@ fn last_piece_size(info_dict: &Info) -> usize { let piece_length = info_dict.piece_length(); let total_bytes: u64 = info_dict.files().map(metainfo::File::length).sum(); - (total_bytes % piece_length) as usize + (total_bytes % piece_length).try_into().unwrap() } // ----------------------------------------------------------------------------// /// Stores state for the `PieceChecker` between invocations. +#[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct PieceCheckerState { new_states: Vec, @@ -265,12 +268,12 @@ impl PieceCheckerState { /// True if the piece is ready to be hashed and checked (full) as good or not. 
fn piece_is_complete(total_blocks: usize, last_block_size: usize, piece_length: usize, messages: &[BlockMetadata]) -> bool { let is_single_message = messages.len() == 1; - let is_piece_length = messages.get(0).is_some_and(|message| message.block_length() == piece_length); + let is_piece_length = messages.first().is_some_and(|message| message.block_length() == piece_length); let is_last_block = messages - .get(0) + .first() .is_some_and(|message| message.piece_index() == (total_blocks - 1) as u64); let is_last_block_length = messages - .get(0) + .first() .is_some_and(|message| message.block_length() == last_block_size); is_single_message && (is_piece_length || (is_last_block && is_last_block_length)) @@ -299,12 +302,22 @@ fn merge_piece_messages(message_a: &BlockMetadata, message_b: &BlockMetadata) -> let end_to_take = cmp::max(end_a, end_b); let length = end_to_take - start_a; - Some(BlockMetadata::new(info_hash, piece_index, start_a, length as usize)) + Some(BlockMetadata::new( + info_hash, + piece_index, + start_a, + length.try_into().unwrap(), + )) } else if start_a >= start_b && start_a <= end_b { let end_to_take = cmp::max(end_a, end_b); let length = end_to_take - start_b; - Some(BlockMetadata::new(info_hash, piece_index, start_b, length as usize)) + Some(BlockMetadata::new( + info_hash, + piece_index, + start_b, + length.try_into().unwrap(), + )) } else { None } diff --git a/packages/disk/src/disk/tasks/mod.rs b/packages/disk/src/disk/tasks/mod.rs index 5649b773f..546cfbc07 100644 --- a/packages/disk/src/disk/tasks/mod.rs +++ b/packages/disk/src/disk/tasks/mod.rs @@ -28,24 +28,24 @@ where let info_hash = metainfo.info().info_hash(); match execute_add_torrent(metainfo, &context, &mut blocking_sender) { - Ok(_) => ODiskMessage::TorrentAdded(info_hash), + Ok(()) => ODiskMessage::TorrentAdded(info_hash), Err(err) => ODiskMessage::TorrentError(info_hash, err), } } IDiskMessage::RemoveTorrent(hash) => match execute_remove_torrent(hash, &context) { - Ok(_) => 
ODiskMessage::TorrentRemoved(hash), + Ok(()) => ODiskMessage::TorrentRemoved(hash), Err(err) => ODiskMessage::TorrentError(hash, err), }, IDiskMessage::SyncTorrent(hash) => match execute_sync_torrent(hash, &context) { - Ok(_) => ODiskMessage::TorrentSynced(hash), + Ok(()) => ODiskMessage::TorrentSynced(hash), Err(err) => ODiskMessage::TorrentError(hash, err), }, IDiskMessage::LoadBlock(mut block) => match execute_load_block(&mut block, &context) { - Ok(_) => ODiskMessage::BlockLoaded(block), + Ok(()) => ODiskMessage::BlockLoaded(block), Err(err) => ODiskMessage::LoadBlockError(block, err), }, IDiskMessage::ProcessBlock(block) => match execute_process_block(&block, &context, &mut blocking_sender) { - Ok(_) => ODiskMessage::BlockProcessed(block), + Ok(()) => ODiskMessage::BlockProcessed(block), Err(err) => ODiskMessage::ProcessBlockError(block, err), }, }; @@ -59,7 +59,7 @@ where Ok::<(), ()>(()) }) - .forget() + .forget(); } fn execute_add_torrent( @@ -132,7 +132,7 @@ where let piece_accessor = PieceAccessor::new(context.filesystem(), metainfo_file.info()); // Read The Piece In From The Filesystem - access_result = piece_accessor.read_piece(&mut *block, &metadata) + access_result = piece_accessor.read_piece(&mut *block, &metadata); }); if found_hash { @@ -163,7 +163,7 @@ where let piece_accessor = PieceAccessor::new(context.filesystem(), metainfo_file.info()); // Write Out Piece Out To The Filesystem And Recalculate The Diff - block_result = piece_accessor.write_piece(block, &metadata).and_then(|_| { + block_result = piece_accessor.write_piece(block, &metadata).and_then(|()| { checker_state.add_pending_block(metadata); PieceChecker::with_state(context.filesystem(), metainfo_file.info(), checker_state).calculate_diff() @@ -205,5 +205,5 @@ fn send_piece_diff( .flush() .expect("bip_disk: Failed To Flush Piece State Message"); } - }) + }); } diff --git a/packages/disk/src/memory/block.rs b/packages/disk/src/memory/block.rs index 8e488e2b3..b57bc0efa 100644 --- 
a/packages/disk/src/memory/block.rs +++ b/packages/disk/src/memory/block.rs @@ -6,6 +6,7 @@ use util::bt::{self, InfoHash}; //----------------------------------------------------------------------------// /// `BlockMetadata` which tracks metadata associated with a `Block` of memory. +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] pub struct BlockMetadata { info_hash: InfoHash, @@ -99,6 +100,7 @@ impl Deref for Block { //----------------------------------------------------------------------------// /// `BlockMut` of mutable memory. +#[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct BlockMut { metadata: BlockMetadata, diff --git a/packages/disk/test/add_torrent.rs b/packages/disk/test/add_torrent.rs index 2e66599a6..74150dbcd 100644 --- a/packages/disk/test/add_torrent.rs +++ b/packages/disk/test/add_torrent.rs @@ -37,7 +37,7 @@ fn positive_add_torrent() { let good_pieces = crate::core_loop_with_timeout(&mut core, 500, (0, recv), |good_pieces, recv, msg| match msg { ODiskMessage::TorrentAdded(_) => Loop::Break(good_pieces), ODiskMessage::FoundGoodPiece(_, _) => Loop::Continue((good_pieces + 1, recv)), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }); assert_eq!(0, good_pieces); diff --git a/packages/disk/test/complete_torrent.rs b/packages/disk/test/complete_torrent.rs index 1369966cc..02088e3aa 100644 --- a/packages/disk/test/complete_torrent.rs +++ b/packages/disk/test/complete_torrent.rs @@ -7,6 +7,7 @@ use tokio_core::reactor::Core; use crate::{InMemoryFileSystem, MultiFileDirectAccessor}; +#[allow(clippy::too_many_lines)] #[test] fn positive_complete_torrent() { // Create some "files" as random bytes @@ -36,7 +37,7 @@ fn positive_complete_torrent() { let (good_pieces, recv) = crate::core_loop_with_timeout(&mut core, 500, (0, recv), |good_pieces, recv, msg| match msg { ODiskMessage::TorrentAdded(_) => 
Loop::Break((good_pieces, recv)), ODiskMessage::FoundGoodPiece(_, _) => Loop::Continue((good_pieces + 1, recv)), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }); // Make sure we have no good pieces @@ -140,7 +141,7 @@ fn positive_complete_torrent() { ODiskMessage::FoundGoodPiece(_, index) => (Some(index), true), ODiskMessage::FoundBadPiece(_, index) => (Some(index), false), ODiskMessage::BlockProcessed(_) => (None, false), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }; let (piece_zero_good, piece_one_good, piece_two_good) = match opt_piece_index { @@ -148,7 +149,7 @@ fn positive_complete_torrent() { Some(0) => (new_value, piece_one_good, piece_two_good), Some(1) => (piece_zero_good, new_value, piece_two_good), Some(2) => (piece_zero_good, piece_one_good, new_value), - Some(x) => panic!("Unexpected Index {:?}", x), + Some(x) => panic!("Unexpected Index {x:?}"), }; // One message for each block (8 blocks), plus 3 messages for bad/good @@ -207,13 +208,13 @@ fn positive_complete_torrent() { ODiskMessage::FoundGoodPiece(_, index) => (Some(index), true), ODiskMessage::FoundBadPiece(_, index) => (Some(index), false), ODiskMessage::BlockProcessed(_) => (None, false), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }; let piece_zero_good = match opt_piece_index { None => piece_zero_good, Some(0) => new_value, - Some(x) => panic!("Unexpected Index {:?}", x), + Some(x) => panic!("Unexpected Index {x:?}"), }; // One message for each block (3 blocks), plus 1 messages for bad/good diff --git a/packages/disk/test/load_block.rs b/packages/disk/test/load_block.rs index f16f1c959..3f91d0d10 100644 --- a/packages/disk/test/load_block.rs +++ b/packages/disk/test/load_block.rs @@ -57,7 +57,7 @@ fn positive_load_block() { 
Loop::Continue(((blocking_send, Some(block), None), recv)) } ODiskMessage::BlockLoaded(block) => Loop::Break((opt_pblock.unwrap(), block)), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }, ); diff --git a/packages/disk/test/mod.rs b/packages/disk/test/mod.rs index c7dcd6e7b..f96e09f4c 100644 --- a/packages/disk/test/mod.rs +++ b/packages/disk/test/mod.rs @@ -122,7 +122,7 @@ impl Accessor for MultiFileDirectAccessor { C: FnMut(u64, &Path), { for (buffer, path) in &self.files { - callback(buffer.len() as u64, path) + callback(buffer.len() as u64, path); } Ok(()) @@ -133,7 +133,7 @@ impl Accessor for MultiFileDirectAccessor { C: for<'a> FnMut(PieceAccess<'a>) -> io::Result<()>, { for (buffer, _) in &self.files { - callback(PieceAccess::Compute(&mut &buffer[..]))? + callback(PieceAccess::Compute(&mut &buffer[..]))?; } Ok(()) @@ -208,7 +208,7 @@ impl FileSystem for InMemoryFileSystem { files .get(&file.path) .map(|file_buffer| { - let cast_offset = offset as usize; + let cast_offset: usize = offset.try_into().unwrap(); let bytes_to_copy = cmp::min(file_buffer.len() - cast_offset, buffer.len()); let bytes = &file_buffer[cast_offset..(bytes_to_copy + cast_offset)]; @@ -225,7 +225,7 @@ impl FileSystem for InMemoryFileSystem { files .get_mut(&file.path) .map(|file_buffer| { - let cast_offset = offset as usize; + let cast_offset: usize = offset.try_into().unwrap(); let last_byte_pos = cast_offset + buffer.len(); if last_byte_pos > file_buffer.len() { diff --git a/packages/disk/test/process_block.rs b/packages/disk/test/process_block.rs index 21356fa2a..67db96690 100644 --- a/packages/disk/test/process_block.rs +++ b/packages/disk/test/process_block.rs @@ -49,7 +49,7 @@ fn positive_process_block() { Loop::Continue(((blocking_send, None), recv)) } ODiskMessage::BlockProcessed(_) => Loop::Break(()), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected 
Message: {unexpected:?}"), }, ); diff --git a/packages/disk/test/remove_torrent.rs b/packages/disk/test/remove_torrent.rs index f8aa8f352..b225d2c48 100644 --- a/packages/disk/test/remove_torrent.rs +++ b/packages/disk/test/remove_torrent.rs @@ -47,7 +47,7 @@ fn positive_remove_torrent() { } ODiskMessage::TorrentRemoved(_) => Loop::Break((blocking_send, good_pieces, recv)), ODiskMessage::FoundGoodPiece(_, _) => Loop::Continue(((blocking_send, good_pieces + 1), recv)), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }, ); @@ -60,8 +60,8 @@ fn positive_remove_torrent() { blocking_send.send(IDiskMessage::ProcessBlock(process_block)).unwrap(); - crate::core_loop_with_timeout(&mut core, 500, ((), recv), |_, _, msg| match msg { + crate::core_loop_with_timeout(&mut core, 500, ((), recv), |(), _, msg| match msg { ODiskMessage::ProcessBlockError(_, _) => Loop::Break(()), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }); } diff --git a/packages/disk/test/resume_torrent.rs b/packages/disk/test/resume_torrent.rs index 56b449ac6..1485fa8fe 100644 --- a/packages/disk/test/resume_torrent.rs +++ b/packages/disk/test/resume_torrent.rs @@ -7,6 +7,7 @@ use tokio_core::reactor::Core; use crate::{InMemoryFileSystem, MultiFileDirectAccessor}; +#[allow(clippy::too_many_lines)] #[test] fn positive_complete_torrent() { // Create some "files" as random bytes @@ -37,7 +38,7 @@ fn positive_complete_torrent() { let (good_pieces, recv) = crate::core_loop_with_timeout(&mut core, 500, (0, recv), |good_pieces, recv, msg| match msg { ODiskMessage::TorrentAdded(_) => Loop::Break((good_pieces, recv)), ODiskMessage::FoundGoodPiece(_, _) => Loop::Continue((good_pieces + 1, recv)), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }); // Make sure we have no good pieces @@ 
-90,13 +91,13 @@ fn positive_complete_torrent() { ODiskMessage::FoundGoodPiece(_, index) => (Some(index), true), ODiskMessage::FoundBadPiece(_, index) => (Some(index), false), ODiskMessage::BlockProcessed(_) => (None, false), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }; let piece_zero_good = match opt_piece_index { None => piece_zero_good, Some(0) => new_value, - Some(x) => panic!("Unexpected Index {:?}", x), + Some(x) => panic!("Unexpected Index {x:?}"), }; if messages_recvd == (3 + 1) { @@ -114,9 +115,9 @@ fn positive_complete_torrent() { blocking_send.send(IDiskMessage::RemoveTorrent(info_hash)).unwrap(); // Verify that our torrent was removed - let recv = crate::core_loop_with_timeout(&mut core, 500, ((), recv), |_, recv, msg| match msg { + let recv = crate::core_loop_with_timeout(&mut core, 500, ((), recv), |(), recv, msg| match msg { ODiskMessage::TorrentRemoved(_) => Loop::Break(recv), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }); // Re-add our torrent and verify that we see our good first block @@ -126,7 +127,7 @@ fn positive_complete_torrent() { crate::core_loop_with_timeout(&mut core, 500, (false, recv), |piece_zero_good, recv, msg| match msg { ODiskMessage::TorrentAdded(_) => Loop::Break((recv, piece_zero_good)), ODiskMessage::FoundGoodPiece(_, 0) => Loop::Continue((true, recv)), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }); assert!(piece_zero_good); @@ -193,14 +194,14 @@ fn positive_complete_torrent() { ODiskMessage::FoundGoodPiece(_, index) => (Some(index), true), ODiskMessage::FoundBadPiece(_, index) => (Some(index), false), ODiskMessage::BlockProcessed(_) => (None, false), - unexpected => panic!("Unexpected Message: {:?}", unexpected), + unexpected => panic!("Unexpected Message: {unexpected:?}"), }; let 
(piece_one_good, piece_two_good) = match opt_piece_index { None => (piece_one_good, piece_two_good), Some(1) => (new_value, piece_two_good), Some(2) => (piece_one_good, new_value), - Some(x) => panic!("Unexpected Index {:?}", x), + Some(x) => panic!("Unexpected Index {x:?}"), }; if messages_recvd == (5 + 2) { diff --git a/packages/handshake/examples/handshake_torrent.rs b/packages/handshake/examples/handshake_torrent.rs index 2343dad94..5f250a8c2 100644 --- a/packages/handshake/examples/handshake_torrent.rs +++ b/packages/handshake/examples/handshake_torrent.rs @@ -31,7 +31,7 @@ fn main() { let peer_id = (*b"-UT2060-000000000000").into(); let handshaker = HandshakerBuilder::new() .with_peer_id(peer_id) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap() .send(InitiateMessage::new(Protocol::BitTorrent, hash, addr)) .wait() diff --git a/packages/handshake/src/bittorrent/framed.rs b/packages/handshake/src/bittorrent/framed.rs index f476a0fae..27faf59a2 100644 --- a/packages/handshake/src/bittorrent/framed.rs +++ b/packages/handshake/src/bittorrent/framed.rs @@ -20,6 +20,7 @@ enum HandshakeState { // bytes than we need for a handshake. That is unacceptable for us // because we are giving a raw socket to the client of this library. // We don't want to steal any of their bytes during our handshake! 
+#[allow(clippy::module_name_repetitions)] pub struct FramedHandshake { sock: S, write_buffer: BytesMut, diff --git a/packages/handshake/src/bittorrent/message.rs b/packages/handshake/src/bittorrent/message.rs index 5449153de..caa42429c 100644 --- a/packages/handshake/src/bittorrent/message.rs +++ b/packages/handshake/src/bittorrent/message.rs @@ -1,5 +1,5 @@ +use std::io; use std::io::Write; -use std::{io, u8}; use nom::{call, do_parse, take, IResult}; use util::bt::{self, InfoHash, PeerId}; @@ -7,6 +7,7 @@ use util::bt::{self, InfoHash, PeerId}; use crate::message::extensions::{self, Extensions}; use crate::message::protocol::Protocol; +#[allow(clippy::module_name_repetitions)] #[derive(Clone, PartialEq, Eq, Debug)] pub struct HandshakeMessage { prot: Protocol, @@ -22,7 +23,7 @@ impl HandshakeMessage { assert!( u8::try_from(custom.len()).is_ok(), "bip_handshake: Handshake Message With Protocol Length Greater Than {} Found", - u8::max_value() + u8::MAX ); } @@ -47,6 +48,7 @@ impl HandshakeMessage { } pub fn write_len(&self) -> usize { + #[allow(clippy::cast_possible_truncation)] write_len_with_protocol_len(self.prot.write_len() as u8) } @@ -169,7 +171,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "bip_handshake: Handshake Message With Protocol Length Greater Than 255 Found")] fn negative_create_overflow_protocol() { let overflow_protocol = Protocol::Custom(vec![0u8; 256]); diff --git a/packages/handshake/src/discovery.rs b/packages/handshake/src/discovery.rs index 37a192c8b..a4895d1e1 100644 --- a/packages/handshake/src/discovery.rs +++ b/packages/handshake/src/discovery.rs @@ -1,6 +1,7 @@ use util::bt::PeerId; /// Trait for advertisement information that other peers can discover. +#[allow(clippy::module_name_repetitions)] pub trait DiscoveryInfo { /// Retrieve our public port that we advertise to others. 
fn port(&self) -> u16; diff --git a/packages/handshake/src/filter/filters.rs b/packages/handshake/src/filter/filters.rs index d73b63584..8af1cf151 100644 --- a/packages/handshake/src/filter/filters.rs +++ b/packages/handshake/src/filter/filters.rs @@ -30,12 +30,12 @@ impl Filters { }); } - pub fn remove_filter(&self, filter: F) + pub fn remove_filter(&self, filter: &F) where F: HandshakeFilter + PartialEq + Eq + 'static, { self.write_filters(|mut_filters| { - let opt_found = check_index(&mut_filters[..], &filter); + let opt_found = check_index(&mut_filters[..], filter); if let Some(index) = opt_found { mut_filters.swap_remove(index); @@ -47,7 +47,7 @@ impl Filters { where B: FnOnce(&[Box]), { - self.read_filters(|ref_filters| block(ref_filters)) + self.read_filters(|ref_filters| block(ref_filters)); } pub fn clear_filters(&self) { @@ -102,6 +102,7 @@ where None } +#[allow(clippy::module_name_repetitions)] #[cfg(test)] pub mod test_filters { use std::any::Any; @@ -241,7 +242,7 @@ mod tests { let filters = Filters::new(); filters.add_filter(BlockAddrFilter::new("43.43.43.43:4343".parse().unwrap())); - filters.remove_filter(BlockAddrFilter::new("43.43.43.43:4343".parse().unwrap())); + filters.remove_filter(&BlockAddrFilter::new("43.43.43.43:4343".parse().unwrap())); let mut num_filters = 0; filters.access_filters(|filters| { @@ -256,7 +257,7 @@ mod tests { let filters = Filters::new(); filters.add_filter(BlockAddrFilter::new("43.43.43.43:4343".parse().unwrap())); - filters.remove_filter(BlockAddrFilter::new("43.43.43.43:4342".parse().unwrap())); + filters.remove_filter(&BlockAddrFilter::new("43.43.43.43:4342".parse().unwrap())); let mut num_filters = 0; filters.access_filters(|filters| { @@ -272,7 +273,7 @@ mod tests { filters.add_filter(BlockAddrFilter::new("43.43.43.43:4343".parse().unwrap())); filters.add_filter(BlockAddrFilter::new("43.43.43.43:4344".parse().unwrap())); - filters.remove_filter(BlockAddrFilter::new("43.43.43.43:4343".parse().unwrap())); + 
filters.remove_filter(&BlockAddrFilter::new("43.43.43.43:4343".parse().unwrap())); let mut num_filters = 0; filters.access_filters(|filters| { diff --git a/packages/handshake/src/filter/mod.rs b/packages/handshake/src/filter/mod.rs index 7de0cb7dc..09f3a2a02 100644 --- a/packages/handshake/src/filter/mod.rs +++ b/packages/handshake/src/filter/mod.rs @@ -16,7 +16,7 @@ pub trait HandshakeFilters { where F: HandshakeFilter + PartialEq + Eq + Send + Sync + 'static; - /// Remove the filter from the current set of filters. fn remove_filter(&self, filter: F) where F: HandshakeFilter + PartialEq + Eq + Send + Sync + 'static; @@ -33,18 +33,18 @@ where where F: HandshakeFilter + PartialEq + Eq + Send + Sync + 'static, { - (*self).add_filter(filter) + (*self).add_filter(filter); } fn remove_filter(&self, filter: F) where F: HandshakeFilter + PartialEq + Eq + Send + Sync + 'static, { - (*self).remove_filter(filter) + (*self).remove_filter(filter); } fn clear_filters(&self) { - (*self).clear_filters() + (*self).clear_filters(); } } @@ -59,6 +59,7 @@ where /// /// In order for a handshake to pass the filter, each field has to be either not blocked, or /// effectively "whitelisted" (see `FilterDecision::Allow`). +#[allow(clippy::module_name_repetitions)] #[allow(unused)] pub trait HandshakeFilter { /// Used to implement generic equality. @@ -95,6 +96,7 @@ pub trait HandshakeFilter { //----------------------------------------------------------------------------------// /// Filtering decision made for a given handshake. +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FilterDecision { /// Pass on making a filter decision for the given field.
diff --git a/packages/handshake/src/handshake/config.rs b/packages/handshake/src/handshake/config.rs index 3181e0b90..a09a783ae 100644 --- a/packages/handshake/src/handshake/config.rs +++ b/packages/handshake/src/handshake/config.rs @@ -11,6 +11,7 @@ const DEFAULT_HANDSHAKE_TIMEOUT_MILLIS: u64 = 1000; const DEFAULT_HANDSHAKE_CONNECT_TIMEOUT_MILLIS: u64 = 1000; /// Configures the internals of a `Handshaker`. +#[allow(clippy::module_name_repetitions)] #[derive(PartialEq, Eq, Debug, Copy, Clone)] pub struct HandshakerConfig { sink_buffer_size: usize, diff --git a/packages/handshake/src/handshake/handler/handshaker.rs b/packages/handshake/src/handshake/handler/handshaker.rs index a367d0178..1e899ee2c 100644 --- a/packages/handshake/src/handshake/handler/handshaker.rs +++ b/packages/handshake/src/handshake/handler/handshaker.rs @@ -86,7 +86,7 @@ where } }) }) - .or_else(|_| Ok(None)); + .or_else(|()| Ok(None)); Box::new(composed_future) } @@ -142,7 +142,7 @@ where } }) .flatten() - .or_else(|_| Ok(None)); + .or_else(|()| Ok(None)); Box::new(composed_future) } diff --git a/packages/handshake/src/handshake/handler/initiator.rs b/packages/handshake/src/handshake/handler/initiator.rs index a0cf0a173..7dafbcad1 100644 --- a/packages/handshake/src/handshake/handler/initiator.rs +++ b/packages/handshake/src/handshake/handler/initiator.rs @@ -9,6 +9,7 @@ use crate::message::initiate::InitiateMessage; use crate::transport::Transport; /// Handle the initiation of connections, which are returned as a `HandshakeType`. 
+#[allow(clippy::module_name_repetitions)] pub fn initiator_handler( item: InitiateMessage, context: &(T, Filters, Handle, HandshakeTimer), diff --git a/packages/handshake/src/handshake/handler/listener.rs b/packages/handshake/src/handshake/handler/listener.rs index ebc141e95..d688a141c 100644 --- a/packages/handshake/src/handshake/handler/listener.rs +++ b/packages/handshake/src/handshake/handler/listener.rs @@ -7,6 +7,7 @@ use crate::filter::filters::Filters; use crate::handshake::handler; use crate::handshake::handler::HandshakeType; +#[allow(clippy::module_name_repetitions)] pub struct ListenerHandler { opt_item: Option>, } diff --git a/packages/handshake/src/handshake/handler/mod.rs b/packages/handshake/src/handshake/handler/mod.rs index f5fa27502..e40d6920f 100644 --- a/packages/handshake/src/handshake/handler/mod.rs +++ b/packages/handshake/src/handshake/handler/mod.rs @@ -30,6 +30,7 @@ enum LoopError { /// Create loop for feeding the handler with the items coming from the stream, and forwarding the result to the sink. /// /// If the stream is used up, or an error is propagated from any of the elements, the loop will terminate. 
+#[allow(clippy::module_name_repetitions)] pub fn loop_handler(stream: M, handler: H, sink: K, context: C, handle: &Handle) where M: Stream + 'static, diff --git a/packages/handshake/src/handshake/handler/timer.rs b/packages/handshake/src/handshake/handler/timer.rs index e7b70a4c9..da2b15c0b 100644 --- a/packages/handshake/src/handshake/handler/timer.rs +++ b/packages/handshake/src/handshake/handler/timer.rs @@ -3,6 +3,7 @@ use std::time::Duration; use futures::Future; use tokio_timer::{Timeout, TimeoutError, Timer}; +#[allow(clippy::module_name_repetitions)] #[derive(Clone)] pub struct HandshakeTimer { timer: Timer, @@ -41,7 +42,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "called `Result::unwrap()` on an `Err` value: ()")] fn negative_finish_after_timeout() { let timer = HandshakeTimer::new(tokio_timer::wheel().build(), Duration::from_millis(50)); diff --git a/packages/handshake/src/handshake/handshaker.rs b/packages/handshake/src/handshake/handshaker.rs index dc3533b02..69e83d6bc 100644 --- a/packages/handshake/src/handshake/handshaker.rs +++ b/packages/handshake/src/handshake/handshaker.rs @@ -28,6 +28,7 @@ use crate::message::initiate::InitiateMessage; use crate::transport::Transport; /// Build configuration for `Handshaker` object creation. +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone)] pub struct HandshakerBuilder { bind: SocketAddr, @@ -51,8 +52,8 @@ impl Default for HandshakerBuilder { bind, port: Default::default(), pid, - ext: Default::default(), - config: Default::default(), + ext: Extensions::default(), + config: HandshakerConfig::default(), } } } @@ -87,7 +88,7 @@ impl HandshakerBuilder { /// /// Defaults to a random SHA-1 hash; official clients should use an encoding scheme. /// - /// See http://www.bittorrent.org/beps/bep_0020.html. + /// See [BEP 0020](http://www.bittorrent.org/beps/bep_0020.html). 
pub fn with_peer_id(&mut self, peer_id: PeerId) -> &mut HandshakerBuilder { self.pid = peer_id; @@ -111,7 +112,11 @@ impl HandshakerBuilder { } /// Build a `Handshaker` over the given `Transport` with a `Remote` instance. - pub fn build(&self, transport: T, handle: Handle) -> io::Result> + /// + /// # Errors + /// + /// Returns a IO error if unable to build. + pub fn build(&self, transport: T, handle: &Handle) -> io::Result> where T: Transport + 'static, { @@ -152,11 +157,11 @@ impl Handshaker where S: AsyncRead + AsyncWrite + 'static, { - fn with_builder(builder: &HandshakerBuilder, transport: T, handle: Handle) -> io::Result> + fn with_builder(builder: &HandshakerBuilder, transport: T, handle: &Handle) -> io::Result> where T: Transport + 'static, { - let listener = transport.listen(&builder.bind, &handle)?; + let listener = transport.listen(&builder.bind, handle)?; // Resolve our "real" public port let open_port = if builder.port == 0 { @@ -179,15 +184,15 @@ where initiator::initiator_handler, hand_send.clone(), (transport, filters.clone(), handle.clone(), initiate_timer), - &handle, + handle, ); - handler::loop_handler(listener, ListenerHandler::new, hand_send, filters.clone(), &handle); + handler::loop_handler(listener, ListenerHandler::new, hand_send, filters.clone(), handle); handler::loop_handler( hand_recv.map(Result::Ok).buffer_unordered(100), handshaker::execute_handshake, sock_send, (builder.ext, builder.pid, filters.clone(), handshake_timer), - &handle, + handle, ); let sink = HandshakerSink::new(addr_send, open_port, builder.pid, filters); @@ -255,6 +260,7 @@ impl HandshakeFilters for Handshaker { //----------------------------------------------------------------------------------// /// `Sink` portion of the `Handshaker` for initiating handshakes. 
+#[allow(clippy::module_name_repetitions)] #[derive(Clone)] pub struct HandshakerSink { send: Sender, @@ -309,7 +315,7 @@ impl HandshakeFilters for HandshakerSink { where F: HandshakeFilter + PartialEq + Eq + Send + Sync + 'static, { - self.filters.remove_filter(filter); + self.filters.remove_filter(&filter); } fn clear_filters(&self) { @@ -320,6 +326,7 @@ impl HandshakeFilters for HandshakerSink { //----------------------------------------------------------------------------------// /// `Stream` portion of the `Handshaker` for completed handshakes. +#[allow(clippy::module_name_repetitions)] pub struct HandshakerStream { recv: Receiver>, } diff --git a/packages/handshake/src/local_addr.rs b/packages/handshake/src/local_addr.rs index 7a688294c..1b1e90032 100644 --- a/packages/handshake/src/local_addr.rs +++ b/packages/handshake/src/local_addr.rs @@ -4,8 +4,13 @@ use std::net::SocketAddr; use tokio_core::net::TcpStream; /// Trait for getting the local address. + pub trait LocalAddr { /// Get the local address. + /// + /// # Errors + /// + /// It would return an IO Error if unable to obtain the local address. fn local_addr(&self) -> io::Result; } diff --git a/packages/handshake/src/message/complete.rs b/packages/handshake/src/message/complete.rs index d700fe190..d1cffe44e 100644 --- a/packages/handshake/src/message/complete.rs +++ b/packages/handshake/src/message/complete.rs @@ -6,6 +6,7 @@ use crate::message::extensions::Extensions; use crate::message::protocol::Protocol; /// Message containing completed handshaking information. +#[allow(clippy::module_name_repetitions)] pub struct CompleteMessage { prot: Protocol, ext: Extensions, diff --git a/packages/handshake/src/message/extensions.rs b/packages/handshake/src/message/extensions.rs index a12f51b4a..9e65a0971 100644 --- a/packages/handshake/src/message/extensions.rs +++ b/packages/handshake/src/message/extensions.rs @@ -68,6 +68,10 @@ impl Extensions { } /// Write the `Extensions` to the given writer. 
+ /// + /// # Errors + /// + /// It would return an IO error if unable to write bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, diff --git a/packages/handshake/src/message/initiate.rs b/packages/handshake/src/message/initiate.rs index adb1b1a56..b20cfa739 100644 --- a/packages/handshake/src/message/initiate.rs +++ b/packages/handshake/src/message/initiate.rs @@ -5,6 +5,7 @@ use util::bt::InfoHash; use crate::message::protocol::Protocol; /// Message used to initiate a handshake with the `Handshaker`. +#[allow(clippy::module_name_repetitions)] #[derive(PartialEq, Eq, Debug, Clone)] pub struct InitiateMessage { prot: Protocol, diff --git a/packages/handshake/src/message/protocol.rs b/packages/handshake/src/message/protocol.rs index ec58ab8a6..7a1480ed1 100644 --- a/packages/handshake/src/message/protocol.rs +++ b/packages/handshake/src/message/protocol.rs @@ -1,5 +1,5 @@ +use std::io; use std::io::Write; -use std::{io, u8}; use nom::{be_u8, call, do_parse, error_node_position, error_position, map, switch, take, value, IResult}; @@ -21,6 +21,10 @@ impl Protocol { } /// Write the `Protocol` out to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -30,6 +34,7 @@ impl Protocol { Protocol::Custom(prot) => (prot.len(), &prot[..]), }; + #[allow(clippy::cast_possible_truncation)] writer.write_all(&[len as u8][..])?; writer.write_all(bytes)?; diff --git a/packages/handshake/src/transport.rs b/packages/handshake/src/transport.rs index 82e88d973..9071357e4 100644 --- a/packages/handshake/src/transport.rs +++ b/packages/handshake/src/transport.rs @@ -22,15 +22,24 @@ pub trait Transport { type Listener: Stream + LocalAddr + 'static; /// Connect to the given address over this transport, using the supplied `Handle`. + /// + /// # Errors + /// + /// It would return an IO Error if unable to connect to socket. 
fn connect(&self, addr: &SocketAddr, handle: &Handle) -> io::Result; /// Listen to the given address for this transport, using the supplied `Handle`. + /// + /// # Errors + /// + /// It would return an IO Error if unable to listen to socket. fn listen(&self, addr: &SocketAddr, handle: &Handle) -> io::Result; } //----------------------------------------------------------------------------------// /// Defines a `Transport` operating over TCP. +#[allow(clippy::module_name_repetitions)] pub struct TcpTransport; impl Transport for TcpTransport { diff --git a/packages/handshake/test/test_byte_after_handshake.rs b/packages/handshake/test/test_byte_after_handshake.rs index 79d441ff1..c399cf171 100644 --- a/packages/handshake/test/test_byte_after_handshake.rs +++ b/packages/handshake/test/test_byte_after_handshake.rs @@ -20,7 +20,7 @@ fn positive_recover_bytes() { let handshaker_one = HandshakerBuilder::new() .with_bind_addr(handshaker_one_addr) .with_peer_id(handshaker_one_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_one_addr.set_port(handshaker_one.port()); diff --git a/packages/handshake/test/test_bytes_after_handshake.rs b/packages/handshake/test/test_bytes_after_handshake.rs index f9f79641b..2e4d852f5 100644 --- a/packages/handshake/test/test_bytes_after_handshake.rs +++ b/packages/handshake/test/test_bytes_after_handshake.rs @@ -20,7 +20,7 @@ fn positive_recover_bytes() { let handshaker_one = HandshakerBuilder::new() .with_bind_addr(handshaker_one_addr) .with_peer_id(handshaker_one_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_one_addr.set_port(handshaker_one.port()); diff --git a/packages/handshake/test/test_connect.rs b/packages/handshake/test/test_connect.rs index 6e699965a..a37097955 100644 --- a/packages/handshake/test/test_connect.rs +++ b/packages/handshake/test/test_connect.rs @@ -16,7 +16,7 @@ fn positive_connect() { let handshaker_one = 
HandshakerBuilder::new() .with_bind_addr(handshaker_one_addr) .with_peer_id(handshaker_one_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_one_addr.set_port(handshaker_one.port()); @@ -27,7 +27,7 @@ fn positive_connect() { let handshaker_two = HandshakerBuilder::new() .with_bind_addr(handshaker_two_addr) .with_peer_id(handshaker_two_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_two_addr.set_port(handshaker_two.port()); diff --git a/packages/handshake/test/test_filter_allow_all.rs b/packages/handshake/test/test_filter_allow_all.rs index 5ffa0be46..5bb6591dc 100644 --- a/packages/handshake/test/test_filter_allow_all.rs +++ b/packages/handshake/test/test_filter_allow_all.rs @@ -50,7 +50,7 @@ fn test_filter_all() { let handshaker_one = HandshakerBuilder::new() .with_bind_addr(handshaker_one_addr) .with_peer_id(handshaker_one_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_one_addr.set_port(handshaker_one.port()); @@ -63,7 +63,7 @@ fn test_filter_all() { let handshaker_two = HandshakerBuilder::new() .with_bind_addr(handshaker_two_addr) .with_peer_id(handshaker_two_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_two_addr.set_port(handshaker_two.port()); @@ -83,7 +83,7 @@ fn test_filter_all() { .and_then(|_| { let timeout = Timeout::new(Duration::from_millis(50), &handle) .unwrap() - .map(|_| TimeoutResult::TimedOut) + .map(|()| TimeoutResult::TimedOut) .map_err(|_| ()); let result_one = stream_one.into_future().map(|_| TimeoutResult::GotResult).map_err(|_| ()); diff --git a/packages/handshake/test/test_filter_block_all.rs b/packages/handshake/test/test_filter_block_all.rs index a0da86dea..8378b75f5 100644 --- a/packages/handshake/test/test_filter_block_all.rs +++ b/packages/handshake/test/test_filter_block_all.rs @@ -50,7 +50,7 @@ fn 
test_filter_all() { let handshaker_one = HandshakerBuilder::new() .with_bind_addr(handshaker_one_addr) .with_peer_id(handshaker_one_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_one_addr.set_port(handshaker_one.port()); @@ -63,7 +63,7 @@ fn test_filter_all() { let handshaker_two = HandshakerBuilder::new() .with_bind_addr(handshaker_two_addr) .with_peer_id(handshaker_two_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_two_addr.set_port(handshaker_two.port()); @@ -83,7 +83,7 @@ fn test_filter_all() { .and_then(|_| { let timeout = Timeout::new(Duration::from_millis(50), &handle) .unwrap() - .map(|_| TimeoutResult::TimedOut) + .map(|()| TimeoutResult::TimedOut) .map_err(|_| ()); let result_one = stream_one.into_future().map(|_| TimeoutResult::GotResult).map_err(|_| ()); diff --git a/packages/handshake/test/test_filter_whitelist_diff_data.rs b/packages/handshake/test/test_filter_whitelist_diff_data.rs index a29fcbacf..86ec118c9 100644 --- a/packages/handshake/test/test_filter_whitelist_diff_data.rs +++ b/packages/handshake/test/test_filter_whitelist_diff_data.rs @@ -56,7 +56,7 @@ fn test_filter_whitelist_diff_data() { let handshaker_one = HandshakerBuilder::new() .with_bind_addr(handshaker_one_addr) .with_peer_id(handshaker_one_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_one_addr.set_port(handshaker_one.port()); @@ -72,7 +72,7 @@ fn test_filter_whitelist_diff_data() { let handshaker_two = HandshakerBuilder::new() .with_bind_addr(handshaker_two_addr) .with_peer_id(handshaker_two_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_two_addr.set_port(handshaker_two.port()); @@ -92,7 +92,7 @@ fn test_filter_whitelist_diff_data() { .and_then(|_| { let timeout = Timeout::new(Duration::from_millis(50), &handle) .unwrap() - .map(|_| TimeoutResult::TimedOut) 
+ .map(|()| TimeoutResult::TimedOut) .map_err(|_| ()); let result_one = stream_one.into_future().map(|_| TimeoutResult::GotResult).map_err(|_| ()); diff --git a/packages/handshake/test/test_filter_whitelist_same_data.rs b/packages/handshake/test/test_filter_whitelist_same_data.rs index 3e2e9580e..092675be8 100644 --- a/packages/handshake/test/test_filter_whitelist_same_data.rs +++ b/packages/handshake/test/test_filter_whitelist_same_data.rs @@ -56,7 +56,7 @@ fn test_filter_whitelist_same_data() { let handshaker_one = HandshakerBuilder::new() .with_bind_addr(handshaker_one_addr) .with_peer_id(handshaker_one_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_one_addr.set_port(handshaker_one.port()); @@ -72,7 +72,7 @@ fn test_filter_whitelist_same_data() { let handshaker_two = HandshakerBuilder::new() .with_bind_addr(handshaker_two_addr) .with_peer_id(handshaker_two_pid) - .build(TcpTransport, core.handle()) + .build(TcpTransport, &core.handle()) .unwrap(); handshaker_two_addr.set_port(handshaker_two.port()); @@ -92,7 +92,7 @@ fn test_filter_whitelist_same_data() { .and_then(|_| { let timeout = Timeout::new(Duration::from_millis(50), &handle) .unwrap() - .map(|_| TimeoutResult::TimedOut) + .map(|()| TimeoutResult::TimedOut) .map_err(|_| ()); let result_one = stream_one.into_future().map(|_| TimeoutResult::GotResult).map_err(|_| ()); diff --git a/packages/magnet/src/lib.rs b/packages/magnet/src/lib.rs index 635d66fb6..7cc3fdf23 100644 --- a/packages/magnet/src/lib.rs +++ b/packages/magnet/src/lib.rs @@ -67,10 +67,8 @@ impl MagnetLink { #[must_use] pub fn parse(s: &str) -> Option { // Parse URL - let url = match Url::parse(s) { - Ok(url) => url, - Err(_) => return None, - }; + let Ok(url) = Url::parse(s) else { return None }; + // Is Magnet Link? 
if url.scheme() != "magnet" { return None; @@ -81,7 +79,7 @@ impl MagnetLink { for (k, v) in url.query_pairs() { if result.is_none() { - result = Some(Self::default()) + result = Some(Self::default()); }; if let Some(ref mut r) = result { @@ -89,12 +87,12 @@ impl MagnetLink { "dn" => r.display_name = Some(v.to_string()), "xl" => { if let Ok(exact_length) = v[..].parse::() { - r.exact_length = Some(exact_length) + r.exact_length = Some(exact_length); } } "xt" => { if let Some(topic) = Topic::parse(&v[..]) { - r.exact_topic = Some(topic) + r.exact_topic = Some(topic); } } "as" => r.acceptable_source.push(v.to_string()), @@ -172,7 +170,7 @@ mod tests { /* cSpell:enable */ let link = crate::MagnetLink::parse(url).unwrap(); - println!("link {:?}", link); + println!("link {link:?}"); let expected_info_hash = [ 0xd9, 0xbe, 0x69, 0x09, 0x32, 0x5d, 0x28, 0x91, 0x2f, 0x40, 0x0f, 0xcb, 0x32, 0x40, 0x05, 0xdd, 0x58, 0x61, 0xe4, diff --git a/packages/metainfo/benches/metainfo_benchmark.rs b/packages/metainfo/benches/metainfo_benchmark.rs index 7b2374d50..ed1b36b07 100644 --- a/packages/metainfo/benches/metainfo_benchmark.rs +++ b/packages/metainfo/benches/metainfo_benchmark.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "bench")] -use criterion::{black_box, criterion_group, criterion_main, Criterion}; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use metainfo::{DirectAccessor, Metainfo, MetainfoBuilder}; @@ -20,11 +18,11 @@ fn criterion_benchmark(c: &mut Criterion) { let file_content_buffer = file_content.as_slice(); c.bench_function("metainfo build multi kb", |b| { - b.iter(|| bench_build_multi_kb_metainfo(black_box(file_content_buffer))) + b.iter(|| bench_build_multi_kb_metainfo(black_box(file_content_buffer))); }); c.bench_function("metainfo parse multi kb", |b| { - b.iter(|| bench_parse_multi_kb_metainfo(black_box(MULTI_KB_METAINFO))) + b.iter(|| bench_parse_multi_kb_metainfo(black_box(MULTI_KB_METAINFO))); }); } diff --git 
a/packages/metainfo/examples/create_torrent.rs b/packages/metainfo/examples/create_torrent.rs index 97b9585cf..fed862b37 100644 --- a/packages/metainfo/examples/create_torrent.rs +++ b/packages/metainfo/examples/create_torrent.rs @@ -48,6 +48,9 @@ where let mut prev_progress = 0; builder.build(2, src_path, move |progress| { + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_precision_loss)] let whole_progress = (progress * (count as f64)) as u64; let delta_progress = whole_progress - prev_progress; @@ -62,16 +65,17 @@ where fn print_metainfo_overview(bytes: &[u8]) { let metainfo = Metainfo::from_bytes(bytes).unwrap(); let info = metainfo.info(); - let info_hash_hex = metainfo - .info() - .info_hash() - .as_ref() - .iter() - .map(|b| format!("{:02X}", b)) - .fold(String::new(), |mut acc, nex| { - acc.push_str(&nex); - acc - }); + let info_hash_hex = + metainfo + .info() + .info_hash() + .as_ref() + .iter() + .map(|b| format!("{b:02X}")) + .fold(String::new(), |mut acc, nex| { + acc.push_str(&nex); + acc + }); let utc_creation_date = metainfo.creation_date().map(|c| Utc.timestamp_opt(c, 0)); println!("\n\n-----------------------------Metainfo File Overview-----------------------------"); diff --git a/packages/metainfo/src/accessor.rs b/packages/metainfo/src/accessor.rs index 3e1508b22..cf979b2b2 100644 --- a/packages/metainfo/src/accessor.rs +++ b/packages/metainfo/src/accessor.rs @@ -11,6 +11,10 @@ pub trait IntoAccessor { type Accessor: Accessor; /// Convert the type into some Accessor as a Result. + /// + /// # Errors + /// + /// It would return an IO error if unable to convert to an ancestor. fn into_accessor(self) -> io::Result; } @@ -20,11 +24,19 @@ pub trait Accessor { fn access_directory(&self) -> Option<&Path>; /// Access the metadata for all files including their length and path. + /// + /// # Errors + /// + /// It would return an IO error if unable to access the metadata. 
fn access_metadata(&self, callback: C) -> io::Result<()> where C: FnMut(u64, &Path); /// Access the sequential pieces that make up all of the files. + /// + /// # Errors + /// + /// It would return an IO error if unable to access the pieces. fn access_pieces(&self, callback: C) -> io::Result<()> where C: for<'a> FnMut(PieceAccess<'a>) -> io::Result<()>; @@ -73,6 +85,7 @@ pub enum PieceAccess<'a> { // ----------------------------------------------------------------------------// /// Accessor that pulls data in from the file system. +#[allow(clippy::module_name_repetitions)] pub struct FileAccessor { absolute_path: PathBuf, directory_name: Option, @@ -80,6 +93,14 @@ pub struct FileAccessor { impl FileAccessor { /// Create a new `FileAccessor` from the given file/directory path. + /// + /// # Errors + /// + /// It would return an error if unable to canonicalize the path. + /// + /// # Panics + /// + /// It would panic if unable to get the last directory name. pub fn new(path: T) -> io::Result where T: AsRef, @@ -174,6 +195,7 @@ fn entry_file_filter(res_entry: &walkdir::Result) -> bool { // ----------------------------------------------------------------------------// /// Accessor that pulls data in directly from memory. 
+#[allow(clippy::module_name_repetitions)] pub struct DirectAccessor<'a> { file_name: &'a str, file_contents: &'a [u8], diff --git a/packages/metainfo/src/builder/buffer.rs b/packages/metainfo/src/builder/buffer.rs index 2cff2fa58..6175549fc 100644 --- a/packages/metainfo/src/builder/buffer.rs +++ b/packages/metainfo/src/builder/buffer.rs @@ -40,13 +40,12 @@ impl PieceBuffers { while pb.is_none() { pb = self.piece_queue.pop(); - match pb { - Some(_) => break, - None => { - thread::sleep(ten_millis); - continue; - } + if pb.is_some() { + break; } + + thread::sleep(ten_millis); + continue; } pb.expect("Checked is_some in loop above.") @@ -61,6 +60,7 @@ fn calculate_total_buffers(num_workers: usize) -> usize { // ----------------------------------------------------------------------------// /// Piece buffer that can be filled up until it contains a full piece. +#[allow(clippy::module_name_repetitions)] #[derive(PartialEq, Eq)] pub struct PieceBuffer { buffer: Vec, diff --git a/packages/metainfo/src/builder/mod.rs b/packages/metainfo/src/builder/mod.rs index d5a10b4b4..cfe3b7be9 100644 --- a/packages/metainfo/src/builder/mod.rs +++ b/packages/metainfo/src/builder/mod.rs @@ -38,6 +38,7 @@ const TRANSFER_MAX_PIECES_SIZE: usize = 60000; const TRANSFER_MIN_PIECE_LENGTH: usize = 1024; /// Enumerates settings for piece length for generating a torrent file. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum PieceLength { /// Optimize piece length for torrent file size and file transfer. OptBalanced, @@ -50,6 +51,7 @@ pub enum PieceLength { } /// Builder for generating a torrent file from some accessor. +#[allow(clippy::module_name_repetitions)] pub struct MetainfoBuilder<'a> { root: BencodeMut<'a>, info: InfoBuilder<'a>, @@ -72,6 +74,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Set announce-list content + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary, or the data is somehow corrupt. 
#[must_use] pub fn set_trackers(mut self, opt_trackers: Option<&'a Vec>>) -> MetainfoBuilder<'a> { { @@ -108,6 +114,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Set or unset the main tracker that this torrent file points to. + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. #[must_use] pub fn set_main_tracker(mut self, opt_tracker_url: Option<&'a str>) -> MetainfoBuilder<'a> { { @@ -124,6 +134,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Set or unset the creation date for the torrent. + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. #[must_use] pub fn set_creation_date(mut self, opt_secs_epoch: Option) -> MetainfoBuilder<'a> { { @@ -140,6 +154,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Set or unset a comment for the torrent file. + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. #[must_use] pub fn set_comment(mut self, opt_comment: Option<&'a str>) -> MetainfoBuilder<'a> { { @@ -156,6 +174,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Set or unset the created by for the torrent file. + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. #[must_use] pub fn set_created_by(mut self, opt_created_by: Option<&'a str>) -> MetainfoBuilder<'a> { { @@ -188,6 +210,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Get decoded value of announce-list key + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. pub fn get_trackers(&self) -> Option>> { let dict_access = self.root.dict().unwrap(); @@ -195,6 +221,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Get decoded value of announce-url key + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. pub fn get_main_tracker(&self) -> Option { let dict_access = self.root.dict().unwrap(); @@ -202,6 +232,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Get decoded value of creation-date key + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. 
#[must_use] pub fn get_creation_date(&self) -> Option { let dict_access = self.root.dict().unwrap(); @@ -210,6 +244,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Get decoded value of comment key + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. pub fn get_comment(&self) -> Option { let dict_access = self.root.dict().unwrap(); @@ -217,6 +255,10 @@ impl<'a> MetainfoBuilder<'a> { } /// Get decoded value of created-by key + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. pub fn get_created_by(&self) -> Option { let dict_access = self.root.dict().unwrap(); @@ -225,7 +267,9 @@ impl<'a> MetainfoBuilder<'a> { /// Build the metainfo file from the given accessor and the number of worker threads. /// - /// Panics if threads is equal to zero. + /// # Errors + /// + /// It would return an error if unable to get the accessor. pub fn build(self, threads: usize, accessor: A, progress: C) -> ParseResult> where A: IntoAccessor, @@ -247,7 +291,7 @@ impl<'a> MetainfoBuilder<'a> { // ----------------------------------------------------------------------------// /// Builder for generating an info dictionary file from some accessor. - +#[allow(clippy::module_name_repetitions)] pub struct InfoBuilder<'a> { info: BencodeMut<'a>, // Stored outside of root as some of the variants need the total @@ -271,6 +315,10 @@ impl<'a> InfoBuilder<'a> { } /// Set or unset the private flag for the torrent file. + /// + /// # Panics + /// + /// It would panic if unable to get the dictionary. #[must_use] pub fn set_private_flag(mut self, opt_is_private: Option) -> InfoBuilder<'a> { let opt_numeric_is_private = opt_is_private.map(i64::from); @@ -295,7 +343,9 @@ impl<'a> InfoBuilder<'a> { /// Build the metainfo file from the given accessor and the number of worker threads. /// - /// Panics if threads is equal to zero. + /// # Errors + /// + /// It would return an error if unable to get the accessor. 
pub fn build(self, threads: usize, accessor: A, progress: C) -> ParseResult> where A: IntoAccessor, @@ -333,9 +383,26 @@ where // Build the pieces for the data our accessor is pointing at let total_files_len = files_info.iter().fold(0, |acc, nex| acc + nex.0); - let piece_length = determine_piece_length(total_files_len, piece_length); - let total_num_pieces = ((total_files_len as f64) / (piece_length as f64)).ceil() as u64; - let pieces_list = worker::start_hasher_workers(&accessor, piece_length, total_num_pieces, threads, progress)?; + let piece_length = determine_piece_length(total_files_len, &piece_length); + + #[allow(clippy::cast_precision_loss)] + let total_num_pieces = (total_files_len as f64) / (piece_length as f64); + + assert!( + (0.0..=9_223_372_036_854_775_807_f64).contains(&total_num_pieces), /* i64::MAX */ + "Value is outside the range of i64" + ); + + #[allow(clippy::cast_possible_truncation)] + let total_num_pieces: i64 = total_num_pieces.ceil() as i64; + + let pieces_list = worker::start_hasher_workers( + &accessor, + piece_length, + total_num_pieces.try_into().unwrap(), + threads, + progress, + )?; let pieces = map_pieces_list(pieces_list.into_iter().map(|(_, piece)| piece)); let mut single_file_name = String::new(); @@ -348,7 +415,7 @@ where { let info_access = info.dict_mut().unwrap(); - info_access.insert(parse::PIECE_LENGTH_KEY.into(), ben_int!(piece_length as i64)); + info_access.insert(parse::PIECE_LENGTH_KEY.into(), ben_int!(piece_length.try_into().unwrap())); info_access.insert(parse::PIECES_KEY.into(), ben_bytes!(&pieces[..])); // If the accessor specifies a directory OR there are multiple files, we will build a multi file torrent @@ -373,7 +440,7 @@ where } bencode_files_access.push(ben_map! { - parse::LENGTH_KEY => ben_int!(len as i64), + parse::LENGTH_KEY => ben_int!(len.try_into().unwrap()), parse::PATH_KEY => bencode_path }); } @@ -401,7 +468,7 @@ where } bencode_files_access.push(ben_map! 
{ - parse::LENGTH_KEY => ben_int!(len as i64), + parse::LENGTH_KEY => ben_int!(len.try_into().unwrap()), parse::PATH_KEY => bencode_path }); } @@ -416,7 +483,7 @@ where single_file_name.push_str(name_component); } - info_access.insert(parse::LENGTH_KEY.into(), ben_int!(files_info[0].0 as i64)); + info_access.insert(parse::LENGTH_KEY.into(), ben_int!(files_info[0].0.try_into().unwrap())); info_access.insert(parse::NAME_KEY.into(), ben_bytes!(&single_file_name[..])); } } @@ -434,9 +501,9 @@ where /// Calculate the final piece length given the total file size and piece length strategy. /// /// Lower piece length will result in a bigger file but better transfer reliability and vice versa. -fn determine_piece_length(total_file_size: u64, piece_length: PieceLength) -> usize { +fn determine_piece_length(total_file_size: u64, piece_length: &PieceLength) -> usize { match piece_length { - PieceLength::Custom(len) => len, + PieceLength::Custom(len) => *len, PieceLength::OptBalanced => calculate_piece_length(total_file_size, BALANCED_MAX_PIECES_SIZE, BALANCED_MIN_PIECE_LENGTH), PieceLength::OptFileSize => { calculate_piece_length(total_file_size, FILE_SIZE_MAX_PIECES_SIZE, FILE_SIZE_MIN_PIECE_LENGTH) @@ -447,17 +514,30 @@ fn determine_piece_length(total_file_size: u64, piece_length: PieceLength) -> us /// Calculate the minimum power of 2 piece length for the given max pieces size and total file size. 
fn calculate_piece_length(total_file_size: u64, max_pieces_size: usize, min_piece_length: usize) -> usize { + #[allow(clippy::cast_precision_loss)] let num_pieces = (max_pieces_size as f64) / (sha::SHA_HASH_LEN as f64); - let piece_length = ((total_file_size as f64) / num_pieces + 0.5) as usize; + + #[allow(clippy::cast_precision_loss)] + let piece_length = (total_file_size as f64) / num_pieces + 0.5; + + assert!( + (0.0..=9_223_372_036_854_775_807_f64).contains(&piece_length), /* i64::MAX */ + "Value is outside the range of i64" + ); + + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + let piece_length = piece_length as u64; let pot_piece_length = piece_length.next_power_of_two(); + let min_piece_length = min_piece_length as u64; match ( pot_piece_length > min_piece_length, - pot_piece_length < ALL_OPT_MAX_PIECE_LENGTH, + pot_piece_length < ALL_OPT_MAX_PIECE_LENGTH.try_into().unwrap(), ) { - (true, true) => pot_piece_length, - (false, _) => min_piece_length, + (true, true) => pot_piece_length.try_into().unwrap(), + (false, _) => min_piece_length.try_into().unwrap(), (_, false) => ALL_OPT_MAX_PIECE_LENGTH, } } diff --git a/packages/metainfo/src/builder/worker.rs b/packages/metainfo/src/builder/worker.rs index 3969acd5f..ffcd3072d 100644 --- a/packages/metainfo/src/builder/worker.rs +++ b/packages/metainfo/src/builder/worker.rs @@ -54,7 +54,7 @@ where let share_piece_buffers = piece_buffers.clone(); thread::spawn(move || { - start_hash_worker(share_master_send, share_work_queue, share_piece_buffers); + start_hash_worker(&share_master_send, &share_work_queue, &share_piece_buffers); }); } @@ -64,7 +64,7 @@ where }); // Create the master worker to coordinate between the workers - start_hash_master(accessor, num_workers, master_recv, work_queue, piece_buffers, prog_send) + start_hash_master(accessor, num_workers, &master_recv, &work_queue, &piece_buffers, &prog_send) } // 
----------------------------------------------------------------------------// @@ -74,10 +74,10 @@ where fn start_hash_master( accessor: A, num_workers: usize, - recv: Receiver, - work: Arc>, - buffers: Arc, - progress_sender: Sender, + recv: &Receiver, + work: &Arc>, + buffers: &Arc, + progress_sender: &Sender, ) -> ParseResult> where A: Accessor, @@ -144,10 +144,13 @@ where // Wait for all of the workers to finish up the last pieces let mut workers_finished = 0; while workers_finished < num_workers { - match recv.recv() { - Ok(MasterMessage::AcceptPiece(index, piece)) => pieces.push((index, piece)), - Ok(MasterMessage::WorkerFinished) => workers_finished += 1, - Err(_) => panic!("bip_metainfo: Master failed to verify all workers shutdown..."), + let Ok(recv) = recv.recv() else { + panic!("bip_metainfo: Master failed to verify all workers shutdown...") + }; + + match recv { + MasterMessage::AcceptPiece(index, piece) => pieces.push((index, piece)), + MasterMessage::WorkerFinished => workers_finished += 1, } } @@ -164,6 +167,7 @@ where C: FnMut(f64), { for finished_piece in recv { + #[allow(clippy::cast_precision_loss)] let percent_complete = (finished_piece as f64) / (num_pieces as f64); progress(percent_complete); @@ -173,7 +177,7 @@ where // ----------------------------------------------------------------------------// /// Starts a hasher worker which will hash all of the buffers it receives. 
-fn start_hash_worker(send: Sender, work: Arc>, buffers: Arc) { +fn start_hash_worker(send: &Sender, work: &Arc>, buffers: &Arc) { let mut work_to_do = true; // Loop until we are instructed to stop working @@ -276,15 +280,30 @@ mod tests { } } - fn validate_entries_pieces(accessor: MockAccessor, piece_length: usize, num_threads: usize) { + fn validate_entries_pieces(accessor: &MockAccessor, piece_length: usize, num_threads: usize) { let (prog_send, prog_recv) = mpsc::channel(); - let total_num_pieces = ((accessor.as_slice().len() as f64) / (piece_length as f64)).ceil() as u64; - let received_pieces = - worker::start_hasher_workers(&accessor, piece_length, total_num_pieces, num_threads, move |update| { + #[allow(clippy::cast_precision_loss)] + let total_num_pieces = (accessor.as_slice().len() as f64) / (piece_length as f64); + + assert!( + (0.0..=9_223_372_036_854_775_807_f64).contains(&total_num_pieces), /* i64::MAX */ + "Value is outside the range of i64" + ); + + #[allow(clippy::cast_possible_truncation)] + let total_num_pieces: i64 = total_num_pieces.ceil() as i64; + + let received_pieces = worker::start_hasher_workers( + accessor, + piece_length, + total_num_pieces.try_into().unwrap(), + num_threads, + move |update| { prog_send.send(update).unwrap(); - }) - .unwrap(); + }, + ) + .unwrap(); let computed_pieces = accessor .as_slice() @@ -295,7 +314,7 @@ mod tests { let updates_received = prog_recv.iter().count() as u64; - assert_eq!(total_num_pieces, updates_received); + assert_eq!(total_num_pieces, updates_received.try_into().unwrap()); assert_eq!(received_pieces, computed_pieces); } @@ -306,7 +325,7 @@ mod tests { let region_length = DEFAULT_PIECE_LENGTH * DEFAULT_NUM_PIECES; accessor.create_region(region_length); - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 1); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 1); } #[test] @@ -316,7 +335,7 @@ mod tests { let region_length = DEFAULT_PIECE_LENGTH * DEFAULT_NUM_PIECES; 
accessor.create_region(region_length); - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 4); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 4); } #[test] @@ -326,7 +345,7 @@ mod tests { let region_length = DEFAULT_PIECE_LENGTH * DEFAULT_NUM_PIECES + 1; accessor.create_region(region_length); - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 1); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 1); } #[test] @@ -336,7 +355,7 @@ mod tests { let region_length = DEFAULT_PIECE_LENGTH * DEFAULT_NUM_PIECES + 1; accessor.create_region(region_length); - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 4); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 4); } #[test] @@ -352,7 +371,7 @@ mod tests { accessor.create_region(region_length); } - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 1); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 1); } #[test] @@ -368,7 +387,7 @@ mod tests { accessor.create_region(region_length); } - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 4); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 4); } #[test] @@ -385,7 +404,7 @@ mod tests { accessor.create_region(region_length); } - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 1); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 1); } #[test] @@ -402,6 +421,6 @@ mod tests { accessor.create_region(region_length); } - validate_entries_pieces(accessor, DEFAULT_PIECE_LENGTH, 4); + validate_entries_pieces(&accessor, DEFAULT_PIECE_LENGTH, 4); } } diff --git a/packages/metainfo/src/metainfo.rs b/packages/metainfo/src/metainfo.rs index e5b90a6be..fe6d63829 100644 --- a/packages/metainfo/src/metainfo.rs +++ b/packages/metainfo/src/metainfo.rs @@ -28,6 +28,10 @@ pub struct Metainfo { impl Metainfo { /// Read a `Metainfo` from metainfo file bytes. 
+ /// + /// # Errors + /// + /// It would return an error if unable to parse the bytes as a [`Metainfo`] pub fn from_bytes(bytes: B) -> ParseResult where B: AsRef<[u8]>, @@ -80,6 +84,10 @@ impl Metainfo { } /// Retrieve the bencoded bytes for the `Metainfo` file. + /// + /// # Panics + /// + /// It would panic if unable to convert to bytes. #[must_use] pub fn to_bytes(&self) -> Vec { // Since there are no file system accesses here, should be fine to unwrap @@ -90,7 +98,7 @@ impl Metainfo { .set_created_by(self.created_by()) .set_private_flag(self.info().is_private()) // TODO: Revisit this cast... - .set_piece_length(PieceLength::Custom(self.info().piece_length() as usize)) + .set_piece_length(PieceLength::Custom(self.info().piece_length().try_into().unwrap())) .build(1, &self.info, |_| ()) .unwrap() } @@ -158,6 +166,10 @@ pub struct Info { impl Info { /// Read an `Info` from info dictionary bytes. + /// + /// # Errors + /// + /// It would return an error if unable to parse bytes into [`Info`]. pub fn from_bytes(bytes: B) -> ParseResult where B: AsRef<[u8]>, @@ -216,13 +228,17 @@ impl Info { } /// Retrieve the bencoded bytes for the `Info` dictionary. + /// + /// # Panics + /// + /// It would panic if unable to get the bytes. #[must_use] pub fn to_bytes(&self) -> Vec { // Since there are no file system accesses here, should be fine to unwrap InfoBuilder::new() .set_private_flag(self.is_private()) // TODO: Revisit this cast... - .set_piece_length(PieceLength::Custom(self.piece_length() as usize)) + .set_piece_length(PieceLength::Custom(self.piece_length().try_into().unwrap())) .build(1, self, |_| ()) .unwrap() } @@ -446,6 +462,7 @@ mod tests { /// If the metainfo file builds successfully, assertions will be made about the contents of it based /// on the parameters given. 
#[allow(clippy::too_many_arguments)] + #[allow(clippy::too_many_lines)] fn validate_parse_from_params( tracker: Option<&str>, create_date: Option, @@ -456,7 +473,7 @@ mod tests { pieces: Option<&[u8]>, private: Option, directory: Option<&str>, - files: FilesOpt<'_>, + files: &FilesOpt<'_>, ) { let mut root_dict = BencodeMut::new_dict(); let info_hash = { @@ -516,7 +533,7 @@ mod tests { opt_md5.map(|m| file_dict_access.insert(parse::MD5SUM_KEY.into(), ben_bytes!(m))); } - bencode_files_access.push(file_dict) + bencode_files_access.push(file_dict); } } @@ -556,7 +573,7 @@ mod tests { assert_eq!(metainfo_file.creation_date, create_date); assert_eq!(metainfo_file.info().directory(), directory.map(std::convert::AsRef::as_ref)); - assert_eq!(metainfo_file.info().piece_length(), piece_length.unwrap() as u64); + assert_eq!(metainfo_file.info().piece_length(), piece_length.unwrap().try_into().unwrap()); assert_eq!(metainfo_file.info().is_private(), private.map(|private| private == 1)); let pieces = pieces.unwrap(); @@ -577,7 +594,7 @@ mod tests { let meta_file = meta_files.next().unwrap(); let supp_file = supp_files.next().unwrap(); - assert_eq!(meta_file.length(), supp_file.0.unwrap() as u64); + assert_eq!(meta_file.length(), supp_file.0.unwrap().try_into().unwrap()); assert_eq!(meta_file.md5sum(), supp_file.1); let meta_paths: &Path = meta_file.path(); @@ -609,7 +626,7 @@ mod tests { Some(&pieces), None, None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -636,7 +653,7 @@ mod tests { Some(&pieces), None, Some(directory), - Some(files), + &Some(files), ); } @@ -666,7 +683,7 @@ mod tests { Some(&pieces), None, Some(directory), - Some(files), + &Some(files), ); } @@ -689,7 +706,7 @@ mod tests { Some(&pieces), None, None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -714,7 +731,7 @@ mod tests { Some(&pieces), None, None, - 
Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -739,7 +756,7 @@ mod tests { Some(&pieces), None, None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -764,7 +781,7 @@ mod tests { Some(&pieces), None, None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -789,7 +806,7 @@ mod tests { Some(&pieces), None, None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -814,7 +831,7 @@ mod tests { Some(&pieces), Some(private), None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -839,7 +856,7 @@ mod tests { Some(&pieces), Some(private), None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -864,7 +881,7 @@ mod tests { Some(&pieces), Some(private), None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } @@ -886,18 +903,22 @@ mod tests { Some(&pieces), None, None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } #[test] - #[should_panic] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: ParseError(BencodeParse(BencodeParseError(BytesEmpty { pos: 0 }, State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })), State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })" + )] fn negative_parse_from_empty_bytes() { Metainfo::from_bytes(b"").unwrap(); } #[test] - #[should_panic] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: ParseError(BencodeConvert(BencodeConvertError(MissingKey { key: [112, 105, 101, 99, 101, 32, 108, 101, 
110, 103, 116, 104] }, State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })), State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })" + )] fn negative_parse_with_no_piece_length() { let tracker = "udp://dummy_domain.com:8989"; let pieces = [0u8; sha::SHA_HASH_LEN]; @@ -917,12 +938,14 @@ mod tests { Some(&pieces), Some(private), None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } #[test] - #[should_panic] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: ParseError(BencodeConvert(BencodeConvertError(MissingKey { key: [112, 105, 101, 99, 101, 115] }, State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })), State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })" + )] fn negative_parse_with_no_pieces() { let tracker = "udp://dummy_domain.com:8989"; let piece_len = 1024; @@ -940,12 +963,14 @@ mod tests { None, None, None, - Some(vec![(Some(file_len), None, Some(file_paths))]), + &Some(vec![(Some(file_len), None, Some(file_paths))]), ); } #[test] - #[should_panic] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: ParseError(BencodeConvert(BencodeConvertError(MissingKey { key: [102, 105, 108, 101, 115] }, State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })), State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })" + )] fn negative_parse_from_single_file_with_no_file_length() { let tracker = "udp://dummy_domain.com:8989"; let piece_len = 1024; @@ -963,12 +988,14 @@ mod tests { Some(&pieces), None, None, - Some(vec![(None, None, Some(file_paths))]), + &Some(vec![(None, None, Some(file_paths))]), ); } #[test] - #[should_panic] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: ParseError(BencodeConvert(BencodeConvertError(MissingKey { key: [110, 97, 109, 101] }, State { next_error: 
None, backtrace: InternalBacktrace { backtrace: None } })), State { next_error: None, backtrace: InternalBacktrace { backtrace: None } })" + )] fn negative_parse_from_single_file_with_no_file_name() { let tracker = "udp://dummy_domain.com:8989"; let piece_len = 1024; @@ -986,7 +1013,7 @@ mod tests { Some(&pieces), None, None, - Some(vec![(Some(file_len), None, None)]), + &Some(vec![(Some(file_len), None, None)]), ); } } diff --git a/packages/metainfo/src/parse.rs b/packages/metainfo/src/parse.rs index 79f951f3f..a7274ba71 100644 --- a/packages/metainfo/src/parse.rs +++ b/packages/metainfo/src/parse.rs @@ -41,6 +41,7 @@ pub const MD5SUM_KEY: &[u8] = b"md5sum"; pub const PATH_KEY: &[u8] = b"path"; /// Parses the root bencode as a dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_root_dict(root_bencode: &B) -> ParseResult<&dyn BDictAccess> where B: BRefAccess, @@ -49,6 +50,7 @@ where } /// Parses the announce list from the root dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_announce_list(root_dict: &dyn BDictAccess) -> Option<&dyn BListAccess> where B: BRefAccess, @@ -74,6 +76,7 @@ where } /// Parses the announce url from the root dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_announce_url<'a, B>(root_dict: &'a dyn BDictAccess) -> Option<&'a str> where B: BRefAccess + 'a, @@ -82,6 +85,7 @@ where } /// Parses the creation date from the root dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_creation_date(root_dict: &dyn BDictAccess) -> Option where B: BRefAccess, @@ -90,6 +94,7 @@ where } /// Parses the comment from the root dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_comment<'a, B>(root_dict: &'a dyn BDictAccess) -> Option<&'a str> where B: BRefAccess + 'a, @@ -98,6 +103,7 @@ where } /// Parses the created by from the root dictionary. 
+#[allow(clippy::module_name_repetitions)] pub fn parse_created_by<'a, B>(root_dict: &'a dyn BDictAccess) -> Option<&'a str> where B: BRefAccess + 'a, @@ -106,6 +112,7 @@ where } /// Parses the encoding from the root dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_encoding<'a, B>(root_dict: &'a dyn BDictAccess) -> Option<&'a str> where B: BRefAccess + 'a, @@ -114,6 +121,7 @@ where } /// Parses the info dictionary from the root dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_info_bencode(root_dict: &dyn BDictAccess) -> ParseResult<&B> where B: BRefAccess, @@ -124,16 +132,18 @@ where // ----------------------------------------------------------------------------// /// Parses the piece length from the info dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_piece_length(info_dict: &dyn BDictAccess) -> ParseResult where B: BRefAccess, { CONVERT .lookup_and_convert_int(info_dict, PIECE_LENGTH_KEY) - .map(|len| len as u64) + .map(|len| len.try_into().unwrap()) } /// Parses the pieces from the info dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_pieces<'a, B>(info_dict: &'a dyn BDictAccess) -> ParseResult<&'a [u8]> where B: BRefAccess + 'a, @@ -142,6 +152,7 @@ where } /// Parses the private flag from the info dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_private(info_dict: &dyn BDictAccess) -> Option where B: BRefAccess, @@ -150,6 +161,7 @@ where } /// Parses the name from the info dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_name<'a, B>(info_dict: &'a dyn BDictAccess) -> ParseResult<&'a str> where B: BRefAccess + 'a, @@ -158,6 +170,7 @@ where } /// Parses the files list from the info dictionary. 
+#[allow(clippy::module_name_repetitions)] pub fn parse_files_list(info_dict: &dyn BDictAccess) -> ParseResult<&dyn BListAccess> where B: BRefAccess + PartialEq, @@ -168,6 +181,7 @@ where // ----------------------------------------------------------------------------// /// Parses the file dictionary from the file bencode. +#[allow(clippy::module_name_repetitions)] pub fn parse_file_dict(file_bencode: &B) -> ParseResult<&dyn BDictAccess> where B: BRefAccess, @@ -176,16 +190,18 @@ where } /// Parses the length from the info or file dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_length(info_or_file_dict: &dyn BDictAccess) -> ParseResult where B: BRefAccess, { CONVERT .lookup_and_convert_int(info_or_file_dict, LENGTH_KEY) - .map(|len| len as u64) + .map(|len| len.try_into().unwrap()) } /// Parses the md5sum from the info or file dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_md5sum<'a, B>(info_or_file_dict: &'a dyn BDictAccess) -> Option<&'a [u8]> where B: BRefAccess + 'a, @@ -194,6 +210,7 @@ where } /// Parses the path list from the file dictionary. +#[allow(clippy::module_name_repetitions)] pub fn parse_path_list(file_dict: &dyn BDictAccess) -> ParseResult<&dyn BListAccess> where B: BRefAccess, @@ -202,6 +219,7 @@ where } /// Parses the path string from the path bencode. +#[allow(clippy::module_name_repetitions)] pub fn parse_path_str(path_bencode: &B) -> ParseResult<&str> where B: BRefAccess, diff --git a/packages/peer/src/codec.rs b/packages/peer/src/codec.rs index 18636e36b..18be8ce58 100644 --- a/packages/peer/src/codec.rs +++ b/packages/peer/src/codec.rs @@ -8,6 +8,7 @@ use tokio_io::codec::{Decoder, Encoder}; use crate::protocol::PeerProtocol; /// Codec operating over some `PeerProtocol`. +#[allow(clippy::module_name_repetitions)] pub struct PeerProtocolCodec

{ protocol: P, max_payload: Option, diff --git a/packages/peer/src/lib.rs b/packages/peer/src/lib.rs index 145f971ff..03d09cbe1 100644 --- a/packages/peer/src/lib.rs +++ b/packages/peer/src/lib.rs @@ -30,6 +30,7 @@ pub mod messages { } /// `PeerManager` error types. +#[allow(clippy::module_name_repetitions)] pub mod error { pub use crate::manager::error::{PeerManagerError, PeerManagerErrorKind, PeerManagerResult, PeerManagerResultExt}; } diff --git a/packages/peer/src/manager/builder.rs b/packages/peer/src/manager/builder.rs index 1bc5ba685..27bce0c0d 100644 --- a/packages/peer/src/manager/builder.rs +++ b/packages/peer/src/manager/builder.rs @@ -14,6 +14,7 @@ const DEFAULT_HEARTBEAT_INTERVAL_MILLIS: u64 = 60 * 1000; const DEFAULT_HEARTBEAT_TIMEOUT_MILLIS: u64 = 2 * 60 * 1000; /// Builder for configuring a `PeerManager`. +#[allow(clippy::module_name_repetitions)] #[derive(Default, Copy, Clone)] pub struct PeerManagerBuilder { peer: usize, diff --git a/packages/peer/src/manager/future/mod.rs b/packages/peer/src/manager/future/mod.rs index fe0da4464..afc043fb7 100644 --- a/packages/peer/src/manager/future/mod.rs +++ b/packages/peer/src/manager/future/mod.rs @@ -119,16 +119,19 @@ where Err(_) => return Err(RecurringTimeoutError::Disconnect), } + let Ok(poll) = self.sleep.poll() else { + panic!("bip_peer: Timer Error In Manager Stream, Timer Capacity Is Probably Too Small...") + }; + // Now check the timer - match self.sleep.poll() { - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(_)) => { + match poll { + Async::NotReady => Ok(Async::NotReady), + Async::Ready(()) => { // Reset the timeout self.sleep = self.timer.sleep(self.dur); Err(RecurringTimeoutError::Timeout) } - Err(_) => panic!("bip_peer: Timer Error In Manager Stream, Timer Capacity Is Probably Too Small..."), } } } diff --git a/packages/peer/src/manager/mod.rs b/packages/peer/src/manager/mod.rs index 812b6be24..86a16cba6 100644 --- a/packages/peer/src/manager/mod.rs +++ 
b/packages/peer/src/manager/mod.rs @@ -28,6 +28,7 @@ mod task; const DEFAULT_TIMER_SLOTS: usize = 2048; /// Manages a set of peers with beating hearts. +#[allow(clippy::module_name_repetitions)] pub struct PeerManager

where P: Sink + Stream, @@ -220,7 +221,7 @@ where if took_lock { // Just notify a single person waiting on the lock to reduce contention if let Some(task) = self.task_queue.pop() { - task.notify() + task.notify(); } } @@ -270,7 +271,7 @@ where if took_lock { // Just notify a single person waiting on the lock to reduce contention if let Some(task) = self.task_queue.pop() { - task.notify() + task.notify(); } } @@ -355,6 +356,7 @@ where //----------------------------------------------------------------------------// /// Stream half of a `PeerManager`. +#[allow(clippy::option_option)] pub struct PeerManagerStream

where P: Sink + Stream, @@ -415,7 +417,7 @@ where if took_lock { // Just notify a single person waiting on the lock to reduce contention if let Some(task) = self.task_queue.pop() { - task.notify() + task.notify(); } } diff --git a/packages/peer/src/manager/task.rs b/packages/peer/src/manager/task.rs index a03283a10..9d2ea5376 100644 --- a/packages/peer/src/manager/task.rs +++ b/packages/peer/src/manager/task.rs @@ -26,6 +26,7 @@ enum PeerError { PeerNoHeartbeat, } +#[allow(dead_code)] enum MergedError { Peer(PeerError), // Fake error types (used to stash future "futures" into an error type to be @@ -37,6 +38,7 @@ enum MergedError { //----------------------------------------------------------------------------// +#[allow(clippy::too_many_lines)] pub fn run_peer

( peer: P, info: PeerInfo, @@ -140,7 +142,7 @@ where Err((PeerError::ManagerDisconnect, _)) => { Err(MergedError::Peer(PeerError::ManagerDisconnect)) } - Err((PeerError::PeerDisconnect, merged_stream)) => Ok(( + Err((PeerError::PeerDisconnect | PeerError::PeerNoHeartbeat, merged_stream)) => Ok(( merged_stream, None, None, @@ -154,13 +156,6 @@ where Some(OPeerManagerMessage::PeerError(info, err)), false, )), - Err((PeerError::PeerNoHeartbeat, merged_stream)) => Ok(( - merged_stream, - None, - None, - Some(OPeerManagerMessage::PeerDisconnect(info)), - false, - )), }; match result { diff --git a/packages/peer/src/message/bencode_util.rs b/packages/peer/src/message/bencode_util.rs index 4e5363e35..fe9c3885a 100644 --- a/packages/peer/src/message/bencode_util.rs +++ b/packages/peer/src/message/bencode_util.rs @@ -40,7 +40,8 @@ where if let Ok(ben_id_map) = CONVERT.lookup_and_convert_dict(root, ID_MAP_KEY) { for (id, ben_value) in ben_id_map.to_list() { if let (Ok(str_id), Ok(value)) = (str::from_utf8(id.as_ref()), CONVERT.convert_int(ben_value, id)) { - id_map.insert(ExtendedType::from_id(str_id), value as u8); + let value: u8 = value.try_into().unwrap(); + id_map.insert(ExtendedType::from_id(str_id), value); } } } @@ -66,11 +67,8 @@ where .lookup_and_convert_int(root, CLIENT_TCP_PORT_KEY) .ok() .and_then(|port| { - if i64::from(port as u16) == port { - Some(port as u16) - } else { - None - } + let port: Option = port.try_into().ok(); + port }) } @@ -172,7 +170,7 @@ where { CONVERT .lookup_and_convert_int(root, MESSAGE_TYPE_KEY) - .map(|msg_type| msg_type as u8) + .map(|msg_type| msg_type.try_into().unwrap()) } pub fn parse_piece_index(root: &dyn BDictAccess) -> io::Result diff --git a/packages/peer/src/message/bits_ext/handshake.rs b/packages/peer/src/message/bits_ext/handshake.rs index a934c1d6b..7301f0fcb 100644 --- a/packages/peer/src/message/bits_ext/handshake.rs +++ b/packages/peer/src/message/bits_ext/handshake.rs @@ -295,12 +295,24 @@ impl ExtendedMessage { } 
/// Write the `ExtendedMessage` out to the given writer. + /// + /// # Errors + /// + /// It will return an IP error if unable to write the bytes. + /// + /// # Panics + /// + /// It would panic if the bencode size it too large. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, { let real_length = 2 + self.bencode_size(); - message::write_length_id_pair(&mut writer, real_length as u32, Some(bits_ext::EXTENDED_MESSAGE_ID))?; + message::write_length_id_pair( + &mut writer, + real_length.try_into().unwrap(), + Some(bits_ext::EXTENDED_MESSAGE_ID), + )?; writer.write_all(&[bits_ext::EXTENDED_MESSAGE_HANDSHAKE_ID]); @@ -353,6 +365,10 @@ impl ExtendedMessage { } /// Retrieve a raw `BencodeRef` representing the current message. + /// + /// # Panics + /// + /// It would panic if unable to decode the bencode. pub fn bencode_ref(&self) -> BencodeRef<'_> { // We already verified that this is valid bencode BencodeRef::decode(&self.raw_bencode, BDecodeOpt::default()).unwrap() diff --git a/packages/peer/src/message/bits_ext/mod.rs b/packages/peer/src/message/bits_ext/mod.rs index 41c7d2684..b350717ba 100644 --- a/packages/peer/src/message/bits_ext/mod.rs +++ b/packages/peer/src/message/bits_ext/mod.rs @@ -43,6 +43,11 @@ impl BitsExtensionMessage { parse_extension(bytes) } + /// Writes bytes into the current [`BitsExtensionMessage`]. + /// + /// # Errors + /// + /// This function will return an error if unable to write the bytes. 
pub fn write_bytes(&self, writer: W) -> io::Result<()> where W: Write, @@ -68,7 +73,7 @@ fn parse_extension(mut bytes: Bytes) -> IResult<(), io::Result map!( - call!(PortMessage::parse_bytes, bytes.split_off(message::HEADER_LEN)), + call!(PortMessage::parse_bytes, &bytes.split_off(message::HEADER_LEN)), |res_port| res_port.map(BitsExtensionMessage::Port) ) )) | ignore_input!(switch!(header_bytes.as_ref(), throwaway_input!(tuple!(be_u32, be_u8, be_u8)), diff --git a/packages/peer/src/message/bits_ext/port.rs b/packages/peer/src/message/bits_ext/port.rs index 25398b2f1..3bc45eca4 100644 --- a/packages/peer/src/message/bits_ext/port.rs +++ b/packages/peer/src/message/bits_ext/port.rs @@ -9,6 +9,7 @@ use crate::message; use crate::message::bits_ext; /// Message for notifying a peer of our DHT port. +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct PortMessage { port: u16, @@ -20,7 +21,7 @@ impl PortMessage { PortMessage { port } } - pub fn parse_bytes(_input: (), bytes: Bytes) -> IResult<(), io::Result> { + pub fn parse_bytes(_input: (), bytes: &Bytes) -> IResult<(), io::Result> { match parse_port(bytes.as_ref()) { IResult::Done(_, result) => IResult::Done((), Ok(result)), IResult::Error(err) => IResult::Error(err), @@ -28,6 +29,11 @@ impl PortMessage { } } + /// Writes bytes into the current [`PortMessage`]. + /// + /// # Errors + /// + /// This function will return an error if unable to write the bytes. 
pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, diff --git a/packages/peer/src/message/mod.rs b/packages/peer/src/message/mod.rs index 30b004b13..c11dbe3d2 100644 --- a/packages/peer/src/message/mod.rs +++ b/packages/peer/src/message/mod.rs @@ -46,14 +46,19 @@ mod null; mod prot_ext; mod standard; +#[allow(clippy::module_name_repetitions)] pub use crate::message::bits_ext::{BitsExtensionMessage, ExtendedMessage, ExtendedMessageBuilder, ExtendedType, PortMessage}; +#[allow(clippy::module_name_repetitions)] pub use crate::message::null::NullProtocolMessage; +#[allow(clippy::module_name_repetitions)] pub use crate::message::prot_ext::{ PeerExtensionProtocolMessage, UtMetadataDataMessage, UtMetadataMessage, UtMetadataRejectMessage, UtMetadataRequestMessage, }; +#[allow(clippy::module_name_repetitions)] pub use crate::message::standard::{BitFieldIter, BitFieldMessage, CancelMessage, HaveMessage, PieceMessage, RequestMessage}; /// Enumeration of messages for `PeerWireProtocol`. +#[allow(clippy::module_name_repetitions)] pub enum PeerWireProtocolMessage

where P: PeerProtocol, @@ -62,7 +67,7 @@ where KeepAlive, /// Message to tell a peer we will not be responding to their requests. /// - /// Peers may wish to send *Interested and/or KeepAlive messages. + /// Peers may wish to send *Interested and/or `KeepAlive` messages. Choke, /// Message to tell a peer we will now be responding to their requests. UnChoke, @@ -72,7 +77,7 @@ where UnInterested, /// Message to tell a peer we have some (validated) piece. Have(HaveMessage), - /// Message to effectively send multiple HaveMessages in a single message. + /// Message to effectively send multiple `HaveMessages` in a single message. /// /// This message is only valid when the connection is initiated with the peer. BitField(BitFieldMessage), @@ -108,6 +113,11 @@ impl

PeerWireProtocolMessage

where P: PeerProtocol, { + /// Bytes Needed to encode Byte Slice + /// + /// # Errors + /// + /// This function will not return an error. pub fn bytes_needed(bytes: &[u8]) -> io::Result> { match be_u32(bytes) { // We need 4 bytes for the length, plus whatever the length is... @@ -116,9 +126,14 @@ where } } + /// Parse Bytes into a [`PeerWireProtocolMessage`] + /// + /// # Errors + /// + /// This function will return an error if unable to parse bytes for supplied protocol. pub fn parse_bytes(bytes: Bytes, ext_protocol: &mut P) -> io::Result> { match parse_message(bytes, ext_protocol) { - IResult::Done(_, result) => result, + IResult::Done((), result) => result, _ => Err(io::Error::new( io::ErrorKind::Other, "Failed To Parse PeerWireProtocolMessage", @@ -126,6 +141,11 @@ where } } + /// Write out current states as bytes. + /// + /// # Errors + /// + /// This function will return an error if unable to write bytes. pub fn write_bytes(&self, writer: W, ext_protocol: &mut P) -> io::Result<()> where W: Write, @@ -197,12 +217,7 @@ fn parse_message_length(bytes: &[u8]) -> usize { /// Panics if the conversion from a u32 to usize is not valid. 
fn u32_to_usize(value: u32) -> usize { - assert!( - value as usize as u32 == value, - "bip_peer: Cannot Convert u32 To usize, usize Is Less Than 32-Bits" - ); - - value as usize + value.try_into().expect("it should be able to convert from u32 to usize") } // Since these messages may come over a stream oriented protocol, if a message is incomplete @@ -235,7 +250,7 @@ where Ok(PeerWireProtocolMessage::UnInterested) ) | (HAVE_MESSAGE_LEN, Some(HAVE_MESSAGE_ID)) => map!( - call!(HaveMessage::parse_bytes, bytes.split_off(HEADER_LEN)), + call!(HaveMessage::parse_bytes, &bytes.split_off(HEADER_LEN)), |res_have| res_have.map(|have| PeerWireProtocolMessage::Have(have)) ) | (message_len, Some(BITFIELD_MESSAGE_ID)) => map!( @@ -243,15 +258,15 @@ where |res_bitfield| res_bitfield.map(|bitfield| PeerWireProtocolMessage::BitField(bitfield)) ) | (REQUEST_MESSAGE_LEN, Some(REQUEST_MESSAGE_ID)) => map!( - call!(RequestMessage::parse_bytes, bytes.split_off(HEADER_LEN)), + call!(RequestMessage::parse_bytes, &bytes.split_off(HEADER_LEN)), |res_request| res_request.map(|request| PeerWireProtocolMessage::Request(request)) ) | (message_len, Some(PIECE_MESSAGE_ID)) => map!( - call!(PieceMessage::parse_bytes, bytes.split_off(HEADER_LEN), message_len - 1), + call!(PieceMessage::parse_bytes, &bytes.split_off(HEADER_LEN), message_len - 1), |res_piece| res_piece.map(|piece| PeerWireProtocolMessage::Piece(piece)) ) | (CANCEL_MESSAGE_LEN, Some(CANCEL_MESSAGE_ID)) => map!( - call!(CancelMessage::parse_bytes, bytes.split_off(HEADER_LEN)), + call!(CancelMessage::parse_bytes, &bytes.split_off(HEADER_LEN)), |res_cancel| res_cancel.map(|cancel| PeerWireProtocolMessage::Cancel(cancel)) ) )) | map!(call!(BitsExtensionMessage::parse_bytes, bytes.clone()), |res_bits_ext| { diff --git a/packages/peer/src/message/null.rs b/packages/peer/src/message/null.rs index 294472ff2..ec8976886 100644 --- a/packages/peer/src/message/null.rs +++ b/packages/peer/src/message/null.rs @@ -1,2 +1,3 @@ /// Enumeration of 
messages for `NullProtocol`. +#[allow(clippy::module_name_repetitions)] pub enum NullProtocolMessage {} diff --git a/packages/peer/src/message/prot_ext/mod.rs b/packages/peer/src/message/prot_ext/mod.rs index f2bc088ec..8d53411fe 100644 --- a/packages/peer/src/message/prot_ext/mod.rs +++ b/packages/peer/src/message/prot_ext/mod.rs @@ -30,18 +30,28 @@ impl

PeerExtensionProtocolMessage

where P: PeerProtocol, { + /// Returns the number of bytes needed encode a given slice. + /// + /// # Errors + /// + /// This function should not return an error. pub fn bytes_needed(bytes: &[u8]) -> io::Result> { // Follows same length prefix logic as our normal wire protocol... PeerWireProtocolMessage::

::bytes_needed(bytes) } + /// Parse Bytes to create a [`PeerExtensionProtocolMessage`] + /// + /// # Errors + /// + /// This function will return an error if unable to parse. pub fn parse_bytes( bytes: Bytes, extended: &ExtendedMessage, custom_prot: &mut P, ) -> io::Result> { match parse_extensions(bytes, extended, custom_prot) { - IResult::Done(_, result) => result, + IResult::Done((), result) => result, _ => Err(io::Error::new( io::ErrorKind::Other, "Failed To Parse PeerExtensionProtocolMessage", @@ -49,24 +59,35 @@ where } } + /// Write Bytes from the current state. + /// + /// # Errors + /// + /// This function will return an error if unable to write the bytes. + /// + /// # Panics + /// + /// This function will panic if the message is too long. pub fn write_bytes(&self, mut writer: W, extended: &ExtendedMessage, custom_prot: &mut P) -> io::Result<()> where W: Write, { match self { PeerExtensionProtocolMessage::UtMetadata(msg) => { - let ext_id = if let Some(ext_id) = extended.query_id(&ExtendedType::UtMetadata) { - ext_id - } else { + let Some(ext_id) = extended.query_id(&ExtendedType::UtMetadata) else { return Err(io::Error::new( io::ErrorKind::Other, "Can't Send UtMetadataMessage As We Have No Id Mapping", )); }; - let total_len = (2 + msg.message_size()) as u32; + let total_len = (2 + msg.message_size()); - message::write_length_id_pair(&mut writer, total_len, Some(bits_ext::EXTENDED_MESSAGE_ID))?; + message::write_length_id_pair( + &mut writer, + total_len.try_into().unwrap(), + Some(bits_ext::EXTENDED_MESSAGE_ID), + )?; writer.write_u8(ext_id)?; msg.write_bytes(writer) diff --git a/packages/peer/src/message/prot_ext/ut_metadata.rs b/packages/peer/src/message/prot_ext/ut_metadata.rs index b8ab686ee..f3a4eb0f5 100644 --- a/packages/peer/src/message/prot_ext/ut_metadata.rs +++ b/packages/peer/src/message/prot_ext/ut_metadata.rs @@ -13,6 +13,7 @@ const REJECT_MESSAGE_TYPE_ID: u8 = 2; const ROOT_ERROR_KEY: &str = "PeerExtensionProtocolMessage"; /// Enumeration 
of messages for `PeerExtensionProtocolMessage::UtMetadata`. +#[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub enum UtMetadataMessage { Request(UtMetadataRequestMessage), @@ -21,6 +22,11 @@ pub enum UtMetadataMessage { } impl UtMetadataMessage { + /// Create a new [`UtMetadataMessage`] from [`Bytes`] + /// + /// # Errors + /// + /// This function will return an error if unable to parse given bytes into type. pub fn parse_bytes(mut bytes: Bytes) -> io::Result { // Our bencode is pretty flat, and we don't want to enforce a full decode, as data // messages have the raw data appended outside of the bencode structure... @@ -38,11 +44,11 @@ impl UtMetadataMessage { match msg_type { REQUEST_MESSAGE_TYPE_ID => Ok(UtMetadataMessage::Request(UtMetadataRequestMessage::with_bytes( piece, - bencode_bytes, + &bencode_bytes, ))), REJECT_MESSAGE_TYPE_ID => Ok(UtMetadataMessage::Reject(UtMetadataRejectMessage::with_bytes( piece, - bencode_bytes, + &bencode_bytes, ))), DATA_MESSAGE_TYPE_ID => { let total_size = bencode_util::parse_total_size(bencode_dict)?; @@ -51,7 +57,7 @@ impl UtMetadataMessage { piece, total_size, extra_bytes, - bencode_bytes, + &bencode_bytes, ))) } other => Err(io::Error::new( @@ -67,6 +73,11 @@ impl UtMetadataMessage { } } + /// Writes Bytes from Current State + /// + /// # Errors + /// + /// This function will return an error if unable to write the bytes. pub fn write_bytes(&self, writer: W) -> io::Result<()> where W: Write, @@ -90,6 +101,7 @@ impl UtMetadataMessage { // ----------------------------------------------------------------------------// /// Message for requesting a piece of metadata from a peer. 
+#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct UtMetadataRequestMessage { piece: i64, @@ -112,13 +124,18 @@ impl UtMetadataRequestMessage { } } - pub fn with_bytes(piece: i64, bytes: Bytes) -> UtMetadataRequestMessage { + pub fn with_bytes(piece: i64, bytes: &Bytes) -> UtMetadataRequestMessage { UtMetadataRequestMessage { piece, bencode_size: bytes.len(), } } + /// Writes bytes from the current state. + /// + /// # Errors + /// + /// This function will return an error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -144,6 +161,7 @@ impl UtMetadataRequestMessage { } /// Message for sending a piece of metadata from a peer. +#[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct UtMetadataDataMessage { piece: i64, @@ -170,7 +188,7 @@ impl UtMetadataDataMessage { } } - pub fn with_bytes(piece: i64, total_size: i64, data: Bytes, bytes: Bytes) -> UtMetadataDataMessage { + pub fn with_bytes(piece: i64, total_size: i64, data: Bytes, bytes: &Bytes) -> UtMetadataDataMessage { UtMetadataDataMessage { piece, total_size, @@ -179,6 +197,11 @@ impl UtMetadataDataMessage { } } + /// Write Bytes from current state. + /// + /// # Errors + /// + /// This function will return an error if unable to write bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -213,6 +236,7 @@ impl UtMetadataDataMessage { } /// Message for rejecting a request for metadata from a peer. 
+#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct UtMetadataRejectMessage { piece: i64, @@ -235,13 +259,18 @@ impl UtMetadataRejectMessage { } } - pub fn with_bytes(piece: i64, bytes: Bytes) -> UtMetadataRejectMessage { + pub fn with_bytes(piece: i64, bytes: &Bytes) -> UtMetadataRejectMessage { UtMetadataRejectMessage { piece, bencode_size: bytes.len(), } } + /// Write bytes from the current state. + /// + /// # Errors + /// + /// This function will return an error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, diff --git a/packages/peer/src/message/standard.rs b/packages/peer/src/message/standard.rs index adcd8fb87..238080851 100644 --- a/packages/peer/src/message/standard.rs +++ b/packages/peer/src/message/standard.rs @@ -18,10 +18,15 @@ impl HaveMessage { HaveMessage { piece_index } } - pub fn parse_bytes(_input: (), bytes: Bytes) -> IResult<(), io::Result> { + pub fn parse_bytes(_input: (), bytes: &Bytes) -> IResult<(), io::Result> { throwaway_input!(parse_have(bytes.as_ref())) } + /// Write-out current state as bytes. + /// + /// # Errors + /// + /// This function will return an error if unable to write bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -71,12 +76,25 @@ impl BitFieldMessage { } } + /// Write-out current state as bytes. + /// + /// # Errors + /// + /// This function will return an error if unable to write bytes. + /// + /// # Panics + /// + /// This function will panic if the the length is too long. 
pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, { - let actual_length = (1 + self.bytes.len()) as u32; - message::write_length_id_pair(&mut writer, actual_length, Some(message::BITFIELD_MESSAGE_ID))?; + let actual_length = self.bytes.len() + 1; + message::write_length_id_pair( + &mut writer, + actual_length.try_into().unwrap(), + Some(message::BITFIELD_MESSAGE_ID), + )?; writer.write_all(&self.bytes) } @@ -85,6 +103,7 @@ impl BitFieldMessage { &self.bytes } + #[allow(clippy::iter_without_into_iter)] pub fn iter(&self) -> BitFieldIter { BitFieldIter::new(self.bytes.clone()) } @@ -112,7 +131,7 @@ impl Iterator for BitFieldIter { let opt_byte = self.bytes.get(byte_in_bytes).copied(); opt_byte.and_then(|byte| { - let have_message = HaveMessage::new(self.cur_bit as u32); + let have_message = HaveMessage::new(self.cur_bit.try_into().unwrap()); self.cur_bit += 1; if (byte << bit_in_byte) >> 7 == 1 { @@ -144,10 +163,19 @@ impl RequestMessage { } } - pub fn parse_bytes(_input: (), bytes: Bytes) -> IResult<(), io::Result> { + pub fn parse_bytes(_input: (), bytes: &Bytes) -> IResult<(), io::Result> { throwaway_input!(parse_request(bytes.as_ref())) } + /// Write-out current state as bytes. + /// + /// # Errors + /// + /// This function will return an error if unable to write bytes. + /// + /// # Panics + /// + /// This function will panic if the `block_length` is too large. 
pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -156,7 +184,7 @@ impl RequestMessage { writer.write_u32::(self.piece_index)?; writer.write_u32::(self.block_offset)?; - writer.write_u32::(self.block_length as u32) + writer.write_u32::(self.block_length.try_into().unwrap()) } #[must_use] @@ -204,16 +232,29 @@ impl PieceMessage { } } - pub fn parse_bytes(_input: (), bytes: Bytes, len: u32) -> IResult<(), io::Result> { - throwaway_input!(parse_piece(&bytes, len)) + pub fn parse_bytes(_input: (), bytes: &Bytes, len: u32) -> IResult<(), io::Result> { + throwaway_input!(parse_piece(bytes, len)) } + /// Write-out current state as bytes. + /// + /// # Errors + /// + /// This function will return an error if unable to write bytes. + /// + /// # Panics + /// + /// This function will panic if the block length is too large. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, { - let actual_length = (9 + self.block_length()) as u32; - message::write_length_id_pair(&mut writer, actual_length, Some(message::PIECE_MESSAGE_ID))?; + let actual_length = self.block_length() + 9; + message::write_length_id_pair( + &mut writer, + actual_length.try_into().unwrap(), + Some(message::PIECE_MESSAGE_ID), + )?; writer.write_u32::(self.piece_index)?; writer.write_u32::(self.block_offset)?; @@ -268,10 +309,19 @@ impl CancelMessage { } } - pub fn parse_bytes(_input: (), bytes: Bytes) -> IResult<(), io::Result> { + pub fn parse_bytes(_input: (), bytes: &Bytes) -> IResult<(), io::Result> { throwaway_input!(parse_cancel(bytes.as_ref())) } + /// Write-out current state as bytes. + /// + /// # Errors + /// + /// This function will return an error if unable to write bytes. + /// + /// # Panics + /// + /// This function will panic if the block length is too large. 
pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -280,7 +330,7 @@ impl CancelMessage { writer.write_u32::(self.piece_index)?; writer.write_u32::(self.block_offset)?; - writer.write_u32::(self.block_length as u32) + writer.write_u32::(self.block_length.try_into().unwrap()) } #[must_use] diff --git a/packages/peer/src/protocol/mod.rs b/packages/peer/src/protocol/mod.rs index 828180055..3b204abbe 100644 --- a/packages/peer/src/protocol/mod.rs +++ b/packages/peer/src/protocol/mod.rs @@ -10,6 +10,7 @@ pub mod unit; pub mod wire; /// Trait for implementing a bittorrent protocol message. +#[allow(clippy::module_name_repetitions)] pub trait PeerProtocol { /// Type of message the protocol operates with. type ProtocolMessage; @@ -21,12 +22,24 @@ pub trait PeerProtocol { /// If none is returned, it means we need more bytes to determine the number /// of bytes needed. If an error is returned, it means the connection should /// be dropped, as probably the message exceeded some maximum length. + /// + /// # Errors + /// + /// This function will return an IO result if unable to calculate the bytes needed. fn bytes_needed(&mut self, bytes: &[u8]) -> io::Result>; /// Parse a `ProtocolMessage` from the given bytes. + /// + /// # Errors + /// + /// This function will return an IO error if unable to parse the bytes into a [`ProtocolMessage`]. fn parse_bytes(&mut self, bytes: Bytes) -> io::Result; /// Write a `ProtocolMessage` to the given writer. + /// + /// # Errors + /// + /// This function will return an error if it fails to write-out. fn write_bytes(&mut self, message: &Self::ProtocolMessage, writer: W) -> io::Result<()> where W: Write; @@ -48,6 +61,7 @@ pub trait PeerProtocol { /// We need to pass the `ExtensionMessage` down to them before we start receiving /// messages with those ids (otherwise we will receive unrecognized messages and /// kill the connection). 
+#[allow(clippy::module_name_repetitions)] pub trait NestedPeerProtocol { /// Notify a nested protocol that we have received the given message. fn received_message(&mut self, message: &M); diff --git a/packages/peer/src/protocol/null.rs b/packages/peer/src/protocol/null.rs index 71af0c43c..87741b680 100644 --- a/packages/peer/src/protocol/null.rs +++ b/packages/peer/src/protocol/null.rs @@ -13,7 +13,7 @@ use crate::protocol::{NestedPeerProtocol, PeerProtocol}; /// /// Of course, you should make sure that you don't tell peers /// that you support any extended messages. - +#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct NullProtocol; diff --git a/packages/peer/src/protocol/unit.rs b/packages/peer/src/protocol/unit.rs index 5483ae072..b6f492c27 100644 --- a/packages/peer/src/protocol/unit.rs +++ b/packages/peer/src/protocol/unit.rs @@ -5,6 +5,7 @@ use bytes::Bytes; use crate::protocol::{NestedPeerProtocol, PeerProtocol}; /// Unit protocol which will always return a unit if called. 
+#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct UnitProtocol; diff --git a/packages/peer/test/mod.rs b/packages/peer/test/mod.rs index 9e5e13de6..fcbaff09d 100644 --- a/packages/peer/test/mod.rs +++ b/packages/peer/test/mod.rs @@ -36,7 +36,7 @@ impl Stream for ConnectedChannel { fn poll(&mut self) -> Poll, Self::Error> { self.recv .poll() - .map_err(|_| io::Error::new(io::ErrorKind::Other, "Receiver Failed To Receive")) + .map_err(|()| io::Error::new(io::ErrorKind::Other, "Receiver Failed To Receive")) } } diff --git a/packages/peer/test/peer_manager_send_backpressure.rs b/packages/peer/test/peer_manager_send_backpressure.rs index cd5b6de44..5f33ac2b9 100644 --- a/packages/peer/test/peer_manager_send_backpressure.rs +++ b/packages/peer/test/peer_manager_send_backpressure.rs @@ -12,11 +12,11 @@ use crate::ConnectedChannel; #[test] fn positive_peer_manager_send_backpressure() { + type Peer = ConnectedChannel, PeerWireProtocolMessage>; + let mut core = Core::new().unwrap(); let manager = PeerManagerBuilder::new().with_peer_capacity(1).build(core.handle()); - type Peer = ConnectedChannel, PeerWireProtocolMessage>; - // Create two peers let (peer_one, peer_two): (Peer, Peer) = crate::connected_channel(5); let peer_one_info = PeerInfo::new( diff --git a/packages/select/src/discovery/mod.rs b/packages/select/src/discovery/mod.rs index 676bcfd3a..16acede48 100644 --- a/packages/select/src/discovery/mod.rs +++ b/packages/select/src/discovery/mod.rs @@ -23,7 +23,7 @@ pub enum IDiscoveryMessage { Control(ControlMessage), /// Find peers and download the metainfo for the `InfoHash`. DownloadMetainfo(InfoHash), - /// Received a UtMetadata message. + /// Received a `UtMetadata` message. ReceivedUtMetadataMessage(PeerInfo, UtMetadataMessage), } @@ -34,7 +34,7 @@ pub enum ODiscoveryMessage { SendDhtAnnounce(InfoHash), /// Send a udp tracker announce for the `InfoHash`. 
SendUdpTrackerAnnounce(InfoHash, SocketAddr, ClientState), - /// Send a UtMetadata message. + /// Send a `UtMetadata` message. SendUtMetadataMessage(PeerInfo, UtMetadataMessage), /// We have finished downloading the given `Metainfo`. DownloadedMetainfo(Metainfo), diff --git a/packages/select/src/discovery/ut_metadata.rs b/packages/select/src/discovery/ut_metadata.rs index 5354d8f9c..d24e0aa85 100644 --- a/packages/select/src/discovery/ut_metadata.rs +++ b/packages/select/src/discovery/ut_metadata.rs @@ -59,6 +59,7 @@ struct ActivePeers { /// is received, and will be served when /// `IDiscoveryMessage::Control(ControlMessage::AddTorrent)` is received. +#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct UtMetadataModule { completed_map: HashMap>, @@ -85,7 +86,7 @@ impl UtMetadataModule { } } - fn add_torrent(&mut self, metainfo: Metainfo) -> StartSend> { + fn add_torrent(&mut self, metainfo: &Metainfo) -> StartSend> { let info_hash = metainfo.info().info_hash(); match self.completed_map.entry(info_hash) { @@ -101,7 +102,7 @@ impl UtMetadataModule { } } - fn remove_torrent(&mut self, metainfo: Metainfo) -> StartSend> { + fn remove_torrent(&mut self, metainfo: &Metainfo) -> StartSend> { if self.completed_map.remove(&metainfo.info().info_hash()).is_none() { Err(Box::new(DiscoveryError::from_kind( DiscoveryErrorKind::InvalidMetainfoNotExists { @@ -113,7 +114,7 @@ impl UtMetadataModule { } } - fn add_peer(&mut self, info: PeerInfo, ext_info: &ExtendedPeerInfo) -> StartSend> { + fn add_peer(&mut self, info: PeerInfo, ext_info: &ExtendedPeerInfo) -> futures::AsyncSink { let our_support = ext_info .our_message() .and_then(|msg| msg.query_id(&ExtendedType::UtMetadata)) @@ -143,10 +144,10 @@ impl UtMetadataModule { .insert(info); } - Ok(AsyncSink::Ready) + AsyncSink::Ready } - fn remove_peer(&mut self, info: PeerInfo) -> StartSend> { + fn remove_peer(&mut self, info: PeerInfo) -> futures::AsyncSink { let empty_peers = if let Some(active_peers) = 
self.active_peers.get_mut(info.hash()) { active_peers.peers.remove(&info); @@ -159,10 +160,10 @@ impl UtMetadataModule { self.active_peers.remove(info.hash()); } - Ok(AsyncSink::Ready) + AsyncSink::Ready } - fn apply_tick(&mut self, duration: Duration) -> StartSend> { + fn apply_tick(&mut self, duration: Duration) -> futures::AsyncSink { let active_requests = &mut self.active_requests; let active_peers = &mut self.active_peers; let pending_map = &mut self.pending_map; @@ -193,33 +194,29 @@ impl UtMetadataModule { active_request.left -= duration; } - Ok(AsyncSink::Ready) + AsyncSink::Ready } - fn download_metainfo(&mut self, hash: InfoHash) -> StartSend> { + fn download_metainfo(&mut self, hash: InfoHash) -> futures::AsyncSink { self.pending_map.entry(hash).or_insert(None); - Ok(AsyncSink::Ready) + AsyncSink::Ready } - fn recv_request( - &mut self, - info: PeerInfo, - request: UtMetadataRequestMessage, - ) -> StartSend> { + fn recv_request(&mut self, info: PeerInfo, request: UtMetadataRequestMessage) -> futures::AsyncSink { if self.peer_requests.len() == MAX_PEER_REQUESTS { - Ok(AsyncSink::NotReady(IDiscoveryMessage::ReceivedUtMetadataMessage( + AsyncSink::NotReady(IDiscoveryMessage::ReceivedUtMetadataMessage( info, UtMetadataMessage::Request(request), - ))) + )) } else { self.peer_requests.push_back(PeerRequest { send_to: info, request }); - Ok(AsyncSink::Ready) + AsyncSink::Ready } } - fn recv_data(&mut self, info: PeerInfo, data: UtMetadataDataMessage) -> StartSend> { + fn recv_data(&mut self, info: PeerInfo, data: &UtMetadataDataMessage) -> futures::AsyncSink { // See if we can find the request that we made to the peer for that piece let opt_index = self .active_requests @@ -231,7 +228,8 @@ impl UtMetadataModule { self.active_requests.swap_remove(index); if let Some(&mut Some(ref mut pending)) = self.pending_map.get_mut(info.hash()) { - let data_offset = (data.piece() as usize) * MAX_REQUEST_SIZE; + let piece: usize = data.piece().try_into().unwrap(); + let 
data_offset = piece.checked_mul(MAX_REQUEST_SIZE).unwrap(); pending.left -= 1; (&mut pending.bytes.as_mut_slice()[data_offset..]) @@ -240,16 +238,12 @@ impl UtMetadataModule { } } - Ok(AsyncSink::Ready) + AsyncSink::Ready } - fn recv_reject( - &mut self, - _info: PeerInfo, - _reject: UtMetadataRejectMessage, - ) -> StartSend> { + fn recv_reject(_info: PeerInfo, _reject: UtMetadataRejectMessage) -> futures::AsyncSink { // TODO: Remove any requests after receiving a reject, for now, we will just timeout - Ok(AsyncSink::Ready) + AsyncSink::Ready } //-------------------------------------------------------------------------------// @@ -310,9 +304,9 @@ impl UtMetadataModule { fn retrieve_piece_response(&mut self) -> Option>> { while let Some(request) = self.peer_requests.pop_front() { let hash = request.send_to.hash(); - let piece = request.request.piece(); + let piece: usize = request.request.piece().try_into().unwrap(); - let start = piece as usize * MAX_REQUEST_SIZE; + let start = piece * MAX_REQUEST_SIZE; let end = start + MAX_REQUEST_SIZE; if let Some(data) = self.completed_map.get(hash) { @@ -321,15 +315,20 @@ impl UtMetadataModule { let mut info_payload = BytesMut::with_capacity(info_slice.len()); info_payload.extend_from_slice(info_slice); - let message = UtMetadataDataMessage::new(piece, info_slice.len() as i64, info_payload.freeze()); + let message = UtMetadataDataMessage::new( + piece.try_into().unwrap(), + info_slice.len().try_into().unwrap(), + info_payload.freeze(), + ); return Some(Ok(ODiscoveryMessage::SendUtMetadataMessage( request.send_to, UtMetadataMessage::Data(message), ))); - } else { - // Peer asked for a piece outside of the range...don't respond to that } + // else { + // // Peer asked for a piece outside of the range...don't respond to that + // } } } @@ -428,7 +427,7 @@ fn generate_active_request(message: UtMetadataRequestMessage, peer: PeerInfo) -> } fn pending_info_from_metadata_size(metadata_size: i64) -> PendingInfo { - let 
cast_metadata_size = metadata_size as usize; + let cast_metadata_size: usize = metadata_size.try_into().unwrap(); let bytes = vec![0u8; cast_metadata_size]; let mut messages = Vec::new(); @@ -440,7 +439,7 @@ fn pending_info_from_metadata_size(metadata_size: i64) -> PendingInfo { }; for index in 0..num_pieces { - messages.push(UtMetadataRequestMessage::new((index) as i64)); + messages.push(UtMetadataRequestMessage::new(index.try_into().unwrap())); } PendingInfo { @@ -458,8 +457,7 @@ impl ExtendedListener for UtMetadataModule { } fn on_update(&mut self, info: &PeerInfo, extended: &ExtendedPeerInfo) { - self.add_peer(*info, extended) - .expect("bip_select: UtMetadataModule::on_update Failed To Add Peer..."); + self.add_peer(*info, extended); // Check if we need to unblock the stream after performing our work self.check_stream_unblock(); @@ -474,16 +472,22 @@ impl Sink for UtMetadataModule { fn start_send(&mut self, item: Self::SinkItem) -> StartSend { let start_send = match item { - IDiscoveryMessage::Control(ControlMessage::AddTorrent(metainfo)) => self.add_torrent(metainfo), - IDiscoveryMessage::Control(ControlMessage::RemoveTorrent(metainfo)) => self.remove_torrent(metainfo), + IDiscoveryMessage::Control(ControlMessage::AddTorrent(metainfo)) => self.add_torrent(&metainfo), + IDiscoveryMessage::Control(ControlMessage::RemoveTorrent(metainfo)) => self.remove_torrent(&metainfo), // don't add the peer yet, use listener to get notified when they send extension messages IDiscoveryMessage::Control(ControlMessage::PeerConnected(_)) => Ok(AsyncSink::Ready), - IDiscoveryMessage::Control(ControlMessage::PeerDisconnected(info)) => self.remove_peer(info), - IDiscoveryMessage::Control(ControlMessage::Tick(duration)) => self.apply_tick(duration), - IDiscoveryMessage::DownloadMetainfo(hash) => self.download_metainfo(hash), - IDiscoveryMessage::ReceivedUtMetadataMessage(info, UtMetadataMessage::Request(msg)) => self.recv_request(info, msg), - 
IDiscoveryMessage::ReceivedUtMetadataMessage(info, UtMetadataMessage::Data(msg)) => self.recv_data(info, msg), - IDiscoveryMessage::ReceivedUtMetadataMessage(info, UtMetadataMessage::Reject(msg)) => self.recv_reject(info, msg), + IDiscoveryMessage::Control(ControlMessage::PeerDisconnected(info)) => StartSend::Ok(self.remove_peer(info)), + IDiscoveryMessage::Control(ControlMessage::Tick(duration)) => StartSend::Ok(self.apply_tick(duration)), + IDiscoveryMessage::DownloadMetainfo(hash) => StartSend::Ok(self.download_metainfo(hash)), + IDiscoveryMessage::ReceivedUtMetadataMessage(info, UtMetadataMessage::Request(msg)) => { + StartSend::Ok(self.recv_request(info, msg)) + } + IDiscoveryMessage::ReceivedUtMetadataMessage(info, UtMetadataMessage::Data(msg)) => { + StartSend::Ok(self.recv_data(info, &msg)) + } + IDiscoveryMessage::ReceivedUtMetadataMessage(info, UtMetadataMessage::Reject(msg)) => { + StartSend::Ok(UtMetadataModule::recv_reject(info, msg)) + } }; // Check if we need to unblock the stream after performing our work @@ -518,12 +522,11 @@ impl Stream for UtMetadataModule { self.check_sink_unblock(); // Check if we need to block the stream, if so, set the task - match opt_result { - Some(result) => result.map(|value| Async::Ready(Some(value))), - None => { - self.opt_stream = Some(task::current()); - Ok(Async::NotReady) - } + if let Some(result) = opt_result { + result.map(|value| Async::Ready(Some(value))) + } else { + self.opt_stream = Some(task::current()); + Ok(Async::NotReady) } } } diff --git a/packages/select/src/extended/mod.rs b/packages/select/src/extended/mod.rs index 4f1ee801e..9786d6335 100644 --- a/packages/select/src/extended/mod.rs +++ b/packages/select/src/extended/mod.rs @@ -23,10 +23,11 @@ pub enum OExtendedMessage { } /// Trait for a module to take part in constructing the extended message for a peer. +#[allow(clippy::module_name_repetitions)] pub trait ExtendedListener { /// Extend the given extended message builder for the given peer. 
- fn extend(&self, _info: &PeerInfo, _builder: ExtendedMessageBuilder) -> ExtendedMessageBuilder { - _builder + fn extend(&self, _info: &PeerInfo, builder: ExtendedMessageBuilder) -> ExtendedMessageBuilder { + builder } /// One or both sides of a peer connection had their extended information updated. @@ -36,6 +37,7 @@ pub trait ExtendedListener { } /// Container for both the local and remote `ExtendedMessage`. +#[allow(clippy::module_name_repetitions)] pub struct ExtendedPeerInfo { ours: Option, theirs: Option, @@ -65,6 +67,7 @@ impl ExtendedPeerInfo { //------------------------------------------------------------------------------// +#[allow(clippy::module_name_repetitions)] pub struct ExtendedModule { builder: ExtendedMessageBuilder, peers: HashMap, @@ -117,7 +120,7 @@ impl ExtendedModule { d_module.on_update(&info, ext_peer_info); } } - _ => (), + IExtendedMessage::Control(_) => (), } self.check_stream_unblock(); diff --git a/packages/select/src/lib.rs b/packages/select/src/lib.rs index 1418d9e51..ecf600dce 100644 --- a/packages/select/src/lib.rs +++ b/packages/select/src/lib.rs @@ -10,7 +10,7 @@ pub mod revelation; mod extended; mod uber; -pub use uber::{IUberMessage, OUberMessage, UberModule, UberModuleBuilder}; +pub use uber::{DiscoveryTrait, IUberMessage, OUberMessage, UberModule, UberModuleBuilder}; pub use crate::extended::{ExtendedListener, ExtendedPeerInfo, IExtendedMessage, OExtendedMessage}; diff --git a/packages/select/src/revelation/honest.rs b/packages/select/src/revelation/honest.rs index ee1d74f77..ffa6dfb96 100644 --- a/packages/select/src/revelation/honest.rs +++ b/packages/select/src/revelation/honest.rs @@ -16,6 +16,7 @@ use crate::ControlMessage; /// Revelation module that will honestly report any pieces we have to peers. 
+#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct HonestRevealModule { torrents: HashMap, @@ -85,80 +86,73 @@ impl HonestRevealModule { let out_bytes = &mut self.out_bytes; let out_queue = &mut self.out_queue; - self.torrents - .get_mut(&info_hash) - .map(|peers_info| { - // Add the peer to our list, so we send have messages to them - peers_info.peers.insert(peer); - - // If our bitfield has any pieces in it, send the bitfield, otherwise, don't send it - if !peers_info.status.is_empty() { - // Get our current bitfield, write it to our shared bytes - let bitfield_slice = peers_info.status.get_ref().storage(); - // Bitfield stores index 0 at bit 7 from the left, we want index 0 to be at bit 0 from the left - insert_reversed_bits(out_bytes, bitfield_slice); - - // Split off what we wrote, send this in the message, will be re-used on drop - let bitfield_bytes = out_bytes.split_off(0).freeze(); - let bitfield = BitFieldMessage::new(bitfield_bytes); - - // Enqueue the bitfield message so that we send it to the peer - out_queue.push_back(ORevealMessage::SendBitField(peer, bitfield)); - } + let Some(peers_info) = self.torrents.get_mut(&info_hash) else { + return Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidMetainfoNotExists { + hash: info_hash, + }))); + }; - Ok(AsyncSink::Ready) - }) - .unwrap_or_else(|| { - Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidMetainfoNotExists { - hash: info_hash, - }))) - }) + // Add the peer to our list, so we send have messages to them + peers_info.peers.insert(peer); + + // If our bitfield has any pieces in it, send the bitfield, otherwise, don't send it + if !peers_info.status.is_empty() { + // Get our current bitfield, write it to our shared bytes + let bitfield_slice = peers_info.status.get_ref().storage(); + // Bitfield stores index 0 at bit 7 from the left, we want index 0 to be at bit 0 from the left + insert_reversed_bits(out_bytes, bitfield_slice); + + // Split off what we wrote, 
send this in the message, will be re-used on drop + let bitfield_bytes = out_bytes.split_off(0).freeze(); + let bitfield = BitFieldMessage::new(bitfield_bytes); + + // Enqueue the bitfield message so that we send it to the peer + out_queue.push_back(ORevealMessage::SendBitField(peer, bitfield)); + } + + Ok(AsyncSink::Ready) } fn remove_peer(&mut self, peer: PeerInfo) -> StartSend> { let info_hash = *peer.hash(); - self.torrents - .get_mut(&info_hash) - .map(|peers_info| { - peers_info.peers.remove(&peer); + let Some(peers_info) = self.torrents.get_mut(&info_hash) else { + return Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidMetainfoNotExists { + hash: info_hash, + }))); + }; - Ok(AsyncSink::Ready) - }) - .unwrap_or_else(|| { - Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidMetainfoNotExists { - hash: info_hash, - }))) - }) + peers_info.peers.remove(&peer); + + Ok(AsyncSink::Ready) } fn insert_piece(&mut self, hash: InfoHash, index: u64) -> StartSend> { let out_queue = &mut self.out_queue; - self.torrents - .get_mut(&hash) - .map(|peers_info| { - if index as usize >= peers_info.num_pieces { - Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidPieceOutOfRange { - index, - hash, - }))) - } else { - // Queue up all have messages - for peer in &peers_info.peers { - out_queue.push_back(ORevealMessage::SendHave(*peer, HaveMessage::new(index as u32))); - } - - // Insert into bitfield - peers_info.status.insert(index as usize); - - Ok(AsyncSink::Ready) - } - }) - .unwrap_or_else(|| { - Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidMetainfoNotExists { - hash, - }))) - }) + let Some(peers_info) = self.torrents.get_mut(&hash) else { + return Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidMetainfoNotExists { + hash, + }))); + }; + + let index: usize = index.try_into().unwrap(); + + if index >= peers_info.num_pieces { + Err(Box::new(RevealError::from_kind(RevealErrorKind::InvalidPieceOutOfRange { + index: 
index.try_into().unwrap(), + hash, + }))) + } else { + // Queue up all have messages + for peer in &peers_info.peers { + out_queue.push_back(ORevealMessage::SendHave(*peer, HaveMessage::new(index.try_into().unwrap()))); + } + + // Insert into bitfield + peers_info.status.insert(index); + + Ok(AsyncSink::Ready) + } } //------------------------------------------------------// @@ -298,11 +292,8 @@ mod tests { .send(IRevealMessage::Control(ControlMessage::PeerConnected(peer_info))) .unwrap(); - let (info, bitfield) = match block_recv.next().unwrap().unwrap() { - ORevealMessage::SendBitField(info, bitfield) => (info, bitfield), - _ => { - panic!("Received Unexpected Message") - } + let ORevealMessage::SendBitField(info, bitfield) = block_recv.next().unwrap().unwrap() else { + panic!("Received Unexpected Message") }; assert_eq!(peer_info, info); @@ -330,11 +321,8 @@ mod tests { .send(IRevealMessage::Control(ControlMessage::PeerConnected(peer_info))) .unwrap(); - let (info, bitfield) = match block_recv.next().unwrap().unwrap() { - ORevealMessage::SendBitField(info, bitfield) => (info, bitfield), - _ => { - panic!("Received Unexpected Message") - } + let ORevealMessage::SendBitField(info, bitfield) = block_recv.next().unwrap().unwrap() else { + panic!("Received Unexpected Message") }; assert_eq!(peer_info, info); diff --git a/packages/select/src/uber.rs b/packages/select/src/uber.rs index 7c9fa43b8..fd99496df 100644 --- a/packages/select/src/uber.rs +++ b/packages/select/src/uber.rs @@ -7,7 +7,7 @@ use crate::error::UberError; use crate::extended::{ExtendedListener, ExtendedModule, IExtendedMessage, OExtendedMessage}; use crate::ControlMessage; -trait DiscoveryTrait: +pub trait DiscoveryTrait: ExtendedListener + Sink> + Stream> @@ -53,10 +53,11 @@ type UberDiscovery = Vec< /// Builder for constructing an `UberModule`. 
+#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct UberModuleBuilder { // TODO: Remove these bounds when something like https://github.com/rust-lang/rust/pull/45047 lands - discovery: UberDiscovery, + pub discovery: UberDiscovery, ext_builder: Option, } @@ -81,6 +82,7 @@ impl UberModuleBuilder { } /// Add the given discovery module to the list of discovery modules. + #[must_use] pub fn with_discovery_module(mut self, module: T) -> UberModuleBuilder where T: ExtendedListener @@ -128,6 +130,7 @@ impl IsReady for Async { //----------------------------------------------------------------------// /// Module for multiplexing messages across zero or more other modules. +#[allow(clippy::module_name_repetitions)] pub struct UberModule { discovery: UberDiscovery, extended: Option, @@ -169,10 +172,10 @@ impl UberModule { } } Some(ModuleState::Extended) => { - if !self.discovery.is_empty() { - Some(ModuleState::Discovery(0)) - } else { + if self.discovery.is_empty() { None + } else { + Some(ModuleState::Discovery(0)) } } Some(ModuleState::Discovery(index)) => { @@ -329,7 +332,7 @@ impl Sink for UberModule { fn start_send(&mut self, item: Self::SinkItem) -> StartSend { // Currently we don't return NotReady from the module directly, so no saving our task state here - self.start_sink_state(&item).map(|a| a.map(|_| item)) + self.start_sink_state(&item).map(|a| a.map(|()| item)) } fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { diff --git a/packages/util/src/contiguous.rs b/packages/util/src/contiguous.rs index 2675bf992..b517c0f69 100644 --- a/packages/util/src/contiguous.rs +++ b/packages/util/src/contiguous.rs @@ -1,6 +1,7 @@ use std::cmp; /// Trait for metadata, reading, and writing to a contiguous buffer that doesn't re allocate. +#[allow(clippy::module_name_repetitions)] pub trait ContiguousBuffer { /// Total capacity of the underlying buffer. 
fn capacity(&self) -> usize; @@ -63,6 +64,7 @@ where //----------------------------------------------------------------------------// /// Struct for providing a `ContiguousBuffer` abstraction over many contiguous buffers. +#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct ContiguousBuffers { buffers: Vec, @@ -153,7 +155,7 @@ mod tests { use super::{ContiguousBuffer, ContiguousBuffers}; #[test] - #[should_panic] + #[should_panic(expected = "bip_util: ContiguousBuffer::write Detected Write That Overflows ContiguousBuffers")] fn positive_write_no_buffers() { let mut buffers: ContiguousBuffers> = ContiguousBuffers::new(); diff --git a/packages/util/src/convert.rs b/packages/util/src/convert.rs index be78ef200..a8494f541 100644 --- a/packages/util/src/convert.rs +++ b/packages/util/src/convert.rs @@ -2,14 +2,19 @@ use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}; /// Convert a 4 byte value to an array of 4 bytes. #[must_use] -pub fn four_bytes_to_array(bytes: u32) -> [u8; 4] { - let eight_bytes = eight_bytes_to_array(u64::from(bytes)); +pub fn four_bytes_to_array(unsigned: u32) -> [u8; 4] { + unsigned.to_be_bytes() +} - [eight_bytes[4], eight_bytes[5], eight_bytes[6], eight_bytes[7]] +/// Convert a 4 byte value to an array of 4 bytes. +#[must_use] +pub fn four_bytes_to_array_signed(signed: i32) -> [u8; 4] { + signed.to_be_bytes() } /// Convert an 8 byte value to an array of 8 bytes. #[must_use] +#[allow(clippy::cast_possible_truncation)] pub fn eight_bytes_to_array(bytes: u64) -> [u8; 8] { [ (bytes >> 56) as u8, @@ -41,6 +46,7 @@ pub fn ipv6_to_bytes_be(v6_addr: Ipv6Addr) -> [u8; 16] { let segment_byte_index = index % 2; let byte_shift_bits = 8 - (segment_byte_index * 8); + #[allow(clippy::cast_possible_truncation)] let byte = (segments[segment_index] >> byte_shift_bits) as u8; *item = byte; @@ -52,6 +58,7 @@ pub fn ipv6_to_bytes_be(v6_addr: Ipv6Addr) -> [u8; 16] { // Convert a port to an array of 2 bytes big endian. 
#[must_use] pub fn port_to_bytes_be(port: u16) -> [u8; 2] { + #[allow(clippy::cast_possible_truncation)] [(port >> 8) as u8, port as u8] } @@ -174,6 +181,7 @@ mod tests { let sock_addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1600); let received = super::sock_v4_to_bytes_be(sock_addr); + #[allow(clippy::cast_possible_truncation)] let expected = [127, 0, 0, 1, (1600 >> 8) as u8, 1600_u32 as u8]; assert_eq!(received, expected); @@ -184,6 +192,8 @@ mod tests { let sock_addr = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 1821, 0, 0); let received = super::sock_v6_to_bytes_be(sock_addr); + + #[allow(clippy::cast_possible_truncation)] let expected = [ 0, 0, diff --git a/packages/util/src/error.rs b/packages/util/src/error.rs index 0449e2118..76aebec5e 100644 --- a/packages/util/src/error.rs +++ b/packages/util/src/error.rs @@ -1,5 +1,5 @@ /// Result type for a `LengthError`. -pub type LengthResult = Result; +pub type LengthResult = Result; /// Enumerates a set of length related errors. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] @@ -14,17 +14,17 @@ pub enum LengthErrorKind { /// Generic length error for various types. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct LengthError { +pub struct Error { kind: LengthErrorKind, length: usize, index: Option, } -impl LengthError { +impl Error { /// Create a `LengthError`. #[must_use] - pub fn new(kind: LengthErrorKind, length: usize) -> LengthError { - LengthError { + pub fn new(kind: LengthErrorKind, length: usize) -> Error { + Error { kind, length, index: None, @@ -33,8 +33,8 @@ impl LengthError { /// Create a `LengthError` for a given element index. 
#[must_use] - pub fn with_index(kind: LengthErrorKind, length: usize, index: usize) -> LengthError { - LengthError { + pub fn with_index(kind: LengthErrorKind, length: usize, index: usize) -> Error { + Error { kind, length, index: Some(index), diff --git a/packages/util/src/send/split_sender.rs b/packages/util/src/send/split_sender.rs index 8eedb755f..ec4bf7432 100644 --- a/packages/util/src/send/split_sender.rs +++ b/packages/util/src/send/split_sender.rs @@ -84,6 +84,7 @@ where // ----------------------------------------------------------------------------// /// `SplitSenderAck` allows a client to ack messages received from a `SplitSender`. +#[allow(clippy::module_name_repetitions)] pub struct SplitSenderAck { count: Arc, } diff --git a/packages/util/src/sha/builder.rs b/packages/util/src/sha/builder.rs index 82f7eaafa..406ce2764 100644 --- a/packages/util/src/sha/builder.rs +++ b/packages/util/src/sha/builder.rs @@ -4,6 +4,7 @@ use crypto::sha1::Sha1; use crate::sha::{self, ShaHash}; /// Building `ShaHash` objects by adding byte slices to the hash. +#[allow(clippy::module_name_repetitions)] #[derive(Clone)] pub struct ShaHashBuilder { sha: Sha1, diff --git a/packages/util/src/sha/mod.rs b/packages/util/src/sha/mod.rs index 76a186ed8..dd985095a 100644 --- a/packages/util/src/sha/mod.rs +++ b/packages/util/src/sha/mod.rs @@ -1,15 +1,17 @@ use std::ops::BitXor; -use crate::error::{LengthError, LengthErrorKind, LengthResult}; +use crate::error::{Error, LengthErrorKind, LengthResult}; mod builder; +#[allow(clippy::module_name_repetitions)] pub use crate::sha::builder::ShaHashBuilder; /// Length of a SHA-1 hash. pub const SHA_HASH_LEN: usize = 20; /// SHA-1 hash wrapper type for performing operations on the hash. +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] pub struct ShaHash { hash: [u8; SHA_HASH_LEN], @@ -23,15 +25,19 @@ impl ShaHash { } /// Create a `ShaHash` directly from the given hash. 
+ /// + /// # Errors + /// + /// It would error if the hash is the wrong group. pub fn from_hash(hash: &[u8]) -> LengthResult { - if hash.len() != SHA_HASH_LEN { - Err(LengthError::new(LengthErrorKind::LengthExpected, SHA_HASH_LEN)) - } else { + if hash.len() == SHA_HASH_LEN { let mut my_hash = [0u8; SHA_HASH_LEN]; my_hash.iter_mut().zip(hash.iter()).map(|(dst, src)| *dst = *src).count(); Ok(ShaHash { hash: my_hash }) + } else { + Err(Error::new(LengthErrorKind::LengthExpected, SHA_HASH_LEN)) } } @@ -104,6 +110,7 @@ pub struct XorBits<'a> { bits: Bits<'a>, } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for XorBits<'a> { type Item = XorRep; @@ -145,6 +152,7 @@ impl<'a> Bits<'a> { } } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for Bits<'a> { type Item = BitRep; @@ -220,7 +228,9 @@ mod tests { } #[test] - #[should_panic] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: Error { kind: LengthExpected, length: 20, index: None }" + )] fn negative_from_hash_too_long() { let bits = [0u8; super::SHA_HASH_LEN + 1]; @@ -228,7 +238,9 @@ mod tests { } #[test] - #[should_panic] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: Error { kind: LengthExpected, length: 20, index: None }" + )] fn negative_from_hash_too_short() { let bits = [0u8; super::SHA_HASH_LEN - 1]; diff --git a/packages/util/src/test.rs b/packages/util/src/test.rs index 0de8e9794..38a87bf01 100644 --- a/packages/util/src/test.rs +++ b/packages/util/src/test.rs @@ -6,12 +6,20 @@ use crate::bt::{self, NodeId}; use crate::net::IpAddr; /// Allows us to time travel into the future. +/// +/// # Panics +/// +/// Panics if offset is out of range. #[must_use] pub fn travel_into_future(offset: Duration) -> DateTime { Utc::now().checked_add_signed(offset).unwrap() } /// Allows us to time travel into the past. +/// +/// # Panics +/// +/// Panics if offset is out of range. 
#[must_use] pub fn travel_into_past(offset: Duration) -> DateTime { Utc::now().checked_sub_signed(offset).unwrap() @@ -75,7 +83,7 @@ pub fn dummy_block_node_ids(num_ids: u8) -> Vec { *byte = repeat; } - id_block.push(id.into()) + id_block.push(id.into()); } id_block diff --git a/packages/util/src/trans/locally_shuffled.rs b/packages/util/src/trans/locally_shuffled.rs index 5534f54b8..f9a2faec4 100644 --- a/packages/util/src/trans/locally_shuffled.rs +++ b/packages/util/src/trans/locally_shuffled.rs @@ -19,6 +19,7 @@ const TRANSACTION_ID_PREALLOC_LEN: usize = 2048; /// This allows us to uphold the uniqueness property for any large /// transaction type (such as u64) but also works with smaller types. +#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct LocallyShuffledIds { sequential: SequentialIds, @@ -108,6 +109,7 @@ mod tests { tid_count[index] += 1; } + #[allow(clippy::cast_possible_truncation)] for count in &tid_count { assert_eq!(*count, duplicates_to_find as u8); } @@ -128,6 +130,7 @@ mod tests { tid_count[index] += 1; } + #[allow(clippy::cast_possible_truncation)] for count in &tid_count { assert_eq!(*count, duplicates_to_find as u8); } @@ -148,6 +151,7 @@ mod tests { tid_count[index] += 1; } + #[allow(clippy::cast_possible_truncation)] for count in &tid_count { assert_eq!(*count, duplicates_to_find as i8); } @@ -168,6 +172,7 @@ mod tests { tid_count[index] += 1; } + #[allow(clippy::cast_possible_truncation)] for count in &tid_count { assert_eq!(*count, duplicates_to_find as i8); } diff --git a/packages/util/src/trans/sequential.rs b/packages/util/src/trans/sequential.rs index a8a7e9c70..ea414540d 100644 --- a/packages/util/src/trans/sequential.rs +++ b/packages/util/src/trans/sequential.rs @@ -6,6 +6,7 @@ use num::{One, Zero}; use crate::trans::TransactionIds; /// Generates sequentially unique ids and wraps when overflow occurs. 
+#[allow(clippy::module_name_repetitions)] #[derive(Default)] pub struct SequentialIds { next_id: T, diff --git a/packages/utracker/src/announce.rs b/packages/utracker/src/announce.rs index 2ee4735c4..ba131fea1 100644 --- a/packages/utracker/src/announce.rs +++ b/packages/utracker/src/announce.rs @@ -28,6 +28,7 @@ const ANNOUNCE_STOPPED_EVENT: i32 = 3; /// Announce request sent from the client to the server. /// /// IPv6 is supported but is [not standard](http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/). +#[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug, PartialEq, Eq)] pub struct AnnounceRequest<'a> { info_hash: InfoHash, @@ -77,6 +78,10 @@ impl<'a> AnnounceRequest<'a> { } /// Write the `AnnounceRequest` to the given writer. + /// + /// # Errors + /// + /// It would return an IO error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -185,6 +190,7 @@ fn parse_request(bytes: &[u8], ip_type: fn(bytes: &[u8]) -> IResult<&[u8], Sourc // ----------------------------------------------------------------------------// /// Announce response sent from the server to the client. +#[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug, PartialEq, Eq)] pub struct AnnounceResponse<'a> { interval: i32, @@ -216,6 +222,10 @@ impl<'a> AnnounceResponse<'a> { } /// Write the `AnnounceResponse` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -311,6 +321,10 @@ impl ClientState { } /// Write the `ClientState` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. 
pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -362,6 +376,7 @@ fn parse_state(bytes: &[u8]) -> IResult<&[u8], ClientState> { // ----------------------------------------------------------------------------// /// Announce event of a client reported to the server. +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum AnnounceEvent { /// No event is reported. @@ -382,6 +397,10 @@ impl AnnounceEvent { } /// Write the `AnnounceEvent` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -441,15 +460,19 @@ impl SourceIP { } /// Write the `SourceIP` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, writer: W) -> io::Result<()> where W: Write, { match *self { - SourceIP::ImpliedV4 => self.write_bytes_slice(writer, &IMPLIED_IPV4_ID[..]), - SourceIP::ImpliedV6 => self.write_bytes_slice(writer, &IMPLIED_IPV6_ID[..]), - SourceIP::ExplicitV4(addr) => self.write_bytes_slice(writer, &convert::ipv4_to_bytes_be(addr)[..]), - SourceIP::ExplicitV6(addr) => self.write_bytes_slice(writer, &convert::ipv6_to_bytes_be(addr)[..]), + SourceIP::ImpliedV4 => SourceIP::write_bytes_slice(writer, &IMPLIED_IPV4_ID[..]), + SourceIP::ImpliedV6 => SourceIP::write_bytes_slice(writer, &IMPLIED_IPV6_ID[..]), + SourceIP::ExplicitV4(addr) => SourceIP::write_bytes_slice(writer, &convert::ipv4_to_bytes_be(addr)[..]), + SourceIP::ExplicitV6(addr) => SourceIP::write_bytes_slice(writer, &convert::ipv6_to_bytes_be(addr)[..]), } } @@ -457,10 +480,8 @@ impl SourceIP { #[must_use] pub fn is_ipv6(&self) -> bool { match *self { - SourceIP::ImpliedV6 => true, - SourceIP::ExplicitV6(_) => true, - SourceIP::ImpliedV4 => false, - SourceIP::ExplicitV4(_) => false, + SourceIP::ExplicitV6(_) | SourceIP::ImpliedV6 => true, + 
SourceIP::ImpliedV4 | SourceIP::ExplicitV4(_) => false, } } @@ -471,7 +492,7 @@ impl SourceIP { } /// Write the given byte slice to the given writer. - fn write_bytes_slice(&self, mut writer: W, bytes: &[u8]) -> io::Result<()> + fn write_bytes_slice(mut writer: W, bytes: &[u8]) -> io::Result<()> where W: Write, { @@ -520,6 +541,10 @@ impl DesiredPeers { } /// Write the `DesiredPeers` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -790,9 +815,8 @@ mod tests { bytes.write_i32::(num_want).unwrap(); bytes.write_u16::(port).unwrap(); - let received = match AnnounceRequest::from_bytes_v4(&bytes) { - IResult::Done(_, rec) => rec, - _ => panic!("AnnounceRequest Parsing Failed..."), + let IResult::Done(_, received) = AnnounceRequest::from_bytes_v4(&bytes) else { + panic!("AnnounceRequest Parsing Failed...") }; assert_eq!(received.info_hash(), InfoHash::from(info_hash)); @@ -1045,7 +1069,7 @@ mod tests { #[test] fn positive_parse_desired_peers_default() { - let default_bytes = convert::four_bytes_to_array(-1i32 as u32); + let default_bytes = convert::four_bytes_to_array(u32::MAX); let received = DesiredPeers::from_bytes(&default_bytes); let expected = DesiredPeers::Default; diff --git a/packages/utracker/src/client/dispatcher.rs b/packages/utracker/src/client/dispatcher.rs index a4a062c92..248b5a762 100644 --- a/packages/utracker/src/client/dispatcher.rs +++ b/packages/utracker/src/client/dispatcher.rs @@ -43,6 +43,7 @@ pub enum DispatchMessage { /// Create a new background dispatcher to execute request and send responses back. /// /// Assumes `msg_capacity` is less than `usize::max_value`(). 
+#[allow(clippy::module_name_repetitions)] pub fn create_dispatcher( bind: SocketAddr, handshaker: H, @@ -160,7 +161,7 @@ where &mut self, provider: &mut Provider<'_, ClientDispatcher>, addr: SocketAddr, - response: TrackerResponse<'_>, + response: &TrackerResponse<'_>, ) { let token = ClientToken(response.transaction_id()); @@ -216,20 +217,15 @@ where /// /// If this call is the result of a timeout, that will decide whether to cancel the request or not. fn process_request(&mut self, provider: &mut Provider<'_, ClientDispatcher>, token: ClientToken, timed_out: bool) { - let mut conn_timer = if let Some(conn_timer) = self.active_requests.remove(&token) { - conn_timer - } else { + let Some(mut conn_timer) = self.active_requests.remove(&token) else { return; }; // TODO: Add logging // Resolve the duration of the current timeout to use - let next_timeout = match conn_timer.current_timeout(timed_out) { - Some(timeout) => timeout, - None => { - self.notify_client(token, Err(ClientError::MaxTimeout)); + let Some(next_timeout) = conn_timer.current_timeout(timed_out) else { + self.notify_client(token, Err(ClientError::MaxTimeout)); - return; - } + return; }; let addr = conn_timer.message_params().0; @@ -275,16 +271,14 @@ where write_success = tracker_request.write_bytes(&mut writer).is_ok(); if write_success { - Some((writer.position() as usize, addr)) + Some((writer.position().try_into().unwrap(), addr)) } else { None } }); // If message was not sent (too long to fit) then end the request - if !write_success { - self.notify_client(token, Err(ClientError::MaxLength)); - } else { + if write_success { conn_timer.set_timeout_id( provider .set_timeout(DispatchTimeout::Connect(token), next_timeout) @@ -292,6 +286,8 @@ where ); self.active_requests.insert(token, conn_timer); + } else { + self.notify_client(token, Err(ClientError::MaxLength)); } } } @@ -305,12 +301,11 @@ where type Message = DispatchMessage; fn incoming(&mut self, mut provider: Provider<'_, Self>, message: &[u8], 
addr: SocketAddr) { - let response = match TrackerResponse::from_bytes(message) { - IResult::Done(_, rsp) => rsp, - _ => return, // TODO: Add Logging + let IResult::Done(_, response) = TrackerResponse::from_bytes(message) else { + return; // TODO: Add Logging }; - self.recv_response(&mut provider, addr, response); + self.recv_response(&mut provider, addr, &response); } fn notify(&mut self, mut provider: Provider<'_, Self>, message: DispatchMessage) { @@ -390,7 +385,9 @@ impl ConnectTimer { /// Calculates the timeout for the request given the attempt count. fn calculate_message_timeout_millis(attempt: u64) -> u64 { - (15 * 2u64.pow(attempt as u32)) * 1000 + #[allow(clippy::cast_possible_truncation)] + let attempt = attempt as u32; + (15 * 2u64.pow(attempt)) * 1000 } // ----------------------------------------------------------------------------// diff --git a/packages/utracker/src/client/error.rs b/packages/utracker/src/client/error.rs index 4edcd19c3..69af8cb65 100644 --- a/packages/utracker/src/client/error.rs +++ b/packages/utracker/src/client/error.rs @@ -4,6 +4,7 @@ use crate::error::ErrorResponse; pub type ClientResult = Result; /// Errors occurring as the result of a `ClientRequest`. +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Clone, PartialEq, Eq)] pub enum ClientError { /// Request timeout reached. diff --git a/packages/utracker/src/client/mod.rs b/packages/utracker/src/client/mod.rs index 477677abd..f6690e746 100644 --- a/packages/utracker/src/client/mod.rs +++ b/packages/utracker/src/client/mod.rs @@ -22,6 +22,7 @@ pub mod error; const DEFAULT_CAPACITY: usize = 4096; /// Request made by the `TrackerClient`. +#[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub enum ClientRequest { Announce(InfoHash, ClientState), @@ -29,6 +30,7 @@ pub enum ClientRequest { } /// Response metadata from a request. 
+#[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct ClientMetadata { token: ClientToken, @@ -55,6 +57,7 @@ impl ClientMetadata { } /// Response received by the `TrackerClient`. +#[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub enum ClientResponse { /// Announce response. @@ -96,6 +99,7 @@ impl ClientResponse { /// Tracker client that executes requests asynchronously. /// /// Client will shutdown on drop. +#[allow(clippy::module_name_repetitions)] pub struct TrackerClient { send: Sender, // We are in charge of incrementing this, background worker is in charge of decrementing @@ -105,6 +109,10 @@ pub struct TrackerClient { impl TrackerClient { /// Create a new `TrackerClient`. + /// + /// # Errors + /// + /// It would return a IO error if unable build a new client. pub fn new(bind: SocketAddr, handshaker: H) -> io::Result where H: Sink + DiscoveryInfo + Send + 'static, @@ -116,6 +124,14 @@ impl TrackerClient { /// Create a new `TrackerClient` with the given message capacity. /// /// Panics if capacity == `usize::max_value`(). + /// + /// # Errors + /// + /// It would return a IO error if unable build a new client. + /// + /// # Panics + /// + /// It would panic if the desired capacity is too large. pub fn with_capacity(bind: SocketAddr, handshaker: H, capacity: usize) -> io::Result where H: Sink + DiscoveryInfo + Send + 'static, @@ -141,6 +157,10 @@ impl TrackerClient { /// Execute an asynchronous request to the given tracker. /// /// If the maximum number of requests are currently in progress, return None. + /// + /// # Panics + /// + /// It would panic if unable to send request message. pub fn request(&mut self, addr: SocketAddr, request: ClientRequest) -> Option { if self.limiter.can_initiate() { let token = self.generator.generate(); @@ -166,6 +186,7 @@ impl Drop for TrackerClient { // ----------------------------------------------------------------------------// /// Associates a `ClientRequest` with a `ClientResponse`. 
+#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct ClientToken(u32); diff --git a/packages/utracker/src/contact.rs b/packages/utracker/src/contact.rs index 704402f14..129123e13 100644 --- a/packages/utracker/src/contact.rs +++ b/packages/utracker/src/contact.rs @@ -13,9 +13,9 @@ const SOCKET_ADDR_V6_BYTES: usize = 18; /// Container for peers to be sent/received from a tracker. #[derive(Clone, Debug, PartialEq, Eq)] pub enum CompactPeers<'a> { - /// IPv4 variant of CompactPeers. + /// IPv4 variant of `CompactPeers`. V4(CompactPeersV4<'a>), - /// IPv6 variant of CompactPeers. + /// IPv6 variant of `CompactPeers`. V6(CompactPeersV6<'a>), } @@ -41,6 +41,10 @@ impl<'a> CompactPeers<'a> { } /// Write the underlying `CompactPeers` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, writer: W) -> io::Result<()> where W: Write, @@ -52,6 +56,7 @@ impl<'a> CompactPeers<'a> { } /// Iterator over all of the contact information. + #[allow(clippy::iter_without_into_iter)] #[must_use] pub fn iter(&self) -> CompactPeersIter<'_> { match self { @@ -92,6 +97,7 @@ impl<'a> CompactPeersIter<'a> { } } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for CompactPeersIter<'a> { type Item = SocketAddr; @@ -127,6 +133,10 @@ impl<'a> CompactPeersV4<'a> { } /// Write the `CompactPeersV4` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -144,6 +154,7 @@ impl<'a> CompactPeersV4<'a> { } /// Iterator over all of the contact information. 
+ #[allow(clippy::iter_without_into_iter)] #[must_use] pub fn iter(&self) -> CompactPeersV4Iter<'_> { CompactPeersV4Iter::new(&self.peers) @@ -191,6 +202,7 @@ impl<'a> CompactPeersV4Iter<'a> { } } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for CompactPeersV4Iter<'a> { type Item = SocketAddrV4; @@ -240,6 +252,10 @@ impl<'a> CompactPeersV6<'a> { } /// Write the `CompactPeersV6` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -257,6 +273,7 @@ impl<'a> CompactPeersV6<'a> { } /// Iterator over all of the contact information. + #[allow(clippy::iter_without_into_iter)] #[must_use] pub fn iter(&self) -> CompactPeersV6Iter<'_> { CompactPeersV6Iter::new(&self.peers) @@ -304,6 +321,7 @@ impl<'a> CompactPeersV6Iter<'a> { } } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for CompactPeersV6Iter<'a> { type Item = SocketAddrV6; diff --git a/packages/utracker/src/error.rs b/packages/utracker/src/error.rs index 478569447..93ab3e4d3 100644 --- a/packages/utracker/src/error.rs +++ b/packages/utracker/src/error.rs @@ -6,6 +6,7 @@ use std::io::{self, Write}; use nom::{call, error_position, map, map_res, take, take_str, IResult}; /// Error reported by the server and sent to the client. +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Clone, PartialEq, Eq)] pub struct ErrorResponse<'a> { message: Cow<'a, str>, @@ -26,6 +27,10 @@ impl<'a> ErrorResponse<'a> { } /// Write the `ErrorResponse` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. 
pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, diff --git a/packages/utracker/src/option.rs b/packages/utracker/src/option.rs index 8a64ac9cd..ce637007c 100644 --- a/packages/utracker/src/option.rs +++ b/packages/utracker/src/option.rs @@ -13,6 +13,7 @@ const NO_OPERATION_BYTE: u8 = 0x01; const URL_DATA_BYTE: u8 = 0x02; /// Trait for supplying optional information in an `AnnounceRequest`. +#[allow(clippy::module_name_repetitions)] pub trait AnnounceOption<'a>: Sized { /// Byte specifying what option this is. fn option_byte() -> u8; @@ -55,14 +56,21 @@ impl<'a> AnnounceOptions<'a> { } /// Write the `AnnounceOptions` to the given writer. - #[allow(unused)] + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. + /// + /// # Panics + /// + /// It would panic if the chuck length is too large. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, { for (byte, content) in &self.raw_options { - for content_chunk in content.chunks(u8::max_value() as usize) { - let content_chunk_len = content_chunk.len() as u8; + for content_chunk in content.chunks(u8::MAX as usize) { + let content_chunk_len: u8 = content_chunk.len().try_into().unwrap(); writer.write_u8(*byte)?; writer.write_u8(content_chunk_len)?; @@ -187,6 +195,7 @@ named!(byte_usize<&[u8], usize>, map!( // ----------------------------------------------------------------------------// /// Concatenated PATH and QUERY of a UDP tracker URL. 
+#[allow(clippy::module_name_repetitions)] #[derive(Debug, PartialEq, Eq, Clone)] pub struct URLDataOption<'a> { url_data: &'a [u8], @@ -221,7 +230,6 @@ impl<'a> AnnounceOption<'a> for URLDataOption<'a> { #[cfg(test)] mod tests { use std::io::Write; - use std::u8; use nom::IResult; @@ -390,7 +398,7 @@ mod tests { let mut bytes = [0u8; NUM_BYTES]; bytes[0] = super::URL_DATA_BYTE; - bytes[1] = u8::max_value(); + bytes[1] = u8::MAX; bytes[256] = 230; let received = AnnounceOptions::from_bytes(&bytes); @@ -411,7 +419,7 @@ mod tests { { let bytes_one = &mut bytes[..NUM_BYTES]; bytes_one[0] = super::URL_DATA_BYTE; - bytes_one[1] = u8::max_value(); + bytes_one[1] = u8::MAX; bytes_one[256] = 230; url_data_bytes.extend_from_slice(&bytes_one[2..]); @@ -419,7 +427,7 @@ mod tests { { let bytes_two = &mut bytes[NUM_BYTES..]; bytes_two[0] = super::URL_DATA_BYTE; - bytes_two[1] = u8::max_value(); + bytes_two[1] = u8::MAX; bytes_two[256] = 210; url_data_bytes.extend_from_slice(&bytes_two[2..]); @@ -445,7 +453,7 @@ mod tests { { let bytes_one = &mut bytes[..NUM_BYTES]; bytes_one[0] = super::URL_DATA_BYTE; - bytes_one[1] = u8::max_value(); + bytes_one[1] = u8::MAX; bytes_one[256] = 230; url_data_bytes.extend_from_slice(&bytes_one[2..]); diff --git a/packages/utracker/src/request.rs b/packages/utracker/src/request.rs index 6fd5ee61d..8bfe07fd1 100644 --- a/packages/utracker/src/request.rs +++ b/packages/utracker/src/request.rs @@ -15,6 +15,7 @@ use crate::scrape::ScrapeRequest; pub const CONNECT_ID_PROTOCOL_ID: u64 = 0x0417_2710_1980; /// Enumerates all types of requests that can be made to a tracker. +#[allow(clippy::module_name_repetitions)] pub enum RequestType<'a> { Connect, Announce(AnnounceRequest<'a>), @@ -34,6 +35,7 @@ impl<'a> RequestType<'a> { } /// `TrackerRequest` which encapsulates any request sent to a tracker. 
+#[allow(clippy::module_name_repetitions)] pub struct TrackerRequest<'a> { // Both the connection id and transaction id are technically not unsigned according // to the spec, but since they are just bits we will keep them as unsigned since it @@ -61,6 +63,10 @@ impl<'a> TrackerRequest<'a> { } /// Write the `TrackerRequest` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, diff --git a/packages/utracker/src/response.rs b/packages/utracker/src/response.rs index b9b6a087c..88084b83c 100644 --- a/packages/utracker/src/response.rs +++ b/packages/utracker/src/response.rs @@ -14,6 +14,7 @@ use crate::scrape::ScrapeResponse; const ERROR_ACTION_ID: u32 = 3; /// Enumerates all types of responses that can be received from a tracker. +#[allow(clippy::module_name_repetitions)] pub enum ResponseType<'a> { Connect(u64), Announce(AnnounceResponse<'a>), @@ -35,6 +36,7 @@ impl<'a> ResponseType<'a> { } /// `TrackerResponse` which encapsulates any response sent from a tracker. +#[allow(clippy::module_name_repetitions)] pub struct TrackerResponse<'a> { transaction_id: u32, response_type: ResponseType<'a>, @@ -57,6 +59,10 @@ impl<'a> TrackerResponse<'a> { } /// Write the `TrackerResponse` to the given writer. + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, diff --git a/packages/utracker/src/scrape.rs b/packages/utracker/src/scrape.rs index 0fa2c53ad..855c0a32c 100644 --- a/packages/utracker/src/scrape.rs +++ b/packages/utracker/src/scrape.rs @@ -10,6 +10,7 @@ use util::convert; const SCRAPE_STATS_BYTES: usize = 12; /// Status for a given `InfoHash`. 
+#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct ScrapeStats { seeders: i32, @@ -64,6 +65,7 @@ fn parse_stats(bytes: &[u8]) -> IResult<&[u8], ScrapeStats> { // ----------------------------------------------------------------------------// /// Scrape request sent from the client to the server. +#[allow(clippy::module_name_repetitions)] #[derive(Default, Clone, Debug, PartialEq, Eq)] pub struct ScrapeRequest<'a> { hashes: Cow<'a, [u8]>, @@ -87,6 +89,10 @@ impl<'a> ScrapeRequest<'a> { /// Write the `ScrapeRequest` to the given writer. /// /// Ordering of the written `InfoHash` is identical to that of `ScrapeRequest::iter`(). + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -102,6 +108,7 @@ impl<'a> ScrapeRequest<'a> { } /// Iterator over all of the hashes in the request. + #[allow(clippy::iter_without_into_iter)] #[must_use] pub fn iter(&self) -> ScrapeRequestIter<'_> { ScrapeRequestIter::new(&self.hashes) @@ -136,6 +143,7 @@ fn parse_request(bytes: &[u8]) -> IResult<&[u8], ScrapeRequest<'_>> { // ----------------------------------------------------------------------------// /// Scrape response sent from the server to the client. +#[allow(clippy::module_name_repetitions)] #[derive(Default, Clone, Debug, PartialEq, Eq)] pub struct ScrapeResponse<'a> { stats: Cow<'a, [u8]>, @@ -159,6 +167,10 @@ impl<'a> ScrapeResponse<'a> { /// Write the `ScrapeResponse` to the given writer. /// /// Ordering of the written stats is identical to that of `ScrapeResponse::iter`(). + /// + /// # Errors + /// + /// It would return an IO Error if unable to write the bytes. pub fn write_bytes(&self, mut writer: W) -> io::Result<()> where W: Write, @@ -167,10 +179,14 @@ impl<'a> ScrapeResponse<'a> { } /// Add the scrape statistics to the current response. 
+ /// + /// # Panics + /// + /// It would panic if the stats are negative. pub fn insert(&mut self, stats: ScrapeStats) { - let seeders_bytes = convert::four_bytes_to_array(stats.num_seeders() as u32); - let downloads_bytes = convert::four_bytes_to_array(stats.num_downloads() as u32); - let leechers_bytes = convert::four_bytes_to_array(stats.num_leechers() as u32); + let seeders_bytes = convert::four_bytes_to_array_signed(stats.num_seeders()); + let downloads_bytes = convert::four_bytes_to_array_signed(stats.num_downloads()); + let leechers_bytes = convert::four_bytes_to_array_signed(stats.num_leechers()); self.stats.to_mut().reserve(SCRAPE_STATS_BYTES); @@ -183,6 +199,7 @@ impl<'a> ScrapeResponse<'a> { /// /// Ordering of the status corresponds to the ordering of the `InfoHash` in the /// initial request. + #[allow(clippy::iter_without_into_iter)] #[must_use] pub fn iter(&self) -> ScrapeResponseIter<'_> { ScrapeResponseIter::new(&self.stats) @@ -217,6 +234,7 @@ fn parse_response(bytes: &[u8]) -> IResult<&[u8], ScrapeResponse<'_>> { // ----------------------------------------------------------------------------// /// Iterator over a number of `InfoHashes`. +#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct ScrapeRequestIter<'a> { hashes: &'a [u8], @@ -231,7 +249,7 @@ impl<'a> ScrapeRequestIter<'a> { } } } - +#[allow(clippy::copy_iterator)] impl<'a> Iterator for ScrapeRequestIter<'a> { type Item = InfoHash; @@ -256,6 +274,7 @@ impl<'a> ExactSizeIterator for ScrapeRequestIter<'a> { // ----------------------------------------------------------------------------// /// Iterator over a number of `ScrapeStats`. 
+#[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct ScrapeResponseIter<'a> { stats: &'a [u8], @@ -268,6 +287,7 @@ impl<'a> ScrapeResponseIter<'a> { } } +#[allow(clippy::copy_iterator)] impl<'a> Iterator for ScrapeResponseIter<'a> { type Item = ScrapeStats; diff --git a/packages/utracker/src/server/dispatcher.rs b/packages/utracker/src/server/dispatcher.rs index baef07be8..1bb216456 100644 --- a/packages/utracker/src/server/dispatcher.rs +++ b/packages/utracker/src/server/dispatcher.rs @@ -21,6 +21,7 @@ pub enum DispatchMessage { } /// Create a new background dispatcher to service requests. +#[allow(clippy::module_name_repetitions)] pub fn create_dispatcher(bind: SocketAddr, handler: H) -> io::Result> where H: ServerHandler + 'static, @@ -66,7 +67,7 @@ where fn process_request( &mut self, provider: &mut Provider<'_, ServerDispatcher>, - request: TrackerRequest<'_>, + request: &TrackerRequest<'_>, addr: SocketAddr, ) { let conn_id = request.connection_id(); @@ -96,7 +97,7 @@ where }; let response = TrackerResponse::new(trans_id, response_type); - write_response(provider, response, addr); + write_response(provider, &response, addr); }); } @@ -116,7 +117,7 @@ where }; let response = TrackerResponse::new(trans_id, response_type); - write_response(provider, response, addr); + write_response(provider, &response, addr); }); } @@ -136,13 +137,13 @@ where }; let response = TrackerResponse::new(trans_id, response_type); - write_response(provider, response, addr); + write_response(provider, &response, addr); }); } } /// Write the given tracker response through to the given provider. 
-fn write_response(provider: &mut Provider<'_, ServerDispatcher>, response: TrackerResponse<'_>, addr: SocketAddr) +fn write_response(provider: &mut Provider<'_, ServerDispatcher>, response: &TrackerResponse<'_>, addr: SocketAddr) where H: ServerHandler, { @@ -151,7 +152,7 @@ where let success = response.write_bytes(&mut cursor).is_ok(); if success { - Some((cursor.position() as usize, addr)) + Some((cursor.position().try_into().unwrap(), addr)) } else { None } // TODO: Add Logging @@ -166,12 +167,11 @@ where type Message = DispatchMessage; fn incoming(&mut self, mut provider: Provider<'_, Self>, message: &[u8], addr: SocketAddr) { - let request = match TrackerRequest::from_bytes(message) { - IResult::Done(_, req) => req, - _ => return, // TODO: Add Logging + let IResult::Done(_, request) = TrackerRequest::from_bytes(message) else { + return; // TODO: Add Logging }; - self.process_request(&mut provider, request, addr); + self.process_request(&mut provider, &request, addr); } fn notify(&mut self, mut provider: Provider<'_, Self>, message: DispatchMessage) { @@ -180,5 +180,5 @@ where } } - fn timeout(&mut self, _: Provider<'_, Self>, _: ()) {} + fn timeout(&mut self, _: Provider<'_, Self>, (): ()) {} } diff --git a/packages/utracker/src/server/handler.rs b/packages/utracker/src/server/handler.rs index 91df83a38..45f9ff563 100644 --- a/packages/utracker/src/server/handler.rs +++ b/packages/utracker/src/server/handler.rs @@ -9,6 +9,8 @@ use crate::scrape::{ScrapeRequest, ScrapeResponse}; pub type ServerResult<'a, T> = Result; /// Trait for providing a `TrackerServer` with methods to service `TrackerRequests`. +#[allow(clippy::module_name_repetitions)] + pub trait ServerHandler: Send { /// Service a connection id request from the given address. 
/// diff --git a/packages/utracker/src/server/mod.rs b/packages/utracker/src/server/mod.rs index 1dfc6b9b9..2dd9f03b0 100644 --- a/packages/utracker/src/server/mod.rs +++ b/packages/utracker/src/server/mod.rs @@ -12,12 +12,17 @@ pub mod handler; /// Tracker server that executes responses asynchronously. /// /// Server will shutdown on drop. +#[allow(clippy::module_name_repetitions)] pub struct TrackerServer { send: Sender, } impl TrackerServer { /// Run a new `TrackerServer`. + /// + /// # Errors + /// + /// It would return an IO Error if unable to run the server. pub fn run(bind: SocketAddr, handler: H) -> io::Result where H: ServerHandler + 'static, diff --git a/packages/utracker/test/mod.rs b/packages/utracker/test/mod.rs index 8962f2d32..02a723ece 100644 --- a/packages/utracker/test/mod.rs +++ b/packages/utracker/test/mod.rs @@ -82,9 +82,7 @@ impl ServerHandler for MockTrackerHandler { // Resolve what to do with the event match req.state().event() { - AnnounceEvent::None => peers.insert(store_addr), - AnnounceEvent::Completed => peers.insert(store_addr), - AnnounceEvent::Started => peers.insert(store_addr), + AnnounceEvent::Started | AnnounceEvent::Completed | AnnounceEvent::None => peers.insert(store_addr), AnnounceEvent::Stopped => peers.remove(&store_addr), }; @@ -123,8 +121,8 @@ impl ServerHandler for MockTrackerHandler { result(Ok(AnnounceResponse::new( 1800, - peers.len() as i32, - peers.len() as i32, + peers.len().try_into().unwrap(), + peers.len().try_into().unwrap(), compact_peers, ))); } else { @@ -144,7 +142,11 @@ impl ServerHandler for MockTrackerHandler { for hash in req.iter() { let peers = inner_lock.peers_map.entry(hash).or_default(); - response.insert(ScrapeStats::new(peers.len() as i32, 0, peers.len() as i32)); + response.insert(ScrapeStats::new( + peers.len().try_into().unwrap(), + 0, + peers.len().try_into().unwrap(), + )); } result(Ok(response));