chore: clippy

This commit is contained in:
Joonas Koivunen 2021-01-18 17:08:57 +02:00
parent 307e80be42
commit ffd043c618
10 changed files with 38 additions and 51 deletions

View File

@@ -148,7 +148,7 @@ impl Bitswap {
///
/// Called from Kademlia behaviour.
pub fn connect(&mut self, peer_id: PeerId) {
if self.target_peers.insert(peer_id.clone()) {
if self.target_peers.insert(peer_id) {
self.events.push_back(NetworkBehaviourAction::DialPeer {
peer_id,
condition: DialPeerCondition::Disconnected,
@@ -224,9 +224,9 @@ impl NetworkBehaviour for Bitswap {
fn inject_connected(&mut self, peer_id: &PeerId) {
debug!("bitswap: inject_connected {}", peer_id);
let ledger = Ledger::new();
self.stats.entry(peer_id.clone()).or_default();
self.connected_peers.insert(peer_id.clone(), ledger);
self.send_want_list(peer_id.clone());
self.stats.entry(*peer_id).or_default();
self.connected_peers.insert(*peer_id, ledger);
self.send_want_list(*peer_id);
}
fn inject_disconnected(&mut self, peer_id: &PeerId) {
@@ -259,7 +259,7 @@ impl NetworkBehaviour for Bitswap {
for cid in message.cancel() {
ledger.received_want_list.remove(cid);
let event = BitswapEvent::ReceivedCancel(source.clone(), cid.clone());
let event = BitswapEvent::ReceivedCancel(source, cid.clone());
self.events
.push_back(NetworkBehaviourAction::GenerateEvent(event));
}
@@ -272,7 +272,7 @@ impl NetworkBehaviour for Bitswap {
{
ledger.received_want_list.insert(cid.to_owned(), *priority);
let event = BitswapEvent::ReceivedWant(source.clone(), cid.clone(), *priority);
let event = BitswapEvent::ReceivedWant(source, cid.clone(), *priority);
self.events
.push_back(NetworkBehaviourAction::GenerateEvent(event));
}
@@ -281,7 +281,7 @@ impl NetworkBehaviour for Bitswap {
for block in mem::take(&mut message.blocks) {
self.cancel_block(&block.cid());
let event = BitswapEvent::ReceivedBlock(source.clone(), block);
let event = BitswapEvent::ReceivedBlock(source, block);
self.events
.push_back(NetworkBehaviourAction::GenerateEvent(event));
}
@@ -308,7 +308,7 @@ impl NetworkBehaviour for Bitswap {
}
return Poll::Ready(NetworkBehaviourAction::NotifyHandler {
peer_id: peer_id.clone(),
peer_id: *peer_id,
handler: NotifyHandler::Any,
event: message,
});

View File

@@ -46,7 +46,7 @@ async fn find_peer_query<T: IpfsTypes>(
} = query;
let peer_id = arg.into_inner();
let addrs = ipfs
.find_peer(peer_id.clone())
.find_peer(peer_id)
.maybe_timeout(timeout.map(StringSerialized::into_inner))
.await
.map_err(StringError::from)?
@@ -189,7 +189,7 @@ async fn get_closest_peers_query<T: IpfsTypes>(
} = query;
let peer_id = arg.into_inner();
let closest_peers = ipfs
.get_closest_peers(peer_id.clone())
.get_closest_peers(peer_id)
.maybe_timeout(timeout.map(StringSerialized::into_inner))
.await
.map_err(StringError::from)?

View File

@@ -921,7 +921,7 @@ impl<Types: IpfsTypes> Ipfs<Types> {
self.to_task
.clone()
.send(IpfsEvent::FindPeer(peer_id.clone(), false, tx))
.send(IpfsEvent::FindPeer(peer_id, false, tx))
.await?;
match rx.await? {
@@ -934,7 +934,7 @@ impl<Types: IpfsTypes> Ipfs<Types> {
self.to_task
.clone()
.send(IpfsEvent::FindPeer(peer_id.clone(), true, tx))
.send(IpfsEvent::FindPeer(peer_id, true, tx))
.await?;
match rx.await? {

View File

@@ -473,7 +473,7 @@ impl<Types: IpfsTypes> Behaviour<Types> {
pub fn add_peer(&mut self, peer: PeerId, addr: Multiaddr) {
self.kademlia.add_address(&peer, addr);
self.swarm.add_peer(peer.clone());
self.swarm.add_peer(peer);
// FIXME: the call below automatically performs a dial attempt
// to the given peer; it is unsure that we want it done within
// add_peer, especially since that peer might not belong to the

View File

@@ -58,7 +58,7 @@ pub async fn create_swarm<TIpfsTypes: IpfsTypes>(
swarm_span: Span,
repo: Arc<Repo<TIpfsTypes>>,
) -> io::Result<TSwarm<TIpfsTypes>> {
let peer_id = options.peer_id.clone();
let peer_id = options.peer_id;
// Set up an encrypted TCP transport over the Mplex protocol.
let transport = transport::build_transport(options.keypair.clone())?;

View File

@@ -207,13 +207,7 @@ impl Pubsub {
pub fn subscribed_peers(&self, topic: &Topic) -> Vec<PeerId> {
self.peers
.iter()
.filter_map(|(k, v)| {
if v.contains(topic) {
Some(k.clone())
} else {
None
}
})
.filter_map(|(k, v)| if v.contains(topic) { Some(*k) } else { None })
.collect()
}
@@ -384,7 +378,7 @@ impl NetworkBehaviour for Pubsub {
peer_id,
topic,
}) => {
let topics = self.peers.entry(peer_id.clone()).or_insert_with(Vec::new);
let topics = self.peers.entry(peer_id).or_insert_with(Vec::new);
let appeared = topics.is_empty();
if topics.iter().find(|&t| t == &topic).is_none() {
topics.push(topic);
@@ -400,7 +394,7 @@ impl NetworkBehaviour for Pubsub {
peer_id,
topic,
}) => {
if let Entry::Occupied(mut oe) = self.peers.entry(peer_id.clone()) {
if let Entry::Occupied(mut oe) = self.peers.entry(peer_id) {
let topics = oe.get_mut();
if let Some(pos) = topics.iter().position(|t| t == &topic) {
topics.swap_remove(pos);

View File

@@ -27,7 +27,7 @@ pub struct Disconnector {
impl Disconnector {
pub fn disconnect<T: NetworkBehaviour>(self, swarm: &mut Swarm<T>) {
Swarm::ban_peer_id(swarm, self.peer_id.clone());
Swarm::ban_peer_id(swarm, self.peer_id);
Swarm::unban_peer_id(swarm, self.peer_id);
}
}
@@ -67,7 +67,7 @@ impl SwarmApi {
if let Some(any) = conns.first() {
Some(Connection {
addr: MultiaddrWithPeerId::from((any.clone(), peer.clone())),
addr: MultiaddrWithPeerId::from((any.clone(), *peer)),
rtt,
})
} else {
@@ -78,7 +78,7 @@ impl SwarmApi {
pub fn set_rtt(&mut self, peer_id: &PeerId, rtt: Duration) {
// FIXME: this is for any connection
self.roundtrip_times.insert(peer_id.clone(), rtt);
self.roundtrip_times.insert(*peer_id, rtt);
}
pub fn connect(&mut self, addr: MultiaddrWithPeerId) -> Option<SubscriptionFuture<(), String>> {
@@ -156,16 +156,16 @@ impl NetworkBehaviour for SwarmApi {
trace!("inject_connected {} {:?}", peer_id, cp);
let addr: MultiaddrWithoutPeerId = connection_point_addr(cp).to_owned().try_into().unwrap();
self.peers.insert(peer_id.clone());
let connections = self.connected_peers.entry(peer_id.clone()).or_default();
self.peers.insert(*peer_id);
let connections = self.connected_peers.entry(*peer_id).or_default();
connections.push(addr.clone());
self.connections.insert(addr.clone(), peer_id.clone());
self.connections.insert(addr.clone(), *peer_id);
if let ConnectedPoint::Dialer { .. } = cp {
let addr = MultiaddrWithPeerId {
multiaddr: addr,
peer_id: peer_id.clone(),
peer_id: *peer_id,
};
self.connect_registry
@@ -268,7 +268,7 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn swarm_api() {
let (peer1_id, trans) = mk_transport();
let mut swarm1 = Swarm::new(trans, SwarmApi::default(), peer1_id.clone());
let mut swarm1 = Swarm::new(trans, SwarmApi::default(), peer1_id);
let (peer2_id, trans) = mk_transport();
let mut swarm2 = Swarm::new(trans, SwarmApi::default(), peer2_id);

View File

@@ -51,7 +51,7 @@ async fn connect_two_nodes_by_peer_id() {
let node_b = Node::new("b").await;
node_a
.add_peer(node_b.id.clone(), node_b.addrs[0].clone())
.add_peer(node_b.id, node_b.addrs[0].clone())
.await
.unwrap();
let b_id_multiaddr: Multiaddr = format!("/p2p/{}", &node_b.id).parse().unwrap();
@@ -93,7 +93,7 @@ async fn connect_two_nodes_with_two_connections_doesnt_panic() {
assert_eq!(addresses.len(), 2);
for mut addr in addresses.into_iter() {
addr.push(Protocol::P2p(node_a.id.clone().into()));
addr.push(Protocol::P2p(node_a.id.into()));
timeout(TIMEOUT, node_b.connect(addr))
.await

View File

@@ -22,10 +22,10 @@ async fn find_peer_local() {
// while nodes[0] is connected to nodes[1], they know each
// other's addresses and can find them without using the DHT
let mut found_addrs = nodes[0].find_peer(nodes[1].id.clone()).await.unwrap();
let mut found_addrs = nodes[0].find_peer(nodes[1].id).await.unwrap();
for addr in &mut found_addrs {
addr.push(Protocol::P2p(nodes[1].id.clone().into()));
addr.push(Protocol::P2p(nodes[1].id.into()));
assert!(nodes[1].addrs.contains(addr));
}
}
@@ -42,12 +42,12 @@ async fn spawn_bootstrapped_nodes(n: usize) -> (Vec<Node>, Option<ForeignNode>)
// they don't have a chance to form the full picture in the DHT
for i in 0..n {
let (next_id, next_addr) = if i < n - 1 {
(nodes[i + 1].id.clone(), nodes[i + 1].addrs[0].clone())
(nodes[i + 1].id, nodes[i + 1].addrs[0].clone())
} else {
// the last node in the chain also needs to know some address
// in order to bootstrap, so give it its neighbour's information
// and then bootstrap it as well
(nodes[n - 2].id.clone(), nodes[n - 2].addrs[0].clone())
(nodes[n - 2].id, nodes[n - 2].addrs[0].clone())
};
nodes[i].add_peer(next_id, next_addr).await.unwrap();
@@ -80,13 +80,13 @@ async fn spawn_bootstrapped_nodes(n: usize) -> (Vec<Node>, Option<ForeignNode>)
for i in 0..(n - 1) {
let (next_id, next_addr) = if i == n / 2 - 1 || i == n / 2 {
println!("telling rust node {} about the foreign node", i);
(foreign_node.id.clone(), foreign_node.addrs[0].clone())
(foreign_node.id, foreign_node.addrs[0].clone())
} else if i < n / 2 {
println!("telling rust node {} about rust node {}", i, i + 1);
(nodes[i + 1].id.clone(), nodes[i + 1].addrs[0].clone())
(nodes[i + 1].id, nodes[i + 1].addrs[0].clone())
} else {
println!("telling rust node {} about rust node {}", i, i - 1);
(nodes[i - 1].id.clone(), nodes[i - 1].addrs[0].clone())
(nodes[i - 1].id, nodes[i - 1].addrs[0].clone())
};
nodes[i].add_peer(next_id, next_addr).await.unwrap();
@@ -112,10 +112,7 @@ async fn dht_find_peer() {
// node 0 now tries to find the address of the very last node in the
// chain; the chain should be long enough for it not to automatically
// be connected to it after the bootstrap
let found_addrs = nodes[0]
.find_peer(nodes[last_index].id.clone())
.await
.unwrap();
let found_addrs = nodes[0].find_peer(nodes[last_index].id).await.unwrap();
let to_be_found = strip_peer_id(nodes[last_index].addrs[0].clone());
assert_eq!(found_addrs, vec![to_be_found]);
@@ -127,11 +124,7 @@ async fn dht_get_closest_peers() {
let (nodes, _foreign_node) = spawn_bootstrapped_nodes(CHAIN_LEN).await;
assert_eq!(
nodes[0]
.get_closest_peers(nodes[0].id.clone())
.await
.unwrap()
.len(),
nodes[0].get_closest_peers(nodes[0].id).await.unwrap().len(),
CHAIN_LEN - 1
);
}
@@ -179,7 +172,7 @@ async fn dht_providing() {
.get_providers(cid)
.await
.unwrap()
.contains(&nodes[last_index].id.clone()));
.contains(&nodes[last_index].id));
}
/// Check if Ipfs::{get, put} does its job.

View File

@@ -103,7 +103,7 @@ async fn publish_between_two_nodes() {
]
.iter()
.cloned()
.map(|(topics, id, data)| (topics.to_vec(), id.clone(), data.to_vec()))
.map(|(topics, id, data)| (topics.to_vec(), *id, data.to_vec()))
.collect::<HashSet<_>>();
for st in &mut [b_msgs.by_ref(), a_msgs.by_ref()] {