clippy all code (#1214)

1. fix clippy lints in code
2. add fmt and clippy checks in CI
This commit is contained in:
Sijie.Sun
2025-08-10 22:56:41 +08:00
committed by GitHub
parent 0087ac3ffc
commit e43537939a
144 changed files with 1475 additions and 1531 deletions
+1 -1
View File
@@ -90,7 +90,7 @@ impl PeerRoutePair {
}
}
pub fn get_udp_nat_type(self: &Self) -> String {
pub fn get_udp_nat_type(&self) -> String {
use crate::proto::common::NatType;
let mut ret = NatType::Unknown;
if let Some(r) = &self.route.clone().unwrap_or_default().stun_info {
+18 -18
View File
@@ -38,13 +38,13 @@ impl From<String> for Uuid {
impl fmt::Display for Uuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", uuid::Uuid::from(self.clone()))
write!(f, "{}", uuid::Uuid::from(*self))
}
}
impl fmt::Debug for Uuid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", uuid::Uuid::from(self.clone()))
write!(f, "{}", uuid::Uuid::from(*self))
}
}
@@ -62,9 +62,9 @@ impl From<Ipv4Addr> for std::net::Ipv4Addr {
}
}
impl ToString for Ipv4Addr {
fn to_string(&self) -> String {
std::net::Ipv4Addr::from(self.addr).to_string()
impl Display for Ipv4Addr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", std::net::Ipv4Addr::from(self.addr))
}
}
@@ -93,9 +93,9 @@ impl From<Ipv6Addr> for std::net::Ipv6Addr {
}
}
impl ToString for Ipv6Addr {
fn to_string(&self) -> String {
std::net::Ipv6Addr::from(self.clone()).to_string()
impl Display for Ipv6Addr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", std::net::Ipv6Addr::from(*self))
}
}
@@ -120,7 +120,7 @@ impl From<Ipv4Inet> for cidr::Ipv4Inet {
impl fmt::Display for Ipv4Inet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", cidr::Ipv4Inet::from(self.clone()))
write!(f, "{}", cidr::Ipv4Inet::from(*self))
}
}
@@ -155,7 +155,7 @@ impl From<Ipv6Inet> for cidr::Ipv6Inet {
impl fmt::Display for Ipv6Inet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", cidr::Ipv6Inet::from(self.clone()))
write!(f, "{}", cidr::Ipv6Inet::from(*self))
}
}
@@ -194,7 +194,7 @@ impl From<IpInet> for cidr::IpInet {
impl Display for IpInet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", cidr::IpInet::from(self.clone()))
write!(f, "{}", cidr::IpInet::from(*self))
}
}
@@ -240,11 +240,11 @@ impl From<std::net::SocketAddr> for SocketAddr {
fn from(value: std::net::SocketAddr) -> Self {
match value {
std::net::SocketAddr::V4(v4) => SocketAddr {
ip: Some(socket_addr::Ip::Ipv4(v4.ip().clone().into())),
ip: Some(socket_addr::Ip::Ipv4((*v4.ip()).into())),
port: v4.port() as u32,
},
std::net::SocketAddr::V6(v6) => SocketAddr {
ip: Some(socket_addr::Ip::Ipv6(v6.ip().clone().into())),
ip: Some(socket_addr::Ip::Ipv6((*v6.ip()).into())),
port: v6.port() as u32,
},
}
@@ -271,9 +271,9 @@ impl From<SocketAddr> for std::net::SocketAddr {
}
}
impl ToString for SocketAddr {
fn to_string(&self) -> String {
std::net::SocketAddr::from(self.clone()).to_string()
impl Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", std::net::SocketAddr::from(*self))
}
}
@@ -302,14 +302,14 @@ impl TryFrom<CompressorAlgo> for CompressionAlgoPb {
impl fmt::Debug for Ipv4Addr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let std_ipv4_addr = std::net::Ipv4Addr::from(self.clone());
let std_ipv4_addr = std::net::Ipv4Addr::from(*self);
write!(f, "{}", std_ipv4_addr)
}
}
impl fmt::Debug for Ipv6Addr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let std_ipv6_addr = std::net::Ipv6Addr::from(self.clone());
let std_ipv6_addr = std::net::Ipv6Addr::from(*self);
write!(f, "{}", std_ipv6_addr)
}
}
+2
View File
@@ -1,3 +1,5 @@
#![allow(clippy::module_inception)]
use prost::DecodeError;
use super::rpc_types;
+12 -3
View File
@@ -24,6 +24,12 @@ pub struct BidirectRpcManager {
tasks: Mutex<Option<JoinSet<()>>>,
}
impl Default for BidirectRpcManager {
fn default() -> Self {
Self::new()
}
}
impl BidirectRpcManager {
pub fn new() -> Self {
Self {
@@ -42,7 +48,10 @@ impl BidirectRpcManager {
pub fn new_with_stats_manager(stats_manager: Arc<StatsManager>) -> Self {
Self {
rpc_client: Client::new_with_stats_manager(stats_manager.clone()),
rpc_server: Server::new_with_registry_and_stats_manager(Arc::new(ServiceRegistry::new()), stats_manager),
rpc_server: Server::new_with_registry_and_stats_manager(
Arc::new(ServiceRegistry::new()),
stats_manager,
),
rx_timeout: None,
error: Arc::new(Mutex::new(None)),
@@ -176,7 +185,7 @@ impl BidirectRpcManager {
return;
};
tasks.abort_all();
while let Some(_) = tasks.join_next().await {}
while tasks.join_next().await.is_some() {}
}
pub fn take_error(&self) -> Option<Error> {
@@ -187,7 +196,7 @@ impl BidirectRpcManager {
let Some(mut tasks) = self.tasks.lock().unwrap().take() else {
return;
};
while let Some(_) = tasks.join_next().await {
while tasks.join_next().await.is_some() {
// when any task is done, abort all tasks
tasks.abort_all();
}
+35 -20
View File
@@ -18,7 +18,9 @@ use crate::defer;
use crate::proto::common::{
CompressionAlgoPb, RpcCompressionInfo, RpcDescriptor, RpcPacket, RpcRequest, RpcResponse,
};
use crate::proto::rpc_impl::packet::{build_rpc_packet, compress_packet, decompress_packet};
use crate::proto::rpc_impl::packet::{
build_rpc_packet, compress_packet, decompress_packet, BuildRpcPacketArgs,
};
use crate::proto::rpc_types::controller::Controller;
use crate::proto::rpc_types::descriptor::MethodDescriptor;
use crate::proto::rpc_types::{
@@ -53,6 +55,15 @@ struct InflightRequest {
start_time: std::time::Instant,
}
impl std::fmt::Debug for InflightRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("InflightRequest")
.field("sender", &self.sender)
.field("start_time", &self.start_time)
.finish()
}
}
#[derive(Debug, Clone, Default)]
pub struct PeerInfo {
pub peer_id: PeerId,
@@ -72,6 +83,12 @@ pub struct Client {
stats_manager: Option<Arc<StatsManager>>,
}
impl Default for Client {
fn default() -> Self {
Self::new()
}
}
impl Client {
pub fn new() -> Self {
let (ring_a, ring_b) = create_ring_tunnel_pair();
@@ -86,15 +103,9 @@ impl Client {
}
pub fn new_with_stats_manager(stats_manager: Arc<StatsManager>) -> Self {
let (ring_a, ring_b) = create_ring_tunnel_pair();
Self {
mpsc: Mutex::new(MpscTunnel::new(ring_a, None)),
transport: Mutex::new(MpscTunnel::new(ring_b, None)),
inflight_requests: Arc::new(DashMap::new()),
peer_info: Arc::new(DashMap::new()),
tasks: Mutex::new(JoinSet::new()),
stats_manager: Some(stats_manager),
}
let mut ret = Self::new();
ret.stats_manager = Some(stats_manager);
ret
}
pub fn get_transport_sink(&self) -> MpscTunnelSender {
@@ -151,7 +162,11 @@ impl Client {
};
let Some(mut inflight_request) = inflight_requests.get_mut(&key) else {
tracing::warn!(?key, "No inflight request found for key");
tracing::warn!(
?key,
?inflight_requests,
"No inflight request found for key"
);
continue;
};
@@ -276,19 +291,19 @@ impl Client {
.await
.unwrap();
let packets = build_rpc_packet(
self.from_peer_id,
self.to_peer_id,
let packets = build_rpc_packet(BuildRpcPacketArgs {
from_peer: self.from_peer_id,
to_peer: self.to_peer_id,
rpc_desc,
transaction_id,
true,
&buf,
ctrl.trace_id(),
RpcCompressionInfo {
is_req: true,
content: &buf,
trace_id: ctrl.trace_id(),
compression_info: RpcCompressionInfo {
algo: c_algo.into(),
accepted_algo: CompressionAlgoPb::Zstd.into(),
},
);
});
let timeout_dur = std::time::Duration::from_millis(ctrl.timeout_ms() as u64);
let mut rpc_packet = timeout(timeout_dur, self.do_rpc(packets, &mut rx)).await??;
@@ -298,7 +313,7 @@ impl Client {
self.to_peer_id,
PeerInfo {
peer_id: self.to_peer_id,
compression_info: compression_info.clone(),
compression_info,
last_active: Some(std::time::Instant::now()),
},
);
+37 -29
View File
@@ -21,7 +21,7 @@ pub async fn compress_packet(
let algo = accepted_compression_algo
.try_into()
.unwrap_or(CompressorAlgo::None);
let compressed = compressor.compress_raw(&content, algo).await?;
let compressed = compressor.compress_raw(content, algo).await?;
if compressed.len() >= content.len() {
Ok((content.to_vec(), CompressionAlgoPb::None))
} else {
@@ -35,7 +35,7 @@ pub async fn decompress_packet(
) -> Result<Vec<u8>, Error> {
let compressor = DefaultCompressor::new();
let algo = compression_algo.try_into()?;
let decompressed = compressor.decompress_raw(&content, algo).await?;
let decompressed = compressor.decompress_raw(content, algo).await?;
Ok(decompressed)
}
@@ -45,6 +45,12 @@ pub struct PacketMerger {
last_updated: std::time::Instant,
}
impl Default for PacketMerger {
fn default() -> Self {
Self::new()
}
}
impl PacketMerger {
pub fn new() -> Self {
Self {
@@ -133,55 +139,57 @@ impl PacketMerger {
}
}
pub fn build_rpc_packet(
from_peer: PeerId,
to_peer: PeerId,
rpc_desc: RpcDescriptor,
transaction_id: RpcTransactId,
is_req: bool,
content: &Vec<u8>,
trace_id: i32,
compression_info: RpcCompressionInfo,
) -> Vec<ZCPacket> {
pub struct BuildRpcPacketArgs<'a> {
pub from_peer: PeerId,
pub to_peer: PeerId,
pub rpc_desc: RpcDescriptor,
pub transaction_id: RpcTransactId,
pub is_req: bool,
pub content: &'a [u8],
pub trace_id: i32,
pub compression_info: RpcCompressionInfo,
}
pub fn build_rpc_packet(args: BuildRpcPacketArgs<'_>) -> Vec<ZCPacket> {
let mut ret = Vec::new();
let content_mtu = RPC_PACKET_CONTENT_MTU;
let total_pieces = (content.len() + content_mtu - 1) / content_mtu;
let total_pieces = args.content.len().div_ceil(content_mtu);
let mut cur_offset = 0;
while cur_offset < content.len() || content.len() == 0 {
while cur_offset < args.content.len() || args.content.is_empty() {
let mut cur_len = content_mtu;
if cur_offset + cur_len > content.len() {
cur_len = content.len() - cur_offset;
if cur_offset + cur_len > args.content.len() {
cur_len = args.content.len() - cur_offset;
}
let mut cur_content = Vec::new();
cur_content.extend_from_slice(&content[cur_offset..cur_offset + cur_len]);
cur_content.extend_from_slice(&args.content[cur_offset..cur_offset + cur_len]);
let cur_packet = RpcPacket {
from_peer,
to_peer,
from_peer: args.from_peer,
to_peer: args.to_peer,
descriptor: if cur_offset == 0
|| compression_info.algo == CompressionAlgoPb::None as i32
|| args.compression_info.algo == CompressionAlgoPb::None as i32
{
// old version must have descriptor on every piece
Some(rpc_desc.clone())
Some(args.rpc_desc.clone())
} else {
None
},
is_request: is_req,
is_request: args.is_req,
total_pieces: total_pieces as u32,
piece_idx: (cur_offset / content_mtu) as u32,
transaction_id,
piece_idx: (cur_offset / RPC_PACKET_CONTENT_MTU) as u32,
transaction_id: args.transaction_id,
body: cur_content,
trace_id,
trace_id: args.trace_id,
compression_info: if cur_offset == 0 {
Some(compression_info.clone())
Some(args.compression_info)
} else {
None
},
};
cur_offset += cur_len;
let packet_type = if is_req {
let packet_type = if args.is_req {
PacketType::RpcReq
} else {
PacketType::RpcResp
@@ -190,10 +198,10 @@ pub fn build_rpc_packet(
let mut buf = Vec::new();
cur_packet.encode(&mut buf).unwrap();
let mut zc_packet = ZCPacket::new_with_payload(&buf);
zc_packet.fill_peer_manager_hdr(from_peer, to_peer, packet_type as u8);
zc_packet.fill_peer_manager_hdr(args.from_peer, args.to_peer, packet_type as u8);
ret.push(zc_packet);
if content.len() == 0 {
if args.content.is_empty() {
break;
}
}
+17 -13
View File
@@ -20,6 +20,7 @@ use crate::{
self, CompressionAlgoPb, RpcCompressionInfo, RpcPacket, RpcRequest, RpcResponse,
TunnelInfo,
},
rpc_impl::packet::BuildRpcPacketArgs,
rpc_types::{controller::Controller, error::Result},
},
tunnel::{
@@ -53,6 +54,12 @@ pub struct Server {
stats_manager: Option<Arc<StatsManager>>,
}
impl Default for Server {
fn default() -> Self {
Self::new()
}
}
impl Server {
pub fn new() -> Self {
Server::new_with_registry(Arc::new(ServiceRegistry::new()))
@@ -139,10 +146,7 @@ impl Server {
tracing::trace!(?key, ?packet, "Received request packet");
let ret = packet_merges
.entry(key.clone())
.or_insert_with(PacketMerger::new)
.feed(packet);
let ret = packet_merges.entry(key.clone()).or_default().feed(packet);
match ret {
Ok(Some(packet)) => {
@@ -238,7 +242,7 @@ impl Server {
let mut resp_msg = RpcResponse::default();
let now = std::time::Instant::now();
let compression_info = packet.compression_info.clone();
let compression_info = packet.compression_info;
let resp_bytes = Self::handle_rpc_request(packet, reg, tunnel_info).await;
match &resp_bytes {
@@ -290,19 +294,19 @@ impl Server {
.await
.unwrap();
let packets = build_rpc_packet(
to_peer,
from_peer,
desc,
let packets = build_rpc_packet(BuildRpcPacketArgs {
from_peer: to_peer,
to_peer: from_peer,
rpc_desc: desc,
transaction_id,
false,
&compressed_resp,
is_req: false,
content: &compressed_resp,
trace_id,
RpcCompressionInfo {
compression_info: RpcCompressionInfo {
algo: algo.into(),
accepted_algo: CompressionAlgoPb::Zstd.into(),
},
);
});
for packet in packets {
if let Err(err) = sender.send(packet).await {
@@ -52,6 +52,12 @@ pub struct ServiceRegistry {
table: DashMap<ServiceKey, ServiceEntry>,
}
impl Default for ServiceRegistry {
fn default() -> Self {
Self::new()
}
}
impl ServiceRegistry {
pub fn new() -> Self {
Self {
-1
View File
@@ -17,7 +17,6 @@ pub trait Handler: Clone + Send + Sync + 'static {
type Descriptor: descriptor::ServiceDescriptor + Default;
type Controller: super::controller::Controller;
///
/// Perform a raw call to the specified service and method.
async fn call(
+8 -4
View File
@@ -233,8 +233,10 @@ async fn rpc_tunnel_stuck_test() {
let out =
client.scoped_client::<GreetingClientFactory<RpcController>>(1, 1, "test".to_string());
tasks.spawn(async move {
let mut ctrl = RpcController::default();
ctrl.timeout_ms = 1000;
let ctrl = RpcController {
timeout_ms: 1000,
..Default::default()
};
let input = SayHelloRequest {
name: "world".to_string(),
@@ -263,8 +265,10 @@ async fn rpc_tunnel_stuck_test() {
let out =
client.scoped_client::<GreetingClientFactory<RpcController>>(1, 1, "test".to_string());
let mut ctrl = RpcController::default();
ctrl.timeout_ms = 1000;
let ctrl = RpcController {
timeout_ms: 1000,
..Default::default()
};
let input = SayHelloRequest {
name: "fuck world".to_string(),
};