fix peer establishing a direct conn with subnet proxy to one of the local interfaces (#1782)

* fix peer establishing a direct conn with subnet proxy to one of the local interfaces

* fix peer manager reference cycle (Arc loop) by switching holders to Weak references
This commit is contained in:
KKRainbow
2026-01-15 01:00:32 +08:00
committed by GitHub
parent f8b34e3c86
commit 53264f67bf
21 changed files with 354 additions and 170 deletions
+12 -13
View File
@@ -230,8 +230,10 @@ impl TcpProxyForKcpSrcTrait for TcpProxyForKcpSrc {
}
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
self.0
.get_peer_manager()
let Some(peer_manager) = self.0.get_peer_manager() else {
return false;
};
peer_manager
.check_allow_kcp_to_dst(&IpAddr::V4(*dst_ip))
.await
}
@@ -503,19 +505,16 @@ impl KcpProxyDst {
route.get_peer_groups_by_ip(&dst_ip)
);
let send_to_self =
Some(dst_socket.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()));
if global_ctx.should_deny_proxy(&dst_socket, false) {
return Err(anyhow::anyhow!(
"dst socket {:?} is in running listeners, ignore it",
dst_socket
)
.into());
}
let send_to_self = global_ctx.is_ip_local_virtual_ip(&dst_ip);
if send_to_self && global_ctx.no_tun() {
if global_ctx.is_port_in_running_listeners(dst_socket.port(), false)
&& global_ctx.is_ip_in_same_network(&src_ip)
{
return Err(anyhow::anyhow!(
"dst socket {:?} is in running listeners, ignore it",
dst_socket
)
.into());
}
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
}
+13 -12
View File
@@ -192,8 +192,10 @@ impl TcpProxyForKcpSrcTrait for TcpProxyForQUICSrc {
}
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
let peer_map: Arc<crate::peers::peer_map::PeerMap> =
self.0.get_peer_manager().get_peer_map();
let Some(peer_manager) = self.0.get_peer_manager() else {
return false;
};
let peer_map: Arc<crate::peers::peer_map::PeerMap> = peer_manager.get_peer_map();
let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
return false;
};
@@ -414,17 +416,16 @@ impl QUICProxyDst {
route.get_peer_groups_by_ipv4(&dst_ip)
);
let send_to_self = Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address());
if ctx.should_deny_proxy(&dst_socket.into(), false) {
return Err(anyhow::anyhow!(
"dst socket {:?} is in running listeners, ignore it",
dst_socket
)
.into());
}
let send_to_self = ctx.is_ip_local_virtual_ip(&dst_ip.into());
if send_to_self && ctx.no_tun() {
if ctx.is_port_in_running_listeners(dst_socket.port(), false)
&& ctx.is_ip_in_same_network(&src_ip)
{
return Err(anyhow::anyhow!(
"dst socket {:?} is in running listeners, ignore it",
dst_socket
)
.into());
}
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
}
+13 -7
View File
@@ -233,7 +233,6 @@ impl AsyncTcpConnector for Socks5KcpConnector {
kcp_endpoint,
peer_mgr: self.peer_mgr.clone(),
};
println!("connect to kcp endpoint, addr = {:?}", addr);
let ret = c
.connect(self.src_addr, addr)
.await
@@ -355,7 +354,7 @@ impl Socks5ServerNet {
pub fn new(
ipv4_addr: cidr::Ipv4Inet,
auth: Option<SimpleUserPassword>,
peer_manager: Arc<PeerManager>,
peer_manager: Weak<PeerManager>,
packet_recv: Arc<Mutex<mpsc::Receiver<ZCPacket>>>,
entries: Socks5EntrySet,
) -> Self {
@@ -390,6 +389,10 @@ impl Socks5ServerNet {
let dst = ipv4.get_destination();
let packet = ZCPacket::new_with_payload(&data);
let Some(peer_manager) = peer_manager.upgrade() else {
tracing::warn!("peer manager is gone, smoltcp sender exited");
return;
};
if let Err(e) = peer_manager
.send_msg_by_ip(packet, IpAddr::V4(dst), false)
.await
@@ -474,7 +477,7 @@ struct UdpClientKey {
pub struct Socks5Server {
global_ctx: Arc<GlobalCtx>,
peer_manager: Arc<PeerManager>,
peer_manager: Weak<PeerManager>,
auth: Option<SimpleUserPassword>,
tasks: Arc<std::sync::Mutex<JoinSet<()>>>,
@@ -587,7 +590,7 @@ impl Socks5Server {
let (packet_sender, packet_recv) = mpsc::channel(1024);
Arc::new(Self {
global_ctx,
peer_manager,
peer_manager: Arc::downgrade(&peer_manager),
auth,
tasks: Arc::new(std::sync::Mutex::new(JoinSet::new())),
@@ -675,7 +678,7 @@ impl Socks5Server {
)?;
let entries = self.entries.clone();
let peer_manager = Arc::downgrade(&self.peer_manager);
let peer_manager = self.peer_manager.clone();
let net = self.net.clone();
self.tasks.lock().unwrap().spawn(async move {
loop {
@@ -714,7 +717,10 @@ impl Socks5Server {
let cfgs = self.global_ctx.config.get_port_forwards();
self.reload_port_forwards(&cfgs).await?;
self.peer_manager
let Some(peer_manager) = self.peer_manager.upgrade() else {
return Err(anyhow::anyhow!("peer manager is gone").into());
};
peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.await;
@@ -806,7 +812,7 @@ impl Socks5Server {
join_joinset_background(tasks.clone(), "tcp port forward".to_string());
let forward_tasks = tasks;
let kcp_endpoint = self.kcp_endpoint.lock().await.clone();
let peer_mgr = Arc::downgrade(&self.peer_manager.clone());
let peer_mgr = self.peer_manager.clone();
let cancel_token = CancellationToken::new();
self.cancel_tokens
.insert(cfg.clone(), cancel_token.clone().drop_guard());
+35 -26
View File
@@ -316,7 +316,7 @@ type AddrConnSockMap = Arc<DashMap<SocketAddr, ArcNatDstEntry>>;
#[derive(Debug)]
pub struct TcpProxy<C: NatDstConnector> {
global_ctx: Arc<GlobalCtx>,
peer_manager: Arc<PeerManager>,
peer_manager: Weak<PeerManager>,
local_port: AtomicU16,
tasks: Arc<std::sync::Mutex<JoinSet<()>>>,
@@ -346,8 +346,10 @@ impl<C: NatDstConnector> PeerPacketFilter for TcpProxy<C> {
if let Err(e) = smoltcp_stack_sender.try_send(packet) {
tracing::error!("send to smoltcp stack failed: {:?}", e);
}
} else if let Err(e) = self.peer_manager.get_nic_channel().send(packet).await {
tracing::error!("send to nic failed: {:?}", e);
} else if let Some(peer_manager) = self.get_peer_manager() {
if let Err(e) = peer_manager.get_nic_channel().send(packet).await {
tracing::error!("send to nic failed: {:?}", e);
}
}
return None;
} else {
@@ -443,7 +445,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
Arc::new(Self {
global_ctx: global_ctx.clone(),
peer_manager,
peer_manager: Arc::downgrade(&peer_manager),
local_port: AtomicU16::new(0),
tasks: Arc::new(std::sync::Mutex::new(JoinSet::new())),
@@ -467,6 +469,10 @@ impl<C: NatDstConnector> TcpProxy<C> {
})
}
pub fn get_peer_manager(&self) -> Option<Arc<PeerManager>> {
self.peer_manager.upgrade()
}
fn update_tcp_packet_checksum(
tcp_packet: &mut MutableTcpPacket,
ipv4_src: &Ipv4Addr,
@@ -487,10 +493,13 @@ impl<C: NatDstConnector> TcpProxy<C> {
self.run_syn_map_cleaner().await?;
self.run_listener().await?;
if add_pipeline {
self.peer_manager
let peer_manager = self
.get_peer_manager()
.ok_or_else(|| anyhow::anyhow!("peer manager is gone"))?;
peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.await;
self.peer_manager
peer_manager
.add_nic_packet_process_pipeline(Box::new(self.clone()))
.await;
}
@@ -569,6 +578,10 @@ impl<C: NatDstConnector> TcpProxy<C> {
let dst = ipv4.get_destination();
let packet = ZCPacket::new_with_payload(&data);
let Some(peer_mgr) = peer_mgr.upgrade() else {
tracing::warn!("peer manager is gone, smoltcp sender exited");
return;
};
if let Err(e) = peer_mgr
.send_msg_by_ip(packet, IpAddr::V4(dst), false)
.await
@@ -734,21 +747,18 @@ impl<C: NatDstConnector> TcpProxy<C> {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
}
let nat_dst = if Some(nat_entry.real_dst.ip())
== global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
{
if global_ctx.is_port_in_running_listeners(nat_entry.real_dst.port(), false)
&& global_ctx.is_ip_in_same_network(&nat_entry.src.ip())
{
tracing::error!(
?nat_entry,
"nat dst port {} is in running listeners, ignore it",
nat_entry.real_dst.port()
);
nat_entry.state.store(NatDstEntryState::Closed);
Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry);
return;
}
if global_ctx.should_deny_proxy(&nat_entry.real_dst, false) {
tracing::error!(
?nat_entry,
"nat dst port {} is in running listeners, ignore it",
nat_entry.real_dst.port()
);
nat_entry.state.store(NatDstEntryState::Closed);
Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry);
return;
}
let nat_dst = if global_ctx.is_ip_local_virtual_ip(&nat_entry.real_dst.ip()) {
format!("127.0.0.1:{}", nat_entry.real_dst.port())
.parse()
.unwrap()
@@ -831,7 +841,10 @@ impl<C: NatDstConnector> TcpProxy<C> {
}
pub fn get_my_peer_id(&self) -> u32 {
self.peer_manager.my_peer_id()
self.peer_manager
.upgrade()
.map(|pm| pm.my_peer_id())
.unwrap_or_default()
}
pub fn get_local_ip(&self) -> Option<Ipv4Addr> {
@@ -952,10 +965,6 @@ impl<C: NatDstConnector> TcpProxy<C> {
Some(())
}
pub fn get_peer_manager(&self) -> &Arc<PeerManager> {
&self.peer_manager
}
pub fn is_tcp_proxy_connection(&self, src: SocketAddr) -> bool {
self.syn_map.contains_key(&src) || self.addr_conn_map.contains_key(&src)
}
+12 -17
View File
@@ -12,7 +12,6 @@ use std::{
};
use device::BufferDevice;
use futures::Future;
use reactor::Reactor;
pub use smoltcp;
use smoltcp::{
@@ -24,6 +23,8 @@ pub use socket::{TcpListener, TcpStream, UdpSocket};
pub use socket_allocator::BufferSize;
use tokio::sync::Notify;
use crate::common::scoped_task::ScopedTask;
/// The async devices.
pub mod channel_device;
pub mod device;
@@ -78,6 +79,7 @@ pub struct Net {
ip_addr: IpCidr,
from_port: AtomicU16,
stopper: Arc<Notify>,
fut: ScopedTask<io::Result<()>>,
}
impl std::fmt::Debug for Net {
@@ -92,15 +94,10 @@ impl std::fmt::Debug for Net {
impl Net {
/// Creates a new `Net` instance. It panics if the medium is not supported.
pub fn new<D: device::AsyncDevice + 'static>(device: D, config: NetConfig) -> Net {
let (net, fut) = Self::new2(device, config);
tokio::spawn(fut);
net
Self::new2(device, config)
}
fn new2<D: device::AsyncDevice + 'static>(
device: D,
config: NetConfig,
) -> (Net, impl Future<Output = io::Result<()>> + Send) {
fn new2<D: device::AsyncDevice + 'static>(device: D, config: NetConfig) -> Net {
let mut buffer_device = BufferDevice::new(device.capabilities().clone());
let mut iface = Interface::new(config.interface_config, &mut buffer_device, Instant::now());
let ip_addr = config.ip_addr;
@@ -129,15 +126,13 @@ impl Net {
stopper.clone(),
);
(
Net {
reactor: Arc::new(reactor),
ip_addr: config.ip_addr,
from_port: AtomicU16::new(10001),
stopper,
},
fut,
)
Net {
reactor: Arc::new(reactor),
ip_addr: config.ip_addr,
from_port: AtomicU16::new(10001),
stopper,
fut: ScopedTask::from(tokio::spawn(fut)),
}
}
pub fn get_address(&self) -> IpAddr {
self.ip_addr.address().into()
+54 -32
View File
@@ -1,6 +1,6 @@
use std::{
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
sync::{atomic::AtomicBool, Arc},
sync::{atomic::AtomicBool, Arc, Weak},
time::Duration,
};
@@ -46,25 +46,35 @@ struct UdpNatEntry {
src_peer_id: PeerId,
my_peer_id: PeerId,
src_socket: SocketAddr,
socket: UdpSocket,
socket: Option<UdpSocket>,
forward_task: Mutex<Option<JoinHandle<()>>>,
stopped: AtomicBool,
start_time: std::time::Instant,
last_active_time: AtomicCell<std::time::Instant>,
denied: bool,
}
impl UdpNatEntry {
#[tracing::instrument(err(level = Level::WARN))]
fn new(src_peer_id: PeerId, my_peer_id: PeerId, src_socket: SocketAddr) -> Result<Self, Error> {
fn new(
src_peer_id: PeerId,
my_peer_id: PeerId,
src_socket: SocketAddr,
denied: bool,
) -> Result<Self, Error> {
// TODO: try use src port, so we will be ip restricted nat type
let socket2_socket = socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)?;
let dst_socket_addr = "0.0.0.0:0".parse().unwrap();
setup_sokcet2(&socket2_socket, &dst_socket_addr)?;
let socket = UdpSocket::from_std(socket2_socket.into())?;
let socket = if denied {
None
} else {
let socket2_socket = socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)?;
let dst_socket_addr = "0.0.0.0:0".parse().unwrap();
setup_sokcet2(&socket2_socket, &dst_socket_addr)?;
Some(UdpSocket::from_std(socket2_socket.into())?)
};
Ok(Self {
src_peer_id,
@@ -75,6 +85,7 @@ impl UdpNatEntry {
stopped: AtomicBool::new(false),
start_time: std::time::Instant::now(),
last_active_time: AtomicCell::new(std::time::Instant::now()),
denied,
})
}
@@ -165,7 +176,11 @@ impl UdpNatEntry {
let (len, src_socket) = match timeout(
Duration::from_secs(120),
self_clone.socket.recv_buf_from(&mut cur_buf),
self_clone
.socket
.as_ref()
.unwrap()
.recv_buf_from(&mut cur_buf),
)
.await
{
@@ -239,7 +254,7 @@ impl UdpNatEntry {
#[derive(Debug)]
pub struct UdpProxy {
global_ctx: ArcGlobalCtx,
peer_manager: Arc<PeerManager>,
peer_manager: Weak<PeerManager>,
cidr_set: CidrSet,
@@ -299,22 +314,7 @@ impl UdpProxy {
};
// TODO: should it be async.
let dst_socket = if Some(ipv4.get_destination())
== self.global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address)
{
if self
.global_ctx
.is_port_in_running_listeners(udp_packet.get_destination(), true)
&& self
.global_ctx
.is_ip_in_same_network(&std::net::IpAddr::V4(ipv4.get_source()))
{
tracing::debug!(
dst_port = udp_packet.get_destination(),
"dst socket is in running listeners, ignore it"
);
return Some(());
}
let dst_socket = if self.global_ctx.is_ip_local_virtual_ip(&real_dst_ip.into()) {
format!("127.0.0.1:{}", udp_packet.get_destination())
.parse()
.unwrap()
@@ -337,16 +337,29 @@ impl UdpProxy {
.entry(nat_key)
.or_try_insert_with::<Error>(|| {
tracing::info!(?packet, ?ipv4, ?udp_packet, "udp nat table entry created");
let denied = self.global_ctx.should_deny_proxy(
&SocketAddr::new(real_dst_ip.into(), udp_packet.get_destination()),
true,
);
let _g = self.global_ctx.net_ns.guard();
Ok(Arc::new(UdpNatEntry::new(
hdr.from_peer_id.get(),
hdr.to_peer_id.get(),
nat_key.src_socket,
denied,
)?))
})
.ok()?
.clone();
if nat_entry.denied {
tracing::debug!(
dst_port = udp_packet.get_destination(),
"dst socket is in running listeners, ignore it"
);
return Some(());
}
if nat_entry.forward_task.lock().await.is_none() {
nat_entry
.forward_task
@@ -367,6 +380,8 @@ impl UdpProxy {
let _g = self.global_ctx.net_ns.guard();
nat_entry
.socket
.as_ref()
.unwrap()
.send_to(udp_packet.payload(), dst_socket)
.await
};
@@ -405,7 +420,7 @@ impl UdpProxy {
let (sender, receiver) = channel(1024);
let ret = Self {
global_ctx,
peer_manager,
peer_manager: Arc::downgrade(&peer_manager),
cidr_set,
nat_table: Arc::new(DashMap::new()),
sender,
@@ -417,7 +432,10 @@ impl UdpProxy {
}
pub async fn start(self: &Arc<Self>) -> Result<(), Error> {
self.peer_manager
let Some(peer_manager) = self.peer_manager.upgrade() else {
return Err(anyhow::anyhow!("peer manager is gone").into());
};
peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.await;
@@ -457,7 +475,11 @@ impl UdpProxy {
hdr.set_latency_first(is_latency_first);
let to_peer_id = hdr.to_peer_id.into();
tracing::trace!(?msg, ?to_peer_id, "udp nat packet response send");
let ret = peer_manager.send_msg_for_proxy(msg, to_peer_id).await;
let Some(pm) = peer_manager.upgrade() else {
tracing::warn!("peer manager is gone, udp proxy send loop exit");
return;
};
let ret = pm.send_msg_for_proxy(msg, to_peer_id).await;
if ret.is_err() {
tracing::error!("send icmp packet to peer failed: {:?}", ret);
}