use workspace, prepare for config server and gui (#48)

This commit is contained in:
Sijie.Sun
2024-04-04 10:33:53 +08:00
committed by GitHub
parent bb4ae71869
commit 4eb7efe5fc
77 changed files with 162 additions and 195 deletions
+481
View File
@@ -0,0 +1,481 @@
use std::borrow::BorrowMut;
use std::net::Ipv4Addr;
use std::sync::{Arc, Weak};
use anyhow::Context;
use futures::StreamExt;
use pnet::packet::ethernet::EthernetPacket;
use pnet::packet::ipv4::Ipv4Packet;
use tokio::{sync::Mutex, task::JoinSet};
use tokio_util::bytes::{Bytes, BytesMut};
use tonic::transport::Server;
use crate::common::config::ConfigLoader;
use crate::common::error::Error;
use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx, GlobalCtxEvent};
use crate::common::PeerId;
use crate::connector::direct::DirectConnectorManager;
use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManager};
use crate::connector::udp_hole_punch::UdpHolePunchConnector;
use crate::gateway::icmp_proxy::IcmpProxy;
use crate::gateway::tcp_proxy::TcpProxy;
use crate::gateway::udp_proxy::UdpProxy;
use crate::peer_center::instance::PeerCenterInstance;
use crate::peers::peer_conn::PeerConnId;
use crate::peers::peer_manager::{PeerManager, RouteAlgoType};
use crate::peers::rpc_service::PeerManagerRpcService;
use crate::rpc::vpn_portal_rpc_server::VpnPortalRpc;
use crate::rpc::{GetVpnPortalInfoRequest, GetVpnPortalInfoResponse, VpnPortalInfo};
use crate::tunnels::SinkItem;
use crate::vpn_portal::{self, VpnPortal};
use tokio_stream::wrappers::ReceiverStream;
use super::listeners::ListenerManager;
use super::virtual_nic;
/// A single running easytier node.
///
/// Owns the virtual NIC, the peer manager and every auxiliary service
/// (listeners, connectors, NAT proxies, peer center, rpc server, vpn portal).
/// Construct with [`Instance::new`], then start everything with `run()`.
pub struct Instance {
    inst_name: String,
    // instance id, read from the global context at construction time
    id: uuid::Uuid,
    // set by `prepare_tun_device` during `run()`; None before that
    virtual_nic: Option<Arc<virtual_nic::VirtualNic>>,
    // receiving side of the peers -> nic packet channel; `take()`n and moved
    // into a forwarding task during `run()`
    peer_packet_receiver: Option<ReceiverStream<SinkItem>>,
    // background tasks spawned by this instance; joined in `wait()`
    tasks: JoinSet<()>,
    peer_manager: Arc<PeerManager>,
    listener_manager: Arc<Mutex<ListenerManager<PeerManager>>>,
    conn_manager: Arc<ManualConnectorManager>,
    direct_conn_manager: Arc<DirectConnectorManager>,
    udp_hole_puncher: Arc<Mutex<UdpHolePunchConnector>>,
    // proxies forwarding peer traffic to local networks
    tcp_proxy: Arc<TcpProxy>,
    icmp_proxy: Arc<IcmpProxy>,
    udp_proxy: Arc<UdpProxy>,
    peer_center: Arc<PeerCenterInstance>,
    vpn_portal: Arc<Mutex<Box<dyn VpnPortal>>>,
    global_ctx: ArcGlobalCtx,
}
impl Instance {
    /// Build a new instance from `config`, wiring up all sub-components.
    ///
    /// Only the direct connector starts its background work here; everything
    /// else is brought up by [`Instance::run`].
    pub fn new(config: impl ConfigLoader + Send + Sync + 'static) -> Self {
        let global_ctx = Arc::new(GlobalCtx::new(config));
        log::info!(
            "[INIT] instance creating. config: {}",
            global_ctx.config.dump()
        );

        // Packets arriving from peers flow through this channel and are
        // drained into the TUN device by `do_forward_peers_to_nic`.
        let (peer_packet_sender, peer_packet_receiver) = tokio::sync::mpsc::channel(100);

        let id = global_ctx.get_id();

        let peer_manager = Arc::new(PeerManager::new(
            RouteAlgoType::Ospf,
            global_ctx.clone(),
            peer_packet_sender.clone(),
        ));

        let listener_manager = Arc::new(Mutex::new(ListenerManager::new(
            global_ctx.clone(),
            peer_manager.clone(),
        )));

        let conn_manager = Arc::new(ManualConnectorManager::new(
            global_ctx.clone(),
            peer_manager.clone(),
        ));

        let mut direct_conn_manager =
            DirectConnectorManager::new(global_ctx.clone(), peer_manager.clone());
        // NOTE: the direct connector begins running immediately.
        direct_conn_manager.run();

        let udp_hole_puncher = UdpHolePunchConnector::new(global_ctx.clone(), peer_manager.clone());

        let arc_tcp_proxy = TcpProxy::new(global_ctx.clone(), peer_manager.clone());
        // Proxy construction failing at startup is unrecoverable; abort with
        // context.
        let arc_icmp_proxy = IcmpProxy::new(global_ctx.clone(), peer_manager.clone())
            .with_context(|| "create icmp proxy failed")
            .unwrap();
        let arc_udp_proxy = UdpProxy::new(global_ctx.clone(), peer_manager.clone())
            .with_context(|| "create udp proxy failed")
            .unwrap();

        let peer_center = Arc::new(PeerCenterInstance::new(peer_manager.clone()));

        let vpn_portal_inst = vpn_portal::wireguard::WireGuard::default();

        Instance {
            inst_name: global_ctx.inst_name.clone(),
            id,
            virtual_nic: None,
            peer_packet_receiver: Some(ReceiverStream::new(peer_packet_receiver)),
            tasks: JoinSet::new(),
            peer_manager,
            listener_manager,
            conn_manager,
            direct_conn_manager: Arc::new(direct_conn_manager),
            udp_hole_puncher: Arc::new(Mutex::new(udp_hole_puncher)),
            tcp_proxy: arc_tcp_proxy,
            icmp_proxy: arc_icmp_proxy,
            udp_proxy: arc_udp_proxy,
            peer_center,
            vpn_portal: Arc::new(Mutex::new(Box::new(vpn_portal_inst))),
            global_ctx,
        }
    }

    pub fn get_conn_manager(&self) -> Arc<ManualConnectorManager> {
        self.conn_manager.clone()
    }

    /// Forward one IPv4 packet read from the TUN device to the peer owning
    /// its destination address. Non-IPv4 payloads are dropped with a log.
    async fn do_forward_nic_to_peers_ipv4(ret: BytesMut, mgr: &PeerManager) {
        if let Some(ipv4) = Ipv4Packet::new(&ret) {
            if ipv4.get_version() != 4 {
                tracing::info!("[USER_PACKET] not ipv4 packet: {:?}", ipv4);
                return;
            }
            let dst_ipv4 = ipv4.get_destination();
            tracing::trace!(
                ?ret,
                "[USER_PACKET] recv new packet from tun device and forward to peers."
            );
            // Send errors are routine (e.g. no route yet) — trace only.
            let send_ret = mgr.send_msg_ipv4(ret, dst_ipv4).await;
            if send_ret.is_err() {
                tracing::trace!(?send_ret, "[USER_PACKET] send_msg_ipv4 failed")
            }
        } else {
            tracing::warn!(?ret, "[USER_PACKET] not ipv4 packet");
        }
    }

    /// Strip the 14-byte ethernet header and forward the inner IPv4 packet.
    /// Currently unused: the TUN device is configured as L3, so packets
    /// arrive without an ethernet header (see `do_forward_nic_to_peers`).
    async fn do_forward_nic_to_peers_ethernet(mut ret: BytesMut, mgr: &PeerManager) {
        if let Some(eth) = EthernetPacket::new(&ret) {
            log::warn!("begin to forward: {:?}, type: {}", eth, eth.get_ethertype());
            Self::do_forward_nic_to_peers_ipv4(ret.split_off(14), mgr).await;
        } else {
            // fix: this branch means the frame failed to parse as *ethernet*;
            // the old message said "ipv4".
            log::warn!("not ethernet packet: {:?}", ret);
        }
    }

    /// Spawn the task that reads packets from the virtual NIC and forwards
    /// them to peers.
    ///
    /// # Panics
    ///
    /// Panics if called before `prepare_tun_device` set up the NIC.
    fn do_forward_nic_to_peers(&mut self) -> Result<(), Error> {
        // read from nic and write to corresponding tunnel
        let nic = self.virtual_nic.as_ref().unwrap();
        let nic = nic.clone();
        let mgr = self.peer_manager.clone();
        self.tasks.spawn(async move {
            let mut stream = nic.pin_recv_stream();
            while let Some(ret) = stream.next().await {
                if ret.is_err() {
                    log::error!("read from nic failed: {:?}", ret);
                    break;
                }
                Self::do_forward_nic_to_peers_ipv4(ret.unwrap(), mgr.as_ref()).await;
            }
        });
        Ok(())
    }

    /// Spawn the task that drains `channel` (packets from peers) into the
    /// virtual NIC's send sink.
    fn do_forward_peers_to_nic(
        tasks: &mut JoinSet<()>,
        nic: Arc<virtual_nic::VirtualNic>,
        channel: Option<ReceiverStream<Bytes>>,
    ) {
        tasks.spawn(async move {
            let send = nic.pin_send_stream();
            let channel = channel.unwrap();
            let ret = channel
                .map(|packet| {
                    log::trace!(
                        "[USER_PACKET] forward packet from peers to nic. packet: {:?}",
                        packet
                    );
                    Ok(packet)
                })
                .forward(send)
                .await;
            if ret.is_err() {
                // fix: the old message named a non-existent function
                // ("do_forward_tunnel_to_nic").
                panic!("do_forward_peers_to_nic failed: {:?}", ret.err());
            }
        });
    }

    /// Connect to every peer listed in the configuration.
    async fn add_initial_peers(&mut self) -> Result<(), Error> {
        for peer in self.global_ctx.config.get_peers().iter() {
            self.get_conn_manager()
                .add_connector_by_url(peer.uri.as_str())
                .await?;
        }
        Ok(())
    }

    /// Create the TUN device and wire both forwarding directions.
    async fn prepare_tun_device(&mut self) -> Result<(), Error> {
        let nic = virtual_nic::VirtualNic::new(self.get_global_ctx())
            .create_dev()
            .await?;

        self.global_ctx
            .issue_event(GlobalCtxEvent::TunDeviceReady(nic.ifname().to_string()));
        self.virtual_nic = Some(Arc::new(nic));

        self.do_forward_nic_to_peers().unwrap();
        Self::do_forward_peers_to_nic(
            self.tasks.borrow_mut(),
            self.virtual_nic.as_ref().unwrap().clone(),
            self.peer_packet_receiver.take(),
        );
        Ok(())
    }

    /// Bring the TUN link up and assign `ipv4_addr/24` to it.
    async fn assign_ipv4_to_tun_device(&mut self, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
        let nic = self.virtual_nic.as_ref().unwrap().clone();
        nic.link_up().await?;
        nic.remove_ip(None).await?;
        nic.add_ip(ipv4_addr, 24).await?;
        if cfg!(target_os = "macos") {
            // macOS needs the interface route added explicitly.
            nic.add_route(ipv4_addr, 24).await?;
        }
        Ok(())
    }

    /// Start all services. Order matters: the TUN device must exist before
    /// the route updater and proxies that reference it.
    pub async fn run(&mut self) -> Result<(), Error> {
        self.prepare_tun_device().await?;
        if let Some(ipv4_addr) = self.global_ctx.get_ipv4() {
            self.assign_ipv4_to_tun_device(ipv4_addr).await?;
        }

        self.listener_manager
            .lock()
            .await
            .prepare_listeners()
            .await?;
        self.listener_manager.lock().await.run().await?;
        self.peer_manager.run().await?;
        self.run_rpc_server().unwrap();
        self.tcp_proxy.start().await.unwrap();
        self.icmp_proxy.start().await.unwrap();
        self.udp_proxy.start().await.unwrap();
        self.run_proxy_cidrs_route_updater();

        self.udp_hole_puncher.lock().await.run().await?;

        self.peer_center.init().await;

        self.add_initial_peers().await?;

        // fix: `.is_some()` instead of `if let Some(_) = ...`.
        if self.global_ctx.get_vpn_portal_cidr().is_some() {
            self.vpn_portal
                .lock()
                .await
                .start(self.get_global_ctx(), self.get_peer_manager())
                .await?;
        }

        Ok(())
    }

    pub fn get_peer_manager(&self) -> Arc<PeerManager> {
        self.peer_manager.clone()
    }

    /// Close the peer connection `conn_id` belonging to `peer_id`.
    pub async fn close_peer_conn(
        &mut self,
        peer_id: PeerId,
        conn_id: &PeerConnId,
    ) -> Result<(), Error> {
        self.peer_manager
            .get_peer_map()
            .close_peer_conn(peer_id, conn_id)
            .await?;
        Ok(())
    }

    /// Wait for all spawned background tasks to finish.
    ///
    /// # Panics
    ///
    /// Panics if any background task panicked or was aborted.
    pub async fn wait(&mut self) {
        while let Some(ret) = self.tasks.join_next().await {
            log::info!("task finished: {:?}", ret);
            ret.unwrap();
        }
    }

    pub fn id(&self) -> uuid::Uuid {
        self.id
    }

    pub fn peer_id(&self) -> PeerId {
        self.peer_manager.my_peer_id()
    }

    /// Build the vpn-portal rpc service. It holds only weak references so
    /// the rpc server does not keep the instance's components alive.
    fn get_vpn_portal_rpc_service(&self) -> impl VpnPortalRpc {
        struct VpnPortalRpcService {
            peer_mgr: Weak<PeerManager>,
            vpn_portal: Weak<Mutex<Box<dyn VpnPortal>>>,
        }

        #[tonic::async_trait]
        impl VpnPortalRpc for VpnPortalRpcService {
            async fn get_vpn_portal_info(
                &self,
                _request: tonic::Request<GetVpnPortalInfoRequest>,
            ) -> Result<tonic::Response<GetVpnPortalInfoResponse>, tonic::Status> {
                // Either weak pointer failing to upgrade means the instance
                // is shutting down.
                let Some(vpn_portal) = self.vpn_portal.upgrade() else {
                    return Err(tonic::Status::unavailable("vpn portal not available"));
                };
                let Some(peer_mgr) = self.peer_mgr.upgrade() else {
                    return Err(tonic::Status::unavailable("peer manager not available"));
                };

                let vpn_portal = vpn_portal.lock().await;
                let ret = GetVpnPortalInfoResponse {
                    vpn_portal_info: Some(VpnPortalInfo {
                        vpn_type: vpn_portal.name(),
                        client_config: vpn_portal.dump_client_config(peer_mgr).await,
                        connected_clients: vpn_portal.list_clients().await,
                    }),
                };
                Ok(tonic::Response::new(ret))
            }
        }

        VpnPortalRpcService {
            peer_mgr: Arc::downgrade(&self.peer_manager),
            vpn_portal: Arc::downgrade(&self.vpn_portal),
        }
    }

    /// Start the grpc management server, if `rpc_portal` is configured.
    fn run_rpc_server(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        let Some(addr) = self.global_ctx.config.get_rpc_portal() else {
            tracing::info!("rpc server not enabled, because rpc_portal is not set.");
            return Ok(());
        };
        let peer_mgr = self.peer_manager.clone();
        let conn_manager = self.conn_manager.clone();
        let net_ns = self.global_ctx.net_ns.clone();
        let peer_center = self.peer_center.clone();
        let vpn_portal_rpc = self.get_vpn_portal_rpc_service();
        self.tasks.spawn(async move {
            // bind the rpc socket inside the configured network namespace
            let _g = net_ns.guard();
            Server::builder()
                .add_service(
                    crate::rpc::peer_manage_rpc_server::PeerManageRpcServer::new(
                        PeerManagerRpcService::new(peer_mgr),
                    ),
                )
                .add_service(
                    crate::rpc::connector_manage_rpc_server::ConnectorManageRpcServer::new(
                        ConnectorManagerRpcService(conn_manager.clone()),
                    ),
                )
                .add_service(
                    crate::rpc::peer_center_rpc_server::PeerCenterRpcServer::new(
                        peer_center.get_rpc_service(),
                    ),
                )
                .add_service(crate::rpc::vpn_portal_rpc_server::VpnPortalRpcServer::new(
                    vpn_portal_rpc,
                ))
                .serve(addr)
                .await
                .with_context(|| format!("rpc server failed. addr: {}", addr))
                .unwrap();
        });
        Ok(())
    }

    /// Spawn a task that, once per second, syncs kernel routes with the
    /// proxy cidrs advertised by peers plus the vpn portal client cidr.
    fn run_proxy_cidrs_route_updater(&mut self) {
        let peer_mgr = self.peer_manager.clone();
        let global_ctx = self.global_ctx.clone();
        let net_ns = self.global_ctx.net_ns.clone();
        let nic = self.virtual_nic.as_ref().unwrap().clone();
        self.tasks.spawn(async move {
            let mut cur_proxy_cidrs = vec![];
            loop {
                // collect the cidrs currently advertised by all routes
                let mut proxy_cidrs = vec![];
                let routes = peer_mgr.list_routes().await;
                for r in routes {
                    for cidr in r.proxy_cidrs {
                        let Ok(cidr) = cidr.parse::<cidr::Ipv4Cidr>() else {
                            continue;
                        };
                        proxy_cidrs.push(cidr);
                    }
                }
                // add vpn portal cidr to proxy_cidrs
                if let Some(vpn_cfg) = global_ctx.config.get_vpn_portal_config() {
                    proxy_cidrs.push(vpn_cfg.client_cidr);
                }
                // if route is in cur_proxy_cidrs but not in proxy_cidrs, delete it.
                for cidr in cur_proxy_cidrs.iter() {
                    if proxy_cidrs.contains(cidr) {
                        continue;
                    }
                    let _g = net_ns.guard();
                    let ret = nic
                        .get_ifcfg()
                        .remove_ipv4_route(
                            nic.ifname(),
                            cidr.first_address(),
                            cidr.network_length(),
                        )
                        .await;
                    if ret.is_err() {
                        tracing::trace!(
                            cidr = ?cidr,
                            err = ?ret,
                            "remove route failed.",
                        );
                    }
                }
                // add routes that appeared in this round
                for cidr in proxy_cidrs.iter() {
                    if cur_proxy_cidrs.contains(cidr) {
                        continue;
                    }
                    let _g = net_ns.guard();
                    let ret = nic
                        .get_ifcfg()
                        .add_ipv4_route(nic.ifname(), cidr.first_address(), cidr.network_length())
                        .await;
                    if ret.is_err() {
                        tracing::trace!(
                            cidr = ?cidr,
                            err = ?ret,
                            "add route failed.",
                        );
                    }
                }
                cur_proxy_cidrs = proxy_cidrs;
                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            }
        });
    }

    pub fn get_global_ctx(&self) -> ArcGlobalCtx {
        self.global_ctx.clone()
    }
}
+202
View File
@@ -0,0 +1,202 @@
use std::{fmt::Debug, sync::Arc};
use anyhow::Context;
use async_trait::async_trait;
use tokio::{sync::Mutex, task::JoinSet};
use crate::{
common::{
error::Error,
global_ctx::{ArcGlobalCtx, GlobalCtxEvent},
netns::NetNS,
},
peers::peer_manager::PeerManager,
tunnels::{
ring_tunnel::RingTunnelListener,
tcp_tunnel::TcpTunnelListener,
udp_tunnel::UdpTunnelListener,
wireguard::{WgConfig, WgTunnelListener},
Tunnel, TunnelListener,
},
};
/// Abstraction over the component that consumes tunnels accepted by a
/// listener (in production this is the `PeerManager`).
#[async_trait]
pub trait TunnelHandlerForListener {
    // Take ownership of an accepted tunnel and process it.
    async fn handle_tunnel(&self, tunnel: Box<dyn Tunnel>) -> Result<(), Error>;
}
#[async_trait]
impl TunnelHandlerForListener for PeerManager {
    // Accepted tunnels are registered as server-side peer connections.
    #[tracing::instrument]
    async fn handle_tunnel(&self, tunnel: Box<dyn Tunnel>) -> Result<(), Error> {
        self.add_tunnel_as_server(tunnel).await
    }
}
/// Owns all tunnel listeners of an instance and runs one accept loop per
/// listener, handing accepted tunnels to the handler `H`.
pub struct ListenerManager<H> {
    global_ctx: ArcGlobalCtx,
    // namespace in which listening sockets are created
    net_ns: NetNS,
    listeners: Vec<Arc<Mutex<dyn TunnelListener>>>,
    peer_manager: Arc<H>,
    // accept-loop tasks spawned by `run()`
    tasks: JoinSet<()>,
}
impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManager<H> {
    /// Create a manager bound to `global_ctx`. Listeners are registered via
    /// [`Self::prepare_listeners`] / [`Self::add_listener`] and started by
    /// [`Self::run`].
    pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<H>) -> Self {
        Self {
            global_ctx: global_ctx.clone(),
            net_ns: global_ctx.net_ns.clone(),
            listeners: Vec::new(),
            peer_manager,
            tasks: JoinSet::new(),
        }
    }

    /// Build the listener set from the config: an in-process ring listener
    /// (always) plus one listener per configured uri (tcp/udp/wg supported;
    /// other schemes are logged and skipped).
    pub async fn prepare_listeners(&mut self) -> Result<(), Error> {
        self.add_listener(RingTunnelListener::new(
            format!("ring://{}", self.global_ctx.get_id())
                .parse()
                .unwrap(),
        ))
        .await?;
        for l in self.global_ctx.config.get_listener_uris().iter() {
            match l.scheme() {
                "tcp" => {
                    self.add_listener(TcpTunnelListener::new(l.clone())).await?;
                }
                "udp" => {
                    self.add_listener(UdpTunnelListener::new(l.clone())).await?;
                }
                "wg" => {
                    // wireguard keys are derived from the network identity
                    let nid = self.global_ctx.get_network_identity();
                    let wg_config =
                        WgConfig::new_from_network_identity(&nid.network_name, &nid.network_secret);
                    self.add_listener(WgTunnelListener::new(l.clone(), wg_config))
                        .await?;
                }
                _ => {
                    log::warn!("unsupported listener uri: {}", l);
                }
            }
        }
        Ok(())
    }

    /// Register a listener; it is not started until [`Self::run`].
    pub async fn add_listener<Listener>(&mut self, listener: Listener) -> Result<(), Error>
    where
        Listener: TunnelListener + 'static,
    {
        let listener = Arc::new(Mutex::new(listener));
        self.listeners.push(listener);
        Ok(())
    }

    /// Accept loop for a single listener. Each accepted tunnel is processed
    /// on its own task, so one failing handshake does not stall the loop.
    #[tracing::instrument]
    async fn run_listener(
        listener: Arc<Mutex<dyn TunnelListener>>,
        peer_manager: Arc<H>,
        global_ctx: ArcGlobalCtx,
    ) {
        // The lock is held for the lifetime of the accept loop.
        let mut l = listener.lock().await;
        global_ctx.add_running_listener(l.local_url());
        global_ctx.issue_event(GlobalCtxEvent::ListenerAdded(l.local_url()));
        while let Ok(ret) = l.accept().await {
            let tunnel_info = ret.info().unwrap();
            global_ctx.issue_event(GlobalCtxEvent::ConnectionAccepted(
                tunnel_info.local_addr.clone(),
                tunnel_info.remote_addr.clone(),
            ));
            tracing::info!(ret = ?ret, "conn accepted");
            let peer_manager = peer_manager.clone();
            let global_ctx = global_ctx.clone();
            tokio::spawn(async move {
                let server_ret = peer_manager.handle_tunnel(ret).await;
                if let Err(e) = &server_ret {
                    global_ctx.issue_event(GlobalCtxEvent::ConnectionError(
                        tunnel_info.local_addr,
                        tunnel_info.remote_addr,
                        e.to_string(),
                    ));
                    tracing::error!(error = ?e, "handle conn error");
                }
            });
        }
    }

    /// Start every registered listener inside the configured netns and
    /// spawn its accept loop.
    ///
    /// # Errors
    ///
    /// Fails if any listener cannot start listening on its url.
    pub async fn run(&mut self) -> Result<(), Error> {
        for listener in &self.listeners {
            let _guard = self.net_ns.guard();
            let addr = listener.lock().await.local_url();
            // fix: routine progress output, not a warning condition.
            log::info!("run listener: {:?}", listener);
            listener
                .lock()
                .await
                .listen()
                .await
                .with_context(|| format!("failed to add listener {}", addr))?;
            self.tasks.spawn(Self::run_listener(
                listener.clone(),
                self.peer_manager.clone(),
                self.global_ctx.clone(),
            ));
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use futures::{SinkExt, StreamExt};
    use tokio::time::timeout;
    use crate::{
        common::global_ctx::tests::get_mock_global_ctx,
        tunnels::{ring_tunnel::RingTunnelConnector, TunnelConnector},
    };
    use super::*;
    // Handler that greets the client with "abc" and then reports failure,
    // so the error branch of the accept loop can be exercised.
    #[derive(Debug)]
    struct MockListenerHandler {}
    #[async_trait]
    impl TunnelHandlerForListener for MockListenerHandler {
        async fn handle_tunnel(&self, _tunnel: Box<dyn Tunnel>) -> Result<(), Error> {
            let data = "abc";
            _tunnel.pin_sink().send(data.into()).await.unwrap();
            Err(Error::Unknown)
        }
    }
    // A handler error on one accepted tunnel must not stop the listener
    // from accepting subsequent connections.
    #[tokio::test]
    async fn handle_error_in_accept() {
        let handler = Arc::new(MockListenerHandler {});
        let mut listener_mgr = ListenerManager::new(get_mock_global_ctx(), handler.clone());
        let ring_id = format!("ring://{}", uuid::Uuid::new_v4());
        listener_mgr
            .add_listener(RingTunnelListener::new(ring_id.parse().unwrap()))
            .await
            .unwrap();
        listener_mgr.run().await.unwrap();
        // connect, expect the mock handler's greeting, return the tunnel
        let connect_once = |ring_id| async move {
            let tunnel = RingTunnelConnector::new(ring_id).connect().await.unwrap();
            assert_eq!(tunnel.pin_stream().next().await.unwrap().unwrap(), "abc");
            tunnel
        };
        timeout(std::time::Duration::from_secs(1), async move {
            connect_once(ring_id.parse().unwrap()).await;
            // handle tunnel fail should not impact the second connect
            connect_once(ring_id.parse().unwrap()).await;
        })
        .await
        .unwrap();
    }
}
+4
View File
@@ -0,0 +1,4 @@
pub mod instance; // `Instance`: ties NIC, peers, proxies and rpc together
pub mod listeners; // tunnel listener management and accept loops
pub mod tun_codec; // TUN packet framing (packet-information header handling)
pub mod virtual_nic; // wrapper around the OS TUN device
+179
View File
@@ -0,0 +1,179 @@
use std::io;
use byteorder::{NativeEndian, NetworkEndian, WriteBytesExt};
use tokio_util::bytes::{BufMut, Bytes, BytesMut};
use tokio_util::codec::{Decoder, Encoder};
/// A packet protocol IP version
#[derive(Debug, Clone, Copy, Default)]
enum PacketProtocol {
    #[default]
    IPv4,
    IPv6,
    // any other value found in the version nibble of the first byte
    Other(u8),
}
// Note: the protocol in the packet information header is platform dependent.
impl PacketProtocol {
    /// Protocol value for the packet-information header on Linux/Android:
    /// ethertype constants.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    fn into_pi_field(self) -> Result<u16, io::Error> {
        use nix::libc;
        match self {
            PacketProtocol::IPv4 => Ok(libc::ETH_P_IP as u16),
            PacketProtocol::IPv6 => Ok(libc::ETH_P_IPV6 as u16),
            // only IP packets can be written with a PI header
            PacketProtocol::Other(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "neither an IPv4 nor IPv6 packet",
            )),
        }
    }
    /// Protocol value for the packet-information header on macOS/iOS:
    /// address-family constants.
    #[cfg(any(target_os = "macos", target_os = "ios"))]
    fn into_pi_field(self) -> Result<u16, io::Error> {
        use nix::libc;
        match self {
            PacketProtocol::IPv4 => Ok(libc::PF_INET as u16),
            PacketProtocol::IPv6 => Ok(libc::PF_INET6 as u16),
            PacketProtocol::Other(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "neither an IPv4 nor IPv6 packet",
            )),
        }
    }
    /// Not supported on Windows; calling this panics.
    #[cfg(target_os = "windows")]
    fn into_pi_field(self) -> Result<u16, io::Error> {
        unimplemented!()
    }
}
/// Backing storage for a [`TunPacket`]: either immutable `Bytes` or
/// mutable `BytesMut`.
#[derive(Debug)]
pub enum TunPacketBuffer {
    Bytes(Bytes),
    BytesMut(BytesMut),
}
impl From<TunPacketBuffer> for Bytes {
    /// Convert the buffer into immutable `Bytes`; the mutable variant is
    /// frozen (no copy), the immutable one is returned unchanged.
    fn from(buf: TunPacketBuffer) -> Self {
        match buf {
            TunPacketBuffer::BytesMut(inner) => inner.freeze(),
            TunPacketBuffer::Bytes(inner) => inner,
        }
    }
}
impl AsRef<[u8]> for TunPacketBuffer {
    /// Borrow the underlying byte slice regardless of variant.
    fn as_ref(&self) -> &[u8] {
        match self {
            TunPacketBuffer::BytesMut(inner) => inner.as_ref(),
            TunPacketBuffer::Bytes(inner) => inner.as_ref(),
        }
    }
}
/// A Tun Packet to be sent or received on the TUN interface.
/// Pairs the inferred [`PacketProtocol`] with the raw payload buffer
/// (the packet-information header, if any, is already stripped).
#[derive(Debug)]
pub struct TunPacket(PacketProtocol, TunPacketBuffer);
/// Infer the protocol based on the first nibble in the packet buffer
/// (the IP version field).
///
/// An empty buffer yields `PacketProtocol::Other(0)` instead of panicking
/// (the original indexed `buf[0]` unconditionally).
fn infer_proto(buf: &[u8]) -> PacketProtocol {
    match buf.first().map_or(0, |b| b >> 4) {
        4 => PacketProtocol::IPv4,
        6 => PacketProtocol::IPv6,
        p => PacketProtocol::Other(p),
    }
}
impl TunPacket {
    /// Create a new `TunPacket`, inferring the IP protocol version from the
    /// first nibble of `buffer`.
    pub fn new(buffer: TunPacketBuffer) -> TunPacket {
        let proto = infer_proto(buffer.as_ref());
        TunPacket(proto, buffer)
    }

    /// Return this packet's bytes.
    pub fn get_bytes(&self) -> &[u8] {
        // delegate to TunPacketBuffer's AsRef impl instead of re-matching
        self.1.as_ref()
    }

    /// Consume the packet and return its payload as immutable `Bytes`.
    pub fn into_bytes(self) -> Bytes {
        // `From<TunPacketBuffer> for Bytes` freezes the mutable variant
        self.1.into()
    }

    /// Consume the packet and return its payload as `BytesMut`.
    ///
    /// # Panics
    ///
    /// Panics if the packet was built from the immutable
    /// `TunPacketBuffer::Bytes` variant.
    pub fn into_bytes_mut(self) -> BytesMut {
        match self.1 {
            TunPacketBuffer::Bytes(_) => panic!("cannot into_bytes_mut from bytes"),
            TunPacketBuffer::BytesMut(bytes) => bytes,
        }
    }
}
/// A TunPacket Encoder/Decoder.
///
/// Field 0: whether the device prepends a 4-byte packet-information header.
/// Field 1: the device MTU, used to size the decoder's read buffer.
pub struct TunPacketCodec(bool, i32);
impl TunPacketCodec {
/// Create a new `TunPacketCodec` specifying whether the underlying
/// tunnel Device has enabled the packet information header.
pub fn new(pi: bool, mtu: i32) -> TunPacketCodec {
TunPacketCodec(pi, mtu)
}
}
impl Decoder for TunPacketCodec {
    type Item = TunPacket;
    type Error = io::Error;

    /// Take the whole read buffer as one packet (the TUN device delivers
    /// one full IP packet per read), stripping the 4-byte
    /// packet-information header when enabled.
    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        if buf.is_empty() {
            return Ok(None);
        }

        let mut pkt = buf.split_to(buf.len());

        // Reserve enough space for the next packet; with packet information
        // enabled each read carries an extra 4-byte header.
        // (merged the two duplicated `if self.0` branches)
        let pi_len = if self.0 { 4 } else { 0 };
        buf.reserve(self.1 as usize + pi_len);

        // if the packet information is enabled we have to ignore the first 4 bytes
        if self.0 {
            let _ = pkt.split_to(4);
        }

        let proto = infer_proto(pkt.as_ref());
        Ok(Some(TunPacket(proto, TunPacketBuffer::BytesMut(pkt))))
    }
}
impl Encoder<TunPacket> for TunPacketCodec {
    type Error = io::Error;

    /// Write one packet into `dst`, prepending the 4-byte
    /// packet-information header when enabled on this codec.
    fn encode(&mut self, item: TunPacket, dst: &mut BytesMut) -> Result<(), Self::Error> {
        let TunPacket(proto, bytes) = item;
        if self.0 {
            dst.reserve(bytes.as_ref().len() + 4);
            // packet information header: 2-byte flags (always 0, native
            // endian) followed by the protocol in network byte order
            let mut buf = Vec::<u8>::with_capacity(4);
            buf.write_u16::<NativeEndian>(0)?;
            buf.write_u16::<NetworkEndian>(proto.into_pi_field()?)?;
            dst.put_slice(&buf);
        } else {
            // fix: the original reserved 4 extra bytes even without a
            // packet-information header
            dst.reserve(bytes.as_ref().len());
        }
        dst.put(Bytes::from(bytes));
        Ok(())
    }
}
+196
View File
@@ -0,0 +1,196 @@
use std::{net::Ipv4Addr, pin::Pin};
use crate::{
common::{
error::Result,
global_ctx::ArcGlobalCtx,
ifcfg::{IfConfiger, IfConfiguerTrait},
},
tunnels::{
codec::BytesCodec, common::FramedTunnel, DatagramSink, DatagramStream, Tunnel, TunnelError,
},
};
use futures::{SinkExt, StreamExt};
use tokio_util::{bytes::Bytes, codec::Framed};
use tun::Device;
use super::tun_codec::{TunPacket, TunPacketCodec};
/// Wrapper around an OS TUN device, exposed to the rest of the crate as a
/// `Tunnel` (stream/sink of packets) plus interface-configuration helpers.
pub struct VirtualNic {
    // requested device name; NOTE(review): stored but not applied to the
    // tun configuration in `create_dev_ret_err` — confirm whether intended
    dev_name: String,
    // number of device queues; only 1 is currently supported
    queue_num: usize,
    global_ctx: ArcGlobalCtx,
    // actual interface name, set once the device is created
    ifname: Option<String>,
    // framed TUN device, set by `create_dev`
    tun: Option<Box<dyn Tunnel>>,
    ifcfg: Box<dyn IfConfiguerTrait + Send + Sync + 'static>,
}
impl VirtualNic {
    /// Create an unconfigured NIC wrapper; the OS device itself is created
    /// by `create_dev`.
    pub fn new(global_ctx: ArcGlobalCtx) -> Self {
        Self {
            dev_name: "".to_owned(),
            queue_num: 1,
            global_ctx,
            ifname: None,
            tun: None,
            ifcfg: Box::new(IfConfiger {}),
        }
    }
    // NOTE(review): `dev_name` is stored here but never read by
    // `create_dev_ret_err` — looks like a gap; confirm.
    pub fn set_dev_name(mut self, dev_name: &str) -> Result<Self> {
        self.dev_name = dev_name.to_owned();
        Ok(self)
    }
    // Values other than 1 currently hit a `todo!` in `create_dev_ret_err`.
    pub fn set_queue_num(mut self, queue_num: usize) -> Result<Self> {
        self.queue_num = queue_num;
        Ok(self)
    }
    /// Create the OS TUN device inside the configured netns and wrap it
    /// into a framed `Tunnel` stored in `self.tun`.
    async fn create_dev_ret_err(&mut self) -> Result<()> {
        let mut config = tun::Configuration::default();
        // on macOS the device delivers a 4-byte packet-information header
        // that must be stripped/added by the codec below
        let has_packet_info = cfg!(target_os = "macos");
        config.layer(tun::Layer::L3);
        #[cfg(target_os = "linux")]
        {
            config.platform(|config| {
                // detect protocol by ourselves for cross platform
                config.packet_information(false);
            });
        }
        if self.queue_num != 1 {
            todo!("queue_num != 1")
        }
        config.queues(self.queue_num);
        config.up();
        let dev = {
            // the device must be created inside the configured namespace
            let _g = self.global_ctx.net_ns.guard();
            tun::create_as_async(&config)?
        };
        let ifname = dev.get_ref().name()?;
        self.ifcfg.wait_interface_show(ifname.as_str()).await?;
        let ft: Box<dyn Tunnel> = if has_packet_info {
            // TunPacketCodec strips/adds the packet-information header
            let framed = Framed::new(dev, TunPacketCodec::new(true, 2500));
            let (sink, stream) = framed.split();
            let new_stream = stream.map(|item| match item {
                Ok(item) => Ok(item.into_bytes_mut()),
                Err(err) => {
                    println!("tun stream error: {:?}", err);
                    Err(TunnelError::TunError(err.to_string()))
                }
            });
            let new_sink = Box::pin(sink.with(|item: Bytes| async move {
                // NOTE(review): this unreachable branch presumably only pins
                // the closure's error type to TunnelError — confirm before
                // removing
                if false {
                    return Err(TunnelError::TunError("tun sink error".to_owned()));
                }
                Ok(TunPacket::new(super::tun_codec::TunPacketBuffer::Bytes(
                    item,
                )))
            }));
            Box::new(FramedTunnel::new(new_stream, new_sink, None))
        } else {
            // no packet-information header: plain length-free byte framing
            let framed = Framed::new(dev, BytesCodec::new(2500));
            let (sink, stream) = framed.split();
            Box::new(FramedTunnel::new(stream, sink, None))
        };
        self.ifname = Some(ifname.to_owned());
        self.tun = Some(ft);
        Ok(())
    }
    /// Consuming builder-style wrapper around `create_dev_ret_err`.
    pub async fn create_dev(mut self) -> Result<Self> {
        self.create_dev_ret_err().await?;
        Ok(self)
    }
    /// Name of the created interface.
    ///
    /// # Panics
    ///
    /// Panics if called before `create_dev` succeeded.
    pub fn ifname(&self) -> &str {
        self.ifname.as_ref().unwrap().as_str()
    }
    pub async fn link_up(&self) -> Result<()> {
        let _g = self.global_ctx.net_ns.guard();
        self.ifcfg.set_link_status(self.ifname(), true).await?;
        Ok(())
    }
    pub async fn add_route(&self, address: Ipv4Addr, cidr: u8) -> Result<()> {
        let _g = self.global_ctx.net_ns.guard();
        self.ifcfg
            .add_ipv4_route(self.ifname(), address, cidr)
            .await?;
        Ok(())
    }
    // passes through to ifcfg; `None` presumably clears all addresses —
    // confirm against IfConfiguerTrait's contract
    pub async fn remove_ip(&self, ip: Option<Ipv4Addr>) -> Result<()> {
        let _g = self.global_ctx.net_ns.guard();
        self.ifcfg.remove_ip(self.ifname(), ip).await?;
        Ok(())
    }
    pub async fn add_ip(&self, ip: Ipv4Addr, cidr: i32) -> Result<()> {
        let _g = self.global_ctx.net_ns.guard();
        self.ifcfg
            .add_ipv4_ip(self.ifname(), ip, cidr as u8)
            .await?;
        Ok(())
    }
    /// Stream of packets read from the TUN device.
    pub fn pin_recv_stream(&self) -> Pin<Box<dyn DatagramStream>> {
        self.tun.as_ref().unwrap().pin_stream()
    }
    /// Sink writing packets to the TUN device.
    pub fn pin_send_stream(&self) -> Pin<Box<dyn DatagramSink>> {
        self.tun.as_ref().unwrap().pin_sink()
    }
    pub fn get_ifcfg(&self) -> &dyn IfConfiguerTrait {
        self.ifcfg.as_ref()
    }
}
#[cfg(test)]
mod tests {
    use crate::common::{error::Error, global_ctx::tests::get_mock_global_ctx};

    use super::VirtualNic;

    /// Create a TUN device, bring it up and assign an address.
    /// Requires privileges to create TUN devices, so this only passes in a
    /// suitably configured environment.
    async fn run_test_helper() -> Result<VirtualNic, Error> {
        let dev = VirtualNic::new(get_mock_global_ctx()).create_dev().await?;
        // give the OS a moment to finish bringing the interface up
        tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
        dev.link_up().await?;
        dev.remove_ip(None).await?;
        dev.add_ip("10.144.111.1".parse().unwrap(), 24).await?;
        Ok(dev)
    }

    // fix: removed the large block of commented-out experiment code that
    // followed; it referenced APIs (`into_framed`) not present here.
    #[tokio::test]
    async fn tun_test() {
        let _dev = run_test_helper().await.unwrap();
    }
}