magic dns (#813)

This patch implements:

1. A DNS server that handles the .et.net. zone locally and forwards all other queries to the system DNS server.

2. A DNS server instance that is a singleton on each machine, bound to one specific TCP port so that instances are mutually exclusive. This instance is responsible for configuring the system DNS and running the DNS server that handles DNS queries.

3. A DNS client instance, one of which every EasyTier instance runs. This client tries to connect to the DNS server instance and updates the DNS records held there.

This PR only implements the system DNS configuration for Windows; Linux and macOS support will be added later.
This commit is contained in:
Sijie.Sun
2025-05-16 09:24:24 +08:00
committed by GitHub
parent 99430983bc
commit 28fe6257be
40 changed files with 2800 additions and 229 deletions
+3
View File
@@ -30,6 +30,9 @@ message FlagsInConfig {
// allow relay kcp packets (for public server, this can reduce the throughput)
bool disable_relay_kcp = 20;
bool proxy_forward_by_system = 21;
// enable magic dns or not
bool accept_dns = 22;
}
message RpcDescriptor {
+49
View File
@@ -0,0 +1,49 @@
syntax = "proto3";
import "google/protobuf/timestamp.proto";
import "common.proto";
import "cli.proto";
package magic_dns;
message DnsRecordA {
string name = 1;
common.Ipv4Addr value = 2;
int32 ttl = 3;
}
message DnsRecordSOA {
string name = 1;
string value = 2;
}
message DnsRecord {
oneof record {
DnsRecordA a = 1;
DnsRecordSOA soa = 2;
}
}
message DnsRecordList {
repeated DnsRecord records = 1;
}
message UpdateDnsRecordRequest {
string zone = 1;
repeated cli.Route routes = 2;
}
message GetDnsRecordResponse {
map<string, DnsRecordList> records = 1;
}
message HandshakeRequest {}
message HandshakeResponse {}
service MagicDnsServerRpc {
rpc Handshake(HandshakeRequest) returns (HandshakeResponse) {}
rpc Heartbeat(common.Void) returns (common.Void) {}
rpc UpdateDnsRecord(UpdateDnsRecordRequest) returns (common.Void) {}
rpc GetDnsRecord(common.Void) returns (GetDnsRecordResponse) {}
}
+1
View File
@@ -0,0 +1 @@
include!(concat!(env!("OUT_DIR"), "/magic_dns.rs"));
+1
View File
@@ -4,6 +4,7 @@ pub mod rpc_types;
pub mod cli;
pub mod common;
pub mod error;
pub mod magic_dns;
pub mod peer_rpc;
pub mod web;
+2 -2
View File
@@ -65,7 +65,7 @@ pub struct Client {
transport: Mutex<Transport>,
inflight_requests: InflightRequestTable,
peer_info: PeerInfoTable,
tasks: Arc<Mutex<JoinSet<()>>>,
tasks: Mutex<JoinSet<()>>,
}
impl Client {
@@ -76,7 +76,7 @@ impl Client {
transport: Mutex::new(MpscTunnel::new(ring_b, None)),
inflight_requests: Arc::new(DashMap::new()),
peer_info: Arc::new(DashMap::new()),
tasks: Arc::new(Mutex::new(JoinSet::new())),
tasks: Mutex::new(JoinSet::new()),
}
}
+24 -5
View File
@@ -12,7 +12,10 @@ use tokio_stream::StreamExt;
use crate::{
common::{join_joinset_background, PeerId},
proto::{
common::{self, CompressionAlgoPb, RpcCompressionInfo, RpcPacket, RpcRequest, RpcResponse},
common::{
self, CompressionAlgoPb, RpcCompressionInfo, RpcPacket, RpcRequest, RpcResponse,
TunnelInfo,
},
rpc_types::{controller::Controller, error::Result},
},
tunnel::{
@@ -82,7 +85,8 @@ impl Server {
let packet_merges = self.packet_mergers.clone();
let reg = self.registry.clone();
let t = tasks.clone();
let t = Arc::downgrade(&tasks);
let tunnel_info = mpsc.tunnel_info();
tasks.lock().unwrap().spawn(async move {
let mut mpsc = mpsc;
let mut rx = mpsc.get_stream();
@@ -120,10 +124,15 @@ impl Server {
match ret {
Ok(Some(packet)) => {
packet_merges.remove(&key);
let Some(t) = t.upgrade() else {
tracing::error!("tasks is dropped");
return;
};
t.lock().unwrap().spawn(Self::handle_rpc(
mpsc.get_sink(),
packet,
reg.clone(),
tunnel_info.clone(),
));
}
Ok(None) => {}
@@ -143,7 +152,11 @@ impl Server {
});
}
async fn handle_rpc_request(packet: RpcPacket, reg: Arc<ServiceRegistry>) -> Result<Bytes> {
async fn handle_rpc_request(
packet: RpcPacket,
reg: Arc<ServiceRegistry>,
tunnel_info: Option<TunnelInfo>,
) -> Result<Bytes> {
let body = if let Some(compression_info) = packet.compression_info {
decompress_packet(
compression_info.algo.try_into().unwrap_or_default(),
@@ -158,6 +171,7 @@ impl Server {
let mut ctrl = RpcController::default();
let raw_req = Bytes::from(rpc_request.request);
ctrl.set_raw_input(raw_req.clone());
ctrl.set_tunnel_info(tunnel_info);
let ret = timeout(
timeout_duration,
reg.call_method(packet.descriptor.unwrap(), ctrl.clone(), raw_req),
@@ -170,7 +184,12 @@ impl Server {
}
}
async fn handle_rpc(sender: MpscTunnelSender, packet: RpcPacket, reg: Arc<ServiceRegistry>) {
async fn handle_rpc(
sender: MpscTunnelSender,
packet: RpcPacket,
reg: Arc<ServiceRegistry>,
tunnel_info: Option<TunnelInfo>,
) {
let from_peer = packet.from_peer;
let to_peer = packet.to_peer;
let transaction_id = packet.transaction_id;
@@ -181,7 +200,7 @@ impl Server {
let now = std::time::Instant::now();
let compression_info = packet.compression_info.clone();
let resp_bytes = Self::handle_rpc_request(packet, reg).await;
let resp_bytes = Self::handle_rpc_request(packet, reg, tunnel_info).await;
match &resp_bytes {
Ok(r) => {
+68 -12
View File
@@ -9,6 +9,7 @@ use tokio::task::JoinSet;
use crate::{
common::join_joinset_background,
proto::{
common::TunnelInfo,
rpc_impl::bidirect::BidirectRpcManager,
rpc_types::{__rt::RpcClientFactory, error::Error},
},
@@ -17,11 +18,22 @@ use crate::{
use super::service_registry::ServiceRegistry;
#[async_trait::async_trait]
#[auto_impl::auto_impl(Arc, Box)]
pub trait RpcServerHook: Send + Sync {
async fn on_new_client(&self, _tunnel_info: Option<TunnelInfo>) {}
async fn on_client_disconnected(&self, _tunnel_info: Option<TunnelInfo>) {}
}
struct DefaultHook;
impl RpcServerHook for DefaultHook {}
pub struct StandAloneServer<L> {
registry: Arc<ServiceRegistry>,
listener: Option<L>,
inflight_server: Arc<AtomicU32>,
tasks: Arc<Mutex<JoinSet<()>>>,
tasks: JoinSet<()>,
hook: Option<Arc<dyn RpcServerHook>>,
}
impl<L: TunnelListener + 'static> StandAloneServer<L> {
@@ -30,10 +42,16 @@ impl<L: TunnelListener + 'static> StandAloneServer<L> {
registry: Arc::new(ServiceRegistry::new()),
listener: Some(listener),
inflight_server: Arc::new(AtomicU32::new(0)),
tasks: Arc::new(Mutex::new(JoinSet::new())),
tasks: JoinSet::new(),
hook: None,
}
}
pub fn set_hook(&mut self, hook: Arc<dyn RpcServerHook>) {
self.hook = Some(hook);
}
pub fn registry(&self) -> &ServiceRegistry {
&self.registry
}
@@ -42,17 +60,20 @@ impl<L: TunnelListener + 'static> StandAloneServer<L> {
listener: &mut L,
inflight: Arc<AtomicU32>,
registry: Arc<ServiceRegistry>,
tasks: Arc<Mutex<JoinSet<()>>>,
hook: Arc<dyn RpcServerHook>,
) -> Result<(), Error> {
listener
.listen()
.await
.with_context(|| "failed to listen")?;
let tasks = Arc::new(Mutex::new(JoinSet::new()));
join_joinset_background(tasks.clone(), "standalone serve_loop".to_string());
loop {
let tunnel = listener.accept().await?;
let tunnel_info = tunnel.info();
let registry = registry.clone();
let inflight_server = inflight.clone();
let hook = hook.clone();
hook.on_new_client(tunnel_info.clone()).await;
inflight_server.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
tasks.lock().unwrap().spawn(async move {
let server =
@@ -60,27 +81,32 @@ impl<L: TunnelListener + 'static> StandAloneServer<L> {
server.rpc_server().registry().replace_registry(&registry);
server.run_with_tunnel(tunnel);
server.wait().await;
hook.on_client_disconnected(tunnel_info.clone()).await;
inflight_server.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
});
}
}
pub async fn serve(&mut self) -> Result<(), Error> {
let tasks = self.tasks.clone();
let mut listener = self.listener.take().unwrap();
let registry = self.registry.clone();
let hook = self.hook.take().unwrap_or_else(|| Arc::new(DefaultHook));
join_joinset_background(tasks.clone(), "standalone server tasks".to_string());
listener
.listen()
.await
.with_context(|| "failed to listen")?;
let registry = self.registry.clone();
let inflight_server = self.inflight_server.clone();
self.tasks.lock().unwrap().spawn(async move {
self.tasks.spawn(async move {
loop {
let ret = Self::serve_loop(
&mut listener,
inflight_server.clone(),
registry.clone(),
tasks.clone(),
hook.clone(),
)
.await;
if let Err(e) = ret {
@@ -146,4 +172,34 @@ impl<C: TunnelConnector> StandAloneClient<C> {
.rpc_client()
.scoped_client::<F>(1, 1, domain_name))
}
pub async fn wait(&mut self) {
if let Some(client) = self.client.take() {
client.wait().await;
}
}
}
#[cfg(test)]
mod tests {
use crate::{
proto::rpc_impl::standalone::StandAloneServer,
tunnel::{
tcp::{TcpTunnelConnector, TcpTunnelListener},
TunnelConnector as _,
},
};
#[tokio::test]
async fn standalone_exit_on_drop() {
let addr: url::Url = "tcp://0.0.0.0:53884".parse().unwrap();
let tunnel = TcpTunnelListener::new(addr.clone());
let mut server = StandAloneServer::new(tunnel);
server.serve().await.unwrap();
drop(server);
// tcp should closed
let mut connector = TcpTunnelConnector::new(addr);
connector.connect().await.unwrap_err();
}
}
@@ -2,6 +2,8 @@ use std::sync::{Arc, Mutex};
use bytes::Bytes;
use crate::proto::common::TunnelInfo;
// Controller must impl clone and all cloned controllers share the same data
pub trait Controller: Send + Sync + Clone + 'static {
fn timeout_ms(&self) -> i32 {
@@ -21,6 +23,11 @@ pub trait Controller: Send + Sync + Clone + 'static {
None
}
fn set_tunnel_info(&mut self, _tunnel_info: Option<TunnelInfo>) {}
fn get_tunnel_info(&self) -> Option<&TunnelInfo> {
None
}
fn set_raw_output(&mut self, _raw_output: Bytes) {}
fn get_raw_output(&self) -> Option<Bytes> {
None
@@ -38,6 +45,7 @@ pub struct BaseController {
pub timeout_ms: i32,
pub trace_id: i32,
pub raw_data: Arc<Mutex<BaseControllerRawData>>,
pub tunnel_info: Option<TunnelInfo>,
}
impl Controller for BaseController {
@@ -72,6 +80,14 @@ impl Controller for BaseController {
fn get_raw_output(&self) -> Option<Bytes> {
self.raw_data.lock().unwrap().raw_output.clone()
}
fn get_tunnel_info(&self) -> Option<&TunnelInfo> {
self.tunnel_info.as_ref()
}
fn set_tunnel_info(&mut self, tunnel_info: Option<TunnelInfo>) {
self.tunnel_info = tunnel_info;
}
}
impl Default for BaseController {
@@ -83,6 +99,7 @@ impl Default for BaseController {
raw_input: None,
raw_output: None,
})),
tunnel_info: None,
}
}
}