Compare commits

..

11 Commits

Author SHA1 Message Date
Luna Yao 811f151155 refactor: rpc build (#2244)
rewrite rpc build with quote crate
2026-05-15 14:01:56 +08:00
Luna Yao 8428a89d2d refactor: introduce HedgeExt for task hedging; rewrite NatDstQuicConnector (#2229) 2026-05-12 20:26:16 +08:00
韩嘉乐 513695297c [OHOS] feat: Enhance Rust kernel with config management and routing improvements (#2227)
* [OHOS.with ai] 将配置管理/配置分享/路由聚合/实例状态解析下沉至 Rust 内核,收敛职责并提升性能 (#2209)

* feat: add ohrs config store and startup error logging

* feat: full ability core for ohos

* feat: full ability core for ohos

* feat: clean code

---------

Co-authored-by: FrankHan <frankhan@FrankHans-Mac-mini.local>

* fix: 添加缺失文件

* fix: 修复更新路由启动两次TUN问题,并调整日志

* fix: rustfmt

* fix: 适配Cidr忽略/32格式路由

* fix: 修复Option适配错误

* fix: rustfmt

* fix: rustfmt

---------

Co-authored-by: FrankHan <frankhan@FrankHans-Mac-mini.local>
2026-05-10 14:15:31 +08:00
21paradox bfbfa2ef8d fix: reuse conn by dst_peer_id, every peer use only 1 quic conn, to fix nat lost problem (#2216) 2026-05-09 22:33:44 +08:00
KKRainbow 8e1d079142 feat: add Windows UDP broadcast relay (#2222)
This may help games find rooms in the virtual network.

- add opt-in Windows UDP broadcast relay config flag and CLI/env plumbing
- capture local UDP broadcasts with Windows raw sockets, normalize packets, and inject them via PeerManager
2026-05-09 09:56:31 +08:00
fanyang 55f15bb6f0 fix(connector): classify manual reconnect timeouts by stage (#2062) 2026-05-08 22:08:51 +08:00
Luna Yao 96fd39649a revert UPX version to 4.2.4 in core.yml (#2221) 2026-05-07 18:49:40 +08:00
KKRainbow 74fc8b300d chore: bump version to 2.6.4 (#2219) 2026-05-07 13:48:51 +08:00
KKRainbow baeee40b79 fix machine uid and easytier-web panic (#2215)
1. fix(web-client): persist and migrate machine id
2. fix panic when easytier-web session receive malformat packet
2026-05-07 00:57:42 +08:00
fanyang 4342c8d7a2 fix: add missing CLI help text (#2213) 2026-05-05 17:05:34 +08:00
KKRainbow 1178b312fa fix foreign network entry leak (#2211) 2026-05-05 11:01:44 +08:00
103 changed files with 6954 additions and 2034 deletions
+4 -1
View File
@@ -157,6 +157,9 @@ jobs:
- uses: mlugg/setup-zig@v2
if: ${{ contains(matrix.OS, 'ubuntu') }}
with:
version: 0.16.0
use-cache: true
- uses: taiki-e/install-action@v2
if: ${{ contains(matrix.OS, 'ubuntu') }}
@@ -227,7 +230,7 @@ jobs:
*) UPX_ARCH="amd64" ;;
esac
UPX_VERSION=5.1.1
UPX_VERSION=4.2.4
UPX_PKG="upx-${UPX_VERSION}-${UPX_ARCH}_linux"
curl -L "https://github.com/upx/upx/releases/download/v${UPX_VERSION}/${UPX_PKG}.tar.xz" -s | tar xJvf -
cp "${UPX_PKG}/upx" .
+1 -1
View File
@@ -11,7 +11,7 @@ on:
image_tag:
description: 'Tag for this image build'
type: string
default: 'v2.6.3'
default: 'v2.6.4'
required: true
mark_latest:
description: 'Mark this image as latest'
+1 -1
View File
@@ -18,7 +18,7 @@ on:
version:
description: 'Version for this release'
type: string
default: 'v2.6.3'
default: 'v2.6.4'
required: true
make_latest:
description: 'Mark this release as latest'
Generated
+9 -26
View File
@@ -2229,7 +2229,7 @@ checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555"
[[package]]
name = "easytier"
version = "2.6.3"
version = "2.6.4"
dependencies = [
"aes-gcm",
"anyhow",
@@ -2264,7 +2264,6 @@ dependencies = [
"derivative",
"derive_builder",
"derive_more 2.1.1",
"easytier-rpc-build",
"encoding",
"flume 0.12.0",
"forwarded-header-value",
@@ -2291,6 +2290,7 @@ dependencies = [
"machine-uid",
"maplit",
"mimalloc",
"moka",
"multimap",
"natpmp",
"netlink-packet-core",
@@ -2309,6 +2309,7 @@ dependencies = [
"pin-project-lite",
"pnet",
"prefix-trie",
"proc-macro2",
"prost",
"prost-build",
"prost-reflect",
@@ -2318,6 +2319,7 @@ dependencies = [
"prost-wkt-types",
"quinn",
"quinn-plaintext",
"quote",
"rand 0.8.5",
"rcgen",
"regex",
@@ -2357,7 +2359,6 @@ dependencies = [
"tokio-util",
"tokio-websockets",
"toml 0.8.19",
"tonic-build",
"tracing",
"tracing-subscriber",
"tun-easytier",
@@ -2405,7 +2406,7 @@ dependencies = [
[[package]]
name = "easytier-gui"
version = "2.6.3"
version = "2.6.4"
dependencies = [
"anyhow",
"async-trait",
@@ -2436,14 +2437,6 @@ dependencies = [
"windows 0.52.0",
]
[[package]]
name = "easytier-rpc-build"
version = "0.1.0"
dependencies = [
"heck 0.5.0",
"prost-build",
]
[[package]]
name = "easytier-uptime"
version = "0.1.0"
@@ -2486,7 +2479,7 @@ dependencies = [
[[package]]
name = "easytier-web"
version = "2.6.3"
version = "2.6.4"
dependencies = [
"anyhow",
"async-trait",
@@ -5103,9 +5096,12 @@ version = "0.12.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926"
dependencies = [
"async-lock",
"crossbeam-channel",
"crossbeam-epoch",
"crossbeam-utils",
"event-listener",
"futures-util",
"loom",
"parking_lot",
"portable-atomic",
@@ -10051,19 +10047,6 @@ dependencies = [
"tracing",
]
[[package]]
name = "tonic-build"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "568392c5a2bd0020723e3f387891176aabafe36fd9fcd074ad309dfa0c8eb964"
dependencies = [
"prettyplease",
"proc-macro2",
"prost-build",
"quote",
"syn 2.0.117",
]
[[package]]
name = "tower"
version = "0.4.13"
-1
View File
@@ -3,7 +3,6 @@ resolver = "2"
members = [
"easytier",
"easytier-gui/src-tauri",
"easytier-rpc-build",
"easytier-web",
"easytier-contrib/easytier-ffi",
"easytier-contrib/easytier-uptime",
+1 -1
View File
@@ -1,6 +1,6 @@
id=easytier_magisk
name=EasyTier_Magisk
version=v2.6.3
version=v2.6.4
versionCode=1
author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
+544 -132
View File
File diff suppressed because it is too large Load Diff
+10
View File
@@ -7,6 +7,10 @@ edition = "2024"
crate-type=["cdylib"]
[dependencies]
async-trait = "0.1"
base64 = "0.22"
flate2 = "1.1"
gethostname = "1.1"
ohos-hilog-binding = {version = "*", features = ["redirect"]}
easytier = { path = "../../easytier" }
napi-derive-ohos = "1.1"
@@ -26,10 +30,16 @@ napi-ohos = { version = "1.1", default-features = false, features = [
"web_stream",
] }
once_cell = "1.21.3"
ipnet = "2.10"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.125"
prost-reflect = { version = "0.14.5", default-features = false, features = ["derive"] }
rusqlite = { version = "0.32", features = ["bundled"] }
tracing-subscriber = "0.3.19"
tracing-core = "0.1.33"
tracing = "0.1.41"
tokio = { version = "1", features = ["rt-multi-thread", "sync", "time"] }
url = "2.5"
uuid = { version = "1.5.0", features = [
"v4",
"fast-rng",
@@ -0,0 +1,4 @@
// Config subsystem for the OHOS NAPI bridge:
// - repository: persistence entry points (re-exported repo facade)
// - services:   schema/share-link business logic
// - storage:    SQLite-backed metadata store
// - types:      NAPI-exposed data transfer types
pub(crate) mod repository;
pub(crate) mod services;
pub(crate) mod storage;
pub(crate) mod types;
@@ -0,0 +1,13 @@
// The config repository implementation lives outside this module tree
// (shared source files); `#[path]` pulls those files in as private
// submodules of this crate so they compile here without duplication.
#[path = "../../config_repo/field_store.rs"]
mod field_store;
#[path = "../../config_repo/import_export.rs"]
mod import_export;
#[path = "../../config_repo/legacy_migration.rs"]
mod legacy_migration;
#[path = "../../config_repo/validation.rs"]
mod validation;
#[path = "../../config_repo.rs"]
mod repo;
// Expose the repo facade (init/save/get/... functions) as this module's API.
pub use repo::*;
@@ -0,0 +1,2 @@
// schema_service:     protobuf-descriptor-driven config schema for the UI
// share_link_service: build/parse easytier.cn config share links
pub(crate) mod schema_service;
pub(crate) mod share_link_service;
@@ -0,0 +1,414 @@
use easytier::proto::ALL_DESCRIPTOR_BYTES;
use napi_derive_ohos::napi;
use once_cell::sync::Lazy;
use prost_reflect::{Cardinality, DescriptorPool, FieldDescriptor, Kind, MessageDescriptor};
use serde::Serialize;
/// One selectable option of an enum-typed config field.
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct FieldOption {
// Enum value name as declared in the .proto file.
pub label: String,
// Enum value number, rendered as a decimal string.
pub value: String,
}
/// A declarative validation rule attached to a schema field
/// (currently only "required" rules are produced — see build_validations).
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct ValidationRule {
// Rule discriminator, e.g. "required".
pub rule_type: String,
// Optional rule argument; empty when the rule takes none.
pub arg: String,
// Human-readable message shown when the rule fails.
pub message: String,
}
/// A node in the NetworkConfig schema tree. The same type is used for the
/// root ("schema"), message/enum definitions ("object"/"enum") and leaf
/// fields ("field"); `node_kind` tells them apart.
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct NetworkConfigSchema {
// "schema" | "object" | "enum" | "field" — see build_node call sites.
pub node_kind: String,
pub name: String,
// Protobuf field number; 0 for non-field nodes.
pub field_number: i32,
// Fully-qualified protobuf type name for enum/message nodes.
pub type_name: Option<String>,
// UI hint such as "cidr_ip" or "peer[]" — see field_semantic_type.
pub semantic_type: Option<String>,
// JSON-ish value category: "boolean" | "string" | "number" | "enum" | "object".
pub value_kind: String,
pub is_list: bool,
pub required: bool,
// Textual default ("false", "0", "\"\"", "[]") or None for messages.
pub default_value_text: Option<String>,
pub enum_options: Vec<FieldOption>,
pub validations: Vec<ValidationRule>,
// Child field nodes (for message-typed nodes).
pub children: Vec<NetworkConfigSchema>,
// Referenced type definitions; only populated on the root node.
pub definitions: Vec<NetworkConfigSchema>,
}
/// Pairing of a top-level NetworkConfig field name with its protobuf field
/// number; used by the share-link service to shorten JSON keys.
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct ConfigFieldMapping {
pub field_name: String,
pub field_number: i32,
}
// Descriptor pool decoded once from the protobuf descriptors embedded in the
// easytier crate; panics at first use if the embedded bytes are corrupt.
static DESCRIPTOR_POOL: Lazy<DescriptorPool> = Lazy::new(|| {
DescriptorPool::decode(ALL_DESCRIPTOR_BYTES)
.expect("easytier descriptor pool should decode from embedded protobuf descriptors")
});
// Fully-qualified name of the message the schema tree is built from.
const NETWORK_CONFIG_MESSAGE_NAME: &str = "api.manage.NetworkConfig";
// Accessor for the lazily-initialized pool.
fn descriptor_pool() -> &'static DescriptorPool {
&DESCRIPTOR_POOL
}
// Descriptor of api.manage.NetworkConfig; panics if the message is missing,
// which would indicate a build-time descriptor mismatch.
fn network_config_descriptor() -> MessageDescriptor {
descriptor_pool()
.get_message_by_name(NETWORK_CONFIG_MESSAGE_NAME)
.expect("api.manage.NetworkConfig descriptor should exist")
}
/// Textual default for a field, mirroring proto3 defaults: "[]" for repeated
/// and map fields, "false"/"0"/"\"\"" for scalars, the first enum value's
/// number for enums, and None for singular messages (no scalar default).
fn field_default_value_text(field: &FieldDescriptor) -> Option<String> {
    // Repeated and map fields always start out empty.
    if field.is_list() || field.is_map() {
        return Some(String::from("[]"));
    }
    match field.kind() {
        Kind::Bool => Some(String::from("false")),
        // Bytes are surfaced as strings on the UI side, so both default to "".
        Kind::String | Kind::Bytes => Some(String::from("\"\"")),
        Kind::Enum(enum_desc) => enum_desc
            .get_value(0)
            .map(|value| value.number().to_string()),
        Kind::Message(_) => None,
        // All remaining kinds are numeric.
        Kind::Int32 | Kind::Sint32 | Kind::Sfixed32 | Kind::Int64 | Kind::Sint64
        | Kind::Sfixed64 | Kind::Uint32 | Kind::Fixed32 | Kind::Uint64 | Kind::Fixed64
        | Kind::Float | Kind::Double => Some(String::from("0")),
    }
}
/// Fully-qualified type name for enum/message fields; scalar fields have no
/// named type and yield None.
fn field_type_name(field: &FieldDescriptor) -> Option<String> {
    let full_name = match field.kind() {
        Kind::Enum(desc) => desc.full_name().to_owned(),
        Kind::Message(desc) => desc.full_name().to_owned(),
        _ => return None,
    };
    Some(full_name)
}
/// UI semantic hint keyed by well-known NetworkConfig field names; None for
/// fields without a special editor.
fn field_semantic_type(field: &FieldDescriptor) -> Option<String> {
    // Name -> hint lookup table; "[]" suffix marks list-style editors.
    const SEMANTIC_TYPES: [(&str, &str); 10] = [
        ("virtual_ipv4", "cidr_ip"),
        ("network_length", "cidr_mask"),
        ("peer_urls", "peer[]"),
        ("proxy_cidrs", "cidr[]"),
        ("listener_urls", "listener[]"),
        ("routes", "route[]"),
        ("exit_nodes", "ip[]"),
        ("relay_network_whitelist", "network_name[]"),
        ("mapped_listeners", "mapped_listener[]"),
        ("port_forwards", "port_forward[]"),
    ];
    SEMANTIC_TYPES
        .iter()
        .find(|(name, _)| *name == field.name())
        .map(|(_, semantic)| (*semantic).to_string())
}
/// All (name, number) options of an enum kind; empty for any other kind.
fn enum_options(kind: Kind) -> Vec<FieldOption> {
    let Kind::Enum(enum_desc) = kind else {
        return Vec::new();
    };
    enum_desc
        .values()
        .map(|value| FieldOption {
            label: value.name().to_string(),
            value: value.number().to_string(),
        })
        .collect()
}
/// Whether a field should appear in the schema. Fields inside a real oneof
/// are hidden; proto3 `optional` fields are implemented as synthetic
/// single-member oneofs and must remain visible.
fn should_expose_field(field: &FieldDescriptor) -> bool {
    if field.containing_oneof().is_none() {
        return true;
    }
    field
        .field_descriptor_proto()
        .proto3_optional
        .unwrap_or(false)
}
/// Validation rules for a field; currently only proto2-style `required`
/// fields produce a rule.
fn build_validations(field: &FieldDescriptor) -> Vec<ValidationRule> {
    if field.cardinality() != Cardinality::Required {
        return Vec::new();
    }
    vec![ValidationRule {
        rule_type: String::from("required"),
        arg: String::new(),
        message: format!("{} is required", field.name()),
    }]
}
/// JSON-ish value category of a field: "boolean", "string", "number",
/// "enum" or "object". Map fields always surface as "object".
fn kind_to_value_kind(field: &FieldDescriptor) -> String {
    if field.is_map() {
        return String::from("object");
    }
    let value_kind = match field.kind() {
        Kind::Bool => "boolean",
        // Bytes are transported as strings across the NAPI boundary.
        Kind::String | Kind::Bytes => "string",
        Kind::Enum(_) => "enum",
        Kind::Message(_) => "object",
        Kind::Int32 | Kind::Sint32 | Kind::Sfixed32 | Kind::Int64 | Kind::Sint64
        | Kind::Sfixed64 | Kind::Uint32 | Kind::Fixed32 | Kind::Uint64 | Kind::Fixed64
        | Kind::Float | Kind::Double => "number",
    };
    value_kind.to_string()
}
// Positional constructor for a schema node; exists only to keep the many
// call sites below compact. Arguments map 1:1 onto NetworkConfigSchema fields.
fn build_node(
node_kind: &str,
name: String,
field_number: i32,
type_name: Option<String>,
semantic_type: Option<String>,
value_kind: String,
is_list: bool,
required: bool,
default_value_text: Option<String>,
enum_options: Vec<FieldOption>,
validations: Vec<ValidationRule>,
children: Vec<NetworkConfigSchema>,
definitions: Vec<NetworkConfigSchema>,
) -> NetworkConfigSchema {
NetworkConfigSchema {
node_kind: node_kind.to_string(),
name,
field_number,
type_name,
semantic_type,
value_kind,
is_list,
required,
default_value_text,
enum_options,
validations,
children,
definitions,
}
}
// Schema node for a protobuf map entry: an "object" node whose two children
// are the synthetic key and value fields of the MapEntry message.
fn build_map_entry_node(message_desc: &MessageDescriptor) -> NetworkConfigSchema {
let key_field = message_desc.map_entry_key_field();
let value_field = message_desc.map_entry_value_field();
build_node(
"object",
message_desc.name().to_string(),
0,
Some(message_desc.full_name().to_string()),
None,
"object".to_string(),
false,
true,
None,
Vec::new(),
Vec::new(),
vec![
build_schema_field_node(&key_field),
build_schema_field_node(&value_field),
],
Vec::new(),
)
}
/// Child nodes of a field: a single map-entry node for maps, the exposed
/// sub-fields for message-typed fields, nothing for scalars.
fn field_children(field: &FieldDescriptor) -> Vec<NetworkConfigSchema> {
    match field.kind() {
        // A map field's kind is its synthetic MapEntry message.
        Kind::Message(message_desc) if field.is_map() => {
            vec![build_map_entry_node(&message_desc)]
        }
        Kind::Message(message_desc) => build_message_children(&message_desc),
        _ => Vec::new(),
    }
}
fn build_message_children(message_desc: &MessageDescriptor) -> Vec<NetworkConfigSchema> {
message_desc
.fields()
.filter(should_expose_field)
.map(|field| build_schema_field_node(&field))
.collect()
}
// Leaf "field" node assembled from all the per-field helpers above.
fn build_schema_field_node(field: &FieldDescriptor) -> NetworkConfigSchema {
build_node(
"field",
field.name().to_string(),
field.number() as i32,
field_type_name(field),
field_semantic_type(field),
kind_to_value_kind(field),
field.is_list() || field.is_map(),
field.cardinality() == Cardinality::Required,
field_default_value_text(field),
enum_options(field.kind()),
build_validations(field),
field_children(field),
Vec::new(),
)
}
// Definition nodes for every message (except NetworkConfig itself and
// synthetic map entries) and every enum in the pool, sorted by name so the
// output is stable across runs.
fn collect_definitions() -> Vec<NetworkConfigSchema> {
let mut definitions = Vec::new();
for message_desc in descriptor_pool().all_messages() {
let full_name = message_desc.full_name();
// The root message is emitted separately; map entries are inlined
// at their usage site by build_map_entry_node.
if full_name == NETWORK_CONFIG_MESSAGE_NAME || message_desc.is_map_entry() {
continue;
}
definitions.push(build_node(
"object",
full_name.to_string(),
0,
Some(full_name.to_string()),
None,
"object".to_string(),
false,
true,
None,
Vec::new(),
Vec::new(),
build_message_children(&message_desc),
Vec::new(),
));
}
for enum_desc in descriptor_pool().all_enums() {
definitions.push(build_node(
"enum",
enum_desc.full_name().to_string(),
0,
Some(enum_desc.full_name().to_string()),
None,
"enum".to_string(),
false,
false,
None,
enum_options(Kind::Enum(enum_desc.clone())),
Vec::new(),
Vec::new(),
Vec::new(),
));
}
definitions.sort_by(|a, b| a.name.cmp(&b.name));
definitions
}
// Root "schema" node: NetworkConfig's exposed fields as children plus all
// referenced type definitions.
fn build_network_config_schema() -> NetworkConfigSchema {
let network_config = network_config_descriptor();
build_node(
"schema",
network_config.name().to_string(),
0,
Some(network_config.full_name().to_string()),
None,
"object".to_string(),
false,
true,
None,
Vec::new(),
Vec::new(),
build_message_children(&network_config),
collect_definitions(),
)
}
fn build_network_config_field_mappings() -> Vec<ConfigFieldMapping> {
network_config_descriptor()
.fields()
.filter(should_expose_field)
.map(|field| ConfigFieldMapping {
field_name: field.name().to_string(),
field_number: field.number() as i32,
})
.collect()
}
/// Public entry point: the full NetworkConfig schema tree (rebuilt per call).
pub fn get_network_config_schema() -> NetworkConfigSchema {
build_network_config_schema()
}
/// Public entry point: field name/number mappings for NetworkConfig.
pub fn get_network_config_field_mappings() -> Vec<ConfigFieldMapping> {
build_network_config_field_mappings()
}
#[cfg(test)]
mod tests {
use super::*;
// Smoke test over the generated tree: root metadata, a semantic hint, a
// nested message child, and message/enum definitions must all be present.
#[test]
fn schema_is_exposed_as_single_tree_type() {
let schema = get_network_config_schema();
assert_eq!(schema.node_kind, "schema");
assert_eq!(schema.name, "NetworkConfig");
assert_eq!(
schema.type_name.as_deref(),
Some("api.manage.NetworkConfig")
);
let virtual_ipv4 = schema
.children
.iter()
.find(|field| field.name == "virtual_ipv4")
.expect("virtual_ipv4 field");
assert_eq!(virtual_ipv4.semantic_type.as_deref(), Some("cidr_ip"));
let secure_mode = schema
.children
.iter()
.find(|field| field.name == "secure_mode")
.expect("secure_mode field");
assert!(
secure_mode
.children
.iter()
.any(|field| field.name == "enabled")
);
let secure_mode_definition = schema
.definitions
.iter()
.find(|definition| definition.name == "common.SecureModeConfig")
.expect("secure mode definition");
assert!(
secure_mode_definition
.children
.iter()
.any(|field| field.name == "local_private_key")
);
let networking_method_definition = schema
.definitions
.iter()
.find(|definition| definition.name == "api.manage.NetworkingMethod")
.expect("networking method enum definition");
assert!(
networking_method_definition
.enum_options
.iter()
.any(|option| option.label == "PublicServer")
);
}
}
@@ -0,0 +1,197 @@
use crate::config::repository::{get_config_record, save_config_record};
use crate::config::services::schema_service::get_network_config_field_mappings;
use crate::config::types::stored_config::SharedConfigLinkPayload;
use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD};
use easytier::proto::api::manage::NetworkConfig;
use flate2::{Compression, read::ZlibDecoder, write::ZlibEncoder};
use gethostname::gethostname;
use std::collections::HashMap;
use std::io::{Read, Write};
use url::Url;
use uuid::Uuid;
// Host and path of the web config-composer page that share links point at.
const SHARE_LINK_HOST: &str = "easytier.cn";
const SHARE_LINK_PATH: &str = "/comp_cfg";
/// Field name -> field number (as string); used to shrink JSON keys before
/// compressing a config into a share link.
fn field_name_to_id_map() -> HashMap<String, String> {
    let mut map = HashMap::new();
    for mapping in get_network_config_field_mappings() {
        map.insert(mapping.field_name, mapping.field_number.to_string());
    }
    map
}
/// Field number (as string) -> field name; inverse of field_name_to_id_map,
/// used when decoding a share link back into a config.
fn field_id_to_name_map() -> HashMap<String, String> {
    let mut map = HashMap::new();
    for mapping in get_network_config_field_mappings() {
        map.insert(mapping.field_number.to_string(), mapping.field_name);
    }
    map
}
/// Drops nulls and empty arrays from the serialized config; every other
/// value is kept verbatim (cloned).
fn prune_empty(value: &serde_json::Value) -> Option<serde_json::Value> {
    let drop = match value {
        serde_json::Value::Null => true,
        serde_json::Value::Array(items) => items.is_empty(),
        _ => false,
    };
    if drop { None } else { Some(value.clone()) }
}
/// Serializes a NetworkConfig, removes empty values and renames keys to their
/// field numbers so the resulting JSON compresses into a short link.
fn map_config_json(config: &NetworkConfig) -> Result<String, String> {
    let field_name_to_id = field_name_to_id_map();
    let raw = serde_json::to_value(config).map_err(|err| err.to_string())?;
    let entries = raw.as_object().cloned().unwrap_or_default();
    let mapped: serde_json::Map<String, serde_json::Value> = entries
        .into_iter()
        .filter_map(|(key, value)| {
            let value = prune_empty(&value)?;
            // Keys without a mapping (unknown fields) are kept under their name.
            let mapped_key = field_name_to_id.get(&key).cloned().unwrap_or(key);
            Some((mapped_key, value))
        })
        .collect();
    serde_json::to_string(&mapped).map_err(|err| err.to_string())
}
/// Reverse of map_config_json: restores field names from field numbers, then
/// deserializes into a NetworkConfig.
fn unmap_config_json(raw: &str) -> Result<NetworkConfig, String> {
    let field_id_to_name = field_id_to_name_map();
    let value = serde_json::from_str::<serde_json::Value>(raw).map_err(|err| err.to_string())?;
    let mut mapped = serde_json::Map::new();
    for (key, value) in value.as_object().cloned().unwrap_or_default() {
        // Unknown keys pass through unchanged.
        let field_name = match field_id_to_name.get(&key) {
            Some(name) => name.clone(),
            None => key,
        };
        mapped.insert(field_name, value);
    }
    serde_json::from_value(serde_json::Value::Object(mapped)).map_err(|err| err.to_string())
}
// zlib-compresses the JSON payload and encodes it as unpadded URL-safe
// base64 so it can travel inside a query parameter.
fn compress_to_base64url(raw: &str) -> Result<String, String> {
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::best());
encoder
.write_all(raw.as_bytes())
.map_err(|err| err.to_string())?;
let compressed = encoder.finish().map_err(|err| err.to_string())?;
Ok(URL_SAFE_NO_PAD.encode(compressed))
}
// Inverse of compress_to_base64url: base64url-decode, then zlib-inflate back
// to the original UTF-8 JSON string.
fn decompress_from_base64url(raw: &str) -> Result<String, String> {
let compressed = URL_SAFE_NO_PAD.decode(raw).map_err(|err| err.to_string())?;
let mut decoder = ZlibDecoder::new(compressed.as_slice());
let mut out = String::new();
decoder
.read_to_string(&mut out)
.map_err(|err| err.to_string())?;
Ok(out)
}
/// Builds an easytier.cn share link for a stored config.
///
/// Returns None if the config does not exist or any serialization /
/// compression step fails. The `name` query parameter is the caller-supplied
/// `display_name` when non-empty, otherwise the stored display name.
pub fn build_config_share_link(
    config_id: &str,
    display_name: Option<String>,
    only_start: bool,
) -> Option<String> {
    let record = get_config_record(config_id)?;
    let config = serde_json::from_str::<NetworkConfig>(&record.config_json).ok()?;
    let mapped_json = map_config_json(&config).ok()?;
    let compressed = compress_to_base64url(&mapped_json).ok()?;
    // Treat an empty override as absent so it falls back to the stored
    // display name, consistent with import_config_share_link. (Previously an
    // empty override suppressed the fallback entirely.)
    let final_name = display_name
        .filter(|name| !name.is_empty())
        .or(Some(record.meta.display_name))
        .filter(|name| !name.is_empty());
    let mut url = Url::parse(&format!("https://{SHARE_LINK_HOST}{SHARE_LINK_PATH}")).ok()?;
    url.query_pairs_mut().append_pair("cfg", &compressed);
    if let Some(name) = final_name {
        url.query_pairs_mut().append_pair("name", &name);
    }
    if only_start {
        url.query_pairs_mut().append_pair("only_start", "true");
    }
    Some(url.to_string())
}
/// Parses an easytier.cn share link into its payload.
///
/// Returns None for URLs on the wrong host/path, links without a `cfg`
/// parameter, or payloads that fail to decompress/deserialize. A fresh
/// instance_id is always assigned, and the local hostname is injected when
/// available.
pub fn parse_config_share_link(share_link: &str) -> Option<SharedConfigLinkPayload> {
    let url = Url::parse(share_link).ok()?;
    if url.host_str()? != SHARE_LINK_HOST || url.path() != SHARE_LINK_PATH {
        return None;
    }
    // Single pass over the query string, keeping the first occurrence of each
    // recognized parameter (same result as repeated `find` calls).
    let mut cfg: Option<String> = None;
    let mut name: Option<String> = None;
    let mut only_start = false;
    let mut saw_only_start = false;
    for (key, value) in url.query_pairs() {
        match key.as_ref() {
            "cfg" if cfg.is_none() => cfg = Some(value.to_string()),
            "name" if name.is_none() => name = Some(value.to_string()),
            "only_start" if !saw_only_start => {
                saw_only_start = true;
                only_start = value == "true";
            }
            _ => {}
        }
    }
    let mapped_json = decompress_from_base64url(&cfg?).ok()?;
    let mut config = unmap_config_json(&mapped_json).ok()?;
    // Each import gets its own instance identity.
    config.instance_id = Some(Uuid::new_v4().to_string());
    let hostname = gethostname().to_string_lossy().to_string();
    if !hostname.is_empty() {
        config.hostname = Some(hostname);
    }
    let config_json = serde_json::to_string(&config).ok()?;
    let display_name = name.filter(|n| !n.is_empty());
    Some(SharedConfigLinkPayload {
        config_json,
        display_name,
        only_start,
    })
}
// Parses a share link and persists the contained config, returning the new
// config id (the freshly generated instance_id). A non-empty override wins
// over the link's name; otherwise the config id itself is used.
pub fn import_config_share_link(
share_link: &str,
display_name_override: Option<String>,
) -> Option<String> {
let payload = parse_config_share_link(share_link)?;
let config = serde_json::from_str::<NetworkConfig>(&payload.config_json).ok()?;
let config_id = config.instance_id.clone()?;
let display_name = display_name_override
.filter(|name| !name.is_empty())
.or(payload.display_name)
.unwrap_or_else(|| config_id.clone());
save_config_record(config_id.clone(), display_name, payload.config_json)?;
Some(config_id)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config_repo::{create_config_record, init_config_store};
use std::time::{SystemTime, UNIX_EPOCH};
// Unique temp dir per test run so parallel/repeated runs don't collide.
fn test_root() -> String {
let unique = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_nanos();
std::env::temp_dir()
.join(format!("easytier_ohrs_share_test_{unique}"))
.to_string_lossy()
.into_owned()
}
// End-to-end: build a link from a stored config, parse it back, and verify
// a fresh instance_id is assigned on both parse and import.
#[test]
fn share_link_roundtrip_works() {
assert!(init_config_store(test_root()));
create_config_record("cfg-share".to_string(), "share-demo".to_string())
.expect("create config");
let link = build_config_share_link("cfg-share", None, true).expect("share link");
let payload = parse_config_share_link(&link).expect("parse link");
let config =
serde_json::from_str::<NetworkConfig>(&payload.config_json).expect("config json");
assert!(payload.only_start);
assert_eq!(payload.display_name.as_deref(), Some("share-demo"));
assert_ne!(config.instance_id.as_deref(), Some("cfg-share"));
let imported_id = import_config_share_link(&link, None).expect("import link");
assert_ne!(imported_id, "cfg-share");
}
}
@@ -0,0 +1,333 @@
use crate::config::types::stored_config::{StoredConfigList, StoredConfigMeta};
use ohos_hilog_binding::{hilog_debug, hilog_error};
use rusqlite::{Connection, OptionalExtension, params};
use std::path::PathBuf;
use std::sync::Mutex;
use std::time::{SystemTime, UNIX_EPOCH};
// Path of the SQLite metadata database; set once by init_config_meta_store.
static CONFIG_DB_PATH: Mutex<Option<PathBuf>> = Mutex::new(None);
const CONFIG_DB_FILE_NAME: &str = "easytier-config-store.db";
// Private row type mirroring the stored_configs table; converted to the
// public StoredConfigMeta via to_meta.
#[derive(Debug, Clone)]
struct StoredConfigMetaRecord {
config_id: String,
display_name: String,
// Timestamps are stored as Unix-seconds strings (see now_ts_string).
created_at: String,
updated_at: String,
favorite: bool,
temporary: bool,
}
/// Current time as Unix seconds in decimal-string form; "0" if the system
/// clock reports a time before the epoch.
pub(crate) fn now_ts_string() -> String {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs().to_string(),
        Err(_) => String::from("0"),
    }
}
/// The configured database path, or None when the store was never
/// initialized (or the path mutex is poisoned).
fn db_file_path() -> Option<PathBuf> {
    let guard = CONFIG_DB_PATH.lock().ok()?;
    (*guard).clone()
}
// Creates the metadata tables if absent. Idempotent; also turns on foreign
// keys so deleting a config cascades to its per-field rows.
fn init_schema(conn: &Connection) -> rusqlite::Result<()> {
conn.execute_batch(
"PRAGMA foreign_keys = ON;
CREATE TABLE IF NOT EXISTS stored_configs (
config_id TEXT PRIMARY KEY,
display_name TEXT NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
favorite INTEGER NOT NULL DEFAULT 0,
temporary INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS stored_config_fields (
config_id TEXT NOT NULL,
field_name TEXT NOT NULL,
field_json TEXT NOT NULL,
updated_at TEXT NOT NULL,
PRIMARY KEY (config_id, field_name),
FOREIGN KEY (config_id) REFERENCES stored_configs(config_id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_stored_config_fields_config_id
ON stored_config_fields(config_id);",
)
}
// Opens a fresh connection to the metadata database and (re-)applies the
// schema. Returns None — with an error log — on any failure. Note: every
// call creates a new Connection; callers should reuse one where possible.
pub(crate) fn open_db() -> Option<Connection> {
let path = db_file_path()?;
let conn = match Connection::open(&path) {
Ok(conn) => conn,
Err(e) => {
hilog_error!("[Rust] failed to open config db {}: {}", path.display(), e);
return None;
}
};
if let Err(e) = init_schema(&conn) {
hilog_error!(
"[Rust] failed to initialize config db {}: {}",
path.display(),
e
);
return None;
}
Some(conn)
}
// Maps a SELECTed row (column order: config_id, display_name, created_at,
// updated_at, favorite, temporary) onto the private record type.
fn row_to_meta(row: &rusqlite::Row<'_>) -> rusqlite::Result<StoredConfigMetaRecord> {
Ok(StoredConfigMetaRecord {
config_id: row.get(0)?,
display_name: row.get(1)?,
created_at: row.get(2)?,
updated_at: row.get(3)?,
// SQLite stores the booleans as 0/1 integers.
favorite: row.get::<_, i64>(4)? != 0,
temporary: row.get::<_, i64>(5)? != 0,
})
}
// Fetches the meta row for one config; None when missing or on any query
// error (errors are swallowed via `.ok()`).
fn load_meta_record(conn: &Connection, config_id: &str) -> Option<StoredConfigMetaRecord> {
conn.query_row(
"SELECT config_id, display_name, created_at, updated_at, favorite, temporary
FROM stored_configs WHERE config_id = ?1",
params![config_id],
row_to_meta,
)
.optional()
.ok()
.flatten()
}
fn to_meta(record: StoredConfigMetaRecord) -> StoredConfigMeta {
StoredConfigMeta {
config_id: record.config_id,
display_name: record.display_name,
created_at: record.created_at,
updated_at: record.updated_at,
favorite: record.favorite,
temporary: record.temporary,
}
}
// Initializes the metadata store under `root_dir`: creates the directory,
// records the database path, and verifies the database opens and its schema
// applies. Returns false (with error logs) on any failure.
pub fn init_config_meta_store(root_dir: String) -> bool {
let root = PathBuf::from(root_dir);
if let Err(e) = std::fs::create_dir_all(&root) {
hilog_error!(
"[Rust] failed to create config db dir {}: {}",
root.display(),
e
);
return false;
}
let db_path = root.join(CONFIG_DB_FILE_NAME);
match CONFIG_DB_PATH.lock() {
Ok(mut guard) => {
*guard = Some(db_path.clone());
}
Err(e) => {
hilog_error!("[Rust] failed to lock config db path: {}", e);
return false;
}
}
// Probe once so misconfiguration is reported at init time, not first use.
if open_db().is_none() {
return false;
}
hilog_debug!("[Rust] initialized config db at {}", db_path.display());
true
}
// Lists all config metadata, most recently updated first. Any error yields
// an empty list (after logging); individual unreadable rows are skipped.
pub fn list_config_meta_entries() -> StoredConfigList {
let Some(conn) = open_db() else {
return StoredConfigList { configs: vec![] };
};
let mut stmt = match conn.prepare(
"SELECT config_id, display_name, created_at, updated_at, favorite, temporary
FROM stored_configs
ORDER BY updated_at DESC, display_name ASC",
) {
Ok(stmt) => stmt,
Err(e) => {
hilog_error!("[Rust] failed to prepare list meta query: {}", e);
return StoredConfigList { configs: vec![] };
}
};
let rows = match stmt.query_map([], row_to_meta) {
Ok(rows) => rows,
Err(e) => {
hilog_error!("[Rust] failed to list config meta rows: {}", e);
return StoredConfigList { configs: vec![] };
}
};
// Best-effort: drop rows that fail to decode rather than failing the call.
let configs = rows.filter_map(Result::ok).map(to_meta).collect();
StoredConfigList { configs }
}
/// Display name of one stored config; None when the config (or DB) is absent.
pub fn get_config_display_name(config_id: &str) -> Option<String> {
    let conn = open_db()?;
    let record = load_meta_record(&conn, config_id)?;
    Some(record.display_name)
}
pub fn get_config_meta(config_id: &str) -> Option<StoredConfigMeta> {
let conn = open_db()?;
load_meta_record(&conn, config_id).map(to_meta)
}
pub fn upsert_config_meta(
config_id: String,
display_name: String,
favorite: bool,
temporary: bool,
) -> StoredConfigMeta {
let now = now_ts_string();
let Some(conn) = open_db() else {
return StoredConfigMeta {
config_id,
display_name,
created_at: now.clone(),
updated_at: now,
favorite,
temporary,
};
};
let created_at = load_meta_record(&conn, &config_id)
.map(|record| record.created_at)
.unwrap_or_else(|| now.clone());
if let Err(e) = conn.execute(
"INSERT INTO stored_configs (
config_id, display_name, created_at, updated_at, favorite, temporary
) VALUES (?1, ?2, ?3, ?4, ?5, ?6)
ON CONFLICT(config_id) DO UPDATE SET
display_name = excluded.display_name,
updated_at = excluded.updated_at,
favorite = excluded.favorite,
temporary = excluded.temporary",
params![
config_id,
display_name,
created_at,
now,
if favorite { 1 } else { 0 },
if temporary { 1 } else { 0 }
],
) {
hilog_error!("[Rust] failed to upsert config meta: {}", e);
}
get_config_meta(&config_id).unwrap_or(StoredConfigMeta {
config_id,
display_name,
created_at,
updated_at: now,
favorite,
temporary,
})
}
// Transaction-scoped variant of upsert_config_meta: same upsert semantics,
// but every statement runs on the caller's transaction so the write commits
// or rolls back together with the caller's other changes. Returns None if
// the INSERT fails; the trailing `.or(...)` keeps a value even if the
// read-back fails.
pub(crate) fn upsert_config_meta_in_tx(
tx: &rusqlite::Transaction<'_>,
config_id: String,
display_name: String,
favorite: bool,
temporary: bool,
) -> Option<StoredConfigMeta> {
let now = now_ts_string();
// Preserve the creation timestamp of an existing row.
let created_at = tx
.query_row(
"SELECT config_id, display_name, created_at, updated_at, favorite, temporary
FROM stored_configs WHERE config_id = ?1",
params![config_id],
row_to_meta,
)
.optional()
.ok()
.flatten()
.map(|record| record.created_at)
.unwrap_or_else(|| now.clone());
tx.execute(
"INSERT INTO stored_configs (
config_id, display_name, created_at, updated_at, favorite, temporary
) VALUES (?1, ?2, ?3, ?4, ?5, ?6)
ON CONFLICT(config_id) DO UPDATE SET
display_name = excluded.display_name,
updated_at = excluded.updated_at,
favorite = excluded.favorite,
temporary = excluded.temporary",
params![
config_id,
display_name,
created_at,
now,
if favorite { 1 } else { 0 },
if temporary { 1 } else { 0 }
],
)
.ok()?;
// Read the row back within the transaction; fall back to the computed
// values rather than returning None after a successful write.
tx.query_row(
"SELECT config_id, display_name, created_at, updated_at, favorite, temporary
FROM stored_configs WHERE config_id = ?1",
params![config_id],
row_to_meta,
)
.optional()
.ok()
.flatten()
.map(to_meta)
.or(Some(StoredConfigMeta {
config_id,
display_name,
created_at,
updated_at: now,
favorite,
temporary,
}))
}
// Renames a stored config, bumping its updated_at timestamp. Returns None
// when the config does not exist or the update fails.
pub fn set_config_display_name(
config_id: String,
display_name: String,
) -> Option<StoredConfigMeta> {
let conn = open_db()?;
let mut record = load_meta_record(&conn, &config_id)?;
record.display_name = display_name;
record.updated_at = now_ts_string();
conn.execute(
"UPDATE stored_configs
SET display_name = ?2, updated_at = ?3
WHERE config_id = ?1",
params![config_id, record.display_name, record.updated_at],
)
.ok()?;
Some(to_meta(record))
}
/// Deletes a config's metadata row (per-field rows cascade via the foreign
/// key). Returns true only when a row actually existed and was removed.
pub fn delete_config_meta(config_id: &str) -> bool {
    let Some(conn) = open_db() else {
        return false;
    };
    let result = conn.execute(
        "DELETE FROM stored_configs WHERE config_id = ?1",
        params![config_id],
    );
    match result {
        Ok(rows) => rows > 0,
        Err(e) => {
            hilog_error!("[Rust] failed to delete config meta {}: {}", config_id, e);
            false
        }
    }
}
@@ -0,0 +1 @@
pub(crate) mod config_meta;
@@ -0,0 +1 @@
pub(crate) mod stored_config;
@@ -0,0 +1,68 @@
use napi_derive_ohos::napi;
use serde::Serialize;
/// Metadata of one stored network config (no config payload).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigMeta {
pub config_id: String,
pub display_name: String,
// Unix-seconds timestamps rendered as strings.
pub created_at: String,
pub updated_at: String,
pub favorite: bool,
pub temporary: bool,
}
/// Metadata plus the serialized NetworkConfig JSON payload.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigRecord {
pub meta: StoredConfigMeta,
pub config_json: String,
}
/// Listing wrapper returned to the NAPI layer.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigList {
pub configs: Vec<StoredConfigMeta>,
}
/// Result of exporting a config as TOML text.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct ExportTomlResult {
pub toml_text: String,
}
/// Lightweight (id, name) projection of a stored config.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigSummary {
pub config_id: String,
pub display_name: String,
}
/// Payload decoded from a share link — see share_link_service.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct SharedConfigLinkPayload {
pub config_json: String,
pub display_name: Option<String>,
pub only_start: bool,
}
/// Envelope for messages exchanged over the local kernel socket.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct LocalSocketSyncMessage {
pub message_type: String,
pub payload_json: String,
}
/// Generic string key/value pair exposed to the NAPI layer.
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct KeyValuePair {
pub key: String,
pub value: String,
}
@@ -0,0 +1,349 @@
use super::{field_store, import_export, legacy_migration, validation};
use crate::config::storage::config_meta::{
delete_config_meta, get_config_meta, init_config_meta_store, list_config_meta_entries, open_db,
upsert_config_meta_in_tx,
};
use crate::config::types::stored_config::{ExportTomlResult, StoredConfigRecord};
use easytier::common::config::ConfigLoader;
use easytier::proto::api::manage::NetworkConfig;
use ohos_hilog_binding::{hilog_debug, hilog_error};
use rusqlite::params;
use serde_json::Value;
use std::path::PathBuf;
use std::sync::Mutex;
// Process-wide root directory for config storage; set once by `init_config_store`.
static CONFIG_ROOT_DIR: Mutex<Option<PathBuf>> = Mutex::new(None);
/// Sub-directory (under the root) that held the legacy per-config JSON files.
pub(crate) const CONFIG_DIR_NAME: &str = "easytier-configs";
/// File name of the Unix domain socket served by the kernel local socket server.
pub(crate) const KERNEL_SOCKET_FILE_NAME: &str = "easytier-kernel.sock";
/// Returns a clone of the configured root directory, or `None` when it has not
/// been initialized yet (or the lock is poisoned).
pub(crate) fn config_root_dir() -> Option<PathBuf> {
    match CONFIG_ROOT_DIR.lock() {
        Ok(guard) => guard.clone(),
        Err(_) => None,
    }
}
/// Absolute path of the kernel's local socket file, if the root dir is set.
pub(crate) fn kernel_socket_path() -> Option<PathBuf> {
    let root = config_root_dir()?;
    Some(root.join(KERNEL_SOCKET_FILE_NAME))
}
/// Path of the legacy per-config JSON file for `config_id`, if the root dir is set.
pub(crate) fn legacy_config_file_path(config_id: &str) -> Option<PathBuf> {
    let root = config_root_dir();
    legacy_migration::legacy_config_file_path(&root, CONFIG_DIR_NAME, config_id)
}
/// Initializes the config repository under `root_dir`.
///
/// Creates the config sub-directory, records the root path for later lookups,
/// and initializes the sqlite-backed meta store. Returns `false` on any failure.
pub fn init_config_store(root_dir: String) -> bool {
    let root = PathBuf::from(root_dir);
    let configs_dir = root.join(CONFIG_DIR_NAME);
    // The directory is still needed for legacy-file migration and the socket file.
    if let Err(e) = std::fs::create_dir_all(&configs_dir) {
        hilog_error!(
            "[Rust] failed to create config dir {}: {}",
            configs_dir.display(),
            e
        );
        return false;
    }
    // Publish the root before touching the meta store: meta init and later
    // callers both read it through `config_root_dir`.
    match CONFIG_ROOT_DIR.lock() {
        Ok(mut guard) => {
            *guard = Some(root.clone());
        }
        Err(e) => {
            hilog_error!("[Rust] failed to lock config root dir: {}", e);
            return false;
        }
    }
    // NOTE(review): if meta-store init fails here, CONFIG_ROOT_DIR stays set —
    // presumably harmless since callers treat a `false` return as fatal; confirm.
    if !init_config_meta_store(root.to_string_lossy().into_owned()) {
        return false;
    }
    hilog_debug!(
        "[Rust] initialized config repo at {}",
        configs_dir.display()
    );
    true
}
/// Imports a legacy on-disk JSON config into the database if one still exists.
fn migrate_legacy_file_if_needed(config_id: &str) -> Option<()> {
    let root = config_root_dir();
    legacy_migration::migrate_legacy_file_if_needed(
        &root,
        CONFIG_DIR_NAME,
        config_id,
        save_config_record,
    )
}
/// Validates, normalizes, and persists a config, replacing any previous
/// version stored under `config_id`.
///
/// Flow: validate the JSON (round-trips through TOML generation), preserve the
/// existing `favorite`/`temporary` flags, write meta + exploded top-level
/// fields in one transaction, then best-effort delete the legacy JSON file.
/// Returns the saved record, or `None` on any validation or persistence failure.
pub fn save_config_record(
    config_id: String,
    display_name: String,
    config_json: String,
) -> Option<StoredConfigRecord> {
    // Reject malformed configs up front; also stamps `config_id` as instance id.
    let config = match validation::validate_config_json(&config_json, config_id.clone()) {
        Ok(config) => config,
        Err(e) => {
            hilog_error!("[Rust] save_config_record failed {}", e);
            return None;
        }
    };
    // Re-serialize so the stored JSON is the normalized form, not the caller's input.
    let normalized_json = match serde_json::to_string(&config) {
        Ok(raw) => raw,
        Err(e) => {
            hilog_error!(
                "[Rust] failed to serialize normalized config {}: {}",
                config_id,
                e
            );
            return None;
        }
    };
    // Explode the config into its top-level fields for per-field storage.
    let fields = match validation::config_to_top_level_map(&config) {
        Some(fields) => fields,
        None => return None,
    };
    let conn = open_db()?;
    let tx = conn.unchecked_transaction().ok()?;
    // Carry the user flags over from the previous version, defaulting to false.
    // NOTE(review): this read happens outside the transaction (own connection) —
    // presumably fine under single-writer use; confirm.
    let existing_meta = get_config_meta(&config_id);
    let favorite = existing_meta
        .as_ref()
        .map(|meta| meta.favorite)
        .unwrap_or(false);
    let temporary = existing_meta
        .as_ref()
        .map(|meta| meta.temporary)
        .unwrap_or(false);
    let meta = upsert_config_meta_in_tx(&tx, config_id.clone(), display_name, favorite, temporary)?;
    field_store::replace_config_fields(&tx, &config_id, fields)?;
    tx.commit().ok()?;
    // Once the DB copy is committed, the legacy JSON file is redundant.
    if let Some(legacy_path) = legacy_config_file_path(&config_id) {
        if legacy_path.exists() {
            let _ = std::fs::remove_file(legacy_path);
        }
    }
    Some(StoredConfigRecord {
        meta,
        config_json: normalized_json,
    })
}
/// Loads the stored config for `config_id` as a JSON object string.
pub fn load_config_json(config_id: &str) -> Option<String> {
    // Make sure any legacy on-disk file has been imported first.
    migrate_legacy_file_if_needed(config_id)?;
    field_store::load_config_map_from_db(config_id)
        .and_then(|object| serde_json::to_string(&Value::Object(object)).ok())
}
/// Loads both the config JSON and its metadata as a single record.
pub fn get_config_record(config_id: &str) -> Option<StoredConfigRecord> {
    // Load the JSON first: it also runs legacy migration, which may create the meta row.
    let config_json = load_config_json(config_id)?;
    get_config_meta(config_id).map(|meta| StoredConfigRecord { meta, config_json })
}
/// Returns the raw JSON text of a single top-level field, if present.
pub fn get_config_field_value(config_id: &str, field: &str) -> Option<String> {
    migrate_legacy_file_if_needed(config_id)?;
    let conn = open_db()?;
    let query = "SELECT field_json FROM stored_config_fields
         WHERE config_id = ?1 AND field_name = ?2";
    conn.query_row(query, params![config_id, field], |row| {
        row.get::<_, String>(0)
    })
    .ok()
}
/// Overwrites a single top-level field of a stored config and re-saves the
/// whole record. Returns `false` for dotted (nested) field names or on any
/// parse/persist failure.
pub fn set_config_field_value(config_id: &str, field: &str, json_value: &str) -> bool {
    // Only top-level keys are supported; reject dotted paths outright.
    if field.contains('.') {
        return false;
    }
    // Build the updated JSON text; any failure short-circuits to None.
    let updated = (|| {
        let raw = load_config_json(config_id)?;
        let mut value = serde_json::from_str::<Value>(&raw).ok()?;
        let new_field_value = serde_json::from_str::<Value>(json_value).ok()?;
        value.as_object_mut()?.insert(field.to_string(), new_field_value);
        serde_json::to_string(&value).ok()
    })();
    let Some(normalized) = updated else {
        return false;
    };
    // Keep the existing display name; fall back to the id for unnamed configs.
    let display_name = get_config_meta(config_id)
        .map_or_else(|| config_id.to_string(), |meta| meta.display_name);
    save_config_record(config_id.to_string(), display_name, normalized).is_some()
}
/// Display name recorded for `config_id`, if metadata exists.
pub fn get_display_name(config_id: &str) -> Option<String> {
    get_config_meta(config_id).map(|meta| meta.display_name)
}
/// Default network config as a JSON string — the template for new configs.
pub fn get_default_config_json() -> Option<String> {
    crate::build_default_network_config_json().ok()
}
/// Creates and persists a fresh config from the default template, using
/// `config_id` as its instance id.
pub fn create_config_record(config_id: String, display_name: String) -> Option<StoredConfigRecord> {
    let default_json = get_default_config_json()?;
    let mut config: NetworkConfig = serde_json::from_str(&default_json).ok()?;
    config.instance_id = Some(config_id.clone());
    serde_json::to_string(&config)
        .ok()
        .and_then(|normalized_json| save_config_record(config_id, display_name, normalized_json))
}
/// Starts a network instance from the stored config; `false` when the config
/// is missing or unreadable.
pub fn start_kernel_with_config_id(config_id: &str) -> bool {
    match load_config_json(config_id) {
        Some(raw) => crate::run_network_instance_from_json(&raw),
        None => false,
    }
}
/// All stored config metadata entries as a JSON array string ("[]" on error).
pub fn list_config_meta_json() -> String {
    let entries = list_config_meta_entries();
    match serde_json::to_string(&entries.configs) {
        Ok(json) => json,
        Err(_) => "[]".to_string(),
    }
}
/// Deletes a stored config: its legacy JSON file (best-effort), its exploded
/// fields, and its metadata row. Returns `false` if the DB deletes fail.
pub fn delete_config_record(config_id: &str) -> bool {
    // Remove the legacy file first; failure here is ignored on purpose.
    if let Some(path) = legacy_config_file_path(config_id) {
        if path.exists() {
            let _ = std::fs::remove_file(path);
        }
    }
    let conn = match open_db() {
        Some(conn) => conn,
        None => return false,
    };
    // NOTE(review): fields and meta are deleted in two separate statements
    // without a transaction — a failure in `delete_config_meta` leaves the
    // fields already gone; confirm this partial state is acceptable.
    if let Err(e) = conn.execute(
        "DELETE FROM stored_config_fields WHERE config_id = ?1",
        params![config_id],
    ) {
        hilog_error!("[Rust] failed to delete config fields {}: {}", config_id, e);
        return false;
    }
    delete_config_meta(config_id)
}
/// Exports a stored config as TOML text (loads the record, then delegates).
pub fn export_config_toml(config_id: &str) -> Option<ExportTomlResult> {
    let record = get_config_record(config_id)?;
    import_export::export_config_toml_from_record(&record)
}
/// Imports TOML text as a stored config; `display_name` overrides the name
/// derived from the TOML when provided and non-empty.
pub fn import_toml_config(
    toml_text: String,
    display_name: Option<String>,
) -> Option<StoredConfigRecord> {
    import_export::import_toml_to_record(toml_text, display_name, save_config_record)
}
#[cfg(test)]
mod tests {
    use super::*;
    use rusqlite::params;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};
    // Builds a unique temp-dir path per test run so parallel tests don't collide.
    fn test_root() -> String {
        let unique = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let dir = std::env::temp_dir().join(format!("easytier_ohrs_test_{}", unique));
        dir.to_string_lossy().into_owned()
    }
    // End-to-end: save a config, read it back, verify sqlite-only persistence,
    // export it to TOML, then delete it.
    #[test]
    fn save_get_export_delete_roundtrip() {
        let root = test_root();
        assert!(init_config_store(root.clone()));
        let config_json = crate::build_default_network_config_json().expect("default config");
        let saved = save_config_record("cfg-1".to_string(), "test-config".to_string(), config_json)
            .expect("save config");
        assert_eq!(saved.meta.config_id, "cfg-1");
        assert_eq!(saved.meta.display_name, "test-config");
        let loaded = get_config_record("cfg-1").expect("load config");
        assert_eq!(loaded.meta.display_name, "test-config");
        // The instance id is stamped into the stored JSON by validation.
        assert!(loaded.config_json.contains("cfg-1"));
        // The legacy per-config JSON file must not be (re)created by a save.
        let legacy_json_path = PathBuf::from(&root)
            .join(CONFIG_DIR_NAME)
            .join("cfg-1.json");
        assert!(
            !legacy_json_path.exists(),
            "config should no longer be persisted as a per-config json file"
        );
        // Top-level fields are exploded into their own sqlite rows.
        let conn = open_db().expect("db should be open");
        let field_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM stored_config_fields WHERE config_id = ?1",
                params!["cfg-1"],
                |row| row.get(0),
            )
            .expect("count config fields");
        assert!(field_count > 0, "config fields should be stored in sqlite");
        let exported = export_config_toml("cfg-1").expect("export toml");
        assert!(exported.toml_text.contains("instance_id"));
        assert!(delete_config_record("cfg-1"));
        assert!(get_config_record("cfg-1").is_none());
    }
    // Mutating one top-level field must leave every other field untouched.
    #[test]
    fn set_config_field_updates_only_requested_top_level_field() {
        let root = test_root();
        assert!(init_config_store(root));
        let config_json = crate::build_default_network_config_json().expect("default config");
        save_config_record(
            "cfg-field".to_string(),
            "field-config".to_string(),
            config_json,
        )
        .expect("save config");
        let before_network_name = get_config_field_value("cfg-field", "network_name");
        let before_instance_id = get_config_field_value("cfg-field", "instance_id")
            .expect("instance id field should exist");
        assert!(set_config_field_value(
            "cfg-field",
            "network_name",
            "\"changed-network\""
        ));
        assert_eq!(
            get_config_field_value("cfg-field", "network_name"),
            Some("\"changed-network\"".to_string())
        );
        // Unrelated field is preserved byte-for-byte.
        assert_eq!(
            get_config_field_value("cfg-field", "instance_id"),
            Some(before_instance_id)
        );
        assert_ne!(
            get_config_field_value("cfg-field", "network_name"),
            before_network_name
        );
    }
}
@@ -0,0 +1,67 @@
use crate::config::storage::config_meta::{now_ts_string, open_db};
use ohos_hilog_binding::hilog_error;
use rusqlite::{Connection, params};
use serde_json::{Map, Value};
/// Reassembles a config's top-level fields from sqlite into one JSON object.
///
/// Returns `None` on any DB/parse error or when no fields exist for the id —
/// callers treat "no fields" the same as "config not found".
pub(super) fn load_config_map_from_db(config_id: &str) -> Option<Map<String, Value>> {
    let conn = open_db()?;
    let mut stmt = conn
        .prepare(
            "SELECT field_name, field_json
             FROM stored_config_fields
             WHERE config_id = ?1",
        )
        .ok()?;
    let rows = stmt
        .query_map(params![config_id], |row| {
            let field_name: String = row.get(0)?;
            let field_json: String = row.get(1)?;
            Ok((field_name, field_json))
        })
        .ok()?;
    let mut object = Map::new();
    for row in rows {
        // A single bad row aborts the whole load rather than returning a partial config.
        let (field_name, field_json) = row.ok()?;
        let value = serde_json::from_str::<Value>(&field_json).ok()?;
        object.insert(field_name, value);
    }
    if object.is_empty() {
        None
    } else {
        Some(object)
    }
}
/// Replaces all stored fields of `config_id` with `fields`, inside the
/// caller-provided transaction (delete-then-insert). Returns `None` on any
/// statement failure so the caller can roll the transaction back.
pub(super) fn replace_config_fields(
    tx: &Connection,
    config_id: &str,
    fields: Map<String, Value>,
) -> Option<()> {
    // Clear the previous field set first so removed fields don't linger.
    if let Err(e) = tx.execute(
        "DELETE FROM stored_config_fields WHERE config_id = ?1",
        params![config_id],
    ) {
        hilog_error!(
            "[Rust] failed to clear existing config fields {}: {}",
            config_id,
            e
        );
        return None;
    }
    for (field_name, value) in fields {
        let field_json = serde_json::to_string(&value).ok()?;
        if let Err(e) = tx.execute(
            "INSERT INTO stored_config_fields (config_id, field_name, field_json, updated_at)
             VALUES (?1, ?2, ?3, ?4)",
            params![config_id, field_name, field_json, now_ts_string()],
        ) {
            hilog_error!("[Rust] failed to persist config field {}: {}", config_id, e);
            return None;
        }
    }
    Some(())
}
@@ -0,0 +1,48 @@
use crate::config::types::stored_config::{ExportTomlResult, StoredConfigRecord};
use easytier::common::config::{ConfigLoader, TomlConfigLoader};
use easytier::proto::api::manage::NetworkConfig;
/// Renders a stored record back into TOML text via the config generator.
pub(super) fn export_config_toml_from_record(
    record: &StoredConfigRecord,
) -> Option<ExportTomlResult> {
    let config: NetworkConfig = serde_json::from_str(&record.config_json).ok()?;
    config.gen_config().ok().map(|toml| ExportTomlResult {
        toml_text: toml.dump(),
    })
}
/// Best-effort extraction of the top-level `instance_name = ...` value from
/// raw TOML, used only to derive a display name (the config itself is parsed
/// by the real TOML loader). Returns `None` when the key is absent or empty.
fn instance_name_from_toml(toml_text: &str) -> Option<String> {
    toml_text
        .lines()
        .find_map(|line| {
            let (key, value) = line.split_once('=')?;
            // Match the key exactly: the previous prefix check also accepted
            // keys such as `instance_name_suffix`.
            if key.trim() != "instance_name" {
                return None;
            }
            let value = value.trim();
            // Take only the quoted payload when present, so a trailing
            // `# comment` is not swallowed into the name.
            let name = if let Some(rest) = value.strip_prefix('"') {
                rest.split('"').next().unwrap_or("")
            } else if let Some(rest) = value.strip_prefix('\'') {
                rest.split('\'').next().unwrap_or("")
            } else {
                value.split('#').next().unwrap_or("").trim()
            };
            Some(name.to_string())
        })
        .filter(|name| !name.is_empty())
}
/// Parses TOML text into a `NetworkConfig`, derives a display name, and
/// persists it through the injected `save_config_record`.
///
/// The instance id embedded in the TOML becomes the stored config id; the
/// display name preference is caller-supplied > `instance_name` in the TOML >
/// the config id itself. Returns `None` on parse/serialize/save failure.
pub(super) fn import_toml_to_record(
    toml_text: String,
    display_name: Option<String>,
    save_config_record: impl Fn(String, String, String) -> Option<StoredConfigRecord>,
) -> Option<StoredConfigRecord> {
    let config =
        NetworkConfig::new_from_config(TomlConfigLoader::new_from_str(&toml_text).ok()?).ok()?;
    let config_id = config.instance_id.clone()?;
    let final_name = display_name
        .filter(|name| !name.is_empty())
        .or_else(|| instance_name_from_toml(&toml_text))
        .unwrap_or_else(|| config_id.clone());
    let config_json = serde_json::to_string(&config).ok()?;
    save_config_record(config_id, final_name, config_json)
}
@@ -0,0 +1,45 @@
use crate::config::storage::config_meta::get_config_meta;
use ohos_hilog_binding::hilog_error;
use std::path::PathBuf;
/// Builds `<root>/<config_dir_name>/<config_id>.json`, or `None` when no root
/// directory has been configured.
pub(super) fn legacy_config_file_path(
    root_dir: &Option<PathBuf>,
    config_dir_name: &str,
    config_id: &str,
) -> Option<PathBuf> {
    let root = root_dir.as_ref()?;
    let file_name = format!("{}.json", config_id);
    Some(root.join(config_dir_name).join(file_name))
}
/// Moves a legacy per-config JSON file into the database-backed store.
///
/// Returns `Some(())` when there is nothing to migrate or migration succeeded,
/// `None` when reading or saving the legacy content failed. Removing the old
/// file is best-effort: a failed delete is only logged.
pub(super) fn migrate_legacy_file_if_needed(
    root_dir: &Option<PathBuf>,
    config_dir_name: &str,
    config_id: &str,
    save_config_record: impl Fn(
        String,
        String,
        String,
    ) -> Option<crate::config::types::stored_config::StoredConfigRecord>,
) -> Option<()> {
    let legacy_path = legacy_config_file_path(root_dir, config_dir_name, config_id)?;
    if !legacy_path.exists() {
        // Nothing on disk: already migrated, or never existed.
        return Some(());
    }
    let raw = std::fs::read_to_string(&legacy_path).ok()?;
    // Reuse the recorded display name when we have one; fall back to the id.
    let display_name = match get_config_meta(config_id) {
        Some(meta) => meta.display_name,
        None => config_id.to_string(),
    };
    save_config_record(config_id.to_string(), display_name, raw)?;
    if let Err(e) = std::fs::remove_file(&legacy_path) {
        hilog_error!(
            "[Rust] failed to remove legacy config file {}: {}",
            legacy_path.display(),
            e
        );
    }
    Some(())
}
@@ -0,0 +1,30 @@
use easytier::proto::api::manage::NetworkConfig;
use serde_json::{Map, Value};
/// Stamps `requested_id` into the config as its instance id; errors when the
/// requested id is empty.
pub(super) fn normalize_config_id(
    mut config: NetworkConfig,
    requested_id: String,
) -> Result<NetworkConfig, String> {
    if requested_id.is_empty() {
        Err("config_id is required".to_string())
    } else {
        config.instance_id = Some(requested_id);
        Ok(config)
    }
}
/// Parses and sanity-checks a config JSON string.
///
/// The input must deserialize into a `NetworkConfig`, accept `config_id` as
/// its instance id, and successfully generate a TOML config.
pub(super) fn validate_config_json(
    config_json: &str,
    config_id: String,
) -> Result<NetworkConfig, String> {
    let parsed: NetworkConfig = serde_json::from_str(config_json)
        .map_err(|e| format!("parse config json failed: {}", e))?;
    let config = normalize_config_id(parsed, config_id)?;
    // Generating the TOML is the actual validation step; its output is discarded.
    if let Err(e) = config.gen_config() {
        return Err(format!("generate toml failed: {}", e));
    }
    Ok(config)
}
pub(super) fn config_to_top_level_map(config: &NetworkConfig) -> Option<Map<String, Value>> {
serde_json::to_value(config).ok()?.as_object().cloned()
}
@@ -0,0 +1,2 @@
pub(crate) mod config_api;
pub(crate) mod runtime_api;
@@ -0,0 +1,46 @@
use crate::config;
/// Initializes the config repository rooted at `root_dir`.
pub(crate) fn init_config_store(root_dir: String) -> bool {
    config::repository::init_config_store(root_dir)
}
/// All stored config metadata entries as a JSON array string.
pub(crate) fn list_configs() -> String {
    config::repository::list_config_meta_json()
}
/// Saves (create or replace) a config; `true` on success.
pub(crate) fn save_config(config_id: String, display_name: String, config_json: String) -> bool {
    config::repository::save_config_record(config_id, display_name, config_json).is_some()
}
/// Creates a new config from the default template; `true` on success.
pub(crate) fn create_config(config_id: String, display_name: String) -> bool {
    config::repository::create_config_record(config_id, display_name).is_some()
}
/// Deletes a stored config (fields + meta + legacy file).
pub(crate) fn delete_stored_config_meta(config_id: String) -> bool {
    config::repository::delete_config_record(&config_id)
}
/// Loads a stored config as a JSON string.
pub(crate) fn get_config(config_id: String) -> Option<String> {
    config::repository::load_config_json(&config_id)
}
/// Default config template as a JSON string.
pub(crate) fn get_default_config() -> Option<String> {
    config::repository::get_default_config_json()
}
/// Raw JSON text of one top-level field of a stored config.
pub(crate) fn get_config_field(config_id: String, field: String) -> Option<String> {
    config::repository::get_config_field_value(&config_id, &field)
}
/// Overwrites one top-level field of a stored config with `json_value`.
pub(crate) fn set_config_field(config_id: String, field: String, json_value: String) -> bool {
    config::repository::set_config_field_value(&config_id, &field, &json_value)
}
/// Imports TOML text as a stored config, returning the new config id.
pub(crate) fn import_toml(toml_text: String, display_name: Option<String>) -> Option<String> {
    config::repository::import_toml_config(toml_text, display_name)
        .map(|record| record.meta.config_id)
}
/// Exports a stored config as TOML text.
pub(crate) fn export_toml(config_id: String) -> Option<String> {
    config::repository::export_config_toml(&config_id).map(|ret| ret.toml_text)
}
@@ -0,0 +1,184 @@
use crate::config::repository::load_config_json;
use crate::config::storage::config_meta::get_config_display_name;
use crate::config::types::stored_config::KeyValuePair;
use crate::kernel_bridge::{
aggregate_requested_tun_routes, start_local_socket_server as start_local_socket_server_inner,
stop_local_socket_server as stop_local_socket_server_inner,
};
use crate::runtime::state::runtime_state::{
RuntimeAggregateState, TunAggregateState, clear_tun_attached, mark_tun_attached,
runtime_instance_from_running_info,
};
use crate::{ASYNC_RUNTIME, EASYTIER_VERSION, INSTANCE_MANAGER, WEB_CLIENTS};
use easytier::proto::api::manage::NetworkConfig;
use ohos_hilog_binding::{hilog_error, hilog_info};
use std::sync::Arc;
/// Starts the kernel for `config_id` via the injected starter callback.
pub(crate) fn start_kernel(
    config_id: String,
    start_kernel_with_config_id: impl Fn(&str) -> bool,
) -> bool {
    let id = config_id;
    start_kernel_with_config_id(id.as_str())
}
/// Stops the instance identified by `config_id`.
///
/// Order matters: the TUN-attached flag is cleared first, then web-client
/// managed instances are handled by `stop_web_client` (early return), and only
/// locally managed instances go through the instance manager. The local socket
/// server is shut down afterwards when no instance needs it anymore.
pub(crate) fn stop_kernel(
    config_id: String,
    stop_web_client: impl Fn(&str) -> bool,
    parse_instance_uuid: impl Fn(&str) -> Option<uuid::Uuid>,
    maybe_stop_local_socket_server: impl Fn(),
) -> bool {
    clear_tun_attached(&config_id);
    // Web-client managed instances are stopped through their client, not the manager.
    if stop_web_client(&config_id) {
        return true;
    }
    let Some(instance_id) = parse_instance_uuid(&config_id) else {
        return false;
    };
    let ret = INSTANCE_MANAGER
        .delete_network_instance(vec![instance_id])
        .map(|_| true)
        .unwrap_or_else(|err| {
            hilog_error!("[Rust] stop_kernel failed {}: {}", config_id, err);
            false
        });
    // Best-effort: tears the socket server down only if nothing still uses it.
    maybe_stop_local_socket_server();
    ret
}
/// Stops every instance in `config_ids`; returns `true` only when all stops
/// succeed. Every id is attempted even after an earlier failure.
pub(crate) fn stop_network_instance(
    config_ids: Vec<String>,
    stop_kernel: impl Fn(String) -> bool,
) -> bool {
    config_ids
        .into_iter()
        // `stop_kernel` is evaluated first so no id is skipped by short-circuiting.
        .fold(true, |all_ok, config_id| stop_kernel(config_id) && all_ok)
}
/// Collects per-instance network info as `(instance_id, info_json)` pairs for
/// the ArkTS side. Instances whose info fails to serialize are silently
/// skipped; a collection error yields an empty list.
pub(crate) fn collect_network_infos() -> Vec<KeyValuePair> {
    let infos = match INSTANCE_MANAGER.collect_network_infos_sync() {
        Ok(infos) => infos,
        Err(err) => {
            hilog_error!("[Rust] collect network infos failed {}", err);
            return vec![];
        }
    };
    infos
        .into_iter()
        .filter_map(|(key, value)| {
            serde_json::to_string(&value)
                .ok()
                .map(|value_json| KeyValuePair {
                    key: key.to_string(),
                    value: value_json,
                })
        })
        .collect()
}
/// Attaches an OS TUN file descriptor to the running instance identified by
/// `config_id`, and marks the instance as TUN-attached on success.
pub(crate) fn set_tun_fd(
    config_id: String,
    fd: i32,
    parse_instance_uuid: impl Fn(&str) -> Option<uuid::Uuid>,
) -> bool {
    let instance_id = match parse_instance_uuid(&config_id) {
        Some(id) => id,
        None => {
            hilog_error!("[Rust] set_tun_fd invalid instance id: {}", config_id);
            return false;
        }
    };
    match INSTANCE_MANAGER.set_tun_fd(&instance_id, fd) {
        Ok(_) => {
            mark_tun_attached(&config_id);
            hilog_info!(
                "[Rust] set_tun_fd success instance={} fd={} marked_attached=true",
                config_id,
                fd
            );
            true
        }
        Err(err) => {
            hilog_error!("[Rust] set_tun_fd failed {}: {}", config_id, err);
            false
        }
    }
}
/// Public entry point for the aggregated runtime state snapshot.
pub(crate) fn get_runtime_snapshot() -> RuntimeAggregateState {
    get_runtime_snapshot_inner()
}
/// Builds the aggregated runtime state: one entry per running instance plus a
/// combined TUN view (attached instances, union of requested routes).
///
/// On a collection failure an empty snapshot is returned instead of an error,
/// so callers always receive a well-formed state.
pub(crate) fn get_runtime_snapshot_inner() -> RuntimeAggregateState {
    let infos = match INSTANCE_MANAGER.collect_network_infos_sync() {
        Ok(infos) => infos,
        Err(err) => {
            hilog_error!("[Rust] collect network infos failed {}", err);
            return RuntimeAggregateState {
                instances: vec![],
                tun: TunAggregateState {
                    active: false,
                    attached_instance_ids: vec![],
                    aggregated_routes: vec![],
                    dns_servers: vec![],
                    need_rebuild: false,
                },
                running_instance_count: 0,
            };
        }
    };
    let mut instances = Vec::with_capacity(infos.len());
    for (instance_uuid, info) in infos {
        let config_id = instance_uuid.to_string();
        // Fall back to the id when no display name was ever stored.
        let display_name = get_config_display_name(&config_id).unwrap_or_else(|| config_id.clone());
        // The stored config supplies flags (magic DNS, exit nodes) that the
        // live running info does not carry.
        let config_json = load_config_json(&config_id);
        let stored_config = config_json
            .as_deref()
            .and_then(|raw| serde_json::from_str::<NetworkConfig>(raw).ok());
        let magic_dns_enabled = stored_config
            .as_ref()
            .and_then(|cfg| cfg.enable_magic_dns)
            .unwrap_or(false);
        let need_exit_node = stored_config
            .as_ref()
            .map(|cfg| !cfg.exit_nodes.is_empty())
            .unwrap_or(false);
        instances.push(runtime_instance_from_running_info(
            config_id,
            display_name,
            magic_dns_enabled,
            need_exit_node,
            info,
        ));
    }
    // Deterministic ordering: by display name, then instance id as tiebreaker.
    instances.sort_by(|a, b| {
        a.display_name
            .cmp(&b.display_name)
            .then_with(|| a.instance_id.cmp(&b.instance_id))
    });
    let attached_instance_ids = instances
        .iter()
        .filter(|instance| instance.tun_required)
        .map(|instance| instance.instance_id.clone())
        .collect::<Vec<_>>();
    let aggregated_routes = aggregate_requested_tun_routes(&instances);
    let running_instance_count =
        instances.iter().filter(|instance| instance.running).count() as i32;
    // TUN is considered active as soon as any instance requires it.
    let tun_active = !attached_instance_ids.is_empty();
    RuntimeAggregateState {
        instances,
        tun: TunAggregateState {
            active: tun_active,
            attached_instance_ids,
            aggregated_routes,
            dns_servers: vec![],
            need_rebuild: false,
        },
        running_instance_count,
    }
}
@@ -0,0 +1,6 @@
mod protocol;
mod routing;
mod socket_server;
pub(crate) use routing::aggregate_requested_tun_routes;
pub use socket_server::{start_local_socket_server, stop_local_socket_server};
@@ -0,0 +1,50 @@
use crate::config::types::stored_config::LocalSocketSyncMessage;
use serde::Serialize;
use std::io::{Error, ErrorKind, Write};
use std::os::unix::net::UnixStream;
/// Payload of a `tun_request` message asking the ArkTS side to (re)build the
/// TUN device for one instance; serialized as camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TunRequestPayload {
    pub config_id: String,
    pub instance_id: String,
    pub display_name: String,
    /// Instance's virtual IPv4 address, when already assigned.
    pub virtual_ipv4: Option<String>,
    /// Virtual IPv4 address in CIDR form, when already assigned.
    pub virtual_ipv4_cidr: Option<String>,
    /// Simplified union of routes the TUN device should carry.
    pub aggregated_routes: Vec<String>,
    pub magic_dns_enabled: bool,
    pub need_exit_node: bool,
}
/// Serializes one sync message as a single newline-terminated JSON line and
/// writes it to `stream`. Serialization failures surface as `InvalidData`.
pub(crate) fn send_local_socket_message(
    stream: &mut UnixStream,
    message_type: &str,
    payload_json: String,
) -> std::io::Result<()> {
    let message = LocalSocketSyncMessage {
        message_type: message_type.to_string(),
        payload_json,
    };
    let mut frame = serde_json::to_vec(&message)
        .map_err(|err| Error::new(ErrorKind::InvalidData, err.to_string()))?;
    // One message per line: the reader splits frames on '\n'.
    frame.push(b'\n');
    stream.write_all(&frame)
}
/// Sends a message to every connected client, dropping clients whose write
/// fails. Returns `true` when at least one client received the message.
pub(crate) fn broadcast_local_socket_message(
    clients: &mut Vec<UnixStream>,
    message_type: &str,
    payload_json: &str,
) -> bool {
    let mut delivered = false;
    // Keep only the clients the send succeeded for (disconnected peers drop out).
    clients.retain_mut(|client| {
        let sent =
            send_local_socket_message(client, message_type, payload_json.to_string()).is_ok();
        delivered = delivered || sent;
        sent
    });
    delivered
}
@@ -0,0 +1,105 @@
use crate::config::repository::load_config_json;
use crate::runtime::state::runtime_state::RuntimeInstanceState;
use easytier::proto::api::manage::NetworkConfig;
use ipnet::IpNet;
use ohos_hilog_binding::hilog_debug;
use std::collections::HashSet;
use std::net::IpAddr;
/// Reads the manually configured routes of a stored config; empty when the
/// config is missing or unparsable.
pub(crate) fn load_manual_routes(config_id: &str) -> Vec<String> {
    let parsed = load_config_json(config_id)
        .and_then(|raw| serde_json::from_str::<NetworkConfig>(&raw).ok());
    match parsed {
        Some(config) => config.routes,
        None => Vec::new(),
    }
}
/// Normalizes a route string to a canonical CIDR: networks are truncated to
/// their prefix; bare addresses become host routes (/32 or /128).
/// Returns `None` for strings that are neither a CIDR nor an IP address.
fn normalize_route_cidr(route: &str) -> Option<String> {
    if let Ok(network) = route.parse::<IpNet>() {
        let truncated = match network {
            IpNet::V4(net) => net.trunc().to_string(),
            IpNet::V6(net) => net.trunc().to_string(),
        };
        return Some(truncated);
    }
    match route.parse::<IpAddr>() {
        Ok(IpAddr::V4(ip)) => Some(format!("{}/32", ip)),
        Ok(IpAddr::V6(ip)) => Some(format!("{}/128", ip)),
        Err(_) => None,
    }
}
/// Normalizes, de-duplicates, and minimizes a route list: any route fully
/// covered by another (equal-or-shorter prefix containing its network) is
/// dropped, so the result is a minimal covering set in deterministic order.
fn simplify_routes(routes: Vec<String>) -> Vec<String> {
    let mut parsed = routes
        .into_iter()
        .filter_map(|route| normalize_route_cidr(&route))
        .filter_map(|route| route.parse::<IpNet>().ok())
        .collect::<Vec<_>>();
    // Shorter prefixes (supernets) first, then by network address for stability.
    parsed.sort_by(|left, right| {
        left.prefix_len()
            .cmp(&right.prefix_len())
            .then_with(|| left.network().to_string().cmp(&right.network().to_string()))
    });
    let mut simplified = Vec::<IpNet>::new();
    'outer: for route in parsed {
        // Skip routes already covered by an accepted supernet (or duplicate).
        for existing in &simplified {
            if existing.contains(&route.network()) && existing.prefix_len() <= route.prefix_len() {
                continue 'outer;
            }
        }
        // Drop accepted routes that this one covers (defensive: the sort order
        // makes this rare, but duplicates/equal prefixes can still occur).
        simplified.retain(|existing| {
            !(route.contains(&existing.network()) && route.prefix_len() <= existing.prefix_len())
        });
        simplified.push(route);
    }
    // Final textual de-dup while preserving insertion order.
    let mut seen = HashSet::new();
    simplified
        .into_iter()
        .map(|route| route.to_string())
        .filter(|route| seen.insert(route.clone()))
        .collect()
}
/// Computes the route set one instance wants on the TUN device: its own
/// virtual IPv4 CIDR, the manually configured routes from its stored config,
/// and all proxy CIDRs advertised by its peers — simplified into a minimal set.
pub(crate) fn aggregate_tun_routes(instance: &RuntimeInstanceState) -> Vec<String> {
    let virtual_ipv4_cidr = instance
        .my_node_info
        .as_ref()
        .and_then(|info| info.virtual_ipv4_cidr.clone());
    let manual_routes = load_manual_routes(&instance.config_id);
    let proxy_cidrs = instance
        .routes
        .iter()
        .flat_map(|route| route.proxy_cidrs.iter().cloned())
        .collect::<Vec<_>>();
    let mut raw_routes = Vec::new();
    if let Some(cidr) = virtual_ipv4_cidr.clone() {
        raw_routes.push(cidr);
    }
    raw_routes.extend(manual_routes.iter().cloned());
    raw_routes.extend(proxy_cidrs.iter().cloned());
    let aggregated_routes = simplify_routes(raw_routes);
    hilog_debug!(
        "[Rust] aggregate_tun_routes instance={} proxy_cidrs={:?} aggregated_routes={:?}",
        instance.instance_id,
        proxy_cidrs,
        aggregated_routes
    );
    aggregated_routes
}
pub(crate) fn aggregate_requested_tun_routes(instances: &[RuntimeInstanceState]) -> Vec<String> {
let mut aggregated_routes = Vec::new();
let mut seen_routes = HashSet::new();
for instance in instances.iter().filter(|instance| instance.tun_required) {
for route in aggregate_tun_routes(instance) {
if seen_routes.insert(route.clone()) {
aggregated_routes.push(route);
}
}
}
aggregated_routes
}
@@ -0,0 +1,196 @@
use super::protocol::{TunRequestPayload, broadcast_local_socket_message};
use crate::config::repository::kernel_socket_path;
use crate::get_runtime_snapshot_inner;
use crate::kernel_bridge::routing::aggregate_tun_routes;
use ohos_hilog_binding::{hilog_error, hilog_info};
use once_cell::sync::Lazy;
use std::collections::{HashMap, HashSet};
use std::io::ErrorKind;
use std::os::unix::net::{UnixListener, UnixStream};
use std::path::PathBuf;
use std::sync::Mutex;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread::{self, JoinHandle};
use std::time::Duration;
/// Book-keeping for the running local socket server.
struct LocalSocketState {
    // Set to true to ask the worker loop to exit.
    stop_flag: std::sync::Arc<AtomicBool>,
    // Socket file to unlink on shutdown.
    socket_path: PathBuf,
    // Worker thread handle, joined on stop.
    worker: JoinHandle<()>,
}
// Singleton state: `Some` while the server is running.
static LOCAL_SOCKET_STATE: Lazy<Mutex<Option<LocalSocketState>>> = Lazy::new(|| Mutex::new(None));
/// Starts the background Unix-socket server that streams runtime snapshots
/// and TUN requests to connected ArkTS clients.
///
/// Returns `true` if a server is running after the call (newly started or
/// already running), `false` on setup failure.
pub fn start_local_socket_server() -> bool {
    let socket_path = match kernel_socket_path() {
        Some(path) => path,
        None => {
            hilog_error!("[Rust] kernel socket path unavailable");
            return false;
        }
    };
    // Hold the state lock across the whole setup. The previous check-release-
    // relock sequence let two concurrent callers both pass the "already
    // running" check and race to bind the socket, and a failed second lock
    // leaked an untracked worker thread.
    let mut guard = match LOCAL_SOCKET_STATE.lock() {
        Ok(guard) => guard,
        Err(err) => {
            hilog_error!("[Rust] lock localsocket state failed: {}", err);
            return false;
        }
    };
    if guard.is_some() {
        // Already running.
        return true;
    }
    // Remove a stale socket file left behind by a previous run.
    if socket_path.exists() {
        let _ = std::fs::remove_file(&socket_path);
    }
    let listener = match UnixListener::bind(&socket_path) {
        Ok(listener) => listener,
        Err(err) => {
            hilog_error!(
                "[Rust] bind localsocket failed {}: {}",
                socket_path.display(),
                err
            );
            return false;
        }
    };
    // Non-blocking accept lets the worker poll connections and state in one loop.
    if let Err(err) = listener.set_nonblocking(true) {
        hilog_error!("[Rust] set localsocket nonblocking failed: {}", err);
        let _ = std::fs::remove_file(&socket_path);
        return false;
    }
    let stop_flag = std::sync::Arc::new(AtomicBool::new(false));
    let worker_stop_flag = stop_flag.clone();
    let worker = thread::spawn(move || {
        let mut last_snapshot_json = String::new();
        let mut delivered_tun_requests = HashSet::new();
        let mut last_tun_route_signatures = HashMap::<String, String>::new();
        let mut clients = Vec::<UnixStream>::new();
        while !worker_stop_flag.load(Ordering::Relaxed) {
            // Drain every pending connection (listener is non-blocking).
            let mut accepted_client = false;
            loop {
                match listener.accept() {
                    Ok((stream, _addr)) => {
                        accepted_client = true;
                        clients.push(stream);
                    }
                    Err(err) if err.kind() == ErrorKind::WouldBlock => break,
                    Err(err) => {
                        hilog_error!("[Rust] accept localsocket failed: {}", err);
                        break;
                    }
                }
            }
            let snapshot = get_runtime_snapshot_inner();
            let snapshot_json = match serde_json::to_string(&snapshot) {
                Ok(json) => json,
                Err(err) => {
                    hilog_error!("[Rust] serialize runtime snapshot failed: {}", err);
                    thread::sleep(Duration::from_millis(250));
                    continue;
                }
            };
            // Broadcast when the snapshot changed, or immediately to new clients.
            if accepted_client || snapshot_json != last_snapshot_json {
                let _ = broadcast_local_socket_message(
                    &mut clients,
                    "runtime_snapshot",
                    &snapshot_json,
                );
                last_snapshot_json = snapshot_json;
            }
            for instance in snapshot.instances.iter() {
                if instance.running && instance.tun_required {
                    let virtual_ipv4 = instance
                        .my_node_info
                        .as_ref()
                        .and_then(|info| info.virtual_ipv4.clone());
                    let virtual_ipv4_cidr = instance
                        .my_node_info
                        .as_ref()
                        .and_then(|info| info.virtual_ipv4_cidr.clone());
                    if clients.is_empty() {
                        continue;
                    }
                    // Wait until the instance has an address before requesting a TUN.
                    if virtual_ipv4.is_none() || virtual_ipv4_cidr.is_none() {
                        continue;
                    }
                    let aggregated_routes = aggregate_tun_routes(instance);
                    let route_signature = serde_json::to_string(&aggregated_routes)
                        .unwrap_or_else(|_| "[]".to_string());
                    // Re-send only on first delivery or when the route set changed.
                    let should_send = !delivered_tun_requests.contains(&instance.instance_id)
                        || last_tun_route_signatures
                            .get(&instance.instance_id)
                            .map(|value| value != &route_signature)
                            .unwrap_or(true);
                    if !should_send {
                        continue;
                    }
                    let payload = TunRequestPayload {
                        config_id: instance.config_id.clone(),
                        instance_id: instance.instance_id.clone(),
                        display_name: instance.display_name.clone(),
                        virtual_ipv4,
                        virtual_ipv4_cidr,
                        aggregated_routes,
                        magic_dns_enabled: instance.magic_dns_enabled,
                        need_exit_node: instance.need_exit_node,
                    };
                    let payload_json = match serde_json::to_string(&payload) {
                        Ok(json) => json,
                        Err(err) => {
                            hilog_error!("[Rust] serialize tun request failed: {}", err);
                            continue;
                        }
                    };
                    // Mark as delivered only when at least one client got it.
                    if broadcast_local_socket_message(&mut clients, "tun_request", &payload_json) {
                        delivered_tun_requests.insert(instance.instance_id.clone());
                        last_tun_route_signatures
                            .insert(instance.instance_id.clone(), route_signature);
                    }
                } else {
                    // Not running / no TUN needed: allow a future re-request.
                    delivered_tun_requests.remove(&instance.instance_id);
                    last_tun_route_signatures.remove(&instance.instance_id);
                }
            }
            thread::sleep(Duration::from_millis(250));
        }
    });
    // Still under the lock: the worker is always recorded and thus stoppable.
    *guard = Some(LocalSocketState {
        stop_flag,
        socket_path,
        worker,
    });
    true
}
/// Stops the local socket server if one is running. Returns `true` unless the
/// state lock is poisoned; stopping an already-stopped server is a no-op.
pub fn stop_local_socket_server() -> bool {
    let taken = match LOCAL_SOCKET_STATE.lock() {
        Ok(mut guard) => guard.take(),
        Err(err) => {
            hilog_error!("[Rust] lock localsocket state failed: {}", err);
            return false;
        }
    };
    let Some(state) = taken else {
        return true;
    };
    // Signal the worker, wait for it to exit, then clean up the socket file.
    state.stop_flag.store(true, Ordering::Relaxed);
    let _ = state.worker.join();
    let _ = std::fs::remove_file(state.socket_path);
    true
}
+439 -139
View File
@@ -1,185 +1,485 @@
mod native_log;
mod config;
mod exports;
mod kernel_bridge;
mod platform;
mod runtime;
use easytier::common::config::{ConfigFileControl, ConfigLoader, TomlConfigLoader};
use config::repository::{
create_config_record, delete_config_record, export_config_toml, get_config_field_value,
get_default_config_json, import_toml_config, init_config_store as init_repo_store,
list_config_meta_json, save_config_record, set_config_field_value, start_kernel_with_config_id,
};
use config::services::schema_service::{
ConfigFieldMapping, NetworkConfigSchema,
get_network_config_field_mappings as build_network_config_field_mappings,
get_network_config_schema as build_network_config_schema,
};
use config::services::share_link_service::{
build_config_share_link as build_config_share_link_inner,
import_config_share_link as import_config_share_link_inner,
parse_config_share_link as parse_config_share_link_inner,
};
use config::storage::config_meta::get_config_display_name;
use config::types::stored_config::{KeyValuePair, SharedConfigLinkPayload};
use easytier::common::constants::EASYTIER_VERSION;
use easytier::common::{
MachineIdOptions,
config::{ConfigFileControl, ConfigLoader, TomlConfigLoader},
};
use easytier::instance_manager::NetworkInstanceManager;
use easytier::proto::api::manage::NetworkConfig;
use easytier::proto::api::manage::NetworkingMethod;
use easytier::web_client::{WebClient, WebClientHooks, run_web_client};
use kernel_bridge::{
aggregate_requested_tun_routes, start_local_socket_server as start_local_socket_server_inner,
stop_local_socket_server as stop_local_socket_server_inner,
};
use napi_derive_ohos::napi;
use ohos_hilog_binding::{hilog_debug, hilog_error};
use ohos_hilog_binding::{hilog_error, hilog_info};
use runtime::state::runtime_state::{
RuntimeAggregateState, TunAggregateState, clear_tun_attached, mark_tun_attached,
runtime_instance_from_running_info,
};
use std::collections::{HashMap, HashSet};
use std::format;
use std::sync::{Arc, Mutex};
use tokio::runtime::{Builder, Runtime};
use uuid::Uuid;
static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> =
once_cell::sync::Lazy::new(NetworkInstanceManager::new);
pub(crate) static INSTANCE_MANAGER: once_cell::sync::Lazy<Arc<NetworkInstanceManager>> =
once_cell::sync::Lazy::new(|| Arc::new(NetworkInstanceManager::new()));
static ASYNC_RUNTIME: once_cell::sync::Lazy<Runtime> = once_cell::sync::Lazy::new(|| {
Builder::new_multi_thread()
.enable_all()
.build()
.expect("tokio runtime for easytier-ohrs")
});
static WEB_CLIENTS: once_cell::sync::Lazy<Mutex<HashMap<String, ManagedWebClient>>> =
once_cell::sync::Lazy::new(|| Mutex::new(HashMap::new()));
#[napi(object)]
pub struct KeyValuePair {
pub key: String,
pub value: String,
/// Tracks the UUIDs of network instances started on behalf of a web client,
/// so `stop_web_client` can later delete exactly the instances that client owns.
#[derive(Default)]
struct TrackedWebClientHooks {
    // Set of instance ids registered via the WebClientHooks callbacks below.
    instance_ids: Mutex<HashSet<Uuid>>,
}
#[napi]
pub fn easytier_version() -> String {
EASYTIER_VERSION.to_string()
/// A running web client together with the hooks that track its instances.
/// `_client` is never read; it is held so the client is not dropped while
/// registered in WEB_CLIENTS (dropped explicitly in `stop_web_client`).
struct ManagedWebClient {
    _client: WebClient,
    hooks: Arc<TrackedWebClientHooks>,
}
#[napi]
pub fn set_tun_fd(inst_id: String, fd: i32) -> bool {
match Uuid::try_parse(&inst_id) {
Ok(uuid) => match INSTANCE_MANAGER.set_tun_fd(&uuid, fd) {
Ok(_) => {
hilog_debug!("[Rust] set tun fd {} to {}.", fd, inst_id);
true
}
Err(e) => {
hilog_error!("[Rust] cant set tun fd {} to {}. {}", fd, inst_id, e);
false
}
},
Err(e) => {
hilog_error!("[Rust] cant covert {} to uuid. {}", inst_id, e);
#[async_trait::async_trait]
impl WebClientHooks for TrackedWebClientHooks {
    /// Records the id of a network instance the web client just started;
    /// `stop_web_client` uses this set to tear the instances down later.
    async fn post_run_network_instance(&self, id: &Uuid) -> Result<(), String> {
        self.instance_ids
            .lock()
            // A poisoned mutex is surfaced as a plain string error.
            .map_err(|err| err.to_string())?
            .insert(*id);
        Ok(())
    }

    /// Forgets tracking entries for instances removed by the server side.
    async fn post_remove_network_instances(&self, ids: &[Uuid]) -> Result<(), String> {
        let mut guard = self.instance_ids.lock().map_err(|err| err.to_string())?;
        for id in ids {
            guard.remove(id);
        }
        Ok(())
    }
}
/// Returns true when `config` selects the PublicServer networking method
/// and carries a non-blank `public_server_url`.
fn is_config_server_config(config: &NetworkConfig) -> bool {
    let raw_method = config.networking_method.unwrap_or_default();
    let method = NetworkingMethod::try_from(raw_method).unwrap_or_default();
    if !matches!(method, NetworkingMethod::PublicServer) {
        return false;
    }
    config
        .public_server_url
        .as_deref()
        .map(|url| !url.trim().is_empty())
        .unwrap_or(false)
}
/// Stops the web client registered under `config_id` and deletes every
/// network instance it spawned. Returns true when a client existed and its
/// tracked instances (if any) were deleted successfully.
fn stop_web_client(config_id: &str) -> bool {
    // Take the client out of the registry, holding the lock only briefly.
    let managed = match WEB_CLIENTS.lock() {
        Ok(mut guard) => guard.remove(config_id),
        Err(err) => {
            hilog_error!("[Rust] stop_web_client lock failed {}", err);
            return false;
        }
    };
    let Some(managed) = managed else {
        // No client under this id; nothing to stop.
        return false;
    };
    // Snapshot the instance ids tracked by the client's hooks.
    let tracked_ids = managed
        .hooks
        .instance_ids
        .lock()
        .map(|guard| guard.iter().copied().collect::<Vec<_>>())
        .unwrap_or_default();
    // Drop the client handle before deleting its instances — presumably this
    // shuts the client down first; confirm against WebClient's Drop semantics.
    drop(managed);
    if tracked_ids.is_empty() {
        maybe_stop_local_socket_server();
        return true;
    }
    let ret = INSTANCE_MANAGER
        .delete_network_instance(tracked_ids)
        .map(|_| true)
        .unwrap_or_else(|err| {
            hilog_error!(
                "[Rust] stop config server instances failed {}: {}",
                config_id,
                err
            );
            false
        });
    // With the client gone, the local socket server may no longer be needed.
    maybe_stop_local_socket_server();
    ret
}
/// Thin wrapper over the kernel-bridge local socket server startup; returns
/// true when the server is up (assumed idempotent when already started —
/// TODO confirm against `start_local_socket_server_inner`).
fn ensure_local_socket_server_started() -> bool {
    start_local_socket_server_inner()
}
/// Shuts down the local socket server once nothing depends on it: no running
/// network instances and no registered web clients. A poisoned WEB_CLIENTS
/// lock is treated as "clients may still exist", keeping the server alive.
fn maybe_stop_local_socket_server() {
    let instances_empty = INSTANCE_MANAGER.list_network_instance_ids().is_empty();
    let clients_empty = match WEB_CLIENTS.lock() {
        Ok(guard) => guard.is_empty(),
        Err(_) => false,
    };
    if instances_empty && clients_empty {
        // Shutdown result is intentionally ignored (best effort).
        let _ = stop_local_socket_server_inner();
    }
}
/// Starts a web client ("config server" mode) for `config_id`.
///
/// Refuses to start while any local network instance is running, then
/// connects to the configured public server URL and registers the client in
/// `WEB_CLIENTS`. Returns true on success.
fn run_config_server_instance(config_id: &str, config: &NetworkConfig) -> bool {
    // Only one runner at a time. (Was `.iter().next().is_some()`; the
    // idiomatic emptiness check is `!is_empty()`.)
    if !INSTANCE_MANAGER.list_network_instance_ids().is_empty() {
        hilog_error!("[Rust] there is a running instance!");
        return false;
    }
    let Some(config_server_url) = config.public_server_url.clone() else {
        hilog_error!("[Rust] public_server_url missing for config server mode");
        return false;
    };
    let hooks = Arc::new(TrackedWebClientHooks::default());
    // secure_mode defaults to false when not present in the config.
    let secure_mode = config
        .secure_mode
        .as_ref()
        .map(|mode| mode.enabled)
        .unwrap_or(false);
    let hostname = config.hostname.clone();
    if !ensure_local_socket_server_started() {
        return false;
    }
    // run_web_client is async; block on the shared runtime.
    let client = ASYNC_RUNTIME.block_on(run_web_client(
        &config_server_url,
        MachineIdOptions::default(),
        hostname,
        secure_mode,
        INSTANCE_MANAGER.clone(),
        Some(hooks.clone()),
    ));
    let client = match client {
        Ok(client) => client,
        Err(err) => {
            hilog_error!("[Rust] start config server failed {}", err);
            return false;
        }
    };
    // Keep the client (and its hooks) alive until stop_web_client removes it.
    match WEB_CLIENTS.lock() {
        Ok(mut guard) => {
            guard.insert(
                config_id.to_string(),
                ManagedWebClient {
                    _client: client,
                    hooks,
                },
            );
            true
        }
        Err(err) => {
            hilog_error!("[Rust] store config server client failed {}", err);
            false
        }
    }
}
#[napi]
pub fn default_network_config() -> String {
match NetworkConfig::new_from_config(TomlConfigLoader::default()) {
Ok(result) => serde_json::to_string(&result).unwrap_or_else(|e| format!("ERROR {}", e)),
Err(e) => {
hilog_error!("[Rust] default_network_config failed {}", e);
format!("ERROR {}", e)
}
}
/// Builds a `NetworkConfig` from the default TOML loader and serializes it
/// to JSON; both failure paths get the same error prefix.
pub(crate) fn build_default_network_config_json() -> Result<String, String> {
    let fail = |e: String| format!("default_network_config failed {}", e);
    let config = NetworkConfig::new_from_config(TomlConfigLoader::default())
        .map_err(|e| fail(e.to_string()))?;
    serde_json::to_string(&config).map_err(|e| fail(e.to_string()))
}
#[napi]
pub fn convert_toml_to_network_config(cfg_str: String) -> String {
match TomlConfigLoader::new_from_str(&cfg_str) {
Ok(cfg) => match NetworkConfig::new_from_config(cfg) {
Ok(result) => serde_json::to_string(&result).unwrap_or_else(|e| format!("ERROR {}", e)),
Err(e) => {
hilog_error!("[Rust] convert_toml_to_network_config failed {}", e);
format!("ERROR {}", e)
}
},
Err(e) => {
hilog_error!("[Rust] convert_toml_to_network_config failed {}", e);
format!("ERROR {}", e)
}
}
/// Parses TOML config text into a `NetworkConfig` and re-serializes it as
/// JSON; every failure is stringified for the caller.
fn convert_toml_to_network_config_inner(toml_text: &str) -> Result<String, String> {
    let loader = TomlConfigLoader::new_from_str(toml_text).map_err(|e| e.to_string())?;
    let config = NetworkConfig::new_from_config(loader).map_err(|e| e.to_string())?;
    serde_json::to_string(&config).map_err(|e| e.to_string())
}
#[napi]
pub fn parse_network_config(cfg_json: String) -> bool {
match serde_json::from_str::<NetworkConfig>(&cfg_json) {
Ok(cfg) => match cfg.gen_config() {
Ok(toml) => {
hilog_debug!("[Rust] Convert to Toml {}", toml.dump());
true
}
Err(e) => {
hilog_error!("[Rust] parse config failed {}", e);
false
}
},
Err(e) => {
hilog_error!("[Rust] parse config failed {}", e);
false
}
}
/// Returns true when `cfg_json` deserializes into a `NetworkConfig` that can
/// also be rendered back into a TOML config.
fn parse_network_config_inner(cfg_json: &str) -> bool {
    match serde_json::from_str::<NetworkConfig>(cfg_json) {
        Ok(cfg) => cfg.gen_config().is_ok(),
        Err(_) => false,
    }
}
#[napi]
pub fn run_network_instance(cfg_json: String) -> bool {
let cfg = match serde_json::from_str::<NetworkConfig>(&cfg_json) {
Ok(cfg) => match cfg.gen_config() {
Ok(toml) => toml,
Err(e) => {
hilog_error!("[Rust] parse config failed {}", e);
return false;
}
},
pub(crate) fn run_network_instance_from_json(cfg_json: &str) -> bool {
let config = match serde_json::from_str::<NetworkConfig>(cfg_json) {
Ok(cfg) => cfg,
Err(e) => {
hilog_error!("[Rust] parse config failed {}", e);
return false;
}
};
if INSTANCE_MANAGER.list_network_instance_ids().len() > 0 {
if is_config_server_config(&config) {
let Some(config_id) = config.instance_id.as_deref() else {
hilog_error!("[Rust] config server config missing instance id");
return false;
};
return run_config_server_instance(config_id, &config);
}
let cfg = match config.gen_config() {
Ok(toml) => toml,
Err(e) => {
hilog_error!("[Rust] parse config failed {}", e);
return false;
}
};
if !INSTANCE_MANAGER.list_network_instance_ids().is_empty() {
hilog_error!("[Rust] there is a running instance!");
return false;
}
if !ensure_local_socket_server_started() {
return false;
}
let inst_id = cfg.get_id();
if INSTANCE_MANAGER
.list_network_instance_ids()
.contains(&inst_id)
{
hilog_error!("[Rust] instance {} already exists", inst_id);
return false;
}
INSTANCE_MANAGER
.run_network_instance(cfg, false, ConfigFileControl::STATIC_CONFIG)
.unwrap();
true
}
#[napi]
pub fn stop_network_instance(inst_names: Vec<String>) {
INSTANCE_MANAGER
.delete_network_instance(
inst_names
.into_iter()
.filter_map(|s| Uuid::parse_str(&s).ok())
.collect(),
)
.unwrap();
hilog_debug!("[Rust] stop_network_instance");
}
/// NAPI export: collects per-instance runtime info as (uuid, json) pairs.
/// Instances whose info fails to serialize are skipped; a collection failure
/// is logged and yields an empty list.
#[napi]
pub fn collect_network_infos() -> Vec<KeyValuePair> {
    let mut result = Vec::new();
    match INSTANCE_MANAGER.collect_network_infos_sync() {
        Ok(map) => {
            for (uuid, info) in map.iter() {
                // Serialize each instance's info to a JSON string.
                let value = match serde_json::to_string(&info) {
                    Ok(value) => value,
                    Err(e) => {
                        hilog_error!("[Rust] failed to serialize instance {} info: {}", uuid, e);
                        continue;
                    }
                };
                // Drop the redundant clones: uuid only needs Display, and
                // `value` can be moved instead of cloned.
                result.push(KeyValuePair {
                    key: uuid.to_string(),
                    value,
                });
            }
        }
        Err(e) => {
            // Previously swallowed silently; log so failures are visible.
            hilog_error!("[Rust] collect_network_infos_sync failed: {}", e);
        }
    }
    result
}
/// NAPI export: lists the UUIDs of running network instances as strings.
#[napi]
pub fn collect_running_network() -> Vec<String> {
    // NOTE(review): the previous `.clone().into_iter()` copied the id list
    // before stringifying; borrowing with `.iter()` avoids that copy and
    // works whether the manager returns the list owned or by reference.
    INSTANCE_MANAGER
        .list_network_instance_ids()
        .iter()
        .map(|id| id.to_string())
        .collect()
}
#[napi]
pub fn is_running_network(inst_id: String) -> bool {
match Uuid::try_parse(&inst_id) {
Ok(uuid) => INSTANCE_MANAGER.list_network_instance_ids().contains(&uuid),
Err(e) => {
hilog_error!("[Rust] cant covert {} to uuid. {}", inst_id, e);
match INSTANCE_MANAGER.run_network_instance(cfg, false, ConfigFileControl::STATIC_CONFIG) {
Ok(_) => true,
Err(err) => {
hilog_error!("[Rust] start_kernel failed for {}: {}", inst_id, err);
false
}
}
}
/// Parses `config_id` into a `Uuid`, logging and returning `None` when the
/// string is not a valid UUID.
fn parse_instance_uuid(config_id: &str) -> Option<Uuid> {
    Uuid::parse_str(config_id)
        .map_err(|err| {
            hilog_error!("[Rust] invalid config_id {}: {}", config_id, err);
            err
        })
        .ok()
}
/// NAPI binding: initializes the persistent config store rooted at `root_dir`.
#[napi]
pub fn init_config_store(root_dir: String) -> bool {
    exports::config_api::init_config_store(root_dir)
}

/// NAPI binding: returns the stored config list (JSON string).
#[napi]
pub fn list_configs() -> String {
    exports::config_api::list_configs()
}

/// NAPI binding: looks up the display name stored for `config_id`, if any.
#[napi]
pub fn get_config_display_name_by_id(config_id: String) -> Option<String> {
    get_config_display_name(&config_id)
}

/// NAPI binding: persists `config_json` under `config_id` with a display name.
#[napi]
pub fn save_config(config_id: String, display_name: String, config_json: String) -> bool {
    exports::config_api::save_config(config_id, display_name, config_json)
}

/// NAPI binding: creates a new stored config entry.
#[napi]
pub fn create_config(config_id: String, display_name: String) -> bool {
    exports::config_api::create_config(config_id, display_name)
}

/// NAPI binding: renames a stored config; true when the id existed
/// (`set_config_display_name` returns Some on success).
#[napi]
pub fn rename_stored_config(config_id: String, display_name: String) -> bool {
    config::storage::config_meta::set_config_display_name(config_id, display_name).is_some()
}

/// NAPI binding: deletes the metadata record for `config_id`.
#[napi]
pub fn delete_stored_config_meta(config_id: String) -> bool {
    exports::config_api::delete_stored_config_meta(config_id)
}

/// NAPI binding: fetches the stored config for `config_id`.
#[napi]
pub fn get_config(config_id: String) -> Option<String> {
    exports::config_api::get_config(config_id)
}

/// NAPI binding: fetches the default config, if one can be produced.
#[napi]
pub fn get_default_config() -> Option<String> {
    exports::config_api::get_default_config()
}

/// NAPI binding: reads one field of a stored config as a JSON value string.
#[napi]
pub fn get_config_field(config_id: String, field: String) -> Option<String> {
    exports::config_api::get_config_field(config_id, field)
}

/// NAPI binding: writes one field of a stored config from a JSON value string.
#[napi]
pub fn set_config_field(config_id: String, field: String, json_value: String) -> bool {
    exports::config_api::set_config_field(config_id, field, json_value)
}

/// NAPI binding: imports TOML config text; returns a string on success
/// (presumably the new config id — see `config_api::import_toml`).
#[napi]
pub fn import_toml(toml_text: String, display_name: Option<String>) -> Option<String> {
    exports::config_api::import_toml(toml_text, display_name)
}

/// NAPI binding: exports the stored config `config_id` as TOML text.
#[napi]
pub fn export_toml(config_id: String) -> Option<String> {
    exports::config_api::export_toml(config_id)
}

/// NAPI binding: starts the kernel for stored config `config_id`, delegating
/// the actual launch to `start_kernel_with_config_id`.
#[napi]
pub fn start_kernel(config_id: String) -> bool {
    exports::runtime_api::start_kernel(config_id, start_kernel_with_config_id)
}

/// NAPI binding: stops the kernel for `config_id`. The injected callbacks
/// tear down any web client, resolve the instance UUID, and stop the local
/// socket server when it is no longer needed.
#[napi]
pub fn stop_kernel(config_id: String) -> bool {
    exports::runtime_api::stop_kernel(
        config_id,
        stop_web_client,
        parse_instance_uuid,
        maybe_stop_local_socket_server,
    )
}
/// NAPI binding: stops the instances for the given config ids, delegating to
/// `stop_kernel` per config.
#[napi]
pub fn stop_network_instance(config_ids: Vec<String>) -> bool {
    exports::runtime_api::stop_network_instance(config_ids, stop_kernel)
}

/// NAPI binding: returns the EasyTier core version string.
#[napi]
pub fn easytier_version() -> String {
    EASYTIER_VERSION.to_string()
}

/// NAPI binding: returns the default network config JSON, or `"{}"` when it
/// cannot be produced.
#[napi]
pub fn default_network_config() -> String {
    get_default_config().unwrap_or_else(|| "{}".to_string())
}

/// NAPI binding: converts TOML config text to the JSON representation;
/// failures are returned inline as an `"ERROR: …"` string.
#[napi]
pub fn convert_toml_to_network_config(toml_text: String) -> String {
    convert_toml_to_network_config_inner(&toml_text).unwrap_or_else(|err| format!("ERROR: {err}"))
}

/// NAPI binding: validates that `cfg_json` is a convertible network config.
#[napi]
pub fn parse_network_config(cfg_json: String) -> bool {
    parse_network_config_inner(&cfg_json)
}

/// NAPI binding: starts a network instance (or config-server client) from
/// config JSON.
#[napi]
pub fn run_network_instance(cfg_json: String) -> bool {
    run_network_instance_from_json(&cfg_json)
}

/// NAPI binding: collects per-instance runtime info as key/value pairs.
#[napi]
pub fn collect_network_infos() -> Vec<KeyValuePair> {
    exports::runtime_api::collect_network_infos()
}

/// NAPI binding: attaches a TUN file descriptor to the instance identified
/// by `config_id` (resolved to a UUID via `parse_instance_uuid`).
#[napi]
pub fn set_tun_fd(config_id: String, fd: i32) -> bool {
    exports::runtime_api::set_tun_fd(config_id, fd, parse_instance_uuid)
}

/// NAPI binding: returns the NetworkConfig schema tree for the UI.
#[napi]
pub fn get_network_config_schema() -> NetworkConfigSchema {
    build_network_config_schema()
}

/// NAPI binding: returns the flat field-mapping list for NetworkConfig.
#[napi]
pub fn get_network_config_field_mappings() -> Vec<ConfigFieldMapping> {
    build_network_config_field_mappings()
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The exported schema must expose core NetworkConfig metadata:
    /// root name/kind, a top-level `network_name` field, and a nested
    /// `secure_mode.enabled` field.
    #[test]
    fn exported_plain_object_schema_contains_core_networkconfig_metadata() {
        let schema = get_network_config_schema();
        assert_eq!(schema.name, "NetworkConfig");
        assert_eq!(schema.node_kind, "schema");
        // Top-level field present.
        assert!(
            schema
                .children
                .iter()
                .any(|field| field.name == "network_name")
        );
        // Nested field present under secure_mode.
        let secure_mode = schema
            .children
            .iter()
            .find(|field| field.name == "secure_mode")
            .expect("secure_mode field");
        assert!(
            secure_mode
                .children
                .iter()
                .any(|field| field.name == "enabled")
        );
    }
}
/// NAPI binding: full runtime aggregate snapshot (instances + TUN state).
#[napi]
pub fn get_runtime_snapshot() -> RuntimeAggregateState {
    exports::runtime_api::get_runtime_snapshot()
}

/// Crate-internal variant of the snapshot for other Rust modules.
pub(crate) fn get_runtime_snapshot_inner() -> RuntimeAggregateState {
    exports::runtime_api::get_runtime_snapshot_inner()
}

/// NAPI binding: builds a share link for `config_id`; `only_start`
/// defaults to false when not provided.
#[napi]
pub fn build_config_share_link(config_id: String, only_start: Option<bool>) -> Option<String> {
    build_config_share_link_inner(&config_id, None, only_start.unwrap_or(false))
}

/// NAPI binding: parses a share link into its payload without importing it.
#[napi]
pub fn parse_config_share_link(share_link: String) -> Option<SharedConfigLinkPayload> {
    parse_config_share_link_inner(&share_link)
}

/// NAPI binding: imports a share link, optionally overriding the display name.
#[napi]
pub fn import_config_share_link(
    share_link: String,
    display_name_override: Option<String>,
) -> Option<String> {
    import_config_share_link_inner(&share_link, display_name_override)
}
@@ -0,0 +1 @@
pub(crate) mod logging;
@@ -0,0 +1 @@
pub(crate) mod native_log;
@@ -0,0 +1 @@
pub(crate) mod state;
@@ -0,0 +1 @@
pub(crate) mod runtime_state;
@@ -0,0 +1,293 @@
use easytier::proto::{api, common};
use napi_derive_ohos::napi;
use serde::Serialize;
use std::collections::HashSet;
use std::sync::Mutex;
static ATTACHED_TUN_INSTANCE_IDS: once_cell::sync::Lazy<Mutex<HashSet<String>>> =
once_cell::sync::Lazy::new(|| Mutex::new(HashSet::new()));
/// Records that a TUN device is attached for `instance_id`.
/// A poisoned lock makes this a silent no-op.
pub fn mark_tun_attached(instance_id: &str) {
    let Ok(mut ids) = ATTACHED_TUN_INSTANCE_IDS.lock() else {
        return;
    };
    ids.insert(instance_id.to_string());
}

/// Forgets the TUN attachment for `instance_id`.
/// A poisoned lock makes this a silent no-op.
pub fn clear_tun_attached(instance_id: &str) {
    let Ok(mut ids) = ATTACHED_TUN_INSTANCE_IDS.lock() else {
        return;
    };
    ids.remove(instance_id);
}

/// Whether a TUN device is currently marked attached for `instance_id`;
/// a poisoned lock reads as "not attached".
pub fn is_tun_attached(instance_id: &str) -> bool {
    match ATTACHED_TUN_INSTANCE_IDS.lock() {
        Ok(ids) => ids.contains(instance_id),
        Err(_) => false,
    }
}
/// Traffic counters and latency for a single peer connection.
/// Counters are i64 because NAPI has no unsigned 64-bit integer type.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct PeerConnStats {
    pub rx_bytes: i64,
    pub tx_bytes: i64,
    pub rx_packets: i64,
    pub tx_packets: i64,
    // Microseconds, per the field name.
    pub latency_us: i64,
}

/// One connection to a peer, flattened from the proto type by
/// `peer_conn_to_view`.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct PeerConnInfo {
    pub conn_id: String,
    pub my_peer_id: i64,
    pub peer_id: i64,
    pub features: Vec<String>,
    // Tunnel fields are None when the proto tunnel (or address) is absent.
    pub tunnel_type: Option<String>,
    pub local_addr: Option<String>,
    pub remote_addr: Option<String>,
    pub resolved_remote_addr: Option<String>,
    pub stats: Option<PeerConnStats>,
    pub loss_rate: Option<f64>,
    pub is_client: bool,
    pub network_name: Option<String>,
    pub is_closed: bool,
    pub secure_auth_level: Option<i32>,
    pub peer_identity_type: Option<i32>,
}

/// A peer and all of its connections (see `peer_to_view`).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct PeerInfo {
    pub peer_id: i64,
    pub default_conn_id: Option<String>,
    pub directly_connected_conns: Vec<String>,
    pub conns: Vec<PeerConnInfo>,
}

/// A routing-table entry flattened for the UI (see `route_to_view`).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct RouteView {
    pub peer_id: i64,
    pub hostname: Option<String>,
    // `ipv4` is the bare address; `ipv4_cidr` keeps the prefix length.
    pub ipv4: Option<String>,
    pub ipv4_cidr: Option<String>,
    pub ipv6_cidr: Option<String>,
    pub proxy_cidrs: Vec<String>,
    pub next_hop_peer_id: Option<i64>,
    pub cost: Option<i32>,
    pub path_latency: Option<i64>,
    pub udp_nat_type: Option<i32>,
    pub tcp_nat_type: Option<i32>,
    pub inst_id: Option<String>,
    pub version: Option<String>,
    pub is_public_server: Option<bool>,
}

/// This node's own identity and listeners (see `my_node_info_to_view`).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct MyNodeInfo {
    pub virtual_ipv4: Option<String>,
    pub virtual_ipv4_cidr: Option<String>,
    pub hostname: Option<String>,
    pub version: Option<String>,
    pub peer_id: Option<i64>,
    pub listeners: Vec<String>,
    pub vpn_portal_cfg: Option<String>,
    pub udp_nat_type: Option<i32>,
    pub tcp_nat_type: Option<i32>,
}

/// Aggregate state for one configured instance, built by
/// `runtime_instance_from_running_info`.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct RuntimeInstanceState {
    pub config_id: String,
    // Currently equals config_id (see runtime_instance_from_running_info).
    pub instance_id: String,
    pub display_name: String,
    pub running: bool,
    pub tun_required: bool,
    pub tun_attached: bool,
    pub magic_dns_enabled: bool,
    pub need_exit_node: bool,
    pub error_message: Option<String>,
    pub my_node_info: Option<MyNodeInfo>,
    pub events: Vec<String>,
    pub routes: Vec<RouteView>,
    pub peers: Vec<PeerInfo>,
}

/// Aggregated TUN device state across all attached instances.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct TunAggregateState {
    pub active: bool,
    pub attached_instance_ids: Vec<String>,
    pub aggregated_routes: Vec<String>,
    pub dns_servers: Vec<String>,
    pub need_rebuild: bool,
}

/// Top-level snapshot returned to the UI: all instances plus TUN state.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct RuntimeAggregateState {
    pub instances: Vec<RuntimeInstanceState>,
    pub tun: TunAggregateState,
    pub running_instance_count: i32,
}
/// Display-formats an optional IPv4 CIDR value.
fn stringify_ipv4_inet(value: Option<common::Ipv4Inet>) -> Option<String> {
    Some(value?.to_string())
}

/// Display-formats an optional IPv6 CIDR value.
fn stringify_ipv6_inet(value: Option<common::Ipv6Inet>) -> Option<String> {
    Some(value?.to_string())
}

/// Display-formats an optional URL value.
fn stringify_url(value: Option<common::Url>) -> Option<String> {
    Some(value?.to_string())
}

/// Display-formats an optional UUID value.
fn stringify_uuid(value: Option<common::Uuid>) -> Option<String> {
    Some(value?.to_string())
}
/// Widens an optional `u32` into an `i64` (lossless, via `From`).
fn optional_u32_to_i64(value: Option<u32>) -> Option<i64> {
    value.map(i64::from)
}

/// Widens an optional `i32` into an `i64` (lossless, via `From`).
fn optional_i32_to_i64(value: Option<i32>) -> Option<i64> {
    value.map(i64::from)
}
/// Flattens a proto `Route` into the NAPI-friendly `RouteView`.
fn route_to_view(route: api::instance::Route) -> RouteView {
    let stun = route.stun_info;
    let feature_flag = route.feature_flag;
    RouteView {
        peer_id: route.peer_id as i64,
        // Empty proto strings become None.
        hostname: (!route.hostname.is_empty()).then_some(route.hostname),
        // Bare address (no prefix length) extracted from the CIDR value.
        ipv4: route
            .ipv4_addr
            .as_ref()
            .and_then(|inet| inet.address.as_ref())
            .map(|addr| addr.to_string()),
        ipv4_cidr: stringify_ipv4_inet(route.ipv4_addr),
        ipv6_cidr: stringify_ipv6_inet(route.ipv6_addr),
        proxy_cidrs: route.proxy_cidrs,
        // Prefer the latency-first next hop / latency, falling back to the
        // plain values when the latency-first variants are absent.
        next_hop_peer_id: optional_u32_to_i64(route.next_hop_peer_id_latency_first)
            .or_else(|| Some(route.next_hop_peer_id as i64)),
        cost: Some(route.cost),
        path_latency: optional_i32_to_i64(route.path_latency_latency_first)
            .or_else(|| Some(route.path_latency as i64)),
        udp_nat_type: stun.as_ref().map(|info| info.udp_nat_type),
        tcp_nat_type: stun.as_ref().map(|info| info.tcp_nat_type),
        inst_id: (!route.inst_id.is_empty()).then_some(route.inst_id),
        version: (!route.version.is_empty()).then_some(route.version),
        is_public_server: feature_flag.map(|flag| flag.is_public_server),
    }
}
/// Flattens a proto `PeerConnInfo` into the NAPI-friendly view type.
fn peer_conn_to_view(conn: api::instance::PeerConnInfo) -> PeerConnInfo {
    // Counters are cast to i64 for the NAPI side.
    let stats = conn.stats.map(|stats| PeerConnStats {
        rx_bytes: stats.rx_bytes as i64,
        tx_bytes: stats.tx_bytes as i64,
        rx_packets: stats.rx_packets as i64,
        tx_packets: stats.tx_packets as i64,
        latency_us: stats.latency_us as i64,
    });
    PeerConnInfo {
        conn_id: conn.conn_id,
        my_peer_id: conn.my_peer_id as i64,
        peer_id: conn.peer_id as i64,
        features: conn.features,
        tunnel_type: conn.tunnel.as_ref().map(|t| t.tunnel_type.clone()),
        // Tunnel endpoint URLs are stringified; each is None when the
        // tunnel (or that particular address) is absent.
        local_addr: conn
            .tunnel
            .as_ref()
            .and_then(|t| stringify_url(t.local_addr.clone())),
        remote_addr: conn
            .tunnel
            .as_ref()
            .and_then(|t| stringify_url(t.remote_addr.clone())),
        resolved_remote_addr: conn
            .tunnel
            .as_ref()
            .and_then(|t| stringify_url(t.resolved_remote_addr.clone())),
        stats,
        loss_rate: Some(conn.loss_rate as f64),
        // Empty network names map to None.
        network_name: (!conn.network_name.is_empty()).then_some(conn.network_name),
        is_closed: conn.is_closed,
        secure_auth_level: Some(conn.secure_auth_level),
        peer_identity_type: Some(conn.peer_identity_type),
    }
}
/// Converts a proto `PeerInfo` (peer id + connection list) into the view type.
fn peer_to_view(peer: api::instance::PeerInfo) -> PeerInfo {
    PeerInfo {
        peer_id: peer.peer_id as i64,
        default_conn_id: stringify_uuid(peer.default_conn_id),
        // Connection ids are stringified for the JS side.
        directly_connected_conns: peer
            .directly_connected_conns
            .into_iter()
            .map(|id| id.to_string())
            .collect(),
        conns: peer.conns.into_iter().map(peer_conn_to_view).collect(),
    }
}
/// Converts the proto `MyNodeInfo` into the NAPI view type.
fn my_node_info_to_view(info: api::manage::MyNodeInfo) -> MyNodeInfo {
    MyNodeInfo {
        // Bare address extracted from the CIDR value.
        virtual_ipv4: info
            .virtual_ipv4
            .as_ref()
            .and_then(|inet| inet.address.as_ref())
            .map(|addr| addr.to_string()),
        virtual_ipv4_cidr: stringify_ipv4_inet(info.virtual_ipv4),
        // Empty proto strings become None.
        hostname: (!info.hostname.is_empty()).then_some(info.hostname),
        version: (!info.version.is_empty()).then_some(info.version),
        peer_id: Some(info.peer_id as i64),
        listeners: info
            .listeners
            .into_iter()
            .map(|url| url.to_string())
            .collect(),
        vpn_portal_cfg: info.vpn_portal_cfg,
        udp_nat_type: info.stun_info.as_ref().map(|stun| stun.udp_nat_type),
        tcp_nat_type: info.stun_info.as_ref().map(|stun| stun.tcp_nat_type),
    }
}
/// Builds the per-instance `RuntimeInstanceState` from the manager's running
/// info plus config-level flags supplied by the caller.
///
/// `config_id` doubles as the exported instance id.
pub fn runtime_instance_from_running_info(
    config_id: String,
    display_name: String,
    magic_dns_enabled: bool,
    need_exit_node: bool,
    info: api::manage::NetworkInstanceRunningInfo,
) -> RuntimeInstanceState {
    // A TUN only counts as attached while the instance is actually running.
    let tun_attached = info.running && is_tun_attached(&config_id);
    // "no_tun" is treated as a sentinel device name meaning no TUN is needed;
    // an already-attached TUN also counts as required.
    let tun_required = info.running && (info.dev_name != "no_tun" || tun_attached);
    RuntimeInstanceState {
        config_id: config_id.clone(),
        instance_id: config_id,
        display_name,
        running: info.running,
        tun_required,
        tun_attached,
        magic_dns_enabled,
        need_exit_node,
        error_message: info.error_msg,
        my_node_info: info.my_node_info.map(my_node_info_to_view),
        events: info.events,
        routes: info.routes.into_iter().map(route_to_view).collect(),
        peers: info.peers.into_iter().map(peer_to_view).collect(),
    }
}
@@ -8,8 +8,7 @@ use anyhow::Context as _;
use dashmap::DashMap;
use easytier::{
common::config::{
ConfigFileControl, ConfigLoader, DEFAULT_CONNECTION_PRIORITY, NetworkIdentity, PeerConfig,
TomlConfigLoader,
ConfigFileControl, ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader,
},
instance_manager::NetworkInstanceManager,
};
@@ -361,7 +360,6 @@ impl HealthChecker {
.parse()
.with_context(|| "failed to parse peer uri")?,
peer_public_key: None,
priority: DEFAULT_CONNECTION_PRIORITY,
}]);
let inst_id = inst_id.unwrap_or(uuid::Uuid::new_v4());
+1 -1
View File
@@ -1,7 +1,7 @@
{
"name": "easytier-gui",
"type": "module",
"version": "2.6.3",
"version": "2.6.4",
"private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": {
+1 -1
View File
@@ -1,6 +1,6 @@
[package]
name = "easytier-gui"
version = "2.6.3"
version = "2.6.4"
description = "EasyTier GUI"
authors = ["you"]
edition.workspace = true
+9 -1
View File
@@ -490,10 +490,18 @@ async fn init_web_client(app: AppHandle, url: Option<String>) -> Result<(), Stri
.ok_or_else(|| "Instance manager is not available".to_string())?;
let hooks = Arc::new(manager::GuiHooks { app: app.clone() });
let machine_id_state_dir = app
.path()
.app_data_dir()
.with_context(|| "Failed to resolve machine id state directory")
.map_err(|e| format!("{:#}", e))?;
let web_client = web_client::run_web_client(
url.as_str(),
None,
easytier::common::MachineIdOptions {
explicit_machine_id: None,
state_dir: Some(machine_id_state_dir),
},
None,
false,
instance_manager,
+1 -1
View File
@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false
},
"productName": "easytier-gui",
"version": "2.6.3",
"version": "2.6.4",
"identifier": "com.kkrainbow.easytier",
"plugins": {
"shell": {
-20
View File
@@ -1,20 +0,0 @@
[package]
name = "easytier-rpc-build"
description = "Protobuf RPC Service Generator for EasyTier"
version = "0.1.0"
edition.workspace = true
homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"]
license-file = "LICENSE"
readme = "README.md"
[dependencies]
heck = "0.5"
prost-build = "0.13"
[features]
default = []
internal-namespace = []
-1
View File
@@ -1 +0,0 @@
../LICENSE
-3
View File
@@ -1,3 +0,0 @@
# Introduction
This is a protobuf rpc service stub generator for [EasyTier](https://github.com/EasyTier/EasyTier) project.
-449
View File
@@ -1,449 +0,0 @@
extern crate heck;
extern crate prost_build;
use std::fmt;
#[cfg(feature = "internal-namespace")]
const NAMESPACE: &str = "crate::proto::rpc_types";
#[cfg(not(feature = "internal-namespace"))]
const NAMESPACE: &str = "easytier::proto::rpc_types";
/// The service generator to be used with `prost-build` to generate RPC implementations for
/// `prost-simple-rpc`.
///
/// See the crate-level documentation for more info.
#[allow(missing_copy_implementations)]
#[derive(Clone, Debug, Default)]
pub struct ServiceGenerator {
_private: (),
}
impl prost_build::ServiceGenerator for ServiceGenerator {
fn generate(&mut self, service: prost_build::Service, mut buf: &mut String) {
use std::fmt::Write;
let descriptor_name = format!("{}Descriptor", service.name);
let server_name = format!("{}Server", service.name);
let client_name = format!("{}Client", service.name);
let method_descriptor_name = format!("{}MethodDescriptor", service.name);
let mut trait_methods = String::new();
let mut weak_impl_methods = String::new();
let mut enum_methods = String::new();
let mut list_enum_methods = String::new();
let mut client_methods = String::new();
let mut client_own_methods = String::new();
let mut match_name_methods = String::new();
let mut match_proto_name_methods = String::new();
let mut match_input_type_methods = String::new();
let mut match_input_proto_type_methods = String::new();
let mut match_output_type_methods = String::new();
let mut match_output_proto_type_methods = String::new();
let mut match_handle_methods = String::new();
// generate trait default method Xxx::json_call_method match branch
let mut match_trait_json_methods = String::new();
let mut match_method_try_from = String::new();
for (idx, method) in service.methods.iter().enumerate() {
assert!(
!method.client_streaming,
"Client streaming not yet supported for method {}",
method.proto_name
);
assert!(
!method.server_streaming,
"Server streaming not yet supported for method {}",
method.proto_name
);
ServiceGenerator::write_comments(&mut trait_methods, 4, &method.comments).unwrap();
writeln!(
trait_methods,
r#" async fn {name}(&self, ctrl: Self::Controller, input: {input_type}) -> {namespace}::error::Result<{output_type}>;"#,
name = method.name,
input_type = method.input_type,
output_type = method.output_type,
namespace = NAMESPACE,
)
.unwrap();
writeln!(
weak_impl_methods,
r#" async fn {method_name}(&self, ctrl: Self::Controller, input: {input_type}) -> {namespace}::error::Result<{output_type}> {{
let Some(service) = self.upgrade() else {{
return Err({namespace}::error::Error::Shutdown);
}};
service.{method_name}(ctrl, input).await
}}"#,
method_name = method.name,
input_type = method.input_type,
output_type = method.output_type,
namespace = NAMESPACE,
)
.unwrap();
ServiceGenerator::write_comments(&mut enum_methods, 4, &method.comments).unwrap();
writeln!(
enum_methods,
" {name} = {index},",
name = method.proto_name,
index = idx + 1
)
.unwrap();
writeln!(
match_method_try_from,
" {index} => Ok({service_name}MethodDescriptor::{name}),",
service_name = service.name,
name = method.proto_name,
index = idx + 1,
)
.unwrap();
writeln!(
list_enum_methods,
" {service_name}MethodDescriptor::{name},",
service_name = service.name,
name = method.proto_name
)
.unwrap();
writeln!(
client_methods,
r#" async fn {name}(&self, ctrl: H::Controller, input: {input_type}) -> {namespace}::error::Result<{output_type}> {{
{client_name}Client::{name}_inner(self.0.clone(), ctrl, input).await
}}"#,
name = method.name,
input_type = method.input_type,
output_type = method.output_type,
client_name = service.name,
namespace = NAMESPACE,
)
.unwrap();
writeln!(
client_own_methods,
r#" async fn {name}_inner(handler: H, ctrl: H::Controller, input: {input_type}) -> {namespace}::error::Result<{output_type}> {{
{namespace}::__rt::call_method(handler, ctrl, {method_descriptor_name}::{proto_name}, input).await
}}"#,
name = method.name,
method_descriptor_name = method_descriptor_name,
proto_name = method.proto_name,
input_type = method.input_type,
output_type = method.output_type,
namespace = NAMESPACE,
).unwrap();
let case = format!(
" {service_name}MethodDescriptor::{proto_name} => ",
service_name = service.name,
proto_name = method.proto_name
);
writeln!(match_name_methods, "{}{:?},", case, method.name).unwrap();
writeln!(match_proto_name_methods, "{}{:?},", case, method.proto_name).unwrap();
writeln!(
match_input_type_methods,
"{}::std::any::TypeId::of::<{}>(),",
case, method.input_type
)
.unwrap();
writeln!(
match_input_proto_type_methods,
"{}{:?},",
case, method.input_proto_type
)
.unwrap();
writeln!(
match_output_type_methods,
"{}::std::any::TypeId::of::<{}>(),",
case, method.output_type
)
.unwrap();
writeln!(
match_output_proto_type_methods,
"{}{:?},",
case, method.output_proto_type
)
.unwrap();
write!(
match_handle_methods,
r#"{} {{
let decoded: {input_type} = {namespace}::__rt::decode(input)?;
let ret = service.{name}(ctrl, decoded).await?;
{namespace}::__rt::encode(ret)
}}
"#,
case,
input_type = method.input_type,
name = method.name,
namespace = NAMESPACE,
)
.unwrap();
write!(
match_trait_json_methods,
r#" "{name}" | "{proto_name}" => {{
let req: {input_type} = ::serde_json::from_value(json).map_err(|e| {namespace}::error::Error::MalformatRpcPacket(format!("json error: {{}}", e)))?;
let resp = self.{typed_method}(ctrl, req).await?;
Ok(::serde_json::to_value(resp).map_err(|e| {namespace}::error::Error::MalformatRpcPacket(format!("json error: {{}}", e)))?)
}}
"#,
name = method.name,
proto_name = method.proto_name,
input_type = method.input_type,
typed_method = method.name,
namespace = NAMESPACE,
)
.unwrap();
}
ServiceGenerator::write_comments(&mut buf, 0, &service.comments).unwrap();
write!(
buf,
r#"
#[async_trait::async_trait]
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait {name} {{
type Controller: {namespace}::controller::Controller;
{trait_methods}
async fn json_call_method(
&self,
ctrl: Self::Controller,
method_name: &str,
json: ::serde_json::Value,
) -> {namespace}::error::Result<::serde_json::Value> {{
match method_name {{
{match_trait_json_methods}
_ => Err({namespace}::error::Error::InvalidMethodIndex(0, method_name.to_string())),
}}
}}
}}
#[async_trait::async_trait]
impl<T> {name} for ::std::sync::Weak<T>
where
T: Send + Sync + 'static,
::std::sync::Arc<T>: {name},
{{
type Controller = <::std::sync::Arc<T> as {name}>::Controller;
{weak_impl_methods}
}}
/// A service descriptor for a `{name}`.
#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, Default)]
pub struct {descriptor_name};
/// Methods available on a `{name}`.
///
/// This can be used as a key when routing requests for servers/clients of a `{name}`.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
#[repr(u8)]
pub enum {method_descriptor_name} {{
{enum_methods}
}}
impl std::convert::TryFrom<u8> for {method_descriptor_name} {{
type Error = {namespace}::error::Error;
fn try_from(value: u8) -> {namespace}::error::Result<Self> {{
match value {{
{match_method_try_from}
_ => Err({namespace}::error::Error::InvalidMethodIndex(value, "{name}".to_string())),
}}
}}
}}
/// A client for a `{name}`.
///
/// This implements the `{name}` trait by dispatching all method calls to the supplied `Handler`.
#[derive(Clone, Debug)]
pub struct {client_name}<H>(H) where H: {namespace}::handler::Handler;
impl<H> {client_name}<H> where H: {namespace}::handler::Handler<Descriptor = {descriptor_name}> {{
/// Creates a new client instance that delegates all method calls to the supplied handler.
pub fn new(handler: H) -> {client_name}<H> {{
{client_name}(handler)
}}
}}
impl<H> {client_name}<H> where H: {namespace}::handler::Handler<Descriptor = {descriptor_name}> {{
{client_own_methods}
}}
#[async_trait::async_trait]
impl<H> {name} for {client_name}<H> where H: {namespace}::handler::Handler<Descriptor = {descriptor_name}> {{
type Controller = H::Controller;
{client_methods}
}}
pub struct {client_name}Factory<C: {namespace}::controller::Controller>(std::marker::PhantomData<C>);
impl<C: {namespace}::controller::Controller> Clone for {client_name}Factory<C> {{
fn clone(&self) -> Self {{
Self(std::marker::PhantomData)
}}
}}
impl<C> {namespace}::__rt::RpcClientFactory for {client_name}Factory<C> where C: {namespace}::controller::Controller {{
type Descriptor = {descriptor_name};
type ClientImpl = Box<dyn {name}<Controller = C> + Send + Sync + 'static>;
type Controller = C;
fn new(handler: impl {namespace}::handler::Handler<Descriptor = Self::Descriptor, Controller = Self::Controller>) -> Self::ClientImpl {{
Box::new({client_name}::new(handler))
}}
}}
/// A server for a `{name}`.
///
/// This implements the `Server` trait by handling requests and dispatch them to methods on the
/// supplied `{name}`.
#[derive(Clone, Debug)]
pub struct {server_name}<A>(A) where A: {name} + Clone + Send + 'static;
impl<T> {server_name}<::std::sync::Weak<T>>
where
T: Send + Sync + 'static,
::std::sync::Arc<T>: {name},
{{
pub fn new_arc(service: ::std::sync::Arc<T>) -> {server_name}<::std::sync::Weak<T>> {{
{server_name}(::std::sync::Arc::downgrade(&service))
}}
}}
impl<A> {server_name}<A> where A: {name} + Clone + Send + 'static {{
/// Creates a new server instance that dispatches all calls to the supplied service.
pub fn new(service: A) -> {server_name}<A> {{
{server_name}(service)
}}
async fn call_inner(
service: A,
method: {method_descriptor_name},
ctrl: A::Controller,
input: ::bytes::Bytes)
-> {namespace}::error::Result<::bytes::Bytes> {{
match method {{
{match_handle_methods}
}}
}}
}}
impl {namespace}::descriptor::ServiceDescriptor for {descriptor_name} {{
type Method = {method_descriptor_name};
fn name(&self) -> &'static str {{ {name:?} }}
fn proto_name(&self) -> &'static str {{ {proto_name:?} }}
fn package(&self) -> &'static str {{ {package:?} }}
fn methods(&self) -> &'static [Self::Method] {{
&[ {list_enum_methods} ]
}}
}}
#[async_trait::async_trait]
impl<A> {namespace}::handler::Handler for {server_name}<A>
where
A: {name} + Clone + Send + Sync + 'static {{
type Descriptor = {descriptor_name};
type Controller = A::Controller;
async fn call(
&self,
ctrl: A::Controller,
method: {method_descriptor_name},
input: ::bytes::Bytes)
-> {namespace}::error::Result<::bytes::Bytes> {{
{server_name}::call_inner(self.0.clone(), method, ctrl, input).await
}}
}}
impl {namespace}::descriptor::MethodDescriptor for {method_descriptor_name} {{
fn name(&self) -> &'static str {{
match *self {{
{match_name_methods}
}}
}}
fn proto_name(&self) -> &'static str {{
match *self {{
{match_proto_name_methods}
}}
}}
fn input_type(&self) -> ::std::any::TypeId {{
match *self {{
{match_input_type_methods}
}}
}}
fn input_proto_type(&self) -> &'static str {{
match *self {{
{match_input_proto_type_methods}
}}
}}
fn output_type(&self) -> ::std::any::TypeId {{
match *self {{
{match_output_type_methods}
}}
}}
fn output_proto_type(&self) -> &'static str {{
match *self {{
{match_output_proto_type_methods}
}}
}}
fn index(&self) -> u8 {{
*self as u8
}}
}}
"#,
name = service.name,
descriptor_name = descriptor_name,
server_name = server_name,
client_name = client_name,
method_descriptor_name = method_descriptor_name,
proto_name = service.proto_name,
package = service.package,
trait_methods = trait_methods,
weak_impl_methods = weak_impl_methods,
enum_methods = enum_methods,
list_enum_methods = list_enum_methods,
client_own_methods = client_own_methods,
client_methods = client_methods,
match_name_methods = match_name_methods,
match_proto_name_methods = match_proto_name_methods,
match_input_type_methods = match_input_type_methods,
match_input_proto_type_methods = match_input_proto_type_methods,
match_output_type_methods = match_output_type_methods,
match_output_proto_type_methods = match_output_proto_type_methods,
match_handle_methods = match_handle_methods,
match_trait_json_methods = match_trait_json_methods,
namespace = NAMESPACE,
).unwrap();
}
}
impl ServiceGenerator {
    /// Renders the leading comments of a proto item as `///` doc lines,
    /// one per non-empty source line, indented by `indent` spaces.
    ///
    /// Returns any error produced by the underlying `fmt::Write` sink.
    fn write_comments<W>(
        mut write: W,
        indent: usize,
        comments: &prost_build::Comments,
    ) -> fmt::Result
    where
        W: fmt::Write,
    {
        // Build the indent prefix once instead of re-allocating it per line.
        let pad = " ".repeat(indent);
        for comment in &comments.leading {
            for line in comment.lines().filter(|s| !s.is_empty()) {
                writeln!(write, "{}///{}", pad, line)?;
            }
        }
        Ok(())
    }
}
+1 -1
View File
@@ -1,6 +1,6 @@
[package]
name = "easytier-web"
version = "2.6.3"
version = "2.6.4"
edition.workspace = true
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
@@ -99,6 +99,7 @@ const bool_flags: BoolFlag[] = [
{ field: 'disable_encryption', help: 'disable_encryption_help' },
{ field: 'disable_tcp_hole_punching', help: 'disable_tcp_hole_punching_help' },
{ field: 'disable_udp_hole_punching', help: 'disable_udp_hole_punching_help' },
{ field: 'enable_udp_broadcast_relay', help: 'enable_udp_broadcast_relay_help' },
{ field: 'disable_upnp', help: 'disable_upnp_help' },
{ field: 'disable_sym_hole_punching', help: 'disable_sym_hole_punching_help' },
{ field: 'enable_magic_dns', help: 'enable_magic_dns_help' },
@@ -160,6 +160,9 @@ disable_tcp_hole_punching_help: 禁用TCP打洞功能
disable_udp_hole_punching: 禁用UDP打洞
disable_udp_hole_punching_help: 禁用UDP打洞功能
enable_udp_broadcast_relay: UDP 广播中继
enable_udp_broadcast_relay_help: "仅 Windows:捕获物理网卡上的本机 UDP 广播包并转发给 EasyTier 对等节点,帮助局域网游戏发现房间。需要管理员权限。"
disable_upnp: 禁用 UPnP
disable_upnp_help: 禁用符合条件监听器的运行时 UPnP/NAT-PMP 端口映射;自动端口映射默认开启。
@@ -260,6 +263,7 @@ event:
DhcpIpv4Conflicted: DHCP IPv4地址冲突
PortForwardAdded: 端口转发添加
ProxyCidrsUpdated: 子网代理CIDR更新
UdpBroadcastRelayStartResult: UDP广播中继启动结果
web:
login:
@@ -159,6 +159,9 @@ disable_tcp_hole_punching_help: Disable tcp hole punching
disable_udp_hole_punching: Disable UDP Hole Punching
disable_udp_hole_punching_help: Disable udp hole punching
enable_udp_broadcast_relay: UDP Broadcast Relay
enable_udp_broadcast_relay_help: "Windows only: capture local UDP broadcast packets from physical interfaces and forward them to EasyTier peers. Helps games to find rooms in local network. Requires administrator privileges."
disable_upnp: Disable UPnP
disable_upnp_help: Disable runtime UPnP/NAT-PMP port mapping for eligible listeners; automatic port mapping is enabled by default.
@@ -260,6 +263,7 @@ event:
DhcpIpv4Conflicted: DhcpIpv4Conflicted
PortForwardAdded: PortForwardAdded
ProxyCidrsUpdated: ProxyCidrsUpdated
UdpBroadcastRelayStartResult: UDP Broadcast Relay Start Result
web:
login:
@@ -134,6 +134,7 @@ export interface NetworkConfig {
disable_tcp_hole_punching?: boolean
disable_udp_hole_punching?: boolean
disable_upnp?: boolean
enable_udp_broadcast_relay?: boolean
disable_sym_hole_punching?: boolean
enable_relay_network_whitelist?: boolean
@@ -211,6 +212,7 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
disable_tcp_hole_punching: false,
disable_udp_hole_punching: false,
disable_upnp: false,
enable_udp_broadcast_relay: false,
disable_sym_hole_punching: false,
enable_relay_network_whitelist: false,
relay_network_whitelist: [],
@@ -447,4 +449,6 @@ export enum EventType {
PortForwardAdded = 'PortForwardAdded', // PortForwardConfigPb
ProxyCidrsUpdated = 'ProxyCidrsUpdated', // string[], string[]
UdpBroadcastRelayStartResult = 'UdpBroadcastRelayStartResult', // { capture_backend?: string, error?: string }
}
+1
View File
@@ -365,6 +365,7 @@ mod tests {
let _c = WebClient::new(
connector,
"test",
uuid::Uuid::new_v4(),
"test",
false,
Arc::new(NetworkInstanceManager::new()),
+6 -5
View File
@@ -3,7 +3,7 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier"
version = "2.6.3"
version = "2.6.4"
edition.workspace = true
rust-version.workspace = true
authors = ["kkrainbow"]
@@ -11,6 +11,7 @@ keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"]
license-file = "LICENSE"
readme = "README.md"
build = "build/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -70,6 +71,7 @@ async-stream = "0.3.5"
async-trait = "0.1.74"
dashmap = "6.0"
moka = { version = "0.12", features = ["future"] }
timedmap = "=1.0.1"
# for full-path zero-copy
@@ -315,15 +317,14 @@ jemalloc-sys = { package = "tikv-jemalloc-sys", version = "0.6.0", features = [
[build-dependencies]
cfg_aliases = "0.2.1"
tonic-build = "0.12"
indoc = "2.0"
globwalk = "0.8.1"
regex = "1"
prost-build = "0.13.5"
prost-wkt-build = "0.6"
easytier-rpc-build = { path = "../easytier-rpc-build", features = [
"internal-namespace",
] }
prost-reflect-build = { version = "0.14.0" }
proc-macro2 = "1"
quote = "1"
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = [
"win7",
] }
+4 -1
View File
@@ -1,3 +1,6 @@
mod rpc;
use crate::rpc::ServiceGenerator;
use cfg_aliases::cfg_aliases;
use prost_wkt_build::{FileDescriptorSet, Message as _};
#[cfg(target_os = "windows")]
@@ -197,7 +200,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
.type_attribute("acl.Rule", "#[serde(default)]")
.type_attribute("acl.GroupInfo", "#[serde(default)]")
.field_attribute(".api.manage.NetworkConfig", "#[serde(default)]")
.service_generator(Box::new(easytier_rpc_build::ServiceGenerator::default()))
.service_generator(Box::new(ServiceGenerator::default()))
.btree_map(["."])
.skip_debug([".common.Ipv4Addr", ".common.Ipv6Addr", ".common.UUID"]);
+720
View File
@@ -0,0 +1,720 @@
#![allow(non_snake_case)]
use indoc::formatdoc;
use proc_macro2::{Ident, TokenStream};
use quote::{format_ident, quote};
use std::str::FromStr;
/// Parses `value` (a Rust type path emitted by prost) into a token stream,
/// panicking with the offending input when it is not valid Rust tokens.
fn parse(value: &str) -> TokenStream {
    match TokenStream::from_str(value) {
        Ok(tokens) => tokens,
        Err(err) => panic!("Failed to parse tokens: {} ({})", value, err),
    }
}
/// Converts the leading proto comments into a sequence of `#[doc = "..."]`
/// attributes, skipping empty lines.
fn doc(comments: &prost_build::Comments) -> TokenStream {
    let lines = comments
        .leading
        .iter()
        .flat_map(|comment| comment.lines())
        .filter(|line| !line.is_empty());
    quote! { #( #[doc = #lines] )* }
}
const NAMESPACE: &str = "crate::proto::rpc_types";
/// Per-method metadata pre-rendered for code generation: identifiers and
/// token streams derived once from a `prost_build::Method` so the generator
/// methods can interpolate them directly with `quote!`.
struct Method {
    /// 1-based wire index of the method within its service (0 is never assigned).
    index: u8,
    /// `#[doc = ...]` attributes rendered from the method's proto comments.
    doc: TokenStream,
    /// Rust method name as an identifier.
    method: Ident,
    /// Identifier for the private `<name>_inner` client helper.
    method_inner: Ident,
    /// Rust method name as a string literal.
    method_str: String,
    /// Proto method name as an identifier (used as the enum variant name).
    method_proto: Ident,
    /// Proto method name as a string literal.
    method_proto_str: String,
    /// Input message type as a parsed token stream.
    Input: TokenStream,
    /// Fully-qualified proto name of the input type.
    Input_proto_str: String,
    /// Output message type as a parsed token stream.
    Output: TokenStream,
    /// Fully-qualified proto name of the output type.
    Output_proto_str: String,
}
impl Method {
    /// Builds the per-method metadata used throughout code generation.
    ///
    /// `index` is the 1-based method index used as the wire discriminant.
    /// Panics if the method uses client or server streaming, which this
    /// generator does not support.
    fn new(index: u8, method: prost_build::Method) -> Self {
        if method.client_streaming {
            panic!(
                "Client streaming not yet supported for method {}",
                method.proto_name
            );
        }
        if method.server_streaming {
            panic!(
                "Server streaming not yet supported for method {}",
                method.proto_name
            );
        }
        // Render identifiers and type token streams once, up front.
        let rendered_doc = doc(&method.comments);
        Self {
            index,
            doc: rendered_doc,
            method: format_ident!("{}", method.name),
            method_inner: format_ident!("{}_inner", method.name),
            method_str: method.name,
            method_proto: format_ident!("{}", method.proto_name),
            method_proto_str: method.proto_name,
            Input: parse(&method.input_type),
            Input_proto_str: method.input_proto_type,
            Output: parse(&method.output_type),
            Output_proto_str: method.output_proto_type,
        }
    }
}
/// Per-service metadata: the pre-rendered identifiers for every generated
/// item (trait, descriptor, server, client, factory, method enum) plus the
/// flattened list of methods.
struct Service {
    /// Token path of the runtime support namespace (parsed from `NAMESPACE`).
    namespace: TokenStream,
    /// `#[doc = ...]` attributes rendered from the service's proto comments.
    doc: TokenStream,
    /// Trait identifier, e.g. `Foo`.
    Service: Ident,
    /// Descriptor struct identifier, e.g. `FooDescriptor`.
    ServiceDescriptor: Ident,
    /// Server struct identifier, e.g. `FooServer`.
    ServiceServer: Ident,
    /// Client struct identifier, e.g. `FooClient`.
    ServiceClient: Ident,
    /// Client-factory struct identifier, e.g. `FooClientFactory`.
    ServiceClientFactory: Ident,
    /// Method-descriptor enum identifier, e.g. `FooMethodDescriptor`.
    ServiceMethodDescriptor: Ident,
    /// Service name as a string literal.
    Service_str: String,
    /// Proto service name as a string literal.
    Service_proto_str: String,
    /// Proto package name as a string literal.
    Service_package_str: String,
    /// Metadata for each RPC method, in declaration order.
    methods: Vec<Method>,
}
impl Service {
    /// Flattens a `prost_build::Service` into pre-rendered identifiers and
    /// per-method metadata. Method indices are assigned starting at 1, in
    /// declaration order.
    fn new(service: prost_build::Service) -> Self {
        let methods = service
            .methods
            .into_iter()
            .enumerate()
            .map(|(i, method)| Method::new((i + 1) as u8, method))
            .collect();
        Self {
            namespace: parse(NAMESPACE),
            doc: doc(&service.comments),
            Service: format_ident!("{}", service.name),
            ServiceDescriptor: format_ident!("{}Descriptor", service.name),
            ServiceServer: format_ident!("{}Server", service.name),
            ServiceClient: format_ident!("{}Client", service.name),
            ServiceClientFactory: format_ident!("{}ClientFactory", service.name),
            ServiceMethodDescriptor: format_ident!("{}MethodDescriptor", service.name),
            Service_str: service.name,
            Service_proto_str: service.proto_name,
            Service_package_str: service.package,
            methods,
        }
    }
    /// Generates the public service trait: one async method per RPC plus a
    /// default `json_call_method` that dispatches by Rust *or* proto method
    /// name and converts input/output through `serde_json`.
    fn trait_Service(&self) -> TokenStream {
        let Self {
            namespace,
            doc,
            Service,
            methods,
            ..
        } = self;
        // One match arm per method; accepts either the Rust name or the
        // proto name as the lookup key.
        let match_json_call_method = methods.iter().map(
            |Method {
                 method,
                 method_str,
                 method_proto_str,
                 Input,
                 ..
             }| {
                quote! {
                    #method_str | #method_proto_str => {
                        let req: #Input = ::serde_json::from_value(json)
                            .map_err(|e| #namespace::error::Error::MalformatRpcPacket(format!("json error: {}", e)))?;
                        let resp = self.#method(ctrl, req).await?;
                        Ok(::serde_json::to_value(resp)
                            .map_err(|e| #namespace::error::Error::MalformatRpcPacket(format!("json error: {}", e)))?)
                    }
                }
            },
        );
        let methods = methods.iter().map(
            |Method {
                 doc,
                 method,
                 Input,
                 Output,
                 ..
             }| {
                quote! {
                    #doc
                    async fn #method(&self, ctrl: Self::Controller, input: #Input) -> #namespace::error::Result<#Output>;
                }
            },
        );
        quote! {
            #doc
            #[async_trait::async_trait]
            #[auto_impl::auto_impl(&, Arc, Box)]
            pub trait #Service {
                type Controller: #namespace::controller::Controller;
                #(#methods)*
                async fn json_call_method(
                    &self,
                    ctrl: Self::Controller,
                    method: &str,
                    json: ::serde_json::Value,
                ) -> #namespace::error::Result<::serde_json::Value> {
                    match method {
                        #(#match_json_call_method)*
                        // Index 0 here: the lookup failed by *name*, so no
                        // numeric method index applies.
                        _ => Err(#namespace::error::Error::InvalidMethodIndex(0, method.to_string())),
                    }
                }
            }
        }
    }
    /// Generates `impl Service for Weak<T>`: each call upgrades the weak
    /// reference and returns `Error::Shutdown` when the service is gone.
    fn impl_Service_for_Weak(&self) -> TokenStream {
        let Self {
            namespace,
            Service,
            methods,
            ..
        } = self;
        let methods = methods.iter().map(
            |Method {
                 method,
                 Input,
                 Output,
                 ..
             }| {
                quote! {
                    async fn #method(&self, ctrl: Self::Controller, input: #Input) -> #namespace::error::Result<#Output> {
                        let Some(service) = self.upgrade() else {
                            return Err(#namespace::error::Error::Shutdown);
                        };
                        service.#method(ctrl, input).await
                    }
                }
            },
        );
        quote! {
            #[async_trait::async_trait]
            impl<T> #Service for ::std::sync::Weak<T>
            where
                T: Send + Sync + 'static,
                ::std::sync::Arc<T>: #Service,
            {
                type Controller = <::std::sync::Arc<T> as #Service>::Controller;
                #(#methods)*
            }
        }
    }
    /// Generates the unit descriptor struct and its `ServiceDescriptor` impl
    /// (name, proto name, package, and the list of method descriptors).
    fn struct_ServiceDescriptor(&self) -> TokenStream {
        let Self {
            namespace,
            ServiceDescriptor,
            ServiceMethodDescriptor,
            Service_str,
            Service_proto_str,
            Service_package_str,
            methods,
            ..
        } = self;
        let doc = format!("A service descriptor for a `{}`.", Service_str);
        let methods = methods.iter().map(|Method { method_proto, .. }| {
            quote! { #ServiceMethodDescriptor::#method_proto, }
        });
        quote! {
            #[doc = #doc]
            #[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, Default)]
            pub struct #ServiceDescriptor;
            impl #namespace::descriptor::ServiceDescriptor for #ServiceDescriptor {
                type Method = #ServiceMethodDescriptor;
                fn name(&self) -> &'static str { #Service_str }
                fn proto_name(&self) -> &'static str { #Service_proto_str }
                fn package(&self) -> &'static str { #Service_package_str }
                fn methods(&self) -> &'static [Self::Method] {
                    &[ #(#methods)* ]
                }
            }
        }
    }
    /// Generates the `#[repr(u8)]` method-descriptor enum (one variant per
    /// method, discriminant = 1-based index) plus its `MethodDescriptor` and
    /// `TryFrom<u8>` impls.
    fn enum_ServiceMethodDescriptor(&self) -> TokenStream {
        let Self {
            ServiceMethodDescriptor,
            Service_str,
            methods,
            ..
        } = self;
        let doc = formatdoc! {"
            Methods available on a `{Service_str}`.
            This can be used as a key when routing requests for servers/clients of a `{Service_str}`.
        "};
        let variants = methods.iter().map(
            |Method {
                 method_proto,
                 index,
                 ..
             }| {
                quote! { #method_proto = #index, }
            },
        );
        let impl_MethodDescriptor = self.impl_MethodDescriptor_for_ServiceMethodDescriptor();
        let impl_TryFrom = self.impl_TryFrom_for_ServiceMethodDescriptor();
        quote! {
            #[doc = #doc]
            #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
            #[repr(u8)]
            pub enum #ServiceMethodDescriptor {
                #(#variants)*
            }
            #impl_MethodDescriptor
            #impl_TryFrom
        }
    }
    /// Generates the `MethodDescriptor` impl for the method enum: per-variant
    /// match arms for names, proto names, input/output `TypeId`s and proto
    /// type names, plus `index` via the `repr(u8)` discriminant.
    fn impl_MethodDescriptor_for_ServiceMethodDescriptor(&self) -> TokenStream {
        let Self {
            namespace,
            ServiceMethodDescriptor,
            methods,
            ..
        } = self;
        let name = {
            let arms = methods.iter().map(
                |Method {
                     method_proto,
                     method_str,
                     ..
                 }| {
                    quote! { #ServiceMethodDescriptor::#method_proto => #method_str, }
                },
            );
            quote! {
                fn name(&self) -> &'static str {
                    match *self {
                        #(#arms)*
                    }
                }
            }
        };
        let proto_name = {
            let arms = methods.iter().map(
                |Method {
                     method_proto,
                     method_proto_str,
                     ..
                 }| {
                    quote! { #ServiceMethodDescriptor::#method_proto => #method_proto_str, }
                },
            );
            quote! {
                fn proto_name(&self) -> &'static str {
                    match *self {
                        #(#arms)*
                    }
                }
            }
        };
        let input_type = {
            let arms = methods.iter().map(|Method { method_proto, Input, .. }| {
                quote! { #ServiceMethodDescriptor::#method_proto => ::std::any::TypeId::of::<#Input>(), }
            });
            quote! {
                fn input_type(&self) -> ::std::any::TypeId {
                    match *self {
                        #(#arms)*
                    }
                }
            }
        };
        let input_proto_type = {
            let arms = methods.iter().map(
                |Method {
                     method_proto,
                     Input_proto_str,
                     ..
                 }| {
                    quote! { #ServiceMethodDescriptor::#method_proto => #Input_proto_str, }
                },
            );
            quote! {
                fn input_proto_type(&self) -> &'static str {
                    match *self {
                        #(#arms)*
                    }
                }
            }
        };
        let output_type = {
            let arms = methods.iter().map(|Method { method_proto, Output, .. }| {
                quote! { #ServiceMethodDescriptor::#method_proto => ::std::any::TypeId::of::<#Output>(), }
            });
            quote! {
                fn output_type(&self) -> ::std::any::TypeId {
                    match *self {
                        #(#arms)*
                    }
                }
            }
        };
        let output_proto_type = {
            let arms = methods.iter().map(
                |Method {
                     method_proto,
                     Output_proto_str,
                     ..
                 }| {
                    quote! { #ServiceMethodDescriptor::#method_proto => #Output_proto_str, }
                },
            );
            quote! {
                fn output_proto_type(&self) -> &'static str {
                    match *self {
                        #(#arms)*
                    }
                }
            }
        };
        quote! {
            impl #namespace::descriptor::MethodDescriptor for #ServiceMethodDescriptor {
                #name
                #proto_name
                #input_type
                #input_proto_type
                #output_type
                #output_proto_type
                fn index(&self) -> u8 {
                    *self as u8
                }
            }
        }
    }
    /// Generates `TryFrom<u8>` for the method enum; unknown indices map to
    /// `Error::InvalidMethodIndex` carrying the value and the service name.
    fn impl_TryFrom_for_ServiceMethodDescriptor(&self) -> TokenStream {
        let Self {
            namespace,
            ServiceMethodDescriptor,
            Service_str,
            methods,
            ..
        } = self;
        let arms = methods.iter().map(
            |Method {
                 method_proto,
                 index,
                 ..
             }| {
                quote! { #index => Ok(#ServiceMethodDescriptor::#method_proto), }
            },
        );
        quote! {
            impl std::convert::TryFrom<u8> for #ServiceMethodDescriptor {
                type Error = #namespace::error::Error;
                fn try_from(value: u8) -> #namespace::error::Result<Self> {
                    match value {
                        #(#arms)*
                        _ => Err(#namespace::error::Error::InvalidMethodIndex(value, #Service_str.to_string())),
                    }
                }
            }
        }
    }
    /// Generates the client struct wrapping a `Handler`, its constructor, and
    /// (via the two helper generators below) its inherent and trait impls.
    fn struct_ServiceClient(&self) -> TokenStream {
        let Self {
            namespace,
            ServiceDescriptor,
            ServiceClient,
            Service_str,
            ..
        } = self;
        let doc = formatdoc! {"
            A client for a `{Service_str}`.
            This implements the `{Service_str}` trait by dispatching all method calls to the supplied `Handler`.
        "};
        let impl_service_client = self.impl_ServiceClient();
        let impl_service_for_client = self.impl_Service_for_ServiceClient();
        quote! {
            #[doc = #doc]
            #[derive(Clone, Debug)]
            pub struct #ServiceClient<H>(H) where H: #namespace::handler::Handler;
            impl<H> #ServiceClient<H> where H: #namespace::handler::Handler<Descriptor = #ServiceDescriptor> {
                /// Creates a new client instance that delegates all method calls to the supplied handler.
                pub fn new(handler: H) -> Self {
                    Self(handler)
                }
            }
            #impl_service_client
            #impl_service_for_client
        }
    }
    /// Generates the client's private `*_inner` associated functions, each of
    /// which routes one call through `__rt::call_method` with its descriptor.
    fn impl_ServiceClient(&self) -> TokenStream {
        let Self {
            namespace,
            ServiceClient,
            ServiceDescriptor,
            ServiceMethodDescriptor,
            methods,
            ..
        } = self;
        let methods = methods.iter().map(
            |Method {
                 method_inner,
                 method_proto,
                 Input,
                 Output,
                 ..
             }| {
                quote! {
                    async fn #method_inner(handler: H, ctrl: H::Controller, input: #Input) -> #namespace::error::Result<#Output> {
                        #namespace::__rt::call_method(handler, ctrl, #ServiceMethodDescriptor::#method_proto, input).await
                    }
                }
            },
        );
        quote! {
            impl<H> #ServiceClient<H> where H: #namespace::handler::Handler<Descriptor = #ServiceDescriptor> {
                #(#methods)*
            }
        }
    }
    /// Generates the service-trait impl for the client, forwarding each
    /// method to its `*_inner` helper with a clone of the handler.
    fn impl_Service_for_ServiceClient(&self) -> TokenStream {
        let Self {
            namespace,
            Service,
            ServiceClient,
            ServiceDescriptor,
            methods,
            ..
        } = self;
        let methods = methods.iter().map(
            |Method {
                 method,
                 method_inner,
                 Input,
                 Output,
                 ..
             }| {
                quote! {
                    async fn #method(&self, ctrl: H::Controller, input: #Input) -> #namespace::error::Result<#Output> {
                        #ServiceClient::#method_inner(self.0.clone(), ctrl, input).await
                    }
                }
            },
        );
        quote! {
            #[async_trait::async_trait]
            impl<H> #Service for #ServiceClient<H> where H: #namespace::handler::Handler<Descriptor = #ServiceDescriptor> {
                type Controller = H::Controller;
                #(#methods)*
            }
        }
    }
    /// Generates the zero-sized client factory implementing
    /// `__rt::RpcClientFactory`, which boxes a new client for a handler.
    fn struct_ServiceClientFactory(&self) -> TokenStream {
        let Self {
            namespace,
            Service,
            ServiceClient,
            ServiceClientFactory,
            ServiceDescriptor,
            ..
        } = self;
        quote! {
            pub struct #ServiceClientFactory<C: #namespace::controller::Controller>(std::marker::PhantomData<C>);
            // Manual `Clone` so `C` need not be `Clone` (PhantomData only).
            impl<C: #namespace::controller::Controller> Clone for #ServiceClientFactory<C> {
                fn clone(&self) -> Self {
                    Self(std::marker::PhantomData)
                }
            }
            impl<C> #namespace::__rt::RpcClientFactory for #ServiceClientFactory<C> where C: #namespace::controller::Controller {
                type Descriptor = #ServiceDescriptor;
                type ClientImpl = Box<dyn #Service<Controller = C> + Send + Sync + 'static>;
                type Controller = C;
                fn new(handler: impl #namespace::handler::Handler<Descriptor = Self::Descriptor, Controller = Self::Controller>) -> Self::ClientImpl {
                    Box::new(#ServiceClient::new(handler))
                }
            }
        }
    }
    /// Generates the server struct: constructors (including a `Weak`-holding
    /// `new_arc`), a `call_inner` that decodes/dispatches/encodes per method,
    /// and the `Handler` impl that routes incoming calls to it.
    fn struct_ServiceServer(&self) -> TokenStream {
        let Self {
            namespace,
            Service,
            ServiceDescriptor,
            ServiceServer,
            ServiceMethodDescriptor,
            Service_str,
            methods,
            ..
        } = self;
        let doc = formatdoc! {"
            A server for a `{Service_str}`.
            This implements the `Server` trait by handling requests and dispatch them to methods on the
            supplied `{Service_str}`.
        "};
        let arms = methods.iter().map(
            |Method {
                 method_proto,
                 method,
                 Input,
                 ..
             }| {
                quote! {
                    #ServiceMethodDescriptor::#method_proto => {
                        let decoded: #Input = #namespace::__rt::decode(input)?;
                        let ret = service.#method(ctrl, decoded).await?;
                        #namespace::__rt::encode(ret)
                    }
                }
            },
        );
        quote! {
            #[doc = #doc]
            #[derive(Clone, Debug)]
            pub struct #ServiceServer<A>(A) where A: #Service + Clone + Send + 'static;
            impl<T> #ServiceServer<::std::sync::Weak<T>>
            where
                T: Send + Sync + 'static,
                ::std::sync::Arc<T>: #Service,
            {
                pub fn new_arc(service: ::std::sync::Arc<T>) -> #ServiceServer<::std::sync::Weak<T>> {
                    #ServiceServer(::std::sync::Arc::downgrade(&service))
                }
            }
            impl<A> #ServiceServer<A> where A: #Service + Clone + Send + 'static {
                /// Creates a new server instance that dispatches all calls to the supplied service.
                pub fn new(service: A) -> #ServiceServer<A> {
                    #ServiceServer(service)
                }
                async fn call_inner(
                    service: A,
                    method: #ServiceMethodDescriptor,
                    ctrl: A::Controller,
                    input: ::bytes::Bytes)
                    -> #namespace::error::Result<::bytes::Bytes> {
                    match method {
                        #(#arms)*
                    }
                }
            }
            #[async_trait::async_trait]
            impl<A> #namespace::handler::Handler for #ServiceServer<A>
            where
                A: #Service + Clone + Send + Sync + 'static {
                type Descriptor = #ServiceDescriptor;
                type Controller = A::Controller;
                async fn call(
                    &self,
                    ctrl: A::Controller,
                    method: #ServiceMethodDescriptor,
                    input: ::bytes::Bytes)
                    -> #namespace::error::Result<::bytes::Bytes> {
                    #ServiceServer::call_inner(self.0.clone(), method, ctrl, input).await
                }
            }
        }
    }
}
/// The service generator to be used with `prost-build` to generate RPC
/// implementations targeting the runtime at [`NAMESPACE`]
/// (`crate::proto::rpc_types`).
///
/// Pass an instance to `prost_build::Config::service_generator`; one set of
/// trait/descriptor/client/server items is emitted per proto service.
#[non_exhaustive]
#[derive(Debug, Default)]
pub struct ServiceGenerator;
impl prost_build::ServiceGenerator for ServiceGenerator {
fn generate(&mut self, service: prost_build::Service, buf: &mut String) {
let info = Service::new(service);
let trait_Service = info.trait_Service();
let impl_Service_for_Weak = info.impl_Service_for_Weak();
let struct_ServiceDescriptor = info.struct_ServiceDescriptor();
let enum_ServiceMethodDescriptor = info.enum_ServiceMethodDescriptor();
let struct_ServiceClient = info.struct_ServiceClient();
let struct_ServiceClientFactory = info.struct_ServiceClientFactory();
let struct_ServiceServer = info.struct_ServiceServer();
let tokens = quote! {
#trait_Service
#impl_Service_for_Weak
#struct_ServiceDescriptor
#enum_ServiceMethodDescriptor
#struct_ServiceClient
#struct_ServiceClientFactory
#struct_ServiceServer
};
buf.push('\n');
buf.push_str(&tokens.to_string());
buf.push('\n');
}
}
+8 -2
View File
@@ -12,9 +12,9 @@ core_clap:
仅用户名:--config-server admin,将使用官方的服务器
machine_id:
en: |+
the machine id to identify this machine, used for config recovery after disconnection, must be unique and fixed. default is from system.
the machine id to identify this machine, used for config recovery after disconnection, must be unique and fixed. by default it is loaded from persisted local state; on first start it may be migrated from system information or generated, then remains fixed.
zh-CN: |+
Web 配置服务器通过 machine id 来识别机器,用于断线重连后的配置恢复,需要保证唯一且固定不变。默认从系统获得
Web 配置服务器通过 machine id 来识别机器,用于断线重连后的配置恢复,需要保证唯一且固定不变。默认从本地持久化状态读取;首次启动时可能基于系统信息迁移或生成,之后保持固定不变
config_file:
en: "path to the config file, NOTE: the options set by cmdline args will override options in config file"
zh-CN: "配置文件路径,注意:命令行中的配置的选项会覆盖配置文件中的选项"
@@ -184,6 +184,9 @@ core_clap:
disable_upnp:
en: "disable runtime UPnP/NAT-PMP port mapping for eligible listeners; automatic port mapping is enabled by default"
zh-CN: "禁用符合条件监听器的运行时 UPnP/NAT-PMP 端口映射;自动端口映射默认开启"
enable_udp_broadcast_relay:
en: "Windows only: capture local UDP broadcast packets from physical interfaces and forward them to EasyTier peers. Helps games to find rooms in local network. Requires administrator privileges."
zh-CN: "仅 Windows:捕获物理网卡上的本机 UDP 广播包并转发给 EasyTier 对等节点,帮助局域网游戏发现房间。需要管理员权限。"
relay_all_peer_rpc:
en: "relay all peer rpc packets, even if the peer is not in the relay network whitelist. this can help peers not in relay network whitelist to establish p2p connection."
zh-CN: "转发所有对等节点的RPC数据包,即使对等节点不在转发网络白名单中。这可以帮助白名单外网络中的对等节点建立P2P连接。"
@@ -274,6 +277,9 @@ core_clap:
check_config:
en: Check config validity without starting the network
zh-CN: 检查配置文件的有效性并退出
daemon:
en: Run in daemon mode
zh-CN: 以守护进程模式运行
file_log_size_mb:
en: "per file log size in MB, default is 100MB"
zh-CN: "单个文件日志大小,单位 MB,默认值为 100MB"
+2 -3
View File
@@ -11,9 +11,8 @@ use windows::{
NET_FW_RULE_DIR_OUT,
},
Networking::WinSock::{
IP_UNICAST_IF, IPPROTO_IP, IPPROTO_IPV6, IPV6_UNICAST_IF, SIO_UDP_CONNRESET,
SO_EXCLUSIVEADDRUSE, SOCKET, SOCKET_ERROR, SOL_SOCKET, WSAGetLastError, WSAIoctl,
htonl, setsockopt,
IP_UNICAST_IF, IPPROTO_IP, IPPROTO_IPV6, IPV6_UNICAST_IF, SIO_UDP_CONNRESET, SOCKET,
SOCKET_ERROR, WSAGetLastError, WSAIoctl, htonl, setsockopt,
},
System::Com::{
CLSCTX_ALL, COINIT_MULTITHREADED, CoCreateInstance, CoInitializeEx, CoUninitialize,
+6 -132
View File
@@ -28,55 +28,6 @@ use super::env_parser;
pub type Flags = crate::proto::common::FlagsInConfig;
pub const DEFAULT_CONNECTION_PRIORITY: u32 = 0;
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(untagged)]
enum ListenerConfigDef {
Url(url::Url),
Config {
url: url::Url,
#[serde(default)]
priority: u32,
},
}
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
#[serde(from = "ListenerConfigDef")]
pub struct ListenerConfig {
pub url: url::Url,
pub priority: u32,
}
impl ListenerConfig {
pub fn new(url: url::Url, priority: u32) -> Self {
Self { url, priority }
}
pub fn with_default_priority(url: url::Url) -> Self {
Self::new(url, DEFAULT_CONNECTION_PRIORITY)
}
}
impl From<url::Url> for ListenerConfig {
fn from(url: url::Url) -> Self {
Self::with_default_priority(url)
}
}
impl From<ListenerConfigDef> for ListenerConfig {
fn from(def: ListenerConfigDef) -> Self {
match def {
ListenerConfigDef::Url(url) => Self::with_default_priority(url),
ListenerConfigDef::Config { url, priority } => Self::new(url, priority),
}
}
}
fn listener_config_urls(listeners: Vec<ListenerConfig>) -> Vec<url::Url> {
listeners.into_iter().map(|listener| listener.url).collect()
}
pub fn gen_default_flags() -> Flags {
#[allow(deprecated)]
Flags {
@@ -121,6 +72,7 @@ pub fn gen_default_flags() -> Flags {
instance_recv_bps_limit: u64::MAX,
disable_upnp: false,
disable_relay_data: false,
enable_udp_broadcast_relay: false,
}
}
@@ -245,7 +197,6 @@ pub trait ConfigLoader: Send + Sync {
fn set_network_identity(&self, identity: NetworkIdentity);
fn get_listener_uris(&self) -> Vec<url::Url>;
fn get_listener_configs(&self) -> Vec<ListenerConfig>;
fn get_peers(&self) -> Vec<PeerConfig>;
fn set_peers(&self, peers: Vec<PeerConfig>);
@@ -255,8 +206,6 @@ pub trait ConfigLoader: Send + Sync {
fn get_mapped_listeners(&self) -> Vec<url::Url>;
fn set_mapped_listeners(&self, listeners: Option<Vec<url::Url>>);
fn get_mapped_listener_configs(&self) -> Vec<ListenerConfig>;
fn set_mapped_listener_configs(&self, listeners: Option<Vec<ListenerConfig>>);
fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig>;
fn set_vpn_portal_config(&self, config: VpnPortalConfig);
@@ -455,8 +404,6 @@ impl Default for NetworkIdentity {
pub struct PeerConfig {
pub uri: url::Url,
pub peer_public_key: Option<String>,
#[serde(default)]
pub priority: u32,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
@@ -588,8 +535,8 @@ struct Config {
ipv6_public_addr_prefix: Option<String>,
dhcp: Option<bool>,
network_identity: Option<NetworkIdentity>,
listeners: Option<Vec<ListenerConfig>>,
mapped_listeners: Option<Vec<ListenerConfig>>,
listeners: Option<Vec<url::Url>>,
mapped_listeners: Option<Vec<url::Url>>,
exit_nodes: Option<Vec<IpAddr>>,
peer: Option<Vec<PeerConfig>>,
@@ -903,10 +850,6 @@ impl ConfigLoader for TomlConfigLoader {
}
fn get_listener_uris(&self) -> Vec<url::Url> {
listener_config_urls(self.get_listener_configs())
}
fn get_listener_configs(&self) -> Vec<ListenerConfig> {
self.config
.lock()
.unwrap()
@@ -924,29 +867,14 @@ impl ConfigLoader for TomlConfigLoader {
}
fn get_listeners(&self) -> Option<Vec<url::Url>> {
self.config
.lock()
.unwrap()
.listeners
.clone()
.map(listener_config_urls)
self.config.lock().unwrap().listeners.clone()
}
fn set_listeners(&self, listeners: Vec<url::Url>) {
self.config.lock().unwrap().listeners =
Some(listeners.into_iter().map(Into::into).collect());
self.config.lock().unwrap().listeners = Some(listeners);
}
fn get_mapped_listeners(&self) -> Vec<url::Url> {
listener_config_urls(self.get_mapped_listener_configs())
}
fn set_mapped_listeners(&self, listeners: Option<Vec<url::Url>>) {
self.config.lock().unwrap().mapped_listeners =
listeners.map(|listeners| listeners.into_iter().map(Into::into).collect());
}
fn get_mapped_listener_configs(&self) -> Vec<ListenerConfig> {
self.config
.lock()
.unwrap()
@@ -955,7 +883,7 @@ impl ConfigLoader for TomlConfigLoader {
.unwrap_or_default()
}
fn set_mapped_listener_configs(&self, listeners: Option<Vec<ListenerConfig>>) {
fn set_mapped_listeners(&self, listeners: Option<Vec<url::Url>>) {
self.config.lock().unwrap().mapped_listeners = listeners;
}
@@ -1475,60 +1403,6 @@ members = ["admin"]
assert_eq!(group.members, vec!["admin"]);
}
// Verifies that `listeners`, `mapped_listeners`, and `[[peer]]` entries
// accept both the legacy bare-URL form and the structured
// `{ url, priority }` form: the default priority applies when omitted,
// explicit priorities are preserved, and dump()/reload round-trips
// everything unchanged.
#[test]
fn test_listener_priority_config_supports_old_and_structured_values() {
    let config = TomlConfigLoader::new_from_str(
        r#"
listeners = [
"tcp://0.0.0.0:11010",
{ url = "udp://0.0.0.0:11010", priority = 80 },
]
mapped_listeners = [
"tcp://example.com:11010",
{ url = "tcp://frps.example.com:30001", priority = 100 },
]
[[peer]]
uri = "tcp://proxy.example.com:443"
priority = 100
[[peer]]
uri = "tcp://normal.example.com:11010"
"#,
    )
    .unwrap();
    // Bare-URL listener gets the default priority; the structured entry
    // keeps its explicit value.
    let listeners = config.get_listener_configs();
    assert_eq!(listeners[0].url.to_string(), "tcp://0.0.0.0:11010");
    assert_eq!(listeners[0].priority, DEFAULT_CONNECTION_PRIORITY);
    assert_eq!(listeners[1].url.to_string(), "udp://0.0.0.0:11010");
    assert_eq!(listeners[1].priority, 80);
    // Same rules apply to mapped listeners.
    let mapped_listeners = config.get_mapped_listener_configs();
    assert_eq!(
        mapped_listeners[0].url.to_string(),
        "tcp://example.com:11010"
    );
    assert_eq!(mapped_listeners[0].priority, DEFAULT_CONNECTION_PRIORITY);
    assert_eq!(
        mapped_listeners[1].url.to_string(),
        "tcp://frps.example.com:30001"
    );
    assert_eq!(mapped_listeners[1].priority, 100);
    // And to peers (structured first, bare second).
    let peers = config.get_peers();
    assert_eq!(peers[0].uri.to_string(), "tcp://proxy.example.com:443");
    assert_eq!(peers[0].priority, 100);
    assert_eq!(peers[1].uri.to_string(), "tcp://normal.example.com:11010");
    assert_eq!(peers[1].priority, DEFAULT_CONNECTION_PRIORITY);
    // Round-trip: dumping and reloading must preserve all parsed values.
    let dumped = config.dump();
    let reloaded = TomlConfigLoader::new_from_str(&dumped).unwrap();
    assert_eq!(reloaded.get_listener_configs(), listeners);
    assert_eq!(reloaded.get_mapped_listener_configs(), mapped_listeners);
    assert_eq!(reloaded.get_peers(), peers);
}
#[test]
fn test_network_config_source_user_is_implicit() {
let config = TomlConfigLoader::default();
-2
View File
@@ -23,8 +23,6 @@ define_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS, u64, 1000);
define_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, u64, 10);
define_global_var!(MACHINE_UID, Option<String>, None);
define_global_var!(MAX_DIRECT_CONNS_PER_PEER_IN_FOREIGN_NETWORK, u32, 3);
define_global_var!(DIRECT_CONNECT_TO_PUBLIC_SERVER, bool, true);
+2 -1
View File
@@ -1,5 +1,4 @@
use std::{io, result};
use thiserror::Error;
use crate::tunnel;
@@ -55,4 +54,6 @@ pub enum Error {
pub type Result<T> = result::Result<T, Error>;
pub type ErrorCollection = crate::utils::error::ErrorCollection<Error>;
// impl From for std::
+14 -22
View File
@@ -11,7 +11,7 @@ use dashmap::DashMap;
use super::{
PeerId,
config::{ConfigLoader, DEFAULT_CONNECTION_PRIORITY, Flags, ListenerConfig},
config::{ConfigLoader, Flags},
netns::NetNS,
network::IPCollector,
stun::{StunInfoCollector, StunInfoCollectorTrait},
@@ -77,6 +77,11 @@ pub enum GlobalCtxEvent {
ProxyCidrsUpdated(Vec<cidr::Ipv4Cidr>, Vec<cidr::Ipv4Cidr>), // (added, removed)
UdpBroadcastRelayStartResult {
capture_backend: Option<String>,
error: Option<String>,
},
CredentialChanged,
}
@@ -212,7 +217,7 @@ pub struct GlobalCtx {
stun_info_collection: Mutex<Arc<dyn StunInfoCollectorTrait>>,
running_listeners: Mutex<Vec<ListenerConfig>>,
running_listeners: Mutex<Vec<url::Url>>,
advertised_ipv6_public_addr_prefix: Mutex<Option<cidr::Ipv6Cidr>>,
flags: ArcSwap<Flags>,
@@ -509,28 +514,13 @@ impl GlobalCtx {
}
pub fn get_running_listeners(&self) -> Vec<url::Url> {
self.running_listeners
.lock()
.unwrap()
.iter()
.map(|listener| listener.url.clone())
.collect()
}
pub fn get_running_listener_configs(&self) -> Vec<ListenerConfig> {
self.running_listeners.lock().unwrap().clone()
}
pub fn add_running_listener(&self, url: url::Url) {
self.add_running_listener_with_priority(url, DEFAULT_CONNECTION_PRIORITY);
}
pub fn add_running_listener_with_priority(&self, url: url::Url, priority: u32) {
let mut l = self.running_listeners.lock().unwrap();
if let Some(listener) = l.iter_mut().find(|listener| listener.url == url) {
listener.priority = priority;
} else {
l.push(ListenerConfig::new(url, priority));
if !l.contains(&url) {
l.push(url);
}
}
@@ -759,9 +749,11 @@ impl GlobalCtx {
}
fn is_port_in_running_listeners(&self, port: u16, is_udp: bool) -> bool {
self.running_listeners.lock().unwrap().iter().any(|x| {
x.url.port() == Some(port) && matches_protocol!(&x.url, Protocol::UDP) == is_udp
})
self.running_listeners
.lock()
.unwrap()
.iter()
.any(|x| x.port() == Some(port) && matches_protocol!(x, Protocol::UDP) == is_udp)
}
#[tracing::instrument(ret, skip(self))]
+596
View File
@@ -0,0 +1,596 @@
use std::{
env,
ffi::OsString,
io::Write as _,
path::{Path, PathBuf},
time::{Duration, Instant},
};
use anyhow::Context as _;
#[cfg(unix)]
use nix::{
errno::Errno,
fcntl::{Flock, FlockArg},
};
/// Inputs controlling how [`resolve_machine_id`] derives this node's id.
#[derive(Debug, Clone, Default)]
pub struct MachineIdOptions {
    /// An id supplied explicitly (e.g. by CLI/config). Used verbatim if it
    /// parses as a UUID, otherwise hashed into one; never persisted.
    pub explicit_machine_id: Option<String>,
    /// Directory holding the persisted `machine_id` state file. When `None`,
    /// a platform-specific default directory is used.
    pub state_dir: Option<PathBuf>,
}
/// Resolves the stable machine id for this node.
///
/// Resolution order (first hit wins):
/// 1. An explicitly supplied id, parsed as a UUID or hashed into one.
/// 2. The id persisted in the state file (under `opts.state_dir` or the
///    platform default directory).
/// 3. A legacy `et_machine_id` file next to the executable; if found it is
///    migrated into the state file.
/// 4. A hash of the legacy machine-uid seed — only when the state directory
///    already existed with content (i.e. an upgraded install), per
///    `should_attempt_legacy_machine_uid_migration`.
/// 5. A newly derived id (random if no hardware seed exists), persisted for
///    future runs.
pub fn resolve_machine_id(opts: &MachineIdOptions) -> anyhow::Result<uuid::Uuid> {
    if let Some(explicit_machine_id) = opts.explicit_machine_id.as_deref() {
        return Ok(parse_or_hash_machine_id(explicit_machine_id));
    }
    let state_file = resolve_machine_id_state_file(opts.state_dir.as_deref())?;
    // Must be evaluated BEFORE the state file is created below: the check
    // inspects whether the state directory already has any content.
    let allow_legacy_machine_uid_migration =
        should_attempt_legacy_machine_uid_migration(&state_file);
    if let Some(machine_id) = read_state_machine_id(&state_file)? {
        return Ok(machine_id);
    }
    if let Some(machine_id) = read_legacy_machine_id_file() {
        return persist_machine_id(&state_file, machine_id);
    }
    if allow_legacy_machine_uid_migration
        && let Some(machine_id) = resolve_legacy_machine_uid_hash()
    {
        return persist_machine_id(&state_file, machine_id);
    }
    // Fresh install: derive from the hardware seed, or fall back to random.
    let machine_id = resolve_new_machine_id().unwrap_or_else(uuid::Uuid::new_v4);
    persist_machine_id(&state_file, machine_id)
}
/// Uses a well-formed UUID verbatim; any other string is deterministically
/// hashed into a UUID instead.
fn parse_or_hash_machine_id(raw: &str) -> uuid::Uuid {
    uuid::Uuid::parse_str(raw.trim()).unwrap_or_else(|_| digest_uuid_from_str(raw))
}
/// Deterministically maps an arbitrary string to a UUID: fills 16 bytes via
/// the tunnel digest helper (with an empty salt) and interprets them as the
/// UUID's raw bytes.
fn digest_uuid_from_str(raw: &str) -> uuid::Uuid {
    let mut b = [0u8; 16];
    crate::tunnel::generate_digest_from_str("", raw, &mut b);
    uuid::Uuid::from_bytes(b)
}
/// Returns the full path of the `machine_id` state file, defaulting the
/// directory per platform when none was supplied.
fn resolve_machine_id_state_file(state_dir: Option<&Path>) -> anyhow::Result<PathBuf> {
    let dir = if let Some(dir) = state_dir {
        dir.to_path_buf()
    } else {
        default_machine_id_state_dir()?
    };
    Ok(dir.join("machine_id"))
}
/// Treats an empty `OsString` the same as an absent one.
fn non_empty_os_string(value: Option<OsString>) -> Option<OsString> {
    match value {
        Some(v) if !v.is_empty() => Some(v),
        _ => None,
    }
}
/// Linux default state directory, in XDG preference order:
/// `$XDG_DATA_HOME/easytier`, then `$HOME/.local/share/easytier`, then the
/// system-wide `/var/lib/easytier`. Empty env values count as unset.
#[cfg(target_os = "linux")]
fn default_linux_machine_id_state_dir(
    xdg_data_home: Option<OsString>,
    home: Option<OsString>,
) -> PathBuf {
    match (
        non_empty_os_string(xdg_data_home),
        non_empty_os_string(home),
    ) {
        (Some(xdg), _) => PathBuf::from(xdg).join("easytier"),
        (None, Some(home)) => PathBuf::from(home)
            .join(".local")
            .join("share")
            .join("easytier"),
        (None, None) => PathBuf::from("/var/lib/easytier"),
    }
}
/// Picks the default directory for the machine-id state file per platform.
///
/// Android has no sensible process-wide default (sandboxed storage), so
/// callers there must pass an explicit `state_dir`; other unsupported
/// platforms error out likewise.
fn default_machine_id_state_dir() -> anyhow::Result<PathBuf> {
    cfg_select! {
        // Linux: XDG data dir, $HOME fallback, then /var/lib (see
        // default_linux_machine_id_state_dir).
        target_os = "linux" => Ok(default_linux_machine_id_state_dir(
            env::var_os("XDG_DATA_HOME"),
            env::var_os("HOME"),
        )),
        // Plain macOS builds use the per-user Application Support directory;
        // the network-extension build ("macos-ne" feature) is excluded.
        all(target_os = "macos", not(feature = "macos-ne")) => {
            let home = non_empty_os_string(env::var_os("HOME"))
                .ok_or_else(|| anyhow::anyhow!("HOME is not set, cannot resolve machine id state directory"))?;
            Ok(PathBuf::from(home)
                .join("Library")
                .join("Application Support")
                .join("com.easytier"))
        },
        target_os = "windows" => {
            let local_app_data = non_empty_os_string(env::var_os("LOCALAPPDATA")).ok_or_else(|| {
                anyhow::anyhow!("LOCALAPPDATA is not set, cannot resolve machine id state directory")
            })?;
            Ok(PathBuf::from(local_app_data).join("easytier"))
        },
        target_os = "freebsd" => {
            let home = non_empty_os_string(env::var_os("HOME"))
                .ok_or_else(|| anyhow::anyhow!("HOME is not set, cannot resolve machine id state directory"))?;
            Ok(PathBuf::from(home).join(".local").join("share").join("easytier"))
        },
        target_os = "android" => {
            anyhow::bail!("machine id state directory must be provided explicitly on Android");
        },
        _ => anyhow::bail!("machine id state directory is unsupported on this platform"),
    }
}
/// Reads the persisted machine id. A missing state file yields `Ok(None)`;
/// an unreadable or unparsable file is an error.
fn read_state_machine_id(path: &Path) -> anyhow::Result<Option<uuid::Uuid>> {
    match read_optional_file(path)? {
        None => Ok(None),
        Some(contents) => uuid::Uuid::parse_str(contents.trim())
            .map(Some)
            .with_context(|| format!("invalid machine id in state file {}", path.display())),
    }
}
/// Reads the legacy `et_machine_id` file that older releases wrote next to
/// the executable, if the executable path can be determined.
fn read_legacy_machine_id_file() -> Option<uuid::Uuid> {
    let path = legacy_machine_id_file_path()?;
    read_legacy_machine_id_file_at(&path)
}
/// Best-effort read of a legacy machine-id file: a missing file is silently
/// `None`; any other read failure or a malformed UUID is logged at warn
/// level and also treated as "no legacy id".
fn read_legacy_machine_id_file_at(path: &Path) -> Option<uuid::Uuid> {
    let contents = std::fs::read_to_string(path)
        .map_err(|err| {
            // NotFound is the expected case on fresh installs; stay quiet.
            if err.kind() != std::io::ErrorKind::NotFound {
                tracing::warn!(
                    path = %path.display(),
                    %err,
                    "ignoring unreadable legacy machine id file"
                );
            }
        })
        .ok()?;
    uuid::Uuid::parse_str(contents.trim())
        .map_err(|err| {
            tracing::warn!(
                path = %path.display(),
                %err,
                "ignoring invalid legacy machine id file"
            );
        })
        .ok()
}
/// Path of the legacy machine-id file: a sibling `et_machine_id` of the
/// running executable, or `None` if the executable path is unavailable.
fn legacy_machine_id_file_path() -> Option<PathBuf> {
    let exe = std::env::current_exe().ok()?;
    Some(exe.with_file_name("et_machine_id"))
}
/// Reads a file to a string, distinguishing "file absent" (`Ok(None)`) from
/// genuine read failures (`Err` with path context).
fn read_optional_file(path: &Path) -> anyhow::Result<Option<String>> {
    match std::fs::read_to_string(path) {
        Ok(contents) => Ok(Some(contents)),
        Err(err) => {
            if err.kind() == std::io::ErrorKind::NotFound {
                Ok(None)
            } else {
                Err(err).with_context(|| format!("failed to read {}", path.display()))
            }
        }
    }
}
/// Decides whether the legacy machine-uid hash may be migrated: only when
/// the state directory already exists and holds at least one readable entry,
/// i.e. this looks like an upgraded install rather than a fresh one.
fn should_attempt_legacy_machine_uid_migration(state_file: &Path) -> bool {
    state_file
        .parent()
        .and_then(|dir| std::fs::read_dir(dir).ok())
        .is_some_and(|mut entries| entries.any(|entry| entry.is_ok()))
}
/// Legacy id derivation: hash the raw machine-uid seed directly — the scheme
/// older releases used — without the extra host data mixed in by
/// `resolve_new_machine_id` on Linux.
fn resolve_legacy_machine_uid_hash() -> Option<uuid::Uuid> {
    machine_uid_seed().map(|seed| digest_uuid_from_str(seed.as_str()))
}
/// Derives a fresh machine id from the hardware uid seed, or `None` when no
/// seed is available on this platform.
fn resolve_new_machine_id() -> Option<uuid::Uuid> {
    let seed = machine_uid_seed()?;
    // On Linux, hostname and MAC addresses are mixed into the seed before
    // hashing (see linux_machine_id_seed); other platforms hash the raw seed.
    #[cfg(target_os = "linux")]
    {
        let seed = linux_machine_id_seed(&seed);
        Some(digest_uuid_from_str(&seed))
    }
    #[cfg(not(target_os = "linux"))]
    {
        Some(digest_uuid_from_str(&seed))
    }
}
// Platforms where the `machine_uid` crate is supported: return its value,
// discarding errors and blank/whitespace-only results.
#[cfg(any(
    target_os = "linux",
    all(target_os = "macos", not(feature = "macos-ne")),
    target_os = "windows",
    target_os = "freebsd"
))]
fn machine_uid_seed() -> Option<String> {
    machine_uid::get()
        .ok()
        .filter(|value| !value.trim().is_empty())
}
// All other platforms: no hardware uid available; callers fall back to a
// random UUID (see resolve_machine_id).
#[cfg(not(any(
    target_os = "linux",
    all(target_os = "macos", not(feature = "macos-ne")),
    target_os = "windows",
    target_os = "freebsd"
)))]
fn machine_uid_seed() -> Option<String> {
    None
}
/// Builds the Linux seed string: the machine uid plus hostname and MAC
/// addresses as newline-separated `key=value` parts; empty components are
/// simply omitted.
#[cfg(target_os = "linux")]
fn linux_machine_id_seed(machine_uid: &str) -> String {
    let mut seed = format!("machine_uid={machine_uid}");
    let raw_hostname = gethostname::gethostname();
    let lossy = raw_hostname.to_string_lossy();
    let hostname = lossy.trim();
    if !hostname.is_empty() {
        seed = format!("{seed}\nhostname={hostname}");
    }
    let macs = collect_linux_mac_addresses();
    if !macs.is_empty() {
        seed = format!("{seed}\nmacs={}", macs.join(","));
    }
    seed
}
/// Collects up to three normalized MAC addresses (lowercase, sorted, deduped)
/// from `/sys/class/net`, skipping loopback and all-zero addresses.
#[cfg(target_os = "linux")]
fn collect_linux_mac_addresses() -> Vec<String> {
    let entries = match std::fs::read_dir("/sys/class/net") {
        Ok(entries) => entries,
        Err(_) => return Vec::new(),
    };
    let mut macs: Vec<String> = entries
        .flatten()
        .filter_map(|entry| {
            let name = entry.file_name().into_string().ok()?;
            if name == "lo" {
                return None;
            }
            let raw = std::fs::read_to_string(entry.path().join("address")).ok()?;
            let address = raw.trim().to_ascii_lowercase();
            // Skip interfaces with no usable hardware address.
            if address.is_empty() || address == "00:00:00:00:00:00" {
                None
            } else {
                Some(address)
            }
        })
        .collect();
    macs.sort();
    macs.dedup();
    // Cap the contribution so hot-pluggable interfaces don't dominate.
    macs.truncate(3);
    macs
}
/// Persists `machine_id` to `path` unless an id is already stored there,
/// returning whichever id ends up on disk.
///
/// Uses double-checked reads around an exclusive lock so concurrent
/// processes racing to initialize the id all converge on the first writer's
/// value.
fn persist_machine_id(path: &Path, machine_id: uuid::Uuid) -> anyhow::Result<uuid::Uuid> {
    // Fast path: another process may already have written the file.
    if let Some(existing) = read_state_machine_id(path)? {
        return Ok(existing);
    }
    let _lock = MachineIdWriteLock::acquire(path)?;
    // Re-check under the lock: a racer may have written between the first
    // read and lock acquisition.
    if let Some(existing) = read_state_machine_id(path)? {
        return Ok(existing);
    }
    write_uuid_file_atomically(path, machine_id)?;
    Ok(machine_id)
}
/// Writes `machine_id` to `path` atomically: write and fsync a unique temp
/// file in the same directory, then rename it over the target so readers
/// never observe a partially written state file.
fn write_uuid_file_atomically(path: &Path, machine_id: uuid::Uuid) -> anyhow::Result<()> {
    let parent = path.parent().ok_or_else(|| {
        anyhow::anyhow!(
            "machine id state file {} has no parent directory",
            path.display()
        )
    })?;
    std::fs::create_dir_all(parent).with_context(|| {
        format!(
            "failed to create machine id state directory {}",
            parent.display()
        )
    })?;
    // Unique temp name (pid + random uuid) so concurrent writers never
    // clobber each other's temp files.
    let tmp_path = parent.join(format!(
        ".machine_id.tmp-{}-{}",
        std::process::id(),
        uuid::Uuid::new_v4()
    ));
    {
        // create_new guarantees we fail rather than reuse a stale temp file.
        let mut file = std::fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(&tmp_path)
            .with_context(|| format!("failed to create {}", tmp_path.display()))?;
        file.write_all(machine_id.to_string().as_bytes())
            .with_context(|| format!("failed to write {}", tmp_path.display()))?;
        // Flush to disk before the rename makes the file visible.
        file.sync_all()
            .with_context(|| format!("failed to flush {}", tmp_path.display()))?;
    }
    if let Err(err) = std::fs::rename(&tmp_path, path) {
        // Best-effort cleanup of the orphaned temp file.
        let _ = std::fs::remove_file(&tmp_path);
        return Err(err).with_context(|| {
            format!(
                "failed to move machine id state file into place at {}",
                path.display()
            )
        });
    }
    Ok(())
}
/// Guard giving exclusive access to the machine-id state file while writing.
struct MachineIdWriteLock {
    /// Unix: an flock-held handle; the lock releases automatically on drop.
    #[cfg(unix)]
    _lock: Flock<std::fs::File>,
    /// Non-Unix fallback: path of a `create_new` lock file, removed on drop.
    #[cfg(not(unix))]
    path: PathBuf,
}
impl MachineIdWriteLock {
    /// Acquires the write lock guarding `path`, creating the state directory
    /// first if needed. Both platform paths poll with a 5-second deadline.
    fn acquire(path: &Path) -> anyhow::Result<Self> {
        let parent = path.parent().ok_or_else(|| {
            anyhow::anyhow!(
                "machine id state file {} has no parent directory",
                path.display()
            )
        })?;
        std::fs::create_dir_all(parent).with_context(|| {
            format!(
                "failed to create machine id state directory {}",
                parent.display()
            )
        })?;
        #[cfg(unix)]
        {
            Self::acquire_unix(path)
        }
        #[cfg(not(unix))]
        {
            Self::acquire_fallback(path)
        }
    }
    /// Unix: take an exclusive flock on a sibling `.lock` file, polling in
    /// non-blocking mode so the deadline can be enforced.
    #[cfg(unix)]
    fn acquire_unix(path: &Path) -> anyhow::Result<Self> {
        let lock_path = path.with_extension("lock");
        let deadline = Instant::now() + Duration::from_secs(5);
        let mut lock_file = std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(false)
            .open(&lock_path)
            .with_context(|| format!("failed to open machine id lock {}", lock_path.display()))?;
        loop {
            match Flock::lock(lock_file, FlockArg::LockExclusiveNonblock) {
                Ok(lock) => return Ok(Self { _lock: lock }),
                // EAGAIN: another process holds the lock; retry until the
                // deadline. Flock::lock consumed the file, so recover it.
                Err((file, Errno::EAGAIN)) => {
                    if Instant::now() >= deadline {
                        anyhow::bail!(
                            "timed out waiting for machine id lock {}",
                            lock_path.display()
                        );
                    }
                    lock_file = file;
                    std::thread::sleep(Duration::from_millis(50));
                }
                Err((_file, err)) => {
                    anyhow::bail!(
                        "failed to acquire machine id lock {}: {}",
                        lock_path.display(),
                        err
                    );
                }
            }
        }
    }
    /// Non-Unix fallback: use atomic `create_new` of the lock file as the
    /// mutex, reaping lock files that look stale (crashed owner) before
    /// giving up at the deadline.
    #[cfg(not(unix))]
    fn acquire_fallback(path: &Path) -> anyhow::Result<Self> {
        let lock_path = path.with_extension("lock");
        let deadline = Instant::now() + Duration::from_secs(5);
        loop {
            match std::fs::OpenOptions::new()
                .write(true)
                .create_new(true)
                .open(&lock_path)
            {
                Ok(mut file) => {
                    // Record the owner pid for debugging; failure to write
                    // it doesn't invalidate the lock.
                    writeln!(file, "pid={}", std::process::id()).ok();
                    return Ok(Self { path: lock_path });
                }
                Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {
                    if should_reap_stale_lock_file(&lock_path) {
                        let _ = std::fs::remove_file(&lock_path);
                        continue;
                    }
                    if Instant::now() >= deadline {
                        anyhow::bail!(
                            "timed out waiting for machine id lock {}",
                            lock_path.display()
                        );
                    }
                    std::thread::sleep(Duration::from_millis(50));
                }
                Err(err) => {
                    return Err(err).with_context(|| {
                        format!("failed to acquire machine id lock {}", lock_path.display())
                    });
                }
            }
        }
    }
}
/// Whether a fallback lock file is old enough (>= 30s since last modified)
/// to be treated as a leftover from a crashed process and deleted.
#[cfg(not(unix))]
fn should_reap_stale_lock_file(lock_path: &Path) -> bool {
    const STALE_LOCK_AGE: Duration = Duration::from_secs(30);
    std::fs::metadata(lock_path)
        .and_then(|metadata| metadata.modified())
        .ok()
        .and_then(|modified| modified.elapsed().ok())
        .is_some_and(|elapsed| elapsed >= STALE_LOCK_AGE)
}
impl Drop for MachineIdWriteLock {
    fn drop(&mut self) {
        // The Unix flock releases itself when the file handle is dropped;
        // only the non-Unix fallback must remove its lock file explicitly.
        #[cfg(not(unix))]
        let _ = std::fs::remove_file(&self.path);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // An explicit id that is already a valid UUID must be used verbatim,
    // not re-hashed.
    #[test]
    fn test_resolve_machine_id_uses_uuid_seed_verbatim() {
        let raw = "33333333-3333-3333-3333-333333333333".to_string();
        let opts = MachineIdOptions {
            explicit_machine_id: Some(raw.clone()),
            state_dir: None,
        };
        assert_eq!(
            resolve_machine_id(&opts).unwrap(),
            uuid::Uuid::parse_str(&raw).unwrap()
        );
    }
    // A pre-existing state file wins over every other derivation source.
    #[test]
    fn test_resolve_machine_id_reads_state_file() {
        let temp_dir = tempfile::tempdir().unwrap();
        let expected = uuid::Uuid::new_v4();
        std::fs::write(temp_dir.path().join("machine_id"), expected.to_string()).unwrap();
        let opts = MachineIdOptions {
            explicit_machine_id: None,
            state_dir: Some(temp_dir.path().to_path_buf()),
        };
        assert_eq!(resolve_machine_id(&opts).unwrap(), expected);
    }
    // Passing a directory where a file is expected triggers a read error,
    // which the legacy reader must swallow (returning None), not propagate.
    #[test]
    fn test_read_legacy_machine_id_file_ignores_read_errors() {
        let temp_dir = tempfile::tempdir().unwrap();
        assert_eq!(read_legacy_machine_id_file_at(temp_dir.path()), None);
    }
    // The atomic writer must leave exactly the UUID string in the target.
    #[test]
    fn test_write_uuid_file_atomically_writes_expected_contents() {
        let temp_dir = tempfile::tempdir().unwrap();
        let machine_id = uuid::Uuid::new_v4();
        let state_file = temp_dir.path().join("machine_id");
        write_uuid_file_atomically(&state_file, machine_id).unwrap();
        assert_eq!(
            std::fs::read_to_string(state_file).unwrap(),
            machine_id.to_string()
        );
    }
    // Empty OsStrings must be filtered out; non-empty ones pass through.
    #[test]
    fn test_non_empty_os_string_filters_empty_values() {
        assert_eq!(non_empty_os_string(Some(OsString::new())), None);
        assert_eq!(
            non_empty_os_string(Some(OsString::from("foo"))),
            Some(OsString::from("foo"))
        );
    }
    // XDG_DATA_HOME > HOME > /var/lib, with empty values treated as unset.
    #[cfg(target_os = "linux")]
    #[test]
    fn test_default_linux_machine_id_state_dir_falls_back_in_order() {
        assert_eq!(
            default_linux_machine_id_state_dir(
                Some(OsString::from("/tmp/xdg")),
                Some(OsString::from("/tmp/home"))
            ),
            PathBuf::from("/tmp/xdg").join("easytier")
        );
        assert_eq!(
            default_linux_machine_id_state_dir(
                Some(OsString::new()),
                Some(OsString::from("/tmp/home"))
            ),
            PathBuf::from("/tmp/home")
                .join(".local")
                .join("share")
                .join("easytier")
        );
        assert_eq!(
            default_linux_machine_id_state_dir(Some(OsString::new()), Some(OsString::new())),
            PathBuf::from("/var/lib/easytier")
        );
    }
    // persist_machine_id must create intermediate directories itself.
    #[test]
    fn test_persist_machine_id_creates_missing_state_dir() {
        let temp_dir = tempfile::tempdir().unwrap();
        let state_file = temp_dir.path().join("nested").join("machine_id");
        let machine_id = uuid::Uuid::new_v4();
        assert_eq!(
            persist_machine_id(&state_file, machine_id).unwrap(),
            machine_id
        );
        assert_eq!(
            std::fs::read_to_string(state_file).unwrap(),
            machine_id.to_string()
        );
    }
    // Migration of the legacy uid hash only happens when the state directory
    // already exists AND has content (looks like an upgraded install).
    #[test]
    fn test_legacy_machine_uid_migration_requires_existing_state_dir_content() {
        let temp_dir = tempfile::tempdir().unwrap();
        let missing_state_file = temp_dir.path().join("missing").join("machine_id");
        assert!(!should_attempt_legacy_machine_uid_migration(
            &missing_state_file
        ));
        let empty_dir = temp_dir.path().join("empty");
        std::fs::create_dir_all(&empty_dir).unwrap();
        assert!(!should_attempt_legacy_machine_uid_migration(
            &empty_dir.join("machine_id")
        ));
        std::fs::write(empty_dir.join("config.toml"), "x=1").unwrap();
        assert!(should_attempt_legacy_machine_uid_migration(
            &empty_dir.join("machine_id")
        ));
    }
}
+3 -76
View File
@@ -1,15 +1,12 @@
use std::{
fmt::Debug,
future,
io::Write as _,
sync::{Arc, Mutex},
};
use time::util::refresh_tz;
use tokio::{task::JoinSet, time::timeout};
use tracing::Instrument;
use crate::{set_global_var, use_global_var};
pub mod acl_processor;
pub mod compressor;
pub mod config;
@@ -21,6 +18,7 @@ pub mod global_ctx;
pub mod idn;
pub mod ifcfg;
pub mod log;
pub mod machine_id;
pub mod netns;
pub mod network;
pub mod os_info;
@@ -31,6 +29,8 @@ pub mod token_bucket;
pub mod tracing_rolling_appender;
pub mod upnp;
pub use machine_id::{MachineIdOptions, resolve_machine_id};
pub fn get_logger_timer<F: time::formatting::Formattable>(
format: F,
) -> tracing_subscriber::fmt::time::OffsetTime<F> {
@@ -96,71 +96,6 @@ pub fn join_joinset_background<T: Debug + Send + Sync + 'static>(
);
}
pub fn set_default_machine_id(mid: Option<String>) {
set_global_var!(MACHINE_UID, mid);
}
pub fn get_machine_id() -> uuid::Uuid {
if let Some(default_mid) = use_global_var!(MACHINE_UID) {
if let Ok(mid) = uuid::Uuid::parse_str(default_mid.trim()) {
return mid;
}
let mut b = [0u8; 16];
crate::tunnel::generate_digest_from_str("", &default_mid, &mut b);
return uuid::Uuid::from_bytes(b);
}
// a path same as the binary
let machine_id_file = std::env::current_exe()
.map(|x| x.with_file_name("et_machine_id"))
.unwrap_or_else(|_| std::path::PathBuf::from("et_machine_id"));
// try load from local file
if let Ok(mid) = std::fs::read_to_string(&machine_id_file)
&& let Ok(mid) = uuid::Uuid::parse_str(mid.trim())
{
return mid;
}
#[cfg(any(
target_os = "linux",
all(target_os = "macos", not(feature = "macos-ne")),
target_os = "windows",
target_os = "freebsd"
))]
let gen_mid = machine_uid::get()
.map(|x| {
if x.is_empty() {
return uuid::Uuid::new_v4();
}
let mut b = [0u8; 16];
crate::tunnel::generate_digest_from_str("", x.as_str(), &mut b);
uuid::Uuid::from_bytes(b)
})
.ok();
#[cfg(not(any(
target_os = "linux",
all(target_os = "macos", not(feature = "macos-ne")),
target_os = "windows",
target_os = "freebsd"
)))]
let gen_mid = None;
if let Some(mid) = gen_mid {
return mid;
}
let gen_mid = uuid::Uuid::new_v4();
// try save to local file
if let Ok(mut file) = std::fs::File::create(machine_id_file) {
let _ = file.write_all(gen_mid.to_string().as_bytes());
}
gen_mid
}
pub fn shrink_dashmap<K: Eq + std::hash::Hash, V>(
map: &dashmap::DashMap<K, V>,
threshold: Option<usize>,
@@ -210,12 +145,4 @@ mod tests {
assert_eq!(weak_js.weak_count(), 0);
assert_eq!(weak_js.strong_count(), 0);
}
#[test]
fn test_get_machine_id_uses_uuid_seed_verbatim() {
let raw = "33333333-3333-3333-3333-333333333333".to_string();
set_default_machine_id(Some(raw.clone()));
assert_eq!(get_machine_id(), uuid::Uuid::parse_str(&raw).unwrap());
set_default_machine_id(None);
}
}
+22
View File
@@ -85,6 +85,15 @@ pub enum MetricName {
/// Traffic packets forwarded for foreign network, forward
TrafficPacketsForeignForwardForwarded,
/// UDP broadcast relay packets captured from the raw socket
UdpBroadcastRelayPacketsCaptured,
/// UDP broadcast relay packets ignored before forwarding
UdpBroadcastRelayPacketsIgnored,
/// UDP broadcast relay packets forwarded
UdpBroadcastRelayPacketsForwarded,
/// UDP broadcast relay packets that failed to forward
UdpBroadcastRelayPacketsForwardFailed,
/// Compression bytes before compression
CompressionBytesRxBefore,
/// Compression bytes after compression
@@ -167,6 +176,19 @@ impl fmt::Display for MetricName {
write!(f, "traffic_packets_foreign_forward_forwarded")
}
MetricName::UdpBroadcastRelayPacketsCaptured => {
write!(f, "udp_broadcast_relay_packets_captured")
}
MetricName::UdpBroadcastRelayPacketsIgnored => {
write!(f, "udp_broadcast_relay_packets_ignored")
}
MetricName::UdpBroadcastRelayPacketsForwarded => {
write!(f, "udp_broadcast_relay_packets_forwarded")
}
MetricName::UdpBroadcastRelayPacketsForwardFailed => {
write!(f, "udp_broadcast_relay_packets_forward_failed")
}
MetricName::CompressionBytesRxBefore => write!(f, "compression_bytes_rx_before"),
MetricName::CompressionBytesRxAfter => write!(f, "compression_bytes_rx_after"),
MetricName::CompressionBytesTxBefore => write!(f, "compression_bytes_tx_before"),
+54 -233
View File
@@ -13,8 +13,8 @@ use std::{
use crate::{
common::{
PeerId, config::DEFAULT_CONNECTION_PRIORITY, dns::socket_addrs, error::Error,
global_ctx::ArcGlobalCtx, stun::StunInfoCollectorTrait,
PeerId, dns::socket_addrs, error::Error, global_ctx::ArcGlobalCtx,
stun::StunInfoCollectorTrait,
},
connector::udp_hole_punch::handle_rpc_result,
peers::{
@@ -31,7 +31,7 @@ use crate::{
},
rpc_types::controller::BaseController,
},
tunnel::{IpVersion, PrioritizedConnector, matches_protocol, udp::UdpTunnelConnector},
tunnel::{IpVersion, matches_protocol, udp::UdpTunnelConnector},
use_global_var,
};
@@ -48,7 +48,6 @@ use url::Host;
pub const DIRECT_CONNECTOR_SERVICE_ID: u32 = 1;
pub const DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC: u64 = 300;
const DIRECT_CONNECTOR_LOW_PRIORITY_RETRY_TIMEOUT_SEC: u64 = 300;
static TESTING: AtomicBool = AtomicBool::new(false);
@@ -132,70 +131,11 @@ struct DstBlackListItem(PeerId, String);
#[derive(Hash, Eq, PartialEq, Clone)]
struct DstListenerUrlBlackListItem(PeerId, String);
#[derive(Clone, Debug)]
struct AvailableListener {
url: url::Url,
priority: u32,
}
fn available_listeners_from_ip_list(
ip_list: &GetIpListResponse,
enable_ipv6: bool,
) -> Vec<AvailableListener> {
let candidate_listeners: Vec<AvailableListener> = if ip_list.listener_infos.is_empty() {
ip_list
.listeners
.iter()
.map(|url| AvailableListener {
url: url.clone().into(),
priority: DEFAULT_CONNECTION_PRIORITY,
})
.collect()
} else {
ip_list
.listener_infos
.iter()
.filter_map(|info| {
info.url.as_ref().map(|url| AvailableListener {
url: url.clone().into(),
priority: info.priority,
})
})
.collect()
};
candidate_listeners
.into_iter()
.filter(|l| l.url.scheme() != "ring")
.filter(|l| {
mapped_listener_port(&l.url).is_some()
&& l.url
.host()
.is_some_and(|host| enable_ipv6 || !matches!(host, Host::Ipv6(_)))
})
.collect()
}
fn sort_available_listeners(available_listeners: &mut [AvailableListener], default_protocol: &str) {
available_listeners.sort_by_key(|l| {
let scheme = l.url.scheme();
let protocol_priority = if scheme == default_protocol {
3
} else if scheme == "udp" {
2
} else {
1
};
(std::cmp::Reverse(l.priority), protocol_priority)
});
}
struct DirectConnectorManagerData {
global_ctx: ArcGlobalCtx,
peer_manager: Arc<PeerManager>,
dst_listener_blacklist: timedmap::TimedMap<DstListenerUrlBlackListItem, ()>,
peer_black_list: timedmap::TimedMap<PeerId, ()>,
low_priority_direct_retry_backoff: timedmap::TimedMap<PeerId, ()>,
}
impl DirectConnectorManagerData {
@@ -205,7 +145,6 @@ impl DirectConnectorManagerData {
peer_manager,
dst_listener_blacklist: timedmap::TimedMap::new(),
peer_black_list: timedmap::TimedMap::new(),
low_priority_direct_retry_backoff: timedmap::TimedMap::new(),
}
}
@@ -262,7 +201,6 @@ impl DirectConnectorManagerData {
&self,
dst_peer_id: PeerId,
remote_url: &url::Url,
priority: u32,
) -> Result<(PeerId, PeerConnId), Error> {
let local_socket = Arc::new(
UdpSocket::bind("[::]:0")
@@ -301,12 +239,7 @@ impl DirectConnectorManagerData {
// NOTICE: must add as directly connected tunnel
self.peer_manager
.add_client_tunnel_with_peer_id_hint_and_priority(
ret,
true,
Some(dst_peer_id),
priority,
)
.add_client_tunnel_with_peer_id_hint(ret, true, Some(dst_peer_id))
.await
}
@@ -314,7 +247,6 @@ impl DirectConnectorManagerData {
&self,
dst_peer_id: PeerId,
remote_url: &url::Url,
priority: u32,
) -> Result<(PeerId, PeerConnId), Error> {
let local_socket = {
let _g = self.global_ctx.net_ns.guard();
@@ -343,34 +275,21 @@ impl DirectConnectorManagerData {
.await?;
self.peer_manager
.add_client_tunnel_with_peer_id_hint_and_priority(
ret,
true,
Some(dst_peer_id),
priority,
)
.add_client_tunnel_with_peer_id_hint(ret, true, Some(dst_peer_id))
.await
}
async fn do_try_connect_to_ip(
&self,
dst_peer_id: PeerId,
addr: String,
priority: u32,
) -> Result<(), Error> {
async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> {
let connector = create_connector_by_url(&addr, &self.global_ctx, IpVersion::Both).await?;
let remote_url = connector.remote_url();
let (peer_id, conn_id) = if matches_scheme!(remote_url, TunnelScheme::Ip(IpScheme::Udp)) {
match remote_url.host() {
Some(Host::Ipv6(_)) => {
self.connect_to_public_ipv6(dst_peer_id, &remote_url, priority)
self.connect_to_public_ipv6(dst_peer_id, &remote_url)
.await?
}
Some(Host::Ipv4(ip)) if is_public_ipv4(ip) => {
match self
.connect_to_public_ipv4(dst_peer_id, &remote_url, priority)
.await
{
match self.connect_to_public_ipv4(dst_peer_id, &remote_url).await {
Ok(ret) => ret,
Err(err) => {
tracing::debug!(
@@ -381,7 +300,7 @@ impl DirectConnectorManagerData {
timeout(
std::time::Duration::from_secs(3),
self.peer_manager.try_direct_connect_with_peer_id_hint(
PrioritizedConnector::new(connector, priority),
connector,
Some(dst_peer_id),
),
)
@@ -392,10 +311,8 @@ impl DirectConnectorManagerData {
_ => {
timeout(
std::time::Duration::from_secs(3),
self.peer_manager.try_direct_connect_with_peer_id_hint(
PrioritizedConnector::new(connector, priority),
Some(dst_peer_id),
),
self.peer_manager
.try_direct_connect_with_peer_id_hint(connector, Some(dst_peer_id)),
)
.await??
}
@@ -403,10 +320,8 @@ impl DirectConnectorManagerData {
} else {
timeout(
std::time::Duration::from_secs(3),
self.peer_manager.try_direct_connect_with_peer_id_hint(
PrioritizedConnector::new(connector, priority),
Some(dst_peer_id),
),
self.peer_manager
.try_direct_connect_with_peer_id_hint(connector, Some(dst_peer_id)),
)
.await??
};
@@ -430,7 +345,6 @@ impl DirectConnectorManagerData {
self: Arc<DirectConnectorManagerData>,
dst_peer_id: PeerId,
addr: String,
priority: u32,
) -> Result<(), Error> {
let mut rand_gen = rand::rngs::OsRng;
let backoff_ms = [1000, 2000, 4000];
@@ -447,26 +361,19 @@ impl DirectConnectorManagerData {
return Err(Error::UrlInBlacklist);
}
let has_good_direct_conn = || {
self.peer_manager
.has_directly_connected_conn_with_priority_at_most(dst_peer_id, priority)
};
loop {
if has_good_direct_conn() {
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
return Ok(());
}
tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start one round");
let ret = self
.do_try_connect_to_ip(dst_peer_id, addr.clone(), priority)
.await;
let ret = self.do_try_connect_to_ip(dst_peer_id, addr.clone()).await;
tracing::debug!(?ret, ?dst_peer_id, ?addr, "try_connect_to_ip return");
if ret.is_ok() {
return Ok(());
}
if has_good_direct_conn() {
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
return Ok(());
}
@@ -497,19 +404,17 @@ impl DirectConnectorManagerData {
self: &Arc<DirectConnectorManagerData>,
dst_peer_id: PeerId,
ip_list: &GetIpListResponse,
listener: &AvailableListener,
listener: &url::Url,
tasks: &mut JoinSet<Result<(), Error>>,
) {
let Ok(mut addrs) = resolve_mapped_listener_addrs(&listener.url).await else {
let Ok(mut addrs) = resolve_mapped_listener_addrs(listener).await else {
tracing::error!(?listener, "failed to parse socket address from listener");
return;
};
let listener_host = addrs.pop();
tracing::info!(?listener_host, ?listener, "try direct connect to peer");
let is_udp = matches_protocol!(&listener.url, Protocol::UDP);
let listener_url = &listener.url;
let priority = listener.priority;
let is_udp = matches_protocol!(listener, Protocol::UDP);
// Snapshot running listeners once; used for cheap port pre-checks before the
// expensive should_deny_proxy call (which binds a socket per IP) in the
// unspecified-address expansion loops below.
@@ -544,13 +449,12 @@ impl DirectConnectorManagerData {
);
return;
}
let mut addr = listener_url.clone();
let mut addr = (*listener).clone();
if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
addr.to_string(),
priority,
));
} else {
tracing::error!(
@@ -571,8 +475,7 @@ impl DirectConnectorManagerData {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
listener_url.to_string(),
priority,
listener.to_string(),
));
}
}
@@ -601,13 +504,12 @@ impl DirectConnectorManagerData {
);
return;
}
let mut addr = listener_url.clone();
let mut addr = (*listener).clone();
if addr.set_host(Some(format!("[{}]", ip).as_str())).is_ok() {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
addr.to_string(),
priority,
));
} else {
tracing::error!(
@@ -633,8 +535,7 @@ impl DirectConnectorManagerData {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
listener_url.to_string(),
priority,
listener.to_string(),
));
}
}
@@ -652,7 +553,15 @@ impl DirectConnectorManagerData {
ip_list: GetIpListResponse,
) -> Result<(), Error> {
let enable_ipv6 = self.global_ctx.get_flags().enable_ipv6;
let mut available_listeners = available_listeners_from_ip_list(&ip_list, enable_ipv6);
let available_listeners = ip_list
.listeners
.clone()
.into_iter()
.map(Into::<url::Url>::into)
.filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None })
.filter(|l| mapped_listener_port(l).is_some() && l.host().is_some())
.filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_)))
.collect::<Vec<_>>();
tracing::debug!(?available_listeners, "got available listeners");
@@ -661,30 +570,35 @@ impl DirectConnectorManagerData {
}
let default_protocol = self.global_ctx.get_flags().default_protocol;
// Sort by configured priority first (lower is better), then prefer the
// default protocol and UDP. The best candidate group is in the last slot.
sort_available_listeners(&mut available_listeners, &default_protocol);
// sort available listeners, default protocol has the highest priority, udp is second, others just random
// highest priority is in the last
let mut available_listeners = available_listeners;
available_listeners.sort_by_key(|l| {
let scheme = l.scheme();
if scheme == default_protocol {
3
} else if scheme == "udp" {
2
} else {
1
}
});
while let Some(cur_listener) = available_listeners.last() {
while !available_listeners.is_empty() {
let mut tasks = JoinSet::new();
let mut listener_list = vec![];
let cur_priority = cur_listener.priority;
let cur_scheme = cur_listener.url.scheme().to_owned();
let cur_scheme = available_listeners.last().unwrap().scheme().to_owned();
while let Some(listener) = available_listeners.last() {
if listener.priority != cur_priority || listener.url.scheme() != cur_scheme {
if listener.scheme() != cur_scheme {
break;
}
tracing::debug!(
%cur_priority,
"try direct connect to peer with listener: {}",
listener.url
);
tracing::debug!("try direct connect to peer with listener: {}", listener);
self.spawn_direct_connect_task(dst_peer_id, &ip_list, listener, &mut tasks)
.await;
listener_list.push(listener.url.to_string());
listener_list.push(listener.clone().to_string());
available_listeners.pop();
}
@@ -692,16 +606,12 @@ impl DirectConnectorManagerData {
tracing::debug!(
?ret,
?dst_peer_id,
?cur_priority,
?cur_scheme,
?listener_list,
"all tasks finished for current scheme"
);
if self
.peer_manager
.has_directly_connected_conn_with_priority_at_most(dst_peer_id, cur_priority)
{
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
tracing::info!(
"direct connect to peer {} success, has direct conn",
dst_peer_id
@@ -756,29 +666,13 @@ impl DirectConnectorManagerData {
.await;
tracing::info!(?ret, ?dst_peer_id, "do_try_direct_connect return");
if peer_manager.has_directly_connected_conn_with_priority_at_most(
dst_peer_id,
DEFAULT_CONNECTION_PRIORITY,
) {
if peer_manager.has_directly_connected_conn(dst_peer_id) {
tracing::info!(
"direct connect to peer {} success, has direct conn",
dst_peer_id
);
return Ok(());
}
if peer_manager.has_directly_connected_conn(dst_peer_id) {
self.low_priority_direct_retry_backoff.insert(
dst_peer_id,
(),
Duration::from_secs(DIRECT_CONNECTOR_LOW_PRIORITY_RETRY_TIMEOUT_SEC),
);
tracing::info!(
"direct connect to peer {} skipped temporarily, only low-priority direct conn exists",
dst_peer_id
);
return Ok(());
}
}
}
}
@@ -821,7 +715,6 @@ impl PeerTaskLauncher for DirectConnectorLauncher {
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> {
data.peer_black_list.cleanup();
data.low_priority_direct_retry_backoff.cleanup();
let my_peer_id = data.peer_manager.my_peer_id();
data.peer_manager
.list_peers()
@@ -829,13 +722,7 @@ impl PeerTaskLauncher for DirectConnectorLauncher {
.into_iter()
.filter(|peer_id| {
*peer_id != my_peer_id
&& !data
.peer_manager
.has_directly_connected_conn_with_priority_at_most(
*peer_id,
DEFAULT_CONNECTION_PRIORITY,
)
&& !data.low_priority_direct_retry_backoff.contains(peer_id)
&& !data.peer_manager.has_directly_connected_conn(*peer_id)
&& !data.peer_black_list.contains(peer_id)
})
.collect()
@@ -926,16 +813,12 @@ mod tests {
wait_route_appear_with_cost,
},
proto::peer_rpc::GetIpListResponse,
proto::peer_rpc::ListenerInfo,
tunnel::{IpScheme, TunnelScheme, matches_scheme},
};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use super::{
DEFAULT_CONNECTION_PRIORITY, TESTING, available_listeners_from_ip_list,
mapped_listener_port, resolve_mapped_listener_addrs, sort_available_listeners,
};
use super::{TESTING, mapped_listener_port, resolve_mapped_listener_addrs};
#[tokio::test]
async fn public_ipv6_candidate_rejects_easytier_managed_addr_even_in_tests() {
@@ -985,68 +868,6 @@ mod tests {
);
}
#[test]
fn available_listener_order_uses_priority_before_protocol() {
let ip_list = GetIpListResponse {
listener_infos: vec![
ListenerInfo {
url: Some("tcp://127.0.0.1:11010".parse().unwrap()),
priority: 100,
},
ListenerInfo {
url: Some("udp://127.0.0.1:11011".parse().unwrap()),
priority: 0,
},
ListenerInfo {
url: Some("tcp://127.0.0.1:11012".parse().unwrap()),
priority: 0,
},
],
..Default::default()
};
let mut listeners = available_listeners_from_ip_list(&ip_list, true);
sort_available_listeners(&mut listeners, "tcp");
let ordered_urls = listeners
.iter()
.rev()
.map(|listener| listener.url.to_string())
.collect::<Vec<_>>();
assert_eq!(
ordered_urls,
vec![
"tcp://127.0.0.1:11012",
"udp://127.0.0.1:11011",
"tcp://127.0.0.1:11010",
]
);
}
#[test]
fn available_listener_order_keeps_legacy_listeners_at_default_priority() {
let mut ip_list = GetIpListResponse::default();
ip_list
.listeners
.push("udp://127.0.0.1:11010".parse().unwrap());
ip_list
.listeners
.push("tcp://127.0.0.1:11011".parse().unwrap());
let mut listeners = available_listeners_from_ip_list(&ip_list, true);
sort_available_listeners(&mut listeners, "tcp");
assert!(
listeners
.iter()
.all(|listener| listener.priority == DEFAULT_CONNECTION_PRIORITY)
);
assert_eq!(
listeners.last().unwrap().url.to_string(),
"tcp://127.0.0.1:11011"
);
}
#[tokio::test]
async fn resolve_mapped_listener_addrs_uses_default_ports() {
let wss_addrs = resolve_mapped_listener_addrs(&"wss://127.0.0.1".parse().unwrap())
+180 -99
View File
@@ -1,15 +1,15 @@
use std::{
collections::BTreeSet,
future::Future,
sync::{Arc, Weak},
time::{Duration, Instant},
};
use dashmap::{DashMap, DashSet};
use dashmap::DashSet;
use tokio::{sync::mpsc, task::JoinSet, time::timeout};
use crate::{
common::{
PeerId, config::DEFAULT_CONNECTION_PRIORITY, dns::socket_addrs, join_joinset_background,
},
common::{PeerId, dns::socket_addrs, join_joinset_background},
peers::peer_conn::PeerConnId,
proto::{
api::instance::{
@@ -18,7 +18,7 @@ use crate::{
},
rpc_types::{self, controller::BaseController},
},
tunnel::{IpVersion, PrioritizedConnector, TunnelConnector},
tunnel::{IpVersion, TunnelConnector, TunnelScheme, matches_scheme},
utils::weak_upgrade,
};
@@ -34,7 +34,7 @@ use crate::{
use super::create_connector_by_url;
type ConnectorMap = Arc<DashMap<url::Url, u32>>;
type ConnectorMap = Arc<DashSet<url::Url>>;
#[derive(Debug, Clone)]
struct ReconnResult {
@@ -45,7 +45,7 @@ struct ReconnResult {
struct ConnectorManagerData {
connectors: ConnectorMap,
reconnecting: DashMap<url::Url, u32>,
reconnecting: DashSet<url::Url>,
peer_manager: Weak<PeerManager>,
alive_conn_urls: Arc<DashSet<url::Url>>,
// user removed connector urls
@@ -62,14 +62,14 @@ pub struct ManualConnectorManager {
impl ManualConnectorManager {
pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self {
let connectors = Arc::new(DashMap::new());
let connectors = Arc::new(DashSet::new());
let tasks = JoinSet::new();
let mut ret = Self {
global_ctx: global_ctx.clone(),
data: Arc::new(ConnectorManagerData {
connectors,
reconnecting: DashMap::new(),
reconnecting: DashSet::new(),
peer_manager: Arc::downgrade(&peer_manager),
alive_conn_urls: Arc::new(DashSet::new()),
removed_conn_urls: Arc::new(DashSet::new()),
@@ -85,28 +85,65 @@ impl ManualConnectorManager {
ret
}
fn reconnect_timeout(dead_url: &url::Url) -> Duration {
let use_long_timeout = matches_scheme!(
dead_url,
TunnelScheme::Http | TunnelScheme::Https | TunnelScheme::Txt | TunnelScheme::Srv
) || matches!(dead_url.scheme(), "ws" | "wss");
Duration::from_secs(if use_long_timeout { 20 } else { 2 })
}
fn remaining_budget(started_at: Instant, total_timeout: Duration) -> Option<Duration> {
let remaining = total_timeout.checked_sub(started_at.elapsed())?;
(!remaining.is_zero()).then_some(remaining)
}
fn emit_connect_error(
data: &ConnectorManagerData,
dead_url: &url::Url,
ip_version: IpVersion,
error: &Error,
) {
data.global_ctx.issue_event(GlobalCtxEvent::ConnectError(
dead_url.to_string(),
format!("{:?}", ip_version),
format!("{:#?}", error),
));
}
fn reconnect_timeout_error(stage: &str, duration: Duration) -> Error {
Error::AnyhowError(anyhow::anyhow!("{} timeout after {:?}", stage, duration))
}
async fn with_reconnect_timeout<T, F>(
stage: &'static str,
started_at: Instant,
total_timeout: Duration,
fut: F,
) -> Result<T, Error>
where
F: Future<Output = Result<T, Error>>,
{
let remaining = Self::remaining_budget(started_at, total_timeout)
.ok_or_else(|| Self::reconnect_timeout_error(stage, started_at.elapsed()))?;
timeout(remaining, fut)
.await
.map_err(|_| Self::reconnect_timeout_error(stage, remaining))?
}
}
impl ManualConnectorManager {
pub fn add_connector<T>(&self, connector: T)
where
T: TunnelConnector,
T: TunnelConnector + 'static,
{
tracing::info!("add_connector: {}", connector.remote_url());
let priority = connector.priority();
self.data
.connectors
.insert(connector.remote_url(), priority);
self.data.connectors.insert(connector.remote_url());
}
pub async fn add_connector_by_url(&self, url: url::Url) -> Result<(), Error> {
self.add_connector_by_url_with_priority(url, DEFAULT_CONNECTION_PRIORITY)
.await
}
pub async fn add_connector_by_url_with_priority(
&self,
url: url::Url,
priority: u32,
) -> Result<(), Error> {
self.data.connectors.insert(url, priority);
self.data.connectors.insert(url);
Ok(())
}
@@ -152,25 +189,19 @@ impl ManualConnectorManager {
Connector {
url: Some(conn_url.into()),
status: status.into(),
priority: *item.value(),
},
);
}
let reconnecting_urls: BTreeSet<_> = self
.data
.reconnecting
.iter()
.map(|item| (item.key().clone(), *item.value()))
.collect();
let reconnecting_urls: BTreeSet<url::Url> =
self.data.reconnecting.iter().map(|x| x.clone()).collect();
for (conn_url, priority) in reconnecting_urls {
for conn_url in reconnecting_urls {
ret.insert(
0,
Connector {
url: Some(conn_url.into()),
status: ConnectorStatus::Connecting.into(),
priority,
},
);
}
@@ -197,20 +228,16 @@ impl ManualConnectorManager {
for dead_url in dead_urls {
let data_clone = data.clone();
let sender = reconn_result_send.clone();
let priority = data
.connectors
.remove(&dead_url)
.map(|(_, priority)| priority)
.unwrap_or(DEFAULT_CONNECTION_PRIORITY);
let previous = data.reconnecting.insert(dead_url.clone(), priority);
assert!(previous.is_none());
data.connectors.remove(&dead_url).unwrap();
let insert_succ = data.reconnecting.insert(dead_url.clone());
assert!(insert_succ);
tasks.lock().unwrap().spawn(async move {
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), priority).await;
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone() ).await;
let _ = sender.send(reconn_ret).await;
data_clone.reconnecting.remove(&dead_url).unwrap();
data_clone.connectors.insert(dead_url.clone(), priority);
data_clone.connectors.insert(dead_url.clone());
});
}
tracing::info!("reconn_interval tick, done");
@@ -230,7 +257,7 @@ impl ManualConnectorManager {
if data.connectors.remove(url).is_some() {
tracing::warn!("connector: {}, removed", url);
continue;
} else if data.reconnecting.contains_key(url) {
} else if data.reconnecting.contains(url) {
tracing::warn!("connector: {}, reconnecting, remove later.", url);
remove_later.insert(url.clone());
continue;
@@ -266,12 +293,18 @@ impl ManualConnectorManager {
async fn conn_reconnect_with_ip_version(
data: Arc<ConnectorManagerData>,
dead_url: String,
dead_url: url::Url,
ip_version: IpVersion,
priority: u32,
started_at: Instant,
total_timeout: Duration,
) -> Result<ReconnResult, Error> {
let connector =
create_connector_by_url(&dead_url, &data.global_ctx.clone(), ip_version).await?;
let connector = Self::with_reconnect_timeout(
"resolve",
started_at,
total_timeout,
create_connector_by_url(dead_url.as_str(), &data.global_ctx, ip_version),
)
.await?;
data.global_ctx
.issue_event(GlobalCtxEvent::Connecting(connector.remote_url()));
@@ -282,12 +315,25 @@ impl ManualConnectorManager {
)));
};
let (peer_id, conn_id) = pm
.try_direct_connect(PrioritizedConnector::new(connector, priority))
.await?;
let tunnel = Self::with_reconnect_timeout(
"connect",
started_at,
total_timeout,
pm.connect_tunnel(connector),
)
.await?;
let (peer_id, conn_id) = Self::with_reconnect_timeout(
"handshake",
started_at,
total_timeout,
pm.add_client_tunnel_with_peer_id_hint(tunnel, true, None),
)
.await?;
tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url);
Ok(ReconnResult {
dead_url,
dead_url: dead_url.to_string(),
peer_id,
conn_id,
})
@@ -296,27 +342,37 @@ impl ManualConnectorManager {
async fn conn_reconnect(
data: Arc<ConnectorManagerData>,
dead_url: url::Url,
priority: u32,
) -> Result<ReconnResult, Error> {
tracing::info!("reconnect: {}", dead_url);
let mut ip_versions = vec![];
if dead_url.scheme() == "ring" || dead_url.scheme() == "txt" || dead_url.scheme() == "srv" {
if matches_scheme!(
dead_url,
TunnelScheme::Ring | TunnelScheme::Txt | TunnelScheme::Srv
) {
ip_versions.push(IpVersion::Both);
} else {
let converted_dead_url = crate::common::idn::convert_idn_to_ascii(dead_url.clone())?;
let addrs = match socket_addrs(&converted_dead_url, || Some(1000)).await {
let converted_dead_url =
match crate::common::idn::convert_idn_to_ascii(dead_url.clone()) {
Ok(url) => url,
Err(error) => {
let error: Error = error.into();
Self::emit_connect_error(&data, &dead_url, IpVersion::Both, &error);
return Err(error);
}
};
let addrs = match Self::with_reconnect_timeout(
"resolve",
Instant::now(),
Self::reconnect_timeout(&dead_url),
socket_addrs(&converted_dead_url, || Some(1000)),
)
.await
{
Ok(addrs) => addrs,
Err(e) => {
data.global_ctx.issue_event(GlobalCtxEvent::ConnectError(
dead_url.to_string(),
format!("{:?}", IpVersion::Both),
format!("{:?}", e),
));
return Err(Error::AnyhowError(anyhow::anyhow!(
"get ip from url failed: {:?}",
e
)));
Err(error) => {
Self::emit_connect_error(&data, &dead_url, IpVersion::Both, &error);
return Err(error);
}
};
tracing::info!(?addrs, ?dead_url, "get ip from url done");
@@ -341,47 +397,24 @@ impl ManualConnectorManager {
"cannot get ip from url"
)));
for ip_version in ip_versions {
let use_long_timeout = dead_url.scheme() == "http"
|| dead_url.scheme() == "https"
|| dead_url.scheme() == "ws"
|| dead_url.scheme() == "wss"
|| dead_url.scheme() == "txt"
|| dead_url.scheme() == "srv";
let ret = timeout(
// allow http/websocket connector to wait longer
std::time::Duration::from_secs(if use_long_timeout { 20 } else { 2 }),
Self::conn_reconnect_with_ip_version(
data.clone(),
dead_url.to_string(),
ip_version,
priority,
),
let started_at = Instant::now();
let ret = Self::conn_reconnect_with_ip_version(
data.clone(),
dead_url.clone(),
ip_version,
started_at,
Self::reconnect_timeout(&dead_url),
)
.await;
tracing::info!("reconnect: {} done, ret: {:?}", dead_url, ret);
match ret {
Ok(Ok(_)) => {
// 外层和内层都成功:解包并跳出
reconn_ret = ret.unwrap();
break;
}
Ok(Err(e)) => {
// 外层成功,内层失败
reconn_ret = Err(e);
}
Err(e) => {
// 外层失败
reconn_ret = Err(e.into());
Ok(result) => return Ok(result),
Err(error) => {
Self::emit_connect_error(&data, &dead_url, ip_version, &error);
reconn_ret = Err(error);
}
}
// 发送事件(只有在未 break 时才执行)
data.global_ctx.issue_event(GlobalCtxEvent::ConnectError(
dead_url.to_string(),
format!("{:?}", ip_version),
format!("{:?}", reconn_ret),
));
}
reconn_ret
@@ -417,6 +450,54 @@ mod tests {
use super::*;
#[tokio::test]
async fn reconnect_timeout_reports_exhausted_budget_for_stage() {
let started_at = Instant::now() - Duration::from_millis(50);
let err = ManualConnectorManager::with_reconnect_timeout(
"resolve",
started_at,
Duration::from_millis(1),
async { Ok::<(), Error>(()) },
)
.await
.unwrap_err();
let message = err.to_string();
assert!(message.contains("resolve timeout after"));
}
#[tokio::test]
async fn reconnect_timeout_reports_stage_timeout_with_remaining_budget() {
let err = ManualConnectorManager::with_reconnect_timeout(
"handshake",
Instant::now(),
Duration::from_millis(10),
async {
tokio::time::sleep(Duration::from_millis(50)).await;
Ok::<(), Error>(())
},
)
.await
.unwrap_err();
let message = err.to_string();
assert!(message.contains("handshake timeout after"));
}
#[tokio::test]
async fn reconnect_timeout_preserves_success_within_budget() {
let result = ManualConnectorManager::with_reconnect_timeout(
"connect",
Instant::now(),
Duration::from_millis(50),
async { Ok::<_, Error>(123_u32) },
)
.await
.unwrap();
assert_eq!(result, 123);
}
#[tokio::test]
async fn test_reconnect_with_connecting_addr() {
set_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS, 1);
+1 -4
View File
@@ -472,10 +472,7 @@ impl PeerTaskLauncher for TcpHolePunchPeerTaskLauncher {
continue;
}
if data.peer_mgr.has_conn_with_priority_at_most(
peer_id,
crate::common::config::DEFAULT_CONNECTION_PRIORITY,
) {
if data.peer_mgr.get_peer_map().has_peer(peer_id) {
tracing::trace!(peer_id, "tcp hole punch task collect skip already has peer");
continue;
}
+1 -4
View File
@@ -474,10 +474,7 @@ impl PeerTaskLauncher for UdpHolePunchPeerTaskLauncher {
continue;
}
if data.peer_mgr.has_conn_with_priority_at_most(
peer_id,
crate::common::config::DEFAULT_CONNECTION_PRIORITY,
) {
if data.peer_mgr.get_peer_map().has_peer(peer_id) {
continue;
}
+16 -3
View File
@@ -484,6 +484,15 @@ struct NetworkOptions {
)]
disable_upnp: Option<bool>,
#[arg(
long,
env = "ET_ENABLE_UDP_BROADCAST_RELAY",
help = t!("core_clap.enable_udp_broadcast_relay").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
enable_udp_broadcast_relay: Option<bool>,
#[arg(
long,
env = "ET_RELAY_ALL_PEER_RPC",
@@ -923,7 +932,6 @@ impl NetworkOptions {
.parse()
.with_context(|| format!("failed to parse peer uri: {}", p))?,
peer_public_key: None,
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
});
}
cfg.set_peers(peers);
@@ -961,7 +969,6 @@ impl NetworkOptions {
format!("failed to parse external node uri: {}", external_nodes)
})?,
peer_public_key: None,
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
});
cfg.set_peers(old_peers);
}
@@ -1144,6 +1151,9 @@ impl NetworkOptions {
.disable_sym_hole_punching
.unwrap_or(f.disable_sym_hole_punching);
f.disable_upnp = self.disable_upnp.unwrap_or(f.disable_upnp);
f.enable_udp_broadcast_relay = self
.enable_udp_broadcast_relay
.unwrap_or(f.enable_udp_broadcast_relay);
// Configure tld_dns_zone: use provided value if set
if let Some(tld_dns_zone) = &self.tld_dns_zone {
f.tld_dns_zone = tld_dns_zone.clone();
@@ -1338,7 +1348,10 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
let _web_client = if let Some(config_server_url_s) = cli.config_server.as_ref() {
let wc = web_client::run_web_client(
config_server_url_s,
cli.machine_id.clone(),
crate::common::MachineIdOptions {
explicit_machine_id: cli.machine_id.clone(),
state_dir: None,
},
cli.network_options.hostname.clone(),
cli.network_options.secure_mode.unwrap_or(false),
manager.clone(),
+34 -73
View File
@@ -43,7 +43,7 @@ use easytier::{
},
instance::{
AclManageRpc, AclManageRpcClientFactory, Connector, ConnectorManageRpc,
ConnectorManageRpcClientFactory, ConnectorStatus, CredentialManageRpc,
ConnectorManageRpcClientFactory, CredentialManageRpc,
CredentialManageRpcClientFactory, DumpRouteRequest, ForeignNetworkEntryPb,
GenerateCredentialRequest, GetAclStatsRequest, GetPrometheusStatsRequest,
GetStatsRequest, GetVpnPortalInfoRequest, GetWhitelistRequest,
@@ -193,8 +193,11 @@ struct PeerArgs {
#[derive(Subcommand, Debug)]
enum PeerSubCommand {
/// List connected peers
List,
/// Show public IPv6 address information
Ipv6,
/// List foreign networks discovered by this instance
ListForeign {
#[arg(
long,
@@ -203,6 +206,7 @@ enum PeerSubCommand {
)]
trusted_keys: bool,
},
/// List global foreign networks from the peer center
ListGlobalForeign,
}
@@ -214,16 +218,18 @@ struct RouteArgs {
#[derive(Subcommand, Debug)]
enum RouteSubCommand {
/// List routes propagated by peers
List,
/// Dump routes in CIDR format
Dump,
}
#[derive(Args, Debug)]
struct ConnectorArgs {
#[arg(short, long)]
#[arg(short, long, help = "filter connectors by virtual IPv4 address")]
ipv4: Option<String>,
#[arg(short, long)]
#[arg(short, long, help = "filter connectors by peer URL")]
peers: Vec<String>,
#[command(subcommand)]
@@ -236,14 +242,13 @@ enum ConnectorSubCommand {
Add {
#[arg(help = "connector url, e.g., tcp://1.2.3.4:11010")]
url: String,
#[arg(short = 'p', long = "priority", default_value_t = easytier::common::config::DEFAULT_CONNECTION_PRIORITY, help = "connection priority; lower values are preferred")]
priority: u32,
},
/// Remove a connector
Remove {
#[arg(help = "connector url, e.g., tcp://1.2.3.4:11010")]
url: String,
},
/// List connectors
List,
}
@@ -256,11 +261,7 @@ struct MappedListenerArgs {
#[derive(Subcommand, Debug)]
enum MappedListenerSubCommand {
/// Add Mapped Listerner
Add {
url: String,
#[arg(short = 'p', long = "priority", default_value_t = easytier::common::config::DEFAULT_CONNECTION_PRIORITY, help = "listener priority; lower values are preferred")]
priority: u32,
},
Add { url: String },
/// Remove Mapped Listener
Remove { url: String },
/// List Existing Mapped Listener
@@ -289,6 +290,7 @@ struct AclArgs {
#[derive(Subcommand, Debug)]
enum AclSubCommand {
/// Show ACL rule hit statistics
Stats,
}
@@ -456,19 +458,25 @@ struct InstallArgs {
#[arg(long, default_value = env!("CARGO_PKG_DESCRIPTION"), help = "service description")]
description: String,
#[arg(long)]
#[arg(long, help = "display name shown by the service manager")]
display_name: Option<String>,
#[arg(long)]
#[arg(
long,
help = "whether to disable starting the service automatically on boot (true/false)"
)]
disable_autostart: Option<bool>,
#[arg(long)]
#[arg(
long,
help = "whether to disable automatic restart when the service fails (true/false)"
)]
disable_restart_on_failure: Option<bool>,
#[arg(long, help = "path to easytier-core binary")]
core_path: Option<PathBuf>,
#[arg(long)]
#[arg(long, help = "working directory for the easytier-core service")]
service_work_dir: Option<PathBuf>,
#[arg(
@@ -1253,7 +1261,6 @@ impl<'a> CommandHandler<'a> {
&self,
url: &str,
action: ConfigPatchAction,
priority: Option<u32>,
) -> Result<(), Error> {
let url = match action {
ConfigPatchAction::Add => Self::connector_validate_url(url)?,
@@ -1274,7 +1281,6 @@ impl<'a> CommandHandler<'a> {
connectors: vec![UrlPatch {
action: action.into(),
url: Some(url.into()),
priority,
}],
..Default::default()
}),
@@ -1289,12 +1295,11 @@ impl<'a> CommandHandler<'a> {
&self,
url: &str,
action: ConfigPatchAction,
priority: Option<u32>,
) -> Result<(), Error> {
let url = url.to_string();
self.apply_to_instances(|handler| {
let url = url.clone();
Box::pin(async move { handler.apply_connector_modify(&url, action, priority).await })
Box::pin(async move { handler.apply_connector_modify(&url, action).await })
})
.await
}
@@ -1318,8 +1323,6 @@ impl<'a> CommandHandler<'a> {
tx_bytes: String,
#[tabled(rename = "tunnel")]
tunnel_proto: String,
#[tabled(rename = "prio")]
priority: String,
#[tabled(rename = "NAT")]
nat_type: String,
#[tabled(skip)]
@@ -1349,10 +1352,6 @@ impl<'a> CommandHandler<'a> {
rx_bytes: format_size(p.get_rx_bytes().unwrap_or(0), humansize::DECIMAL),
tx_bytes: format_size(p.get_tx_bytes().unwrap_or(0), humansize::DECIMAL),
tunnel_proto: p.get_conn_protos().unwrap_or_default().join(","),
priority: p
.get_conn_priority()
.map(|priority| priority.to_string())
.unwrap_or_else(|| "-".to_string()),
nat_type: p.get_udp_nat_type(),
id: route.peer_id.to_string(),
version: if route.version.is_empty() {
@@ -1378,7 +1377,6 @@ impl<'a> CommandHandler<'a> {
rx_bytes: "-".to_string(),
tx_bytes: "-".to_string(),
tunnel_proto: "-".to_string(),
priority: "-".to_string(),
nat_type: if let Some(info) = p.stun_info {
info.udp_nat_type().as_str_name().to_string()
} else {
@@ -1842,29 +1840,6 @@ impl<'a> CommandHandler<'a> {
}
async fn handle_connector_list(&self) -> Result<(), Error> {
#[derive(tabled::Tabled, serde::Serialize)]
struct ConnectorTableItem {
url: String,
status: String,
priority: String,
}
impl From<Connector> for ConnectorTableItem {
fn from(connector: Connector) -> Self {
Self {
url: connector
.url
.map(Into::<url::Url>::into)
.map(|url| url.to_string())
.unwrap_or_default(),
status: ConnectorStatus::try_from(connector.status)
.map(|status| format!("{:?}", status))
.unwrap_or_else(|_| connector.status.to_string()),
priority: connector.priority.to_string(),
}
}
}
let results = self
.collect_instance_results(|handler| Box::pin(handler.fetch_connector_list()))
.await?;
@@ -1872,13 +1847,8 @@ impl<'a> CommandHandler<'a> {
return self.print_json_results(results);
}
self.print_results(&results, |connectors| {
let mut items = connectors
.iter()
.cloned()
.map(ConnectorTableItem::from)
.collect::<Vec<_>>();
items.sort_by(|a, b| a.url.cmp(&b.url));
print_output(&items, self.output_format, &[], &[], self.no_trunc)
println!("response: {:#?}", connectors);
Ok(())
})
}
@@ -1917,7 +1887,6 @@ impl<'a> CommandHandler<'a> {
&self,
url: &str,
action: ConfigPatchAction,
priority: Option<u32>,
) -> Result<(), Error> {
let url = Self::mapped_listener_validate_url(url)?;
let client = self.get_config_client().await?;
@@ -1927,7 +1896,6 @@ impl<'a> CommandHandler<'a> {
mapped_listeners: vec![UrlPatch {
action: action.into(),
url: Some(url.into()),
priority,
}],
..Default::default()
}),
@@ -1942,16 +1910,11 @@ impl<'a> CommandHandler<'a> {
&self,
url: &str,
action: ConfigPatchAction,
priority: Option<u32>,
) -> Result<(), Error> {
let url = url.to_string();
self.apply_to_instances(|handler| {
let url = url.clone();
Box::pin(async move {
handler
.apply_mapped_listener_modify(&url, action, priority)
.await
})
Box::pin(async move { handler.apply_mapped_listener_modify(&url, action).await })
})
.await
}
@@ -2934,17 +2897,15 @@ async fn main() -> Result<(), Error> {
}
},
SubCommand::Connector(conn_args) => match conn_args.sub_command {
Some(ConnectorSubCommand::Add { url, priority }) => {
Some(ConnectorSubCommand::Add { url }) => {
handler
.handle_connector_modify(&url, ConfigPatchAction::Add, Some(priority))
.handle_connector_modify(&url, ConfigPatchAction::Add)
.await?;
println!(
"connector add applied to selected instance(s): {url}, priority: {priority}"
);
println!("connector add applied to selected instance(s): {url}");
}
Some(ConnectorSubCommand::Remove { url }) => {
handler
.handle_connector_modify(&url, ConfigPatchAction::Remove, None)
.handle_connector_modify(&url, ConfigPatchAction::Remove)
.await?;
println!("connector remove applied to selected instance(s): {url}");
}
@@ -2957,15 +2918,15 @@ async fn main() -> Result<(), Error> {
},
SubCommand::MappedListener(mapped_listener_args) => {
match mapped_listener_args.sub_command {
Some(MappedListenerSubCommand::Add { url, priority }) => {
Some(MappedListenerSubCommand::Add { url }) => {
handler
.handle_mapped_listener_modify(&url, ConfigPatchAction::Add, Some(priority))
.handle_mapped_listener_modify(&url, ConfigPatchAction::Add)
.await?;
println!("add mapped listener: {url}, priority: {priority}");
println!("add mapped listener: {url}");
}
Some(MappedListenerSubCommand::Remove { url }) => {
handler
.handle_mapped_listener_modify(&url, ConfigPatchAction::Remove, None)
.handle_mapped_listener_modify(&url, ConfigPatchAction::Remove)
.await?;
println!("remove mapped listener: {url}");
}
+45 -59
View File
@@ -4,7 +4,7 @@ use std::{
time::Duration,
};
use anyhow::Context;
use anyhow::{Context, anyhow, bail};
use bytes::Bytes;
use dashmap::DashMap;
use guarden::defer;
@@ -15,12 +15,13 @@ use kcp_sys::{
stream::KcpStream,
};
use prost::Message;
use tokio::{select, task::JoinSet};
use tokio::task::JoinSet;
use super::{
CidrSet,
tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy},
};
use crate::utils::task::HedgeExt;
use crate::{
common::{
acl_processor::PacketInfo,
@@ -114,72 +115,57 @@ pub struct NatDstKcpConnector {
impl NatDstConnector for NatDstKcpConnector {
type DstStream = KcpStream;
async fn connect(&self, src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> {
async fn connect(
&self,
src: SocketAddr,
nat_dst: SocketAddr,
) -> anyhow::Result<Self::DstStream> {
let peer_mgr = self
.peer_mgr
.upgrade()
.ok_or_else(|| anyhow!("peer manager is not available"))?;
let dst_peer = {
let SocketAddr::V4(addr) = nat_dst else {
bail!("ipv6 is not supported");
};
peer_mgr
.get_peer_map()
.get_peer_id_by_ipv4(addr.ip())
.await
.ok_or_else(|| anyhow!("no peer found for nat dst: {}", nat_dst))?
};
tracing::trace!(?nat_dst, ?dst_peer, "kcp nat");
let conn_data = KcpConnData {
src: Some(src.into()),
dst: Some(nat_dst.into()),
};
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
return Err(anyhow::anyhow!("peer manager is not available").into());
};
let stream = (0..5)
.map(|_| {
let kcp_endpoint = self.kcp_endpoint.clone();
let my_peer_id = peer_mgr.my_peer_id();
let dst_peer_id = match nat_dst {
SocketAddr::V4(addr) => peer_mgr.get_peer_map().get_peer_id_by_ipv4(addr.ip()).await,
SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()),
};
async move {
let conn_id = kcp_endpoint
.connect(
Duration::from_secs(10),
my_peer_id,
dst_peer,
Bytes::from(conn_data.encode_to_vec()),
)
.await?;
let Some(dst_peer) = dst_peer_id else {
return Err(anyhow::anyhow!("no peer found for nat dst: {}", nat_dst).into());
};
tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peer);
let mut connect_tasks: JoinSet<std::result::Result<ConnId, anyhow::Error>> = JoinSet::new();
let mut retry_remain = 5;
loop {
select! {
Some(Ok(Ok(ret))) = connect_tasks.join_next() => {
// just wait for the previous connection to finish
let stream = KcpStream::new(&self.kcp_endpoint, ret)
.ok_or(anyhow::anyhow!("failed to create kcp stream"))?;
return Ok(stream);
KcpStream::new(&kcp_endpoint, conn_id).context("failed to create kcp stream")
}
_ = tokio::time::sleep(Duration::from_millis(200)), if !connect_tasks.is_empty() && retry_remain > 0 => {
// no successful connection yet, trigger another connection attempt
}
else => {
// got error in connect_tasks, continue to retry
if retry_remain == 0 && connect_tasks.is_empty() {
break;
}
}
}
})
.hedge(Duration::from_millis(200))
.await
.context("failed to connect to peer")?;
// create a new connection task
if retry_remain == 0 {
continue;
}
retry_remain -= 1;
let kcp_endpoint = self.kcp_endpoint.clone();
let my_peer_id = peer_mgr.my_peer_id();
let conn_data_clone = conn_data;
connect_tasks.spawn(async move {
kcp_endpoint
.connect(
Duration::from_secs(10),
my_peer_id,
dst_peer,
Bytes::from(conn_data_clone.encode_to_vec()),
)
.await
.with_context(|| format!("failed to connect to nat dst: {}", nat_dst))
});
}
Err(anyhow::anyhow!("failed to connect to nat dst: {}", nat_dst).into())
Ok(stream)
}
fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool {
+107 -59
View File
@@ -18,17 +18,20 @@ use crate::tunnel::packet_def::{
PacketType, PeerManagerHeader, TAIL_RESERVED_SIZE, ZCPacket, ZCPacketType,
};
use crate::tunnel::quic::{client_config, endpoint_config, server_config};
use anyhow::{Context, Error, anyhow};
use crate::utils::task::HedgeExt;
use anyhow::{Context, Error, anyhow, bail, ensure};
use atomic_refcell::AtomicRefCell;
use bytes::{BufMut, Bytes, BytesMut};
use dashmap::DashMap;
use derivative::Derivative;
use derive_more::{Constructor, Deref, DerefMut, From, Into};
use guarden::defer;
use moka::future::Cache;
use prost::Message;
use quinn::udp::{EcnCodepoint, RecvMeta, Transmit};
use quinn::{
AsyncUdpSocket, Endpoint, RecvStream, SendStream, StreamId, UdpPoller, default_runtime,
AsyncUdpSocket, Connection, ConnectionError, Endpoint, RecvStream, SendStream, StreamId,
UdpPoller, WriteError, default_runtime,
};
use std::cmp::min;
use std::future::Future;
@@ -43,8 +46,8 @@ use tokio::io::{AsyncReadExt, Join, join};
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::{Receiver, Sender, channel};
use tokio::task::JoinSet;
use tokio::time::{Instant, timeout};
use tokio::{join, pin, select};
use tokio::time::timeout;
use tokio::{join, select};
use tokio_util::sync::PollSender;
use tracing::{debug, error, info, instrument, trace, warn};
@@ -279,6 +282,7 @@ impl From<(SendStream, RecvStream)> for QuicStream {
pub struct NatDstQuicConnector {
pub(crate) endpoint: Endpoint,
pub(crate) peer_mgr: Weak<PeerManager>,
pub(crate) conn_map: Cache<PeerId, Connection>,
}
#[async_trait::async_trait]
@@ -289,21 +293,25 @@ impl NatDstConnector for NatDstQuicConnector {
&self,
src: SocketAddr,
nat_dst: SocketAddr,
) -> crate::common::error::Result<Self::DstStream> {
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
return Err(anyhow::anyhow!("peer manager is not available").into());
) -> anyhow::Result<Self::DstStream> {
let peer_mgr = self
.peer_mgr
.upgrade()
.ok_or_else(|| anyhow!("peer manager is not available"))?;
let dst_peer = {
let SocketAddr::V4(addr) = nat_dst else {
bail!("ipv6 is not supported");
};
peer_mgr
.get_peer_map()
.get_peer_id_by_ipv4(addr.ip())
.await
.ok_or_else(|| anyhow!("no peer found for nat dst: {}", nat_dst))?
};
let Some(dst_peer_id) = (match nat_dst {
SocketAddr::V4(addr) => peer_mgr.get_peer_map().get_peer_id_by_ipv4(addr.ip()).await,
SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()),
}) else {
return Err(anyhow::anyhow!("no peer found for nat dst: {}", nat_dst).into());
};
tracing::trace!(?nat_dst, ?dst_peer, "quic nat");
trace!("quic nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peer_id);
let addr = QuicAddr::new(dst_peer_id, PacketType::QuicSrc).into();
let header = {
let conn_data = QuicConnData {
src: Some(src.into()),
@@ -311,61 +319,90 @@ impl NatDstConnector for NatDstQuicConnector {
};
let len = conn_data.encoded_len();
if len > (u16::MAX as usize) {
return Err(anyhow!("conn data too large: {:?}", len).into());
}
ensure!(len <= u16::MAX as usize, "conn data too large: {len}");
let mut buf = BytesMut::with_capacity(2 + len);
buf.put_u16(len as u16);
conn_data.encode(&mut buf).unwrap();
conn_data.encode(&mut buf)?;
buf.freeze()
};
let mut connect_tasks = JoinSet::<Result<QuicStream, Error>>::new();
let connect = |tasks: &mut JoinSet<_>| {
let endpoint = self.endpoint.clone();
let header = header.clone();
let reconnect = || async move {
self.conn_map.invalidate(&dst_peer).await;
tasks.spawn(async move {
let connection = endpoint.connect(addr, "")?.await?;
let mut stream: QuicStream = connection.open_bi().await?.into();
stream.writer_mut().write_chunk(header).await?;
Ok(stream)
});
let connect = (0..5)
.map(|_| {
let endpoint = self.endpoint.clone();
async move {
endpoint
.connect(QuicAddr::new(dst_peer, PacketType::QuicSrc).into(), "")
.context("failed to create connection")?
.await
.context("connection failed")
}
})
.hedge(Duration::from_millis(200));
self.conn_map
.try_get_with(dst_peer, connect)
.await
.context("failed to connect to peer")
};
connect(&mut connect_tasks);
let mut reconnected = false;
let timer = tokio::time::sleep(Duration::from_millis(200));
pin!(timer);
let mut connection = if let Some(connection) = self.conn_map.get(&dst_peer).await
&& connection.close_reason().is_none()
{
connection
} else {
reconnected = true;
reconnect().await?
};
let mut retry_remain = 5;
loop {
select! {
Some(result) = connect_tasks.join_next() => {
match result {
Ok(Ok(stream)) => return Ok(stream.into()),
_ => {
if connect_tasks.is_empty() {
if retry_remain == 0 {
return Err(anyhow!("failed to connect to nat dst: {:?}", nat_dst).into())
}
let is_retryable = |error: &ConnectionError| {
matches!(
error,
ConnectionError::ConnectionClosed(_)
| ConnectionError::ApplicationClosed(_)
| ConnectionError::Reset
| ConnectionError::TimedOut
)
};
let mut retry = !reconnected;
let header = header.clone();
let result = async {
let mut stream: QuicStream = connection
.open_bi()
.await
.inspect_err(|error| retry &= is_retryable(error))?
.into();
stream
.writer_mut()
.write_chunk(header)
.await
.inspect_err(|error| {
retry &= matches!(error, WriteError::ConnectionLost(error) if is_retryable(error))
})?;
Ok(stream.into())
}
.await;
retry_remain -= 1;
connect(&mut connect_tasks);
timer.as_mut().reset(Instant::now() + Duration::from_millis(200))
}
}
}
}
_ = &mut timer, if retry_remain > 0 => {
retry_remain -= 1;
connect(&mut connect_tasks);
timer.as_mut().reset(Instant::now() + Duration::from_millis(200));
if let Err(error) = &result {
if retry {
debug!(?error, "failed to open quic stream, retrying...");
reconnected = true;
connection = reconnect().await?;
continue;
} else {
self.conn_map.invalidate(&dst_peer).await;
}
}
break result;
}
}
@@ -595,10 +632,17 @@ impl QuicStreamReceiver {
}
};
match Self::establish_stream(stream, ctx.clone()).await {
Ok(stream) => drop(tasks.spawn(stream)),
Err(e) => warn!("failed to establish quic stream from {:?}: {:?}", connection.remote_address(), e),
}
let ctx = ctx.clone();
tasks.spawn(async move {
match Self::establish_stream(stream, ctx).await {
Ok(transfer_fut) => {
if let Err(e) = transfer_fut.await {
warn!("quic stream transfer error: {:?}", e);
}
}
Err(e) => warn!("failed to establish quic stream: {:?}", e),
}
});
}
res = tasks.join_next(), if !tasks.is_empty() => {
@@ -816,7 +860,7 @@ impl QuicProxy {
Arc::new(socket),
default_runtime().unwrap(),
)
.unwrap();
.unwrap(); // TODO: maybe a different transport config
endpoint.set_default_client_config(client_config());
self.endpoint = Some(endpoint.clone());
@@ -845,6 +889,10 @@ impl QuicProxy {
NatDstQuicConnector {
endpoint: endpoint.clone(),
peer_mgr: Arc::downgrade(&peer_mgr),
conn_map: Cache::builder()
.max_capacity(u8::MAX.into()) // cf. quinn transport config (max_concurrent_bidi_streams)
.time_to_idle(Duration::from_secs(600)) // cf. quinn transport config (max_idle_timeout)
.build(),
},
));
+1 -1
View File
@@ -240,7 +240,7 @@ impl AsyncTcpConnector for Socks5KcpConnector {
let ret = c
.connect(self.src_addr, addr)
.await
.map_err(|e| super::fast_socks5::SocksError::Other(e.into()))?;
.map_err(super::fast_socks5::SocksError::Other)?;
Ok(SocksTcpStream::Kcp(ret))
}
}
+8 -9
View File
@@ -44,7 +44,7 @@ use super::tokio_smoltcp::{self, Net, NetConfig, channel_device};
pub(crate) trait NatDstConnector: Send + Sync + Clone + 'static {
type DstStream: AsyncRead + AsyncWrite + Unpin + Send;
async fn connect(&self, src: SocketAddr, dst: SocketAddr) -> Result<Self::DstStream>;
async fn connect(&self, src: SocketAddr, dst: SocketAddr) -> anyhow::Result<Self::DstStream>;
fn check_packet_from_peer_fast(&self, cidr_set: &CidrSet, global_ctx: &GlobalCtx) -> bool;
fn check_packet_from_peer(
&self,
@@ -63,14 +63,13 @@ pub struct NatDstTcpConnector;
#[async_trait::async_trait]
impl NatDstConnector for NatDstTcpConnector {
type DstStream = TcpStream;
async fn connect(&self, _src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> {
let socket = match TcpSocket::new_v4() {
Ok(s) => s,
Err(error) => {
log::error!(?error, "create v4 socket failed");
return Err(error.into());
}
};
async fn connect(
&self,
_src: SocketAddr,
nat_dst: SocketAddr,
) -> anyhow::Result<Self::DstStream> {
let socket = TcpSocket::new_v4()
.inspect_err(|error| log::error!(?error, "create v4 socket failed"))?;
let stream = timeout(Duration::from_secs(10), socket.connect(nat_dst))
.await?
+9 -40
View File
@@ -560,39 +560,16 @@ impl InstanceConfigPatcher {
return Ok(());
}
let global_ctx = weak_upgrade(&self.global_ctx)?;
let current_mapped_listener_configs = global_ctx.config.get_mapped_listener_configs();
let mut priority_by_url = current_mapped_listener_configs
.iter()
.map(|listener| (listener.url.clone(), listener.priority))
.collect::<std::collections::HashMap<_, _>>();
let mut current_mapped_listeners = current_mapped_listener_configs
.into_iter()
.map(|listener| listener.url)
.collect();
for patch in &mapped_listeners {
if let (Some(url), Some(priority)) = (&patch.url, patch.priority) {
priority_by_url.insert(url.clone().into(), priority);
}
}
let mut current_mapped_listeners = global_ctx.config.get_mapped_listeners();
let patches = mapped_listeners.into_iter().map(Into::into).collect();
InstanceConfigPatcher::trace_patchables(&patches);
crate::proto::api::config::patch_vec(&mut current_mapped_listeners, patches);
if current_mapped_listeners.is_empty() {
global_ctx.config.set_mapped_listener_configs(None);
global_ctx.config.set_mapped_listeners(None);
} else {
let mapped_listener_configs = current_mapped_listeners
.into_iter()
.map(|url| {
let priority = priority_by_url
.get(&url)
.copied()
.unwrap_or(crate::common::config::DEFAULT_CONNECTION_PRIORITY);
crate::common::config::ListenerConfig::new(url, priority)
})
.collect();
global_ctx
.config
.set_mapped_listener_configs(Some(mapped_listener_configs));
.set_mapped_listeners(Some(current_mapped_listeners));
}
Ok(())
}
@@ -613,14 +590,7 @@ impl InstanceConfigPatcher {
match ConfigPatchAction::try_from(connector.action) {
Ok(ConfigPatchAction::Add) => {
tracing::info!("Connector added: {}", url);
conn_manager
.add_connector_by_url_with_priority(
url,
connector
.priority
.unwrap_or(crate::common::config::DEFAULT_CONNECTION_PRIORITY),
)
.await?;
conn_manager.add_connector_by_url(url).await?;
}
Ok(ConfigPatchAction::Remove) => {
tracing::info!("Connector removed: {}", url);
@@ -772,7 +742,7 @@ impl Instance {
async fn add_initial_peers(&self) -> Result<(), Error> {
for peer in self.global_ctx.config.get_peers().iter() {
self.get_conn_manager()
.add_connector_by_url_with_priority(peer.uri.clone(), peer.priority)
.add_connector_by_url(peer.uri.clone())
.await?;
}
Ok(())
@@ -1258,12 +1228,11 @@ impl Instance {
_request: ListMappedListenerRequest,
) -> Result<ListMappedListenerResponse, rpc_types::error::Error> {
let mut ret = ListMappedListenerResponse::default();
let listener_configs = weak_upgrade(&self.0)?.config.get_mapped_listener_configs();
let mapped_listeners: Vec<MappedListener> = listener_configs
let urls = weak_upgrade(&self.0)?.config.get_mapped_listeners();
let mapped_listeners: Vec<MappedListener> = urls
.into_iter()
.map(|listener| MappedListener {
url: Some(listener.url.into()),
priority: listener.priority,
.map(|u| MappedListener {
url: Some(u.into()),
})
.collect();
ret.mappedlisteners = mapped_listeners;
+5 -26
View File
@@ -91,7 +91,6 @@ pub type ListenerCreator = Box<dyn ListenerCreatorTrait>;
struct ListenerFactory {
creator_fn: Arc<ListenerCreator>,
must_succ: bool,
priority: u32,
}
pub struct ListenerManager<H> {
@@ -126,9 +125,8 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
)
.await?;
for listener_cfg in self.global_ctx.config.get_listener_configs().iter() {
let l = listener_cfg.url.clone();
let priority = listener_cfg.priority;
for l in self.global_ctx.config.get_listener_uris().iter() {
let l = l.clone();
let Ok(_) = create_listener_by_url(&l, self.global_ctx.clone()) else {
let msg = format!("failed to get listener by url: {}, maybe not supported", l);
self.global_ctx
@@ -138,10 +136,9 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
let ctx = self.global_ctx.clone();
let listener = l.clone();
self.add_listener_with_priority(
self.add_listener(
move || create_listener_by_url(&listener, ctx.clone()).unwrap(),
true,
priority,
)
.await?;
@@ -156,10 +153,9 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
.set_host(Some("[::]".to_string().as_str()))
.with_context(|| format!("failed to set ipv6 host for listener: {}", l))?;
let ctx = self.global_ctx.clone();
self.add_listener_with_priority(
self.add_listener(
move || create_listener_by_url(&ipv6_listener, ctx.clone()).unwrap(),
false,
priority,
)
.await?;
}
@@ -172,25 +168,10 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
&mut self,
creator: C,
must_succ: bool,
) -> Result<(), Error> {
self.add_listener_with_priority(
creator,
must_succ,
crate::common::config::DEFAULT_CONNECTION_PRIORITY,
)
.await
}
pub async fn add_listener_with_priority<C: ListenerCreatorTrait + 'static>(
&mut self,
creator: C,
must_succ: bool,
priority: u32,
) -> Result<(), Error> {
self.listeners.push(ListenerFactory {
creator_fn: Arc::new(Box::new(creator)),
must_succ,
priority,
});
Ok(())
}
@@ -200,7 +181,6 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
creator: Arc<ListenerCreator>,
peer_manager: Weak<H>,
global_ctx: ArcGlobalCtx,
priority: u32,
) {
let mut err_count = 0;
loop {
@@ -209,7 +189,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
match l.listen().await {
Ok(_) => {
err_count = 0;
global_ctx.add_running_listener_with_priority(l.local_url(), priority);
global_ctx.add_running_listener(l.local_url());
global_ctx.issue_event(GlobalCtxEvent::ListenerAdded(l.local_url()));
}
Err(e) => {
@@ -290,7 +270,6 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
listener.creator_fn.clone(),
self.peer_manager.clone(),
self.global_ctx.clone(),
listener.priority,
));
}
+3
View File
@@ -10,3 +10,6 @@ pub mod proxy_cidrs_monitor;
#[cfg(feature = "tun")]
pub mod virtual_nic;
#[cfg(any(windows, test))]
pub(crate) mod windows_udp_broadcast;
@@ -1,5 +1,8 @@
use std::{path::Path, sync::Arc};
#[cfg(target_os = "linux")]
use std::path::Path;
use std::sync::Arc;
#[cfg(target_os = "linux")]
use anyhow::Context;
use cidr::{Ipv6Cidr, Ipv6Inet};
#[cfg(target_os = "linux")]
@@ -321,7 +324,7 @@ async fn resolve_public_ipv6_provider_runtime_state_linux(
}
async fn resolve_public_ipv6_provider_runtime_state(
global_ctx: &ArcGlobalCtx,
_global_ctx: &ArcGlobalCtx,
config: PublicIpv6ProviderConfigSnapshot,
) -> PublicIpv6ProviderRuntimeState {
if !config.provider_enabled {
@@ -331,7 +334,7 @@ async fn resolve_public_ipv6_provider_runtime_state(
#[cfg(target_os = "linux")]
{
return resolve_public_ipv6_provider_runtime_state_linux(
global_ctx,
_global_ctx,
config.configured_prefix,
)
.await;
+35
View File
@@ -35,6 +35,8 @@ use tokio::{
task::JoinSet,
};
use tokio_util::bytes::Bytes;
#[cfg(target_os = "windows")]
use tokio_util::task::AbortOnDropHandle;
use tun::{AbstractDevice, AsyncDevice, Configuration, Layer};
use zerocopy::{NativeEndian, NetworkEndian};
@@ -801,6 +803,9 @@ pub struct NicCtx {
nic: Arc<Mutex<VirtualNic>>,
tasks: JoinSet<()>,
#[cfg(target_os = "windows")]
windows_udp_broadcast_relay: Option<AbortOnDropHandle<()>>,
}
impl NicCtx {
@@ -819,6 +824,9 @@ impl NicCtx {
nic: Arc::new(Mutex::new(VirtualNic::new(global_ctx))),
tasks: JoinSet::new(),
#[cfg(target_os = "windows")]
windows_udp_broadcast_relay: None,
}
}
@@ -1005,6 +1013,31 @@ impl NicCtx {
});
}
#[cfg(target_os = "windows")]
fn start_windows_udp_broadcast_relay(&mut self, virtual_ipv4: Ipv4Inet) {
if !self.global_ctx.get_flags().enable_udp_broadcast_relay {
return;
}
let Some(peer_manager) = self.peer_mgr.upgrade() else {
tracing::warn!("peer manager is dropped, skip Windows UDP broadcast relay");
return;
};
match super::windows_udp_broadcast::start(peer_manager, virtual_ipv4) {
Ok(handle) => {
self.windows_udp_broadcast_relay = Some(handle);
tracing::info!("Windows UDP broadcast relay started");
}
Err(err) => {
tracing::warn!(
?err,
"failed to start Windows UDP broadcast relay; administrator privileges are required"
);
}
}
}
async fn apply_route_changes(
ifcfg: &impl IfConfiguerTrait,
ifname: &str,
@@ -1347,6 +1380,8 @@ impl NicCtx {
// Assign IPv4 address if provided
if let Some(ipv4_addr) = ipv4_addr {
self.assign_ipv4_to_tun_device(ipv4_addr).await?;
#[cfg(target_os = "windows")]
self.start_windows_udp_broadcast_relay(ipv4_addr);
}
// Assign IPv6 address if provided
File diff suppressed because it is too large Load Diff
+22
View File
@@ -474,6 +474,28 @@ fn handle_event(
);
}
GlobalCtxEvent::UdpBroadcastRelayStartResult {
capture_backend,
error,
} => {
if let Some(error) = error {
event!(
warn,
?capture_backend,
%error,
"[{}] UDP broadcast relay start failed",
instance_id
);
} else {
event!(
info,
?capture_backend,
"[{}] UDP broadcast relay started",
instance_id
);
}
}
GlobalCtxEvent::CredentialChanged => {
event!(info, "[{}] credential changed", instance_id);
}
+6 -3
View File
@@ -551,7 +551,6 @@ impl NetworkConfig {
format!("failed to parse public server uri: {}", public_server_url)
})?,
peer_public_key: None,
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
}]);
}
NetworkingMethod::Manual => {
@@ -565,7 +564,6 @@ impl NetworkConfig {
.parse()
.with_context(|| format!("failed to parse peer uri: {}", peer_url))?,
peer_public_key: None,
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
});
}
if !peers.is_empty() {
@@ -822,6 +820,10 @@ impl NetworkConfig {
flags.disable_relay_data = disable_relay_data;
}
if let Some(enable_udp_broadcast_relay) = self.enable_udp_broadcast_relay {
flags.enable_udp_broadcast_relay = enable_udp_broadcast_relay;
}
if let Some(disable_sym_hole_punching) = self.disable_sym_hole_punching {
flags.disable_sym_hole_punching = disable_sym_hole_punching;
}
@@ -997,6 +999,7 @@ impl NetworkConfig {
result.disable_udp_hole_punching = Some(flags.disable_udp_hole_punching);
result.disable_upnp = Some(flags.disable_upnp);
result.disable_relay_data = Some(flags.disable_relay_data);
result.enable_udp_broadcast_relay = Some(flags.enable_udp_broadcast_relay);
result.disable_sym_hole_punching = Some(flags.disable_sym_hole_punching);
result.enable_magic_dns = Some(flags.accept_dns);
result.mtu = Some(flags.mtu as i32);
@@ -1111,7 +1114,6 @@ mod tests {
peers.push(crate::common::config::PeerConfig {
uri,
peer_public_key: None,
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
});
}
config.set_peers(peers);
@@ -1266,6 +1268,7 @@ mod tests {
flags.disable_tcp_hole_punching = rng.gen_bool(0.2);
flags.disable_udp_hole_punching = rng.gen_bool(0.2);
flags.disable_upnp = rng.gen_bool(0.2);
flags.enable_udp_broadcast_relay = rng.gen_bool(0.2);
flags.accept_dns = rng.gen_bool(0.6);
flags.mtu = rng.gen_range(1200..1500);
flags.private_mode = rng.gen_bool(0.3);
+128 -18
View File
@@ -6,11 +6,15 @@ in the future, with the help wo peer center we can forward packets of peers that
connected to any node in the local network.
*/
use std::{
sync::{Arc, Weak},
sync::{
Arc, Weak,
atomic::{AtomicBool, Ordering},
},
time::SystemTime,
};
use dashmap::{DashMap, DashSet};
use guarden::defer;
use tokio::{
sync::{
Mutex,
@@ -93,6 +97,7 @@ struct ForeignNetworkEntry {
stats_mgr: Arc<StatsManager>,
traffic_metrics: Arc<TrafficMetricRecorder>,
event_handler_started: AtomicBool,
tasks: Mutex<JoinSet<()>>,
@@ -160,10 +165,11 @@ impl ForeignNetworkEntry {
InstanceLabelKind::From,
)),
{
let peer_map = peer_map.clone();
let peer_map = Arc::downgrade(&peer_map);
move |peer_id| {
let peer_map = peer_map.clone();
async move {
let peer_map = peer_map.upgrade()?;
peer_map
.get_route_peer_info(peer_id)
.await
@@ -230,6 +236,7 @@ impl ForeignNetworkEntry {
stats_mgr,
traffic_metrics,
event_handler_started: AtomicBool::new(false),
tasks: Mutex::new(JoinSet::new()),
@@ -279,7 +286,7 @@ impl ForeignNetworkEntry {
flags.disable_relay_quic = !global_ctx.get_flags().enable_relay_foreign_network_quic;
config.set_flags(flags);
config.set_mapped_listener_configs(Some(global_ctx.config.get_mapped_listener_configs()));
config.set_mapped_listeners(Some(global_ctx.config.get_mapped_listeners()));
let foreign_global_ctx = Arc::new(GlobalCtx::new(config));
foreign_global_ctx
@@ -291,8 +298,8 @@ impl ForeignNetworkEntry {
Self::desired_avoid_relay_data_feature_flag(&global_ctx, relay_data);
foreign_global_ctx.set_base_advertised_feature_flags(feature_flag);
for listener in global_ctx.get_running_listener_configs().into_iter() {
foreign_global_ctx.add_running_listener_with_priority(listener.url, listener.priority);
for u in global_ctx.get_running_listeners().into_iter() {
foreign_global_ctx.add_running_listener(u);
}
foreign_global_ctx
@@ -674,6 +681,8 @@ struct ForeignNetworkManagerData {
network_peer_last_update: DashMap<String, SystemTime>,
accessor: Arc<Box<dyn GlobalForeignNetworkAccessor>>,
lock: std::sync::Mutex<()>,
#[cfg(test)]
fail_next_add_peer_conn_after_entry_insert: AtomicBool,
}
impl ForeignNetworkManagerData {
@@ -732,6 +741,36 @@ impl ForeignNetworkManagerData {
shrink_dashmap(&self.network_peer_last_update, None);
}
fn remove_network_if_current(
&self,
network_name: &String,
expected_entry: &Weak<ForeignNetworkEntry>,
) {
let _l = self.lock.lock().unwrap();
let Some(expected_entry) = expected_entry.upgrade() else {
return;
};
let old = self
.network_peer_maps
.remove_if(network_name, |_, entry| Arc::ptr_eq(entry, &expected_entry));
let Some((_, old)) = old else {
return;
};
old.traffic_metrics.clear_peer_cache();
let to_remove_peers = old.peer_map.list_peers();
for p in to_remove_peers {
self.peer_network_map.remove_if(&p, |_, v| {
v.remove(network_name);
v.is_empty()
});
}
self.network_peer_last_update.remove(network_name);
shrink_dashmap(&self.peer_network_map, None);
shrink_dashmap(&self.network_peer_maps, None);
shrink_dashmap(&self.network_peer_last_update, None);
}
#[allow(clippy::too_many_arguments)]
async fn get_or_insert_entry(
&self,
@@ -874,6 +913,8 @@ impl ForeignNetworkManager {
network_peer_last_update: DashMap::new(),
accessor: Arc::new(accessor),
lock: std::sync::Mutex::new(()),
#[cfg(test)]
fail_next_add_peer_conn_after_entry_insert: AtomicBool::new(false),
});
let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new()));
@@ -891,6 +932,13 @@ impl ForeignNetworkManager {
}
}
#[cfg(test)]
fn fail_next_add_peer_conn_after_entry_insert(&self) {
self.data
.fail_next_add_peer_conn_after_entry_insert
.store(true, Ordering::Release);
}
pub fn get_network_peer_id(&self, network_name: &str) -> Option<PeerId> {
self.data
.network_peer_maps
@@ -939,6 +987,35 @@ impl ForeignNetworkManager {
)
.await;
defer!(rollback_new_entry => sync [
data = self.data.clone(),
network_name = entry.network.network_name.clone(),
peer_id = peer_conn.get_peer_id(),
should_rollback = new_added
] {
if should_rollback {
tracing::warn!(
%network_name,
"rollback newly added foreign network entry after add_peer_conn returned error"
);
data.remove_peer(peer_id, &network_name);
}
});
#[cfg(test)]
if self
.data
.fail_next_add_peer_conn_after_entry_insert
.swap(false, Ordering::AcqRel)
{
return Err(anyhow::anyhow!(
"injected add_peer_conn failure after foreign network entry insert"
)
.into());
}
self.ensure_event_handler_started(&entry);
let same_identity = entry.network == peer_network;
let peer_identity_type = peer_conn.get_peer_identity_type();
let credential_peer_trusted = peer_digest_empty
@@ -952,10 +1029,6 @@ impl ForeignNetworkManager {
|| credential_identity_mismatch
|| entry.my_peer_id != peer_conn.get_my_peer_id()
{
if new_added {
self.data
.remove_peer(peer_conn.get_peer_id(), &entry.network.network_name.clone());
}
let err = if entry.my_peer_id != peer_conn.get_my_peer_id() {
anyhow::anyhow!(
"my peer id not match. exp: {:?} real: {:?}, need retry connect",
@@ -980,9 +1053,7 @@ impl ForeignNetworkManager {
return Err(err.into());
}
if new_added {
self.start_event_handler(&entry).await;
} else if let Some(peer) = entry.peer_map.get_peer_by_id(peer_conn.get_peer_id()) {
if !new_added && let Some(peer) = entry.peer_map.get_peer_by_id(peer_conn.get_peer_id()) {
let direct_conns_len = peer.get_directly_connections().len();
let max_count = use_global_var!(MAX_DIRECT_CONNS_PER_PEER_IN_FOREIGN_NETWORK);
if direct_conns_len >= max_count as usize {
@@ -996,23 +1067,31 @@ impl ForeignNetworkManager {
}
entry.peer_map.add_new_peer_conn(peer_conn).await?;
let _ = rollback_new_entry.defuse();
Ok(())
}
async fn start_event_handler(&self, entry: &ForeignNetworkEntry) {
fn ensure_event_handler_started(&self, entry: &Arc<ForeignNetworkEntry>) {
if entry.event_handler_started.swap(true, Ordering::AcqRel) {
return;
}
let data = self.data.clone();
let network_name = entry.network.network_name.clone();
let traffic_metrics = entry.traffic_metrics.clone();
let entry_for_cleanup = Arc::downgrade(entry);
let traffic_metrics = Arc::downgrade(&entry.traffic_metrics);
let mut s = entry.global_ctx.subscribe();
self.tasks.lock().unwrap().spawn(async move {
while let Ok(e) = s.recv().await {
match &e {
GlobalCtxEvent::PeerRemoved(peer_id) => {
tracing::info!(?e, "remove peer from foreign network manager");
traffic_metrics.remove_peer(*peer_id);
data.remove_peer(*peer_id, &network_name);
if let Some(traffic_metrics) = traffic_metrics.upgrade() {
traffic_metrics.remove_peer(*peer_id);
}
data.network_peer_last_update
.insert(network_name.clone(), SystemTime::now());
data.remove_peer(*peer_id, &network_name);
}
GlobalCtxEvent::PeerConnRemoved(..) => {
tracing::info!(?e, "clear no conn peer from foreign network manager");
@@ -1028,8 +1107,10 @@ impl ForeignNetworkManager {
}
// if lagged or recv done just remove the network
tracing::error!("global event handler at foreign network manager exit");
traffic_metrics.clear_peer_cache();
data.remove_network(&network_name);
if let Some(traffic_metrics) = traffic_metrics.upgrade() {
traffic_metrics.clear_peer_cache();
}
data.remove_network_if_current(&network_name, &entry_for_cleanup);
});
}
@@ -1615,6 +1696,35 @@ pub mod tests {
.await;
}
#[tokio::test]
async fn failed_new_foreign_peer_conn_rolls_back_entry_maps() {
let pm_center = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
let pma_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
let foreign_mgr = pm_center.get_foreign_network_manager();
foreign_mgr.fail_next_add_peer_conn_after_entry_insert();
let (a_ring, b_ring) = crate::tunnel::ring::create_ring_tunnel_pair();
let (client_ret, server_ret) = tokio::time::timeout(Duration::from_secs(5), async {
tokio::join!(
pma_net1.add_client_tunnel(a_ring, false),
pm_center.add_tunnel_as_server(b_ring, true)
)
})
.await
.unwrap();
assert!(client_ret.is_ok());
assert!(server_ret.is_err());
assert!(foreign_mgr.data.get_network_entry("net1").is_none());
assert!(
foreign_mgr
.data
.get_peer_network(pma_net1.my_peer_id())
.is_none()
);
}
#[tokio::test]
async fn foreign_network_peer_removed_clears_traffic_metric_peer_cache() {
let pm_center = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
+16 -169
View File
@@ -188,45 +188,23 @@ impl Peer {
async fn select_conn(&self) -> Option<ArcPeerConn> {
let default_conn_id = self.default_conn_id.load();
let latency_score = |latency_us| {
if latency_us == 0 {
u64::MAX
} else {
latency_us
}
};
let selected_conn = self
.conns
.iter()
.filter(|conn| !conn.value().is_closed())
.min_by_key(|conn| {
(
conn.value().priority(),
latency_score(conn.value().get_stats().latency_us),
)
})
.map(|conn| {
(
conn.get_conn_id(),
conn.value().priority(),
latency_score(conn.value().get_stats().latency_us),
)
});
let (selected_conn_id, selected_priority, selected_latency) = selected_conn?;
if let Some(default_conn) = self.conns.get(&default_conn_id)
&& !default_conn.is_closed()
&& (
default_conn.priority(),
latency_score(default_conn.get_stats().latency_us),
) == (selected_priority, selected_latency)
{
return Some(default_conn.clone());
if let Some(conn) = self.conns.get(&default_conn_id) {
return Some(conn.clone());
}
self.default_conn_id.store(selected_conn_id);
self.conns.get(&selected_conn_id).map(|conn| conn.clone())
// find a conn with the smallest latency
let mut min_latency = u64::MAX;
for conn in self.conns.iter() {
let latency = conn.value().get_stats().latency_us;
if latency < min_latency {
min_latency = latency;
self.default_conn_id.store(conn.get_conn_id());
}
}
self.conns
.get(&self.default_conn_id.load())
.map(|conn| conn.clone())
}
pub async fn send_msg(&self, msg: ZCPacket) -> Result<(), Error> {
@@ -271,26 +249,12 @@ impl Peer {
self.conns.iter().any(|entry| !entry.value().is_closed())
}
pub fn has_conn_with_priority_at_most(&self, priority: u32) -> bool {
self.conns
.iter()
.any(|entry| !entry.value().is_closed() && entry.value().priority() <= priority)
}
pub fn has_directly_connected_conn(&self) -> bool {
self.conns
.iter()
.any(|entry| !entry.value().is_closed() && !entry.value().is_hole_punched())
}
pub fn has_directly_connected_conn_with_priority_at_most(&self, priority: u32) -> bool {
self.conns.iter().any(|entry| {
!entry.value().is_closed()
&& !entry.value().is_hole_punched()
&& entry.value().priority() <= priority
})
}
pub fn get_directly_connections(&self) -> DashSet<uuid::Uuid> {
self.conns
.iter()
@@ -334,7 +298,7 @@ mod tests {
use crate::{
common::{
config::{DEFAULT_CONNECTION_PRIORITY, NetworkIdentity, PeerConfig},
config::{NetworkIdentity, PeerConfig},
global_ctx::{GlobalCtx, tests::get_mock_global_ctx},
new_peer_id,
},
@@ -416,122 +380,6 @@ mod tests {
close_handler.await.unwrap().unwrap();
}
#[tokio::test]
async fn select_conn_prefers_priority_then_latency() {
let (packet_send, _packet_recv) = create_packet_recv_chan();
let global_ctx = get_mock_global_ctx();
let local_peer_id = new_peer_id();
let remote_peer_id = new_peer_id();
let peer = Peer::new(remote_peer_id, packet_send, global_ctx.clone());
let ps = Arc::new(PeerSessionStore::new());
let (low_client_tunnel, low_server_tunnel) = create_ring_tunnel_pair();
let mut low_client_conn = PeerConn::new(
local_peer_id,
global_ctx.clone(),
low_client_tunnel,
ps.clone(),
);
low_client_conn.set_priority(100);
low_client_conn.record_latency_for_test(1000);
let low_conn_id = low_client_conn.get_conn_id();
let mut low_server_conn = PeerConn::new(
remote_peer_id,
global_ctx.clone(),
low_server_tunnel,
ps.clone(),
);
let (client_ret, server_ret) = tokio::join!(
low_client_conn.do_handshake_as_client(),
low_server_conn.do_handshake_as_server()
);
client_ret.unwrap();
server_ret.unwrap();
peer.add_peer_conn(low_client_conn).await.unwrap();
assert_eq!(peer.select_conn().await.unwrap().get_conn_id(), low_conn_id);
let (same_priority_client_tunnel, same_priority_server_tunnel) = create_ring_tunnel_pair();
let mut same_priority_client_conn = PeerConn::new(
local_peer_id,
global_ctx.clone(),
same_priority_client_tunnel,
ps.clone(),
);
same_priority_client_conn.set_priority(100);
same_priority_client_conn.record_latency_for_test(10);
let same_priority_conn_id = same_priority_client_conn.get_conn_id();
let mut same_priority_server_conn = PeerConn::new(
remote_peer_id,
global_ctx.clone(),
same_priority_server_tunnel,
ps.clone(),
);
let (client_ret, server_ret) = tokio::join!(
same_priority_client_conn.do_handshake_as_client(),
same_priority_server_conn.do_handshake_as_server()
);
client_ret.unwrap();
server_ret.unwrap();
peer.add_peer_conn(same_priority_client_conn).await.unwrap();
assert_eq!(
peer.select_conn().await.unwrap().get_conn_id(),
same_priority_conn_id
);
let (unknown_latency_client_tunnel, unknown_latency_server_tunnel) =
create_ring_tunnel_pair();
let mut unknown_latency_client_conn = PeerConn::new(
local_peer_id,
global_ctx.clone(),
unknown_latency_client_tunnel,
ps.clone(),
);
unknown_latency_client_conn.set_priority(100);
let mut unknown_latency_server_conn = PeerConn::new(
remote_peer_id,
global_ctx.clone(),
unknown_latency_server_tunnel,
ps.clone(),
);
let (client_ret, server_ret) = tokio::join!(
unknown_latency_client_conn.do_handshake_as_client(),
unknown_latency_server_conn.do_handshake_as_server()
);
client_ret.unwrap();
server_ret.unwrap();
peer.add_peer_conn(unknown_latency_client_conn)
.await
.unwrap();
assert_eq!(
peer.select_conn().await.unwrap().get_conn_id(),
same_priority_conn_id
);
let (high_client_tunnel, high_server_tunnel) = create_ring_tunnel_pair();
let mut high_client_conn = PeerConn::new(
local_peer_id,
global_ctx.clone(),
high_client_tunnel,
ps.clone(),
);
high_client_conn.set_priority(DEFAULT_CONNECTION_PRIORITY);
let high_conn_id = high_client_conn.get_conn_id();
let mut high_server_conn =
PeerConn::new(remote_peer_id, global_ctx, high_server_tunnel, ps);
let (client_ret, server_ret) = tokio::join!(
high_client_conn.do_handshake_as_client(),
high_server_conn.do_handshake_as_server()
);
client_ret.unwrap();
server_ret.unwrap();
peer.add_peer_conn(high_client_conn).await.unwrap();
assert_eq!(
peer.select_conn().await.unwrap().get_conn_id(),
high_conn_id
);
}
#[tokio::test]
async fn reject_peer_conn_with_mismatched_identity_type() {
let (packet_send, _packet_recv) = create_packet_recv_chan();
@@ -575,7 +423,6 @@ mod tests {
.local_public_key
.unwrap(),
),
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
}]);
let mut shared_client_conn = PeerConn::new(
local_peer_id,
+1 -23
View File
@@ -37,7 +37,7 @@ use crate::utils::BoxExt;
use crate::{
common::{
PeerId,
config::{DEFAULT_CONNECTION_PRIORITY, NetworkIdentity, NetworkSecretDigest},
config::{NetworkIdentity, NetworkSecretDigest},
error::Error,
global_ctx::ArcGlobalCtx,
},
@@ -305,7 +305,6 @@ pub struct PeerConn {
// remote or local
is_hole_punched: bool,
priority: u32,
close_event_notifier: Arc<PeerConnCloseNotify>,
@@ -394,7 +393,6 @@ impl PeerConn {
is_client: None,
is_hole_punched: true,
priority: DEFAULT_CONNECTION_PRIORITY,
close_event_notifier: Arc::new(PeerConnCloseNotify::new(conn_id)),
@@ -444,19 +442,6 @@ impl PeerConn {
self.is_hole_punched
}
pub fn set_priority(&mut self, priority: u32) {
self.priority = priority;
}
pub fn priority(&self) -> u32 {
self.priority
}
#[cfg(test)]
pub(crate) fn record_latency_for_test(&self, latency_us: u32) {
self.latency_stats.record_latency(latency_us);
}
pub fn is_closed(&self) -> bool {
self.close_event_notifier.is_closed()
}
@@ -544,7 +529,6 @@ impl PeerConn {
version: VERSION,
features: Vec::new(),
network_name: network.network_name.clone(),
connection_priority: self.priority,
..Default::default()
};
@@ -837,7 +821,6 @@ impl PeerConn {
a_session_generation,
a_conn_id: Some(a_conn_id.into()),
client_encryption_algorithm: self.my_encrypt_algo.clone(),
connection_priority: self.priority,
};
let mut hs = builder
@@ -1089,7 +1072,6 @@ impl PeerConn {
Some(&mut hs),
first_msg1,
)?;
self.priority = msg1_pb.connection_priority;
let remote_network_name = msg1_pb.a_network_name.clone();
self.record_control_rx(&remote_network_name, first_msg1_len);
@@ -1245,7 +1227,6 @@ impl PeerConn {
features: Vec::new(),
network_secret_digest: noise.secret_digest.clone(),
connection_priority: self.priority,
}
}
@@ -1283,7 +1264,6 @@ impl PeerConn {
self.is_client = Some(false);
} else if hdr.packet_type == PacketType::HandShake as u8 {
let rsp = Self::decode_handshake_packet(&first_pkt)?;
self.priority = rsp.connection_priority;
handshake_recved(self, &rsp.network_name)?;
tracing::info!("handshake request: {:?}", rsp);
self.record_control_rx(&rsp.network_name, first_pkt.buf_len() as u64);
@@ -1586,7 +1566,6 @@ impl PeerConn {
.as_ref()
.map(|x| x.peer_identity_type as i32)
.unwrap_or(PeerIdentityType::Admin as i32),
priority: self.priority,
}
}
@@ -2109,7 +2088,6 @@ pub mod tests {
.local_public_key
.unwrap(),
),
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
}]);
let ps = Arc::new(PeerSessionStore::new());
+55 -56
View File
@@ -22,7 +22,6 @@ use crate::{
common::{
PeerId,
compressor::{Compressor as _, DefaultCompressor},
config::DEFAULT_CONNECTION_PRIORITY,
constants::EASYTIER_VERSION,
error::Error,
global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity},
@@ -32,7 +31,6 @@ use crate::{
},
peers::{
PeerPacketFilter,
peer::Peer,
peer_conn::PeerConn,
peer_rpc::PeerRpcManagerTransport,
peer_session::PeerSessionStore,
@@ -596,22 +594,6 @@ impl PeerManager {
tunnel: Box<dyn Tunnel>,
is_directly_connected: bool,
peer_id_hint: Option<PeerId>,
) -> Result<(PeerId, PeerConnId), Error> {
self.add_client_tunnel_with_peer_id_hint_and_priority(
tunnel,
is_directly_connected,
peer_id_hint,
DEFAULT_CONNECTION_PRIORITY,
)
.await
}
pub(crate) async fn add_client_tunnel_with_peer_id_hint_and_priority(
&self,
tunnel: Box<dyn Tunnel>,
is_directly_connected: bool,
peer_id_hint: Option<PeerId>,
priority: u32,
) -> Result<(PeerId, PeerConnId), Error> {
let mut peer = PeerConn::new_with_peer_id_hint(
self.my_peer_id,
@@ -620,7 +602,6 @@ impl PeerManager {
peer_id_hint,
self.peer_session_store.clone(),
);
peer.set_priority(priority);
peer.set_is_hole_punched(!is_directly_connected);
peer.do_handshake_as_client().await?;
let conn_id = peer.get_conn_id();
@@ -635,14 +616,6 @@ impl PeerManager {
Ok((peer_id, conn_id))
}
fn get_peer_by_id(&self, peer_id: PeerId) -> Option<Arc<Peer>> {
self.peers.get_peer_by_id(peer_id).or_else(|| {
self.foreign_network_client
.get_peer_map()
.get_peer_by_id(peer_id)
})
}
pub fn has_directly_connected_conn(&self, peer_id: PeerId) -> bool {
if let Some(peer) = self.peers.get_peer_by_id(peer_id) {
peer.has_directly_connected_conn()
@@ -651,20 +624,6 @@ impl PeerManager {
}
}
pub(crate) fn has_directly_connected_conn_with_priority_at_most(
&self,
peer_id: PeerId,
priority: u32,
) -> bool {
self.get_peer_by_id(peer_id)
.is_some_and(|peer| peer.has_directly_connected_conn_with_priority_at_most(priority))
}
pub(crate) fn has_conn_with_priority_at_most(&self, peer_id: PeerId, priority: u32) -> bool {
self.get_peer_by_id(peer_id)
.is_some_and(|peer| peer.has_conn_with_priority_at_most(priority))
}
#[tracing::instrument]
pub async fn try_direct_connect<C>(&self, connector: C) -> Result<(PeerId, PeerConnId), Error>
where
@@ -677,21 +636,27 @@ impl PeerManager {
#[tracing::instrument]
pub async fn try_direct_connect_with_peer_id_hint<C>(
&self,
mut connector: C,
connector: C,
peer_id_hint: Option<PeerId>,
) -> Result<(PeerId, PeerConnId), Error>
where
C: TunnelConnector + Debug,
{
let priority = connector.priority();
let ns = self.global_ctx.net_ns.clone();
let t = ns
.run_async(|| async move { connector.connect().await })
.await?;
self.add_client_tunnel_with_peer_id_hint_and_priority(t, true, peer_id_hint, priority)
let t = self.connect_tunnel(connector).await?;
self.add_client_tunnel_with_peer_id_hint(t, true, peer_id_hint)
.await
}
pub(crate) async fn connect_tunnel<C>(&self, mut connector: C) -> Result<Box<dyn Tunnel>, Error>
where
C: TunnelConnector + Debug,
{
let ns = self.global_ctx.net_ns.clone();
Ok(ns
.run_async(|| async move { connector.connect().await })
.await?)
}
// avoid loop back to virtual network
fn check_remote_addr_not_from_virtual_network(
&self,
@@ -1604,17 +1569,26 @@ impl PeerManager {
ipv6_addr.is_multicast() || *ipv6_addr == ipv6_inet.last_address()
}
fn select_ipv4_broadcast_peers<'a>(
routes: impl IntoIterator<Item = &'a instance::Route>,
my_peer_id: PeerId,
) -> Vec<PeerId> {
routes
.into_iter()
.filter_map(|route| {
(route.peer_id != my_peer_id && route.ipv4_addr.is_some()).then_some(route.peer_id)
})
.collect()
}
pub async fn get_msg_dst_peer_ipv4(&self, ipv4_addr: &Ipv4Addr) -> (Vec<PeerId>, bool) {
let mut is_exit_node = false;
let mut dst_peers = vec![];
if self.is_all_peers_broadcast_ipv4(ipv4_addr) {
dst_peers.extend(self.peers.list_routes().await.iter().filter_map(|x| {
if *x.key() != self.my_peer_id {
Some(*x.key())
} else {
None
}
}));
dst_peers.extend(Self::select_ipv4_broadcast_peers(
&self.peers.list_route_infos().await,
self.my_peer_id,
));
} else if let Some(peer_id) = self.peers.get_peer_id_by_ipv4(ipv4_addr).await {
dst_peers.push(peer_id);
} else if !self
@@ -2234,6 +2208,32 @@ mod tests {
assert!(!PeerManager::should_mark_recent_traffic_for_fanout(2));
}
fn route_with_ipv4(
peer_id: u32,
ipv4_addr: Option<std::net::Ipv4Addr>,
) -> crate::proto::api::instance::Route {
crate::proto::api::instance::Route {
peer_id,
ipv4_addr: ipv4_addr.map(|addr| cidr::Ipv4Inet::new(addr, 24).unwrap().into()),
..Default::default()
}
}
#[test]
fn ipv4_broadcast_peer_selection_skips_peers_without_ipv4() {
let routes = vec![
route_with_ipv4(1, Some(std::net::Ipv4Addr::new(10, 126, 126, 1))),
route_with_ipv4(2, None),
route_with_ipv4(3, Some(std::net::Ipv4Addr::new(10, 126, 126, 3))),
route_with_ipv4(4, None),
];
assert_eq!(
PeerManager::select_ipv4_broadcast_peers(&routes, 3),
vec![1]
);
}
#[test]
fn gc_recent_traffic_removes_expired_and_connected_entries() {
let stale_peer = 1;
@@ -3077,7 +3077,6 @@ mod tests {
crate::common::config::PeerConfig {
uri: server_remote_url,
peer_public_key: Some(server_pub_b64.clone()),
priority: crate::common::config::DEFAULT_CONNECTION_PRIORITY,
},
]);
+5 -16
View File
@@ -5,8 +5,7 @@ use crate::{
proto::{
common::Void,
peer_rpc::{
DirectConnectorRpc, GetIpListRequest, GetIpListResponse, ListenerInfo,
SendUdpHolePunchPacketRequest,
DirectConnectorRpc, GetIpListRequest, GetIpListResponse, SendUdpHolePunchPacketRequest,
},
rpc_types::{self, controller::BaseController},
},
@@ -45,23 +44,13 @@ impl DirectConnectorRpc for DirectConnectorManagerRpcServer {
_: GetIpListRequest,
) -> rpc_types::error::Result<GetIpListResponse> {
let mut ret = self.global_ctx.get_ip_collector().collect_ip_addrs().await;
let listener_configs = self
ret.listeners = self
.global_ctx
.config
.get_mapped_listener_configs()
.get_mapped_listeners()
.into_iter()
.chain(self.global_ctx.get_running_listener_configs())
.collect::<Vec<_>>();
ret.listeners = listener_configs
.iter()
.map(|listener| listener.url.clone().into())
.collect();
ret.listener_infos = listener_configs
.into_iter()
.map(|listener| ListenerInfo {
url: Some(listener.url.into()),
priority: listener.priority,
})
.chain(self.global_ctx.get_running_listeners())
.map(Into::into)
.collect();
remove_easytier_managed_ipv6s(&mut ret, &self.global_ctx);
tracing::trace!(
-15
View File
@@ -141,21 +141,6 @@ pub mod instance {
ret
}
pub fn get_conn_priority(&self) -> Option<u32> {
let p = self.peer.as_ref()?;
let default_conn_id = p.default_conn_id.map(|id| id.to_string());
let mut ret = None;
for conn in p.conns.iter() {
if default_conn_id == Some(conn.conn_id.to_string()) {
return Some(conn.priority);
}
ret.get_or_insert(conn.priority);
}
ret
}
fn get_tunnel_proto_str(tunnel_info: &super::super::common::TunnelInfo) -> String {
tunnel_info.display_tunnel_type()
}
-1
View File
@@ -43,7 +43,6 @@ message StringPatch {
message UrlPatch {
ConfigPatchAction action = 1;
common.Url url = 2;
optional uint32 priority = 3;
}
message AclPatch {
+1 -6
View File
@@ -45,7 +45,6 @@ message PeerConnInfo {
bytes noise_remote_static_pubkey = 12;
peer_rpc.SecureAuthLevel secure_auth_level = 13;
peer_rpc.PeerIdentityType peer_identity_type = 14;
uint32 priority = 15;
}
message PeerInfo {
@@ -209,7 +208,6 @@ enum ConnectorStatus {
message Connector {
common.Url url = 1;
ConnectorStatus status = 2;
uint32 priority = 3;
}
message ListConnectorRequest { InstanceIdentifier instance = 1; }
@@ -220,10 +218,7 @@ service ConnectorManageRpc {
rpc ListConnector(ListConnectorRequest) returns (ListConnectorResponse);
}
message MappedListener {
common.Url url = 1;
uint32 priority = 2;
}
message MappedListener { common.Url url = 1; }
message ListMappedListenerRequest { InstanceIdentifier instance = 1; }
+1
View File
@@ -100,6 +100,7 @@ message NetworkConfig {
optional bool ipv6_public_addr_auto = 63;
optional string ipv6_public_addr_prefix = 64;
optional bool disable_relay_data = 65;
optional bool enable_udp_broadcast_relay = 66;
}
message PortForwardConfig {
+1
View File
@@ -76,6 +76,7 @@ message FlagsInConfig {
uint64 instance_recv_bps_limit = 39;
bool disable_upnp = 40;
bool disable_relay_data = 41;
bool enable_udp_broadcast_relay = 42;
}
message RpcDescriptor {
+4 -1
View File
@@ -14,5 +14,8 @@ pub mod web;
pub mod tests;
pub mod utils;
const DESCRIPTOR_POOL_BYTES: &[u8] =
pub const DESCRIPTOR_POOL_BYTES: &[u8] =
include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin"));
pub const ALL_DESCRIPTOR_BYTES: &[u8] =
include_bytes!(concat!(env!("OUT_DIR"), "/descriptors.bin"));
-8
View File
@@ -184,12 +184,6 @@ message GetIpListResponse {
common.Ipv6Addr public_ipv6 = 3;
repeated common.Ipv6Addr interface_ipv6s = 4;
repeated common.Url listeners = 5;
repeated ListenerInfo listener_infos = 6;
}
message ListenerInfo {
common.Url url = 1;
uint32 priority = 2;
}
message SendUdpHolePunchPacketRequest {
@@ -320,7 +314,6 @@ message HandshakeRequest {
repeated string features = 4;
string network_name = 5;
bytes network_secret_digest = 6;
uint32 connection_priority = 7;
}
message KcpConnData {
@@ -353,7 +346,6 @@ message PeerConnNoiseMsg1Pb {
optional uint32 a_session_generation = 3;
common.UUID a_conn_id = 4;
string client_encryption_algorithm = 5;
uint32 connection_priority = 6;
}
message PeerConnNoiseMsg2Pb {
+5 -17
View File
@@ -1,6 +1,6 @@
use delegate::delegate;
use derivative::Derivative;
use derive_more::{Deref, DerefMut, From, IntoIterator};
use derive_more::{AsMut, AsRef, Deref, DerefMut, From, IntoIterator};
use prost::Message;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
@@ -45,11 +45,15 @@ where
From,
Deref,
DerefMut,
AsRef,
AsMut,
Serialize,
Deserialize,
IntoIterator,
)]
#[derivative(Default(bound = ""))]
#[as_ref(forward)]
#[as_mut(forward)]
#[serde(transparent)]
#[into_iterator(owned, ref, ref_mut)]
pub struct RepeatedMessageModel<Model>(Vec<Model>);
@@ -74,22 +78,6 @@ impl<Model> Extend<Model> for RepeatedMessageModel<Model> {
}
}
impl<Model> AsRef<[Model]> for RepeatedMessageModel<Model> {
delegate! {
to self.0 {
fn as_ref(&self) -> &[Model];
}
}
}
impl<Model> AsMut<[Model]> for RepeatedMessageModel<Model> {
delegate! {
to self.0 {
fn as_mut(&mut self) -> &mut [Model];
}
}
}
impl<'m, Message, Model> TryFrom<&'m [Message]> for RepeatedMessageModel<Model>
where
Message: prost::Message,
+26
View File
@@ -115,6 +115,12 @@ impl<R> FramedReader<R> {
return Some(Err(TunnelError::InvalidPacket("body too long".to_string())));
}
if body_len < PEER_MANAGER_HEADER_SIZE {
return Some(Err(TunnelError::InvalidPacket(
"body too short".to_string(),
)));
}
if buf.len() < TCP_TUNNEL_HEADER_SIZE + body_len {
// body is not complete
return None;
@@ -555,6 +561,26 @@ pub mod tests {
tunnel::{TunnelConnector, TunnelListener, packet_def::ZCPacket},
};
#[cfg(test)]
use crate::tunnel::{
TunnelError,
packet_def::{PEER_MANAGER_HEADER_SIZE, TCP_TUNNEL_HEADER_SIZE},
};
#[test]
fn framed_reader_rejects_short_peer_manager_body() {
let mut buf = BytesMut::new();
buf.put_u32_le((PEER_MANAGER_HEADER_SIZE - 1) as u32);
buf.resize(TCP_TUNNEL_HEADER_SIZE + PEER_MANAGER_HEADER_SIZE - 1, 0);
let ret = super::FramedReader::<tokio::io::Empty>::extract_one_packet(&mut buf, 2000);
assert!(matches!(
ret,
Some(Err(TunnelError::InvalidPacket(msg))) if msg == "body too short"
));
}
pub async fn _tunnel_echo_server(tunnel: Box<dyn super::Tunnel>, once: bool) {
let (mut recv, mut send) = tunnel.split();
+1 -43
View File
@@ -3,7 +3,7 @@ use std::{
};
use crate::{
common::{config::DEFAULT_CONNECTION_PRIORITY, dns::socket_addrs, error::Error},
common::{dns::socket_addrs, error::Error},
proto::common::TunnelInfo,
};
use async_trait::async_trait;
@@ -139,53 +139,11 @@ pub trait TunnelListener: Send {
pub trait TunnelConnector: Send {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError>;
fn remote_url(&self) -> url::Url;
fn priority(&self) -> u32 {
DEFAULT_CONNECTION_PRIORITY
}
fn set_bind_addrs(&mut self, _addrs: Vec<SocketAddr>) {}
fn set_ip_version(&mut self, _ip_version: IpVersion) {}
fn set_resolved_addr(&mut self, _addr: SocketAddr) {}
}
#[derive(Debug)]
pub struct PrioritizedConnector<C> {
inner: C,
priority: u32,
}
impl<C> PrioritizedConnector<C> {
pub fn new(inner: C, priority: u32) -> Self {
Self { inner, priority }
}
}
#[async_trait]
impl<C: TunnelConnector> TunnelConnector for PrioritizedConnector<C> {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
self.inner.connect().await
}
fn remote_url(&self) -> url::Url {
self.inner.remote_url()
}
fn priority(&self) -> u32 {
self.priority
}
fn set_bind_addrs(&mut self, addrs: Vec<SocketAddr>) {
self.inner.set_bind_addrs(addrs);
}
fn set_ip_version(&mut self, ip_version: IpVersion) {
self.inner.set_ip_version(ip_version);
}
fn set_resolved_addr(&mut self, addr: SocketAddr) {
self.inner.set_resolved_addr(addr);
}
}
pub fn build_url_from_socket_addr(addr: &String, scheme: &str) -> url::Url {
if let Ok(sock_addr) = addr.parse::<SocketAddr>() {
let url_str = format!("{}://0.0.0.0", scheme);
+58
View File
@@ -0,0 +1,58 @@
use delegate::delegate;
use derivative::Derivative;
use derive_more::{AsMut, AsRef, Deref, DerefMut, From, Into, IntoIterator};
use std::fmt;
use std::fmt::Display;
use thiserror::Error;
#[derive(Derivative, Debug, From, Into, Deref, DerefMut, AsRef, AsMut, IntoIterator, Error)]
#[derivative(Default(bound = ""))]
#[as_ref(forward)]
#[as_mut(forward)]
#[into_iterator(owned, ref, ref_mut)]
pub struct ErrorCollection<E> {
pub errors: Vec<E>,
}
impl<E> ErrorCollection<E> {
delegate! {
to Vec {
#[into]
pub fn new() -> Self;
#[into]
pub fn with_capacity(capacity: usize) -> Self;
}
}
}
impl<E, Item: Into<E>> FromIterator<Item> for ErrorCollection<E> {
fn from_iter<I: IntoIterator<Item = Item>>(iter: I) -> Self {
Self {
errors: iter.into_iter().map(Into::into).collect(),
}
}
}
impl<E> Extend<E> for ErrorCollection<E> {
delegate! {
to self.errors {
fn extend<T: IntoIterator<Item = E>>(&mut self, iter: T);
}
}
}
impl<E: Display> Display for ErrorCollection<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.errors.is_empty() {
return write!(f, "No errors");
}
write!(f, "{} error(s) occurred:", self.errors.len())?;
for (i, err) in self.errors.iter().enumerate() {
writeln!(f)?;
write!(f, " {}. {}", i + 1, err)?;
}
Ok(())
}
}
+1
View File
@@ -1,3 +1,4 @@
pub mod error;
pub mod panic;
pub mod string;
pub mod task;
+62
View File
@@ -1,9 +1,13 @@
use crate::utils::error::ErrorCollection;
use futures::StreamExt;
use futures::stream::FuturesUnordered;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use tokio::task::JoinHandle;
use tokio::time::sleep;
use tokio_util::sync::CancellationToken;
use tokio_util::task::AbortOnDropHandle;
@@ -78,3 +82,61 @@ impl<Output> Future for CancellableTask<Output> {
}
// endregion
// region HedgeExt
pub(crate) trait HedgeExt: Iterator + Sized {
async fn hedge<T, E>(self, delay: Duration) -> Result<T, ErrorCollection<E>>
where
Self::Item: Future<Output = Result<T, E>>;
}
impl<I> HedgeExt for I
where
I: Iterator,
{
async fn hedge<T, E>(mut self, delay: Duration) -> Result<T, ErrorCollection<E>>
where
Self::Item: Future<Output = Result<T, E>>,
{
let mut tasks = FuturesUnordered::new();
let mut errors = ErrorCollection::new();
let mut exhausted = false;
macro_rules! spawn {
() => {
if let Some(fut) = self.next() {
tasks.push(fut);
} else {
exhausted = true;
}
};
}
spawn!();
while !tasks.is_empty() {
tokio::select! {
res = tasks.next() => {
match res {
Some(Ok(v)) => return Ok(v),
Some(Err(e)) => errors.push(e),
None => unreachable!(),
}
if !exhausted {
spawn!();
}
}
_ = sleep(delay), if !exhausted => {
spawn!();
}
}
}
Err(errors)
}
}
// endregion
+7
View File
@@ -9,6 +9,7 @@ use crate::{
pub struct Controller {
token: String,
machine_id: uuid::Uuid,
hostname: String,
device_os: DeviceOsInfo,
manager: Arc<NetworkInstanceManager>,
@@ -18,6 +19,7 @@ pub struct Controller {
impl Controller {
pub fn new(
token: String,
machine_id: uuid::Uuid,
hostname: String,
device_os: DeviceOsInfo,
manager: Arc<NetworkInstanceManager>,
@@ -25,6 +27,7 @@ impl Controller {
) -> Self {
Controller {
token,
machine_id,
hostname,
device_os,
manager,
@@ -44,6 +47,10 @@ impl Controller {
self.hostname.clone()
}
pub fn machine_id(&self) -> uuid::Uuid {
self.machine_id
}
pub fn device_os(&self) -> DeviceOsInfo {
self.device_os.clone()
}

Some files were not shown because too many files have changed in this diff Show More