Compare commits

..

20 Commits

Author SHA1 Message Date
fanyang 9d7a938e93 Address review comments 2026-05-04 10:42:51 +08:00
fanyang 6229229b31 feat: support lzo compression 2026-05-04 10:42:51 +08:00
fanyang 6a63853bad fix: silence listener warning in feature builds 2026-05-04 10:42:51 +08:00
fanyang 362aa7a9cd fix: allow omitted ACL config fields (#2206) 2026-05-04 00:47:24 +08:00
KKRainbow 12a7b5a5c5 fix: scope peer center server data to instance (#2198)
Stop sharing PeerCenterServer state through a process-global map so local and foreign-network services cannot mix peer-center data when peer ids overlap.
2026-05-02 01:43:01 +08:00
fanyang 4eba9b07b6 fix(web-client): keep retrying unreachable config server (#2140)
Defer config-server connector creation into the web client retry loop so
service startup does not fail when network or DNS is unavailable.
2026-05-02 00:09:48 +08:00
KKRainbow 1b48029bdc fix: clean stale foreign network state (#2197)
- clear foreign-network traffic metric peer caches on peer removal and network cleanup
- release reserved foreign-network peer IDs on handshake/add-peer error paths
- avoid creating no-op foreign-network token buckets when limits are unlimited
- shrink relay/session maps after cleanup and remove unused peer-center global data entries
2026-05-01 23:30:51 +08:00
KKRainbow 3542e944cb fix(quic): prune stopped endpoints from pool (#2195)
* remove wss port 0 compatibility code
* fix(quic): prune stopped endpoints from pool
2026-05-01 18:51:39 +08:00
KKRainbow 852d1c9e14 feat(gui): add UPnP and public IPv6 advanced options (#2194)
Expose disable-upnp and ipv6_public_addr_auto in the shared web/GUI config editor;
bump release metadata to 2.6.3.
2026-05-01 13:45:19 +08:00
KKRainbow 4958394469 fix: protect self peer during credential refresh and allow need-p2p peers through public server (#2192)
* fix: protect self peer during credential refresh

* fix: allow need-p2p peers through public server
2026-05-01 06:59:30 +08:00
KKRainbow 41b6d65604 fix faketcp filter on windows (#2190) 2026-04-30 23:55:56 +08:00
KKRainbow aae30894dd fix: keep file logger disabled by default (#2189) 2026-04-30 21:42:30 +08:00
fanyang 81d169abfc fix: fall back when CLI manage service is unavailable (#2185) 2026-04-30 19:50:50 +08:00
Luna Yao 9c6c210e89 fix: disable SO_EXCLUSIVEADDRUSE on Windows (#2180) 2026-04-30 19:48:54 +08:00
Mg Pig d1c6dcf754 fix: prevent URL input layout flicker with container queries (#2186) 2026-04-30 19:45:01 +08:00
KKRainbow 97c8c4f55a feat: support disabling relay data forwarding (#2188)
- add a disable_relay_data runtime/config patch option
- reuse the existing avoid_relay_data feature flag when relay data forwarding is disabled
2026-04-30 19:44:40 +08:00
KKRainbow ed8df2d58f prevent EasyTier-managed IPv6 from being used as underlay connections (#2181)
When a node has public IPv6 addresses allocated by EasyTier, those addresses
are installed on the host's network interfaces. The system would then pick
them up as candidate source/destination addresses for underlay connections
(direct peer, UDP hole punch, bind addresses), causing overlay traffic to
loop back into the overlay itself.

Add a central predicate is_ip_easytier_managed_ipv6() and apply it at every
point where IPv6 addresses are selected for underlay use:
- Filter managed IPv6 from DNS-resolved connector addresses, including a
  UDP socket getsockname check to detect whether the OS would route through
  the overlay to reach a destination
- Skip managed IPv6 in bind address selection and STUN candidate filtering
- Strip managed IPv6 from GetIpListResponse RPC so peers never learn them
- Pass pre-resolved addresses to tunnel connectors to avoid re-resolution

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
2026-04-29 12:17:22 +08:00
lurenjia f66010e6f9 fix: preserve URL type in matches_scheme (#2179)
Avoid resolving Url::as_ref() to the full URL string before TunnelScheme
conversion. Add regression coverage for owned/borrowed URLs and the UDP
IPv6 hole-punch branch condition.

Co-authored-by: KKRainbow <443152178@qq.com>
2026-04-28 23:23:41 +08:00
Luna Yao d5c4700d32 utils: replace defer, ContextGuard, DetachableTask with guarden crate (#2163) 2026-04-27 18:29:46 +08:00
KKRainbow 969ecfc4ca fix(gui): refresh service after core version upgrade (#2172) 2026-04-27 15:54:52 +08:00
76 changed files with 2797 additions and 1299 deletions
+1 -1
View File
@@ -11,7 +11,7 @@ on:
image_tag:
description: 'Tag for this image build'
type: string
default: 'v2.6.2'
default: 'v2.6.3'
required: true
mark_latest:
description: 'Mark this image as latest'
+1 -1
View File
@@ -18,7 +18,7 @@ on:
version:
description: 'Version for this release'
type: string
default: 'v2.6.2'
default: 'v2.6.3'
required: true
make_latest:
description: 'Mark this release as latest'
Generated
+53 -25
View File
@@ -2229,7 +2229,7 @@ checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555"
[[package]]
name = "easytier"
version = "2.6.2"
version = "2.6.3"
dependencies = [
"aes-gcm",
"anyhow",
@@ -2273,6 +2273,7 @@ dependencies = [
"gethostname 0.5.0",
"git-version",
"globwalk",
"guarden",
"hickory-client",
"hickory-proto",
"hickory-resolver",
@@ -2287,6 +2288,7 @@ dependencies = [
"indoc",
"itertools 0.14.0",
"kcp-sys",
"lzokay-native",
"machine-uid",
"maplit",
"mimalloc",
@@ -2404,7 +2406,7 @@ dependencies = [
[[package]]
name = "easytier-gui"
version = "2.6.2"
version = "2.6.3"
dependencies = [
"anyhow",
"async-trait",
@@ -2456,6 +2458,7 @@ dependencies = [
"dashmap",
"easytier",
"futures",
"guarden",
"jsonwebtoken",
"mimalloc",
"mockall",
@@ -2484,7 +2487,7 @@ dependencies = [
[[package]]
name = "easytier-web"
version = "2.6.2"
version = "2.6.3"
dependencies = [
"anyhow",
"async-trait",
@@ -3590,6 +3593,28 @@ dependencies = [
"syn 2.0.117",
]
[[package]]
name = "guarden"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca87812d87fa82896df1adfb5c111cdeaae3edb6da028f5df002dcbd7df71454"
dependencies = [
"futures",
"guarden-macros",
"tokio",
]
[[package]]
name = "guarden-macros"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b42f4b8de91cbd793ce8e6cf8d4821ef02d2d5b4468e0a55a36c65c5581de53"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]]
name = "h2"
version = "0.4.7"
@@ -3705,12 +3730,6 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
[[package]]
name = "hermit-abi"
version = "0.5.2"
@@ -4026,7 +4045,7 @@ dependencies = [
"libc",
"percent-encoding",
"pin-project-lite",
"socket2 0.6.1",
"socket2 0.5.10",
"tokio",
"tower-service",
"tracing",
@@ -4695,9 +4714,9 @@ dependencies = [
[[package]]
name = "libc"
version = "0.2.172"
version = "0.2.186"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
checksum = "68ab91017fe16c622486840e4c83c9a37afeff978bd239b5293d61ece587de66"
[[package]]
name = "libdbus-sys"
@@ -4856,6 +4875,16 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]]
name = "lzokay-native"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "792ba667add2798c6c3e988e630f4eb921b5cbc735044825b7111ef1582c8730"
dependencies = [
"byteorder",
"thiserror 1.0.63",
]
[[package]]
name = "mac"
version = "0.1.1"
@@ -5043,14 +5072,13 @@ dependencies = [
[[package]]
name = "mio"
version = "1.0.2"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec"
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
dependencies = [
"hermit-abi 0.3.9",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
"windows-sys 0.52.0",
"windows-sys 0.61.2",
]
[[package]]
@@ -6551,7 +6579,7 @@ checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218"
dependencies = [
"cfg-if",
"concurrent-queue",
"hermit-abi 0.5.2",
"hermit-abi",
"pin-project-lite",
"rustix 1.0.7",
"windows-sys 0.61.2",
@@ -8650,12 +8678,12 @@ dependencies = [
[[package]]
name = "socket2"
version = "0.6.1"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881"
checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e"
dependencies = [
"libc",
"windows-sys 0.60.2",
"windows-sys 0.61.2",
]
[[package]]
@@ -9774,9 +9802,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.48.0"
version = "1.52.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
checksum = "b67dee974fe86fd92cc45b7a95fdd2f99a36a6d7b0d431a231178d3d670bbcc6"
dependencies = [
"bytes",
"libc",
@@ -9784,7 +9812,7 @@ dependencies = [
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2 0.6.1",
"socket2 0.6.3",
"tokio-macros",
"tracing",
"windows-sys 0.61.2",
@@ -9792,9 +9820,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
version = "2.6.0"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496"
dependencies = [
"proc-macro2",
"quote",
+1 -1
View File
@@ -1,6 +1,6 @@
id=easytier_magisk
name=EasyTier_Magisk
version=v2.6.2
version=v2.6.3
versionCode=1
author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
@@ -12,6 +12,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
chrono = { version = "0.4", features = ["serde"] }
uuid = { version = "1.0", features = ["v4", "serde"] }
guarden = "0.1"
# Axum web framework
axum = { version = "0.8.4", features = ["macros"] }
@@ -10,9 +10,9 @@ use easytier::{
common::config::{
ConfigFileControl, ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader,
},
defer,
instance_manager::NetworkInstanceManager,
};
use guarden::defer;
use serde::{Deserialize, Serialize};
use sqlx::any;
use tokio_util::task::AbortOnDropHandle;
+1 -1
View File
@@ -1,7 +1,7 @@
{
"name": "easytier-gui",
"type": "module",
"version": "2.6.2",
"version": "2.6.3",
"private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": {
+1 -1
View File
@@ -1,6 +1,6 @@
[package]
name = "easytier-gui"
version = "2.6.2"
version = "2.6.3"
description = "EasyTier GUI"
authors = ["you"]
edition.workspace = true
+1 -1
View File
@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false
},
"productName": "easytier-gui",
"version": "2.6.2",
"version": "2.6.3",
"identifier": "com.kkrainbow.easytier",
"plugins": {
"shell": {
+1
View File
@@ -18,6 +18,7 @@ export interface ServiceMode extends WebClientConfig {
rpc_portal: string
file_log_level: 'off' | 'warn' | 'info' | 'debug' | 'trace'
file_log_dir: string
installed_core_version?: string
}
export interface RemoteMode {
+20 -3
View File
@@ -16,7 +16,7 @@ import { useToast, useConfirm } from 'primevue'
import { loadMode, saveMode, WebClientConfig, type Mode } from '~/composables/mode'
import { saveLastNetworkInstanceId, loadLastNetworkInstanceId } from '~/composables/config'
import ModeSwitcher from '~/components/ModeSwitcher.vue'
import { getServiceStatus } from '~/composables/backend'
import { getEasytierVersion, getServiceStatus } from '~/composables/backend'
const { t, locale } = useI18n()
const confirm = useConfirm()
@@ -85,6 +85,20 @@ async function onUninstallService() {
});
}
function stripModeMetadata(mode: Mode) {
if (mode.mode !== 'service') {
return mode
}
const serviceConfig = { ...mode }
delete serviceConfig.installed_core_version
return serviceConfig
}
function modeConfigChanged(next: Mode) {
return JSON.stringify(stripModeMetadata(next)) !== JSON.stringify(stripModeMetadata(currentMode.value))
}
async function onStopService() {
isModeSaving.value = true
manualDisconnect.value = true
@@ -134,13 +148,14 @@ async function initWithMode(mode: Mode) {
}
url = mode.remote_rpc_address
break;
case 'service':
case 'service': {
if (!mode.config_dir || !mode.file_log_dir || !mode.file_log_level || !mode.rpc_portal) {
toast.add({ severity: 'error', summary: t('error'), detail: t('mode.service_config_empty'), life: 10000 })
return initWithMode({ ...mode, mode: 'normal' });
}
let serviceStatus = await getServiceStatus()
if (serviceStatus === "NotInstalled" || JSON.stringify(mode) !== JSON.stringify(currentMode.value)) {
const coreVersion = await getEasytierVersion()
if (serviceStatus === "NotInstalled" || modeConfigChanged(mode) || mode.installed_core_version !== coreVersion) {
mode.config_server_url = mode.config_server_url || undefined
await initService({
config_dir: mode.config_dir,
@@ -149,6 +164,7 @@ async function initWithMode(mode: Mode) {
rpc_portal: mode.rpc_portal,
config_server: mode.config_server_url,
})
mode.installed_core_version = coreVersion
serviceStatus = await getServiceStatus()
}
if (serviceStatus === "Stopped") {
@@ -157,6 +173,7 @@ async function initWithMode(mode: Mode) {
url = "tcp://" + mode.rpc_portal.replace("0.0.0.0", "127.0.0.1")
retrys = 5
break;
}
case 'normal':
url = mode.rpc_portal;
break;
+1 -1
View File
@@ -1,6 +1,6 @@
[package]
name = "easytier-web"
version = "2.6.2"
version = "2.6.3"
edition.workspace = true
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
@@ -81,6 +81,7 @@ const bool_flags: BoolFlag[] = [
{ field: 'latency_first', help: 'latency_first_help' },
{ field: 'use_smoltcp', help: 'use_smoltcp_help' },
{ field: 'disable_ipv6', help: 'disable_ipv6_help' },
{ field: 'ipv6_public_addr_auto', help: 'ipv6_public_addr_auto_help' },
{ field: 'enable_kcp_proxy', help: 'enable_kcp_proxy_help' },
{ field: 'disable_kcp_input', help: 'disable_kcp_input_help' },
{ field: 'enable_quic_proxy', help: 'enable_quic_proxy_help' },
@@ -98,6 +99,7 @@ const bool_flags: BoolFlag[] = [
{ field: 'disable_encryption', help: 'disable_encryption_help' },
{ field: 'disable_tcp_hole_punching', help: 'disable_tcp_hole_punching_help' },
{ field: 'disable_udp_hole_punching', help: 'disable_udp_hole_punching_help' },
{ field: 'disable_upnp', help: 'disable_upnp_help' },
{ field: 'disable_sym_hole_punching', help: 'disable_sym_hole_punching_help' },
{ field: 'enable_magic_dns', help: 'enable_magic_dns_help' },
{ field: 'enable_private_mode', help: 'enable_private_mode_help' },
@@ -2,7 +2,7 @@
import { AutoComplete, Button, Dialog, InputNumber, InputText } from 'primevue'
import InputGroup from 'primevue/inputgroup'
import InputGroupAddon from 'primevue/inputgroupaddon'
import { computed, onMounted, onUnmounted, ref, watch } from 'vue'
import { computed, ref, watch } from 'vue'
import { useI18n } from 'vue-i18n'
const props = defineProps<{
@@ -13,25 +13,8 @@ const props = defineProps<{
const { t } = useI18n()
const url = defineModel<string>({ required: true })
const editing = ref(false)
const container = ref<HTMLElement | null>(null)
const internalCompact = ref(false)
const hostFocused = ref(false)
onMounted(() => {
if (container.value) {
const observer = new ResizeObserver(entries => {
for (const entry of entries) {
internalCompact.value = entry.contentRect.width < 400
}
})
observer.observe(container.value)
onUnmounted(() => {
observer.disconnect()
})
}
})
const parseUrl = (val: string | null | undefined): { proto: string; host: string; port: number | null } => {
const getValidPort = (portStr: string, proto: string) => {
const p = parseInt(portStr)
@@ -169,28 +152,30 @@ const onProtoChange = (newProto: string) => {
</script>
<template>
<div ref="container" class="w-full">
<InputGroup v-if="!internalCompact" class="w-full">
<div class="url-input-container w-full min-w-0 overflow-hidden">
<InputGroup class="url-input-full w-full min-w-0">
<AutoComplete :model-value="internalValue.proto" :suggestions="filteredProtos" dropdown
class="max-w-32 proto-autocomplete-in-group" @complete="searchProtos"
@update:model-value="onProtoChange" />
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="grow"
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="grow min-w-0"
@focus="onHostFocus" @blur="onHostBlur" />
<template v-if="!isNoPortProto">
<InputGroupAddon>
<span style="font-weight: bold">:</span>
</InputGroupAddon>
<InputNumber v-model="internalValue.port" :format="false" :min="1" :max="65535" class="max-w-24"
:placeholder="String(protos[internalValue.proto] ?? 11010)"
fluid />
:placeholder="String(protos[internalValue.proto] ?? 11010)" fluid />
</template>
<!-- Rendered in both responsive branches; keep action slot content free of side effects and duplicate IDs. -->
<slot name="actions"></slot>
</InputGroup>
<div v-else class="flex justify-between items-center p-2 border rounded w-full">
<span class="truncate mr-2">{{ url }}</span>
<div class="flex items-center">
<Button icon="pi pi-pencil" class="p-button-sm p-button-text" @click="editing = true" />
<div
class="url-input-compact flex justify-between items-center p-2 border rounded w-full min-w-0 overflow-hidden">
<span class="truncate mr-2 min-w-0 flex-1 overflow-hidden">{{ url }}</span>
<div class="flex items-center shrink-0">
<Button icon="pi pi-pencil" class="p-button-sm p-button-text" :aria-label="t('web.common.edit')"
@click="editing = true" />
<slot name="actions"></slot>
</div>
</div>
@@ -222,6 +207,28 @@ const onProtoChange = (newProto: string) => {
</template>
<style scoped>
.url-input-container {
container-type: inline-size;
}
.url-input-full {
display: none;
}
.url-input-compact {
display: flex;
}
@container (min-width: 400px) {
.url-input-full {
display: flex;
}
.url-input-compact {
display: none;
}
}
.proto-autocomplete-in-group,
.proto-autocomplete-in-group :deep(.p-autocomplete-input),
.proto-autocomplete-in-group :deep(.p-autocomplete-dropdown) {
@@ -104,6 +104,9 @@ use_smoltcp_help: 使用用户态 TCP/IP 协议栈,避免操作系统防火墙
disable_ipv6: 禁用IPv6
disable_ipv6_help: 禁用此节点的IPv6功能,仅使用IPv4进行网络通信。
ipv6_public_addr_auto: 自动获取公网 IPv6
ipv6_public_addr_auto_help: 自动从共享了 IPv6 子网的对等节点获取一个公网 IPv6 地址。
enable_kcp_proxy: 启用 KCP 代理
enable_kcp_proxy_help: 将 TCP 流量转为 KCP 流量,降低传输延迟,提升传输速度。
@@ -157,6 +160,9 @@ disable_tcp_hole_punching_help: 禁用TCP打洞功能
disable_udp_hole_punching: 禁用UDP打洞
disable_udp_hole_punching_help: 禁用UDP打洞功能
disable_upnp: 禁用 UPnP
disable_upnp_help: 禁用符合条件监听器的运行时 UPnP/NAT-PMP 端口映射;自动端口映射默认开启。
disable_sym_hole_punching: 禁用对称NAT打洞
disable_sym_hole_punching_help: 禁用对称NAT的打洞(生日攻击),将对称NAT视为锥形NAT处理
@@ -103,6 +103,9 @@ use_smoltcp_help: Use a user-space TCP/IP stack to avoid issues with operating s
disable_ipv6: Disable IPv6
disable_ipv6_help: Disable IPv6 functionality for this node, only use IPv4 for network communication.
ipv6_public_addr_auto: Auto Public IPv6
ipv6_public_addr_auto_help: Auto-obtain a public IPv6 address from a peer that shares its IPv6 subnet.
enable_kcp_proxy: Enable KCP Proxy
enable_kcp_proxy_help: Convert TCP traffic to KCP traffic to reduce latency and boost transmission speed.
@@ -156,6 +159,9 @@ disable_tcp_hole_punching_help: Disable tcp hole punching
disable_udp_hole_punching: Disable UDP Hole Punching
disable_udp_hole_punching_help: Disable udp hole punching
disable_upnp: Disable UPnP
disable_upnp_help: Disable runtime UPnP/NAT-PMP port mapping for eligible listeners; automatic port mapping is enabled by default.
disable_sym_hole_punching: Disable Symmetric NAT Hole Punching
disable_sym_hole_punching_help: Disable special hole punching handling for symmetric NAT (based on birthday attack), treat symmetric NAT as cone NAT
@@ -115,6 +115,7 @@ export interface NetworkConfig {
use_smoltcp?: boolean
disable_ipv6?: boolean
ipv6_public_addr_auto?: boolean
enable_kcp_proxy?: boolean
disable_kcp_input?: boolean
enable_quic_proxy?: boolean
@@ -132,6 +133,7 @@ export interface NetworkConfig {
disable_encryption?: boolean
disable_tcp_hole_punching?: boolean
disable_udp_hole_punching?: boolean
disable_upnp?: boolean
disable_sym_hole_punching?: boolean
enable_relay_network_whitelist?: boolean
@@ -190,6 +192,7 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
use_smoltcp: false,
disable_ipv6: false,
ipv6_public_addr_auto: false,
enable_kcp_proxy: false,
disable_kcp_input: false,
enable_quic_proxy: false,
@@ -207,6 +210,7 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
disable_encryption: false,
disable_tcp_hole_punching: false,
disable_udp_hole_punching: false,
disable_upnp: false,
disable_sym_hole_punching: false,
enable_relay_network_whitelist: false,
relay_network_whitelist: [],
+7 -1
View File
@@ -3,7 +3,7 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier"
version = "2.6.2"
version = "2.6.3"
edition.workspace = true
rust-version.workspace = true
authors = ["kkrainbow"]
@@ -50,6 +50,8 @@ time = "0.3"
toml = "0.8.12"
chrono = { version = "0.4.37", features = ["serde"] }
guarden = "0.1"
delegate = "0.13.5"
itertools = "0.14.0"
@@ -219,6 +221,7 @@ async-ringbuf = "0.3.1"
service-manager = { git = "https://github.com/EasyTier/service-manager-rs.git", branch = "main" }
zstd = { version = "0.13", optional = true }
lzokay-native = { version = "0.1", optional = true }
kcp-sys = { git = "https://github.com/EasyTier/kcp-sys", rev = "94964794caaed5d388463137da59b97499619e5f", optional = true }
@@ -356,6 +359,7 @@ default = [
"faketcp",
"magic-dns",
"zstd",
"lzo",
]
full = [
"websocket",
@@ -370,6 +374,7 @@ full = [
"faketcp",
"magic-dns",
"zstd",
"lzo",
]
wireguard = ["dep:boringtun", "dep:ring"]
quic = ["dep:quinn", "dep:quinn-plaintext", "dep:rustls", "dep:rcgen"]
@@ -400,5 +405,6 @@ tracing = ["tokio/tracing", "dep:console-subscriber"]
magic-dns = ["dep:hickory-client", "dep:hickory-server"]
faketcp = ["dep:flume"]
zstd = ["dep:zstd"]
lzo = ["dep:lzokay-native"]
# For Network Extension on macOS
macos-ne = []
+5
View File
@@ -191,6 +191,11 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
)
.type_attribute("peer_rpc.RouteForeignNetworkSummary", "#[derive(Hash, Eq)]")
.type_attribute("common.RpcDescriptor", "#[derive(Hash, Eq)]")
.type_attribute("acl.Acl", "#[serde(default)]")
.type_attribute("acl.AclV1", "#[serde(default)]")
.type_attribute("acl.Chain", "#[serde(default)]")
.type_attribute("acl.Rule", "#[serde(default)]")
.type_attribute("acl.GroupInfo", "#[serde(default)]")
.field_attribute(".api.manage.NetworkConfig", "#[serde(default)]")
.service_generator(Box::new(easytier_rpc_build::ServiceGenerator::default()))
.btree_map(["."])
+2 -2
View File
@@ -194,8 +194,8 @@ core_clap:
en: "the url of the ipv6 listener, e.g.: tcp://[::]:11010, if not set, will listen on random udp port"
zh-CN: "IPv6 监听器的URL,例如:tcp://[::]:11010,如果未设置,将在随机UDP端口上监听"
compression:
en: "compression algorithm to use, support none, zstd. default is none"
zh-CN: "要使用的压缩算法,支持 none、zstd。默认为 none"
en: "compression algorithm to use, supported: %{algorithms}. default is none"
zh-CN: "要使用的压缩算法,支持%{algorithms}。默认为 none"
mapped_listeners:
en: "manually specify the public address of the listener, other nodes can use this address to connect to this node. e.g.: tcp://123.123.123.123:11223, can specify multiple."
zh-CN: "手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。例如:tcp://123.123.123.123:11223,可以指定多个。"
+7 -6
View File
@@ -137,12 +137,13 @@ pub fn setup_socket_for_win<S: AsRawSocket>(
}
let socket = SOCKET(socket.as_raw_socket() as usize);
let optval = 1_i32.to_ne_bytes();
unsafe {
if setsockopt(socket, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, Some(&optval)) == SOCKET_ERROR {
return Err(io::Error::last_os_error());
}
}
// let optval = 1_i32.to_ne_bytes();
// unsafe {
// if setsockopt(socket, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, Some(&optval)) == SOCKET_ERROR {
// return Err(io::Error::last_os_error());
// }
// }
if let Some(iface) = bind_dev {
set_ip_unicast_if(socket, bind_addr, &iface)?;
+39
View File
@@ -1339,6 +1339,45 @@ mod tests {
assert_eq!(result.matched_rule, Some(RuleId::Priority(70)));
}
#[tokio::test]
async fn test_forward_acl_source_ip_whitelist() {
let mut acl_config = Acl::default();
let mut acl_v1 = AclV1::default();
let mut chain = Chain {
name: "subnet_proxy_protect".to_string(),
chain_type: ChainType::Forward as i32,
enabled: true,
default_action: Action::Drop as i32,
..Default::default()
};
chain.rules.push(Rule {
name: "allow_my_devices".to_string(),
priority: 1000,
enabled: true,
action: Action::Allow as i32,
protocol: Protocol::Any as i32,
source_ips: vec!["10.172.192.2/32".to_string()],
..Default::default()
});
acl_v1.chains.push(chain);
acl_config.acl_v1 = Some(acl_v1);
let processor = AclProcessor::new(acl_config);
let mut packet_info = create_test_packet_info();
packet_info.dst_ip = "192.168.1.10".parse().unwrap();
packet_info.src_ip = "10.172.192.2".parse().unwrap();
let result = processor.process_packet(&packet_info, ChainType::Forward);
assert_eq!(result.action, Action::Allow);
assert_eq!(result.matched_rule, Some(RuleId::Priority(1000)));
packet_info.src_ip = "10.172.192.3".parse().unwrap();
let result = processor.process_packet(&packet_info, ChainType::Forward);
assert_eq!(result.action, Action::Drop);
assert_eq!(result.matched_rule, Some(RuleId::Default));
}
fn create_test_acl_config() -> Acl {
let mut acl_config = Acl::default();
+46 -10
View File
@@ -1,4 +1,4 @@
#[cfg(feature = "zstd")]
#[cfg(any(feature = "zstd", feature = "lzo"))]
use anyhow::Context;
#[cfg(feature = "zstd")]
use dashmap::DashMap;
@@ -53,6 +53,13 @@ impl DefaultCompressor {
)
})
}),
#[cfg(feature = "lzo")]
CompressorAlgo::Lzo => lzokay_native::compress(data).with_context(|| {
format!(
"Failed to compress data with algorithm: {:?}",
compress_algo
)
}),
CompressorAlgo::None => Ok(data.to_vec()),
}
}
@@ -85,6 +92,13 @@ impl DefaultCompressor {
compress_algo
))
}),
#[cfg(feature = "lzo")]
CompressorAlgo::Lzo => lzokay_native::decompress_all(data, None).with_context(|| {
format!(
"Failed to decompress data with algorithm: {:?}",
compress_algo
)
}),
CompressorAlgo::None => Ok(data.to_vec()),
}
}
@@ -181,14 +195,13 @@ thread_local! {
static DCTX_MAP: RefCell<DashMap<CompressorAlgo, bulk::Decompressor<'static>>> = RefCell::new(DashMap::new());
}
#[cfg(all(test, feature = "zstd"))]
#[cfg(all(test, any(feature = "zstd", feature = "lzo")))]
pub mod tests {
use super::*;
#[tokio::test]
async fn test_compress() {
let text = b"12345670000000000000000000";
let mut packet = ZCPacket::new_with_payload(text);
async fn test_compress_algo(compress_algo: CompressorAlgo) {
let text = vec![b'a'; 4096];
let mut packet = ZCPacket::new_with_payload(&text);
packet.fill_peer_manager_hdr(0, 0, 0);
let compressor = DefaultCompressor {};
@@ -200,7 +213,7 @@ pub mod tests {
);
compressor
.compress(&mut packet, CompressorAlgo::ZstdDefault)
.compress(&mut packet, compress_algo)
.await
.unwrap();
println!(
@@ -215,8 +228,7 @@ pub mod tests {
assert!(!packet.peer_manager_header().unwrap().is_compressed());
}
#[tokio::test]
async fn test_short_text_compress() {
async fn test_short_text_compress_algo(compress_algo: CompressorAlgo) {
let text = b"1234";
let mut packet = ZCPacket::new_with_payload(text);
packet.fill_peer_manager_hdr(0, 0, 0);
@@ -225,7 +237,7 @@ pub mod tests {
// short text can't be compressed
compressor
.compress(&mut packet, CompressorAlgo::ZstdDefault)
.compress(&mut packet, compress_algo)
.await
.unwrap();
assert!(!packet.peer_manager_header().unwrap().is_compressed());
@@ -234,4 +246,28 @@ pub mod tests {
assert_eq!(packet.payload(), text);
assert!(!packet.peer_manager_header().unwrap().is_compressed());
}
#[cfg(feature = "zstd")]
#[tokio::test]
async fn test_zstd_compress() {
test_compress_algo(CompressorAlgo::ZstdDefault).await;
}
#[cfg(feature = "zstd")]
#[tokio::test]
async fn test_zstd_short_text_compress() {
test_short_text_compress_algo(CompressorAlgo::ZstdDefault).await;
}
#[cfg(feature = "lzo")]
#[tokio::test]
async fn test_lzo_compress() {
test_compress_algo(CompressorAlgo::Lzo).await;
}
#[cfg(feature = "lzo")]
#[tokio::test]
async fn test_lzo_short_text_compress() {
test_short_text_compress_algo(CompressorAlgo::Lzo).await;
}
}
+66
View File
@@ -71,6 +71,7 @@ pub fn gen_default_flags() -> Flags {
need_p2p: false,
instance_recv_bps_limit: u64::MAX,
disable_upnp: false,
disable_relay_data: false,
}
}
@@ -1336,6 +1337,71 @@ stun_servers = [
assert!(err.to_string().contains("mapped listener port is missing"));
}
#[test]
fn test_acl_toml_rule_uses_defaults_for_omitted_fields() {
use crate::proto::acl::{Action, ChainType, Protocol};
let config_str = r#"
[[acl.acl_v1.chains]]
name = "subnet_proxy_protect"
chain_type = 3
enabled = true
default_action = 2
[[acl.acl_v1.chains.rules]]
name = "allow_my_devices"
priority = 1000
action = 1
source_ips = ["10.172.192.2/32"]
protocol = 5
enabled = true
"#;
let config = TomlConfigLoader::new_from_str(config_str).unwrap();
let acl = config.get_acl().unwrap();
let acl_v1 = acl.acl_v1.unwrap();
let chain = &acl_v1.chains[0];
let rule = &chain.rules[0];
assert_eq!(chain.chain_type, ChainType::Forward as i32);
assert_eq!(chain.default_action, Action::Drop as i32);
assert_eq!(rule.action, Action::Allow as i32);
assert_eq!(rule.protocol, Protocol::Any as i32);
assert_eq!(rule.source_ips, vec!["10.172.192.2/32"]);
assert!(rule.ports.is_empty());
assert!(rule.source_ports.is_empty());
assert!(rule.destination_ips.is_empty());
assert!(rule.source_groups.is_empty());
assert!(rule.destination_groups.is_empty());
assert_eq!(rule.rate_limit, 0);
assert_eq!(rule.burst_limit, 0);
assert!(!rule.stateful);
}
#[test]
fn test_acl_toml_group_can_omit_declares_or_members() {
let declares_only = r#"
[acl.acl_v1.group]
[[acl.acl_v1.group.declares]]
group_name = "admin"
group_secret = "admin-pw"
"#;
let config = TomlConfigLoader::new_from_str(declares_only).unwrap();
let group = config.get_acl().unwrap().acl_v1.unwrap().group.unwrap();
assert_eq!(group.declares.len(), 1);
assert!(group.members.is_empty());
let members_only = r#"
[acl.acl_v1.group]
members = ["admin"]
"#;
let config = TomlConfigLoader::new_from_str(members_only).unwrap();
let group = config.get_acl().unwrap().acl_v1.unwrap().group.unwrap();
assert!(group.declares.is_empty());
assert_eq!(group.members, vec!["admin"]);
}
#[test]
fn test_network_config_source_user_is_implicit() {
let config = TomlConfigLoader::default();
+20 -12
View File
@@ -73,16 +73,6 @@ pub async fn socket_addrs(
.port()
.or_else(default_port_number)
.ok_or(Error::InvalidUrl(url.to_string()))?;
// See https://github.com/EasyTier/EasyTier/pull/947
// here is for compatibility with old version
let port = match port {
0 => match url.scheme() {
"ws" => 80,
"wss" => 443,
_ => port,
},
_ => port,
};
// if host is an ip address, return it directly
match host {
@@ -121,9 +111,8 @@ pub async fn socket_addrs(
#[cfg(test)]
mod tests {
use crate::defer;
use super::*;
use guarden::defer;
#[tokio::test]
async fn test_socket_addrs() {
@@ -140,4 +129,23 @@ mod tests {
assert_eq!(2, addrs.len(), "addrs: {:?}", addrs);
println!("addrs2: {:?}", addrs);
}
#[tokio::test]
async fn socket_addrs_preserves_explicit_zero_port() {
    // (raw url, caller-supplied default port, port expected in the result).
    // An explicit `:0` must be kept verbatim; the default only fills in a
    // missing port.
    let cases = vec![
        ("ws://127.0.0.1:0", 80u16, 0u16),
        ("wss://127.0.0.1:0", 443, 0),
        ("ws://127.0.0.1", 80, 80),
        ("wss://127.0.0.1", 443, 443),
    ];
    for &(raw_url, default_port, expected_port) in &cases {
        let parsed = url::Url::parse(raw_url).unwrap();
        let resolved = socket_addrs(&parsed, || Some(default_port)).await.unwrap();
        let expected = vec![SocketAddr::from(([127, 0, 0, 1], expected_port))];
        assert_eq!(resolved, expected);
    }
}
}
+160 -9
View File
@@ -1,5 +1,5 @@
use std::{
collections::{HashMap, hash_map::DefaultHasher},
collections::{BTreeSet, HashMap, hash_map::DefaultHasher},
hash::Hasher,
net::{IpAddr, SocketAddr},
sync::{Arc, Mutex},
@@ -203,6 +203,7 @@ pub struct GlobalCtx {
cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>,
cached_ipv6: AtomicCell<Option<cidr::Ipv6Inet>>,
public_ipv6_lease: AtomicCell<Option<cidr::Ipv6Inet>>,
public_ipv6_routes: Mutex<BTreeSet<std::net::Ipv6Addr>>,
cached_proxy_cidrs: AtomicCell<Option<Vec<ProxyNetworkConfig>>>,
ip_collector: Mutex<Option<Arc<IPCollector>>>,
@@ -216,6 +217,12 @@ pub struct GlobalCtx {
flags: ArcSwap<Flags>,
// Runtime/base advertised feature flags before config-owned fields are
// overlaid by set_flags. Keep this separate so config patches do not erase
// runtime state such as public-server role, IPv6 provider status, or the
// non-whitelist avoid-relay preference.
base_feature_flags: AtomicCell<PeerFeatureFlag>,
feature_flags: AtomicCell<PeerFeatureFlag>,
token_bucket_manager: TokenBucketManager,
@@ -246,8 +253,17 @@ impl std::fmt::Debug for GlobalCtx {
pub type ArcGlobalCtx = std::sync::Arc<GlobalCtx>;
impl GlobalCtx {
fn derive_feature_flags(flags: &Flags, current: Option<PeerFeatureFlag>) -> PeerFeatureFlag {
let mut feature_flags = current.unwrap_or_default();
fn apply_disable_relay_data_flag(
flags: &Flags,
mut feature_flags: PeerFeatureFlag,
) -> PeerFeatureFlag {
if flags.disable_relay_data {
feature_flags.avoid_relay_data = true;
}
feature_flags
}
fn derive_feature_flags(flags: &Flags, mut feature_flags: PeerFeatureFlag) -> PeerFeatureFlag {
feature_flags.kcp_input = !flags.disable_kcp_input;
feature_flags.no_relay_kcp = flags.disable_relay_kcp;
feature_flags.support_conn_list_sync = true;
@@ -255,7 +271,7 @@ impl GlobalCtx {
feature_flags.no_relay_quic = flags.disable_relay_quic;
feature_flags.need_p2p = flags.need_p2p;
feature_flags.disable_p2p = flags.disable_p2p;
feature_flags
Self::apply_disable_relay_data_flag(flags, feature_flags)
}
pub fn new(config_fs: impl ConfigLoader + 'static) -> Self {
@@ -284,7 +300,8 @@ impl GlobalCtx {
let flags = config_fs.get_flags();
let feature_flags = Self::derive_feature_flags(&flags, None);
let base_feature_flags = PeerFeatureFlag::default();
let feature_flags = Self::derive_feature_flags(&flags, base_feature_flags);
let credential_storage_path = config_fs.get_credential_file();
let credential_manager = Arc::new(CredentialManager::new(credential_storage_path));
@@ -300,6 +317,7 @@ impl GlobalCtx {
cached_ipv4: AtomicCell::new(None),
cached_ipv6: AtomicCell::new(None),
public_ipv6_lease: AtomicCell::new(None),
public_ipv6_routes: Mutex::new(BTreeSet::new()),
cached_proxy_cidrs: AtomicCell::new(None),
ip_collector: Mutex::new(Some(Arc::new(IPCollector::new(
@@ -316,6 +334,8 @@ impl GlobalCtx {
flags: ArcSwap::new(Arc::new(flags)),
base_feature_flags: AtomicCell::new(base_feature_flags),
feature_flags: AtomicCell::new(feature_flags),
token_bucket_manager: TokenBucketManager::new(),
@@ -395,6 +415,11 @@ impl GlobalCtx {
self.public_ipv6_lease.store(addr);
}
pub fn set_public_ipv6_routes(&self, routes: BTreeSet<cidr::Ipv6Inet>) {
    // Keep only the host addresses; the prefix length is irrelevant for the
    // exact-address membership checks this set is used for.
    let addrs: BTreeSet<std::net::Ipv6Addr> =
        routes.iter().map(|inet| inet.address()).collect();
    *self.public_ipv6_routes.lock().unwrap() = addrs;
}
pub fn is_ip_local_ipv6(&self, ip: &std::net::Ipv6Addr) -> bool {
self.get_ipv6().map(|x| x.address() == *ip).unwrap_or(false)
|| self
@@ -403,6 +428,10 @@ impl GlobalCtx {
.unwrap_or(false)
}
pub fn is_ip_easytier_managed_ipv6(&self, ip: &std::net::Ipv6Addr) -> bool {
    // "Managed" means either one of our own local/virtual IPv6 addresses or
    // a public IPv6 route this instance advertises.
    if self.is_ip_local_ipv6(ip) {
        return true;
    }
    self.public_ipv6_routes.lock().unwrap().contains(ip)
}
pub fn get_advertised_ipv6_public_addr_prefix(&self) -> Option<cidr::Ipv6Cidr> {
    // Copy the currently advertised public-IPv6 prefix (if any) out of the lock.
    let guard = self.advertised_ipv6_public_addr_prefix.lock().unwrap();
    *guard
}
@@ -502,7 +531,7 @@ impl GlobalCtx {
self.config.set_flags(flags.clone());
self.feature_flags.store(Self::derive_feature_flags(
&flags,
Some(self.feature_flags.load()),
self.base_feature_flags.load(),
));
self.flags.store(Arc::new(flags));
}
@@ -567,8 +596,53 @@ impl GlobalCtx {
self.feature_flags.load()
}
pub fn set_feature_flags(&self, flags: PeerFeatureFlag) {
self.feature_flags.store(flags);
/// Replace the runtime/base advertised flags as a complete snapshot.
///
/// This is intended for foreign scoped contexts that inherit an already
/// computed feature-flag snapshot from their parent. Most callers should use
/// a narrower setter so they do not accidentally overwrite unrelated runtime
/// state.
pub fn set_base_advertised_feature_flags(&self, feature_flags: PeerFeatureFlag) {
    self.base_feature_flags.store(feature_flags);
    // Re-derive the effective advertised flags from the new snapshot: the
    // config-level `disable_relay_data` flag must still be able to force
    // `avoid_relay_data` on, regardless of what the snapshot says.
    let flags = self.flags.load();
    self.feature_flags
        .store(Self::apply_disable_relay_data_flag(
            flags.as_ref(),
            feature_flags,
        ));
}
/// Set the avoid-relay preference that is independent of disable_relay_data.
///
/// disable_relay_data still forces the effective advertised flag to true,
/// but this base preference is preserved when that config flag is toggled.
///
/// Returns `true` if the effective advertised `avoid_relay_data` bit changed
/// as a result of this call.
pub fn set_avoid_relay_data_preference(&self, avoid_relay_data: bool) -> bool {
    // Record the preference in the base snapshot so future set_flags() calls
    // re-derive the effective value from it.
    let mut base_feature_flags = self.base_feature_flags.load();
    base_feature_flags.avoid_relay_data = avoid_relay_data;
    self.base_feature_flags.store(base_feature_flags);

    // Effective value = preference OR the config-level override.
    let mut feature_flags = self.feature_flags.load();
    let previous = feature_flags.avoid_relay_data;
    feature_flags.avoid_relay_data = avoid_relay_data || self.flags.load().disable_relay_data;
    self.feature_flags.store(feature_flags);
    previous != feature_flags.avoid_relay_data
}
/// Set the runtime IPv6-provider advertised bit without touching
/// config-derived feature flags.
///
/// Returns `true` if the advertised bit actually changed.
pub fn set_ipv6_public_addr_provider_feature_flag(&self, enabled: bool) -> bool {
    // Keep the base snapshot in sync so later re-derivations (set_flags)
    // preserve the provider bit.
    let mut base_feature_flags = self.base_feature_flags.load();
    base_feature_flags.ipv6_public_addr_provider = enabled;
    self.base_feature_flags.store(base_feature_flags);

    let mut feature_flags = self.feature_flags.load();
    // No change to advertise — skip the store and report "unchanged".
    if feature_flags.ipv6_public_addr_provider == enabled {
        return false;
    }
    feature_flags.ipv6_public_addr_provider = enabled;
    self.feature_flags.store(feature_flags);
    true
}
pub fn token_bucket_manager(&self) -> &TokenBucketManager {
@@ -785,7 +859,7 @@ pub mod tests {
let mut feature_flags = global_ctx.get_feature_flags();
feature_flags.avoid_relay_data = true;
feature_flags.is_public_server = true;
global_ctx.set_feature_flags(feature_flags);
global_ctx.set_base_advertised_feature_flags(feature_flags);
let mut flags = global_ctx.get_flags().clone();
flags.disable_kcp_input = true;
@@ -809,6 +883,83 @@ pub mod tests {
assert!(!feature_flags.ipv6_public_addr_provider);
}
#[tokio::test]
async fn set_base_advertised_feature_flags_applies_current_values() {
    let global_ctx = GlobalCtx::new(TomlConfigLoader::default());

    // A snapshot with several non-default bits flipped.
    let feature_flags = PeerFeatureFlag {
        kcp_input: false,
        no_relay_kcp: true,
        quic_input: false,
        no_relay_quic: true,
        is_public_server: true,
        ..Default::default()
    };
    global_ctx.set_base_advertised_feature_flags(feature_flags);

    // With default config flags, the snapshot must be advertised verbatim.
    assert_eq!(feature_flags, global_ctx.get_feature_flags());
}
#[tokio::test]
async fn set_base_advertised_feature_flags_keeps_disable_relay_data_effective() {
    let config = TomlConfigLoader::default();
    let global_ctx = GlobalCtx::new(config);

    // Turn on the config-level override first.
    let mut flags = global_ctx.get_flags().clone();
    flags.disable_relay_data = true;
    global_ctx.set_flags(flags);

    // Install a base snapshot that explicitly clears avoid_relay_data.
    let mut feature_flags = global_ctx.get_feature_flags();
    feature_flags.avoid_relay_data = false;
    feature_flags.is_public_server = true;
    global_ctx.set_base_advertised_feature_flags(feature_flags);

    // disable_relay_data must still force the advertised bit on, while the
    // rest of the snapshot (is_public_server) is applied as given.
    let advertised_feature_flags = global_ctx.get_feature_flags();
    assert!(advertised_feature_flags.avoid_relay_data);
    assert!(advertised_feature_flags.is_public_server);

    // Clearing the config flag lets the base preference (false) win again,
    // without losing other runtime bits.
    let mut flags = global_ctx.get_flags().clone();
    flags.disable_relay_data = false;
    global_ctx.set_flags(flags);

    let advertised_feature_flags = global_ctx.get_feature_flags();
    assert!(!advertised_feature_flags.avoid_relay_data);
    assert!(advertised_feature_flags.is_public_server);
}
#[tokio::test]
async fn disable_relay_data_sets_avoid_relay_feature_flag() {
    let config = TomlConfigLoader::default();
    let global_ctx = GlobalCtx::new(config);

    // With no preference set, the advertised bit follows the config flag.
    let mut flags = global_ctx.get_flags().clone();
    flags.disable_relay_data = true;
    global_ctx.set_flags(flags);
    assert!(global_ctx.get_feature_flags().avoid_relay_data);

    let mut flags = global_ctx.get_flags().clone();
    flags.disable_relay_data = false;
    global_ctx.set_flags(flags);
    assert!(!global_ctx.get_feature_flags().avoid_relay_data);

    // Once the runtime preference is set, the advertised bit stays on even
    // after the config flag is toggled off again.
    global_ctx.set_avoid_relay_data_preference(true);

    let mut flags = global_ctx.get_flags().clone();
    flags.disable_relay_data = true;
    global_ctx.set_flags(flags);
    assert!(global_ctx.get_feature_flags().avoid_relay_data);

    let mut flags = global_ctx.get_flags().clone();
    flags.disable_relay_data = false;
    global_ctx.set_flags(flags);
    assert!(global_ctx.get_feature_flags().avoid_relay_data);
}
#[tokio::test]
async fn should_deny_proxy_for_process_wide_rpc_port() {
protected_port::clear_protected_tcp_ports_for_test();
+193 -11
View File
@@ -58,6 +58,21 @@ fn parse_env_filter(default_level: Option<LevelFilter>) -> Result<EnvFilter, any
.with_context(|| "failed to create env filter")
}
fn parse_static_filter(level: LevelFilter) -> Result<EnvFilter, anyhow::Error> {
    // Build a filter from the level alone. Parsing an empty directive string
    // keeps RUST_LOG out of the picture entirely.
    let builder = EnvFilter::builder().with_default_directive(level.into());
    builder
        .parse("")
        .with_context(|| "failed to create static filter")
}
fn parse_file_filter(level: LevelFilter) -> Result<EnvFilter, anyhow::Error> {
    // OFF must stay OFF: use the static filter so RUST_LOG cannot re-enable
    // a logger the config disabled. Any other level still layers RUST_LOG
    // directives on top of the default.
    if level == LevelFilter::OFF {
        parse_static_filter(level)
    } else {
        parse_env_filter(Some(level))
    }
}
fn is_log(meta: &Metadata) -> bool {
    // Accept the exact LOG_TARGET and any of its `::`-separated descendants.
    let target = meta.target();
    target == LOG_TARGET || target.starts_with(&format!("{LOG_TARGET}::"))
}
@@ -165,14 +180,17 @@ fn file_layers(
) -> anyhow::Result<(Vec<BoxLayer>, Option<NewFilterSender>)> {
let mut layers = Vec::new();
let level = config.level.map(|s| s.parse().unwrap());
let level = config
.level
.map(|s| s.parse().unwrap())
.unwrap_or(LevelFilter::OFF);
if matches!(level, Some(LevelFilter::OFF)) && !reload {
if matches!(level, LevelFilter::OFF) && !reload {
return Ok((layers, None));
}
let (file_filter, file_filter_reloader) =
tracing_subscriber::reload::Layer::<_, Registry>::new(parse_env_filter(level)?);
tracing_subscriber::reload::Layer::<_, Registry>::new(parse_file_filter(level)?);
let layer = |wrapper| {
layer()
@@ -218,9 +236,7 @@ fn file_layers(
// 初始化全局状态
let _ = LOGGER_LEVEL_SENDER.set(std::sync::Mutex::new(tx.clone()));
if let Some(level) = level {
let _ = CURRENT_LOG_LEVEL.set(std::sync::Mutex::new(level.to_string()));
}
let _ = CURRENT_LOG_LEVEL.set(std::sync::Mutex::new(level.to_string()));
std::thread::spawn(move || {
while let Ok(lf) = rx.recv() {
@@ -232,11 +248,7 @@ fn file_layers(
}
};
let mut new_filter = match EnvFilter::builder()
.with_default_directive(parsed_level.into())
.from_env()
.with_context(|| "failed to create file filter")
{
let mut new_filter = match parse_file_filter(parsed_level) {
Ok(filter) => Some(filter),
Err(e) => {
error!("Failed to build new log filter for {:?}: {:?}", lf, e);
@@ -268,6 +280,36 @@ mod tests {
use super::*;
use crate::common::config::FileLoggerConfig;
const RUST_LOG: &str = "RUST_LOG";
/// RAII guard that records an environment variable's previous value and
/// restores it (or removes the variable again) on drop, so env mutations
/// cannot leak between tests.
struct EnvVarGuard {
    key: &'static str,
    previous: Option<std::ffi::OsString>,
}

impl EnvVarGuard {
    /// Set `key` to `value`, remembering whatever it held before.
    fn set(key: &'static str, value: &str) -> Self {
        let saved = std::env::var_os(key);
        // SAFETY: the tests using this guard run under #[serial_test::serial],
        // so no other thread touches the environment concurrently.
        unsafe { std::env::set_var(key, value) };
        Self {
            key,
            previous: saved,
        }
    }

    /// Remove `key`, remembering whatever it held before.
    fn unset(key: &'static str) -> Self {
        let saved = std::env::var_os(key);
        // SAFETY: see `set` — serialized tests, no concurrent env access.
        unsafe { std::env::remove_var(key) };
        Self {
            key,
            previous: saved,
        }
    }
}

impl Drop for EnvVarGuard {
    fn drop(&mut self) {
        // Restore the saved value; `None` means the variable was absent.
        if let Some(value) = self.previous.as_deref() {
            // SAFETY: see `set` — serialized tests, no concurrent env access.
            unsafe { std::env::set_var(self.key, value) };
        } else {
            // SAFETY: see `set` — serialized tests, no concurrent env access.
            unsafe { std::env::remove_var(self.key) };
        }
    }
}
#[ctor::ctor]
fn init() {
let _ = Registry::default()
@@ -276,7 +318,147 @@ mod tests {
}
#[test]
fn default_file_logger_level_is_off_without_reload() {
    // With no configured level and reload disabled, the file logger is OFF:
    // no layers are installed and no reload sender is handed back.
    let cfg = FileLoggerConfig::default();
    let (layers, sender) = file_layers(cfg, false).unwrap();
    assert!(sender.is_none());
    assert!(layers.is_empty());
}
#[test]
#[serial_test::serial]
fn default_file_logger_level_filters_info_with_reload() {
    // Even with RUST_LOG=info in the environment, a file logger whose
    // configured level defaults to OFF must not write info records, although
    // the layers are created (reload=true keeps them around for later).
    let _guard = EnvVarGuard::set(RUST_LOG, "info");
    let temp_dir = tempfile::tempdir().unwrap();
    let log_file_name = "default-off-test.log".to_string();
    let log_path = temp_dir.path().join(&log_file_name);
    let cfg = FileLoggerConfig {
        file: Some(log_file_name),
        dir: Some(temp_dir.path().to_string_lossy().to_string()),
        ..Default::default()
    };

    let (layers, _sender) = file_layers(cfg, true).unwrap();
    let marker = "default-file-logger-off-marker";
    let subscriber = Registry::default().with(layers);
    tracing::subscriber::with_default(subscriber, || {
        tracing::info!(target: LOG_TARGET, "{}", marker);
        // Allow time for any record to reach the file before checking.
        std::thread::sleep(std::time::Duration::from_millis(300));
    });

    let content = std::fs::read_to_string(&log_path).unwrap_or_default();
    assert!(
        !content.contains(marker),
        "default file logger level should filter info logs"
    );
}
#[test]
#[serial_test::serial]
fn file_logger_level_uses_env_filter_when_enabled() {
    // With a configured level (logger enabled), RUST_LOG directives apply:
    // RUST_LOG=debug lets a debug record through even though the configured
    // default level is INFO.
    let _guard = EnvVarGuard::set(RUST_LOG, "debug");
    let temp_dir = tempfile::tempdir().unwrap();
    let log_file_name = "env-filter-test.log".to_string();
    let log_path = temp_dir.path().join(&log_file_name);
    let cfg = FileLoggerConfig {
        level: Some(LevelFilter::INFO.to_string()),
        file: Some(log_file_name),
        dir: Some(temp_dir.path().to_string_lossy().to_string()),
        ..Default::default()
    };

    let (layers, _sender) = file_layers(cfg, true).unwrap();
    let marker = "file-logger-env-filter-marker";
    let subscriber = Registry::default().with(layers);
    tracing::subscriber::with_default(subscriber, || {
        tracing::debug!(target: LOG_TARGET, "{}", marker);
        // Allow time for the record to be flushed to disk.
        std::thread::sleep(std::time::Duration::from_millis(300));
    });

    let content = std::fs::read_to_string(&log_path).unwrap_or_default();
    assert!(
        content.contains(marker),
        "enabled file logger should use RUST_LOG directives"
    );
}
#[test]
#[serial_test::serial]
fn file_logger_reload_uses_env_filter_when_enabled() {
    // A logger created without a configured level, then enabled through the
    // reload channel, must honor RUST_LOG: reloading to INFO with
    // RUST_LOG=debug lets a debug record through.
    let _guard = EnvVarGuard::set(RUST_LOG, "debug");
    let temp_dir = tempfile::tempdir().unwrap();
    let log_file_name = "reload-env-filter-test.log".to_string();
    let log_path = temp_dir.path().join(&log_file_name);
    let cfg = FileLoggerConfig {
        file: Some(log_file_name),
        dir: Some(temp_dir.path().to_string_lossy().to_string()),
        ..Default::default()
    };

    let (layers, sender) = file_layers(cfg, true).unwrap();
    let sender = sender.expect("reload=true should return a sender");
    let marker = "file-logger-reload-env-filter-marker";
    let subscriber = Registry::default().with(layers);
    tracing::subscriber::with_default(subscriber, || {
        // Enable the logger at INFO, wait for the reload thread to apply it,
        // then emit a debug record that RUST_LOG=debug should admit.
        sender.send(LevelFilter::INFO.to_string()).unwrap();
        std::thread::sleep(std::time::Duration::from_millis(300));
        tracing::debug!(target: LOG_TARGET, "{}", marker);
        std::thread::sleep(std::time::Duration::from_millis(300));
    });

    let content = std::fs::read_to_string(&log_path).unwrap_or_default();
    assert!(
        content.contains(marker),
        "file logger enabled by reload should use RUST_LOG directives"
    );
}
#[test]
#[serial_test::serial]
fn file_logger_reload_off_ignores_env_filter() {
    // Reloading the level to OFF must fully silence the file logger, even
    // when RUST_LOG would otherwise allow the record through.
    let _guard = EnvVarGuard::set(RUST_LOG, "info");
    let temp_dir = tempfile::tempdir().unwrap();
    let log_file_name = "reload-off-test.log".to_string();
    let log_path = temp_dir.path().join(&log_file_name);
    let cfg = FileLoggerConfig {
        level: Some(LevelFilter::INFO.to_string()),
        file: Some(log_file_name),
        dir: Some(temp_dir.path().to_string_lossy().to_string()),
        ..Default::default()
    };

    let (layers, sender) = file_layers(cfg, true).unwrap();
    let sender = sender.expect("reload=true should return a sender");
    let marker = "file-logger-reload-off-marker";
    let subscriber = Registry::default().with(layers);
    tracing::subscriber::with_default(subscriber, || {
        // Switch the active filter to OFF, wait for the reload thread to
        // apply it, then emit a record that must be dropped.
        sender.send(LevelFilter::OFF.to_string()).unwrap();
        std::thread::sleep(std::time::Duration::from_millis(300));
        tracing::info!(target: LOG_TARGET, "{}", marker);
        std::thread::sleep(std::time::Duration::from_millis(300));
    });

    let content = std::fs::read_to_string(&log_path).unwrap_or_default();
    assert!(
        !content.contains(marker),
        "disabled file logger should ignore RUST_LOG directives"
    );
}
#[test]
#[serial_test::serial]
fn test_logger_reload() {
let _guard = EnvVarGuard::unset(RUST_LOG);
let temp_dir = tempfile::tempdir().unwrap();
let log_file_name = "reload-test.log".to_string();
let log_path = temp_dir.path().join(&log_file_name);
+70 -30
View File
@@ -64,6 +64,24 @@ async fn resolve_mapped_listener_addrs(listener: &url::Url) -> Result<Vec<Socket
socket_addrs(listener, || mapped_listener_port(listener)).await
}
/// Convenience wrapper over [`is_usable_public_ipv6_candidate_with_mode`]
/// that supplies the process-wide TESTING toggle.
fn is_usable_public_ipv6_candidate(ip: &Ipv6Addr, global_ctx: &ArcGlobalCtx) -> bool {
    is_usable_public_ipv6_candidate_with_mode(ip, global_ctx, TESTING.load(Ordering::Relaxed))
}
/// Decide whether `ip` may be used as a public IPv6 direct-connect candidate.
///
/// EasyTier-managed addresses are rejected unconditionally — even in testing
/// mode — so the underlay never dials through our own overlay address. In
/// testing mode every other address is accepted; otherwise loopback,
/// unspecified, unique-local, link-local, and multicast addresses are
/// filtered out as well.
fn is_usable_public_ipv6_candidate_with_mode(
    ip: &Ipv6Addr,
    global_ctx: &ArcGlobalCtx,
    testing: bool,
) -> bool {
    if global_ctx.is_ip_easytier_managed_ipv6(ip) {
        return false;
    }
    if testing {
        return true;
    }
    !(ip.is_loopback()
        || ip.is_unspecified()
        || ip.is_unique_local()
        || ip.is_unicast_link_local()
        || ip.is_multicast())
}
#[async_trait::async_trait]
pub trait PeerManagerForDirectConnector {
async fn list_peers(&self) -> Vec<PeerId>;
@@ -190,34 +208,28 @@ impl DirectConnectorManagerData {
.with_context(|| format!("failed to bind local socket for {}", remote_url))?,
);
let connector_ip = self
.peer_manager
.get_global_ctx()
.global_ctx
.get_stun_info_collector()
.get_stun_info()
.public_ip
.iter()
.find(|x| x.contains(':'))
.ok_or(anyhow::anyhow!(
"failed to get public ipv6 address from stun info"
))?
.parse::<Ipv6Addr>()
.with_context(|| {
format!(
"failed to parse public ipv6 address from stun info: {:?}",
self.peer_manager
.get_global_ctx()
.get_stun_info_collector()
.get_stun_info()
)
})?;
let connector_addr =
SocketAddr::new(IpAddr::V6(connector_ip), local_socket.local_addr()?.port());
.filter_map(|ip| ip.parse::<Ipv6Addr>().ok())
.find(|ip| !self.global_ctx.is_ip_easytier_managed_ipv6(ip));
// ask remote to send v6 hole punch packet
// and no matter what the result is, continue to connect
let _ = self
.remote_send_udp_hole_punch_packet(dst_peer_id, connector_addr, remote_url)
.await;
if let Some(connector_ip) = connector_ip {
let connector_addr =
SocketAddr::new(IpAddr::V6(connector_ip), local_socket.local_addr()?.port());
let _ = self
.remote_send_udp_hole_punch_packet(dst_peer_id, connector_addr, remote_url)
.await;
} else {
tracing::debug!(
?remote_url,
"skip remote IPv6 hole-punch packet; no non-EasyTier public IPv6 in STUN info"
);
}
let udp_connector = UdpTunnelConnector::new(remote_url.clone());
let remote_addr = SocketAddr::from_url(remote_url.clone(), IpVersion::V6).await?;
@@ -479,14 +491,7 @@ impl DirectConnectorManagerData {
.iter()
.chain(ip_list.public_ipv6.iter())
.filter_map(|x| Ipv6Addr::from_str(&x.to_string()).ok())
.filter(|x| {
TESTING.load(Ordering::Relaxed)
|| (!x.is_loopback()
&& !x.is_unspecified()
&& !x.is_unique_local()
&& !x.is_unicast_link_local()
&& !x.is_multicast())
})
.filter(|x| is_usable_public_ipv6_candidate(x, &self.global_ctx))
.collect::<HashSet<_>>()
.iter()
.for_each(|ip| {
@@ -515,6 +520,11 @@ impl DirectConnectorManagerData {
);
}
});
} else if self.global_ctx.is_ip_easytier_managed_ipv6(s_addr.ip()) {
tracing::debug!(
?listener,
"skip EasyTier-managed IPv6 as direct-connect target"
);
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
if self
.global_ctx
@@ -790,9 +800,10 @@ impl DirectConnectorManager {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::{collections::BTreeSet, sync::Arc};
use crate::{
common::global_ctx::tests::get_mock_global_ctx,
connector::direct::{
DirectConnectorManager, DirectConnectorManagerData, DstListenerUrlBlackListItem,
},
@@ -802,12 +813,41 @@ mod tests {
wait_route_appear_with_cost,
},
proto::peer_rpc::GetIpListResponse,
tunnel::{IpScheme, TunnelScheme, matches_scheme},
};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use super::{TESTING, mapped_listener_port, resolve_mapped_listener_addrs};
#[tokio::test]
async fn public_ipv6_candidate_rejects_easytier_managed_addr_even_in_tests() {
    let global_ctx = get_mock_global_ctx();
    // Register 2001:db8::2 as an EasyTier-managed public IPv6 route.
    let managed_ipv6: cidr::Ipv6Inet = "2001:db8::2/128".parse().unwrap();
    global_ctx.set_public_ipv6_routes(BTreeSet::from([managed_ipv6]));

    // The managed address must be rejected even in testing mode.
    let managed_addr = "2001:db8::2".parse().unwrap();
    assert!(!super::is_usable_public_ipv6_candidate_with_mode(
        &managed_addr,
        &global_ctx,
        true,
    ));

    // Testing mode still accepts otherwise-filtered addresses like loopback.
    let loopback = "::1".parse().unwrap();
    assert!(super::is_usable_public_ipv6_candidate_with_mode(
        &loopback,
        &global_ctx,
        true,
    ));
}
#[test]
fn udp_ipv6_url_matches_hole_punch_branch_condition() {
    // A udp:// URL with an IPv6 host must satisfy both conditions guarding
    // the remote IPv6 hole-punch branch.
    let remote_url: url::Url = "udp://[2001:db8::1]:11010".parse().unwrap();
    let is_udp_scheme = matches_scheme!(remote_url, TunnelScheme::Ip(IpScheme::Udp));
    let has_ipv6_host = matches!(remote_url.host(), Some(url::Host::Ipv6(_)));
    assert!(is_udp_scheme && has_ipv6_host);
}
#[test]
fn mapped_listener_port_uses_ip_scheme_defaults() {
assert_eq!(
+180 -15
View File
@@ -1,19 +1,17 @@
use std::{
net::{SocketAddr, SocketAddrV4, SocketAddrV6},
sync::Arc,
};
use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, idn, network::IPCollector},
common::{dns::socket_addrs, error::Error, global_ctx::ArcGlobalCtx, idn},
connector::dns_connector::DnsTunnelConnector,
proto::common::PeerFeatureFlag,
tunnel::{
self, FromUrl, IpScheme, IpVersion, TunnelConnector, TunnelError, TunnelScheme,
self, IpScheme, IpVersion, TunnelConnector, TunnelError, TunnelScheme,
ring::RingTunnelConnector, tcp::TcpTunnelConnector, udp::UdpTunnelConnector,
},
utils::BoxExt,
};
use http_connector::HttpTunnelConnector;
use rand::seq::SliceRandom;
pub mod direct;
pub mod manual;
@@ -56,7 +54,7 @@ pub(crate) fn should_background_p2p_with_peer(
async fn set_bind_addr_for_peer_connector(
connector: &mut (impl TunnelConnector + ?Sized),
is_ipv4: bool,
ip_collector: &Arc<IPCollector>,
global_ctx: &ArcGlobalCtx,
) {
if cfg!(any(
target_os = "android",
@@ -69,7 +67,7 @@ async fn set_bind_addr_for_peer_connector(
return;
}
let ips = ip_collector.collect_ip_addrs().await;
let ips = global_ctx.get_ip_collector().collect_ip_addrs().await;
if is_ipv4 {
let mut bind_addrs = vec![];
for ipv4 in ips.interface_ipv4s {
@@ -80,7 +78,11 @@ async fn set_bind_addr_for_peer_connector(
} else {
let mut bind_addrs = vec![];
for ipv6 in ips.interface_ipv6s.iter().chain(ips.public_ipv6.iter()) {
let socket_addr = SocketAddrV6::new(std::net::Ipv6Addr::from(*ipv6), 0, 0, 0).into();
let ipv6 = std::net::Ipv6Addr::from(*ipv6);
if global_ctx.is_ip_easytier_managed_ipv6(&ipv6) {
continue;
}
let socket_addr = SocketAddrV6::new(ipv6, 0, 0, 0).into();
bind_addrs.push(socket_addr);
}
connector.set_bind_addrs(bind_addrs);
@@ -88,6 +90,144 @@ async fn set_bind_addr_for_peer_connector(
let _ = connector;
}
struct ResolvedConnectorAddr {
addr: SocketAddr,
ip_version: IpVersion,
}
/// Default port for an IP-based connector URL, derived from its scheme.
///
/// Returns `None` when the URL's scheme does not map to an IP tunnel scheme
/// (e.g. ring/dns connectors), so the resolver treats the port as absent.
fn connector_default_port(url: &url::Url) -> Option<u16> {
    url.try_into()
        .ok()
        .and_then(|s: TunnelScheme| s.try_into().ok())
        .map(IpScheme::default_port)
}
/// True when `addr` is acceptable under the requested IP version.
fn addr_matches_ip_version(addr: &SocketAddr, ip_version: IpVersion) -> bool {
    // `Both` places no constraint; otherwise the address family must agree.
    match ip_version {
        IpVersion::Both => true,
        IpVersion::V4 => matches!(addr, SocketAddr::V4(_)),
        IpVersion::V6 => matches!(addr, SocketAddr::V6(_)),
    }
}
/// Narrow a `Both` request to a single IP version when every resolved
/// address belongs to one family; explicit V4/V6 requests pass through.
fn infer_effective_ip_version(addrs: &[SocketAddr], requested_ip_version: IpVersion) -> IpVersion {
    if !matches!(requested_ip_version, IpVersion::Both) {
        return requested_ip_version;
    }
    if addrs.iter().all(SocketAddr::is_ipv4) {
        IpVersion::V4
    } else if addrs.iter().all(SocketAddr::is_ipv6) {
        IpVersion::V6
    } else {
        IpVersion::Both
    }
}
/// Ask the OS which local IPv6 address it would pick to reach `dst_addr`,
/// and return it only when that source is an EasyTier-managed address.
///
/// `connect` on a UDP socket performs no network traffic; it only fixes the
/// route (and thus the local source address) for the destination.
async fn easytier_managed_ipv6_source_for_dst(
    global_ctx: &ArcGlobalCtx,
    dst_addr: SocketAddrV6,
) -> Result<Option<Ipv6Addr>, Error> {
    let socket = {
        // Bind inside the instance's network namespace so the route lookup
        // reflects the namespace this connector will actually use.
        let _g = global_ctx.net_ns.guard();
        tokio::net::UdpSocket::bind("[::]:0").await?
    };
    socket.connect(SocketAddr::V6(dst_addr)).await?;
    // A non-IPv6 local address cannot be one of our managed IPv6 addresses.
    let IpAddr::V6(local_ip) = socket.local_addr()?.ip() else {
        return Ok(None);
    };
    Ok(global_ctx
        .is_ip_easytier_managed_ipv6(&local_ip)
        .then_some(local_ip))
}
/// Decide whether the IPv6 candidate `v6_addr` for `url` must be rejected.
///
/// Returns `Ok(Some(reason))` when the candidate is unusable, `Ok(None)`
/// when it is fine, and `Err` when source validation failed and
/// `skip_source_validation_errors` is false. Rejection cases:
/// 1. the destination itself is an EasyTier-managed IPv6 address, or
/// 2. the OS would route to it via an EasyTier-managed local source — i.e.
///    the underlay connection would ride on the overlay.
async fn ipv6_connector_reject_reason(
    url: &url::Url,
    global_ctx: &ArcGlobalCtx,
    v6_addr: SocketAddrV6,
    skip_source_validation_errors: bool,
) -> Result<Option<String>, Error> {
    if global_ctx.is_ip_easytier_managed_ipv6(v6_addr.ip()) {
        return Ok(Some(format!(
            "{} resolves to EasyTier-managed IPv6 {}",
            url,
            v6_addr.ip()
        )));
    }

    match easytier_managed_ipv6_source_for_dst(global_ctx, v6_addr).await {
        Ok(Some(local_ip)) => Ok(Some(format!(
            "{} would use EasyTier-managed IPv6 {} as local source for {}",
            url, local_ip, v6_addr
        ))),
        Ok(None) => Ok(None),
        // When the caller may still fall back to other address families, a
        // validation failure merely disqualifies this one candidate.
        Err(err) if skip_source_validation_errors => Ok(Some(format!(
            "{} IPv6 candidate {} could not be validated: {}",
            url, v6_addr, err
        ))),
        Err(err) => Err(err),
    }
}
/// Resolve `url` to one usable socket address for a connector.
///
/// Resolves all DNS records, filters them by the requested IP version,
/// drops IPv6 candidates that would route the underlay through the overlay
/// (see [`ipv6_connector_reject_reason`]), and picks one surviving address
/// at random. Also reports the effective IP version, narrowing `Both` when
/// all survivors share one family.
///
/// # Errors
/// - `TunnelError::InvalidAddr` when DNS resolution itself fails;
/// - `Error::InvalidUrl` when every candidate was an overlay-backed IPv6;
/// - `TunnelError::NoDnsRecordFound` when filtering leaves nothing.
async fn resolve_connector_socket_addr(
    url: &url::Url,
    global_ctx: &ArcGlobalCtx,
    ip_version: IpVersion,
) -> Result<ResolvedConnectorAddr, Error> {
    let addrs = socket_addrs(url, || connector_default_port(url))
        .await
        .map_err(|e| {
            TunnelError::InvalidAddr(format!(
                "failed to resolve socket addr, url: {}, error: {}",
                url, e
            ))
        })?;

    let mut usable_addrs = Vec::new();
    // Remember why IPv6 candidates were dropped so the final error can say
    // "refused" instead of "no DNS record" when all candidates were managed.
    let mut rejected_ipv6_reason = None;
    // With `Both`, other families may still work, so a failed source-route
    // validation only disqualifies that candidate instead of aborting.
    let skip_source_validation_errors = ip_version == IpVersion::Both;

    for addr in addrs
        .into_iter()
        .filter(|addr| addr_matches_ip_version(addr, ip_version))
    {
        if let SocketAddr::V6(v6_addr) = addr
            && let Some(reason) = ipv6_connector_reject_reason(
                url,
                global_ctx,
                v6_addr,
                skip_source_validation_errors,
            )
            .await?
        {
            rejected_ipv6_reason = Some(reason);
            continue;
        }
        usable_addrs.push(addr);
    }

    if usable_addrs.is_empty() {
        if let Some(reason) = rejected_ipv6_reason {
            return Err(Error::InvalidUrl(format!(
                "{}, refusing overlay-backed underlay connection",
                reason
            )));
        }
        return Err(Error::TunnelError(TunnelError::NoDnsRecordFound(
            ip_version,
        )));
    }

    let effective_ip_version = infer_effective_ip_version(&usable_addrs, ip_version);
    // Random choice spreads connections across all resolved records.
    let addr = usable_addrs
        .choose(&mut rand::thread_rng())
        .copied()
        .ok_or_else(|| Error::TunnelError(TunnelError::NoDnsRecordFound(ip_version)))?;

    Ok(ResolvedConnectorAddr {
        addr,
        ip_version: effective_ip_version,
    })
}
pub async fn create_connector_by_url(
url: &str,
global_ctx: &ArcGlobalCtx,
@@ -98,9 +238,11 @@ pub async fn create_connector_by_url(
let scheme = (&url)
.try_into()
.map_err(|_| TunnelError::InvalidProtocol(url.scheme().to_owned()))?;
let mut effective_connector_ip_version = ip_version;
let mut connector: Box<dyn TunnelConnector + 'static> = match scheme {
TunnelScheme::Ip(scheme) => {
let dst_addr = SocketAddr::from_url(url.clone(), ip_version).await?;
let resolved_addr = resolve_connector_socket_addr(&url, global_ctx, ip_version).await?;
effective_connector_ip_version = resolved_addr.ip_version;
let mut connector: Box<dyn TunnelConnector> = match scheme {
IpScheme::Tcp => TcpTunnelConnector::new(url).boxed(),
IpScheme::Udp => UdpTunnelConnector::new(url).boxed(),
@@ -125,11 +267,12 @@ pub async fn create_connector_by_url(
#[cfg(feature = "faketcp")]
IpScheme::FakeTcp => tunnel::fake_tcp::FakeTcpTunnelConnector::new(url).boxed(),
};
connector.set_resolved_addr(resolved_addr.addr);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
&mut connector,
dst_addr.is_ipv4(),
&global_ctx.get_ip_collector(),
resolved_addr.addr.is_ipv4(),
global_ctx,
)
.await;
}
@@ -151,16 +294,38 @@ pub async fn create_connector_by_url(
DnsTunnelConnector::new(url, global_ctx.clone()).boxed()
}
};
connector.set_ip_version(ip_version);
connector.set_ip_version(effective_connector_ip_version);
Ok(connector)
}
#[cfg(test)]
mod tests {
use crate::proto::common::PeerFeatureFlag;
use std::collections::BTreeSet;
use super::{should_background_p2p_with_peer, should_try_p2p_with_peer};
use crate::{
common::global_ctx::tests::get_mock_global_ctx, proto::common::PeerFeatureFlag,
tunnel::IpVersion,
};
use super::{
create_connector_by_url, should_background_p2p_with_peer, should_try_p2p_with_peer,
};
#[tokio::test]
async fn connector_rejects_easytier_managed_ipv6_destination() {
    let global_ctx = get_mock_global_ctx();
    // Mark 2001:db8::2 as an EasyTier-managed public IPv6 route.
    let public_route: cidr::Ipv6Inet = "2001:db8::2/128".parse().unwrap();
    global_ctx.set_public_ipv6_routes(BTreeSet::from([public_route]));

    // Dialing that address as an underlay destination must be refused.
    let result =
        create_connector_by_url("tcp://[2001:db8::2]:11010", &global_ctx, IpVersion::V6).await;
    assert!(matches!(
        result,
        Err(crate::common::error::Error::InvalidUrl(_))
    ));
}
#[test]
fn lazy_background_p2p_requires_need_p2p() {
+41 -17
View File
@@ -6,6 +6,7 @@ use std::{
use crossbeam::atomic::AtomicCell;
use dashmap::{DashMap, DashSet};
use guarden::defer;
use rand::seq::SliceRandom as _;
use tokio::{net::UdpSocket, sync::Mutex, task::JoinSet};
use tracing::{Instrument, Level, instrument};
@@ -15,7 +16,6 @@ use crate::{
common::{
PeerId, error::Error, global_ctx::ArcGlobalCtx, join_joinset_background, netns::NetNS, upnp,
},
defer,
peers::peer_manager::PeerManager,
proto::common::NatType,
tunnel::{
@@ -719,25 +719,31 @@ async fn check_udp_socket_local_addr(
) -> Result<(), Error> {
let socket = UdpSocket::bind("0.0.0.0:0").await?;
socket.connect(remote_mapped_addr).await?;
if let Ok(local_addr) = socket.local_addr() {
// local_addr should not be equal to an EasyTier-managed virtual/public address.
match local_addr.ip() {
IpAddr::V4(ip) => {
if global_ctx.get_ipv4().map(|ip| ip.address()) == Some(ip) {
return Err(anyhow::anyhow!("local address is virtual ipv4").into());
}
}
IpAddr::V6(ip) => {
if global_ctx.is_ip_local_ipv6(&ip) {
return Err(anyhow::anyhow!("local address is easytier-managed ipv6").into());
}
}
}
if let Ok(local_addr) = socket.local_addr()
&& let Some(err) = easytier_managed_local_addr_error(&global_ctx, local_addr)
{
return Err(anyhow::anyhow!(err).into());
}
Ok(())
}
/// Return an error message when `local_addr` is an EasyTier-managed
/// virtual/public address, `None` when it is safe to use.
fn easytier_managed_local_addr_error(
    global_ctx: &ArcGlobalCtx,
    local_addr: SocketAddr,
) -> Option<&'static str> {
    // local_addr should not be equal to an EasyTier-managed virtual/public address.
    match local_addr.ip() {
        IpAddr::V4(ip) => {
            let virtual_ipv4 = global_ctx.get_ipv4().map(|inet| inet.address());
            (virtual_ipv4 == Some(ip)).then_some("local address is virtual ipv4")
        }
        IpAddr::V6(ip) => global_ctx
            .is_ip_easytier_managed_ipv6(&ip)
            .then_some("local address is easytier-managed ipv6"),
    }
}
pub(crate) async fn try_connect_with_socket(
global_ctx: ArcGlobalCtx,
socket: Arc<UdpSocket>,
@@ -763,11 +769,29 @@ pub(crate) async fn try_connect_with_socket(
#[cfg(test)]
mod tests {
use std::{collections::BTreeSet, net::SocketAddr};
use crate::common::global_ctx::tests::get_mock_global_ctx;
use super::{
MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS, should_create_public_listener,
should_retry_public_listener_selection,
MAX_PUBLIC_UDP_HOLE_PUNCH_LISTENERS, easytier_managed_local_addr_error,
should_create_public_listener, should_retry_public_listener_selection,
};
#[tokio::test]
async fn local_addr_check_rejects_easytier_public_ipv6_route() {
    let global_ctx = get_mock_global_ctx();
    // 2001:db8::4 is advertised as an EasyTier-managed public IPv6 route.
    let public_route: cidr::Ipv6Inet = "2001:db8::4/128".parse().unwrap();
    global_ctx.set_public_ipv6_routes(BTreeSet::from([public_route]));

    let local_addr: SocketAddr = "[2001:db8::4]:1234".parse().unwrap();
    let err = easytier_managed_local_addr_error(&global_ctx, local_addr);
    assert_eq!(Some("local address is easytier-managed ipv6"), err);
}
#[test]
fn listener_selection_prefers_reuse_before_cap() {
assert!(!should_create_public_listener(1, true, true, false, false));
@@ -9,6 +9,7 @@ use std::{
};
use anyhow::Context;
use guarden::defer;
use rand::{Rng, seq::SliceRandom};
use tokio::{net::UdpSocket, sync::RwLock};
use tokio_util::task::AbortOnDropHandle;
@@ -22,7 +23,6 @@ use crate::{
},
handle_rpc_result,
},
defer,
peers::peer_manager::PeerManager,
proto::{
peer_rpc::{
+50 -11
View File
@@ -12,7 +12,6 @@ use crate::{
constants::EASYTIER_VERSION,
log,
},
defer,
instance_manager::NetworkInstanceManager,
launcher::add_proxy_network_to_config,
proto::common::{CompressionAlgoPb, SecureModeConfig},
@@ -23,6 +22,7 @@ use crate::{
use anyhow::Context;
use cidr::IpCidr;
use clap::{CommandFactory, Parser};
use guarden::defer;
use rust_i18n::t;
use std::{
net::{IpAddr, SocketAddr},
@@ -37,6 +37,38 @@ use crate::tunnel::IpScheme;
#[cfg(feature = "jemalloc-prof")]
use jemalloc_ctl::{Access as _, AsName as _, epoch, stats};
/// List the compression algorithms this build supports, as a human-readable
/// comma-separated string used in CLI help and error messages.
///
/// The result depends only on which Cargo features were enabled at compile
/// time; "none" is always available.
fn supported_compression_algorithms() -> &'static str {
    // Exactly one of these mutually exclusive cfg branches is compiled in.
    #[cfg(all(feature = "zstd", feature = "lzo"))]
    return "none, zstd, lzo";
    #[cfg(all(feature = "zstd", not(feature = "lzo")))]
    return "none, zstd";
    #[cfg(all(feature = "lzo", not(feature = "zstd")))]
    return "none, lzo";
    #[cfg(not(any(feature = "zstd", feature = "lzo")))]
    return "none";
}
fn compression_help() -> String {
t!(
"core_clap.compression",
algorithms = supported_compression_algorithms()
)
.to_string()
}
/// Parse a user-supplied compression algorithm name into its protobuf enum.
///
/// Only algorithms compiled into this build are accepted; "none" is always
/// valid. On an unknown (or compiled-out) name, returns an error listing the
/// algorithms this build actually supports.
fn parse_compression_algorithm(compression: &str) -> anyhow::Result<CompressionAlgoPb> {
    let algo = match compression {
        "none" => Some(CompressionAlgoPb::None),
        #[cfg(feature = "zstd")]
        "zstd" => Some(CompressionAlgoPb::Zstd),
        #[cfg(feature = "lzo")]
        "lzo" => Some(CompressionAlgoPb::Lzo),
        _ => None,
    };
    algo.ok_or_else(|| {
        anyhow::anyhow!(
            "unknown compression algorithm: {}, supported: {}",
            compression,
            supported_compression_algorithms()
        )
    })
}
#[cfg(target_os = "windows")]
windows_service::define_windows_service!(ffi_service_main, win_service_main);
@@ -513,7 +545,7 @@ struct NetworkOptions {
#[arg(
long,
env = "ET_COMPRESSION",
help = t!("core_clap.compression").to_string(),
help = compression_help(),
)]
compression: Option<String>,
@@ -1106,15 +1138,7 @@ impl NetworkOptions {
f.need_p2p = self.need_p2p.unwrap_or(f.need_p2p);
f.multi_thread = self.multi_thread.unwrap_or(f.multi_thread);
if let Some(compression) = &self.compression {
f.data_compress_algo = match compression.as_str() {
"none" => CompressionAlgoPb::None,
"zstd" => CompressionAlgoPb::Zstd,
_ => panic!(
"unknown compression algorithm: {}, supported: none, zstd",
compression
),
}
.into();
f.data_compress_algo = parse_compression_algorithm(compression)?.into();
}
f.bind_device = self.bind_device.unwrap_or(f.bind_device);
f.enable_kcp_proxy = self.enable_kcp_proxy.unwrap_or(f.enable_kcp_proxy);
@@ -1627,6 +1651,21 @@ async fn validate_config(cli: &Cli) -> anyhow::Result<()> {
mod tests {
use super::*;
#[test]
// The rendered help text must embed the build-dependent algorithm list so the
// CLI never advertises algorithms that were compiled out.
fn test_compression_help_uses_supported_algorithms() {
assert!(compression_help().contains(supported_compression_algorithms()));
}
#[test]
// An unsupported algorithm name must fail, and the error message must name
// both the rejected input and the algorithms this build supports.
fn test_parse_compression_algorithm_rejects_unknown() {
let err = parse_compression_algorithm("snappy")
.unwrap_err()
.to_string();
assert!(err.contains("snappy"));
assert!(err.contains(supported_compression_algorithms()));
}
#[test]
fn test_parse_listeners() {
type IpSchemeMap = fn(&IpScheme) -> String;
+43 -3
View File
@@ -74,7 +74,7 @@ use easytier::{
common::{NatType, PortForwardConfigPb, SocketType},
peer_rpc::{GetGlobalPeerMapRequest, PeerCenterRpc, PeerCenterRpcClientFactory},
rpc_impl::standalone::StandAloneClient,
rpc_types::controller::BaseController,
rpc_types::{controller::BaseController, error::Error as RpcError},
},
tunnel::{TunnelScheme, tcp::TcpTunnelConnector},
utils::{PeerRoutePair, string::cost_to_str},
@@ -526,6 +526,40 @@ type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = Result<T, Error>> + 'a>
type ForeignNetworkMap = BTreeMap<String, ForeignNetworkEntryPb>;
type GlobalForeignNetworkMap = BTreeMap<u32, list_global_foreign_network_response::ForeignNetworks>;
/// Return true when `error` is an `InvalidServiceKey` for the
/// `WebClientService` RPC service, i.e. the remote daemon simply does not
/// expose that service.
///
/// The service name may arrive either raw (`WebClientService`) or serialized
/// with surrounding quotes (`"WebClientService"`), so quotes are stripped
/// before comparing.
fn is_missing_web_client_service(error: &RpcError) -> bool {
    match error {
        RpcError::InvalidServiceKey(service_name, _) => {
            service_name.trim_matches('"') == "WebClientService"
        }
        _ => false,
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Raw (unquoted) service name is recognized.
#[test]
fn missing_web_client_service_matches_raw_service_name() {
let error = RpcError::InvalidServiceKey("WebClientService".to_string(), "".to_string());
assert!(is_missing_web_client_service(&error));
}
// Serialized form with embedded quotes is also recognized (quotes are stripped).
#[test]
fn missing_web_client_service_matches_serialized_service_name() {
let error = RpcError::InvalidServiceKey("\"WebClientService\"".to_string(), "".to_string());
assert!(is_missing_web_client_service(&error));
}
// InvalidServiceKey for any other service must not be treated as "missing web client".
#[test]
fn missing_web_client_service_rejects_other_services() {
let error = RpcError::InvalidServiceKey("PeerManageRpc".to_string(), "".to_string());
assert!(!is_missing_web_client_service(&error));
}
}
#[derive(serde::Serialize)]
struct PeerListData {
node_info: NodeInfo,
@@ -599,9 +633,15 @@ impl<'a> CommandHandler<'a> {
}
let client = self.get_manage_client().await?;
let inst_ids = client
let list_response = match client
.list_network_instance(BaseController::default(), ListNetworkInstanceRequest {})
.await?
.await
{
Ok(response) => response,
Err(error) if is_missing_web_client_service(&error) => return Ok(None),
Err(error) => return Err(error.into()),
};
let inst_ids = list_response
.inst_ids
.into_iter()
.map(uuid::Uuid::from)
+2 -1
View File
@@ -7,6 +7,7 @@ use std::{
use anyhow::Context;
use bytes::Bytes;
use dashmap::DashMap;
use guarden::defer;
use kcp_sys::{
endpoint::{ConnId, KcpEndpoint, KcpPacketReceiver},
ffi_safe::KcpConfig,
@@ -359,7 +360,7 @@ impl KcpProxyDst {
transport_type: TcpProxyEntryTransportType::Kcp.into(),
},
);
crate::defer! {
defer! {
proxy_entries.remove(&conn_id);
if proxy_entries.capacity() - proxy_entries.len() > 16 {
proxy_entries.shrink_to_fit();
+2 -1
View File
@@ -24,6 +24,7 @@ use bytes::{BufMut, Bytes, BytesMut};
use dashmap::DashMap;
use derivative::Derivative;
use derive_more::{Constructor, Deref, DerefMut, From, Into};
use guarden::defer;
use prost::Message;
use quinn::udp::{EcnCodepoint, RecvMeta, Transmit};
use quinn::{
@@ -662,7 +663,7 @@ impl QuicStreamReceiver {
transport_type: TcpProxyEntryTransportType::Quic.into(),
},
);
crate::defer! {
defer! {
proxy_entries.remove(&handle);
if proxy_entries.capacity() - proxy_entries.len() > 16 {
proxy_entries.shrink_to_fit();
@@ -1,6 +1,5 @@
// translated from tailscale #32ce1bdb48078ec4cedaeeb5b1b2ff9c0ef61a49
use crate::defer;
use anyhow::{Context, Result};
use dbus::blocking::stdintf::org_freedesktop_dbus::Properties as _;
use std::fs;
@@ -167,6 +166,7 @@ fn new_os_configurator(_interface_name: String) -> Result<()> {
Ok(())
}
use guarden::defer;
use std::io::{self, BufRead, Cursor};
/// 返回 `resolv.conf` 内容的拥有者("systemd-resolved"、"NetworkManager"、"resolvconf" 或空字符串)
+5
View File
@@ -340,6 +340,11 @@ impl InstanceConfigPatcher {
global_ctx.set_ipv6(Some(ipv6.into()));
global_ctx.config.set_ipv6(Some(ipv6.into()));
}
if let Some(disable_relay_data) = patch.disable_relay_data {
let mut flags = global_ctx.get_flags();
flags.disable_relay_data = disable_relay_data;
global_ctx.set_flags(flags);
}
if let Some(enabled) = patch.ipv6_public_addr_provider {
global_ctx.config.set_ipv6_public_addr_provider(enabled);
provider_config_changed = true;
+3 -3
View File
@@ -25,7 +25,7 @@ use crate::{
pub fn create_listener_by_url(
l: &url::Url,
global_ctx: ArcGlobalCtx,
_global_ctx: ArcGlobalCtx,
) -> Result<Box<dyn TunnelListener>, Error> {
Ok(match l.try_into()? {
TunnelScheme::Ip(scheme) => match scheme {
@@ -34,7 +34,7 @@ pub fn create_listener_by_url(
#[cfg(feature = "wireguard")]
IpScheme::Wg => {
use crate::tunnel::wireguard::{WgConfig, WgTunnelListener};
let nid = global_ctx.get_network_identity();
let nid = _global_ctx.get_network_identity();
let wg_config = WgConfig::new_from_network_identity(
&nid.network_name,
&nid.network_secret.unwrap_or_default(),
@@ -43,7 +43,7 @@ pub fn create_listener_by_url(
}
#[cfg(feature = "quic")]
IpScheme::Quic => {
tunnel::quic::QuicTunnelListener::new(l.clone(), global_ctx.clone()).boxed()
tunnel::quic::QuicTunnelListener::new(l.clone(), _global_ctx.clone()).boxed()
}
#[cfg(feature = "websocket")]
IpScheme::Ws | IpScheme::Wss => {
+2 -10
View File
@@ -361,16 +361,8 @@ fn apply_public_ipv6_provider_runtime_state(
let prefix_changed = global_ctx.set_advertised_ipv6_public_addr_prefix(next_prefix);
let next_provider_enabled = matches!(state, PublicIpv6ProviderRuntimeState::Active(_));
let feature_changed = {
let mut feature_flags = global_ctx.get_feature_flags();
if feature_flags.ipv6_public_addr_provider == next_provider_enabled {
false
} else {
feature_flags.ipv6_public_addr_provider = next_provider_enabled;
global_ctx.set_feature_flags(feature_flags);
true
}
};
let feature_changed =
global_ctx.set_ipv6_public_addr_provider_feature_flag(next_provider_enabled);
prefix_changed || feature_changed
}
+5
View File
@@ -816,6 +816,10 @@ impl NetworkConfig {
flags.disable_upnp = disable_upnp;
}
if let Some(disable_relay_data) = self.disable_relay_data {
flags.disable_relay_data = disable_relay_data;
}
if let Some(disable_sym_hole_punching) = self.disable_sym_hole_punching {
flags.disable_sym_hole_punching = disable_sym_hole_punching;
}
@@ -990,6 +994,7 @@ impl NetworkConfig {
result.disable_tcp_hole_punching = Some(flags.disable_tcp_hole_punching);
result.disable_udp_hole_punching = Some(flags.disable_udp_hole_punching);
result.disable_upnp = Some(flags.disable_upnp);
result.disable_relay_data = Some(flags.disable_relay_data);
result.disable_sym_hole_punching = Some(flags.disable_sym_hole_punching);
result.enable_magic_dns = Some(flags.accept_dns);
result.mtu = Some(flags.mtu as i32);
+1 -24
View File
@@ -65,7 +65,7 @@ impl PeerCenterBase {
return Err(Error::Shutdown);
};
rpc_mgr.rpc_server().registry().register(
PeerCenterRpcServer::new(PeerCenterServer::new(self.peer_mgr.my_peer_id())),
PeerCenterRpcServer::new(PeerCenterServer::new()),
&self.peer_mgr.get_global_ctx().get_network_name(),
);
Ok(())
@@ -486,7 +486,6 @@ impl PeerCenterPeerManagerTrait for PeerMapWithPeerRpcManager {
#[cfg(test)]
mod tests {
use crate::{
peer_center::server::get_global_data,
peers::tests::{connect_peer_manager, create_mock_peer_manager, wait_route_appear},
tunnel::common::tests::wait_for_condition,
};
@@ -515,25 +514,6 @@ mod tests {
.await
.unwrap();
let center_peer = PeerCenterBase::select_center_peer(&peer_mgr_a)
.await
.unwrap();
let center_data = get_global_data(center_peer);
// wait center_data has 3 records for 10 seconds
wait_for_condition(
|| async {
if center_data.global_peer_map.len() == 4 {
println!("center data {:#?}", center_data.global_peer_map);
true
} else {
false
}
},
Duration::from_secs(20),
)
.await;
let mut digest = None;
for pc in peer_centers.iter() {
let rpc_service = pc.get_rpc_service();
@@ -578,8 +558,5 @@ mod tests {
route_cost.end_update();
assert!(!route_cost.need_update());
}
let global_digest = get_global_data(center_peer).digest.load();
assert_eq!(digest.as_ref().unwrap(), &global_digest);
}
}
+96 -30
View File
@@ -6,7 +6,6 @@ use std::{
use crossbeam::atomic::AtomicCell;
use dashmap::DashMap;
use once_cell::sync::Lazy;
use tokio::task::JoinSet;
use crate::{
@@ -35,50 +34,41 @@ pub(crate) struct PeerCenterInfoEntry {
update_time: std::time::Instant,
}
#[derive(Default)]
pub(crate) struct PeerCenterServerGlobalData {
pub(crate) global_peer_map: DashMap<SrcDstPeerPair, PeerCenterInfoEntry>,
pub(crate) peer_report_time: DashMap<PeerId, std::time::Instant>,
pub(crate) digest: AtomicCell<Digest>,
}
// a global unique instance for PeerCenterServer
pub(crate) static GLOBAL_DATA: Lazy<DashMap<PeerId, Arc<PeerCenterServerGlobalData>>> =
Lazy::new(DashMap::new);
pub(crate) fn get_global_data(node_id: PeerId) -> Arc<PeerCenterServerGlobalData> {
GLOBAL_DATA
.entry(node_id)
.or_insert_with(|| Arc::new(PeerCenterServerGlobalData::default()))
.value()
.clone()
#[derive(Debug, Default)]
struct PeerCenterServerData {
global_peer_map: DashMap<SrcDstPeerPair, PeerCenterInfoEntry>,
peer_report_time: DashMap<PeerId, std::time::Instant>,
digest: AtomicCell<Digest>,
}
#[derive(Clone, Debug)]
pub struct PeerCenterServer {
// every peer has its own server, so use per-struct dash map is ok.
my_node_id: PeerId,
data: Arc<PeerCenterServerData>,
tasks: Arc<JoinSet<()>>,
}
impl PeerCenterServer {
pub fn new(my_node_id: PeerId) -> Self {
pub fn new() -> Self {
let data = Arc::new(PeerCenterServerData::default());
let weak_data = Arc::downgrade(&data);
let mut tasks = JoinSet::new();
tasks.spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
PeerCenterServer::clean_outdated_peer(my_node_id).await;
let Some(data) = weak_data.upgrade() else {
break;
};
PeerCenterServer::clean_outdated_peer_data(&data).await;
}
});
PeerCenterServer {
my_node_id,
data,
tasks: Arc::new(tasks),
}
}
async fn clean_outdated_peer(my_node_id: PeerId) {
let data = get_global_data(my_node_id);
async fn clean_outdated_peer_data(data: &PeerCenterServerData) {
data.peer_report_time.retain(|_, v| {
std::time::Instant::now().duration_since(*v) < std::time::Duration::from_secs(180)
});
@@ -88,8 +78,7 @@ impl PeerCenterServer {
});
}
fn calc_global_digest(my_node_id: PeerId) -> Digest {
let data = get_global_data(my_node_id);
fn calc_global_digest_data(data: &PeerCenterServerData) -> Digest {
let mut hasher = std::collections::hash_map::DefaultHasher::new();
data.global_peer_map
.iter()
@@ -117,7 +106,7 @@ impl PeerCenterRpc for PeerCenterServer {
tracing::debug!("receive report_peers");
let data = get_global_data(self.my_node_id);
let data = &self.data;
data.peer_report_time
.insert(my_peer_id, std::time::Instant::now());
@@ -134,7 +123,7 @@ impl PeerCenterRpc for PeerCenterServer {
}
data.digest
.store(PeerCenterServer::calc_global_digest(self.my_node_id));
.store(PeerCenterServer::calc_global_digest_data(data));
Ok(ReportPeersResponse::default())
}
@@ -147,7 +136,7 @@ impl PeerCenterRpc for PeerCenterServer {
) -> Result<GetGlobalPeerMapResponse, rpc_types::error::Error> {
let digest = req.digest;
let data = get_global_data(self.my_node_id);
let data = &self.data;
if digest == data.digest.load() && digest != 0 {
return Ok(GetGlobalPeerMapResponse::default());
}
@@ -171,3 +160,80 @@ impl PeerCenterRpc for PeerCenterServer {
})
}
}
#[cfg(test)]
mod tests {
use super::*;
// Cloning a PeerCenterServer must share the same underlying data (Arc-backed),
// so a report through one clone is visible through another.
#[tokio::test]
async fn server_clones_share_instance_data() {
let server = PeerCenterServer::new();
let server_clone = server.clone();
// Report one direct peer (100) on behalf of peer 99 via the original handle.
let mut peers = PeerInfoForGlobalMap::default();
peers
.direct_peers
.insert(100, DirectConnectedPeerInfo { latency_ms: 3 });
server
.report_peers(
BaseController::default(),
ReportPeersRequest {
my_peer_id: 99,
peer_infos: Some(peers),
},
)
.await
.unwrap();
// digest 0 forces a full map response rather than the "unchanged" shortcut.
let resp = server_clone
.get_global_peer_map(
BaseController::default(),
GetGlobalPeerMapRequest { digest: 0 },
)
.await
.unwrap();
assert_eq!(1, resp.global_peer_map.len());
assert!(resp.global_peer_map[&99].direct_peers.contains_key(&100));
}
// Two independently constructed servers must NOT share state — this guards
// against the old process-global map where same peer ids could collide.
#[tokio::test]
async fn independent_server_instances_do_not_share_data() {
let server_a = PeerCenterServer::new();
let server_b = PeerCenterServer::new();
let mut peers = PeerInfoForGlobalMap::default();
peers
.direct_peers
.insert(101, DirectConnectedPeerInfo { latency_ms: 5 });
// Only server_a receives the report.
server_a
.report_peers(
BaseController::default(),
ReportPeersRequest {
my_peer_id: 100,
peer_infos: Some(peers),
},
)
.await
.unwrap();
let resp_a = server_a
.get_global_peer_map(
BaseController::default(),
GetGlobalPeerMapRequest { digest: 0 },
)
.await
.unwrap();
assert_eq!(1, resp_a.global_peer_map.len());
// server_b must still be empty: no cross-instance leakage.
let resp_b = server_b
.get_global_peer_map(
BaseController::default(),
GetGlobalPeerMapRequest { digest: 0 },
)
.await
.unwrap();
assert!(resp_b.global_peer_map.is_empty());
}
}
+41 -2
View File
@@ -94,6 +94,8 @@ impl AclFilter {
/// Preserves connection tracking and rate limiting state across reloads
/// Now lock-free and doesn't require &mut self!
pub fn reload_rules(&self, acl_config: Option<&Acl>) {
self.outbound_allow_records.clear();
let Some(acl_config) = acl_config else {
self.acl_enabled.store(false, Ordering::Relaxed);
return;
@@ -400,14 +402,15 @@ mod tests {
use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
sync::Arc,
time::Instant,
};
use crate::{
common::acl_processor::PacketInfo,
proto::acl::{ChainType, Protocol},
proto::acl::{Acl, ChainType, Protocol},
};
use super::AclFilter;
use super::{AclFilter, OutboundAllowRecord};
fn packet_info(dst_ip: IpAddr) -> PacketInfo {
PacketInfo {
@@ -445,4 +448,40 @@ mod tests {
assert_eq!(chain, ChainType::Forward);
}
#[tokio::test]
// Reloading ACL rules — with a new config or with None (ACL disabled) — must
// drop cached outbound-allow records, otherwise stale allows from the previous
// rule set could bypass the new rules.
async fn reload_rules_clears_outbound_allow_records() {
let filter = AclFilter::new();
// Seed one cached allow record (10.0.0.1:1234 -> 10.0.0.2:80/tcp).
filter.outbound_allow_records.insert(
OutboundAllowRecord {
src_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)),
dst_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)),
src_port: Some(1234),
dst_port: Some(80),
protocol: Protocol::Tcp,
},
Instant::now(),
);
assert_eq!(filter.outbound_allow_records.len(), 1);
// Reload with a (default) config: cache must be emptied.
filter.reload_rules(Some(&Acl::default()));
assert_eq!(filter.outbound_allow_records.len(), 0);
// Seed again with a different flow direction.
filter.outbound_allow_records.insert(
OutboundAllowRecord {
src_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)),
dst_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)),
src_port: Some(4321),
dst_port: Some(443),
protocol: Protocol::Tcp,
},
Instant::now(),
);
assert_eq!(filter.outbound_allow_records.len(), 1);
// Reload with None (ACL disabled): cache must also be emptied.
filter.reload_rules(None);
assert_eq!(filter.outbound_allow_records.len(), 0);
}
}
+301 -20
View File
@@ -56,7 +56,7 @@ use super::{
route_trait::NextHopPolicy,
traffic_metrics::{
InstanceLabelKind, LogicalTrafficMetrics, TrafficKind, TrafficMetricRecorder,
route_peer_info_instance_id, traffic_kind,
is_relay_data_packet_type, route_peer_info_instance_id, traffic_kind,
},
};
@@ -69,11 +69,16 @@ pub trait GlobalForeignNetworkAccessor: Send + Sync + 'static {
struct ForeignNetworkEntry {
my_peer_id: PeerId,
// Node-global runtime flags, such as disable_relay_data, live on the parent
// context. The foreign context is scoped to the foreign network's OSPF view.
parent_global_ctx: ArcGlobalCtx,
global_ctx: ArcGlobalCtx,
network: NetworkIdentity,
peer_map: Arc<PeerMap>,
relay_peer_map: Arc<RelayPeerMap>,
peer_session_store: Arc<PeerSessionStore>,
// Static per-network permission from the whitelist check. disable_relay_data
// is the node-wide runtime override layered on top of this value.
relay_data: bool,
pm_packet_sender: Mutex<Option<PacketRecvChan>>,
@@ -82,7 +87,7 @@ struct ForeignNetworkEntry {
packet_recv: Mutex<Option<PacketRecvChanReceiver>>,
bps_limiter: Arc<TokenBucket>,
bps_limiter: Option<Arc<TokenBucket>>,
peer_center: Arc<PeerCenterInstance>,
@@ -186,14 +191,16 @@ impl ForeignNetworkEntry {
);
let relay_bps_limit = global_ctx.config.get_flags().foreign_relay_bps_limit;
let limiter_config = LimiterConfig {
burst_rate: None,
bps: Some(relay_bps_limit),
fill_duration_ms: None,
};
let bps_limiter = global_ctx
.token_bucket_manager()
.get_or_create(&network.network_name, limiter_config.into());
let bps_limiter = (relay_bps_limit != u64::MAX).then(|| {
let limiter_config = LimiterConfig {
burst_rate: None,
bps: Some(relay_bps_limit),
fill_duration_ms: None,
};
global_ctx
.token_bucket_manager()
.get_or_create(&network.network_name, limiter_config.into())
});
let peer_center = Arc::new(PeerCenterInstance::new(Arc::new(
PeerMapWithPeerRpcManager {
@@ -205,6 +212,7 @@ impl ForeignNetworkEntry {
Self {
my_peer_id,
parent_global_ctx: global_ctx.clone(),
global_ctx: foreign_global_ctx,
network,
peer_map,
@@ -231,6 +239,27 @@ impl ForeignNetworkEntry {
}
}
/// Compute the `avoid_relay_data` feature flag this foreign-network entry
/// should advertise.
///
/// A network that is not allowed to relay data always advertises avoidance;
/// otherwise the node-wide preference from the parent context is inherited.
fn desired_avoid_relay_data_feature_flag(
parent_global_ctx: &ArcGlobalCtx,
relay_data: bool,
) -> bool {
if relay_data {
// Relaying is permitted for this network: follow the parent's setting.
parent_global_ctx.get_feature_flags().avoid_relay_data
} else {
// Relaying is denied by the whitelist: always advertise avoidance.
true
}
}
/// Propagate the parent context's relay-data preference into this entry's
/// foreign context, combined with the per-network `relay_data` permission.
///
/// Returns true when the foreign context's advertised flag actually changed,
/// false when it was already at the desired value (no-op).
fn sync_parent_relay_data_feature_flag(
parent_global_ctx: &ArcGlobalCtx,
global_ctx: &ArcGlobalCtx,
relay_data: bool,
) -> bool {
let desired = Self::desired_avoid_relay_data_feature_flag(parent_global_ctx, relay_data);
if global_ctx.get_feature_flags().avoid_relay_data == desired {
// Already in sync — avoid a redundant write (and any change notification).
false
} else {
global_ctx.set_avoid_relay_data_preference(desired)
}
}
fn build_foreign_global_ctx(
network: &NetworkIdentity,
global_ctx: ArcGlobalCtx,
@@ -258,10 +287,9 @@ impl ForeignNetworkEntry {
let mut feature_flag = global_ctx.get_feature_flags();
feature_flag.is_public_server = true;
if !relay_data {
feature_flag.avoid_relay_data = true;
}
foreign_global_ctx.set_feature_flags(feature_flag);
feature_flag.avoid_relay_data =
Self::desired_avoid_relay_data_feature_flag(&global_ctx, relay_data);
foreign_global_ctx.set_base_advertised_feature_flags(feature_flag);
for u in global_ctx.get_running_listeners().into_iter() {
foreign_global_ctx.add_running_listener(u);
@@ -412,6 +440,7 @@ impl ForeignNetworkEntry {
let peer_map = self.peer_map.clone();
let relay_peer_map = self.relay_peer_map.clone();
let traffic_metrics = self.traffic_metrics.clone();
let parent_global_ctx = self.parent_global_ctx.clone();
let relay_data = self.relay_data;
let pm_sender = self.pm_packet_sender.lock().await.take().unwrap();
let network_name = self.network.network_name.clone();
@@ -497,14 +526,21 @@ impl ForeignNetworkEntry {
"ignore packet in foreign network"
);
} else {
if packet_type == PacketType::Data as u8
|| packet_type == PacketType::KcpSrc as u8
|| packet_type == PacketType::KcpDst as u8
{
if !relay_data {
if is_relay_data_packet_type(packet_type) {
let disable_relay_data = parent_global_ctx.flags_arc().disable_relay_data;
if !relay_data || disable_relay_data {
tracing::debug!(
?from_peer_id,
?to_peer_id,
packet_type,
disable_relay_data,
"drop foreign network relay data"
);
continue;
}
if !bps_limiter.try_consume(len.into()) {
if let Some(bps_limiter) = bps_limiter.as_ref()
&& !bps_limiter.try_consume(len.into())
{
continue;
}
}
@@ -589,10 +625,31 @@ impl ForeignNetworkEntry {
});
}
// Spawn a background task that keeps this entry's advertised avoid_relay_data
// feature flag in sync with the parent (node-level) context, re-evaluating on
// every parent event. The loop body runs once immediately on start, so the
// initial state is synced before the first event arrives.
async fn run_parent_feature_flag_sync_routine(&self) {
let parent_global_ctx = self.parent_global_ctx.clone();
let global_ctx = self.global_ctx.clone();
let relay_data = self.relay_data;
self.tasks.lock().await.spawn(async move {
let mut parent_events = parent_global_ctx.subscribe();
loop {
ForeignNetworkEntry::sync_parent_relay_data_feature_flag(
&parent_global_ctx,
&global_ctx,
relay_data,
);
// recv() errors on lag or close; re-subscribe so later events are
// still observed. NOTE(review): if the parent event channel were
// permanently closed this re-subscribe loop could spin without
// waiting — presumably the sender lives as long as the parent
// context; confirm.
if parent_events.recv().await.is_err() {
parent_events = parent_global_ctx.subscribe();
}
}
});
}
// One-time startup of this foreign-network entry: wire routing, start the
// packet pump and background maintenance routines, then bring up RPC and the
// peer center. Order matters: the route/packet plumbing must exist before RPC
// and peer-center traffic can flow.
async fn prepare(&self, accessor: Box<dyn GlobalForeignNetworkAccessor>) {
self.prepare_route(accessor).await;
self.start_packet_recv().await;
self.run_relay_session_gc_routine().await;
self.run_parent_feature_flag_sync_routine().await;
self.peer_rpc.run();
self.peer_center.init().await;
}
@@ -660,6 +717,7 @@ impl ForeignNetworkManagerData {
fn remove_network(&self, network_name: &String) {
let _l = self.lock.lock().unwrap();
if let Some(old) = self.network_peer_maps.remove(network_name) {
old.1.traffic_metrics.clear_peer_cache();
let to_remove_peers = old.1.peer_map.list_peers();
for p in to_remove_peers {
self.peer_network_map.remove_if(&p, |_, v| {
@@ -669,6 +727,9 @@ impl ForeignNetworkManagerData {
}
}
self.network_peer_last_update.remove(network_name);
shrink_dashmap(&self.peer_network_map, None);
shrink_dashmap(&self.network_peer_maps, None);
shrink_dashmap(&self.network_peer_last_update, None);
}
#[allow(clippy::too_many_arguments)]
@@ -941,12 +1002,14 @@ impl ForeignNetworkManager {
async fn start_event_handler(&self, entry: &ForeignNetworkEntry) {
let data = self.data.clone();
let network_name = entry.network.network_name.clone();
let traffic_metrics = entry.traffic_metrics.clone();
let mut s = entry.global_ctx.subscribe();
self.tasks.lock().unwrap().spawn(async move {
while let Ok(e) = s.recv().await {
match &e {
GlobalCtxEvent::PeerRemoved(peer_id) => {
tracing::info!(?e, "remove peer from foreign network manager");
traffic_metrics.remove_peer(*peer_id);
data.remove_peer(*peer_id, &network_name);
data.network_peer_last_update
.insert(network_name.clone(), SystemTime::now());
@@ -965,6 +1028,7 @@ impl ForeignNetworkManager {
}
// if lagged or recv done just remove the network
tracing::error!("global event handler at foreign network manager exit");
traffic_metrics.clear_peer_cache();
data.remove_network(&network_name);
});
}
@@ -1397,6 +1461,92 @@ pub mod tests {
);
}
#[tokio::test]
// When the public-server node sets disable_relay_data at runtime, foreign
// network data packets must be dropped (not forwarded), and the node must
// advertise avoid_relay_data to its foreign peers.
async fn disable_relay_data_blocks_foreign_network_transit_data() {
// One relay/center node plus two peers of foreign network "net1" that can
// only reach each other through the center.
let pm_center = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
let pma_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
let pmb_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
connect_peer_manager(pma_net1.clone(), pm_center.clone()).await;
connect_peer_manager(pmb_net1.clone(), pm_center.clone()).await;
wait_route_appear(pma_net1.clone(), pmb_net1.clone())
.await
.unwrap();
// Flip disable_relay_data at runtime and announce the config change.
let mut flags = pm_center.get_global_ctx().get_flags();
flags.disable_relay_data = true;
pm_center.get_global_ctx().set_flags(flags);
pm_center
.get_global_ctx()
.issue_event(GlobalCtxEvent::ConfigPatched(Default::default()));
let center_peer_id = pm_center
.get_foreign_network_manager()
.get_network_peer_id("net1")
.unwrap();
// The toggle must propagate: peer A should see the center's route for net1
// advertising avoid_relay_data.
wait_for_condition(
|| {
let pma_net1 = pma_net1.clone();
async move {
pma_net1.list_routes().await.iter().any(|route| {
route.peer_id == center_peer_id
&& route
.feature_flag
.as_ref()
.map(|flag| flag.avoid_relay_data)
.unwrap_or(false)
})
}
},
Duration::from_secs(5),
)
.await;
// Snapshot forwarded-traffic metrics before sending transit data.
let network_labels =
LabelSet::new().with_label_type(LabelType::NetworkName("net1".to_string()));
let forwarded_bytes_before = metric_value(
&pm_center,
MetricName::TrafficBytesForwarded,
network_labels.clone(),
);
let forwarded_packets_before = metric_value(
&pm_center,
MetricName::TrafficPacketsForwarded,
network_labels.clone(),
);
// Send a data packet A -> B that would have to transit the center.
let mut transit_pkt = ZCPacket::new_with_payload(b"foreign-transit-disabled");
transit_pkt.fill_peer_manager_hdr(
pma_net1.my_peer_id(),
pmb_net1.my_peer_id(),
PacketType::Data as u8,
);
pma_net1
.get_foreign_network_client()
.send_msg(transit_pkt, center_peer_id)
.await
.unwrap();
// Give the center time to (not) forward the packet.
tokio::time::sleep(Duration::from_millis(300)).await;
// Forwarded metrics must be unchanged: the data packet was dropped.
assert_eq!(
metric_value(
&pm_center,
MetricName::TrafficBytesForwarded,
network_labels.clone()
),
forwarded_bytes_before
);
assert_eq!(
metric_value(
&pm_center,
MetricName::TrafficPacketsForwarded,
network_labels
),
forwarded_packets_before
);
}
#[tokio::test]
async fn foreign_network_transit_control_forwarding_records_control_forwarded_metrics() {
let pm_center = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
@@ -1409,6 +1559,10 @@ pub mod tests {
.await
.unwrap();
let mut flags = pm_center.get_global_ctx().get_flags();
flags.disable_relay_data = true;
pm_center.get_global_ctx().set_flags(flags);
let center_peer_id = pm_center
.get_foreign_network_manager()
.get_network_peer_id("net1")
@@ -1461,6 +1615,58 @@ pub mod tests {
.await;
}
#[tokio::test]
// A PeerRemoved event for a foreign-network peer must also evict that peer's
// entry from the per-network traffic-metrics cache, so removed peers do not
// leak cached metric state.
async fn foreign_network_peer_removed_clears_traffic_metric_peer_cache() {
let pm_center = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
let pma_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
connect_peer_manager(pma_net1.clone(), pm_center.clone()).await;
// Wait until the center has registered the foreign network "net1".
wait_for_condition(
|| {
let pm_center = pm_center.clone();
async move {
pm_center
.get_foreign_network_manager()
.get_network_peer_id("net1")
.is_some()
}
},
Duration::from_secs(5),
)
.await;
let entry = pm_center
.get_foreign_network_manager()
.data
.get_network_entry("net1")
.unwrap();
// Populate the metrics peer cache by recording some RX traffic for peer A.
entry
.traffic_metrics
.record_rx(pma_net1.my_peer_id(), PacketType::Data as u8, 128)
.await;
assert!(
entry
.traffic_metrics
.contains_peer_cache(pma_net1.my_peer_id())
);
// Simulate peer removal; the event handler should clear the cached entry.
entry
.global_ctx
.issue_event(GlobalCtxEvent::PeerRemoved(pma_net1.my_peer_id()));
wait_for_condition(
|| {
let entry = entry.clone();
let peer_id = pma_net1.my_peer_id();
async move { !entry.traffic_metrics.contains_peer_cache(peer_id) }
},
Duration::from_secs(5),
)
.await;
}
#[tokio::test]
async fn foreign_network_encapsulated_forwarding_records_tx_metrics() {
set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
@@ -1657,6 +1863,81 @@ pub mod tests {
));
}
#[tokio::test]
// The foreign entry's advertised avoid_relay_data flag must follow runtime
// toggles of the parent's disable_relay_data flag in both directions, driven
// by the background sync routine reacting to ConfigPatched events.
async fn foreign_entry_feature_flag_tracks_parent_disable_relay_data_toggle() {
let global_ctx = get_mock_global_ctx_with_network(Some(NetworkIdentity::new(
"__access__".to_string(),
"access_secret".to_string(),
)));
let foreign_network = NetworkIdentity::new("net1".to_string(), "net1_secret".to_string());
let (pm_packet_sender, _pm_packet_recv) = create_packet_recv_chan();
// relay_data = true: avoidance should purely mirror the parent preference.
let entry = ForeignNetworkEntry::new(
foreign_network,
1,
global_ctx.clone(),
true,
Arc::new(PeerSessionStore::new()),
pm_packet_sender,
);
assert!(!entry.global_ctx.get_feature_flags().avoid_relay_data);
entry.run_parent_feature_flag_sync_routine().await;
// Enable disable_relay_data on the parent -> flag should become true.
let mut flags = global_ctx.get_flags();
flags.disable_relay_data = true;
global_ctx.set_flags(flags);
global_ctx.issue_event(GlobalCtxEvent::ConfigPatched(Default::default()));
wait_for_condition(
|| async { entry.global_ctx.get_feature_flags().avoid_relay_data },
Duration::from_secs(2),
)
.await;
// Disable it again -> flag should return to false.
let mut flags = global_ctx.get_flags();
flags.disable_relay_data = false;
global_ctx.set_flags(flags);
global_ctx.issue_event(GlobalCtxEvent::ConfigPatched(Default::default()));
wait_for_condition(
|| async { !entry.global_ctx.get_feature_flags().avoid_relay_data },
Duration::from_secs(2),
)
.await;
}
#[tokio::test]
// When the per-network whitelist denies relay_data, avoid_relay_data must stay
// true regardless of the parent's disable_relay_data setting: the node-wide
// toggle can only restrict further, never grant relaying.
async fn foreign_entry_without_relay_data_keeps_avoid_feature_flag() {
let global_ctx = get_mock_global_ctx_with_network(Some(NetworkIdentity::new(
"__access__".to_string(),
"access_secret".to_string(),
)));
let foreign_network = NetworkIdentity::new("net1".to_string(), "net1_secret".to_string());
let (pm_packet_sender, _pm_packet_recv) = create_packet_recv_chan();
// relay_data = false: avoidance is forced on from construction.
let entry = ForeignNetworkEntry::new(
foreign_network,
1,
global_ctx.clone(),
false,
Arc::new(PeerSessionStore::new()),
pm_packet_sender,
);
assert!(entry.global_ctx.get_feature_flags().avoid_relay_data);
// Parent allows relaying — sync must still keep avoidance on.
let mut flags = global_ctx.get_flags();
flags.disable_relay_data = false;
global_ctx.set_flags(flags);
ForeignNetworkEntry::sync_parent_relay_data_feature_flag(
&global_ctx,
&entry.global_ctx,
entry.relay_data,
);
assert!(entry.global_ctx.get_feature_flags().avoid_relay_data);
}
#[test]
fn credential_trust_path_rejects_admin_identity() {
assert!(ForeignNetworkManager::should_reject_credential_trust_path(
+4 -2
View File
@@ -12,6 +12,7 @@ use std::{
use base64::Engine as _;
use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
use guarden::guard;
use hmac::Mac;
use prost::Message;
@@ -40,7 +41,6 @@ use crate::{
error::Error,
global_ctx::ArcGlobalCtx,
},
guard,
peers::peer_session::{PeerSessionStore, SessionKey, UpsertResponderSessionReturn},
proto::{
api::instance::{PeerConnInfo, PeerConnStats},
@@ -1352,7 +1352,9 @@ impl PeerConn {
let is_foreign_network = conn_info_for_instrument.network_name
!= self.global_ctx.get_network_identity().network_name;
let recv_limiter = if is_foreign_network {
let recv_limiter = if is_foreign_network
&& self.global_ctx.get_flags().foreign_relay_bps_limit != u64::MAX
{
let relay_network_bps_limit = self.global_ctx.get_flags().foreign_relay_bps_limit;
let limiter_config = LimiterConfig {
burst_rate: None,
+162 -19
View File
@@ -38,7 +38,7 @@ use crate::{
route_trait::{ForeignNetworkRouteInfoMap, MockRoute, NextHopPolicy, RouteInterface},
traffic_metrics::{
InstanceLabelKind, LogicalTrafficMetrics, TrafficKind, TrafficMetricRecorder,
route_peer_info_instance_id, traffic_kind,
is_relay_data_packet_type, route_peer_info_instance_id, traffic_kind,
},
},
proto::{
@@ -263,9 +263,7 @@ impl PeerManager {
.is_err()
{
// if local network is not in whitelist, avoid relay data when exist any other route path
let mut f = global_ctx.get_feature_flags();
f.avoid_relay_data = true;
global_ctx.set_feature_flags(f);
global_ctx.set_avoid_relay_data_preference(true);
}
let is_secure_mode_enabled = global_ctx
@@ -689,6 +687,11 @@ impl PeerManager {
Ok(())
}
// Drop the peer id reserved for a foreign network during handshake (used on
// both error and success paths), then shrink the map so aborted handshakes do
// not leave the DashMap's capacity permanently inflated.
fn release_reserved_peer_id(&self, network_name: &str) {
self.reserved_my_peer_id_map.remove(network_name);
shrink_dashmap(&self.reserved_my_peer_id_map, None);
}
#[tracing::instrument(ret)]
pub async fn add_tunnel_as_server(
&self,
@@ -704,7 +707,8 @@ impl PeerManager {
tunnel,
self.peer_session_store.clone(),
);
conn.do_handshake_as_server_ext(|peer, network_name:&str| {
let mut reserved_peer_id_network_name = None;
let handshake_ret = conn.do_handshake_as_server_ext(|peer, network_name:&str| {
if network_name
== self.global_ctx.get_network_identity().network_name
{
@@ -715,6 +719,7 @@ impl PeerManager {
.foreign_network_manager
.get_network_peer_id(network_name);
if peer_id.is_none() {
reserved_peer_id_network_name = Some(network_name.to_string());
peer_id = Some(*self.reserved_my_peer_id_map.entry(network_name.to_string()).or_insert_with(|| {
rand::random::<PeerId>()
}).value());
@@ -730,7 +735,14 @@ impl PeerManager {
Ok(())
})
.await?;
.await;
if let Err(err) = handshake_ret {
if let Some(network_name) = reserved_peer_id_network_name {
self.release_reserved_peer_id(&network_name);
}
return Err(err);
}
let peer_identity = conn.get_network_identity();
let peer_network_name = peer_identity.network_name.clone();
@@ -749,6 +761,7 @@ impl PeerManager {
if !is_local_network && self.global_ctx.get_flags().private_mode && !foreign_network_allowed
{
self.release_reserved_peer_id(&peer_network_name);
return Err(Error::SecretKeyError(
"private mode is turned on, foreign network secret mismatch".to_string(),
));
@@ -756,14 +769,18 @@ impl PeerManager {
conn.set_is_hole_punched(!is_directly_connected);
if is_local_network {
self.add_new_peer_conn(conn).await?;
let add_peer_ret = if is_local_network {
self.add_new_peer_conn(conn).await
} else {
self.foreign_network_manager.add_peer_conn(conn).await?;
self.foreign_network_manager.add_peer_conn(conn).await
};
if let Err(err) = add_peer_ret {
self.release_reserved_peer_id(&peer_network_name);
return Err(err);
}
self.reserved_my_peer_id_map.remove(&peer_network_name);
shrink_dashmap(&self.reserved_my_peer_id_map, None);
self.release_reserved_peer_id(&peer_network_name);
tracing::info!("add tunnel as server done");
Ok(())
@@ -774,6 +791,7 @@ impl PeerManager {
my_peer_id: PeerId,
peer_map: &PeerMap,
foreign_network_mgr: &ForeignNetworkManager,
disable_relay_data: bool,
) -> Result<(), ZCPacket> {
let pm_header = packet.peer_manager_header().unwrap();
if pm_header.packet_type != PacketType::ForeignNetworkPacket as u8 {
@@ -783,6 +801,16 @@ impl PeerManager {
let from_peer_id = pm_header.from_peer_id.get();
let to_peer_id = pm_header.to_peer_id.get();
if disable_relay_data && Self::is_relay_data_zc_packet(&packet) {
tracing::debug!(
?from_peer_id,
?to_peer_id,
inner_packet_type = ?packet.foreign_network_inner_packet_type(),
"drop foreign network relay data while relay data is disabled"
);
return Ok(());
}
let foreign_hdr = packet.foreign_network_hdr().unwrap();
let foreign_network_name = foreign_hdr.get_network_name(packet.payload());
let foreign_peer_id = foreign_hdr.get_dst_peer_id();
@@ -872,6 +900,29 @@ impl PeerManager {
}
}
fn is_relay_data_packet(packet_type: u8) -> bool {
is_relay_data_packet_type(packet_type)
}
fn is_relay_data_zc_packet(packet: &ZCPacket) -> bool {
let Some(hdr) = packet.peer_manager_header() else {
return false;
};
if hdr.packet_type == PacketType::ForeignNetworkPacket as u8 {
let inner_packet_type = packet.foreign_network_inner_packet_type();
if inner_packet_type.is_none() {
tracing::warn!(
?hdr,
"foreign network packet has unparseable inner peer manager header"
);
}
return inner_packet_type.is_none_or(Self::is_relay_data_packet);
}
Self::is_relay_data_packet(hdr.packet_type)
}
async fn start_peer_recv(&self) {
let mut recv = self.packet_recv.lock().await.take().unwrap();
let my_peer_id = self.my_peer_id;
@@ -925,14 +976,21 @@ impl PeerManager {
self.tasks.lock().await.spawn(async move {
tracing::trace!("start_peer_recv");
while let Ok(ret) = recv_packet_from_chan(&mut recv).await {
let Err(mut ret) =
Self::try_handle_foreign_network_packet(ret, my_peer_id, &peers, &foreign_mgr)
.await
let disable_relay_data = global_ctx.flags_arc().disable_relay_data;
let Err(mut ret) = Self::try_handle_foreign_network_packet(
ret,
my_peer_id,
&peers,
&foreign_mgr,
disable_relay_data,
)
.await
else {
continue;
};
let buf_len = ret.buf_len();
let is_relay_data_packet = Self::is_relay_data_zc_packet(&ret);
let Some(hdr) = ret.mut_peer_manager_header() else {
tracing::warn!(?ret, "invalid packet, skip");
continue;
@@ -944,6 +1002,16 @@ impl PeerManager {
let packet_type = hdr.packet_type;
let is_encrypted = hdr.is_encrypted();
if to_peer_id != my_peer_id {
if disable_relay_data && is_relay_data_packet {
tracing::debug!(
?from_peer_id,
?to_peer_id,
packet_type,
"drop forwarded relay data while relay data is disabled"
);
continue;
}
if hdr.forward_counter > 7 {
tracing::warn!(?hdr, "forward counter exceed, drop packet");
continue;
@@ -2080,7 +2148,7 @@ mod tests {
},
},
proto::{
common::{CompressionAlgoPb, NatType, PeerFeatureFlag},
common::{CompressionAlgoPb, NatType},
peer_rpc::SecureAuthLevel,
},
tunnel::{
@@ -2224,6 +2292,84 @@ mod tests {
assert_eq!(signal.version(), initial_version + 2);
}
#[test]
fn disable_relay_data_classifies_data_plane_packets_only() {
for packet_type in [
PacketType::Data,
PacketType::KcpSrc,
PacketType::KcpDst,
PacketType::QuicSrc,
PacketType::QuicDst,
PacketType::DataWithKcpSrcModified,
PacketType::DataWithQuicSrcModified,
PacketType::ForeignNetworkPacket,
] {
assert!(PeerManager::is_relay_data_packet(packet_type as u8));
}
for packet_type in [
PacketType::RpcReq,
PacketType::RpcResp,
PacketType::Ping,
PacketType::Pong,
PacketType::HandShake,
PacketType::NoiseHandshakeMsg1,
PacketType::NoiseHandshakeMsg2,
PacketType::NoiseHandshakeMsg3,
PacketType::RelayHandshake,
PacketType::RelayHandshakeAck,
] {
assert!(!PeerManager::is_relay_data_packet(packet_type as u8));
}
}
#[test]
fn disable_relay_data_inspects_foreign_network_inner_packet_type() {
let network_name = "net1".to_string();
let mut rpc_packet = ZCPacket::new_with_payload(b"rpc");
rpc_packet.fill_peer_manager_hdr(1, 2, PacketType::RpcReq as u8);
let mut foreign_rpc_packet =
ZCPacket::new_for_foreign_network(&network_name, 2, &rpc_packet);
foreign_rpc_packet.fill_peer_manager_hdr(10, 20, PacketType::ForeignNetworkPacket as u8);
assert_eq!(
foreign_rpc_packet.foreign_network_inner_packet_type(),
Some(PacketType::RpcReq as u8)
);
assert!(!PeerManager::is_relay_data_zc_packet(&foreign_rpc_packet));
let mut data_packet = ZCPacket::new_with_payload(b"data");
data_packet.fill_peer_manager_hdr(1, 2, PacketType::Data as u8);
let mut foreign_data_packet =
ZCPacket::new_for_foreign_network(&network_name, 2, &data_packet);
foreign_data_packet.fill_peer_manager_hdr(10, 20, PacketType::ForeignNetworkPacket as u8);
assert_eq!(
foreign_data_packet.foreign_network_inner_packet_type(),
Some(PacketType::Data as u8)
);
assert!(PeerManager::is_relay_data_zc_packet(&foreign_data_packet));
}
#[tokio::test]
async fn non_whitelisted_network_avoid_relay_survives_disable_relay_data_toggle() {
let global_ctx = get_mock_global_ctx();
let mut flags = global_ctx.get_flags();
flags.disable_relay_data = true;
flags.relay_network_whitelist = "other-network".to_string();
global_ctx.set_flags(flags);
let (packet_send, _packet_recv) = create_packet_recv_chan();
let _peer_mgr = PeerManager::new(RouteAlgoType::Ospf, global_ctx.clone(), packet_send);
let mut flags = global_ctx.get_flags();
flags.disable_relay_data = false;
global_ctx.set_flags(flags);
assert!(global_ctx.get_feature_flags().avoid_relay_data);
}
#[tokio::test]
async fn send_msg_internal_does_not_record_tx_metrics_on_failed_delivery() {
let peer_mgr = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
@@ -3121,10 +3267,7 @@ mod tests {
// when b's avoid_relay_data is true, a->c should route through d and e, cost is 3
peer_mgr_b
.get_global_ctx()
.set_feature_flags(PeerFeatureFlag {
avoid_relay_data: true,
..Default::default()
});
.set_avoid_relay_data_preference(true);
tokio::time::sleep(Duration::from_secs(2)).await;
if wait_route_appear_with_cost(peer_mgr_a.clone(), peer_mgr_c.my_peer_id, Some(3))
.await
+81 -2
View File
@@ -1228,6 +1228,25 @@ impl SyncedRouteInfo {
Vec<PeerId>,
HashMap<Vec<u8>, crate::common::global_ctx::TrustedKeyMetadata>,
)
where
F: FnMut(PeerId) -> bool,
{
self.verify_and_update_credential_trusts_with_active_peers_protecting(
network_secret,
is_peer_active,
None,
)
}
fn verify_and_update_credential_trusts_with_active_peers_protecting<F>(
&self,
network_secret: Option<&str>,
is_peer_active: F,
protected_peer_id: Option<PeerId>,
) -> (
Vec<PeerId>,
HashMap<Vec<u8>, crate::common::global_ctx::TrustedKeyMetadata>,
)
where
F: FnMut(PeerId) -> bool,
{
@@ -1248,6 +1267,9 @@ impl SyncedRouteInfo {
let mut untrusted_peers =
Self::collect_revoked_credential_peers(&peer_infos, &prev_trusted, &all_trusted);
untrusted_peers.extend(duplicate_untrusted_peers);
if let Some(protected_peer_id) = protected_peer_id {
untrusted_peers.remove(&protected_peer_id);
}
// Remove untrusted peers from peer_infos so they won't appear in route graph
if !untrusted_peers.is_empty() {
@@ -2735,7 +2757,11 @@ impl PeerRouteServiceImpl {
let network_identity = self.global_ctx.get_network_identity();
let (untrusted, global_trusted_keys) = self
.synced_route_info
.verify_and_update_credential_trusts(network_identity.network_secret.as_deref());
.verify_and_update_credential_trusts_with_active_peers_protecting(
network_identity.network_secret.as_deref(),
|_| true,
Some(self.my_peer_id),
);
self.global_ctx
.update_trusted_keys(global_trusted_keys, &network_identity.network_name);
@@ -2751,9 +2777,10 @@ impl PeerRouteServiceImpl {
let (untrusted, global_trusted_keys) = self
.synced_route_info
.verify_and_update_credential_trusts_with_active_peers(
.verify_and_update_credential_trusts_with_active_peers_protecting(
network_identity.network_secret.as_deref(),
|peer_id| self.is_active_non_reusable_credential_peer(peer_id),
Some(self.my_peer_id),
);
self.global_ctx
.update_trusted_keys(global_trusted_keys, &network_identity.network_name);
@@ -5047,6 +5074,58 @@ mod tests {
);
}
#[tokio::test]
async fn credential_trust_refresh_does_not_remove_self_peer() {
let my_peer_id = 11;
let remote_peer_id = 12;
let credential_key = vec![8; 32];
let service_impl = PeerRouteServiceImpl::new(my_peer_id, get_mock_global_ctx());
let self_info = make_credential_route_peer_info(my_peer_id, &credential_key);
let remote_info = make_credential_route_peer_info(remote_peer_id, &credential_key);
{
let mut guard = service_impl.synced_route_info.peer_infos.write();
guard.insert(self_info.peer_id, self_info);
guard.insert(remote_info.peer_id, remote_info);
}
service_impl
.synced_route_info
.trusted_credential_pubkeys
.insert(
credential_key.clone(),
TrustedCredentialPubkey {
pubkey: credential_key,
expiry_unix: i64::MAX,
..Default::default()
},
);
let (untrusted_peers, _) = service_impl
.synced_route_info
.verify_and_update_credential_trusts_with_active_peers_protecting(
None,
|_| true,
Some(my_peer_id),
);
assert_eq!(untrusted_peers, vec![remote_peer_id]);
assert!(
service_impl
.synced_route_info
.peer_infos
.read()
.contains_key(&my_peer_id)
);
assert!(
!service_impl
.synced_route_info
.peer_infos
.read()
.contains_key(&remote_peer_id)
);
}
#[tokio::test]
async fn credential_refresh_rebuilds_reachability_before_owner_election() {
const NETWORK_SECRET: &str = "sec1";
+55 -9
View File
@@ -12,6 +12,22 @@ use crate::{
tunnel::udp,
};
fn remove_easytier_managed_ipv6s(ret: &mut GetIpListResponse, global_ctx: &ArcGlobalCtx) {
ret.interface_ipv6s.retain(|ip| {
let ip = std::net::Ipv6Addr::from(*ip);
!global_ctx.is_ip_easytier_managed_ipv6(&ip)
});
if ret
.public_ipv6
.as_ref()
.map(|ip| std::net::Ipv6Addr::from(*ip))
.is_some_and(|ip| global_ctx.is_ip_easytier_managed_ipv6(&ip))
{
ret.public_ipv6 = None;
}
}
#[derive(Clone)]
pub struct DirectConnectorManagerRpcServer {
// TODO: this only cache for one src peer, should make it global
@@ -36,15 +52,7 @@ impl DirectConnectorRpc for DirectConnectorManagerRpcServer {
.chain(self.global_ctx.get_running_listeners())
.map(Into::into)
.collect();
// remove et ipv6 from the interface ipv6 list
if let Some(et_ipv6) = self.global_ctx.get_ipv6() {
let et_ipv6: crate::proto::common::Ipv6Addr = et_ipv6.address().into();
ret.interface_ipv6s.retain(|x| *x != et_ipv6);
}
if let Some(public_ipv6) = self.global_ctx.get_public_ipv6_lease() {
let public_ipv6: crate::proto::common::Ipv6Addr = public_ipv6.address().into();
ret.interface_ipv6s.retain(|x| *x != public_ipv6);
}
remove_easytier_managed_ipv6s(&mut ret, &self.global_ctx);
tracing::trace!(
"get_ip_list: public_ipv4: {:?}, public_ipv6: {:?}, listeners: {:?}",
ret.public_ipv4,
@@ -88,3 +96,41 @@ impl DirectConnectorManagerRpcServer {
Self { global_ctx }
}
}
#[cfg(test)]
mod tests {
use std::collections::BTreeSet;
use crate::{
common::global_ctx::tests::get_mock_global_ctx,
peers::peer_rpc_service::remove_easytier_managed_ipv6s, proto::peer_rpc::GetIpListResponse,
};
#[tokio::test]
async fn get_ip_list_sanitizer_removes_managed_ipv6_from_all_sources() {
let global_ctx = get_mock_global_ctx();
let virtual_ipv6 = "fd00::1/64".parse().unwrap();
let public_ipv6 = "2001:db8::2/128".parse().unwrap();
let physical_ipv6: std::net::Ipv6Addr = "2001:db8::3".parse().unwrap();
let routed_ipv6: cidr::Ipv6Inet = "2001:db8::4/128".parse().unwrap();
global_ctx.set_ipv6(Some(virtual_ipv6));
global_ctx.set_public_ipv6_lease(Some(public_ipv6));
global_ctx.set_public_ipv6_routes(BTreeSet::from([routed_ipv6]));
let mut ip_list = GetIpListResponse {
public_ipv6: Some(public_ipv6.address().into()),
interface_ipv6s: vec![
virtual_ipv6.address().into(),
public_ipv6.address().into(),
routed_ipv6.address().into(),
physical_ipv6.into(),
],
..Default::default()
};
remove_easytier_managed_ipv6s(&mut ip_list, &global_ctx);
assert_eq!(ip_list.public_ipv6, None);
assert_eq!(ip_list.interface_ipv6s, vec![physical_ipv6.into()]);
}
}
+5 -1
View File
@@ -7,7 +7,10 @@ use anyhow::anyhow;
use dashmap::DashMap;
use super::secure_datagram::{SecureDatagramDirection, SecureDatagramSession};
use crate::{common::PeerId, tunnel::packet_def::ZCPacket};
use crate::{
common::{PeerId, shrink_dashmap},
tunnel::packet_def::ZCPacket,
};
pub struct UpsertResponderSessionReturn {
pub session: Arc<PeerSession>,
@@ -78,6 +81,7 @@ impl PeerSessionStore {
pub fn evict_unused_sessions(&self) {
self.sessions
.retain(|_key, session| Arc::strong_count(session) > 1);
shrink_dashmap(&self.sessions, None);
}
#[tracing::instrument(skip(self))]
+2
View File
@@ -243,6 +243,8 @@ impl PublicIpv6Service {
.copied()
.collect::<Vec<_>>();
*cached_routes = routes;
self.global_ctx
.set_public_ipv6_routes(cached_routes.clone());
self.global_ctx
.issue_event(GlobalCtxEvent::PublicIpv6RoutesUpdated(added, removed));
}
+9 -1
View File
@@ -9,7 +9,7 @@ use tokio::time::{Duration, timeout};
use crate::peers::foreign_network_client::ForeignNetworkClient;
use crate::{
common::error::Error,
common::{PeerId, global_ctx::ArcGlobalCtx},
common::{PeerId, global_ctx::ArcGlobalCtx, shrink_dashmap},
peers::peer_map::PeerMap,
peers::peer_session::{PeerSession, PeerSessionAction, PeerSessionStore, SessionKey},
peers::route_trait::NextHopPolicy,
@@ -652,6 +652,10 @@ impl RelayPeerMap {
self.handshake_locks.remove(&peer_id);
self.pending_packets.remove(&peer_id);
}
shrink_dashmap(&self.states, None);
shrink_dashmap(&self.pending_handshakes, None);
shrink_dashmap(&self.handshake_locks, None);
shrink_dashmap(&self.pending_packets, None);
}
pub fn has_state(&self, peer_id: PeerId) -> bool {
@@ -679,6 +683,10 @@ impl RelayPeerMap {
self.pending_handshakes.remove(&peer_id);
self.handshake_locks.remove(&peer_id);
self.pending_packets.remove(&peer_id);
shrink_dashmap(&self.states, None);
shrink_dashmap(&self.pending_handshakes, None);
shrink_dashmap(&self.handshake_locks, None);
shrink_dashmap(&self.pending_packets, None);
tracing::debug!(?peer_id, "RelayPeerMap removed peer relay state");
}
+20
View File
@@ -201,6 +201,11 @@ impl LogicalTrafficMetrics {
self.per_peer.len()
}
#[cfg(test)]
fn contains_peer_cache(&self, peer_id: PeerId) -> bool {
self.per_peer.contains_key(&peer_id)
}
fn build_peer_counters(&self, instance_id: &str) -> TrafficCounters {
let instance_label = match self.label_kind {
InstanceLabelKind::To => LabelType::ToInstanceId(instance_id.to_string()),
@@ -241,6 +246,13 @@ pub(crate) fn traffic_kind(packet_type: u8) -> TrafficKind {
}
}
pub(crate) fn is_relay_data_packet_type(packet_type: u8) -> bool {
// Relay handshakes are control-plane setup; payload data is blocked by its
// original packet type after the session exists.
traffic_kind(packet_type) == TrafficKind::Data
|| packet_type == PacketType::ForeignNetworkPacket as u8
}
#[derive(Clone)]
struct TrafficMetricGroup {
data: Arc<LogicalTrafficMetrics>,
@@ -326,6 +338,14 @@ impl TrafficMetricRecorder {
self.rx_metrics.control.clear_peer_cache();
}
#[cfg(test)]
pub(crate) fn contains_peer_cache(&self, peer_id: PeerId) -> bool {
self.tx_metrics.data.contains_peer_cache(peer_id)
|| self.tx_metrics.control.contains_peer_cache(peer_id)
|| self.rx_metrics.data.contains_peer_cache(peer_id)
|| self.rx_metrics.control.contains_peer_cache(peer_id)
}
fn resolve_instance_id(&self, peer_id: PeerId) -> BoxFuture<'static, Option<String>> {
(self.resolve_instance_id)(peer_id)
}
+1
View File
@@ -27,6 +27,7 @@ message InstanceConfigPatch {
optional bool ipv6_public_addr_provider = 11;
optional bool ipv6_public_addr_auto = 12;
optional string ipv6_public_addr_prefix = 13;
optional bool disable_relay_data = 14;
}
message PortForwardPatch {
+1
View File
@@ -99,6 +99,7 @@ message NetworkConfig {
optional bool ipv6_public_addr_provider = 62;
optional bool ipv6_public_addr_auto = 63;
optional string ipv6_public_addr_prefix = 64;
optional bool disable_relay_data = 65;
}
message PortForwardConfig {
+2
View File
@@ -75,6 +75,7 @@ message FlagsInConfig {
bool need_p2p = 38;
uint64 instance_recv_bps_limit = 39;
bool disable_upnp = 40;
bool disable_relay_data = 41;
}
message RpcDescriptor {
@@ -104,6 +105,7 @@ enum CompressionAlgoPb {
Invalid = 0;
None = 1;
Zstd = 2;
Lzo = 3;
}
message RpcCompressionInfo {
+4
View File
@@ -467,6 +467,8 @@ impl TryFrom<CompressionAlgoPb> for CompressorAlgo {
match value {
#[cfg(feature = "zstd")]
CompressionAlgoPb::Zstd => Ok(CompressorAlgo::ZstdDefault),
#[cfg(feature = "lzo")]
CompressionAlgoPb::Lzo => Ok(CompressorAlgo::Lzo),
CompressionAlgoPb::None => Ok(CompressorAlgo::None),
_ => Err(anyhow::anyhow!("Invalid CompressionAlgoPb")),
}
@@ -480,6 +482,8 @@ impl TryFrom<CompressorAlgo> for CompressionAlgoPb {
match value {
#[cfg(feature = "zstd")]
CompressorAlgo::ZstdDefault => Ok(CompressionAlgoPb::Zstd),
#[cfg(feature = "lzo")]
CompressorAlgo::Lzo => Ok(CompressionAlgoPb::Lzo),
CompressorAlgo::None => Ok(CompressionAlgoPb::None),
}
}
+1 -1
View File
@@ -1,10 +1,10 @@
use std::sync::{Arc, Mutex, atomic::AtomicBool};
use futures::{SinkExt as _, StreamExt};
use guarden::defer;
use tokio::{task::JoinSet, time::timeout};
use crate::{
defer,
proto::rpc_types::error::Error,
tunnel::{Tunnel, packet_def::PacketType, ring::create_ring_tunnel_pair},
};
+2 -3
View File
@@ -4,18 +4,17 @@ use std::sync::{Arc, Mutex};
use bytes::Bytes;
use dashmap::DashMap;
use guarden::defer;
use prost::Message;
use tokio::sync::mpsc;
use tokio::task::JoinSet;
use tokio::time::timeout;
use tokio_stream::StreamExt;
use crate::common::shrink_dashmap;
use crate::common::{
PeerId,
PeerId, shrink_dashmap,
stats_manager::{LabelSet, LabelType, MetricName, StatsManager},
};
use crate::defer;
use crate::proto::common::{
CompressionAlgoPb, RpcCompressionInfo, RpcDescriptor, RpcPacket, RpcRequest, RpcResponse,
};
+330 -4
View File
@@ -14,13 +14,17 @@ use crate::{
},
instance::instance::Instance,
tests::three_node::{generate_secure_mode_config, generate_secure_mode_config_with_key},
tunnel::{common::tests::wait_for_condition, tcp::TcpTunnelConnector},
tunnel::{common::tests::wait_for_condition, tcp::TcpTunnelConnector, udp::UdpTunnelConnector},
};
use super::{add_ns_to_bridge, create_netns, del_netns, drop_insts, ping_test};
use rstest::rstest;
const PUBLIC_SERVER_NETWORK_NAME: &str = "__public_server__";
const PUBLIC_SERVER_SHARED_SECRET: &str = "public-server-shared-secret";
const NEED_P2P_ADMIN_NETWORK_NAME: &str = "need_p2p_credential_test_network";
/// Prepare network namespaces for credential tests
/// Topology:
/// br_a (10.1.1.0/24): ns_adm (10.1.1.1), ns_c1 (10.1.1.2), ns_c2 (10.1.1.3), ns_c3 (10.1.1.4), ns_c4 (10.1.1.5)
@@ -221,6 +225,328 @@ fn create_shared_config(
config
}
fn create_public_server_config() -> TomlConfigLoader {
let config = TomlConfigLoader::default();
config.set_inst_name(PUBLIC_SERVER_NETWORK_NAME.to_string());
config.set_hostname(Some("public-server".to_string()));
config.set_netns(Some("ns_adm".to_string()));
config.set_listeners(vec!["udp://0.0.0.0:11010".parse().unwrap()]);
config.set_network_identity(NetworkIdentity::new(
PUBLIC_SERVER_NETWORK_NAME.to_string(),
PUBLIC_SERVER_SHARED_SECRET.to_string(),
));
config.set_secure_mode(Some(generate_secure_mode_config()));
let mut flags = config.get_flags();
flags.no_tun = true;
flags.private_mode = true;
flags.relay_all_peer_rpc = true;
flags.relay_network_whitelist = "".to_string();
config.set_flags(flags);
config
}
fn create_need_p2p_admin_config(listener_scheme: &str) -> TomlConfigLoader {
let config = TomlConfigLoader::default();
config.set_inst_name(NEED_P2P_ADMIN_NETWORK_NAME.to_string());
config.set_hostname(Some("need-p2p-admin".to_string()));
config.set_netns(Some("ns_c3".to_string()));
config.set_listeners(vec![
format!("{listener_scheme}://0.0.0.0:0").parse().unwrap(),
]);
config.set_network_identity(NetworkIdentity::new(
NEED_P2P_ADMIN_NETWORK_NAME.to_string(),
PUBLIC_SERVER_SHARED_SECRET.to_string(),
));
config.set_secure_mode(Some(generate_secure_mode_config()));
let mut flags = config.get_flags();
flags.no_tun = true;
flags.relay_all_peer_rpc = true;
flags.need_p2p = true;
flags.disable_udp_hole_punching = true;
flags.disable_tcp_hole_punching = true;
flags.disable_sym_hole_punching = true;
config.set_flags(flags);
config
}
#[allow(clippy::too_many_arguments)]
fn create_public_server_credential_config(
credential_secret: &str,
inst_name: &str,
hostname: &str,
ns: &str,
ipv4: &str,
ipv6: &str,
tcp_listener_port: u16,
udp_listener_port: u16,
proxy_cidrs: &[&str],
) -> TomlConfigLoader {
let config = create_credential_config_from_secret(
NEED_P2P_ADMIN_NETWORK_NAME.to_string(),
credential_secret,
inst_name,
Some(ns),
ipv4,
ipv6,
);
config.set_hostname(Some(hostname.to_string()));
config.set_listeners(vec![
format!("tcp://0.0.0.0:{tcp_listener_port}")
.parse()
.unwrap(),
format!("udp://0.0.0.0:{udp_listener_port}")
.parse()
.unwrap(),
]);
for cidr in proxy_cidrs {
config
.add_proxy_cidr((*cidr).parse().unwrap(), None)
.unwrap();
}
let mut flags = config.get_flags();
flags.disable_p2p = true;
config.set_flags(flags);
config
}
async fn wait_direct_peer(inst: &Instance, peer_id: u32, timeout: Duration, label: &str) {
wait_for_condition(
|| async {
let peers = inst.get_peer_manager().get_peer_map().list_peers();
let connected = peers.contains(&peer_id);
println!("{label}: direct peers={:?}, target={}", peers, peer_id);
connected
},
timeout,
)
.await;
}
async fn wait_running_listener(inst: &Instance, scheme: &str, timeout: Duration, label: &str) {
wait_for_condition(
|| async {
let listeners = inst.get_global_ctx().get_running_listeners();
let matched = listeners.iter().any(|listener| {
listener.scheme() == scheme && listener.port().is_some_and(|p| p != 0)
});
println!("{label}: running listeners={:?}", listeners);
matched
},
timeout,
)
.await;
}
async fn wait_route_cost(inst: &Instance, peer_id: u32, cost: i32, timeout: Duration, label: &str) {
wait_for_condition(
|| async {
let routes = inst.get_peer_manager().list_routes().await;
let matched = routes
.iter()
.any(|route| route.peer_id == peer_id && route.cost == cost);
println!(
"{label}: routes={:?}, target={}, cost={}",
routes
.iter()
.map(|route| (route.peer_id, route.cost))
.collect::<Vec<_>>(),
peer_id,
cost
);
matched
},
timeout,
)
.await;
}
async fn wait_foreign_network_count(inst: &Instance, expected: usize, timeout: Duration) {
wait_for_condition(
|| async {
let foreign_networks = inst
.get_peer_manager()
.get_foreign_network_manager()
.list_foreign_networks()
.await
.foreign_networks;
println!("foreign networks: {:?}", foreign_networks);
foreign_networks.len() == expected
},
timeout,
)
.await;
}
/// Regression coverage for a public-server-mediated credential topology:
/// Public server <- admin peer (need_p2p) <- two credential peers.
///
/// Credential peers set `disable_p2p=true`, while the admin peer advertises `need_p2p=true`.
/// The credential peers should still proactively build direct peers with the admin peer through
/// peer RPC forwarded by the public server, even when the admin listener binds an ephemeral port.
#[rstest]
#[case("quic")]
#[case("wss")]
#[case("tcp")]
#[case("udp")]
#[tokio::test]
#[serial_test::serial]
async fn credential_peers_p2p_to_need_p2p_admin_through_public_server(
#[case] admin_listener_scheme: &str,
) {
prepare_credential_network();
let mut public_server_inst = Instance::new(create_public_server_config());
public_server_inst.run().await.unwrap();
let mut admin_inst = Instance::new(create_need_p2p_admin_config(admin_listener_scheme));
admin_inst.run().await.unwrap();
wait_running_listener(
&admin_inst,
admin_listener_scheme,
Duration::from_secs(10),
"admin ephemeral listener",
)
.await;
admin_inst
.get_conn_manager()
.add_connector(UdpTunnelConnector::new(
"udp://10.1.1.1:11010".parse().unwrap(),
));
wait_foreign_network_count(&public_server_inst, 1, Duration::from_secs(10)).await;
let (_credential_a_id, credential_a_secret) = admin_inst
.get_global_ctx()
.get_credential_manager()
.generate_credential_with_options(
vec![],
false,
vec!["10.1.0.0/24".to_string()],
Duration::from_secs(3600),
Some("credential-peer-a".to_string()),
false,
);
let (_credential_b_id, credential_b_secret) = admin_inst
.get_global_ctx()
.get_credential_manager()
.generate_credential_with_options(
vec![],
false,
vec![],
Duration::from_secs(3600),
Some("credential-peer-b".to_string()),
false,
);
admin_inst
.get_global_ctx()
.issue_event(GlobalCtxEvent::CredentialChanged);
wait_foreign_network_count(&public_server_inst, 1, Duration::from_secs(10)).await;
let mut credential_a_inst = Instance::new(create_public_server_credential_config(
&credential_a_secret,
"credential-peer-a",
"credential-a",
"ns_c1",
"10.154.0.1",
"fd00::1/64",
11030,
11031,
&["10.1.0.0/24"],
));
let mut credential_b_inst = Instance::new(create_public_server_credential_config(
&credential_b_secret,
"credential-peer-b",
"credential-b",
"ns_c2",
"10.154.0.2",
"fd00::2/64",
11040,
11041,
&[],
));
credential_a_inst.run().await.unwrap();
credential_b_inst.run().await.unwrap();
credential_a_inst
.get_conn_manager()
.add_connector(UdpTunnelConnector::new(
"udp://10.1.1.1:11010".parse().unwrap(),
));
credential_b_inst
.get_conn_manager()
.add_connector(UdpTunnelConnector::new(
"udp://10.1.1.1:11010".parse().unwrap(),
));
let admin_peer_id = admin_inst.peer_id();
let credential_a_peer_id = credential_a_inst.peer_id();
let credential_b_peer_id = credential_b_inst.peer_id();
println!(
"admin={}, credential_a={}, credential_b={}, admin_listener_scheme={}",
admin_peer_id, credential_a_peer_id, credential_b_peer_id, admin_listener_scheme
);
wait_direct_peer(
&credential_a_inst,
admin_peer_id,
Duration::from_secs(30),
"credential_a -> admin",
)
.await;
wait_direct_peer(
&credential_b_inst,
admin_peer_id,
Duration::from_secs(30),
"credential_b -> admin",
)
.await;
wait_direct_peer(
&admin_inst,
credential_a_peer_id,
Duration::from_secs(10),
"admin -> credential_a",
)
.await;
wait_direct_peer(
&admin_inst,
credential_b_peer_id,
Duration::from_secs(10),
"admin -> credential_b",
)
.await;
wait_route_cost(
&credential_a_inst,
admin_peer_id,
1,
Duration::from_secs(10),
"credential_a route to admin",
)
.await;
wait_route_cost(
&credential_b_inst,
admin_peer_id,
1,
Duration::from_secs(10),
"credential_b route to admin",
)
.await;
drop_insts(vec![
public_server_inst,
admin_inst,
credential_a_inst,
credential_b_inst,
])
.await;
}
fn create_generated_credential_config(
admin_inst: &Instance,
inst_name: &str,
@@ -501,10 +827,10 @@ async fn credential_relay_capability(#[case] allow_relay: bool) {
// Create admin node
let admin_config = create_admin_config("admin", Some("ns_adm"), "10.144.144.1", "fd00::1/64");
let mut admin_inst = Instance::new(admin_config);
let mut ff = admin_inst.get_global_ctx().get_feature_flags();
// if cred c allow relay, we set admin inst avoid relay (if other same-cost path available, admin will not relay data)
ff.avoid_relay_data = allow_relay;
admin_inst.get_global_ctx().set_feature_flags(ff);
admin_inst
.get_global_ctx()
.set_avoid_relay_data_preference(allow_relay);
admin_inst.run().await.unwrap();
let admin_peer_id = admin_inst.peer_id();
+147
View File
@@ -3730,6 +3730,153 @@ pub async fn config_patch_test() {
drop_insts(insts).await;
}
#[rstest::rstest]
#[tokio::test]
#[serial_test::serial]
pub async fn config_patch_disable_relay_data_test() {
use crate::proto::api::config::InstanceConfigPatch;
let insts = init_three_node_ex(
"udp",
|cfg| {
cfg.set_ipv6(None);
cfg
},
false,
)
.await;
let relay_peer_id = insts[1].peer_id();
let dst_peer_id = insts[2].peer_id();
assert!(!insts[1].get_global_ctx().get_flags().disable_relay_data);
assert!(
!insts[1]
.get_global_ctx()
.get_feature_flags()
.avoid_relay_data
);
check_route_ex(
insts[0].get_peer_manager().list_routes().await,
dst_peer_id,
|route| {
assert_eq!(route.next_hop_peer_id, relay_peer_id);
true
},
);
wait_for_condition(
|| async { ping_test("net_a", "10.144.144.3", None).await },
Duration::from_secs(5),
)
.await;
insts[1]
.get_config_patcher()
.apply_patch(InstanceConfigPatch {
disable_relay_data: Some(true),
..Default::default()
})
.await
.unwrap();
assert!(insts[1].get_global_ctx().get_flags().disable_relay_data);
assert!(
insts[1]
.get_global_ctx()
.config
.get_flags()
.disable_relay_data
);
assert!(
insts[1]
.get_global_ctx()
.get_feature_flags()
.avoid_relay_data
);
wait_for_condition(
|| {
let peer_mgr = insts[0].get_peer_manager().clone();
async move {
peer_mgr.list_routes().await.iter().any(|route| {
route.peer_id == relay_peer_id
&& route
.feature_flag
.as_ref()
.map(|flag| flag.avoid_relay_data)
.unwrap_or(false)
})
}
},
Duration::from_secs(5),
)
.await;
check_route_ex(
insts[0].get_peer_manager().list_routes().await,
dst_peer_id,
|route| {
assert_eq!(route.next_hop_peer_id, relay_peer_id);
true
},
);
assert!(
!ping_test("net_a", "10.144.144.3", None).await,
"traffic from inst1 to inst3 should be blocked while inst2 relay data is disabled"
);
insts[1]
.get_config_patcher()
.apply_patch(InstanceConfigPatch {
disable_relay_data: Some(false),
..Default::default()
})
.await
.unwrap();
assert!(!insts[1].get_global_ctx().get_flags().disable_relay_data);
assert!(
!insts[1]
.get_global_ctx()
.config
.get_flags()
.disable_relay_data
);
assert!(
!insts[1]
.get_global_ctx()
.get_feature_flags()
.avoid_relay_data
);
wait_for_condition(
|| {
let peer_mgr = insts[0].get_peer_manager().clone();
async move {
peer_mgr.list_routes().await.iter().any(|route| {
route.peer_id == relay_peer_id
&& route
.feature_flag
.as_ref()
.map(|flag| !flag.avoid_relay_data)
.unwrap_or(false)
})
}
},
Duration::from_secs(5),
)
.await;
wait_for_condition(
|| async { ping_test("net_a", "10.144.144.3", None).await },
Duration::from_secs(5),
)
.await;
drop_insts(insts).await;
}
/// Generate SecureModeConfig with specified x25519 private key
pub fn generate_secure_mode_config_with_key(
private_key: &x25519_dalek::StaticSecret,
+10 -1
View File
@@ -281,6 +281,7 @@ impl TunnelListener for FakeTcpTunnelListener {
pub struct FakeTcpTunnelConnector {
addr: url::Url,
ip_to_if_name: IpToIfNameCache,
resolved_addr: Option<SocketAddr>,
}
impl FakeTcpTunnelConnector {
@@ -288,6 +289,7 @@ impl FakeTcpTunnelConnector {
FakeTcpTunnelConnector {
addr,
ip_to_if_name: IpToIfNameCache::new(),
resolved_addr: None,
}
}
}
@@ -314,7 +316,10 @@ fn get_local_ip_for_destination(destination: IpAddr) -> Option<IpAddr> {
#[async_trait::async_trait]
impl TunnelConnector for FakeTcpTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let remote_addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let remote_addr = match self.resolved_addr {
Some(addr) => addr,
None => SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?,
};
let local_ip = get_local_ip_for_destination(remote_addr.ip())
.ok_or(TunnelError::InternalError("Failed to get local ip".into()))?;
@@ -390,6 +395,10 @@ impl TunnelConnector for FakeTcpTunnelConnector {
fn remote_url(&self) -> url::Url {
self.addr.clone()
}
fn set_resolved_addr(&mut self, addr: SocketAddr) {
self.resolved_addr = Some(addr);
}
}
type RecvFut = Pin<Box<dyn Future<Output = Option<(BytesMut, usize)>> + Send + Sync>>;
@@ -57,21 +57,21 @@ cfg_select! {
pub mod windivert;
pub fn create_tun(
_interface_name: &str,
_src_addr: Option<SocketAddr>,
local_addr: SocketAddr,
interface_name: &str,
src_addr: Option<SocketAddr>,
dst_addr: SocketAddr,
) -> io::Result<Arc<dyn super::stack::Tun>> {
match windivert::WinDivertTun::new(local_addr) {
match windivert::WinDivertTun::new(src_addr, dst_addr) {
Ok(tun) => Ok(Arc::new(tun)),
Err(e) => {
tracing::warn!(
?e,
?local_addr,
?dst_addr,
"WinDivertTun init failed, falling back to PnetTun"
);
Ok(Arc::new(pnet::PnetTun::new(
local_addr.to_string().as_str(),
pnet::create_packet_filter(None, local_addr),
interface_name,
pnet::create_packet_filter(src_addr, dst_addr),
)?))
}
}
@@ -80,15 +80,11 @@ impl Drop for WinDivertTun {
}
impl WinDivertTun {
pub fn new(local_addr: SocketAddr) -> io::Result<Self> {
pub fn new(src_addr: Option<SocketAddr>, dst_addr: SocketAddr) -> io::Result<Self> {
let (tx, rx) = tokio::sync::mpsc::channel(1024);
let ip_filter = match local_addr {
SocketAddr::V4(addr) => format!("ip.DstAddr == {}", addr.ip()),
SocketAddr::V6(addr) => format!("ipv6.DstAddr == {}", addr.ip()),
};
// Filter: DstIP == LocalIP AND TCP.
let filter = format!("{} and tcp", ip_filter);
let filter = build_filter(src_addr, dst_addr)?;
tracing::debug!(%filter, "WinDivertTun created with filter");
// Sniff mode: 1 (WINDIVERT_FLAG_SNIFF)
// Layer: Network (0)
@@ -143,6 +139,46 @@ impl WinDivertTun {
}
}
/// Builds a WinDivert filter string matching TCP traffic addressed to
/// `dst_addr`, optionally narrowed to packets originating from `src_addr`.
///
/// # Errors
/// Returns `InvalidInput` when `src_addr` and `dst_addr` belong to different
/// address families (IPv4 vs IPv6).
fn build_filter(src_addr: Option<SocketAddr>, dst_addr: SocketAddr) -> io::Result<String> {
    if let Some(src) = src_addr {
        if src.is_ipv4() != dst_addr.is_ipv4() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "src/dst addr family mismatch",
            ));
        }
    }

    // WinDivert uses different field prefixes for IPv4 ("ip") and IPv6 ("ipv6").
    let ip_proto = |addr: &SocketAddr| if addr.is_ipv4() { "ip" } else { "ipv6" };

    let mut clauses = vec!["tcp".to_owned()];
    clauses.push(format!("{}.DstAddr == {}", ip_proto(&dst_addr), dst_addr.ip()));
    clauses.push(format!("tcp.DstPort == {}", dst_addr.port()));
    if let Some(src) = src_addr {
        clauses.push(format!("{}.SrcAddr == {}", ip_proto(&src), src.ip()));
        clauses.push(format!("tcp.SrcPort == {}", src.port()));
    }
    Ok(clauses.join(" and "))
}
#[async_trait::async_trait]
impl stack::Tun for WinDivertTun {
async fn recv(&self, packet: &mut BytesMut) -> Result<usize, std::io::Error> {
-1
View File
@@ -128,7 +128,6 @@ pub fn build_tcp_packet(
eth_buf.freeze()
}
#[tracing::instrument(ret)]
pub fn parse_ip_packet(
buf: &Bytes,
) -> Option<(MacAddr, MacAddr, IPPacket<'_>, tcp::TcpPacket<'_>)> {
+4 -1
View File
@@ -517,9 +517,12 @@ impl Stack {
{
trace!(?tcp_packet, "Received SYN packet for port {}, ignoring", tcp_packet.get_destination());
continue;
} else if (tcp_packet.get_flags() & tcp::TcpFlags::RST) == 0 {
} else if (tcp_packet.get_flags() & tcp::TcpFlags::RST) != 0 {
info!("Unknown RST TCP packet from {}, ignoring", remote_addr);
continue;
} else {
trace!("Unknown TCP packet from {}, ignoring", remote_addr);
continue;
}
}
None => {
+25 -1
View File
@@ -141,6 +141,7 @@ pub trait TunnelConnector: Send {
fn remote_url(&self) -> url::Url;
fn set_bind_addrs(&mut self, _addrs: Vec<SocketAddr>) {}
fn set_ip_version(&mut self, _ip_version: IpVersion) {}
fn set_resolved_addr(&mut self, _addr: SocketAddr) {}
}
pub fn build_url_from_socket_addr(addr: &String, scheme: &str) -> url::Url {
@@ -371,9 +372,13 @@ impl TryFrom<&url::Url> for TunnelScheme {
}
}
pub(crate) fn get_scheme_by_url(l: &url::Url) -> Result<TunnelScheme, Error> {
l.try_into()
}
macro_rules! __matches_scheme__ {
($url:expr, $( $pattern:pat_param )|+ ) => {
matches!($crate::tunnel::TunnelScheme::try_from(($url).as_ref()), Ok($( $pattern )|+))
matches!($crate::tunnel::get_scheme_by_url(&$url), Ok($( $pattern )|+))
};
}
@@ -393,3 +398,22 @@ macro_rules! __matches_protocol__ {
}
pub(crate) use __matches_protocol__ as matches_protocol;
#[cfg(test)]
mod tests {
    use super::{IpScheme, TunnelScheme, matches_scheme};

    // `matches_scheme!` must work whether the caller passes an owned
    // `url::Url` or a reference to one — both tests pin that contract.
    #[test]
    fn matches_scheme_accepts_owned_url() {
        let url: url::Url = "udp://[2001:db8::1]:11010".parse().unwrap();
        assert!(matches_scheme!(url, TunnelScheme::Ip(IpScheme::Udp)));
    }

    #[test]
    fn matches_scheme_accepts_borrowed_url() {
        let url: url::Url = "udp://[2001:db8::1]:11010".parse().unwrap();
        assert!(matches_scheme!(&url, TunnelScheme::Ip(IpScheme::Udp)));
    }
}
+15
View File
@@ -309,6 +309,8 @@ pub enum CompressorAlgo {
None = 0,
#[cfg(feature = "zstd")]
ZstdDefault = 1,
#[cfg(feature = "lzo")]
Lzo = 2,
}
#[repr(C, packed)]
@@ -323,6 +325,8 @@ impl CompressorTail {
match self.algo {
#[cfg(feature = "zstd")]
1 => Some(CompressorAlgo::ZstdDefault),
#[cfg(feature = "lzo")]
2 => Some(CompressorAlgo::Lzo),
_ => None,
}
}
@@ -730,6 +734,17 @@ impl ZCPacket {
}
}
/// Returns the packet type of the inner packet carried inside a
/// foreign-network packet, or `None` when this packet is not a
/// foreign-network packet or the payload is too short for the nested headers.
pub fn foreign_network_inner_packet_type(&self) -> Option<u8> {
    let outer_type = self.peer_manager_header()?.packet_type;
    if outer_type != PacketType::ForeignNetworkPacket as u8 {
        return None;
    }
    let payload = self.payload();
    let foreign_hdr = ForeignNetworkPacketHeader::ref_from_prefix(payload)?;
    let inner = payload.get(foreign_hdr.get_header_len()..)?;
    Some(PeerManagerHeader::ref_from_prefix(inner)?.packet_type)
}
pub fn foreign_network_packet(mut self) -> Self {
let hdr = self.foreign_network_hdr().unwrap();
let foreign_hdr_len = hdr.get_header_len();
+212 -17
View File
@@ -14,8 +14,8 @@ use derivative::Derivative;
use derive_more::{Deref, DerefMut};
use parking_lot::RwLock;
use quinn::{
ClientConfig, Connection, Endpoint, EndpointConfig, ServerConfig, TransportConfig,
congestion::BbrConfig, default_runtime,
ClientConfig, ConnectError, Connection, Endpoint, EndpointConfig, ServerConfig,
TransportConfig, congestion::BbrConfig, default_runtime,
};
use std::net::{Ipv4Addr, Ipv6Addr};
use std::sync::OnceLock;
@@ -135,6 +135,12 @@ impl<Item> RwPool<Item> {
self.resize();
}
/// Total number of pooled items across the persistent and ephemeral pools.
/// Locks are taken in the same persistent-then-ephemeral order used elsewhere.
fn len(&self) -> usize {
    self.persistent.read().len() + self.ephemeral.read().len()
}
/// try to push an item to the ephemeral pool, return the item if full
fn try_push(&self, item: Item) -> Option<Item> {
let mut pool = self.ephemeral.write();
@@ -168,6 +174,49 @@ impl<Item> RwPool<Item> {
f(&mut persistent.iter().chain(ephemeral.iter()))
}
}
impl RwPool<Endpoint> {
    /// Drops every endpoint for which `keep` returns false, from both the
    /// persistent and the ephemeral pool, and returns how many were dropped.
    /// Each pool's write lock is held only for its own retain pass, and the
    /// pools are resized only when something was actually removed.
    fn retain_endpoints<F>(&self, mut keep: F) -> usize
    where
        F: FnMut(&Endpoint) -> bool,
    {
        let mut removed = 0;
        {
            let mut pool = self.persistent.write();
            let old_len = pool.len();
            pool.retain(|endpoint| keep(endpoint));
            removed += old_len - pool.len();
        }
        {
            let mut pool = self.ephemeral.write();
            let old_len = pool.len();
            pool.retain(|endpoint| keep(endpoint));
            removed += old_len - pool.len();
        }
        if removed > 0 {
            self.resize();
        }
        removed
    }

    /// Removes every pooled endpoint bound to `local_addr`; returns the count.
    fn remove_by_local_addr(&self, local_addr: SocketAddr) -> usize {
        self.retain_endpoints(|endpoint| endpoint.local_addr().ok() != Some(local_addr))
    }

    /// Whether any pooled endpoint (persistent or ephemeral) is bound to `local_addr`.
    fn contains_local_addr(&self, local_addr: SocketAddr) -> bool {
        let bound_here = |endpoint: &Endpoint| endpoint.local_addr().ok() == Some(local_addr);
        self.persistent.read().iter().any(bound_here)
            || self.ephemeral.read().iter().any(bound_here)
    }
}
//endregion
//region endpoint manager
@@ -262,6 +311,20 @@ impl QuicEndpointManager {
QUIC_ENDPOINT_MANAGER.get().unwrap()
}
/// Selects the client endpoint pool matching `ip_version`. The dual-stack
/// pool, when enabled, serves every version; otherwise V4 maps to the IPv4
/// pool and anything else to the IPv6 pool.
fn client_pool(&self, ip_version: IpVersion) -> &RwPool<Endpoint> {
    if self.both.is_enabled() {
        return &self.both;
    }
    if matches!(ip_version, IpVersion::V4) {
        &self.ipv4
    } else {
        &self.ipv6
    }
}
/// Get a QUIC endpoint to be used as a server
///
/// # Arguments
@@ -288,14 +351,8 @@ impl QuicEndpointManager {
Ok(endpoint)
}
/// Get a quic endpoint to be used as a client
///
/// # Arguments
/// * `ip_version`: the IP version of the remote address
fn client(global_ctx: &ArcGlobalCtx, ip_version: IpVersion) -> Result<Endpoint, TunnelError> {
let mgr = Self::load(global_ctx);
let (pool, endpoint) = mgr.create(|mgr| {
fn client_endpoint(&self, ip_version: IpVersion) -> Result<Endpoint, TunnelError> {
let (pool, endpoint) = self.create(|mgr| {
let dual_stack = mgr.both.is_enabled();
let (pool, addr) = match ip_version {
IpVersion::V4 if !dual_stack => (&mgr.ipv4, (Ipv4Addr::UNSPECIFIED, 0).into()),
@@ -318,6 +375,26 @@ impl QuicEndpointManager {
Ok(pool.with_iter(|iter| iter.min_by_key(|e| e.open_connections()).unwrap().clone()))
}
/// Removes `endpoint` from every pool, keyed by its bound local address.
/// Returns 0 when the endpoint no longer reports a local address.
fn remove_endpoint(&self, endpoint: &Endpoint) -> usize {
    match endpoint.local_addr() {
        Ok(local_addr) => self.remove_endpoint_by_local_addr(local_addr),
        Err(_) => 0,
    }
}

/// Removes every pooled endpoint bound to `local_addr` across the v4, v6
/// and dual-stack pools; returns the total number removed.
fn remove_endpoint_by_local_addr(&self, local_addr: SocketAddr) -> usize {
    let mut removed = 0;
    for pool in [&self.ipv4, &self.ipv6, &self.both] {
        removed += pool.remove_by_local_addr(local_addr);
    }
    removed
}

/// Whether any of the three pools still holds an endpoint bound to `local_addr`.
fn contains_local_addr(&self, local_addr: SocketAddr) -> bool {
    [&self.ipv4, &self.ipv6, &self.both]
        .iter()
        .any(|pool| pool.contains_local_addr(local_addr))
}
async fn connect(
global_ctx: &ArcGlobalCtx,
addr: SocketAddr,
@@ -327,14 +404,52 @@ impl QuicEndpointManager {
} else {
IpVersion::V6
};
let endpoint = Self::client(global_ctx, ip_version)?;
let connection = endpoint
.connect(addr, "localhost")
.with_context(|| format!("failed to create connection to {}", addr))?
Self::load(global_ctx)
.connect_with_ip_version(addr, ip_version)
.await
.with_context(|| format!("failed to connect to {}", addr))?;
}
Ok((endpoint, connection))
/// Establishes a QUIC connection to `addr` using a pooled client endpoint.
///
/// If the chosen endpoint has already stopped (`ConnectError::EndpointStopping`,
/// e.g. its driver runtime was dropped), it is pruned from the pools and the
/// connect is retried with another endpoint. The retry budget is the current
/// pool size plus one, so at worst every stale pooled endpoint can be evicted
/// and one freshly created endpoint still gets a chance.
async fn connect_with_ip_version(
    &self,
    addr: SocketAddr,
    ip_version: IpVersion,
) -> Result<(Endpoint, Connection), TunnelError> {
    // One retry per currently pooled endpoint, plus one for a fresh endpoint.
    let max_endpoint_stopping_retries = self.client_pool(ip_version).len().saturating_add(1);
    let mut endpoint_stopping_retries = 0;
    loop {
        let endpoint = self.client_endpoint(ip_version)?;
        let connecting = match endpoint.connect(addr, "localhost") {
            Ok(connecting) => connecting,
            Err(ConnectError::EndpointStopping) => {
                // The endpoint can no longer open connections: remove it from
                // the pools so later callers do not pick it up again, then retry.
                let local_addr = endpoint.local_addr().ok();
                let removed = self.remove_endpoint(&endpoint);
                endpoint_stopping_retries += 1;
                tracing::warn!(
                    ?addr,
                    ?local_addr,
                    removed,
                    "removed stopped quic endpoint and retry connect"
                );
                if endpoint_stopping_retries > max_endpoint_stopping_retries {
                    return Err(anyhow::Error::new(ConnectError::EndpointStopping)
                        .context(format!("failed to create connection to {}", addr))
                        .into());
                }
                continue;
            }
            // Any other connect error is not retryable here.
            Err(e) => {
                return Err(anyhow::Error::new(e)
                    .context(format!("failed to create connection to {}", addr))
                    .into());
            }
        };
        let connection = connecting
            .await
            .with_context(|| format!("failed to connect to {}", addr))?;
        return Ok((endpoint, connection));
    }
}
}
//endregion
@@ -398,6 +513,18 @@ impl QuicTunnelListener {
}
}
impl Drop for QuicTunnelListener {
    /// Evicts this listener's endpoint from the shared endpoint manager so
    /// the pools do not keep a reference to a dead listener's endpoint.
    fn drop(&mut self) {
        let local_addr = self
            .endpoint
            .as_ref()
            .and_then(|endpoint| endpoint.local_addr().ok());
        if let Some(local_addr) = local_addr {
            QuicEndpointManager::load(&self.global_ctx).remove_endpoint_by_local_addr(local_addr);
        }
    }
}
#[async_trait::async_trait]
impl TunnelListener for QuicTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
@@ -432,6 +559,7 @@ pub struct QuicTunnelConnector {
addr: url::Url,
global_ctx: ArcGlobalCtx,
ip_version: IpVersion,
resolved_addr: Option<SocketAddr>,
}
impl QuicTunnelConnector {
@@ -440,6 +568,7 @@ impl QuicTunnelConnector {
addr,
global_ctx,
ip_version: IpVersion::Both,
resolved_addr: None,
}
}
}
@@ -447,7 +576,10 @@ impl QuicTunnelConnector {
#[async_trait::async_trait]
impl TunnelConnector for QuicTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
let addr = match self.resolved_addr {
Some(addr) => addr,
None => SocketAddr::from_url(self.addr.clone(), self.ip_version).await?,
};
let (endpoint, connection) = QuicEndpointManager::connect(&self.global_ctx, addr).await?;
let local_addr = endpoint.local_addr()?;
@@ -484,6 +616,10 @@ impl TunnelConnector for QuicTunnelConnector {
fn set_ip_version(&mut self, ip_version: IpVersion) {
self.ip_version = ip_version;
}
fn set_resolved_addr(&mut self, addr: SocketAddr) {
self.resolved_addr = Some(addr);
}
}
#[cfg(test)]
@@ -507,6 +643,20 @@ mod tests {
get_mock_global_ctx_with_network(Some(identity))
}
/// Builds a client endpoint whose async driver has already been torn down, so
/// `Endpoint::connect` fails with `ConnectError::EndpointStopping`. This is
/// achieved by creating the endpoint on a throwaway runtime and then dropping
/// that runtime. Returns the endpoint together with its bound local address.
fn stopped_client_endpoint() -> (Endpoint, SocketAddr) {
    let rt = Builder::new_current_thread().enable_all().build().unwrap();
    let endpoint = rt.block_on(async {
        QuicEndpointManager::try_create((Ipv4Addr::UNSPECIFIED, 0).into(), false).unwrap()
    });
    let local_addr = endpoint.local_addr().unwrap();
    // Dropping the runtime stops the endpoint's internal tasks.
    drop(rt);
    // Sanity check: the endpoint really is in the stopped state.
    assert!(matches!(
        endpoint.connect("127.0.0.1:1".parse().unwrap(), "localhost"),
        Err(ConnectError::EndpointStopping)
    ));
    (endpoint, local_addr)
}
#[test]
fn quic_pingpong() {
RUNTIME.block_on(quic_pingpong_impl())
@@ -582,6 +732,51 @@ mod tests {
assert!(port > 0);
}
#[test]
fn listener_drop_removes_persistent_endpoint() {
    RUNTIME.block_on(listener_drop_removes_persistent_endpoint_impl())
}

/// Verifies that dropping a `QuicTunnelListener` evicts its endpoint from the
/// shared endpoint manager, so the pool does not accumulate dead endpoints.
async fn listener_drop_removes_persistent_endpoint_impl() {
    let global_ctx = global_ctx();
    let endpoint_addr = {
        let mut listener =
            QuicTunnelListener::new("quic://127.0.0.1:0".parse().unwrap(), global_ctx.clone());
        listener.listen().await.unwrap();
        let endpoint_addr = listener.endpoint.as_ref().unwrap().local_addr().unwrap();
        // While the listener is alive its endpoint must be pooled.
        assert!(QuicEndpointManager::load(&global_ctx).contains_local_addr(endpoint_addr));
        endpoint_addr
    };
    // The listener was dropped at the end of the block above.
    assert!(!QuicEndpointManager::load(&global_ctx).contains_local_addr(endpoint_addr));
}
#[test]
fn connect_removes_stopped_endpoints_and_retries() {
    // The stopped endpoints are created outside RUNTIME: each helper spins up
    // (and drops) its own runtime to force the endpoint into the stopped state.
    let (stopped_endpoint_a, stopped_addr_a) = stopped_client_endpoint();
    let (stopped_endpoint_b, stopped_addr_b) = stopped_client_endpoint();
    RUNTIME.block_on(async move {
        let mgr = QuicEndpointManager::new(2);
        mgr.both.push(stopped_endpoint_a);
        mgr.both.push(stopped_endpoint_b);
        assert!(mgr.contains_local_addr(stopped_addr_a));
        assert!(mgr.contains_local_addr(stopped_addr_b));
        // Port 0 is never a valid destination, so the connect must ultimately
        // fail — but only after both stopped endpoints were pruned and retried past.
        let err = mgr
            .connect_with_ip_version("127.0.0.1:0".parse().unwrap(), IpVersion::V4)
            .await
            .unwrap_err();
        let err = format!("{:?}", err);
        assert!(
            err.contains("invalid remote address"),
            "unexpected error: {}",
            err
        );
        assert!(!mgr.contains_local_addr(stopped_addr_a));
        assert!(!mgr.contains_local_addr(stopped_addr_b));
    });
}
#[test]
fn invalid_peer_addr() {
RUNTIME.block_on(invalid_peer_addr_impl())
+35 -1
View File
@@ -129,6 +129,7 @@ pub struct TcpTunnelConnector {
bind_addrs: Vec<SocketAddr>,
ip_version: IpVersion,
resolved_addr: Option<SocketAddr>,
}
impl TcpTunnelConnector {
@@ -137,6 +138,7 @@ impl TcpTunnelConnector {
addr,
bind_addrs: vec![],
ip_version: IpVersion::Both,
resolved_addr: None,
}
}
@@ -175,7 +177,10 @@ impl TcpTunnelConnector {
#[async_trait]
impl super::TunnelConnector for TcpTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
let addr = match self.resolved_addr {
Some(addr) => addr,
None => SocketAddr::from_url(self.addr.clone(), self.ip_version).await?,
};
if self.bind_addrs.is_empty() {
self.connect_with_default_bind(addr).await
} else {
@@ -194,6 +199,10 @@ impl super::TunnelConnector for TcpTunnelConnector {
fn set_ip_version(&mut self, ip_version: IpVersion) {
self.ip_version = ip_version;
}
fn set_resolved_addr(&mut self, addr: SocketAddr) {
self.resolved_addr = Some(addr);
}
}
#[cfg(test)]
@@ -294,6 +303,31 @@ mod tests {
);
}
#[tokio::test]
async fn connector_uses_pre_resolved_addr_without_resolving_url() {
    let mut listener = TcpTunnelListener::new("tcp://127.0.0.1:0".parse().unwrap());
    listener.listen().await.unwrap();
    let port = listener.local_url().port().unwrap();
    // The URL host is intentionally unresolvable; connect() must still succeed
    // because a pre-resolved address is supplied via set_resolved_addr().
    let source_url: url::Url = format!("tcp://unresolvable.invalid:{port}")
        .parse()
        .unwrap();
    let resolved_addr: SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
    let mut connector = TcpTunnelConnector::new(source_url.clone());
    connector.set_resolved_addr(resolved_addr);
    let accept_task = tokio::spawn(async move { listener.accept().await.unwrap() });
    let tunnel = connector.connect().await.unwrap();
    let _accepted_tunnel = accept_task.await.unwrap();
    let info = tunnel.info().unwrap();
    // The reported remote URL keeps the original (unresolved) form, while the
    // resolved address reflects what was actually dialed.
    assert_eq!(info.remote_addr.unwrap().url, source_url.to_string());
    let resolved_remote_addr: url::Url = info.resolved_remote_addr.unwrap().into();
    assert_eq!(resolved_remote_addr.host_str(), Some("127.0.0.1"));
    assert_eq!(resolved_remote_addr.port(), Some(port));
}
#[tokio::test]
async fn test_alloc_port() {
// v4
+10 -1
View File
@@ -682,6 +682,7 @@ pub struct UdpTunnelConnector {
addr: url::Url,
bind_addrs: Vec<SocketAddr>,
ip_version: IpVersion,
resolved_addr: Option<SocketAddr>,
}
impl UdpTunnelConnector {
@@ -690,6 +691,7 @@ impl UdpTunnelConnector {
addr,
bind_addrs: vec![],
ip_version: IpVersion::Both,
resolved_addr: None,
}
}
@@ -906,7 +908,10 @@ impl UdpTunnelConnector {
#[async_trait]
impl super::TunnelConnector for UdpTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
let addr = match self.resolved_addr {
Some(addr) => addr,
None => SocketAddr::from_url(self.addr.clone(), self.ip_version).await?,
};
if self.bind_addrs.is_empty() || addr.is_ipv6() {
self.connect_with_default_bind(addr).await
} else {
@@ -925,6 +930,10 @@ impl super::TunnelConnector for UdpTunnelConnector {
fn set_ip_version(&mut self, ip_version: IpVersion) {
self.ip_version = ip_version;
}
fn set_resolved_addr(&mut self, addr: SocketAddr) {
self.resolved_addr = Some(addr);
}
}
#[cfg(test)]
+13 -9
View File
@@ -198,6 +198,7 @@ impl TunnelListener for WsTunnelListener {
pub struct WsTunnelConnector {
addr: url::Url,
ip_version: IpVersion,
resolved_addr: Option<SocketAddr>,
bind_addrs: Vec<SocketAddr>,
}
@@ -207,6 +208,7 @@ impl WsTunnelConnector {
WsTunnelConnector {
addr,
ip_version: IpVersion::Both,
resolved_addr: None,
bind_addrs: vec![],
}
@@ -214,11 +216,10 @@ impl WsTunnelConnector {
async fn connect_with(
addr: url::Url,
ip_version: IpVersion,
socket_addr: SocketAddr,
tcp_socket: TcpSocket,
) -> Result<Box<dyn Tunnel>, TunnelError> {
let is_wss = is_wss(&addr)?;
let socket_addr = SocketAddr::from_url(addr.clone(), ip_version).await?;
let stream = tcp_socket.connect(socket_addr).await?;
if let Err(error) = stream.set_nodelay(true) {
tracing::warn!(?error, "set_nodelay fail in ws connect");
@@ -273,7 +274,7 @@ impl WsTunnelConnector {
} else {
TcpSocket::new_v6()?
};
Self::connect_with(self.addr.clone(), self.ip_version, socket).await
Self::connect_with(self.addr.clone(), addr, socket).await
}
async fn connect_with_custom_bind(
@@ -285,11 +286,7 @@ impl WsTunnelConnector {
for bind_addr in self.bind_addrs.iter() {
tracing::info!(?bind_addr, ?addr, "bind addr");
match bind().addr(*bind_addr).only_v6(true).call() {
Ok(socket) => futures.push(Self::connect_with(
self.addr.clone(),
self.ip_version,
socket,
)),
Ok(socket) => futures.push(Self::connect_with(self.addr.clone(), addr, socket)),
Err(error) => {
tracing::error!(?bind_addr, ?addr, ?error, "bind addr fail");
continue;
@@ -304,7 +301,10 @@ impl WsTunnelConnector {
#[async_trait::async_trait]
impl TunnelConnector for WsTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
let addr = match self.resolved_addr {
Some(addr) => addr,
None => SocketAddr::from_url(self.addr.clone(), self.ip_version).await?,
};
if self.bind_addrs.is_empty() || addr.is_ipv6() {
self.connect_with_default_bind(addr).await
} else {
@@ -323,6 +323,10 @@ impl TunnelConnector for WsTunnelConnector {
fn set_bind_addrs(&mut self, addrs: Vec<SocketAddr>) {
self.bind_addrs = addrs;
}
fn set_resolved_addr(&mut self, addr: SocketAddr) {
self.resolved_addr = Some(addr);
}
}
#[cfg(test)]
+10 -1
View File
@@ -598,6 +598,7 @@ pub struct WgTunnelConnector {
bind_addrs: Vec<SocketAddr>,
ip_version: IpVersion,
resolved_addr: Option<SocketAddr>,
}
impl Debug for WgTunnelConnector {
@@ -617,6 +618,7 @@ impl WgTunnelConnector {
udp: None,
bind_addrs: vec![],
ip_version: IpVersion::Both,
resolved_addr: None,
}
}
@@ -702,7 +704,10 @@ impl WgTunnelConnector {
impl super::TunnelConnector for WgTunnelConnector {
#[tracing::instrument]
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
let addr = match self.resolved_addr {
Some(addr) => addr,
None => SocketAddr::from_url(self.addr.clone(), self.ip_version).await?,
};
if addr.is_ipv6() {
return self.connect_with_ipv6(addr).await;
@@ -744,6 +749,10 @@ impl super::TunnelConnector for WgTunnelConnector {
fn set_ip_version(&mut self, ip_version: IpVersion) {
self.ip_version = ip_version;
}
fn set_resolved_addr(&mut self, addr: SocketAddr) {
self.resolved_addr = Some(addr);
}
}
#[cfg(test)]
-638
View File
@@ -1,638 +0,0 @@
//! # Guard Module Utilities
//!
//! This module provides mechanisms for scope-based resource management and deferred execution.
//!
//! ### ⚠️ Critical Usage Note: Diverging Expressions
//!
//! Do not use "naked" diverging expressions—such as `panic!`, `todo!`, or `loop {}`—as
//! the sole content of sync guard closure. This prevents the compiler from
//! distinguishing between synchronous (`ASYNC = false`) and asynchronous
//! (`ASYNC = true`) implementations, leading to a type inference error (E0277).
//!
//! ### Technical Context
//!
//! The `!` (Never Type) is a bottom type that can be coerced into any other type.
//! Because it satisfies both the `()` requirement for sync guards and the `Future`
//! requirement for async guards, the compiler encounters an inference deadlock.
//!
//! ### Workaround
//!
//! For macros like `guard!` or `guarded!`, force the closure to resolve to `()`
//! by explicitly setting the guard to `sync`:
//!
//! ```rust
//! let _g = guard!([val] sync {
//! panic!("critical failure");
//! });
//! ```
use crate::utils::task::{DetachableTask, TaskSpawner};
use std::fmt::Debug;
use std::mem::ManuallyDrop;
use std::ops::{Deref, DerefMut};
/// A deferred action that consumes a `Context` when the guard fires.
/// The `ASYNC` const parameter selects between a synchronous closure
/// (`ASYNC = false`) and a closure producing a future (`ASYNC = true`).
pub trait CallableGuard<const ASYNC: bool, Context> {
    type Output;
    fn call(self, context: Context) -> Self::Output;
}

/// Synchronous guards: any `FnOnce(Context)` runs inline and yields `()`.
impl<Context, Guard> CallableGuard<false, Context> for Guard
where
    Guard: FnOnce(Context),
{
    type Output = ();

    fn call(self, context: Context) -> Self::Output {
        self(context)
    }
}

/// Asynchronous guards: a closure returning a future is wrapped in a
/// `DetachableTask`, so the caller may await it or let it run detached.
impl<Context, Guard, Task, _R> CallableGuard<true, Context> for Guard
where
    Guard: FnOnce(Context) -> Task + Send + 'static,
    Task: Future<Output = _R> + Send + 'static,
    _R: Send + 'static,
{
    type Output = DetachableTask<TaskSpawner<Task>, Task>;

    fn call(self, context: Context) -> Self::Output {
        DetachableTask::new(self(context))
    }
}
/// Scope guard pairing a `Context` value with a guard action that consumes it.
/// The action runs on drop; `trigger` runs it eagerly and `defuse` cancels it,
/// handing the context back. Both fields are `ManuallyDrop` so they can be
/// moved out exactly once by whichever of drop / `trigger` / `defuse` fires.
pub struct ContextGuard<const ASYNC: bool, Context, Guard: CallableGuard<ASYNC, Context>> {
    context: ManuallyDrop<Context>,
    guard: ManuallyDrop<Guard>,
}

/// Read access to the guarded context.
impl<const ASYNC: bool, Context, Guard: CallableGuard<ASYNC, Context>> Deref
    for ContextGuard<ASYNC, Context, Guard>
{
    type Target = Context;

    fn deref(&self) -> &Self::Target {
        &self.context
    }
}

/// Mutable access to the guarded context.
impl<const ASYNC: bool, Context, Guard: CallableGuard<ASYNC, Context>> DerefMut
    for ContextGuard<ASYNC, Context, Guard>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.context
    }
}

/// Debug output shows only the context; the guard closure has no useful
/// representation, hence `finish_non_exhaustive`.
impl<const ASYNC: bool, Context: Debug, Guard: CallableGuard<ASYNC, Context>> Debug
    for ContextGuard<ASYNC, Context, Guard>
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = if ASYNC {
            "ContextGuard::Async"
        } else {
            "ContextGuard::Sync"
        };
        f.debug_struct(name)
            .field("context", &self.context)
            .finish_non_exhaustive()
    }
}
impl<const ASYNC: bool, Context, Guard: CallableGuard<ASYNC, Context>>
    ContextGuard<ASYNC, Context, Guard>
{
    /// Creates a new `ContextGuard`.
    ///
    /// **Note on generics:** The seemingly unused `_R` generic parameter and the
    /// `Guard: FnOnce(Context) -> _R` trait bound are intentionally included.
    /// They act as a hint to help the compiler infer closure types.
    pub fn new<_R>(context: Context, guard: Guard) -> Self
    where
        Guard: FnOnce(Context) -> _R,
    {
        // Both fields are wrapped in ManuallyDrop so trigger/defuse/drop can
        // each move them out exactly once without running destructors twice.
        ContextGuard {
            context: ManuallyDrop::new(context),
            guard: ManuallyDrop::new(guard),
        }
    }
}
impl<const ASYNC: bool, Context, Guard: CallableGuard<ASYNC, Context>>
    ContextGuard<ASYNC, Context, Guard>
{
    /// Moves both fields out and runs the guard action with the context.
    ///
    /// # Safety
    /// Must be called at most once, and neither field may be used or dropped
    /// afterwards. The callers below uphold this: `trigger`/`defuse` wrap
    /// `self` in `ManuallyDrop` so `Drop` never runs, and `Drop::drop` itself
    /// runs at most once.
    unsafe fn call(&mut self) -> Guard::Output {
        unsafe {
            // SAFETY: caller guarantees single use of each field; see above.
            let context = ManuallyDrop::take(&mut self.context);
            let guard = ManuallyDrop::take(&mut self.guard);
            guard.call(context)
        }
    }

    /// Runs the guard action now instead of waiting for drop.
    pub fn trigger(self) -> Guard::Output {
        let mut this = ManuallyDrop::new(self);
        // SAFETY: `self` is in ManuallyDrop, so `Drop` will not run and
        // `call` happens exactly once.
        unsafe { this.call() }
    }

    /// Cancels the guard action and returns the context untouched.
    pub fn defuse(self) -> Context {
        let mut this = ManuallyDrop::new(self);
        // SAFETY: `Drop` is suppressed via ManuallyDrop; the guard closure is
        // dropped here and the context is moved out, each exactly once.
        unsafe {
            ManuallyDrop::drop(&mut this.guard);
            ManuallyDrop::take(&mut this.context)
        }
    }
}
impl<const ASYNC: bool, Context, Guard: CallableGuard<ASYNC, Context>> Drop
    for ContextGuard<ASYNC, Context, Guard>
{
    fn drop(&mut self) {
        // SAFETY: drop runs at most once, and `trigger`/`defuse` prevent it
        // from running at all by wrapping `self` in ManuallyDrop first.
        // For async guards the returned DetachableTask is dropped here, which
        // presumably detaches the task so the action still runs — TODO confirm
        // against DetachableTask's Drop impl.
        let _: Guard::Output = unsafe { self.call() };
    }
}
// region macro

/// Internal parser for [`guarded!`], [`guard!`] and [`defer!`].
///
/// The `@parse*` stages strip the invocation down piece by piece:
///   1. `@parse@action` — optional `name =>` prefix naming the guard variable,
///   2. `@parse@async`  — optional leading `sync` keyword (forces `ASYNC = false`;
///      otherwise `_` lets the compiler infer sync vs async),
///   3. `@parse@move`   — optional `move` capture keyword,
///   4. `@parse`        — optional `[args…]` context list plus the guard body
///      (block or bare expression).
/// The accumulator rules then walk the `[args…]` list (tracking `mut`), and the
/// two terminal rules expand either to a `let` statement (`@stmt`) or to a bare
/// `ContextGuard` expression (`@expr`).
#[doc(hidden)]
#[macro_export]
macro_rules! __guarded {
    // --- stage 1: optional `name =>` guard-variable binding (default: __guard)
    (@parse@action $guard:ident => $($tt:tt)*) => {
        $crate::__guarded! { @parse@async action: [ @stmt $guard ] ; $($tt)* }
    };
    (@parse@action $($tt:tt)*) => {
        $crate::__guarded! { @parse@async action: [ @stmt __guard ] ; $($tt)* }
    };

    // --- stage 2: optional `sync` keyword selects ASYNC = false; `_` infers
    (@parse@async action: [ $($action:tt)* ] ; sync $($tt:tt)*) => {
        $crate::__guarded! { @parse@move action: [ $($action)* ] ; async: [ false ] ; $($tt)* }
    };
    (@parse@async action: [ $($action:tt)* ] ; $($tt:tt)*) => {
        $crate::__guarded! { @parse@move action: [ $($action)* ] ; async: [ _ ] ; $($tt)* }
    };

    // --- stage 3: optional `move` capture keyword
    (@parse@move action: [ $($action:tt)* ] ; async: [ $async:tt ] ; move $($tt:tt)*) => {
        $crate::__guarded! { @parse action: [ $($action)* ] ; async: [ $async ] ; move: [ move ] ; $($tt)* }
    };
    (@parse@move action: [ $($action:tt)* ] ; async: [ $async:tt ] ; $($tt:tt)*) => {
        $crate::__guarded! { @parse action: [ $($action)* ] ; async: [ $async ] ; move: [] ; $($tt)* }
    };

    // --- stage 4: `[args…]` + block body — hand off to the accumulator with a
    // trailing comma appended so the per-argument rules always see `$arg ,`.
    (
        @parse action: [ $($action:tt)* ] ; async: [ $async:tt ] ; move: [ $($move:tt)? ] ;
        [ $($args:tt)* ] $body:block
    ) => {
        $crate::__guarded! {
            action: [ $($action)* ]
            async: [ $async ]
            move: [ $($move)? ]
            mut: []
            rest: [ $($args)* , ]
            args: []
            vars: []
            body: [ $body ]
        }
    };
    // block body with no context list
    (
        @parse action: [ $($action:tt)* ] ; async: [ $async:tt ] ; move: [ $($move:tt)? ] ;
        $body:block
    ) => {
        $crate::__guarded! {
            @parse action: [ $($action)* ] ; async: [ $async ] ; move: [ $($move)? ] ;
            [] $body
        }
    };
    // `[args…]` + bare expression body — wrap the body in braces and re-parse
    (
        @parse action: [ $($action:tt)* ] ; async: [ $async:tt ] ; move: [ $($move:tt)? ] ;
        [ $($args:tt)* ] $($body:tt)*
    ) => {
        $crate::__guarded! {
            @parse action: [ $($action)* ] ; async: [ $async ] ; move: [ $($move)? ] ;
            [ $($args)* ] { $($body)* }
        }
    };
    // bare expression body with no context list
    (
        @parse action: [ $($action:tt)* ] ; async: [ $async:tt ] ; move: [ $($move:tt)? ] ;
        $($body:tt)*
    ) => {
        $crate::__guarded! {
            @parse action: [ $($action)* ] ; async: [ $async ] ; move: [ $($move)? ] ;
            [] { $($body)* }
        }
    };

    // --- accumulator: `mut arg` — record the var as mutable and remember that
    // at least one context var is mut (the guard binding itself becomes mut).
    (
        action: [ $($action:tt)* ]
        async: [ $async:tt ]
        move: [ $($move:tt)? ]
        mut: [ $($mut:tt)? ]
        rest: [ mut $arg:ident , $($rest:tt)* ]
        args: [ $($args:ident)* ]
        vars: [ $($vars:tt)* ]
        body: [ $body:expr ]
    ) => {
        $crate::__guarded! {
            action: [ $($action)* ]
            async: [ $async ]
            move: [ $($move)? ]
            mut: [ mut ]
            rest: [ $($rest)* ]
            args: [ $($args)* $arg ]
            vars: [ $($vars)* [mut $arg] ]
            body: [ $body ]
        }
    };
    // --- accumulator: plain `arg`
    (
        action: [ $($action:tt)* ]
        async: [ $async:tt ]
        move: [ $($move:tt)? ]
        mut: [ $($mut:tt)? ]
        rest: [ $arg:ident , $($rest:tt)* ]
        args: [ $($args:ident)* ]
        vars: [ $($vars:tt)* ]
        body: [ $body:expr ]
    ) => {
        $crate::__guarded! {
            action: [ $($action)* ]
            async: [ $async ]
            move: [ $($move)? ]
            mut: [ $($mut)? ]
            rest: [ $($rest)* ]
            args: [ $($args)* $arg ]
            vars: [ $($vars)* [$arg] ]
            body: [ $body ]
        }
    };

    // --- terminal (@stmt): bind the guard to `$guard` and re-expose the
    // context vars by reference so they stay usable after the macro call.
    (
        action: [ @stmt $guard:ident ]
        async: [ $async:tt ]
        move: [ $($move:tt)? ]
        mut: [ $($mut:tt)? ]
        rest: [ $(,)* ]
        args: [ $($args:ident)* ]
        vars: [ $([$($vars:tt)*])* ]
        body: [ $body:expr ]
    ) => {
        let $($mut)? $guard = $crate::utils::guard::ContextGuard::<$async, _, _>::new(
            ( $($args),* ),
            $($move)? |#[allow(unused_parens, unused_mut)] ( $($($vars)*),* )| $body
        );
        #[allow(unused_parens, unused_variables, clippy::toplevel_ref_arg)]
        let ( $(ref $($vars)*),* ) = *$guard;
    };
    // --- terminal (@expr): evaluate to the ContextGuard itself.
    (
        action: [ @expr ]
        async: [ $async:tt ]
        move: [ $($move:tt)? ]
        mut: [ $($mut:tt)? ]
        rest: [ $(,)* ]
        args: [ $($args:ident)* ]
        vars: [ $([$($vars:tt)*])* ]
        body: [ $body:expr ]
    ) => {
        $crate::utils::guard::ContextGuard::<$async, _, _>::new(
            ( $($args),* ),
            $($move)? |#[allow(unused_parens)] ( $($($vars)*),* )| $body
        )
    };
}
/// Creates a [`ContextGuard`] object, binding it to a variable with the specified name (e.g., `_guard`).
/// Context variables specified in the macro invocation are available within and after the guard body.
///
/// **Note:** For usage with `panic!` or `loop`, see the [module-level documentation](self)
/// regarding type inference deadlocks.
#[macro_export]
macro_rules! guarded {
    ( $($tt:tt)* ) => {
        // `@parse@action` lets the caller optionally write `name => …`;
        // the parser then expands into a `let` binding (`@stmt`).
        $crate::__guarded! { @parse@action $($tt)* }
    };
}

/// Creates a [`ContextGuard`] object, without binding it to a variable.
/// Context variables specified in the macro invocation are available within the guard body.
///
/// **Note:** For usage with `panic!` or `loop`, see the [module-level documentation](self)
/// regarding type inference deadlocks.
#[macro_export]
macro_rules! guard {
    ( $($tt:tt)* ) => {
        // `@expr` makes the parser expand into a bare ContextGuard expression.
        $crate::__guarded! { @parse@async action: [ @expr ] ; $($tt)* }
    };
}

// endregion

/// Alias for [`guarded!`].
///
/// **Note:** For usage with `panic!` or `loop`, see the [module-level documentation](self)
/// regarding type inference deadlocks.
#[macro_export]
macro_rules! defer {
    ( $($tt:tt)* ) => {
        $crate::guarded! { $($tt)* }
    };
}
// Tests for the scope-guard macros (`guard!`, `guarded!`, and the `defer!`
// alias): sync vs. async bodies, the capture-list forms (`move`, `mut`,
// trailing comma, bare-expression body), and the three ways a guard ends —
// `trigger()` (run now), `defuse()` (return context, never run), and drop
// (run / detach at scope exit).
#[cfg(test)]
mod tests {
    use std::panic::catch_unwind;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::time::Duration;
    use tokio::sync::oneshot;

    // `trigger()` runs a sync guard body exactly once with the captured value.
    #[test]
    fn trigger_sync_executes_once() {
        let called = Arc::new(AtomicUsize::new(0));
        let observed = Arc::new(AtomicUsize::new(0));
        let value = 7usize;
        let guard = {
            let called = called.clone();
            let observed = observed.clone();
            crate::guard!(move [value] {
                called.fetch_add(1, Ordering::SeqCst);
                observed.store(value, Ordering::SeqCst);
            })
        };
        guard.trigger();
        assert_eq!(called.load(Ordering::SeqCst), 1);
        assert_eq!(observed.load(Ordering::SeqCst), 7);
    }

    // `defuse()` hands back the captured context untouched and the body never
    // runs (the string stays "hello", the counter stays 0).
    #[test]
    fn defuse_sync_returns_context_without_running_guard() {
        let called = Arc::new(AtomicUsize::new(0));
        let value = String::from("hello");
        let guard = {
            let called = called.clone();
            crate::guard!(move [mut value] {
                value.push_str(" world");
                called.fetch_add(1, Ordering::SeqCst);
            })
        };
        let context = guard.defuse();
        assert_eq!(context, "hello");
        assert_eq!(called.load(Ordering::SeqCst), 0);
    }

    // The statement form `guarded!` fires its body when the enclosing scope
    // is left.
    #[test]
    fn drop_sync_triggers_guard() {
        let called = Arc::new(AtomicUsize::new(0));
        {
            let called = called.clone();
            crate::guarded!([called] {
                called.fetch_add(1, Ordering::SeqCst);
            });
        }
        assert_eq!(called.load(Ordering::SeqCst), 1);
    }

    // A panic inside the guard body is propagated out of the drop, observable
    // via `catch_unwind`.
    #[test]
    fn drop_propagates_guard_panic() {
        let dropped = catch_unwind(|| {
            guarded! {
                sync {
                    panic!("boom");
                }
            }
        });
        assert!(dropped.is_err());
    }

    // For an async body, `trigger()` returns the future so the caller can run
    // it inline instead of detaching it.
    #[tokio::test]
    async fn trigger_async_returns_runnable_task() {
        let called = Arc::new(AtomicUsize::new(0));
        let value = 5usize;
        let guard = {
            let called = called.clone();
            crate::guard!(move [value] async move {
                called.fetch_add(value, Ordering::SeqCst);
            })
        };
        let task = guard.trigger();
        task.await;
        assert_eq!(called.load(Ordering::SeqCst), 5);
    }

    // Dropping a guard whose body yields a future detaches that future to the
    // runtime; the oneshot proves the detached task actually ran.
    #[tokio::test]
    async fn drop_async_detaches_task() {
        let (tx, rx) = oneshot::channel();
        {
            let mut tx = Some(tx);
            let value = 9usize;
            // Sync prelude takes the sender, then returns the async block that
            // will be spawned on drop.
            let _guard = crate::guard!(move [value] {
                let tx = tx.take();
                async move {
                    if let Some(tx) = tx {
                        let _ = tx.send(value);
                    }
                }
            });
        }
        let value = tokio::time::timeout(Duration::from_secs(1), rx)
            .await
            .expect("detached task should run")
            .expect("detached task should send value");
        assert_eq!(value, 9);
    }

    // `defuse()` on an async guard: the future is never spawned, even after
    // giving the runtime a chance to run anything that might have leaked.
    #[tokio::test]
    async fn defuse_async_does_not_execute() {
        let called = Arc::new(AtomicUsize::new(0));
        let value = 11usize;
        let guard = {
            let called = called.clone();
            crate::guard!(move [value] async move {
                called.fetch_add(value, Ordering::SeqCst);
            })
        };
        let context = guard.defuse();
        assert_eq!(context, 11);
        tokio::time::sleep(Duration::from_millis(20)).await;
        assert_eq!(called.load(Ordering::SeqCst), 0);
    }

    // Named form `guarded!(name => [..] ..)`: the captured names are rebound
    // to handles into the guard's context (hence the `*` derefs — presumably
    // Deref-based; the macro definition above confirms the exact mechanism),
    // so mutations made before the explicit `drop` are seen by the body
    // (11 + 2 = 13).
    #[test]
    fn guarded_named_mut_binding_updates_context_before_drop() {
        let committed = Arc::new(AtomicUsize::new(0));
        {
            let value = 1usize;
            let step = 2usize;
            let committed = committed.clone();
            crate::guarded!(scope_guard => [mut value, step] {
                committed.store(value + step, Ordering::SeqCst);
            });
            *value += 10;
            assert_eq!(*value, 11);
            assert_eq!(*step, 2);
            drop(scope_guard);
        }
        assert_eq!(committed.load(Ordering::SeqCst), 13);
    }

    // The body may be a bare expression — no braces required.
    #[test]
    fn guard_expression_parses_without_braces() {
        let observed = Arc::new(AtomicUsize::new(0));
        let value = 3usize;
        let observed_clone = observed.clone();
        let guard = crate::guard!([value] observed_clone.store(value, Ordering::SeqCst));
        guard.trigger();
        assert_eq!(observed.load(Ordering::SeqCst), 3);
    }

    // `defer!` is an alias for the statement form of `guarded!`.
    #[test]
    fn defer_alias_behaves_like_guarded_statement() {
        let called = Arc::new(AtomicUsize::new(0));
        {
            let n = 42usize;
            let called = called.clone();
            crate::defer!([n] {
                called.store(n, Ordering::SeqCst);
            });
        }
        assert_eq!(called.load(Ordering::SeqCst), 42);
    }

    // Exhaustive syntax matrix: every accepted invocation shape of `guard!`
    // and `guarded!` exercised back to back.
    #[tokio::test]
    async fn guard_and_guarded_macro_usage_matrix() {
        // 1) guard!: block body + trailing comma args + trigger()
        let sink = Arc::new(AtomicUsize::new(0));
        let v = 1usize;
        let sink_clone = sink.clone();
        let g1 = crate::guard!([v,] {
            sink_clone.store(v, Ordering::SeqCst);
        });
        g1.trigger();
        assert_eq!(sink.load(Ordering::SeqCst), 1);
        // 2) guard!: expression body (no braces)
        let sink = Arc::new(AtomicUsize::new(0));
        let sink_clone = sink.clone();
        let v = 2usize;
        let g2 = crate::guard!([v] sink_clone.store(v, Ordering::SeqCst));
        g2.trigger();
        assert_eq!(sink.load(Ordering::SeqCst), 2);
        // 3) guard!: explicit sync + no args form
        let sink = Arc::new(AtomicUsize::new(0));
        let sink_clone = sink.clone();
        let g3 = crate::guard!(sync {
            sink_clone.store(3, Ordering::SeqCst);
        });
        g3.trigger();
        assert_eq!(sink.load(Ordering::SeqCst), 3);
        // 4) guard!: move capture + defuse() prevents execution
        let sink = Arc::new(AtomicUsize::new(0));
        let owned = String::from("owned");
        let sink_clone = sink.clone();
        let g4 = crate::guard!(move [owned] {
            if owned == "owned" {
                sink_clone.store(4, Ordering::SeqCst);
            }
        });
        let context = g4.defuse();
        assert_eq!(context, "owned");
        assert_eq!(sink.load(Ordering::SeqCst), 0);
        // 5) guard!: async block inference + trigger() returns task
        let sink = Arc::new(AtomicUsize::new(0));
        let sink_clone = sink.clone();
        let n = 5usize;
        let g5 = crate::guard!([n] async move {
            sink_clone.fetch_add(n, Ordering::SeqCst);
        });
        g5.trigger().await;
        assert_eq!(sink.load(Ordering::SeqCst), 5);
        // 6) guarded!: named binding + mut arg visible outside + explicit drop
        let sink = Arc::new(AtomicUsize::new(0));
        {
            let value = 6usize;
            let delta = 1usize;
            let sink_clone = sink.clone();
            crate::guarded!(named => [mut value, delta] {
                sink_clone.store(value + delta, Ordering::SeqCst);
            });
            *value += 10;
            assert_eq!(*value, 16);
            assert_eq!(*delta, 1);
            drop(named);
        }
        assert_eq!(sink.load(Ordering::SeqCst), 17);
        // 7) guarded!: unnamed statement + expression body + implicit drop at scope end
        let sink = Arc::new(AtomicUsize::new(0));
        {
            let n = 7usize;
            let sink_clone = sink.clone();
            crate::guarded!([n] sink_clone.store(n, Ordering::SeqCst));
        }
        assert_eq!(sink.load(Ordering::SeqCst), 7);
        // 8) guarded!: explicit sync + panic path propagates on drop
        let dropped = catch_unwind(|| {
            guarded! {
                sync {
                    panic!("matrix-boom");
                }
            }
        });
        assert!(dropped.is_err());
        // 9) guarded!: async inference on drop detaches and executes
        let (tx, rx) = oneshot::channel();
        {
            let tx = Some(tx);
            crate::guarded!([mut tx] {
                let tx = tx.take();
                async move {
                    if let Some(tx) = tx {
                        let _ = tx.send(9usize);
                    }
                }
            });
        }
        let detached = tokio::time::timeout(Duration::from_secs(1), rx)
            .await
            .expect("detached task should complete")
            .expect("detached task should send value");
        assert_eq!(detached, 9);
    }
}
-1
View File
@@ -1,4 +1,3 @@
pub mod guard;
pub mod panic;
pub mod string;
pub mod task;
-283
View File
@@ -1,7 +1,5 @@
use crate::utils::guard::ContextGuard;
use std::future::Future;
use std::io;
use std::ops::DerefMut;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
@@ -80,284 +78,3 @@ impl<Output> Future for CancellableTask<Output> {
}
// endregion
// region DetachableTask
/// A pinned, heap-allocated task.
///
/// **Why Box?** Heap allocation is required because if the task detaches,
/// it outlives the current stack frame. `Pin<Box<_>>` ensures its memory address
/// remains completely stable during and after the transfer.
type BoxTask<Task> = Pin<Box<Task>>;

/// State shared between inline polling and the drop guard.
struct DetachableTaskContext<Spawner, Task> {
    // Callback that receives the still-pending task on detachment.
    spawner: Spawner,
    // `Some` while the task is pending; emptied during a poll (and on
    // completion) so the guard cannot hand out a finished or mid-poll task.
    task: Option<BoxTask<Task>>,
}

// `ContextGuard` parameterized over a plain fn callback.
// NOTE(review): the `false` const argument presumably selects the synchronous
// guard flavor — confirm against `ContextGuard`'s definition.
type DetachableTaskGuardHelper<Context> = ContextGuard<false, Context, fn(Context)>;
/// Guard that, when dropped/triggered, passes any still-pending task to the
/// spawner; `defuse()` returns the context intact instead.
type DetachableTaskGuard<Spawner, Task> =
    DetachableTaskGuardHelper<DetachableTaskContext<Spawner, Task>>;
/// A task wrapper that executes inline but automatically detaches to a background spawner
/// if the current execution context is interrupted or dropped.
///
/// `DetachableTask` ensures anti-cancellation. If the outer future is dropped (e.g., due to
/// a timeout or a `select!` branch failing), the underlying unfinished task is seamlessly
/// transferred to a background executor via an RAII guard.
///
/// # Advantages over `tokio::spawn` + `.await JoinHandle`
///
/// 1. **Zero Initial Scheduling Overhead**: Prioritizes inline execution. If the task
///    completes before being interrupted, it entirely bypasses the runtime's scheduling queue,
///    eliminating queuing latency and context-switching CPU costs. Spawning is strictly a fallback.
///
/// 2. **Context Locality**: Before detachment, the task is polled directly by the caller's thread.
///    This implicitly preserves the current execution context, including thread-local storage (TLS),
///    Tokio `task_local!` variables, and `tracing` spans, which would otherwise be immediately
///    lost or require explicit propagation across task boundaries.
pub struct DetachableTask<Spawner, Task> {
    // RAII guard owning the spawner and the boxed task; dropping it spawns
    // the task, defusing it recovers the context.
    guard: DetachableTaskGuard<Spawner, Task>,
}
impl<Spawner, Task> DetachableTask<Spawner, Task> {
    /// Consumes the wrapper and fires the guard immediately, handing the
    /// (never-polled) task straight to the spawner.
    pub fn detach(self) {
        self.guard.trigger()
    }

    /// Defuses the guard and returns the boxed task without spawning it.
    ///
    /// The `unwrap` is expected to be infallible here: the task slot is only
    /// emptied while polling, and polling requires converting `self` into a
    /// future via `into_future`, which consumes the wrapper.
    pub fn reclaim(self) -> BoxTask<Task> {
        self.guard.defuse().task.unwrap()
    }
}
/// Default spawner signature: a plain fn receiving the boxed task and
/// returning the spawn result (a `JoinHandle` by default).
pub type TaskSpawner<Task, R = JoinHandle<<Task as Future>::Output>> = fn(BoxTask<Task>) -> R;

// Constructors hang off a dummy instantiation so callers can write
// `DetachableTask::new(..)` / `DetachableTask::with_spawner(..)` without
// spelling out generic parameters.
impl DetachableTask<fn(()), ()> {
    /// Wraps `task` with a custom `spawner` that receives the boxed task if
    /// the wrapper (or its future) is dropped while the task is still pending.
    pub fn with_spawner<Spawner, _R, Task>(
        spawner: Spawner,
        task: Task,
    ) -> DetachableTask<Spawner, Task>
    where
        Spawner: FnOnce(BoxTask<Task>) -> _R,
    {
        let context = DetachableTaskContext {
            spawner,
            task: Some(Box::pin(task)),
        };
        DetachableTask {
            // Guard body (runs on drop/trigger): spawn only if the task slot
            // is still occupied — it is emptied once polling completes the
            // task, so completed work is never re-spawned.
            guard: crate::guard!([context] if let Some(task) = context.task {
                (context.spawner)(task);
            }),
        }
    }

    /// Wraps `task` using the ambient Tokio runtime
    /// (`Handle::current().spawn`) as the detachment spawner.
    pub fn new<Task>(task: Task) -> DetachableTask<TaskSpawner<Task>, Task>
    where
        Task: Future + Send + 'static,
        <Task as Future>::Output: Send + 'static,
    {
        Self::with_spawner(|task| tokio::runtime::Handle::current().spawn(task), task)
    }
}
// Awaiting a `DetachableTask` converts it into `DetachableTaskFuture`,
// transferring the guard — and with it the detach-on-drop behavior — to the
// returned future.
impl<Spawner: FnOnce(BoxTask<Task>) -> _R, _R, Task> IntoFuture for DetachableTask<Spawner, Task>
where
    Task: Future,
{
    type Output = Task::Output;
    type IntoFuture = DetachableTaskFuture<Spawner, Task>;
    fn into_future(self) -> Self::IntoFuture {
        DetachableTaskFuture { guard: self.guard }
    }
}
/// Future adapter that polls the wrapped task inline on the caller's thread;
/// if dropped while the task is still pending, the guard hands the task to
/// the spawner.
pub struct DetachableTaskFuture<Spawner, Task> {
    guard: DetachableTaskGuard<Spawner, Task>,
}

impl<Spawner: FnOnce(BoxTask<Task>) -> _R, _R, Task> Future for DetachableTaskFuture<Spawner, Task>
where
    Task: Future,
{
    type Output = Task::Output;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // SAFETY:
        // 1. We only access the outer struct's unpinned fields.
        // 2. The inner task remains securely pinned on the heap via `BoxTask<Task>`.
        // 3. We never expose a mutable, unpinned reference to the underlying task.
        let this = unsafe { self.get_unchecked_mut() };
        // The guard derefs to the shared context (spawner + task slot).
        let context = this.guard.deref_mut();
        // Take the task out for the duration of the poll. This also means a
        // panic inside `poll` leaves the slot empty, so the drop guard will
        // not re-spawn a task whose poll just panicked.
        let mut task = context.task.take().expect("polled after completion");
        let poll = task.as_mut().poll(cx);
        if poll.is_pending() {
            // Still pending: put it back so a later drop can detach it.
            context.task = Some(task);
        }
        poll
    }
}
// endregion
// Tests for `DetachableTask`: detach-on-drop, completion without detachment,
// detachment of a partially polled task, and the panic-during-poll path.
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::time::Duration;
    use tokio::sync::{mpsc, oneshot};

    // Dropping a never-polled task hands it to the default spawner; busy-yield
    // (bounded by the timeout) until the detached task flips the flag.
    #[tokio::test]
    async fn spawn_when_dropped() {
        let spawned = Arc::new(AtomicBool::new(false));
        {
            let spawned = spawned.clone();
            let _task = DetachableTask::new(async move {
                spawned.store(true, Ordering::SeqCst);
            });
        }
        tokio::time::timeout(Duration::from_secs(1), async {
            while !spawned.load(Ordering::SeqCst) {
                tokio::task::yield_now().await;
            }
        })
        .await
        .expect("task should be spawned on drop");
    }

    // Awaiting to completion runs inline and never invokes the spawner.
    #[tokio::test]
    async fn await_completed_task_does_not_detach() {
        let spawn_count = Arc::new(AtomicUsize::new(0));
        let result = {
            let spawn_count = spawn_count.clone();
            DetachableTask::with_spawner(
                move |_| {
                    spawn_count.fetch_add(1, Ordering::SeqCst);
                },
                async { 7usize },
            )
            .await
        };
        assert_eq!(result, 7);
        assert_eq!(spawn_count.load(Ordering::SeqCst), 0);
    }

    // Drop without awaiting: the custom spawner is invoked exactly once and
    // the detached task produces its result.
    #[tokio::test]
    async fn drop_without_await_and_runs_once() {
        let spawn_count = Arc::new(AtomicUsize::new(0));
        let (done_tx, done_rx) = oneshot::channel();
        {
            let spawn_count = spawn_count.clone();
            let _task = DetachableTask::with_spawner(
                move |f| {
                    spawn_count.fetch_add(1, Ordering::SeqCst);
                    tokio::spawn(async move {
                        let result = f.await;
                        let _ = done_tx.send(result);
                    });
                },
                async { 42usize },
            );
        }
        let detached_result = tokio::time::timeout(Duration::from_secs(1), done_rx)
            .await
            .expect("detached task should finish")
            .expect("detached task should send result");
        assert_eq!(detached_result, 42);
        assert_eq!(spawn_count.load(Ordering::SeqCst), 1);
    }

    // A task that was already partially polled (it consumed the value 10)
    // is detached with its progress intact when the outer spawned future is
    // aborted; the detached continuation then also sums 11, giving 21.
    #[tokio::test]
    async fn drop_after_await_still_detaches() {
        let spawn_count = Arc::new(AtomicUsize::new(0));
        let (value_tx, mut value_rx) = mpsc::channel(4);
        let (done_tx, done_rx) = oneshot::channel();
        let handle = {
            let future = async move {
                let mut sum = 0;
                while let Some(value) = value_rx.recv().await {
                    sum += value;
                }
                sum
            };
            let spawn_count = spawn_count.clone();
            let task = DetachableTask::with_spawner(
                move |f| {
                    spawn_count.fetch_add(1, Ordering::SeqCst);
                    tokio::spawn(async move {
                        let result = f.await;
                        let _ = done_tx.send(result);
                    });
                },
                future,
            );
            // Poll happens inside this outer spawned task, which we abort below.
            tokio::spawn(task.into_future())
        };
        value_tx
            .send(10)
            .await
            .expect("value receiver should still exist");
        handle.abort();
        value_tx
            .send(11)
            .await
            .expect("value receiver should still exist");
        drop(value_tx);
        let detached_result = tokio::time::timeout(Duration::from_secs(1), done_rx)
            .await
            .expect("detached polled task should finish")
            .expect("detached polled task should send result");
        assert_eq!(detached_result, 21);
        assert_eq!(spawn_count.load(Ordering::SeqCst), 1);
    }

    // A panic during the inline poll must propagate to the awaiter AND must
    // not cause the drop guard to spawn the broken task (the poll takes the
    // task out of the slot before polling, so the slot is empty on unwind).
    #[tokio::test]
    async fn panic_during_inline_poll_does_not_detach_on_drop() {
        struct PanicOnPollFuture {
            poll_count: Arc<AtomicUsize>,
        }
        impl Future for PanicOnPollFuture {
            type Output = ();
            fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
                self.poll_count.fetch_add(1, Ordering::SeqCst);
                panic!("panic during inline poll")
            }
        }
        let poll_count = Arc::new(AtomicUsize::new(0));
        let detach_count = Arc::new(AtomicUsize::new(0));
        let task = {
            let detach_count = detach_count.clone();
            DetachableTask::with_spawner(
                move |_| {
                    detach_count.fetch_add(1, Ordering::SeqCst);
                },
                PanicOnPollFuture {
                    poll_count: poll_count.clone(),
                },
            )
        };
        let err = tokio::spawn(task.into_future())
            .await
            .expect_err("inline poll panic should propagate");
        assert!(err.is_panic());
        assert_eq!(poll_count.load(Ordering::SeqCst), 1);
        assert_eq!(detach_count.load(Ordering::SeqCst), 0);
    }
}
+63 -5
View File
@@ -2,13 +2,17 @@ use std::sync::Arc;
use crate::{
common::{
config::TomlConfigLoader, global_ctx::GlobalCtx, log, os_info::collect_device_os_info,
set_default_machine_id, stun::MockStunInfoCollector,
config::TomlConfigLoader,
global_ctx::{ArcGlobalCtx, GlobalCtx},
log,
os_info::collect_device_os_info,
set_default_machine_id,
stun::MockStunInfoCollector,
},
connector::create_connector_by_url,
instance_manager::{DaemonGuard, NetworkInstanceManager},
proto::common::NatType,
tunnel::{IpVersion, TunnelConnector},
tunnel::{IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelScheme},
};
use anyhow::{Context as _, Result};
use async_trait::async_trait;
@@ -49,6 +53,30 @@ pub struct WebClient {
connected: Arc<AtomicBool>,
}
/// `TunnelConnector` that re-creates the underlying connector from the
/// config-server URL on every `connect` attempt, so a server that is
/// unreachable at startup (e.g. DNS failure) keeps being retried instead of
/// failing client creation once.
struct ConfigServerConnector {
    url: Url,
    global_ctx: ArcGlobalCtx,
}

#[async_trait]
impl TunnelConnector for ConfigServerConnector {
    async fn connect(&mut self) -> std::result::Result<Box<dyn Tunnel>, TunnelError> {
        // Building the connector can itself fail (bad DNS, no network); that
        // failure must surface as a per-attempt TunnelError, not an error at
        // client construction time.
        let mut connector =
            create_connector_by_url(self.url.as_str(), &self.global_ctx, IpVersion::Both)
                .await
                .map_err(|err| match err {
                    // Pass tunnel errors through unchanged; wrap anything else.
                    crate::common::error::Error::TunnelError(err) => err,
                    err => TunnelError::Anyhow(err.into()),
                })?;
        connector.connect().await
    }

    fn remote_url(&self) -> Url {
        self.url.clone()
    }
}
impl WebClient {
pub fn new<T: TunnelConnector + 'static, S: ToString, H: ToString>(
connector: T,
@@ -218,6 +246,13 @@ pub async fn run_web_client(
.with_context(|| "failed to parse config server URL")?,
};
TunnelScheme::try_from(&config_server_url).map_err(|_| {
anyhow::anyhow!(
"unsupported config server scheme: {}",
config_server_url.scheme()
)
})?;
let mut c_url = config_server_url.clone();
if !matches!(c_url.scheme(), "ws" | "wss") {
c_url.set_path("");
@@ -243,16 +278,20 @@ pub async fn run_web_client(
let mut flags = global_ctx.get_flags();
flags.bind_device = false;
global_ctx.set_flags(flags);
let hostname = match hostname {
None => gethostname::gethostname().to_string_lossy().to_string(),
Some(hostname) => hostname,
};
Ok(WebClient::new(
create_connector_by_url(c_url.as_str(), &global_ctx, IpVersion::Both).await?,
ConfigServerConnector {
url: c_url,
global_ctx,
},
token.to_string(),
hostname,
secure_mode,
manager.clone(),
manager,
hooks,
))
}
@@ -292,4 +331,23 @@ mod tests {
assert!(sleep_finish.load(std::sync::atomic::Ordering::Relaxed));
println!("Manager stopped.");
}
// Regression test: an unresolvable config-server host must not make
// `run_web_client` fail — the client is created successfully and simply
// stays disconnected while its connector keeps retrying.
#[tokio::test]
async fn test_run_web_client_with_unreachable_config_server() {
    let manager = Arc::new(NetworkInstanceManager::new());
    let client = super::run_web_client(
        "udp://config-server.invalid:22020/test",
        None,
        None,
        false,
        manager,
        None,
    )
    .await
    .unwrap();
    // Give the retry loop a moment; it must not have connected to anything.
    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    assert!(!client.is_connected());
    drop(client);
}
}