Compare commits

...

27 Commits

Author SHA1 Message Date
sijie.sun dde7a4dff1 bps limit should throttle kcp packet 2025-06-19 22:53:41 +08:00
Sijie.Sun 40601bd05b add bps limiter (#1015)
* add token bucket
* remove quinn-proto
2025-06-19 21:15:04 +08:00
chenxudong2020 72d5ed908e quic uses the bbr congestion control algorithm (#1010) 2025-06-18 23:17:52 +08:00
liusen373 72673a9d52 Add is_hole_punched flag to PeerConn (#1001) 2025-06-18 12:14:57 +08:00
tianxiayu007 327ccdcf38 installing by homebrew should use easytier-gui (#1004) 2025-06-18 11:06:26 +08:00
Sijie.Sun 8c2f96d1aa allow set machine uid with command line (#1009) 2025-06-18 11:02:29 +08:00
Sijie.Sun 34ba0bc95b add keepalive option for quic proxy (#1008)
avoid connection loss when idle
2025-06-17 23:39:56 +08:00
Mg Pig ed162c2e66 Add conversion method from TomlConfigLoader to NetworkConfig to enhance configuration experience (#990)
* add method to create NetworkConfig from TomlConfigLoader
* allow web export/import toml config file and gui edit toml config
* Extract the configuration file dialog into a separate component and allow direct editing of the configuration file on the web
2025-06-15 23:41:42 +08:00
Sijie.Sun 40b5fe9a54 support quic proxy (#993)
QUIC proxy works like kcp proxy, it can proxy TCP streams and transfer data with QUIC.
QUIC has better congestion algorithm (BBR) for network with both high loss rate and high bandwidth. 
QUIC proxy can be enabled by passing `--enable-quic-proxy` to easytier in the client side. The proxy status can be viewed by `easytier-cli proxy`.
2025-06-15 19:43:45 +08:00
Sijie.Sun 5a98fac395 Update core.yml,use upx4.2.4 (#991) 2025-06-14 23:04:55 +08:00
Sijie.Sun 0bab14cd72 use bulk compress instead of streaming to reduce mem usage (#985) 2025-06-14 14:55:48 +08:00
Mg Pig b407cfd9d4 Fixed the issue where the GUI would panic after using InstanceManager (#982)
Co-authored-by: Sijie.Sun <sunsijie@buaa.edu.cn>
2025-06-14 13:06:53 +08:00
Sijie.Sun 25dcdc652a support mapping subnet proxy (#978)
- **support mapping subproxy network cidr**
- **add command line option for proxy network mapping**
- **fix Instance leak in tests.**
2025-06-14 11:42:45 +08:00
Sijie.Sun 950cb04534 remove macos default route on utun device (#976) 2025-06-12 22:24:34 +08:00
Sijie.Sun c07d1286ef internal stun server should use xor mapped addr (#975) 2025-06-12 08:09:59 +08:00
Mg Pig 8ddd153022 easytier-core支持多配置文件 (#964)
* 将web和gui允许多网络实例逻辑抽离到NetworkInstanceManager中

* easytier-core支持多配置文件

* FFI复用instance manager

* 添加instance manager 单元测试
2025-06-11 23:17:09 +08:00
Sijie.Sun 870353c499 fix ospf route (#970)
- **fix deadlock in ospf route introduced by #958**
- **use random peer id for foreign network entry, because ospf route algo need peer id change after peer info version reset. this may interfere route propagation and cause node residual**
- **allow multiple nodes broadcast same network ranges for subnet proxy**
- **bump version to v2.3.2**
2025-06-11 09:44:03 +08:00
BlackLuny ecebbecd3b add check for rpc packet fix #963 (#969) 2025-06-09 19:35:29 +08:00
Sijie.Sun f39fbb2ce2 ipv4-peerid table should use peer with least hop (#958)
sometimes route table may not be updated in time, so some dead nodes are still showing in the peer list.
when generating the ipv4-peer table, we should avoid these dead devices overriding the entries of healthy nodes.
2025-06-08 11:28:59 +08:00
Kiva ec56c0bc45 feat: allow using --proxy-forward-by-system together with --enable-exit-node (#957) 2025-06-07 22:27:57 +08:00
Mg Pig 20a6025075 Added RPC portal whitelist function, allowing only local access by default to enhance security (#929) 2025-06-07 22:05:47 +08:00
BlackLuny 707963c0d9 Web dual stack (#953)
* reimplement easytier-web dual stack
* add protocol check for dual stack listener current only support tcp and udp
2025-06-07 22:05:11 +08:00
Kiva 3c7837692e fix(vpn-portal): wireguard peer table should be kept if the client roamed to another endpoint address (#954) 2025-06-07 21:19:03 +08:00
Sijie.Sun f890812577 kcp connect retry (#952) 2025-06-07 12:24:11 +08:00
Sijie.Sun 47f3efe71b Create LICENSE (#951) 2025-06-07 10:56:54 +08:00
Sijie.Sun 6d88b10b14 remove LICENSE (#950) 2025-06-07 10:39:42 +08:00
Zisu Zhang d34a51739f Update default_port and sni logic to improve reverse proxy reachability (#947) 2025-06-07 08:19:31 +08:00
91 changed files with 4586 additions and 1213 deletions
+4 -4
View File
@@ -175,14 +175,14 @@ jobs:
fi fi
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips --package=easytier cargo +nightly build -r --target $TARGET -Z build-std=std,panic_abort --package=easytier
else else
if [[ $OS =~ ^windows.*$ ]]; then if [[ $OS =~ ^windows.*$ ]]; then
SUFFIX=.exe SUFFIX=.exe
fi fi
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed cargo build --release --target $TARGET --package=easytier-web --features=embed
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX" mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
cargo build --release --verbose --target $TARGET cargo build --release --target $TARGET
fi fi
# Copied and slightly modified from @lmq8267 (https://github.com/lmq8267) # Copied and slightly modified from @lmq8267 (https://github.com/lmq8267)
@@ -244,7 +244,7 @@ jobs:
fi fi
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ ]]; then if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ ]]; then
UPX_VERSION=5.0.1 UPX_VERSION=4.2.4
curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf - curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf -
cp upx-${UPX_VERSION}-amd64_linux/upx . cp upx-${UPX_VERSION}-amd64_linux/upx .
./upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX" ./upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX"
+1 -1
View File
@@ -11,7 +11,7 @@ on:
image_tag: image_tag:
description: 'Tag for this image build' description: 'Tag for this image build'
type: string type: string
default: 'v2.3.1' default: 'v2.3.2'
required: true required: true
mark_latest: mark_latest:
description: 'Mark this image as latest' description: 'Mark this image as latest'
+1 -1
View File
@@ -21,7 +21,7 @@ on:
version: version:
description: 'Version for this release' description: 'Version for this release'
type: string type: string
default: 'v2.3.1' default: 'v2.3.2'
required: true required: true
make_latest: make_latest:
description: 'Mark this release as latest' description: 'Mark this release as latest'
+2 -1
View File
@@ -91,6 +91,7 @@ jobs:
- name: Run tests - name: Run tests
run: | run: |
sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose -- --test-threads=1 --nocapture sudo prlimit --pid $$ --nofile=1048576:1048576
sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose -- --test-threads=1
sudo chown -R $USER:$USER ./target sudo chown -R $USER:$USER ./target
sudo chown -R $USER:$USER ~/.cargo sudo chown -R $USER:$USER ~/.cargo
Generated
+125 -67
View File
@@ -1942,7 +1942,7 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]] [[package]]
name = "easytier" name = "easytier"
version = "2.3.1" version = "2.3.2"
dependencies = [ dependencies = [
"aes-gcm", "aes-gcm",
"anyhow", "anyhow",
@@ -2066,11 +2066,12 @@ dependencies = [
"once_cell", "once_cell",
"serde", "serde",
"serde_json", "serde_json",
"uuid",
] ]
[[package]] [[package]]
name = "easytier-gui" name = "easytier-gui"
version = "2.3.1" version = "2.3.2"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"chrono", "chrono",
@@ -2094,6 +2095,7 @@ dependencies = [
"tauri-plugin-vpnservice", "tauri-plugin-vpnservice",
"thunk-rs", "thunk-rs",
"tokio", "tokio",
"uuid",
] ]
[[package]] [[package]]
@@ -2116,7 +2118,7 @@ dependencies = [
[[package]] [[package]]
name = "easytier-web" name = "easytier-web"
version = "2.3.1" version = "2.3.2"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-trait", "async-trait",
@@ -2381,6 +2383,18 @@ dependencies = [
"pin-project-lite", "pin-project-lite",
] ]
[[package]]
name = "fastbloom"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27cea6e7f512d43b098939ff4d5a5d6fe3db07971e1d05176fe26c642d33f5b8"
dependencies = [
"getrandom 0.3.2",
"rand 0.9.1",
"siphasher 1.0.1",
"wide",
]
[[package]] [[package]]
name = "fastrand" name = "fastrand"
version = "2.1.0" version = "2.1.0"
@@ -3921,20 +3935,6 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "jni"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec"
dependencies = [
"cesu8",
"combine",
"jni-sys",
"log",
"thiserror 1.0.63",
"walkdir",
]
[[package]] [[package]]
name = "jni" name = "jni"
version = "0.21.1" version = "0.21.1"
@@ -4252,6 +4252,12 @@ dependencies = [
"tracing-subscriber", "tracing-subscriber",
] ]
[[package]]
name = "lru-slab"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]] [[package]]
name = "mac" name = "mac"
version = "0.1.1" version = "0.1.1"
@@ -4502,7 +4508,7 @@ dependencies = [
"openssl-probe", "openssl-probe",
"openssl-sys", "openssl-sys",
"schannel", "schannel",
"security-framework", "security-framework 2.11.1",
"security-framework-sys", "security-framework-sys",
"tempfile", "tempfile",
] ]
@@ -6041,38 +6047,45 @@ dependencies = [
[[package]] [[package]]
name = "quinn" name = "quinn"
version = "0.11.3" version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156" checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8"
dependencies = [ dependencies = [
"bytes", "bytes",
"cfg_aliases",
"pin-project-lite", "pin-project-lite",
"quinn-proto", "quinn-proto",
"quinn-udp", "quinn-udp",
"rustc-hash", "rustc-hash",
"rustls", "rustls",
"socket2", "socket2",
"thiserror 1.0.63", "thiserror 2.0.11",
"tokio", "tokio",
"tracing", "tracing",
"web-time",
] ]
[[package]] [[package]]
name = "quinn-proto" name = "quinn-proto"
version = "0.11.6" version = "0.11.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba92fb39ec7ad06ca2582c0ca834dfeadcaf06ddfc8e635c80aa7e1c05315fdd" checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e"
dependencies = [ dependencies = [
"bytes", "bytes",
"rand 0.8.5", "fastbloom",
"getrandom 0.3.2",
"lru-slab",
"rand 0.9.1",
"ring", "ring",
"rustc-hash", "rustc-hash",
"rustls", "rustls",
"rustls-pki-types",
"rustls-platform-verifier", "rustls-platform-verifier",
"slab", "slab",
"thiserror 1.0.63", "thiserror 2.0.11",
"tinyvec", "tinyvec",
"tracing", "tracing",
"web-time",
] ]
[[package]] [[package]]
@@ -6090,9 +6103,9 @@ dependencies = [
[[package]] [[package]]
name = "quote" name = "quote"
version = "1.0.36" version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
] ]
@@ -6511,24 +6524,25 @@ dependencies = [
[[package]] [[package]]
name = "rstest" name = "rstest"
version = "0.18.2" version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" checksum = "6fc39292f8613e913f7df8fa892b8944ceb47c247b78e1b1ae2f09e019be789d"
dependencies = [ dependencies = [
"futures",
"futures-timer", "futures-timer",
"futures-util",
"rstest_macros", "rstest_macros",
"rustc_version", "rustc_version",
] ]
[[package]] [[package]]
name = "rstest_macros" name = "rstest_macros"
version = "0.18.2" version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" checksum = "1f168d99749d307be9de54d23fd226628d99768225ef08f6ffb52e0182a27746"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"glob", "glob",
"proc-macro-crate 3.2.0",
"proc-macro2", "proc-macro2",
"quote", "quote",
"regex", "regex",
@@ -6656,9 +6670,9 @@ checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497"
[[package]] [[package]]
name = "rustc_version" name = "rustc_version"
version = "0.4.0" version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
dependencies = [ dependencies = [
"semver", "semver",
] ]
@@ -6691,9 +6705,9 @@ dependencies = [
[[package]] [[package]]
name = "rustls" name = "rustls"
version = "0.23.12" version = "0.23.27"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321"
dependencies = [ dependencies = [
"once_cell", "once_cell",
"ring", "ring",
@@ -6705,15 +6719,14 @@ dependencies = [
[[package]] [[package]]
name = "rustls-native-certs" name = "rustls-native-certs"
version = "0.7.1" version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3"
dependencies = [ dependencies = [
"openssl-probe", "openssl-probe",
"rustls-pemfile",
"rustls-pki-types", "rustls-pki-types",
"schannel", "schannel",
"security-framework", "security-framework 3.2.0",
] ]
[[package]] [[package]]
@@ -6730,26 +6743,29 @@ name = "rustls-pki-types"
version = "1.11.0" version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c"
dependencies = [
"web-time",
]
[[package]] [[package]]
name = "rustls-platform-verifier" name = "rustls-platform-verifier"
version = "0.3.3" version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93bda3f493b9abe5b93b3e7e3ecde0df292f2bd28c0296b90586ee0055ff5123" checksum = "19787cda76408ec5404443dc8b31795c87cd8fec49762dc75fa727740d34acc1"
dependencies = [ dependencies = [
"core-foundation 0.9.4", "core-foundation 0.10.0",
"core-foundation-sys", "core-foundation-sys",
"jni 0.19.0", "jni",
"log", "log",
"once_cell", "once_cell",
"rustls", "rustls",
"rustls-native-certs", "rustls-native-certs",
"rustls-platform-verifier-android", "rustls-platform-verifier-android",
"rustls-webpki", "rustls-webpki",
"security-framework", "security-framework 3.2.0",
"security-framework-sys", "security-framework-sys",
"webpki-roots", "webpki-root-certs 0.26.11",
"winapi", "windows-sys 0.59.0",
] ]
[[package]] [[package]]
@@ -6760,9 +6776,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
[[package]] [[package]]
name = "rustls-webpki" name = "rustls-webpki"
version = "0.102.6" version = "0.103.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435"
dependencies = [ dependencies = [
"ring", "ring",
"rustls-pki-types", "rustls-pki-types",
@@ -7047,15 +7063,27 @@ dependencies = [
"core-foundation 0.9.4", "core-foundation 0.9.4",
"core-foundation-sys", "core-foundation-sys",
"libc", "libc",
"num-bigint", "security-framework-sys",
]
[[package]]
name = "security-framework"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316"
dependencies = [
"bitflags 2.8.0",
"core-foundation 0.10.0",
"core-foundation-sys",
"libc",
"security-framework-sys", "security-framework-sys",
] ]
[[package]] [[package]]
name = "security-framework-sys" name = "security-framework-sys"
version = "2.11.1" version = "2.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32"
dependencies = [ dependencies = [
"core-foundation-sys", "core-foundation-sys",
"libc", "libc",
@@ -7982,7 +8010,7 @@ dependencies = [
"gdkx11-sys", "gdkx11-sys",
"gtk", "gtk",
"instant", "instant",
"jni 0.21.1", "jni",
"lazy_static", "lazy_static",
"libc", "libc",
"log", "log",
@@ -8044,7 +8072,7 @@ dependencies = [
"heck 0.5.0", "heck 0.5.0",
"http", "http",
"image 0.25.2", "image 0.25.2",
"jni 0.21.1", "jni",
"libc", "libc",
"log", "log",
"mime", "mime",
@@ -8285,7 +8313,7 @@ dependencies = [
"dpi", "dpi",
"gtk", "gtk",
"http", "http",
"jni 0.21.1", "jni",
"raw-window-handle", "raw-window-handle",
"serde", "serde",
"serde_json", "serde_json",
@@ -8303,7 +8331,7 @@ checksum = "62fa2068e8498ad007b54d5773d03d57c3ff6dd96f8c8ce58beff44d0d5e0d30"
dependencies = [ dependencies = [
"gtk", "gtk",
"http", "http",
"jni 0.21.1", "jni",
"log", "log",
"objc2", "objc2",
"objc2-app-kit", "objc2-app-kit",
@@ -8540,9 +8568,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]] [[package]]
name = "tokio" name = "tokio"
version = "1.44.1" version = "1.45.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
dependencies = [ dependencies = [
"backtrace", "backtrace",
"bytes", "bytes",
@@ -9115,9 +9143,9 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75"
[[package]] [[package]]
name = "unicode-ident" name = "unicode-ident"
version = "1.0.12" version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]] [[package]]
name = "unicode-normalization" name = "unicode-normalization"
@@ -9230,21 +9258,23 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]] [[package]]
name = "uuid" name = "uuid"
version = "1.10.0" version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [ dependencies = [
"getrandom 0.2.15", "getrandom 0.3.2",
"rand 0.8.5", "js-sys",
"rand 0.9.1",
"serde", "serde",
"uuid-macro-internal", "uuid-macro-internal",
"wasm-bindgen",
] ]
[[package]] [[package]]
name = "uuid-macro-internal" name = "uuid-macro-internal"
version = "1.10.0" version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee1cd046f83ea2c4e920d6ee9f7c3537ef928d75dce5d84a87c2c5d6b3999a3a" checksum = "26b682e8c381995ea03130e381928e0e005b7c9eb483c6c8682f50e07b33c2b7"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -9440,6 +9470,16 @@ dependencies = [
"wasm-bindgen", "wasm-bindgen",
] ]
[[package]]
name = "web-time"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]] [[package]]
name = "webkit2gtk" name = "webkit2gtk"
version = "2.0.1" version = "2.0.1"
@@ -9494,6 +9534,24 @@ dependencies = [
"untrusted", "untrusted",
] ]
[[package]]
name = "webpki-root-certs"
version = "0.26.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e"
dependencies = [
"webpki-root-certs 1.0.0",
]
[[package]]
name = "webpki-root-certs"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01a83f7e1a9f8712695c03eabe9ed3fbca0feff0152f33f12593e5a6303cb1a4"
dependencies = [
"rustls-pki-types",
]
[[package]] [[package]]
name = "webpki-roots" name = "webpki-roots"
version = "0.26.3" version = "0.26.3"
@@ -10094,7 +10152,7 @@ dependencies = [
"html5ever", "html5ever",
"http", "http",
"javascriptcore-rs", "javascriptcore-rs",
"jni 0.21.1", "jni",
"kuchikiki", "kuchikiki",
"libc", "libc",
"ndk", "ndk",
+133 -41
View File
@@ -1,73 +1,165 @@
Apache License GNU LESSER GENERAL PUBLIC LICENSE
Version 2.0, January 2004 Version 3, 29 June 2007
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 0. Additional Definitions.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 1. Exception to Section 3 of the GNU GPL.
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 2. Conveying Modified Versions.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and 3. Object Code Incorporating Material from Library Header Files.
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. b) Accompany the object code with a copy of the GNU GPL and this license
document.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 4. Combined Works.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
END OF TERMS AND CONDITIONS d) Do one of the following:
APPENDIX: How to apply the Apache License to your work. 0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
Copyright 2023 sunsijie e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
Licensed under the Apache License, Version 2.0 (the "License"); 5. Combined Libraries.
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
Unless required by applicable law or agreed to in writing, software a) Accompany the combined library with a copy of the same work based
distributed under the License is distributed on an "AS IS" BASIS, on the Library, uncombined with any other library facilities,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. conveyed under the terms of this License.
See the License for the specific language governing permissions and
limitations under the License. b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
+1 -1
View File
@@ -99,7 +99,7 @@ EasyTier is a simple, safe and decentralized VPN networking solution implemented
```sh ```sh
brew tap brewforge/chinese brew tap brewforge/chinese
brew install --cask easytier brew install --cask easytier-gui
``` ```
## Quick Start ## Quick Start
+1 -1
View File
@@ -96,7 +96,7 @@
```sh ```sh
brew tap brewforge/chinese brew tap brewforge/chinese
brew install --cask easytier brew install --cask easytier-gui
``` ```
## 快速开始 ## 快速开始
+1
View File
@@ -14,3 +14,4 @@ dashmap = "6.0"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1" serde_json = "1"
uuid = "1.17.0"
+40 -15
View File
@@ -3,11 +3,14 @@ use std::sync::Mutex;
use dashmap::DashMap; use dashmap::DashMap;
use easytier::{ use easytier::{
common::config::{ConfigLoader as _, TomlConfigLoader}, common::config::{ConfigLoader as _, TomlConfigLoader},
launcher::NetworkInstance, instance_manager::NetworkInstanceManager,
launcher::ConfigSource,
}; };
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> = static INSTANCE_NAME_ID_MAP: once_cell::sync::Lazy<DashMap<String, uuid::Uuid>> =
once_cell::sync::Lazy::new(DashMap::new); once_cell::sync::Lazy::new(DashMap::new);
static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> =
once_cell::sync::Lazy::new(NetworkInstanceManager::new);
static ERROR_MSG: once_cell::sync::Lazy<Mutex<Vec<u8>>> = static ERROR_MSG: once_cell::sync::Lazy<Mutex<Vec<u8>>> =
once_cell::sync::Lazy::new(|| Mutex::new(Vec::new())); once_cell::sync::Lazy::new(|| Mutex::new(Vec::new()));
@@ -86,18 +89,20 @@ pub extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std:
let inst_name = cfg.get_inst_name(); let inst_name = cfg.get_inst_name();
if INSTANCE_MAP.contains_key(&inst_name) { if INSTANCE_NAME_ID_MAP.contains_key(&inst_name) {
set_error_msg("instance already exists"); set_error_msg("instance already exists");
return -1; return -1;
} }
let mut instance = NetworkInstance::new(cfg); let instance_id = match INSTANCE_MANAGER.run_network_instance(cfg, ConfigSource::FFI) {
if let Err(e) = instance.start().map_err(|e| e.to_string()) { Ok(id) => id,
set_error_msg(&format!("failed to start instance: {}", e)); Err(e) => {
return -1; set_error_msg(&format!("failed to start instance: {}", e));
} return -1;
}
};
INSTANCE_MAP.insert(inst_name, instance); INSTANCE_NAME_ID_MAP.insert(inst_name, instance_id);
0 0
} }
@@ -108,7 +113,11 @@ pub extern "C" fn retain_network_instance(
length: usize, length: usize,
) -> std::ffi::c_int { ) -> std::ffi::c_int {
if length == 0 { if length == 0 {
INSTANCE_MAP.clear(); if let Err(e) = INSTANCE_MANAGER.retain_network_instance(Vec::new()) {
set_error_msg(&format!("failed to retain instances: {}", e));
return -1;
}
INSTANCE_NAME_ID_MAP.clear();
return 0; return 0;
} }
@@ -125,7 +134,17 @@ pub extern "C" fn retain_network_instance(
.collect::<Vec<_>>() .collect::<Vec<_>>()
}; };
let _ = INSTANCE_MAP.retain(|k, _| inst_names.contains(k)); let inst_ids: Vec<uuid::Uuid> = inst_names
.iter()
.filter_map(|name| INSTANCE_NAME_ID_MAP.get(name).map(|id| *id))
.collect();
if let Err(e) = INSTANCE_MANAGER.retain_network_instance(inst_ids) {
set_error_msg(&format!("failed to retain instances: {}", e));
return -1;
}
let _ = INSTANCE_NAME_ID_MAP.retain(|k, _| inst_names.contains(k));
0 0
} }
@@ -144,13 +163,20 @@ pub extern "C" fn collect_network_infos(
std::slice::from_raw_parts_mut(infos, max_length) std::slice::from_raw_parts_mut(infos, max_length)
}; };
let collected_infos = match INSTANCE_MANAGER.collect_network_infos() {
Ok(infos) => infos,
Err(e) => {
set_error_msg(&format!("failed to collect network infos: {}", e));
return -1;
}
};
let mut index = 0; let mut index = 0;
for instance in INSTANCE_MAP.iter() { for (instance_id, value) in collected_infos.iter() {
if index >= max_length { if index >= max_length {
break; break;
} }
let key = instance.key(); let Some(key) = INSTANCE_MANAGER.get_network_instance_name(instance_id) else {
let Some(value) = instance.get_running_info() else {
continue; continue;
}; };
// convert value to json string // convert value to json string
@@ -181,7 +207,6 @@ mod tests {
let cfg_str = r#" let cfg_str = r#"
inst_name = "test" inst_name = "test"
network = "test_network" network = "test_network"
fdsafdsa
"#; "#;
let cstr = std::ffi::CString::new(cfg_str).unwrap(); let cstr = std::ffi::CString::new(cfg_str).unwrap();
assert_eq!(parse_config(cstr.as_ptr()), 0); assert_eq!(parse_config(cstr.as_ptr()), 0);
+1 -1
View File
@@ -1,6 +1,6 @@
id=easytier_magisk id=easytier_magisk
name=EasyTier_Magisk name=EasyTier_Magisk
version=v2.3.1 version=v2.3.2
versionCode=1 versionCode=1
author=EasyTier author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier) description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
+4
View File
@@ -50,7 +50,11 @@ dev_name_placeholder: 注意:当多个网络同时使用相同的TUN接口名
off_text: 点击关闭 off_text: 点击关闭
on_text: 点击开启 on_text: 点击开启
show_config: 显示配置 show_config: 显示配置
edit_config: 编辑配置文件
close: 关闭 close: 关闭
save: 保存
config_saved: 配置已保存
use_latency_first: 延迟优先模式 use_latency_first: 延迟优先模式
my_node_info: 当前节点信息 my_node_info: 当前节点信息
+3
View File
@@ -51,7 +51,10 @@ dev_name_placeholder: 'Note: When multiple networks use the same TUN interface n
off_text: Press to disable off_text: Press to disable
on_text: Press to enable on_text: Press to enable
show_config: Show Config show_config: Show Config
edit_config: Edit Config File
close: Close close: Close
save: Save
config_saved: Configuration saved
my_node_info: My Node Info my_node_info: My Node Info
peer_count: Connected peer_count: Connected
upload: Upload upload: Upload
+1 -1
View File
@@ -1,7 +1,7 @@
{ {
"name": "easytier-gui", "name": "easytier-gui",
"type": "module", "type": "module",
"version": "2.3.1", "version": "2.3.2",
"private": true, "private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4", "packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": { "scripts": {
+2 -1
View File
@@ -1,6 +1,6 @@
[package] [package]
name = "easytier-gui" name = "easytier-gui"
version = "2.3.1" version = "2.3.2"
description = "EasyTier GUI" description = "EasyTier GUI"
authors = ["you"] authors = ["you"]
edition = "2021" edition = "2021"
@@ -53,6 +53,7 @@ tauri-plugin-positioner = { version = "2.0", features = ["tray-icon"] }
tauri-plugin-vpnservice = { path = "../../tauri-plugin-vpnservice" } tauri-plugin-vpnservice = { path = "../../tauri-plugin-vpnservice" }
tauri-plugin-os = "2.0" tauri-plugin-os = "2.0"
tauri-plugin-autostart = "2.0" tauri-plugin-autostart = "2.0"
uuid = "1.17.0"
[features] [features]
+44 -36
View File
@@ -3,10 +3,10 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use dashmap::DashMap;
use easytier::{ use easytier::{
common::config::{ConfigLoader, FileLoggerConfig, TomlConfigLoader}, common::config::{ConfigLoader, FileLoggerConfig, LoggingConfigBuilder, TomlConfigLoader},
launcher::{NetworkConfig, NetworkInstance, NetworkInstanceRunningInfo}, instance_manager::NetworkInstanceManager,
launcher::{ConfigSource, NetworkConfig, NetworkInstanceRunningInfo},
utils::{self, NewFilterSender}, utils::{self, NewFilterSender},
}; };
@@ -17,8 +17,8 @@ pub const AUTOSTART_ARG: &str = "--autostart";
#[cfg(not(target_os = "android"))] #[cfg(not(target_os = "android"))]
use tauri::tray::{MouseButton, MouseButtonState, TrayIconBuilder, TrayIconEvent}; use tauri::tray::{MouseButton, MouseButtonState, TrayIconBuilder, TrayIconEvent};
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> = static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> =
once_cell::sync::Lazy::new(DashMap::new); once_cell::sync::Lazy::new(NetworkInstanceManager::new);
static mut LOGGER_LEVEL_SENDER: once_cell::sync::Lazy<Option<NewFilterSender>> = static mut LOGGER_LEVEL_SENDER: once_cell::sync::Lazy<Option<NewFilterSender>> =
once_cell::sync::Lazy::new(Default::default); once_cell::sync::Lazy::new(Default::default);
@@ -42,43 +42,48 @@ fn parse_network_config(cfg: NetworkConfig) -> Result<String, String> {
Ok(toml.dump()) Ok(toml.dump())
} }
#[tauri::command]
fn generate_network_config(toml_config: String) -> Result<NetworkConfig, String> {
let config = TomlConfigLoader::new_from_str(&toml_config).map_err(|e| e.to_string())?;
let cfg = NetworkConfig::new_from_config(&config).map_err(|e| e.to_string())?;
Ok(cfg)
}
#[tauri::command] #[tauri::command]
fn run_network_instance(cfg: NetworkConfig) -> Result<(), String> { fn run_network_instance(cfg: NetworkConfig) -> Result<(), String> {
if INSTANCE_MAP.contains_key(cfg.instance_id()) {
return Err("instance already exists".to_string());
}
let instance_id = cfg.instance_id().to_string(); let instance_id = cfg.instance_id().to_string();
let cfg = cfg.gen_config().map_err(|e| e.to_string())?; let cfg = cfg.gen_config().map_err(|e| e.to_string())?;
let mut instance = NetworkInstance::new(cfg); INSTANCE_MANAGER
instance.start().map_err(|e| e.to_string())?; .run_network_instance(cfg, ConfigSource::GUI)
.map_err(|e| e.to_string())?;
println!("instance {} started", instance_id); println!("instance {} started", instance_id);
INSTANCE_MAP.insert(instance_id, instance);
Ok(()) Ok(())
} }
#[tauri::command] #[tauri::command]
fn retain_network_instance(instance_ids: Vec<String>) -> Result<(), String> { fn retain_network_instance(instance_ids: Vec<String>) -> Result<(), String> {
let _ = INSTANCE_MAP.retain(|k, _| instance_ids.contains(k)); let instance_ids = instance_ids
println!( .into_iter()
"instance {:?} retained", .filter_map(|id| uuid::Uuid::parse_str(&id).ok())
INSTANCE_MAP .collect();
.iter() let retained = INSTANCE_MANAGER
.map(|item| item.key().clone()) .retain_network_instance(instance_ids)
.collect::<Vec<_>>() .map_err(|e| e.to_string())?;
); println!("instance {:?} retained", retained);
Ok(()) Ok(())
} }
#[tauri::command] #[tauri::command]
fn collect_network_infos() -> Result<BTreeMap<String, NetworkInstanceRunningInfo>, String> { fn collect_network_infos() -> Result<BTreeMap<String, NetworkInstanceRunningInfo>, String> {
let infos = INSTANCE_MANAGER
.collect_network_infos()
.map_err(|e| e.to_string())?;
let mut ret = BTreeMap::new(); let mut ret = BTreeMap::new();
for instance in INSTANCE_MAP.iter() { for (uuid, info) in infos {
if let Some(info) = instance.get_running_info() { ret.insert(uuid.to_string(), info);
ret.insert(instance.key().clone(), info);
}
} }
Ok(ret) Ok(ret)
} }
@@ -97,10 +102,10 @@ fn set_logging_level(level: String) -> Result<(), String> {
#[tauri::command] #[tauri::command]
fn set_tun_fd(instance_id: String, fd: i32) -> Result<(), String> { fn set_tun_fd(instance_id: String, fd: i32) -> Result<(), String> {
let mut instance = INSTANCE_MAP let uuid = uuid::Uuid::parse_str(&instance_id).map_err(|e| e.to_string())?;
.get_mut(&instance_id) INSTANCE_MANAGER
.ok_or("instance not found")?; .set_tun_fd(&uuid, fd)
instance.set_tun_fd(fd); .map_err(|e| e.to_string())?;
Ok(()) Ok(())
} }
@@ -185,13 +190,15 @@ pub fn run() {
let Ok(log_dir) = app.path().app_log_dir() else { let Ok(log_dir) = app.path().app_log_dir() else {
return Ok(()); return Ok(());
}; };
let config = TomlConfigLoader::default(); let config = LoggingConfigBuilder::default()
config.set_file_logger_config(FileLoggerConfig { .file_logger(FileLoggerConfig {
dir: Some(log_dir.to_string_lossy().to_string()), dir: Some(log_dir.to_string_lossy().to_string()),
level: None, level: None,
file: None, file: None,
}); })
let Ok(Some(logger_reinit)) = utils::init_logger(config, true) else { .build()
.map_err(|e| e.to_string())?;
let Ok(Some(logger_reinit)) = utils::init_logger(&config, true) else {
return Ok(()); return Ok(());
}; };
#[allow(static_mut_refs)] #[allow(static_mut_refs)]
@@ -224,6 +231,7 @@ pub fn run() {
}) })
.invoke_handler(tauri::generate_handler![ .invoke_handler(tauri::generate_handler![
parse_network_config, parse_network_config,
generate_network_config,
run_network_instance, run_network_instance,
retain_network_instance, retain_network_instance,
collect_network_infos, collect_network_infos,
+1 -1
View File
@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false "createUpdaterArtifacts": false
}, },
"productName": "easytier-gui", "productName": "easytier-gui",
"version": "2.3.1", "version": "2.3.2",
"identifier": "com.kkrainbow.easytier", "identifier": "com.kkrainbow.easytier",
"plugins": {}, "plugins": {},
"app": { "app": {
+1
View File
@@ -8,5 +8,6 @@ onBeforeMount(async () => {
</script> </script>
<template> <template>
<Toast position="bottom-right" />
<RouterView /> <RouterView />
</template> </template>
+2
View File
@@ -23,6 +23,7 @@ declare global {
const effectScope: typeof import('vue')['effectScope'] const effectScope: typeof import('vue')['effectScope']
const event2human: typeof import('./composables/utils')['event2human'] const event2human: typeof import('./composables/utils')['event2human']
const generateMenuItem: typeof import('./composables/tray')['generateMenuItem'] const generateMenuItem: typeof import('./composables/tray')['generateMenuItem']
const generateNetworkConfig: typeof import('./composables/network')['generateNetworkConfig']
const getActivePinia: typeof import('pinia')['getActivePinia'] const getActivePinia: typeof import('pinia')['getActivePinia']
const getCurrentInstance: typeof import('vue')['getCurrentInstance'] const getCurrentInstance: typeof import('vue')['getCurrentInstance']
const getCurrentScope: typeof import('vue')['getCurrentScope'] const getCurrentScope: typeof import('vue')['getCurrentScope']
@@ -134,6 +135,7 @@ declare module 'vue' {
readonly defineStore: UnwrapRef<typeof import('pinia')['defineStore']> readonly defineStore: UnwrapRef<typeof import('pinia')['defineStore']>
readonly effectScope: UnwrapRef<typeof import('vue')['effectScope']> readonly effectScope: UnwrapRef<typeof import('vue')['effectScope']>
readonly generateMenuItem: UnwrapRef<typeof import('./composables/tray')['generateMenuItem']> readonly generateMenuItem: UnwrapRef<typeof import('./composables/tray')['generateMenuItem']>
readonly generateNetworkConfig: UnwrapRef<typeof import('./composables/network')['generateNetworkConfig']>
readonly getActivePinia: UnwrapRef<typeof import('pinia')['getActivePinia']> readonly getActivePinia: UnwrapRef<typeof import('pinia')['getActivePinia']>
readonly getCurrentInstance: UnwrapRef<typeof import('vue')['getCurrentInstance']> readonly getCurrentInstance: UnwrapRef<typeof import('vue')['getCurrentInstance']>
readonly getCurrentScope: UnwrapRef<typeof import('vue')['getCurrentScope']> readonly getCurrentScope: UnwrapRef<typeof import('vue')['getCurrentScope']>
+4
View File
@@ -8,6 +8,10 @@ export async function parseNetworkConfig(cfg: NetworkConfig) {
return invoke<string>('parse_network_config', { cfg }) return invoke<string>('parse_network_config', { cfg })
} }
export async function generateNetworkConfig(tomlConfig: string) {
return invoke<NetworkConfig>('generate_network_config', { tomlConfig })
}
export async function runNetworkInstance(cfg: NetworkConfig) { export async function runNetworkInstance(cfg: NetworkConfig) {
return invoke('run_network_instance', { cfg }) return invoke('run_network_instance', { cfg })
} }
+11 -13
View File
@@ -8,7 +8,7 @@ import { exit } from '@tauri-apps/plugin-process'
import { open } from '@tauri-apps/plugin-shell' import { open } from '@tauri-apps/plugin-shell'
import TieredMenu from 'primevue/tieredmenu' import TieredMenu from 'primevue/tieredmenu'
import { useToast } from 'primevue/usetoast' import { useToast } from 'primevue/usetoast'
import { NetworkTypes, Config, Status, Utils, I18nUtils } from 'easytier-frontend-lib' import { NetworkTypes, Config, Status, Utils, I18nUtils, ConfigEditDialog } from 'easytier-frontend-lib'
import { isAutostart, setLoggingLevel } from '~/composables/network' import { isAutostart, setLoggingLevel } from '~/composables/network'
import { useTray } from '~/composables/tray' import { useTray } from '~/composables/tray'
@@ -23,7 +23,7 @@ useTray(true)
const items = ref([ const items = ref([
{ {
label: () => t('show_config'), label: () => activeStep.value == "2" ? t('show_config') : t('edit_config'),
icon: 'pi pi-file-edit', icon: 'pi pi-file-edit',
command: async () => { command: async () => {
try { try {
@@ -262,6 +262,13 @@ onMounted(async () => {
function isRunning(id: string) { function isRunning(id: string) {
return networkStore.networkInstanceIds.includes(id) return networkStore.networkInstanceIds.includes(id)
} }
async function saveTomlConfig(tomlConfig: string) {
const config = await generateNetworkConfig(tomlConfig)
networkStore.replaceCurNetwork(config);
toast.add({ severity: 'success', detail: t('config_saved'), life: 3000 })
visible.value = false
}
</script> </script>
<script lang="ts"> <script lang="ts">
@@ -269,17 +276,8 @@ function isRunning(id: string) {
<template> <template>
<div id="root" class="flex flex-col"> <div id="root" class="flex flex-col">
<Dialog v-model:visible="visible" modal header="Config File" :style="{ width: '70%' }"> <ConfigEditDialog v-model:visible="visible" :cur-network="curNetworkConfig" :readonly="activeStep !== '1'"
<Panel> :save-config="saveTomlConfig" :generate-config="parseNetworkConfig" />
<ScrollPanel style="width: 100%; height: 300px">
<pre>{{ tomlConfig }}</pre>
</ScrollPanel>
</Panel>
<Divider />
<div class="flex gap-2 justify-end">
<Button type="button" :label="t('close')" @click="visible = false" />
</div>
</Dialog>
<Dialog v-model:visible="aboutVisible" modal :header="t('about.title')" :style="{ width: '70%' }"> <Dialog v-model:visible="aboutVisible" modal :header="t('about.title')" :style="{ width: '70%' }">
<About /> <About />
+6
View File
@@ -48,6 +48,12 @@ export const useNetworkStore = defineStore('networkStore', {
this.curNetwork = this.networkList[nextCurNetworkIdx] this.curNetwork = this.networkList[nextCurNetworkIdx]
}, },
replaceCurNetwork(cfg: NetworkTypes.NetworkConfig) {
const curNetworkIdx = this.networkList.indexOf(this.curNetwork)
this.networkList[curNetworkIdx] = cfg
this.curNetwork = cfg
},
removeNetworkInstance(instanceId: string) { removeNetworkInstance(instanceId: string) {
delete this.instances[instanceId] delete this.instances[instanceId]
}, },
+1 -1
View File
@@ -1,6 +1,6 @@
[package] [package]
name = "easytier-web" name = "easytier-web"
version = "2.3.1" version = "2.3.2"
edition = "2021" edition = "2021"
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server." description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
@@ -147,6 +147,8 @@ const bool_flags: BoolFlag[] = [
{ field: 'use_smoltcp', help: 'use_smoltcp_help' }, { field: 'use_smoltcp', help: 'use_smoltcp_help' },
{ field: 'enable_kcp_proxy', help: 'enable_kcp_proxy_help' }, { field: 'enable_kcp_proxy', help: 'enable_kcp_proxy_help' },
{ field: 'disable_kcp_input', help: 'disable_kcp_input_help' }, { field: 'disable_kcp_input', help: 'disable_kcp_input_help' },
{ field: 'enable_quic_proxy', help: 'enable_quic_proxy_help' },
{ field: 'disable_quic_input', help: 'disable_quic_input_help' },
{ field: 'disable_p2p', help: 'disable_p2p_help' }, { field: 'disable_p2p', help: 'disable_p2p_help' },
{ field: 'bind_device', help: 'bind_device_help' }, { field: 'bind_device', help: 'bind_device_help' },
{ field: 'no_tun', help: 'no_tun_help' }, { field: 'no_tun', help: 'no_tun_help' },
@@ -200,7 +202,7 @@ const bool_flags: BoolFlag[] = [
<div class="flex flex-col gap-2 basis-5/12 grow"> <div class="flex flex-col gap-2 basis-5/12 grow">
<label for="network_secret">{{ t('network_secret') }}</label> <label for="network_secret">{{ t('network_secret') }}</label>
<Password id="network_secret" v-model="curNetwork.network_secret" <Password id="network_secret" v-model="curNetwork.network_secret"
aria-describedby="network_secret-help" toggleMask :feedback="false"/> aria-describedby="network_secret-help" toggleMask :feedback="false" />
</div> </div>
</div> </div>
@@ -271,7 +273,7 @@ const bool_flags: BoolFlag[] = [
<div class="flex flex-col gap-2 basis-8/12 grow"> <div class="flex flex-col gap-2 basis-8/12 grow">
<InputGroup> <InputGroup>
<InputText v-model="curNetwork.vpn_portal_client_network_addr" <InputText v-model="curNetwork.vpn_portal_client_network_addr"
:placeholder="t('vpn_portal_client_network')" /> :placeholder="t('vpn_portal_client_network')" />
<InputGroupAddon> <InputGroupAddon>
<span>/{{ curNetwork.vpn_portal_client_network_len }}</span> <span>/{{ curNetwork.vpn_portal_client_network_len }}</span>
</InputGroupAddon> </InputGroupAddon>
@@ -279,7 +281,7 @@ const bool_flags: BoolFlag[] = [
</div> </div>
<div class="flex flex-col gap-2 basis-3/12 grow"> <div class="flex flex-col gap-2 basis-3/12 grow">
<InputNumber v-model="curNetwork.vpn_portal_listen_port" :allow-empty="false" :format="false" <InputNumber v-model="curNetwork.vpn_portal_listen_port" :allow-empty="false" :format="false"
:min="0" :max="65535" fluid /> :min="0" :max="65535" fluid />
</div> </div>
</div> </div>
</div> </div>
@@ -304,6 +306,15 @@ const bool_flags: BoolFlag[] = [
</div> </div>
</div> </div>
<div class="flex flex-row gap-x-9 flex-wrap w-full">
<div class="flex flex-col gap-2 grow p-fluid">
<label for="">{{ t('rpc_portal_whitelists') }}</label>
<AutoComplete id="rpc_portal_whitelists" v-model="curNetwork.rpc_portal_whitelists"
:placeholder="t('chips_placeholder', ['127.0.0.0/8'])" class="w-full" multiple fluid
:suggestions="inetSuggestions" @complete="searchInetSuggestions" />
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap"> <div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow"> <div class="flex flex-col gap-2 basis-5/12 grow">
<label for="dev_name">{{ t('dev_name') }}</label> <label for="dev_name">{{ t('dev_name') }}</label>
@@ -316,11 +327,10 @@ const bool_flags: BoolFlag[] = [
<div class="flex flex-col gap-2 basis-5/12 grow"> <div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex"> <div class="flex">
<label for="mtu">{{ t('mtu') }}</label> <label for="mtu">{{ t('mtu') }}</label>
<span class="pi pi-question-circle ml-2 self-center" <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mtu_help')"></span>
v-tooltip="t('mtu_help')"></span>
</div> </div>
<InputNumber id="mtu" v-model="curNetwork.mtu" aria-describedby="mtu-help" <InputNumber id="mtu" v-model="curNetwork.mtu" aria-describedby="mtu-help" :format="false"
:format="false" :placeholder="t('mtu_placeholder')" :min="400" :max="1380" fluid/> :placeholder="t('mtu_placeholder')" :min="400" :max="1380" fluid />
</div> </div>
</div> </div>
@@ -329,15 +339,15 @@ const bool_flags: BoolFlag[] = [
<div class="flex"> <div class="flex">
<label for="relay_network_whitelist">{{ t('relay_network_whitelist') }}</label> <label for="relay_network_whitelist">{{ t('relay_network_whitelist') }}</label>
<span class="pi pi-question-circle ml-2 self-center" <span class="pi pi-question-circle ml-2 self-center"
v-tooltip="t('relay_network_whitelist_help')"></span> v-tooltip="t('relay_network_whitelist_help')"></span>
</div> </div>
<ToggleButton v-model="curNetwork.enable_relay_network_whitelist" on-icon="pi pi-check" off-icon="pi pi-times" <ToggleButton v-model="curNetwork.enable_relay_network_whitelist" on-icon="pi pi-check"
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> off-icon="pi pi-times" :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
<div v-if="curNetwork.enable_relay_network_whitelist" class="items-center flex flex-row gap-x-4"> <div v-if="curNetwork.enable_relay_network_whitelist" class="items-center flex flex-row gap-x-4">
<div class="min-w-64 w-full"> <div class="min-w-64 w-full">
<AutoComplete id="relay_network_whitelist" v-model="curNetwork.relay_network_whitelist" <AutoComplete id="relay_network_whitelist" v-model="curNetwork.relay_network_whitelist"
:placeholder="t('relay_network_whitelist')" class="w-full" multiple fluid :placeholder="t('relay_network_whitelist')" class="w-full" multiple fluid
:suggestions="whitelistSuggestions" @complete="searchWhitelistSuggestions" /> :suggestions="whitelistSuggestions" @complete="searchWhitelistSuggestions" />
</div> </div>
</div> </div>
</div> </div>
@@ -350,12 +360,12 @@ const bool_flags: BoolFlag[] = [
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('manual_routes_help')"></span> <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('manual_routes_help')"></span>
</div> </div>
<ToggleButton v-model="curNetwork.enable_manual_routes" on-icon="pi pi-check" off-icon="pi pi-times" <ToggleButton v-model="curNetwork.enable_manual_routes" on-icon="pi pi-check" off-icon="pi pi-times"
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
<div v-if="curNetwork.enable_manual_routes" class="items-center flex flex-row gap-x-4"> <div v-if="curNetwork.enable_manual_routes" class="items-center flex flex-row gap-x-4">
<div class="min-w-64 w-full"> <div class="min-w-64 w-full">
<AutoComplete id="routes" v-model="curNetwork.routes" <AutoComplete id="routes" v-model="curNetwork.routes"
:placeholder="t('chips_placeholder', ['192.168.0.0/16'])" class="w-full" multiple fluid :placeholder="t('chips_placeholder', ['192.168.0.0/16'])" class="w-full" multiple fluid
:suggestions="inetSuggestions" @complete="searchInetSuggestions" /> :suggestions="inetSuggestions" @complete="searchInetSuggestions" />
</div> </div>
</div> </div>
</div> </div>
@@ -368,11 +378,11 @@ const bool_flags: BoolFlag[] = [
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('socks5_help')"></span> <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('socks5_help')"></span>
</div> </div>
<ToggleButton v-model="curNetwork.enable_socks5" on-icon="pi pi-check" off-icon="pi pi-times" <ToggleButton v-model="curNetwork.enable_socks5" on-icon="pi pi-check" off-icon="pi pi-times"
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
<div v-if="curNetwork.enable_socks5" class="items-center flex flex-row gap-x-4"> <div v-if="curNetwork.enable_socks5" class="items-center flex flex-row gap-x-4">
<div class="min-w-64 w-full"> <div class="min-w-64 w-full">
<InputNumber id="socks5_port" v-model="curNetwork.socks5_port" aria-describedby="rpc_port-help" <InputNumber id="socks5_port" v-model="curNetwork.socks5_port" aria-describedby="rpc_port-help"
:format="false" :allow-empty="false" :min="0" :max="65535" class="w-full"/> :format="false" :allow-empty="false" :min="0" :max="65535" class="w-full" />
</div> </div>
</div> </div>
</div> </div>
@@ -385,8 +395,8 @@ const bool_flags: BoolFlag[] = [
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('exit_nodes_help')"></span> <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('exit_nodes_help')"></span>
</div> </div>
<AutoComplete id="exit_nodes" v-model="curNetwork.exit_nodes" <AutoComplete id="exit_nodes" v-model="curNetwork.exit_nodes"
:placeholder="t('chips_placeholder', ['192.168.8.8'])" class="w-full" multiple fluid :placeholder="t('chips_placeholder', ['192.168.8.8'])" class="w-full" multiple fluid
:suggestions="exitNodesSuggestions" @complete="searchExitNodesSuggestions" /> :suggestions="exitNodesSuggestions" @complete="searchExitNodesSuggestions" />
</div> </div>
</div> </div>
@@ -397,8 +407,8 @@ const bool_flags: BoolFlag[] = [
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mapped_listeners_help')"></span> <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mapped_listeners_help')"></span>
</div> </div>
<AutoComplete id="mapped_listeners" v-model="curNetwork.mapped_listeners" <AutoComplete id="mapped_listeners" v-model="curNetwork.mapped_listeners"
:placeholder="t('chips_placeholder', ['tcp://123.123.123.123:11223'])" class="w-full" :placeholder="t('chips_placeholder', ['tcp://123.123.123.123:11223'])" class="w-full" multiple fluid
multiple fluid :suggestions="peerSuggestions" @complete="searchPeerSuggestions" /> :suggestions="peerSuggestions" @complete="searchPeerSuggestions" />
</div> </div>
</div> </div>
@@ -0,0 +1,103 @@
<script setup lang="ts">
import { onMounted, ref, watch } from 'vue';
import { NetworkConfig } from '../types/network';
import { Divider, Button, Dialog, Textarea } from 'primevue'
import { useI18n } from 'vue-i18n'
const { t } = useI18n()
const props = defineProps({
readonly: {
type: Boolean,
default: false,
},
generateConfig: {
type: Object as () => (config: NetworkConfig) => Promise<string>,
required: true,
},
saveConfig: {
type: Object as () => (config: string) => Promise<void>,
required: true,
},
})
const curNetwork = defineModel('curNetwork', {
type: Object as () => NetworkConfig | undefined,
required: true,
})
const visible = defineModel('visible', {
type: Boolean,
default: false,
})
watch([visible, curNetwork], async ([newVisible, newCurNetwork]) => {
if (!newVisible) {
tomlConfig.value = '';
return;
}
if (!newCurNetwork) {
tomlConfig.value = '';
return;
}
const config = newCurNetwork;
try {
errorMessage.value = '';
tomlConfig.value = await props.generateConfig(config);
} catch (e) {
errorMessage.value = 'Failed to generate config: ' + (e instanceof Error ? e.message : String(e));
tomlConfig.value = '';
}
})
onMounted(async () => {
if (!visible.value) {
return;
}
if (!curNetwork.value) {
tomlConfig.value = '';
return;
}
const config = curNetwork.value;
try {
tomlConfig.value = await props.generateConfig(config);
errorMessage.value = '';
} catch (e) {
errorMessage.value = 'Failed to generate config: ' + (e instanceof Error ? e.message : String(e));
tomlConfig.value = '';
}
});
const handleConfigSave = async () => {
if (props.readonly) return;
try {
await props.saveConfig(tomlConfig.value);
visible.value = false;
} catch (e) {
errorMessage.value = 'Failed to save config: ' + (e instanceof Error ? e.message : String(e));
}
};
const tomlConfig = ref<string>('')
const tomlConfigRows = ref<number>(1);
const errorMessage = ref<string>('');
watch(tomlConfig, (newValue) => {
tomlConfigRows.value = newValue.split('\n').length;
errorMessage.value = '';
});
</script>
<template>
<Dialog v-model:visible="visible" modal :header="t('config_file')" :style="{ width: '70%' }">
<pre v-if="errorMessage"
class="mb-2 p-2 rounded text-sm overflow-auto bg-red-100 text-red-700 max-h-40">{{ errorMessage }}</pre>
<div class="flex w-full" style="max-height: 60vh; overflow-y: auto;">
<Textarea v-model="tomlConfig" class="w-full h-full font-mono flex flex-col resize-none" :rows="tomlConfigRows"
spellcheck="false" :readonly="props.readonly"></Textarea>
</div>
<Divider />
<div class="flex gap-2 justify-end">
<Button v-if="!props.readonly" type="button" :label="t('save')" @click="handleConfigSave" />
<Button type="button" :label="t('close')" @click="visible = false" />
</div>
</Dialog>
</template>
@@ -1,2 +1,3 @@
export { default as Config } from './Config.vue'; export { default as Config } from './Config.vue';
export { default as Status } from './Status.vue'; export { default as Status } from './Status.vue';
export { default as ConfigEditDialog } from './ConfigEditDialog.vue';
@@ -1,7 +1,7 @@
import './style.css' import './style.css'
import type { App } from 'vue'; import type { App } from 'vue';
import { Config, Status } from "./components"; import { Config, Status, ConfigEditDialog } from "./components";
import Aura from '@primevue/themes/aura' import Aura from '@primevue/themes/aura'
import PrimeVue from 'primevue/config' import PrimeVue from 'primevue/config'
@@ -41,10 +41,11 @@ export default {
}); });
app.component('Config', Config); app.component('Config', Config);
app.component('ConfigEditDialog', ConfigEditDialog);
app.component('Status', Status); app.component('Status', Status);
app.component('HumanEvent', HumanEvent); app.component('HumanEvent', HumanEvent);
app.directive('tooltip', vTooltip as any); app.directive('tooltip', vTooltip as any);
} }
}; };
export { Config, Status, I18nUtils, NetworkTypes, Api, Utils }; export { Config, ConfigEditDialog, Status, I18nUtils, NetworkTypes, Api, Utils };
@@ -18,6 +18,7 @@ advanced_settings: 高级设置
basic_settings: 基础设置 basic_settings: 基础设置
listener_urls: 监听地址 listener_urls: 监听地址
rpc_port: RPC端口 rpc_port: RPC端口
rpc_portal_whitelists: RPC白名单
config_network: 配置网络 config_network: 配置网络
running: 运行中 running: 运行中
error_msg: 错误信息 error_msg: 错误信息
@@ -50,7 +51,11 @@ dev_name_placeholder: 注意:当多个网络同时使用相同的TUN接口名
off_text: 点击关闭 off_text: 点击关闭
on_text: 点击开启 on_text: 点击开启
show_config: 显示配置 show_config: 显示配置
edit_config: 编辑配置文件
config_file: 配置文件
close: 关闭 close: 关闭
save: 保存
config_saved: 配置已保存
use_latency_first: 延迟优先模式 use_latency_first: 延迟优先模式
my_node_info: 当前节点信息 my_node_info: 当前节点信息
@@ -84,6 +89,12 @@ enable_kcp_proxy_help: 将 TCP 流量转为 KCP 流量,降低传输延迟,
disable_kcp_input: 禁用 KCP 输入 disable_kcp_input: 禁用 KCP 输入
disable_kcp_input_help: 禁用 KCP 入站流量,其他开启 KCP 代理的节点仍然使用 TCP 连接到本节点。 disable_kcp_input_help: 禁用 KCP 入站流量,其他开启 KCP 代理的节点仍然使用 TCP 连接到本节点。
enable_quic_proxy: 启用 QUIC 代理
enable_quic_proxy_help: 将 TCP 流量转为 QUIC 流量,降低传输延迟,提升传输速度。
disable_quic_input: 禁用 QUIC 输入
disable_quic_input_help: 禁用 QUIC 入站流量,其他开启 QUIC 代理的节点仍然使用 TCP 连接到本节点。
disable_p2p: 禁用 P2P disable_p2p: 禁用 P2P
disable_p2p_help: 禁用 P2P 模式,所有流量通过手动指定的服务器中转。 disable_p2p_help: 禁用 P2P 模式,所有流量通过手动指定的服务器中转。
@@ -18,6 +18,7 @@ advanced_settings: Advanced Settings
basic_settings: Basic Settings basic_settings: Basic Settings
listener_urls: Listener URLs listener_urls: Listener URLs
rpc_port: RPC Port rpc_port: RPC Port
rpc_portal_whitelists: RPC Whitelist
config_network: Config Network config_network: Config Network
running: Running running: Running
error_msg: Error Message error_msg: Error Message
@@ -51,7 +52,11 @@ dev_name_placeholder: 'Note: When multiple networks use the same TUN interface n
off_text: Press to disable off_text: Press to disable
on_text: Press to enable on_text: Press to enable
show_config: Show Config show_config: Show Config
edit_config: Edit Config File
config_file: Config File
close: Close close: Close
save: Save
config_saved: Configuration saved
my_node_info: My Node Info my_node_info: My Node Info
peer_count: Connected peer_count: Connected
upload: Upload upload: Upload
@@ -83,6 +88,12 @@ enable_kcp_proxy_help: Convert TCP traffic to KCP traffic to reduce latency and
disable_kcp_input: Disable KCP Input disable_kcp_input: Disable KCP Input
disable_kcp_input_help: Disable inbound KCP traffic, while nodes with KCP proxy enabled continue to connect using TCP. disable_kcp_input_help: Disable inbound KCP traffic, while nodes with KCP proxy enabled continue to connect using TCP.
enable_quic_proxy: Enable QUIC Proxy
enable_quic_proxy_help: Convert TCP traffic to QUIC traffic to reduce latency and boost transmission speed.
disable_quic_input: Disable QUIC Input
disable_quic_input_help: Disable inbound QUIC traffic, while nodes with QUIC proxy enabled continue to connect using TCP.
disable_p2p: Disable P2P disable_p2p: Disable P2P
disable_p2p_help: Disable P2P mode; route all traffic through a manually specified relay server. disable_p2p_help: Disable P2P mode; route all traffic through a manually specified relay server.
@@ -47,6 +47,15 @@ export interface GenerateConfigResponse {
error?: string; error?: string;
} }
export interface ParseConfigRequest {
toml_config: string;
}
export interface ParseConfigResponse {
config?: NetworkConfig;
error?: string;
}
export class ApiClient { export class ApiClient {
private client: AxiosInstance; private client: AxiosInstance;
private authFailedCb: Function | undefined; private authFailedCb: Function | undefined;
@@ -215,6 +224,18 @@ export class ApiClient {
return { error: 'Unknown error: ' + error }; return { error: 'Unknown error: ' + error };
} }
} }
public async parse_config(config: ParseConfigRequest): Promise<ParseConfigResponse> {
try {
const response = await this.client.post<any, ParseConfigResponse>('/parse-config', config);
return response;
} catch (error) {
if (error instanceof AxiosError) {
return { error: error.response?.data };
}
return { error: 'Unknown error: ' + error };
}
}
} }
export default ApiClient; export default ApiClient;
@@ -39,6 +39,8 @@ export interface NetworkConfig {
use_smoltcp?: boolean use_smoltcp?: boolean
enable_kcp_proxy?: boolean enable_kcp_proxy?: boolean
disable_kcp_input?: boolean disable_kcp_input?: boolean
enable_quic_proxy?: boolean
disable_quic_input?: boolean
disable_p2p?: boolean disable_p2p?: boolean
bind_device?: boolean bind_device?: boolean
no_tun?: boolean no_tun?: boolean
@@ -65,6 +67,8 @@ export interface NetworkConfig {
enable_magic_dns?: boolean enable_magic_dns?: boolean
enable_private_mode?: boolean enable_private_mode?: boolean
rpc_portal_whitelists: string[]
} }
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
@@ -103,6 +107,8 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
use_smoltcp: false, use_smoltcp: false,
enable_kcp_proxy: false, enable_kcp_proxy: false,
disable_kcp_input: false, disable_kcp_input: false,
enable_quic_proxy: false,
disable_quic_input: false,
disable_p2p: false, disable_p2p: false,
bind_device: true, bind_device: true,
no_tun: false, no_tun: false,
@@ -123,6 +129,7 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
mapped_listeners: [], mapped_listeners: [],
enable_magic_dns: false, enable_magic_dns: false,
enable_private_mode: false, enable_private_mode: false,
rpc_portal_whitelists: [],
} }
} }
@@ -2,12 +2,11 @@
import { NetworkTypes } from 'easytier-frontend-lib'; import { NetworkTypes } from 'easytier-frontend-lib';
import {computed, ref} from 'vue'; import {computed, ref} from 'vue';
import { Api } from 'easytier-frontend-lib' import { Api } from 'easytier-frontend-lib'
import {AutoComplete, Divider} from "primevue"; import {AutoComplete, Divider, Button, Textarea} from "primevue";
import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host" import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host"
const api = computed<Api.ApiClient>(() => new Api.ApiClient(apiHost.value)); const api = computed<Api.ApiClient>(() => new Api.ApiClient(apiHost.value));
const apiHost = ref<string>(getInitialApiHost()) const apiHost = ref<string>(getInitialApiHost())
const apiHostSuggestions = ref<Array<string>>([]) const apiHostSuggestions = ref<Array<string>>([])
const apiHostSearch = async (event: { query: string }) => { const apiHostSearch = async (event: { query: string }) => {
@@ -22,23 +21,46 @@ const apiHostSearch = async (event: { query: string }) => {
} }
const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG()); const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG());
const toml_config = ref<string>("Press 'Run Network' to generate TOML configuration"); const toml_config = ref<string>("");
const errorMessage = ref<string>("");
const generateConfig = (config: NetworkTypes.NetworkConfig) => { const generateConfig = (config: NetworkTypes.NetworkConfig) => {
saveApiHost(apiHost.value) saveApiHost(apiHost.value)
errorMessage.value = "";
api.value?.generate_config({ api.value?.generate_config({
config: config config: config
}).then((res) => { }).then((res) => {
if (res.error) { if (res.error) {
toml_config.value = res.error; errorMessage.value = "Generation failed: " + res.error;
} else if (res.toml_config) { } else if (res.toml_config) {
toml_config.value = res.toml_config; toml_config.value = res.toml_config;
} else { } else {
toml_config.value = "Api server returned an unexpected response"; errorMessage.value = "Api server returned an unexpected response";
} }
}).catch(err => {
errorMessage.value = "Generate request failed: " + (err instanceof Error ? err.message : String(err));
}); });
}; };
const parseConfig = async () => {
try {
errorMessage.value = "";
const res = await api.value?.parse_config({
toml_config: toml_config.value
});
if (res.error) {
errorMessage.value = "Parse failed: " + res.error;
} else if (res.config) {
newNetworkConfig.value = res.config;
} else {
errorMessage.value = "API returned an unexpected response";
}
} catch (e) {
errorMessage.value = "Parse request failed: " + (e instanceof Error ? e.message : String(e));
}
};
</script> </script>
<template> <template>
@@ -55,8 +77,17 @@ const generateConfig = (config: NetworkTypes.NetworkConfig) => {
</div> </div>
<Config :cur-network="newNetworkConfig" @run-network="generateConfig" /> <Config :cur-network="newNetworkConfig" @run-network="generateConfig" />
</div> </div>
<div class="sm:w-full md:w-1/2 p-4 bg-gray-100"> <div class="sm:w-full md:w-1/2 p-4 flex flex-col h-[calc(100vh-80px)]">
<pre class="whitespace-pre-wrap">{{ toml_config }}</pre> <pre v-if="errorMessage" class="mb-2 p-2 rounded text-sm overflow-auto bg-red-100 text-red-700 max-h-40">{{ errorMessage }}</pre>
<Textarea
v-model="toml_config"
spellcheck="false"
class="w-full flex-grow p-2 bg-gray-100 whitespace-pre-wrap font-mono border-none focus:outline-none resize-none"
placeholder="Press 'Run Network' to generate TOML configuration, or paste your TOML configuration here to parse it"
></Textarea>
<div class="mt-3 flex justify-center">
<Button label="Parse Config" icon="pi pi-arrow-left" icon-pos="left" @click="parseConfig" />
</div>
</div> </div>
</div> </div>
</div> </div>
@@ -1,6 +1,6 @@
<script setup lang="ts"> <script setup lang="ts">
import {Toolbar, IftaLabel, Select, Button, ConfirmPopup, Dialog, useConfirm, useToast, Divider} from 'primevue'; import { Toolbar, IftaLabel, Select, Button, ConfirmPopup, Dialog, useConfirm, useToast, Divider } from 'primevue';
import { NetworkTypes, Status, Utils, Api, } from 'easytier-frontend-lib'; import { NetworkTypes, Status, Utils, Api, ConfigEditDialog } from 'easytier-frontend-lib';
import { watch, computed, onMounted, onUnmounted, ref } from 'vue'; import { watch, computed, onMounted, onUnmounted, ref } from 'vue';
import { useRoute, useRouter } from 'vue-router'; import { useRoute, useRouter } from 'vue-router';
@@ -33,6 +33,7 @@ const curNetworkInfo = ref<NetworkTypes.NetworkInstance | null>(null);
const isEditing = ref(false); const isEditing = ref(false);
const showCreateNetworkDialog = ref(false); const showCreateNetworkDialog = ref(false);
const showConfigEditDialog = ref(false);
const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG()); const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG());
const listInstanceIdResponse = ref<Api.ListNetworkInstanceIdResponse | undefined>(undefined); const listInstanceIdResponse = ref<Api.ListNetworkInstanceIdResponse | undefined>(undefined);
@@ -103,7 +104,12 @@ const updateNetworkState = async (disabled: boolean) => {
return; return;
} }
await props.api?.update_device_instance_state(deviceId.value, selectedInstanceId.value.uuid, disabled); if (disabled || !disabledNetworkConfig.value) {
await props.api?.update_device_instance_state(deviceId.value, selectedInstanceId.value.uuid, disabled);
} else if (disabledNetworkConfig.value) {
await props.api?.delete_network(deviceId.value, disabledNetworkConfig.value.instance_id);
await props.api?.run_network(deviceId.value, disabledNetworkConfig.value);
}
await loadNetworkInstanceIds(); await loadNetworkInstanceIds();
} }
@@ -211,62 +217,97 @@ const loadDeviceInfo = async () => {
} }
const exportConfig = async () => { const exportConfig = async () => {
if (!deviceId.value || !instanceId.value) { if (!deviceId.value || !instanceId.value) {
toast.add({ severity: 'error', summary: 'Error', detail: 'No network instance selected', life: 2000 }); toast.add({ severity: 'error', summary: 'Error', detail: 'No network instance selected', life: 2000 });
return; return;
} }
try { try {
let ret = await props.api?.get_network_config(deviceId.value, instanceId.value); let networkConfig = await props.api?.get_network_config(deviceId.value, instanceId.value);
delete ret.instance_id; delete networkConfig.instance_id;
exportJsonFile(JSON.stringify(ret, null, 2),instanceId.value +'.json'); let { toml_config: tomlConfig, error } = await props.api?.generate_config({
} catch (e: any) { config: networkConfig
console.error(e); });
toast.add({ severity: 'error', summary: 'Error', detail: 'Failed to export network config, error: ' + JSON.stringify(e.response.data), life: 2000 }); if (error) {
return; throw { response: { data: error } };
} }
exportTomlFile(tomlConfig ?? '', instanceId.value + '.toml');
} catch (e: any) {
console.error(e);
toast.add({ severity: 'error', summary: 'Error', detail: 'Failed to export network config, error: ' + JSON.stringify(e.response.data), life: 2000 });
return;
}
} }
const importConfig = () => { const importConfig = () => {
configFile.value.click(); configFile.value.click();
} }
const handleFileUpload = (event: Event) => { const handleFileUpload = (event: Event) => {
const files = (event.target as HTMLInputElement).files; const files = (event.target as HTMLInputElement).files;
const file = files ? files[0] : null; const file = files ? files[0] : null;
if (file) { if (!file) return;
const reader = new FileReader(); const reader = new FileReader();
reader.onload = (e) => { reader.onload = async (e) => {
try { try {
let str = e.target?.result?.toString(); let tomlConfig = e.target?.result?.toString();
if(str){ if (!tomlConfig) return;
const config = JSON.parse(str); const resp = await props.api?.parse_config({ toml_config: tomlConfig });
if(config === null || typeof config !== "object"){ if (resp.error) {
throw new Error(); throw resp.error;
} }
Object.assign(newNetworkConfig.value, config);
toast.add({ severity: 'success', summary: 'Import Success', detail: "Config file import success", life: 2000 }); const config = resp.config;
if (!config) return;
config.instance_id = newNetworkConfig.value?.instance_id ?? config?.instance_id;
Object.assign(newNetworkConfig.value, resp.config);
toast.add({ severity: 'success', summary: 'Import Success', detail: "Config file import success", life: 2000 });
} catch (error) {
toast.add({ severity: 'error', summary: 'Error', detail: 'Config file parse error: ' + error, life: 2000 });
} }
} catch (error) { configFile.value.value = null;
toast.add({ severity: 'error', summary: 'Error', detail: 'Config file parse error.', life: 2000 });
}
configFile.value.value = null;
} }
reader.readAsText(file); reader.readAsText(file);
}
} }
const exportJsonFile = (context: string, name: string) => { const exportTomlFile = (context: string, name: string) => {
let url = window.URL.createObjectURL(new Blob([context], { type: 'application/json' })); let url = window.URL.createObjectURL(new Blob([context], { type: 'application/toml' }));
let link = document.createElement('a'); let link = document.createElement('a');
link.style.display = 'none'; link.style.display = 'none';
link.href = url; link.href = url;
link.setAttribute('download', name); link.setAttribute('download', name);
document.body.appendChild(link); document.body.appendChild(link);
link.click(); link.click();
document.body.removeChild(link); document.body.removeChild(link);
window.URL.revokeObjectURL(url); window.URL.revokeObjectURL(url);
}
const generateConfig = async (config: NetworkTypes.NetworkConfig): Promise<string> => {
let { toml_config: tomlConfig, error } = await props.api?.generate_config({ config });
if (error) {
throw error;
}
return tomlConfig ?? '';
}
const saveConfig = async (tomlConfig: string): Promise<void> => {
let resp = await props.api?.parse_config({ toml_config: tomlConfig });
if (resp.error) {
throw resp.error;
};
const config = resp.config;
if (!config) {
throw new Error("Parsed config is empty");
}
config.instance_id = disabledNetworkConfig.value?.instance_id ?? config?.instance_id;
if (networkIsDisabled.value) {
disabledNetworkConfig.value = config;
} else {
newNetworkConfig.value = config;
}
} }
let periodFunc = new Utils.PeriodicTask(async () => { let periodFunc = new Utils.PeriodicTask(async () => {
@@ -288,18 +329,23 @@ onUnmounted(() => {
</script> </script>
<template> <template>
<input type="file" @change="handleFileUpload" class="hidden" accept="application/json" ref="configFile"/> <input type="file" @change="handleFileUpload" class="hidden" accept="application/toml" ref="configFile" />
<ConfirmPopup></ConfirmPopup> <ConfirmPopup></ConfirmPopup>
<Dialog v-model:visible="showCreateNetworkDialog" modal :header="!isEditing ? 'Create New Network' : 'Edit Network'" <Dialog v-if="!networkIsDisabled" v-model:visible="showCreateNetworkDialog" modal
:style="{ width: '55rem' }"> :header="!isEditing ? 'Create New Network' : 'Edit Network'" :style="{ width: '55rem' }">
<div class="flex flex-col"> <div class="flex flex-col">
<div class="w-11/12 self-center "> <div class="w-11/12 self-center space-x-2">
<Button @click="importConfig" icon="pi pi-file-import" label="Import" iconPos="right" /> <Button @click="showConfigEditDialog = true" icon="pi pi-pen-to-square" label="Edit File" iconPos="right" />
<Divider /> <Button @click="importConfig" icon="pi pi-file-import" label="Import" iconPos="right" />
</div> </div>
</div> </div>
<Divider />
<Config :cur-network="newNetworkConfig" @run-network="createNewNetwork"></Config> <Config :cur-network="newNetworkConfig" @run-network="createNewNetwork"></Config>
</Dialog> </Dialog>
<ConfigEditDialog v-if="networkIsDisabled" v-model:visible="showCreateNetworkDialog"
:cur-network="disabledNetworkConfig" :generate-config="generateConfig" :save-config="saveConfig" />
<ConfigEditDialog v-else v-model:visible="showConfigEditDialog" :cur-network="newNetworkConfig"
:generate-config="generateConfig" :save-config="saveConfig" />
<Toolbar> <Toolbar>
<template #start> <template #start>
@@ -329,7 +375,7 @@ onUnmounted(() => {
</Status> </Status>
<Divider /> <Divider />
<div class="text-center"> <div class="text-center">
<Button @click="updateNetworkState(true)" label="Disable Network" severity="warn" /> <Button @click="updateNetworkState(true)" label="Disable Network" severity="warn" />
</div> </div>
</div> </div>
+29 -25
View File
@@ -1,21 +1,24 @@
pub mod session; pub mod session;
pub mod storage; pub mod storage;
use std::sync::Arc; use std::sync::{
atomic::{AtomicU32, Ordering},
Arc,
};
use dashmap::DashMap; use dashmap::DashMap;
use easytier::{ use easytier::{proto::web::HeartbeatRequest, tunnel::TunnelListener};
common::scoped_task::ScopedTask, proto::web::HeartbeatRequest, tunnel::TunnelListener,
};
use session::Session; use session::Session;
use storage::{Storage, StorageToken}; use storage::{Storage, StorageToken};
use tokio::task::JoinSet;
use crate::db::{Db, UserIdInDb}; use crate::db::{Db, UserIdInDb};
#[derive(Debug)] #[derive(Debug)]
pub struct ClientManager { pub struct ClientManager {
accept_task: Option<ScopedTask<()>>, tasks: JoinSet<()>,
clear_task: Option<ScopedTask<()>>,
listeners_cnt: Arc<AtomicU32>,
client_sessions: Arc<DashMap<url::Url, Arc<Session>>>, client_sessions: Arc<DashMap<url::Url, Arc<Session>>>,
storage: Storage, storage: Storage,
@@ -23,24 +26,35 @@ pub struct ClientManager {
impl ClientManager { impl ClientManager {
pub fn new(db: Db) -> Self { pub fn new(db: Db) -> Self {
let client_sessions = Arc::new(DashMap::new());
let sessions: Arc<DashMap<url::Url, Arc<Session>>> = client_sessions.clone();
let mut tasks = JoinSet::new();
tasks.spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_secs(15)).await;
sessions.retain(|_, session| session.is_running());
}
});
ClientManager { ClientManager {
accept_task: None, tasks,
clear_task: None,
client_sessions: Arc::new(DashMap::new()), listeners_cnt: Arc::new(AtomicU32::new(0)),
client_sessions,
storage: Storage::new(db), storage: Storage::new(db),
} }
} }
pub async fn serve<L: TunnelListener + 'static>( pub async fn add_listener<L: TunnelListener + 'static>(
&mut self, &mut self,
mut listener: L, mut listener: L,
) -> Result<(), anyhow::Error> { ) -> Result<(), anyhow::Error> {
listener.listen().await?; listener.listen().await?;
self.listeners_cnt.fetch_add(1, Ordering::Relaxed);
let sessions = self.client_sessions.clone(); let sessions = self.client_sessions.clone();
let storage = self.storage.weak_ref(); let storage = self.storage.weak_ref();
let task = tokio::spawn(async move { let listeners_cnt = self.listeners_cnt.clone();
self.tasks.spawn(async move {
while let Ok(tunnel) = listener.accept().await { while let Ok(tunnel) = listener.accept().await {
let info = tunnel.info().unwrap(); let info = tunnel.info().unwrap();
let client_url: url::Url = info.remote_addr.unwrap().into(); let client_url: url::Url = info.remote_addr.unwrap().into();
@@ -49,24 +63,14 @@ impl ClientManager {
session.serve(tunnel).await; session.serve(tunnel).await;
sessions.insert(client_url, Arc::new(session)); sessions.insert(client_url, Arc::new(session));
} }
listeners_cnt.fetch_sub(1, Ordering::Relaxed);
}); });
self.accept_task = Some(ScopedTask::from(task));
let sessions = self.client_sessions.clone();
let task = tokio::spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_secs(15)).await;
sessions.retain(|_, session| session.is_running());
}
});
self.clear_task = Some(ScopedTask::from(task));
Ok(()) Ok(())
} }
pub fn is_running(&self) -> bool { pub fn is_running(&self) -> bool {
self.accept_task.is_some() && self.clear_task.is_some() self.listeners_cnt.load(Ordering::Relaxed) > 0
} }
pub async fn list_sessions(&self) -> Vec<StorageToken> { pub async fn list_sessions(&self) -> Vec<StorageToken> {
@@ -132,7 +136,7 @@ mod tests {
async fn test_client() { async fn test_client() {
let listener = UdpTunnelListener::new("udp://0.0.0.0:54333".parse().unwrap()); let listener = UdpTunnelListener::new("udp://0.0.0.0:54333".parse().unwrap());
let mut mgr = ClientManager::new(Db::memory_db().await); let mut mgr = ClientManager::new(Db::memory_db().await);
mgr.serve(Box::new(listener)).await.unwrap(); mgr.add_listener(Box::new(listener)).await.unwrap();
mgr.db() mgr.db()
.inner() .inner()
+58 -22
View File
@@ -8,9 +8,10 @@ use std::sync::Arc;
use clap::Parser; use clap::Parser;
use easytier::{ use easytier::{
common::{ common::{
config::{ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, TomlConfigLoader}, config::{ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader},
constants::EASYTIER_VERSION, constants::EASYTIER_VERSION,
error::Error, error::Error,
network::{local_ipv4, local_ipv6},
}, },
tunnel::{ tunnel::{
tcp::TcpTunnelListener, udp::UdpTunnelListener, websocket::WSTunnelListener, TunnelListener, tcp::TcpTunnelListener, udp::UdpTunnelListener, websocket::WSTunnelListener, TunnelListener,
@@ -100,6 +101,22 @@ struct Cli {
api_host: Option<url::Url>, api_host: Option<url::Url>,
} }
impl LoggingConfigLoader for &Cli {
fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
ConsoleLoggerConfig {
level: self.console_log_level.clone(),
}
}
fn get_file_logger_config(&self) -> FileLoggerConfig {
FileLoggerConfig {
dir: self.file_log_dir.clone(),
level: self.file_log_level.clone(),
file: None,
}
}
}
pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> { pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> {
Ok(match l.scheme() { Ok(match l.scheme() {
"tcp" => Box::new(TcpTunnelListener::new(l.clone())), "tcp" => Box::new(TcpTunnelListener::new(l.clone())),
@@ -111,6 +128,31 @@ pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Erro
}) })
} }
async fn get_dual_stack_listener(
protocol: &str,
port: u16,
) -> Result<
(
Option<Box<dyn TunnelListener>>,
Option<Box<dyn TunnelListener>>,
),
Error,
> {
let is_protocol_support_dual_stack =
protocol.trim().to_lowercase() == "tcp" || protocol.trim().to_lowercase() == "udp";
let v6_listener = if is_protocol_support_dual_stack && local_ipv6().await.is_ok() {
get_listener_by_url(&format!("{}://[::0]:{}", protocol, port).parse().unwrap()).ok()
} else {
None
};
let v4_listener = if let Ok(_) = local_ipv4().await {
get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok()
} else {
None
};
Ok((v6_listener, v4_listener))
}
#[tokio::main] #[tokio::main]
async fn main() { async fn main() {
let locale = sys_locale::get_locale().unwrap_or_else(|| String::from("en-US")); let locale = sys_locale::get_locale().unwrap_or_else(|| String::from("en-US"));
@@ -118,31 +160,25 @@ async fn main() {
setup_panic_handler(); setup_panic_handler();
let cli = Cli::parse(); let cli = Cli::parse();
let config = TomlConfigLoader::default(); init_logger(&cli, false).unwrap();
config.set_console_logger_config(ConsoleLoggerConfig {
level: cli.console_log_level,
});
config.set_file_logger_config(FileLoggerConfig {
dir: cli.file_log_dir,
level: cli.file_log_level,
file: None,
});
init_logger(config, false).unwrap();
// let db = db::Db::new(":memory:").await.unwrap(); // let db = db::Db::new(":memory:").await.unwrap();
let db = db::Db::new(cli.db).await.unwrap(); let db = db::Db::new(cli.db).await.unwrap();
let listener = get_listener_by_url(
&format!(
"{}://0.0.0.0:{}",
cli.config_server_protocol, cli.config_server_port
)
.parse()
.unwrap(),
)
.unwrap();
let mut mgr = client_manager::ClientManager::new(db.clone()); let mut mgr = client_manager::ClientManager::new(db.clone());
mgr.serve(listener).await.unwrap(); let (v6_listener, v4_listener) =
get_dual_stack_listener(&cli.config_server_protocol, cli.config_server_port)
.await
.unwrap();
if v4_listener.is_none() && v6_listener.is_none() {
panic!("Listen to both IPv4 and IPv6 failed");
}
if let Some(listener) = v6_listener {
mgr.add_listener(listener).await.unwrap();
}
if let Some(listener) = v4_listener {
mgr.add_listener(listener).await.unwrap();
}
let mgr = Arc::new(mgr); let mgr = Arc::new(mgr);
#[cfg(feature = "embed")] #[cfg(feature = "embed")]
+32 -1
View File
@@ -11,7 +11,7 @@ use axum::{extract::State, routing::get, Json, Router};
use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer}; use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer};
use axum_login::{login_required, AuthManagerLayerBuilder, AuthUser, AuthzBackend}; use axum_login::{login_required, AuthManagerLayerBuilder, AuthUser, AuthzBackend};
use axum_messages::MessagesManagerLayer; use axum_messages::MessagesManagerLayer;
use easytier::common::config::ConfigLoader; use easytier::common::config::{ConfigLoader, TomlConfigLoader};
use easytier::common::scoped_task::ScopedTask; use easytier::common::scoped_task::ScopedTask;
use easytier::launcher::NetworkConfig; use easytier::launcher::NetworkConfig;
use easytier::proto::rpc_types; use easytier::proto::rpc_types;
@@ -68,6 +68,17 @@ struct GenerateConfigResponse {
toml_config: Option<String>, toml_config: Option<String>,
} }
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct ParseConfigRequest {
toml_config: String,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct ParseConfigResponse {
error: Option<String>,
config: Option<NetworkConfig>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)] #[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Error { pub struct Error {
message: String, message: String,
@@ -158,6 +169,25 @@ impl RestfulServer {
} }
} }
async fn handle_parse_config(
Json(req): Json<ParseConfigRequest>,
) -> Result<Json<ParseConfigResponse>, HttpHandleError> {
let config = TomlConfigLoader::new_from_str(&req.toml_config)
.and_then(|config| NetworkConfig::new_from_config(&config));
match config {
Ok(c) => Ok(ParseConfigResponse {
error: None,
config: Some(c),
}
.into()),
Err(e) => Ok(ParseConfigResponse {
error: Some(format!("{:?}", e)),
config: None,
}
.into()),
}
}
pub async fn start( pub async fn start(
mut self, mut self,
) -> Result< ) -> Result<
@@ -216,6 +246,7 @@ impl RestfulServer {
"/api/v1/generate-config", "/api/v1/generate-config",
post(Self::handle_generate_config), post(Self::handle_generate_config),
) )
.route("/api/v1/parse-config", post(Self::handle_parse_config))
.layer(MessagesManagerLayer) .layer(MessagesManagerLayer)
.layer(auth_layer) .layer(auth_layer)
.layer(tower_http::cors::CorsLayer::very_permissive()) .layer(tower_http::cors::CorsLayer::very_permissive())
+5 -6
View File
@@ -3,7 +3,7 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command." description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier" homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier" repository = "https://github.com/EasyTier/EasyTier"
version = "2.3.1" version = "2.3.2"
edition = "2021" edition = "2021"
authors = ["kkrainbow"] authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"] keywords = ["vpn", "p2p", "network", "easytier"]
@@ -64,7 +64,8 @@ bytes = "1.5.0"
pin-project-lite = "0.2.13" pin-project-lite = "0.2.13"
tachyonix = "0.3.0" tachyonix = "0.3.0"
quinn = { version = "0.11.0", optional = true, features = ["ring"] } quinn = { version = "0.11.8", optional = true, features = ["ring"] }
rustls = { version = "0.23.0", features = [ rustls = { version = "0.23.0", features = [
"ring", "ring",
], default-features = false, optional = true } ], default-features = false, optional = true }
@@ -270,7 +271,7 @@ thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = f
[dev-dependencies] [dev-dependencies]
serial_test = "3.0.0" serial_test = "3.0.0"
rstest = "0.18.2" rstest = "0.25.0"
futures-util = "0.3.30" futures-util = "0.3.30"
maplit = "1.0.2" maplit = "1.0.2"
@@ -280,9 +281,8 @@ tokio-socks = "0.5.2"
[features] [features]
default = ["wireguard", "mimalloc", "websocket", "smoltcp", "tun", "socks5"] default = ["wireguard", "mimalloc", "websocket", "smoltcp", "tun", "socks5", "quic"]
full = [ full = [
"quic",
"websocket", "websocket",
"wireguard", "wireguard",
"mimalloc", "mimalloc",
@@ -291,7 +291,6 @@ full = [
"tun", "tun",
"socks5", "socks5",
] ]
mips = ["aes-gcm", "mimalloc", "wireguard", "tun", "smoltcp", "socks5"]
wireguard = ["dep:boringtun", "dep:ring"] wireguard = ["dep:boringtun", "dep:ring"]
quic = ["dep:quinn", "dep:rustls", "dep:rcgen"] quic = ["dep:quinn", "dep:rustls", "dep:rcgen"]
mimalloc = ["dep:mimalloc"] mimalloc = ["dep:mimalloc"]
+22 -2
View File
@@ -10,6 +10,11 @@ core_clap:
配置服务器地址。允许格式: 配置服务器地址。允许格式:
完整URL--config-server udp://127.0.0.1:22020/admin 完整URL--config-server udp://127.0.0.1:22020/admin
仅用户名:--config-server admin,将使用官方的服务器 仅用户名:--config-server admin,将使用官方的服务器
machine_id:
en: |+
the machine id to identify this machine, used for config recovery after disconnection, must be unique and fixed. default is from system.
zh-CN: |+
Web 配置服务器通过 machine id 来识别机器,用于断线重连后的配置恢复,需要保证唯一且固定不变。默认从系统获得。
config_file: config_file:
en: "path to the config file, NOTE: the options set by cmdline args will override options in config file" en: "path to the config file, NOTE: the options set by cmdline args will override options in config file"
zh-CN: "配置文件路径,注意:命令行中的配置的选项会覆盖配置文件中的选项" zh-CN: "配置文件路径,注意:命令行中的配置的选项会覆盖配置文件中的选项"
@@ -32,11 +37,20 @@ core_clap:
en: "use a public shared node to discover peers" en: "use a public shared node to discover peers"
zh-CN: "使用公共共享节点来发现对等节点" zh-CN: "使用公共共享节点来发现对等节点"
proxy_networks: proxy_networks:
en: "export local networks to other peers in the vpn" en: |+
zh-CN: "将本地网络导出到VPN中的其他对等节点" export local networks to other peers in the vpn, e.g.: 10.0.0.0/24.
also support mapping proxy network to other cidr, e.g.: 10.0.0.0/24->192.168.0.0/24
other peers can access 10.0.0.1 with ip 192.168.0.1
zh-CN: |+
将本地网络导出到VPN中的其他对等节点,例如:10.0.0.0/24。
还支持将代理网络映射到其他CIDR,例如:10.0.0.0/24->192.168.0.0/24
其他对等节点可以通过 IP 192.168.0.1 来访问 10.0.0.1
rpc_portal: rpc_portal:
en: "rpc portal address to listen for management. 0 means random port, 12345 means listen on 12345 of localhost, 0.0.0.0:12345 means listen on 12345 of all interfaces. default is 0 and will try 15888 first" en: "rpc portal address to listen for management. 0 means random port, 12345 means listen on 12345 of localhost, 0.0.0.0:12345 means listen on 12345 of all interfaces. default is 0 and will try 15888 first"
zh-CN: "用于管理的RPC门户地址。0表示随机端口,12345表示在localhost的12345上监听,0.0.0.0:12345表示在所有接口的12345上监听。默认是0,首先尝试15888" zh-CN: "用于管理的RPC门户地址。0表示随机端口,12345表示在localhost的12345上监听,0.0.0.0:12345表示在所有接口的12345上监听。默认是0,首先尝试15888"
rpc_portal_whitelist:
en: "rpc portal whitelist, only allow these addresses to access rpc portal, e.g.: 127.0.0.1,127.0.0.0/8,::1/128"
zh-CN: "RPC门户白名单,仅允许这些地址访问RPC门户,例如:127.0.0.1/32,127.0.0.0/8,::1/128"
listeners: listeners:
en: |+ en: |+
listeners to accept connections, allow format: listeners to accept connections, allow format:
@@ -149,6 +163,12 @@ core_clap:
disable_kcp_input: disable_kcp_input:
en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved." en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved."
zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。" zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。"
enable_quic_proxy:
en: "proxy tcp streams with QUIC, improving the latency and throughput on the network with udp packet loss."
zh-CN: "使用 QUIC 代理 TCP 流,提高在 UDP 丢包网络上的延迟和吞吐量。"
disable_quic_input:
en: "do not allow other nodes to use QUIC to proxy tcp streams to this node. when a node with QUIC proxy enabled accesses this node, the original tcp connection is preserved."
zh-CN: "不允许其他节点使用 QUIC 代理 TCP 流到此节点。开启 QUIC 代理的节点访问此节点时,依然使用原始 TCP 连接。"
port_forward: port_forward:
en: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple." en: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple."
zh-CN: "将本地端口转发到虚拟网络中的远程端口。例如:udp://0.0.0.0:12345/10.126.126.1:23456,表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。" zh-CN: "将本地端口转发到虚拟网络中的远程端口。例如:udp://0.0.0.0:12345/10.126.126.1:23456,表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。"
+31 -22
View File
@@ -1,10 +1,7 @@
use std::io::{Read, Write}; use anyhow::Context;
use dashmap::DashMap; use dashmap::DashMap;
use std::cell::RefCell; use std::cell::RefCell;
use zstd::stream::read::Decoder; use zstd::bulk;
use zstd::stream::write::Encoder;
use zstd::zstd_safe::{CCtx, DCtx};
use zerocopy::{AsBytes as _, FromBytes as _}; use zerocopy::{AsBytes as _, FromBytes as _};
@@ -35,17 +32,16 @@ impl DefaultCompressor {
compress_algo: CompressorAlgo, compress_algo: CompressorAlgo,
) -> Result<Vec<u8>, Error> { ) -> Result<Vec<u8>, Error> {
match compress_algo { match compress_algo {
CompressorAlgo::ZstdDefault => { CompressorAlgo::ZstdDefault => CTX_MAP.with(|map_cell| {
let ret = CTX_MAP.with(|map_cell| { let map = map_cell.borrow();
let map = map_cell.borrow(); let mut ctx_entry = map.entry(compress_algo).or_default();
let mut ctx_entry = map.entry(compress_algo).or_default(); ctx_entry.compress(data).with_context(|| {
let writer = Vec::new(); format!(
let mut o = Encoder::with_context(writer, ctx_entry.value_mut()); "Failed to compress data with algorithm: {:?}",
o.write_all(data)?; compress_algo
o.finish() )
}); })
Ok(ret?) }),
}
CompressorAlgo::None => Ok(data.to_vec()), CompressorAlgo::None => Ok(data.to_vec()),
} }
} }
@@ -59,10 +55,23 @@ impl DefaultCompressor {
CompressorAlgo::ZstdDefault => DCTX_MAP.with(|map_cell| { CompressorAlgo::ZstdDefault => DCTX_MAP.with(|map_cell| {
let map = map_cell.borrow(); let map = map_cell.borrow();
let mut ctx_entry = map.entry(compress_algo).or_default(); let mut ctx_entry = map.entry(compress_algo).or_default();
let mut decoder = Decoder::with_context(data, ctx_entry.value_mut()); for i in 1..=5 {
let mut output = Vec::new(); let mut len = data.len() * 2usize.pow(i);
decoder.read_to_end(&mut output)?; if i == 5 && len < 64 * 1024 {
Ok(output) len = 64 * 1024; // Ensure a minimum buffer size
}
match ctx_entry.decompress(data, len) {
Ok(buf) => return Ok(buf),
Err(e) if e.to_string().contains("buffer is too small") => {
continue; // Try with a larger buffer
}
Err(e) => return Err(e.into()),
}
}
Err(anyhow::anyhow!(
"Failed to decompress data after multiple attempts with algorithm: {:?}",
compress_algo
))
}), }),
CompressorAlgo::None => Ok(data.to_vec()), CompressorAlgo::None => Ok(data.to_vec()),
} }
@@ -155,8 +164,8 @@ impl Compressor for DefaultCompressor {
} }
thread_local! { thread_local! {
static CTX_MAP: RefCell<DashMap<CompressorAlgo, CCtx<'static>>> = RefCell::new(DashMap::new()); static CTX_MAP: RefCell<DashMap<CompressorAlgo, bulk::Compressor<'static>>> = RefCell::new(DashMap::new());
static DCTX_MAP: RefCell<DashMap<CompressorAlgo, DCtx<'static>>> = RefCell::new(DashMap::new()); static DCTX_MAP: RefCell<DashMap<CompressorAlgo, bulk::Decompressor<'static>>> = RefCell::new(DashMap::new());
} }
#[cfg(test)] #[cfg(test)]
+61 -51
View File
@@ -2,9 +2,11 @@ use std::{
net::{Ipv4Addr, SocketAddr}, net::{Ipv4Addr, SocketAddr},
path::PathBuf, path::PathBuf,
sync::{Arc, Mutex}, sync::{Arc, Mutex},
u64,
}; };
use anyhow::Context; use anyhow::Context;
use cidr::IpCidr;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::{
@@ -38,6 +40,9 @@ pub fn gen_default_flags() -> Flags {
disable_relay_kcp: true, disable_relay_kcp: true,
accept_dns: false, accept_dns: false,
private_mode: false, private_mode: false,
enable_quic_proxy: false,
disable_quic_input: false,
foreign_relay_bps_limit: u64::MAX,
} }
} }
@@ -61,20 +66,15 @@ pub trait ConfigLoader: Send + Sync {
fn get_dhcp(&self) -> bool; fn get_dhcp(&self) -> bool;
fn set_dhcp(&self, dhcp: bool); fn set_dhcp(&self, dhcp: bool);
fn add_proxy_cidr(&self, cidr: cidr::IpCidr); fn add_proxy_cidr(&self, cidr: cidr::Ipv4Cidr, mapped_cidr: Option<cidr::Ipv4Cidr>);
fn remove_proxy_cidr(&self, cidr: cidr::IpCidr); fn remove_proxy_cidr(&self, cidr: cidr::Ipv4Cidr);
fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr>; fn get_proxy_cidrs(&self) -> Vec<ProxyNetworkConfig>;
fn get_network_identity(&self) -> NetworkIdentity; fn get_network_identity(&self) -> NetworkIdentity;
fn set_network_identity(&self, identity: NetworkIdentity); fn set_network_identity(&self, identity: NetworkIdentity);
fn get_listener_uris(&self) -> Vec<url::Url>; fn get_listener_uris(&self) -> Vec<url::Url>;
fn get_file_logger_config(&self) -> FileLoggerConfig;
fn set_file_logger_config(&self, config: FileLoggerConfig);
fn get_console_logger_config(&self) -> ConsoleLoggerConfig;
fn set_console_logger_config(&self, config: ConsoleLoggerConfig);
fn get_peers(&self) -> Vec<PeerConfig>; fn get_peers(&self) -> Vec<PeerConfig>;
fn set_peers(&self, peers: Vec<PeerConfig>); fn set_peers(&self, peers: Vec<PeerConfig>);
@@ -87,6 +87,9 @@ pub trait ConfigLoader: Send + Sync {
fn get_rpc_portal(&self) -> Option<SocketAddr>; fn get_rpc_portal(&self) -> Option<SocketAddr>;
fn set_rpc_portal(&self, addr: SocketAddr); fn set_rpc_portal(&self, addr: SocketAddr);
fn get_rpc_portal_whitelist(&self) -> Option<Vec<IpCidr>>;
fn set_rpc_portal_whitelist(&self, whitelist: Option<Vec<IpCidr>>);
fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig>; fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig>;
fn set_vpn_portal_config(&self, config: VpnPortalConfig); fn set_vpn_portal_config(&self, config: VpnPortalConfig);
@@ -108,6 +111,12 @@ pub trait ConfigLoader: Send + Sync {
fn dump(&self) -> String; fn dump(&self) -> String;
} }
pub trait LoggingConfigLoader {
fn get_file_logger_config(&self) -> FileLoggerConfig;
fn get_console_logger_config(&self) -> ConsoleLoggerConfig;
}
pub type NetworkSecretDigest = [u8; 32]; pub type NetworkSecretDigest = [u8; 32];
#[derive(Debug, Clone, Deserialize, Serialize, Default, Eq, Hash)] #[derive(Debug, Clone, Deserialize, Serialize, Default, Eq, Hash)]
@@ -166,7 +175,8 @@ pub struct PeerConfig {
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct ProxyNetworkConfig { pub struct ProxyNetworkConfig {
pub cidr: String, pub cidr: cidr::Ipv4Cidr, // the CIDR of the proxy network
pub mapped_cidr: Option<cidr::Ipv4Cidr>, // allow remap the proxy CIDR to another CIDR
pub allow: Option<Vec<String>>, pub allow: Option<Vec<String>>,
} }
@@ -182,6 +192,24 @@ pub struct ConsoleLoggerConfig {
pub level: Option<String>, pub level: Option<String>,
} }
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, derive_builder::Builder)]
pub struct LoggingConfig {
#[builder(setter(into, strip_option), default = None)]
file_logger: Option<FileLoggerConfig>,
#[builder(setter(into, strip_option), default = None)]
console_logger: Option<ConsoleLoggerConfig>,
}
impl LoggingConfigLoader for &LoggingConfig {
fn get_file_logger_config(&self) -> FileLoggerConfig {
self.file_logger.clone().unwrap_or_default()
}
fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
self.console_logger.clone().unwrap_or_default()
}
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct VpnPortalConfig { pub struct VpnPortalConfig {
pub client_cidr: cidr::Ipv4Cidr, pub client_cidr: cidr::Ipv4Cidr,
@@ -239,10 +267,8 @@ struct Config {
peer: Option<Vec<PeerConfig>>, peer: Option<Vec<PeerConfig>>,
proxy_network: Option<Vec<ProxyNetworkConfig>>, proxy_network: Option<Vec<ProxyNetworkConfig>>,
file_logger: Option<FileLoggerConfig>,
console_logger: Option<ConsoleLoggerConfig>,
rpc_portal: Option<SocketAddr>, rpc_portal: Option<SocketAddr>,
rpc_portal_whitelist: Option<Vec<IpCidr>>,
vpn_portal_config: Option<VpnPortalConfig>, vpn_portal_config: Option<VpnPortalConfig>,
@@ -397,50 +423,52 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().dhcp = Some(dhcp); self.config.lock().unwrap().dhcp = Some(dhcp);
} }
fn add_proxy_cidr(&self, cidr: cidr::IpCidr) { fn add_proxy_cidr(&self, cidr: cidr::Ipv4Cidr, mapped_cidr: Option<cidr::Ipv4Cidr>) {
let mut locked_config = self.config.lock().unwrap(); let mut locked_config = self.config.lock().unwrap();
if locked_config.proxy_network.is_none() { if locked_config.proxy_network.is_none() {
locked_config.proxy_network = Some(vec![]); locked_config.proxy_network = Some(vec![]);
} }
let cidr_str = cidr.to_string(); if let Some(mapped_cidr) = mapped_cidr.as_ref() {
assert_eq!(
cidr.network_length(),
mapped_cidr.network_length(),
"Mapped CIDR must have the same network length as the original CIDR",
);
}
// insert if no duplicate // insert if no duplicate
if !locked_config if !locked_config
.proxy_network .proxy_network
.as_ref() .as_ref()
.unwrap() .unwrap()
.iter() .iter()
.any(|c| c.cidr == cidr_str) .any(|c| c.cidr == cidr && c.mapped_cidr == mapped_cidr)
{ {
locked_config locked_config
.proxy_network .proxy_network
.as_mut() .as_mut()
.unwrap() .unwrap()
.push(ProxyNetworkConfig { .push(ProxyNetworkConfig {
cidr: cidr_str, cidr,
mapped_cidr,
allow: None, allow: None,
}); });
} }
} }
fn remove_proxy_cidr(&self, cidr: cidr::IpCidr) { fn remove_proxy_cidr(&self, cidr: cidr::Ipv4Cidr) {
let mut locked_config = self.config.lock().unwrap(); let mut locked_config = self.config.lock().unwrap();
if let Some(proxy_cidrs) = &mut locked_config.proxy_network { if let Some(proxy_cidrs) = &mut locked_config.proxy_network {
let cidr_str = cidr.to_string(); proxy_cidrs.retain(|c| c.cidr != cidr);
proxy_cidrs.retain(|c| c.cidr != cidr_str);
} }
} }
fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr> { fn get_proxy_cidrs(&self) -> Vec<ProxyNetworkConfig> {
self.config self.config
.lock() .lock()
.unwrap() .unwrap()
.proxy_network .proxy_network
.as_ref() .as_ref()
.map(|v| { .cloned()
v.iter()
.map(|c| c.cidr.parse().unwrap())
.collect::<Vec<cidr::IpCidr>>()
})
.unwrap_or_default() .unwrap_or_default()
} }
@@ -481,32 +509,6 @@ impl ConfigLoader for TomlConfigLoader {
.unwrap_or_default() .unwrap_or_default()
} }
fn get_file_logger_config(&self) -> FileLoggerConfig {
self.config
.lock()
.unwrap()
.file_logger
.clone()
.unwrap_or_default()
}
fn set_file_logger_config(&self, config: FileLoggerConfig) {
self.config.lock().unwrap().file_logger = Some(config);
}
fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
self.config
.lock()
.unwrap()
.console_logger
.clone()
.unwrap_or_default()
}
fn set_console_logger_config(&self, config: ConsoleLoggerConfig) {
self.config.lock().unwrap().console_logger = Some(config);
}
fn get_peers(&self) -> Vec<PeerConfig> { fn get_peers(&self) -> Vec<PeerConfig> {
self.config.lock().unwrap().peer.clone().unwrap_or_default() self.config.lock().unwrap().peer.clone().unwrap_or_default()
} }
@@ -544,6 +546,14 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().rpc_portal = Some(addr); self.config.lock().unwrap().rpc_portal = Some(addr);
} }
fn get_rpc_portal_whitelist(&self) -> Option<Vec<IpCidr>> {
self.config.lock().unwrap().rpc_portal_whitelist.clone()
}
fn set_rpc_portal_whitelist(&self, whitelist: Option<Vec<IpCidr>>) {
self.config.lock().unwrap().rpc_portal_whitelist = whitelist;
}
fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig> { fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig> {
self.config.lock().unwrap().vpn_portal_config.clone() self.config.lock().unwrap().vpn_portal_config.clone()
} }
+6 -4
View File
@@ -1,21 +1,21 @@
macro_rules! define_global_var { macro_rules! define_global_var {
($name:ident, $type:ty, $init:expr) => { ($name:ident, $type:ty, $init:expr) => {
pub static $name: once_cell::sync::Lazy<tokio::sync::Mutex<$type>> = pub static $name: once_cell::sync::Lazy<std::sync::Mutex<$type>> =
once_cell::sync::Lazy::new(|| tokio::sync::Mutex::new($init)); once_cell::sync::Lazy::new(|| std::sync::Mutex::new($init));
}; };
} }
#[macro_export] #[macro_export]
macro_rules! use_global_var { macro_rules! use_global_var {
($name:ident) => { ($name:ident) => {
crate::common::constants::$name.lock().await.to_owned() crate::common::constants::$name.lock().unwrap().to_owned()
}; };
} }
#[macro_export] #[macro_export]
macro_rules! set_global_var { macro_rules! set_global_var {
($name:ident, $val:expr) => { ($name:ident, $val:expr) => {
*crate::common::constants::$name.lock().await = $val *crate::common::constants::$name.lock().unwrap() = $val
}; };
} }
@@ -23,6 +23,8 @@ define_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS, u64, 1000);
define_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, u64, 10); define_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, u64, 10);
define_global_var!(MACHINE_UID, Option<String>, None);
pub const UDP_HOLE_PUNCH_CONNECTOR_SERVICE_ID: u32 = 2; pub const UDP_HOLE_PUNCH_CONNECTOR_SERVICE_ID: u32 = 2;
pub const WIN_SERVICE_WORK_DIR_REG_KEY: &str = "SOFTWARE\\EasyTier\\Service\\WorkDir"; pub const WIN_SERVICE_WORK_DIR_REG_KEY: &str = "SOFTWARE\\EasyTier\\Service\\WorkDir";
+9
View File
@@ -77,6 +77,15 @@ pub async fn socket_addrs(
.port() .port()
.or_else(default_port_number) .or_else(default_port_number)
.ok_or(Error::InvalidUrl(url.to_string()))?; .ok_or(Error::InvalidUrl(url.to_string()))?;
// See https://github.com/EasyTier/EasyTier/pull/947
let port = match port {
0 => match url.scheme() {
"ws" => 80,
"wss" => 443,
_ => port,
},
_ => port,
};
// if host is an ip address, return it directly // if host is an ip address, return it directly
if let Ok(ip) = host.parse::<std::net::IpAddr>() { if let Ok(ip) = host.parse::<std::net::IpAddr>() {
+22 -24
View File
@@ -4,6 +4,8 @@ use std::{
sync::{Arc, Mutex}, sync::{Arc, Mutex},
}; };
use crate::common::config::ProxyNetworkConfig;
use crate::common::token_bucket::TokenBucketManager;
use crate::proto::cli::PeerConnInfo; use crate::proto::cli::PeerConnInfo;
use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb}; use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb};
use crossbeam::atomic::AtomicCell; use crossbeam::atomic::AtomicCell;
@@ -59,7 +61,7 @@ pub struct GlobalCtx {
event_bus: EventBus, event_bus: EventBus,
cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>, cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>,
cached_proxy_cidrs: AtomicCell<Option<Vec<cidr::IpCidr>>>, cached_proxy_cidrs: AtomicCell<Option<Vec<ProxyNetworkConfig>>>,
ip_collector: Mutex<Option<Arc<IPCollector>>>, ip_collector: Mutex<Option<Arc<IPCollector>>>,
@@ -74,6 +76,10 @@ pub struct GlobalCtx {
no_tun: bool, no_tun: bool,
feature_flags: AtomicCell<PeerFeatureFlag>, feature_flags: AtomicCell<PeerFeatureFlag>,
quic_proxy_port: AtomicCell<Option<u16>>,
token_bucket_manager: TokenBucketManager,
} }
impl std::fmt::Debug for GlobalCtx { impl std::fmt::Debug for GlobalCtx {
@@ -136,6 +142,9 @@ impl GlobalCtx {
no_tun, no_tun,
feature_flags: AtomicCell::new(feature_flags), feature_flags: AtomicCell::new(feature_flags),
quic_proxy_port: AtomicCell::new(None),
token_bucket_manager: TokenBucketManager::new(),
} }
} }
@@ -182,29 +191,6 @@ impl GlobalCtx {
self.cached_ipv4.store(None); self.cached_ipv4.store(None);
} }
pub fn add_proxy_cidr(&self, cidr: cidr::IpCidr) -> Result<(), std::io::Error> {
self.config.add_proxy_cidr(cidr);
self.cached_proxy_cidrs.store(None);
Ok(())
}
pub fn remove_proxy_cidr(&self, cidr: cidr::IpCidr) -> Result<(), std::io::Error> {
self.config.remove_proxy_cidr(cidr);
self.cached_proxy_cidrs.store(None);
Ok(())
}
pub fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr> {
if let Some(proxy_cidrs) = self.cached_proxy_cidrs.take() {
self.cached_proxy_cidrs.store(Some(proxy_cidrs.clone()));
return proxy_cidrs;
}
let ret = self.config.get_proxy_cidrs();
self.cached_proxy_cidrs.store(Some(ret.clone()));
ret
}
pub fn get_id(&self) -> uuid::Uuid { pub fn get_id(&self) -> uuid::Uuid {
self.config.get_id() self.config.get_id()
} }
@@ -303,6 +289,18 @@ impl GlobalCtx {
pub fn set_feature_flags(&self, flags: PeerFeatureFlag) { pub fn set_feature_flags(&self, flags: PeerFeatureFlag) {
self.feature_flags.store(flags); self.feature_flags.store(flags);
} }
pub fn get_quic_proxy_port(&self) -> Option<u16> {
self.quic_proxy_port.load()
}
pub fn set_quic_proxy_port(&self, port: Option<u16>) {
self.quic_proxy_port.store(port);
}
pub fn token_bucket_manager(&self) -> &TokenBucketManager {
&self.token_bucket_manager
}
} }
#[cfg(test)] #[cfg(test)]
+13
View File
@@ -8,6 +8,8 @@ use time::util::refresh_tz;
use tokio::{task::JoinSet, time::timeout}; use tokio::{task::JoinSet, time::timeout};
use tracing::Instrument; use tracing::Instrument;
use crate::{set_global_var, use_global_var};
pub mod compressor; pub mod compressor;
pub mod config; pub mod config;
pub mod constants; pub mod constants;
@@ -21,6 +23,7 @@ pub mod network;
pub mod scoped_task; pub mod scoped_task;
pub mod stun; pub mod stun;
pub mod stun_codec_ext; pub mod stun_codec_ext;
pub mod token_bucket;
pub fn get_logger_timer<F: time::formatting::Formattable>( pub fn get_logger_timer<F: time::formatting::Formattable>(
format: F, format: F,
@@ -87,7 +90,17 @@ pub fn join_joinset_background<T: Debug + Send + Sync + 'static>(
); );
} }
pub fn set_default_machine_id(mid: Option<String>) {
set_global_var!(MACHINE_UID, mid);
}
pub fn get_machine_id() -> uuid::Uuid { pub fn get_machine_id() -> uuid::Uuid {
if let Some(default_mid) = use_global_var!(MACHINE_UID) {
let mut b = [0u8; 16];
crate::tunnel::generate_digest_from_str("", &default_mid, &mut b);
return uuid::Uuid::from_bytes(b);
}
// a path same as the binary // a path same as the binary
let machine_id_file = std::env::current_exe() let machine_id_file = std::env::current_exe()
.map(|x| x.with_file_name("et_machine_id")) .map(|x| x.with_file_name("et_machine_id"))
+12 -3
View File
@@ -955,9 +955,18 @@ mod tests {
async fn test_txt_public_stun_server() { async fn test_txt_public_stun_server() {
let stun_servers = vec!["txt:stun.easytier.cn".to_string()]; let stun_servers = vec!["txt:stun.easytier.cn".to_string()];
let detector = UdpNatTypeDetector::new(stun_servers, 1); let detector = UdpNatTypeDetector::new(stun_servers, 1);
let ret = detector.detect_nat_type(0).await; for _ in 0..5 {
println!("{:#?}, {:?}", ret, ret.as_ref().unwrap().nat_type()); let ret = detector.detect_nat_type(0).await;
assert!(!ret.unwrap().stun_resps.is_empty()); println!("{:#?}, {:?}", ret, ret.as_ref().unwrap().nat_type());
if ret.is_ok() {
assert!(!ret.unwrap().stun_resps.is_empty());
return;
}
}
debug_assert!(
false,
"should not reach here, stun server should be available"
);
} }
#[tokio::test] #[tokio::test]
+312
View File
@@ -0,0 +1,312 @@
use atomic_shim::AtomicU64;
use dashmap::DashMap;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use tokio::time;
use crate::common::scoped_task::ScopedTask;
use crate::proto::common::LimiterConfig;
/// Token Bucket rate limiter using atomic operations.
///
/// Tokens are consumed via [`TokenBucket::try_consume`] and replenished by a
/// background tokio task spawned in [`TokenBucket::new_from_cfg`].
pub struct TokenBucket {
    available_tokens: AtomicU64, // Current token count; updated with CAS loops
    last_refill_time: AtomicU64, // Last refill time, in micros since `start_time` (NOT since epoch)
    config: BucketConfig,        // Immutable configuration
    refill_task: Mutex<Option<ScopedTask<()>>>, // Background refill task; ScopedTask presumably aborts it on drop — confirm in scoped_task
    start_time: Instant,         // Bucket creation time; basis for elapsed_micros()
}
/// Immutable configuration for a [`TokenBucket`].
#[derive(Clone, Copy)]
pub struct BucketConfig {
    capacity: u64,             // Maximum token capacity (bytes); hard cap on burst size
    fill_rate: u64,            // Tokens added per second (bytes per second)
    refill_interval: Duration, // Time between background refill operations
}
impl From<LimiterConfig> for BucketConfig {
    /// Builds a bucket configuration from the wire-level limiter config,
    /// applying defaults and lower bounds for missing or too-small fields.
    fn from(cfg: LimiterConfig) -> Self {
        // Burst multiplier: clamp to at least 1 so capacity is never zero
        // (and the division below never divides by zero).
        let burst_rate = 1.max(cfg.burst_rate.unwrap_or(1));
        // Fill rate in bytes/s. When unset, default to effectively unlimited;
        // `u64::MAX / burst_rate` keeps the capacity product within u64 range.
        // A floor of 8196 B/s ensures tiny configs still make progress.
        let fill_rate = 8196.max(cfg.bps.unwrap_or(u64::MAX / burst_rate));
        // Refill period: default 10ms, floor of 1ms.
        let refill_interval = cfg
            .fill_duration_ms
            .map(|x| Duration::from_millis(1.max(x)))
            .unwrap_or(Duration::from_millis(10));
        BucketConfig {
            // saturating_mul: an explicit large `bps` combined with
            // `burst_rate > 1` could overflow a plain multiplication
            // (panic in debug builds, silent wrap in release); clamp to
            // u64::MAX instead.
            capacity: burst_rate.saturating_mul(fill_rate),
            fill_rate,
            refill_interval,
        }
    }
}
impl TokenBucket {
    /// Convenience constructor from raw parameters.
    ///
    /// # Arguments
    /// * `capacity` - Bucket capacity in bytes
    /// * `bps` - Bandwidth limit in bytes per second
    /// * `refill_interval` - Refill interval (recommended 10-50ms)
    pub fn new(capacity: u64, bps: u64, refill_interval: Duration) -> Arc<Self> {
        let config = BucketConfig {
            capacity,
            fill_rate: bps,
            refill_interval,
        };
        Self::new_from_cfg(config)
    }

    /// Creates a new Token Bucket rate limiter and starts its background
    /// refill task. Must be called from within a tokio runtime, since it
    /// spawns the refill loop with `tokio::spawn`.
    pub fn new_from_cfg(config: BucketConfig) -> Arc<Self> {
        // Create the Arc first with a placeholder (None) task slot; the
        // refill task needs a clone of the Arc to borrow from.
        let arc_self = Arc::new(Self {
            available_tokens: AtomicU64::new(config.capacity), // start full
            last_refill_time: AtomicU64::new(0),
            config,
            refill_task: Mutex::new(None),
            start_time: std::time::Instant::now(),
        });

        // Background refill loop, ticking every `refill_interval`.
        // NOTE(review): the spawned task holds a strong Arc, so the bucket
        // is kept alive by its own refill task until that task is aborted
        // (presumably by ScopedTask on drop — confirm).
        let arc_clone = arc_self.clone();
        let refill_task = tokio::spawn(async move {
            let mut interval = time::interval(arc_clone.config.refill_interval);
            loop {
                interval.tick().await;
                arc_clone.refill();
            }
        });

        // Replace the placeholder with the actual task handle.
        arc_self
            .refill_task
            .lock()
            .unwrap()
            .replace(refill_task.into());

        arc_self
    }

    /// Internal refill method (called only by the background task).
    fn refill(&self) {
        let now_micros = self.elapsed_micros();
        // Single caller (the background refill task), so a plain
        // load/store pair on the timestamp is sufficient.
        let prev_time = self.last_refill_time.load(Ordering::Acquire);

        // Elapsed time since the last *accounted* refill, in seconds.
        let elapsed_secs = (now_micros.saturating_sub(prev_time)) as f64 / 1_000_000.0;
        let tokens_to_add = (self.config.fill_rate as f64 * elapsed_secs) as u64;
        if tokens_to_add == 0 {
            // BUGFIX: keep the previous timestamp so sub-token elapsed time
            // keeps accumulating. The old code swapped the timestamp first,
            // so a fill rate that yields < 1 token per interval rounded down
            // to 0 every tick and the bucket never refilled.
            return;
        }
        self.last_refill_time.store(now_micros, Ordering::Release);

        // CAS loop: add tokens without exceeding capacity. try_consume may
        // race with us, hence compare_exchange instead of fetch_add.
        let mut current = self.available_tokens.load(Ordering::Relaxed);
        loop {
            let new = current
                .saturating_add(tokens_to_add)
                .min(self.config.capacity);
            match self.available_tokens.compare_exchange_weak(
                current,
                new,
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(actual) => current = actual,
            }
        }
    }

    /// Microseconds since bucket creation (monotonic; basis for refill math).
    fn elapsed_micros(&self) -> u64 {
        self.start_time.elapsed().as_micros() as u64
    }

    /// Attempt to consume `tokens` tokens without blocking.
    ///
    /// # Returns
    /// `true` if the tokens were consumed, `false` if insufficient tokens
    /// are currently available (caller should drop or delay the packet).
    pub fn try_consume(&self, tokens: u64) -> bool {
        // Fast path: a request larger than capacity can never succeed.
        if tokens > self.config.capacity {
            return false;
        }

        // CAS loop: atomically subtract if (and only if) enough are available.
        let mut current = self.available_tokens.load(Ordering::Relaxed);
        loop {
            if current < tokens {
                return false;
            }
            let new = current - tokens;
            match self.available_tokens.compare_exchange_weak(
                current,
                new,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return true,
                Err(actual) => current = actual,
            }
        }
    }
}
/// Keyed registry of shared [`TokenBucket`]s with periodic cleanup of
/// buckets that are no longer referenced by any consumer.
pub struct TokenBucketManager {
    buckets: Arc<DashMap<String, Arc<TokenBucket>>>, // buckets keyed by caller-chosen string
    retain_task: ScopedTask<()>, // periodic cleanup loop; ScopedTask presumably aborts it on drop — confirm in scoped_task
}
impl TokenBucketManager {
    /// Creates a new manager and starts a background task that periodically
    /// evicts buckets no longer referenced outside the map.
    /// Must be called from within a tokio runtime.
    pub fn new() -> Self {
        let buckets = Arc::new(DashMap::new());
        let buckets_clone = buckets.clone();
        let retain_task = tokio::spawn(async move {
            loop {
                // Keep only buckets that are still in use elsewhere: a
                // strong_count of 1 means the map itself holds the only Arc.
                // BUGFIX: the original predicate was `<= 1`, which is the
                // inverse of the stated intent — it evicted in-use buckets
                // (splitting a live limit on the next get_or_create) and
                // kept stale ones forever.
                buckets_clone.retain(|_, bucket| Arc::<TokenBucket>::strong_count(bucket) > 1);
                // Sleep for a while before the next retention pass.
                tokio::time::sleep(Duration::from_secs(60)).await;
            }
        });
        Self {
            buckets,
            retain_task: retain_task.into(),
        }
    }

    /// Returns the bucket registered under `key`, creating one from `cfg`
    /// if absent. `cfg` is only used when a new bucket must be created.
    pub fn get_or_create(&self, key: &str, cfg: BucketConfig) -> Arc<TokenBucket> {
        self.buckets
            .entry(key.to_string())
            .or_insert_with(|| TokenBucket::new_from_cfg(cfg))
            .clone()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::{sleep, Duration};

    // NOTE(review): several tests below assert on wall-clock-driven refill
    // amounts and may be flaky on heavily loaded CI machines.

    /// Test initial state after creation: bucket starts full.
    #[tokio::test]
    async fn test_initial_state() {
        let bucket = TokenBucket::new(1000, 1000, Duration::from_millis(10));

        // Should have full capacity initially
        assert!(bucket.try_consume(1000));
        assert!(!bucket.try_consume(1)); // Should be empty now
    }

    /// Test token consumption behavior: partial consumption and exhaustion.
    #[tokio::test]
    async fn test_consumption() {
        let bucket = TokenBucket::new(1500, 1000, Duration::from_millis(10));

        // First packet should succeed
        assert!(bucket.try_consume(1000));

        // Second packet should fail (only 500 left)
        assert!(!bucket.try_consume(600));

        // Should be able to take remaining tokens
        assert!(bucket.try_consume(500));
    }

    /// Test background refill functionality after draining the bucket.
    #[tokio::test]
    async fn test_refill() {
        let bucket = TokenBucket::new(1000, 1000, Duration::from_millis(10));

        // Drain the bucket
        assert!(bucket.try_consume(1000));
        assert!(!bucket.try_consume(1));

        // Wait for refill (1 refill interval + buffer)
        sleep(Duration::from_millis(25)).await;

        // Should have approximately 20 tokens (1000 tokens/s * 0.02s)
        assert!(bucket.try_consume(15));
        assert!(!bucket.try_consume(10)); // But not full capacity
    }

    /// Test capacity enforcement: tokens never exceed capacity regardless
    /// of how long the bucket sits idle.
    #[tokio::test]
    async fn test_capacity_limit() {
        let bucket = TokenBucket::new(500, 1000, Duration::from_millis(10));

        // Wait longer than refill interval
        sleep(Duration::from_millis(50)).await;

        // Should not exceed capacity despite time passed
        assert!(bucket.try_consume(500));
        assert!(!bucket.try_consume(1));
    }

    /// Test high load with concurrent access: CAS loops must not lose or
    /// fabricate tokens under contention.
    #[tokio::test]
    async fn test_concurrent_access() {
        let bucket = TokenBucket::new(10_000, 1_000_000, Duration::from_millis(10));
        let mut handles = vec![];

        // Spawn 100 tasks to consume tokens concurrently
        for _ in 0..100 {
            let bucket = bucket.clone();
            handles.push(tokio::spawn(async move {
                for _ in 0..100 {
                    let _ = bucket.try_consume(10);
                }
            }));
        }

        // Wait for all tasks to complete
        for handle in handles {
            handle.await.unwrap();
        }

        // Verify we didn't exceed capacity
        let tokens_left = bucket.available_tokens.load(Ordering::Relaxed);
        assert!(
            tokens_left <= 10_000,
            "Tokens exceeded capacity: {}",
            tokens_left
        );
    }

    /// Test behavior when packet size exceeds capacity: the oversized
    /// request is rejected without consuming anything.
    #[tokio::test]
    async fn test_oversized_packet() {
        let bucket = TokenBucket::new(1500, 1000, Duration::from_millis(10));

        // Packet larger than capacity should be rejected
        assert!(!bucket.try_consume(1600));

        // Regular packets should still work
        assert!(bucket.try_consume(1000));
    }

    /// Test refill precision with very small (sub-millisecond) intervals.
    #[tokio::test]
    async fn test_refill_precision() {
        let bucket = TokenBucket::new(10_000, 10_000, Duration::from_micros(100)); // 100μs interval

        // Drain most tokens
        assert!(bucket.try_consume(9900));

        // Wait for multiple refills
        sleep(Duration::from_millis(1)).await;

        // Should have accumulated about 100 tokens (10,000 tokens/s * 0.001s)
        let tokens = bucket.available_tokens.load(Ordering::Relaxed);
        assert!(
            tokens >= 100 && tokens <= 200,
            "Unexpected token count: {}",
            tokens
        );
    }
}
+1 -1
View File
@@ -186,7 +186,7 @@ impl DirectConnectorManagerData {
.await?; .await?;
// NOTICE: must add as directly connected tunnel // NOTICE: must add as directly connected tunnel
self.peer_manager.add_direct_tunnel(ret).await self.peer_manager.add_client_tunnel(ret, true).await
} }
async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> { async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> {
+29 -21
View File
@@ -1,4 +1,7 @@
use std::{collections::BTreeSet, sync::Arc}; use std::{
collections::BTreeSet,
sync::{Arc, Weak},
};
use anyhow::Context; use anyhow::Context;
use dashmap::{DashMap, DashSet}; use dashmap::{DashMap, DashSet};
@@ -12,7 +15,7 @@ use tokio::{
}; };
use crate::{ use crate::{
common::PeerId, common::{join_joinset_background, PeerId},
peers::peer_conn::PeerConnId, peers::peer_conn::PeerConnId,
proto::{ proto::{
cli::{ cli::{
@@ -53,7 +56,7 @@ struct ReconnResult {
struct ConnectorManagerData { struct ConnectorManagerData {
connectors: ConnectorMap, connectors: ConnectorMap,
reconnecting: DashSet<String>, reconnecting: DashSet<String>,
peer_manager: Arc<PeerManager>, peer_manager: Weak<PeerManager>,
alive_conn_urls: Arc<DashSet<String>>, alive_conn_urls: Arc<DashSet<String>>,
// user removed connector urls // user removed connector urls
removed_conn_urls: Arc<DashSet<String>>, removed_conn_urls: Arc<DashSet<String>>,
@@ -78,7 +81,7 @@ impl ManualConnectorManager {
data: Arc::new(ConnectorManagerData { data: Arc::new(ConnectorManagerData {
connectors, connectors,
reconnecting: DashSet::new(), reconnecting: DashSet::new(),
peer_manager, peer_manager: Arc::downgrade(&peer_manager),
alive_conn_urls: Arc::new(DashSet::new()), alive_conn_urls: Arc::new(DashSet::new()),
removed_conn_urls: Arc::new(DashSet::new()), removed_conn_urls: Arc::new(DashSet::new()),
net_ns: global_ctx.net_ns.clone(), net_ns: global_ctx.net_ns.clone(),
@@ -190,20 +193,18 @@ impl ManualConnectorManager {
tracing::warn!("event_recv lagged: {}, rebuild alive conn list", n); tracing::warn!("event_recv lagged: {}, rebuild alive conn list", n);
event_recv = event_recv.resubscribe(); event_recv = event_recv.resubscribe();
data.alive_conn_urls.clear(); data.alive_conn_urls.clear();
for x in data let Some(pm) = data.peer_manager.upgrade() else {
.peer_manager tracing::warn!("peer manager is gone, exit");
.get_peer_map() break;
.get_alive_conns() };
.iter() for x in pm.get_peer_map().get_alive_conns().iter().map(|x| {
.map(|x| { x.tunnel
x.tunnel .clone()
.clone() .unwrap_or_default()
.unwrap_or_default() .remote_addr
.remote_addr .unwrap_or_default()
.unwrap_or_default() .to_string()
.to_string() }) {
})
{
data.alive_conn_urls.insert(x); data.alive_conn_urls.insert(x);
} }
continue; continue;
@@ -222,6 +223,8 @@ impl ManualConnectorManager {
use_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS), use_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS),
)); ));
let (reconn_result_send, mut reconn_result_recv) = mpsc::channel(100); let (reconn_result_send, mut reconn_result_recv) = mpsc::channel(100);
let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new()));
join_joinset_background(tasks.clone(), "connector_reconnect_tasks".to_string());
loop { loop {
tokio::select! { tokio::select! {
@@ -237,7 +240,7 @@ impl ManualConnectorManager {
let insert_succ = data.reconnecting.insert(dead_url.clone()); let insert_succ = data.reconnecting.insert(dead_url.clone());
assert!(insert_succ); assert!(insert_succ);
tokio::spawn(async move { tasks.lock().unwrap().spawn(async move {
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), connector.clone()).await; let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), connector.clone()).await;
sender.send(reconn_ret).await.unwrap(); sender.send(reconn_ret).await.unwrap();
@@ -340,8 +343,13 @@ impl ManualConnectorManager {
connector.lock().await.remote_url().clone(), connector.lock().await.remote_url().clone(),
)); ));
tracing::info!("reconnect try connect... conn: {:?}", connector); tracing::info!("reconnect try connect... conn: {:?}", connector);
let (peer_id, conn_id) = data let Some(pm) = data.peer_manager.upgrade() else {
.peer_manager return Err(Error::AnyhowError(anyhow::anyhow!(
"peer manager is gone, cannot reconnect"
)));
};
let (peer_id, conn_id) = pm
.try_direct_connect(connector.lock().await.as_mut()) .try_direct_connect(connector.lock().await.as_mut())
.await?; .await?;
tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url); tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url);
+1 -1
View File
@@ -221,7 +221,7 @@ impl UdpHoePunchConnectorData {
Ok(Some(tunnel)) => { Ok(Some(tunnel)) => {
tracing::info!(?tunnel, "hole punching get tunnel success"); tracing::info!(?tunnel, "hole punching get tunnel success");
if let Err(e) = self.peer_mgr.add_client_tunnel(tunnel).await { if let Err(e) = self.peer_mgr.add_client_tunnel(tunnel, false).await {
tracing::warn!(?e, "add client tunnel failed"); tracing::warn!(?e, "add client tunnel failed");
op(true); op(true);
false false
+9 -17
View File
@@ -1083,7 +1083,8 @@ async fn main() -> Result<(), Error> {
.iter() .iter()
.map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,)) .map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let direct_peers: Vec<_> = v.direct_peers let direct_peers: Vec<_> = v
.direct_peers
.iter() .iter()
.map(|(k, v)| DirectPeerItem { .map(|(k, v)| DirectPeerItem {
node_id: k.to_string(), node_id: k.to_string(),
@@ -1257,23 +1258,14 @@ async fn main() -> Result<(), Error> {
} }
SubCommand::Proxy => { SubCommand::Proxy => {
let mut entries = vec![]; let mut entries = vec![];
let client = handler.get_tcp_proxy_client("tcp").await?;
let ret = client
.list_tcp_proxy_entry(BaseController::default(), Default::default())
.await;
entries.extend(ret.unwrap_or_default().entries);
let client = handler.get_tcp_proxy_client("kcp_src").await?; for client_type in &["tcp", "kcp_src", "kcp_dst", "quic_src", "quic_dst"] {
let ret = client let client = handler.get_tcp_proxy_client(client_type).await?;
.list_tcp_proxy_entry(BaseController::default(), Default::default()) let ret = client
.await; .list_tcp_proxy_entry(BaseController::default(), Default::default())
entries.extend(ret.unwrap_or_default().entries); .await;
entries.extend(ret.unwrap_or_default().entries);
let client = handler.get_tcp_proxy_client("kcp_dst").await?; }
let ret = client
.list_tcp_proxy_entry(BaseController::default(), Default::default())
.await;
entries.extend(ret.unwrap_or_default().entries);
if cli.verbose { if cli.verbose {
println!("{}", serde_json::to_string_pretty(&entries)?); println!("{}", serde_json::to_string_pretty(&entries)?);
+206 -262
View File
@@ -11,25 +11,24 @@ use std::{
}; };
use anyhow::Context; use anyhow::Context;
use cidr::IpCidr;
use clap::Parser; use clap::Parser;
use easytier::{ use easytier::{
common::{ common::{
config::{ config::{
ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, NetworkIdentity, PeerConfig, ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader,
PortForwardConfig, TomlConfigLoader, VpnPortalConfig, NetworkIdentity, PeerConfig, PortForwardConfig, TomlConfigLoader, VpnPortalConfig,
}, },
constants::EASYTIER_VERSION, constants::EASYTIER_VERSION,
global_ctx::{EventBusSubscriber, GlobalCtx, GlobalCtxEvent}, global_ctx::GlobalCtx,
scoped_task::ScopedTask, set_default_machine_id,
stun::MockStunInfoCollector, stun::MockStunInfoCollector,
}, },
connector::create_connector_by_url, connector::create_connector_by_url,
launcher, instance_manager::NetworkInstanceManager,
proto::{ launcher::{add_proxy_network_to_config, ConfigSource},
self, proto::common::{CompressionAlgoPb, NatType},
common::{CompressionAlgoPb, NatType},
},
tunnel::{IpVersion, PROTO_PORT_OFFSET}, tunnel::{IpVersion, PROTO_PORT_OFFSET},
utils::{init_logger, setup_panic_handler}, utils::{init_logger, setup_panic_handler},
web_client, web_client,
@@ -101,14 +100,32 @@ struct Cli {
)] )]
config_server: Option<String>, config_server: Option<String>,
#[arg(
long,
env = "ET_MACHINE_ID",
help = t!("core_clap.machine_id").to_string()
)]
machine_id: Option<String>,
#[arg( #[arg(
short, short,
long, long,
env = "ET_CONFIG_FILE", env = "ET_CONFIG_FILE",
help = t!("core_clap.config_file").to_string() value_delimiter = ',',
help = t!("core_clap.config_file").to_string(),
num_args = 1..,
)] )]
config_file: Option<PathBuf>, config_file: Option<Vec<PathBuf>>,
#[command(flatten)]
network_options: NetworkOptions,
#[command(flatten)]
logging_options: LoggingOptions,
}
#[derive(Parser, Debug)]
struct NetworkOptions {
#[arg( #[arg(
long, long,
env = "ET_NETWORK_NAME", env = "ET_NETWORK_NAME",
@@ -176,6 +193,14 @@ struct Cli {
)] )]
rpc_portal: Option<String>, rpc_portal: Option<String>,
#[arg(
long,
env = "ET_RPC_PORTAL_WHITELIST",
value_delimiter = ',',
help = t!("core_clap.rpc_portal_whitelist").to_string(),
)]
rpc_portal_whitelist: Option<Vec<IpCidr>>,
#[arg( #[arg(
short, short,
long, long,
@@ -203,27 +228,6 @@ struct Cli {
)] )]
no_listener: bool, no_listener: bool,
#[arg(
long,
env = "ET_CONSOLE_LOG_LEVEL",
help = t!("core_clap.console_log_level").to_string()
)]
console_log_level: Option<String>,
#[arg(
long,
env = "ET_FILE_LOG_LEVEL",
help = t!("core_clap.file_log_level").to_string()
)]
file_log_level: Option<String>,
#[arg(
long,
env = "ET_FILE_LOG_DIR",
help = t!("core_clap.file_log_dir").to_string()
)]
file_log_dir: Option<String>,
#[arg( #[arg(
long, long,
env = "ET_HOSTNAME", env = "ET_HOSTNAME",
@@ -437,6 +441,24 @@ struct Cli {
)] )]
disable_kcp_input: Option<bool>, disable_kcp_input: Option<bool>,
#[arg(
long,
env = "ET_ENABLE_QUIC_PROXY",
help = t!("core_clap.enable_quic_proxy").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
enable_quic_proxy: Option<bool>,
#[arg(
long,
env = "ET_DISABLE_QUIC_INPUT",
help = t!("core_clap.disable_quic_input").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
disable_quic_input: Option<bool>,
#[arg( #[arg(
long, long,
env = "ET_PORT_FORWARD", env = "ET_PORT_FORWARD",
@@ -459,6 +481,37 @@ struct Cli {
help = t!("core_clap.private_mode").to_string(), help = t!("core_clap.private_mode").to_string(),
)] )]
private_mode: Option<bool>, private_mode: Option<bool>,
#[arg(
long,
env = "ET_FOREIGN_RELAY_BPS_LIMIT",
help = t!("core_clap.foreign_relay_bps_limit").to_string(),
)]
foreign_relay_bps_limit: Option<u64>,
}
#[derive(Parser, Debug)]
struct LoggingOptions {
#[arg(
long,
env = "ET_CONSOLE_LOG_LEVEL",
help = t!("core_clap.console_log_level").to_string()
)]
console_log_level: Option<String>,
#[arg(
long,
env = "ET_FILE_LOG_LEVEL",
help = t!("core_clap.file_log_level").to_string()
)]
file_log_level: Option<String>,
#[arg(
long,
env = "ET_FILE_LOG_DIR",
help = t!("core_clap.file_log_dir").to_string()
)]
file_log_dir: Option<String>,
} }
rust_i18n::i18n!("locales", fallback = "en"); rust_i18n::i18n!("locales", fallback = "en");
@@ -518,43 +571,47 @@ impl Cli {
} }
} }
impl TryFrom<&Cli> for TomlConfigLoader { impl NetworkOptions {
type Error = anyhow::Error; fn can_merge(&self, cfg: &TomlConfigLoader, config_file_count: usize) -> bool {
if config_file_count == 1 {
fn try_from(cli: &Cli) -> Result<Self, Self::Error> { return true;
let cfg = if let Some(config_file) = &cli.config_file { }
TomlConfigLoader::new(config_file) let Some(network_name) = &self.network_name else {
.with_context(|| format!("failed to load config file: {:?}", cli.config_file))? return false;
} else {
TomlConfigLoader::default()
}; };
if cfg.get_network_identity().network_name == *network_name {
return true;
}
false
}
if cli.hostname.is_some() { fn merge_into(&self, cfg: &mut TomlConfigLoader) -> anyhow::Result<()> {
cfg.set_hostname(cli.hostname.clone()); if self.hostname.is_some() {
cfg.set_hostname(self.hostname.clone());
} }
let old_ns = cfg.get_network_identity(); let old_ns = cfg.get_network_identity();
let network_name = cli.network_name.clone().unwrap_or(old_ns.network_name); let network_name = self.network_name.clone().unwrap_or(old_ns.network_name);
let network_secret = cli let network_secret = self
.network_secret .network_secret
.clone() .clone()
.unwrap_or(old_ns.network_secret.unwrap_or_default()); .unwrap_or(old_ns.network_secret.unwrap_or_default());
cfg.set_network_identity(NetworkIdentity::new(network_name, network_secret)); cfg.set_network_identity(NetworkIdentity::new(network_name, network_secret));
if let Some(dhcp) = cli.dhcp { if let Some(dhcp) = self.dhcp {
cfg.set_dhcp(dhcp); cfg.set_dhcp(dhcp);
} }
if let Some(ipv4) = &cli.ipv4 { if let Some(ipv4) = &self.ipv4 {
cfg.set_ipv4(Some(ipv4.parse().with_context(|| { cfg.set_ipv4(Some(ipv4.parse().with_context(|| {
format!("failed to parse ipv4 address: {}", ipv4) format!("failed to parse ipv4 address: {}", ipv4)
})?)) })?))
} }
if !cli.peers.is_empty() { if !self.peers.is_empty() {
let mut peers = cfg.get_peers(); let mut peers = cfg.get_peers();
peers.reserve(peers.len() + cli.peers.len()); peers.reserve(peers.len() + self.peers.len());
for p in &cli.peers { for p in &self.peers {
peers.push(PeerConfig { peers.push(PeerConfig {
uri: p uri: p
.parse() .parse()
@@ -564,9 +621,9 @@ impl TryFrom<&Cli> for TomlConfigLoader {
cfg.set_peers(peers); cfg.set_peers(peers);
} }
if cli.no_listener || !cli.listeners.is_empty() { if self.no_listener || !self.listeners.is_empty() {
cfg.set_listeners( cfg.set_listeners(
Cli::parse_listeners(cli.no_listener, cli.listeners.clone())? Cli::parse_listeners(self.no_listener, self.listeners.clone())?
.into_iter() .into_iter()
.map(|s| s.parse().unwrap()) .map(|s| s.parse().unwrap())
.collect(), .collect(),
@@ -580,9 +637,9 @@ impl TryFrom<&Cli> for TomlConfigLoader {
); );
} }
if !cli.mapped_listeners.is_empty() { if !self.mapped_listeners.is_empty() {
cfg.set_mapped_listeners(Some( cfg.set_mapped_listeners(Some(
cli.mapped_listeners self.mapped_listeners
.iter() .iter()
.map(|s| { .map(|s| {
s.parse() s.parse()
@@ -599,14 +656,11 @@ impl TryFrom<&Cli> for TomlConfigLoader {
)); ));
} }
for n in cli.proxy_networks.iter() { for n in self.proxy_networks.iter() {
cfg.add_proxy_cidr( add_proxy_network_to_config(n, &cfg)?;
n.parse()
.with_context(|| format!("failed to parse proxy network: {}", n))?,
);
} }
let rpc_portal = if let Some(r) = &cli.rpc_portal { let rpc_portal = if let Some(r) = &self.rpc_portal {
Cli::parse_rpc_portal(r.clone()) Cli::parse_rpc_portal(r.clone())
.with_context(|| format!("failed to parse rpc portal: {}", r))? .with_context(|| format!("failed to parse rpc portal: {}", r))?
} else if let Some(r) = cfg.get_rpc_portal() { } else if let Some(r) = cfg.get_rpc_portal() {
@@ -616,7 +670,9 @@ impl TryFrom<&Cli> for TomlConfigLoader {
}; };
cfg.set_rpc_portal(rpc_portal); cfg.set_rpc_portal(rpc_portal);
if let Some(external_nodes) = cli.external_node.as_ref() { cfg.set_rpc_portal_whitelist(self.rpc_portal_whitelist.clone());
if let Some(external_nodes) = self.external_node.as_ref() {
let mut old_peers = cfg.get_peers(); let mut old_peers = cfg.get_peers();
old_peers.push(PeerConfig { old_peers.push(PeerConfig {
uri: external_nodes.parse().with_context(|| { uri: external_nodes.parse().with_context(|| {
@@ -626,37 +682,11 @@ impl TryFrom<&Cli> for TomlConfigLoader {
cfg.set_peers(old_peers); cfg.set_peers(old_peers);
} }
if cli.console_log_level.is_some() { if let Some(inst_name) = &self.instance_name {
cfg.set_console_logger_config(ConsoleLoggerConfig {
level: cli.console_log_level.clone(),
});
}
if let Some(inst_name) = &cli.instance_name {
cfg.set_inst_name(inst_name.clone()); cfg.set_inst_name(inst_name.clone());
} }
if cli.file_log_dir.is_some() || cli.file_log_level.is_some() { if let Some(vpn_portal) = self.vpn_portal.as_ref() {
let inst_name = cfg.get_inst_name();
let old_fl = cfg.get_file_logger_config();
let file_log_dir = if cli.file_log_dir.is_some() {
&cli.file_log_dir
} else {
&old_fl.dir
};
let file_log_level = if cli.file_log_level.is_some() {
&cli.file_log_level
} else {
&old_fl.level
};
cfg.set_file_logger_config(FileLoggerConfig {
level: file_log_level.clone(),
dir: file_log_dir.clone(),
file: Some(format!("easytier-{}", inst_name)),
});
}
if let Some(vpn_portal) = cli.vpn_portal.as_ref() {
let url: url::Url = vpn_portal let url: url::Url = vpn_portal
.parse() .parse()
.with_context(|| format!("failed to parse vpn portal url: {}", vpn_portal))?; .with_context(|| format!("failed to parse vpn portal url: {}", vpn_portal))?;
@@ -676,7 +706,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
}); });
} }
if let Some(manual_routes) = cli.manual_routes.as_ref() { if let Some(manual_routes) = self.manual_routes.as_ref() {
let mut routes = Vec::<cidr::Ipv4Cidr>::with_capacity(manual_routes.len()); let mut routes = Vec::<cidr::Ipv4Cidr>::with_capacity(manual_routes.len());
for r in manual_routes { for r in manual_routes {
routes.push( routes.push(
@@ -688,7 +718,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
} }
#[cfg(feature = "socks5")] #[cfg(feature = "socks5")]
if let Some(socks5_proxy) = cli.socks5 { if let Some(socks5_proxy) = self.socks5 {
cfg.set_socks5_portal(Some( cfg.set_socks5_portal(Some(
format!("socks5://0.0.0.0:{}", socks5_proxy) format!("socks5://0.0.0.0:{}", socks5_proxy)
.parse() .parse()
@@ -697,7 +727,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
} }
#[cfg(feature = "socks5")] #[cfg(feature = "socks5")]
for port_forward in cli.port_forward.iter() { for port_forward in self.port_forward.iter() {
let example_str = ", example: udp://0.0.0.0:12345/10.126.126.1:12345"; let example_str = ", example: udp://0.0.0.0:12345/10.126.126.1:12345";
let bind_addr = format!( let bind_addr = format!(
@@ -731,38 +761,38 @@ impl TryFrom<&Cli> for TomlConfigLoader {
} }
let mut f = cfg.get_flags(); let mut f = cfg.get_flags();
if let Some(default_protocol) = &cli.default_protocol { if let Some(default_protocol) = &self.default_protocol {
f.default_protocol = default_protocol.clone() f.default_protocol = default_protocol.clone()
}; };
if let Some(v) = cli.disable_encryption { if let Some(v) = self.disable_encryption {
f.enable_encryption = !v; f.enable_encryption = !v;
} }
if let Some(v) = cli.disable_ipv6 { if let Some(v) = self.disable_ipv6 {
f.enable_ipv6 = !v; f.enable_ipv6 = !v;
} }
f.latency_first = cli.latency_first.unwrap_or(f.latency_first); f.latency_first = self.latency_first.unwrap_or(f.latency_first);
if let Some(dev_name) = &cli.dev_name { if let Some(dev_name) = &self.dev_name {
f.dev_name = dev_name.clone() f.dev_name = dev_name.clone()
} }
if let Some(mtu) = cli.mtu { if let Some(mtu) = self.mtu {
f.mtu = mtu as u32; f.mtu = mtu as u32;
} }
f.enable_exit_node = cli.enable_exit_node.unwrap_or(f.enable_exit_node); f.enable_exit_node = self.enable_exit_node.unwrap_or(f.enable_exit_node);
f.proxy_forward_by_system = cli f.proxy_forward_by_system = self
.proxy_forward_by_system .proxy_forward_by_system
.unwrap_or(f.proxy_forward_by_system); .unwrap_or(f.proxy_forward_by_system);
f.no_tun = cli.no_tun.unwrap_or(f.no_tun) || cfg!(not(feature = "tun")); f.no_tun = self.no_tun.unwrap_or(f.no_tun) || cfg!(not(feature = "tun"));
f.use_smoltcp = cli.use_smoltcp.unwrap_or(f.use_smoltcp); f.use_smoltcp = self.use_smoltcp.unwrap_or(f.use_smoltcp);
if let Some(wl) = cli.relay_network_whitelist.as_ref() { if let Some(wl) = self.relay_network_whitelist.as_ref() {
f.relay_network_whitelist = wl.join(" "); f.relay_network_whitelist = wl.join(" ");
} }
f.disable_p2p = cli.disable_p2p.unwrap_or(f.disable_p2p); f.disable_p2p = self.disable_p2p.unwrap_or(f.disable_p2p);
f.disable_udp_hole_punching = cli f.disable_udp_hole_punching = self
.disable_udp_hole_punching .disable_udp_hole_punching
.unwrap_or(f.disable_udp_hole_punching); .unwrap_or(f.disable_udp_hole_punching);
f.relay_all_peer_rpc = cli.relay_all_peer_rpc.unwrap_or(f.relay_all_peer_rpc); f.relay_all_peer_rpc = self.relay_all_peer_rpc.unwrap_or(f.relay_all_peer_rpc);
f.multi_thread = cli.multi_thread.unwrap_or(f.multi_thread); f.multi_thread = self.multi_thread.unwrap_or(f.multi_thread);
if let Some(compression) = &cli.compression { if let Some(compression) = &self.compression {
f.data_compress_algo = match compression.as_str() { f.data_compress_algo = match compression.as_str() {
"none" => CompressionAlgoPb::None, "none" => CompressionAlgoPb::None,
"zstd" => CompressionAlgoPb::Zstd, "zstd" => CompressionAlgoPb::Zstd,
@@ -773,154 +803,40 @@ impl TryFrom<&Cli> for TomlConfigLoader {
} }
.into(); .into();
} }
f.bind_device = cli.bind_device.unwrap_or(f.bind_device); f.bind_device = self.bind_device.unwrap_or(f.bind_device);
f.enable_kcp_proxy = cli.enable_kcp_proxy.unwrap_or(f.enable_kcp_proxy); f.enable_kcp_proxy = self.enable_kcp_proxy.unwrap_or(f.enable_kcp_proxy);
f.disable_kcp_input = cli.disable_kcp_input.unwrap_or(f.disable_kcp_input); f.disable_kcp_input = self.disable_kcp_input.unwrap_or(f.disable_kcp_input);
f.accept_dns = cli.accept_dns.unwrap_or(f.accept_dns); f.enable_quic_proxy = self.enable_quic_proxy.unwrap_or(f.enable_quic_proxy);
f.private_mode = cli.private_mode.unwrap_or(f.private_mode); f.disable_quic_input = self.disable_quic_input.unwrap_or(f.disable_quic_input);
f.accept_dns = self.accept_dns.unwrap_or(f.accept_dns);
f.private_mode = self.private_mode.unwrap_or(f.private_mode);
f.foreign_relay_bps_limit = self
.foreign_relay_bps_limit
.unwrap_or(f.foreign_relay_bps_limit);
cfg.set_flags(f); cfg.set_flags(f);
if !cli.exit_nodes.is_empty() { if !self.exit_nodes.is_empty() {
cfg.set_exit_nodes(cli.exit_nodes.clone()); cfg.set_exit_nodes(self.exit_nodes.clone());
} }
Ok(cfg) Ok(())
} }
} }
fn print_event(msg: String) { impl LoggingConfigLoader for &LoggingOptions {
println!( fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
"{}: {}", ConsoleLoggerConfig {
chrono::Local::now().format("%Y-%m-%d %H:%M:%S"), level: self.console_log_level.clone(),
msg
);
}
fn peer_conn_info_to_string(p: proto::cli::PeerConnInfo) -> String {
format!(
"my_peer_id: {}, dst_peer_id: {}, tunnel_info: {:?}",
p.my_peer_id, p.peer_id, p.tunnel
)
}
#[tracing::instrument]
pub fn handle_event(mut events: EventBusSubscriber) -> tokio::task::JoinHandle<()> {
tokio::spawn(async move {
loop {
if let Ok(e) = events.recv().await {
match e {
GlobalCtxEvent::PeerAdded(p) => {
print_event(format!("new peer added. peer_id: {}", p));
}
GlobalCtxEvent::PeerRemoved(p) => {
print_event(format!("peer removed. peer_id: {}", p));
}
GlobalCtxEvent::PeerConnAdded(p) => {
print_event(format!(
"new peer connection added. conn_info: {}",
peer_conn_info_to_string(p)
));
}
GlobalCtxEvent::PeerConnRemoved(p) => {
print_event(format!(
"peer connection removed. conn_info: {}",
peer_conn_info_to_string(p)
));
}
GlobalCtxEvent::ListenerAddFailed(p, msg) => {
print_event(format!(
"listener add failed. listener: {}, msg: {}",
p, msg
));
}
GlobalCtxEvent::ListenerAcceptFailed(p, msg) => {
print_event(format!(
"listener accept failed. listener: {}, msg: {}",
p, msg
));
}
GlobalCtxEvent::ListenerAdded(p) => {
if p.scheme() == "ring" {
continue;
}
print_event(format!("new listener added. listener: {}", p));
}
GlobalCtxEvent::ConnectionAccepted(local, remote) => {
print_event(format!(
"new connection accepted. local: {}, remote: {}",
local, remote
));
}
GlobalCtxEvent::ConnectionError(local, remote, err) => {
print_event(format!(
"connection error. local: {}, remote: {}, err: {}",
local, remote, err
));
}
GlobalCtxEvent::TunDeviceReady(dev) => {
print_event(format!("tun device ready. dev: {}", dev));
}
GlobalCtxEvent::TunDeviceError(err) => {
print_event(format!("tun device error. err: {}", err));
}
GlobalCtxEvent::Connecting(dst) => {
print_event(format!("connecting to peer. dst: {}", dst));
}
GlobalCtxEvent::ConnectError(dst, ip_version, err) => {
print_event(format!(
"connect to peer error. dst: {}, ip_version: {}, err: {}",
dst, ip_version, err
));
}
GlobalCtxEvent::VpnPortalClientConnected(portal, client_addr) => {
print_event(format!(
"vpn portal client connected. portal: {}, client_addr: {}",
portal, client_addr
));
}
GlobalCtxEvent::VpnPortalClientDisconnected(portal, client_addr) => {
print_event(format!(
"vpn portal client disconnected. portal: {}, client_addr: {}",
portal, client_addr
));
}
GlobalCtxEvent::DhcpIpv4Changed(old, new) => {
print_event(format!("dhcp ip changed. old: {:?}, new: {:?}", old, new));
}
GlobalCtxEvent::DhcpIpv4Conflicted(ip) => {
print_event(format!("dhcp ip conflict. ip: {:?}", ip));
}
GlobalCtxEvent::PortForwardAdded(cfg) => {
print_event(format!(
"port forward added. local: {}, remote: {}, proto: {}",
cfg.bind_addr.unwrap().to_string(),
cfg.dst_addr.unwrap().to_string(),
cfg.socket_type().as_str_name()
));
}
}
} else {
events = events.resubscribe();
}
} }
}) }
fn get_file_logger_config(&self) -> FileLoggerConfig {
FileLoggerConfig {
level: self.file_log_level.clone(),
dir: self.file_log_dir.clone(),
file: None,
}
}
} }
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
@@ -1035,10 +951,10 @@ fn win_service_main(arg: Vec<std::ffi::OsString>) {
} }
async fn run_main(cli: Cli) -> anyhow::Result<()> { async fn run_main(cli: Cli) -> anyhow::Result<()> {
let cfg = TomlConfigLoader::try_from(&cli)?; init_logger(&cli.logging_options, false)?;
init_logger(&cfg, false)?;
if cli.config_server.is_some() { if cli.config_server.is_some() {
set_default_machine_id(cli.machine_id);
let config_server_url_s = cli.config_server.clone().unwrap(); let config_server_url_s = cli.config_server.clone().unwrap();
let config_server_url = match url::Url::parse(&config_server_url_s) { let config_server_url = match url::Url::parse(&config_server_url_s) {
Ok(u) => u, Ok(u) => u,
@@ -1077,7 +993,7 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
let mut flags = global_ctx.get_flags(); let mut flags = global_ctx.get_flags();
flags.bind_device = false; flags.bind_device = false;
global_ctx.set_flags(flags); global_ctx.set_flags(flags);
let hostname = match cli.hostname { let hostname = match cli.network_options.hostname {
None => gethostname::gethostname().to_string_lossy().to_string(), None => gethostname::gethostname().to_string_lossy().to_string(),
Some(hostname) => hostname.to_string(), Some(hostname) => hostname.to_string(),
}; };
@@ -1089,19 +1005,47 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
tokio::signal::ctrl_c().await.unwrap(); tokio::signal::ctrl_c().await.unwrap();
return Ok(()); return Ok(());
} }
let manager = NetworkInstanceManager::new();
let mut crate_cli_network =
cli.config_file.is_none() || cli.network_options.network_name.is_some();
if let Some(config_files) = cli.config_file {
let config_file_count = config_files.len();
for config_file in config_files {
let mut cfg = TomlConfigLoader::new(&config_file)
.with_context(|| format!("failed to load config file: {:?}", config_file))?;
println!("Starting easytier with config:"); if cli.network_options.can_merge(&cfg, config_file_count) {
println!("############### TOML ###############\n"); cli.network_options.merge_into(&mut cfg).with_context(|| {
println!("{}", cfg.dump()); format!("failed to merge config from cli: {:?}", config_file)
println!("-----------------------------------"); })?;
crate_cli_network = false;
let mut l = launcher::NetworkInstance::new(cfg).set_fetch_node_info(false);
let _t = ScopedTask::from(handle_event(l.start().unwrap()));
tokio::select! {
e = l.wait() => {
if let Some(e) = e {
eprintln!("launcher error: {}", e);
} }
println!(
"Starting easytier from config file {:?} with config:",
config_file
);
println!("############### TOML ###############\n");
println!("{}", cfg.dump());
println!("-----------------------------------");
manager.run_network_instance(cfg, ConfigSource::File)?;
}
}
if crate_cli_network {
let mut cfg = TomlConfigLoader::default();
cli.network_options
.merge_into(&mut cfg)
.with_context(|| format!("failed to create config from cli"))?;
println!("Starting easytier from cli with config:");
println!("############### TOML ###############\n");
println!("{}", cfg.dump());
println!("-----------------------------------");
manager.run_network_instance(cfg, ConfigSource::Cli)?;
}
tokio::select! {
_ = manager.wait() => {
} }
_ = tokio::signal::ctrl_c() => { _ = tokio::signal::ctrl_c() => {
println!("ctrl-c received, exiting..."); println!("ctrl-c received, exiting...");
+74 -22
View File
@@ -1,7 +1,7 @@
use std::{ use std::{
mem::MaybeUninit, mem::MaybeUninit,
net::{IpAddr, Ipv4Addr, SocketAddrV4}, net::{IpAddr, Ipv4Addr, SocketAddrV4},
sync::Arc, sync::{Arc, Weak},
thread, thread,
time::Duration, time::Duration,
}; };
@@ -34,7 +34,7 @@ use super::{
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct IcmpNatKey { struct IcmpNatKey {
dst_ip: std::net::IpAddr, real_dst_ip: std::net::IpAddr,
icmp_id: u16, icmp_id: u16,
icmp_seq: u16, icmp_seq: u16,
} }
@@ -45,15 +45,22 @@ struct IcmpNatEntry {
my_peer_id: PeerId, my_peer_id: PeerId,
src_ip: IpAddr, src_ip: IpAddr,
start_time: std::time::Instant, start_time: std::time::Instant,
mapped_dst_ip: std::net::Ipv4Addr,
} }
impl IcmpNatEntry { impl IcmpNatEntry {
fn new(src_peer_id: PeerId, my_peer_id: PeerId, src_ip: IpAddr) -> Result<Self, Error> { fn new(
src_peer_id: PeerId,
my_peer_id: PeerId,
src_ip: IpAddr,
mapped_dst_ip: Ipv4Addr,
) -> Result<Self, Error> {
Ok(Self { Ok(Self {
src_peer_id, src_peer_id,
my_peer_id, my_peer_id,
src_ip, src_ip,
start_time: std::time::Instant::now(), start_time: std::time::Instant::now(),
mapped_dst_ip,
}) })
} }
} }
@@ -65,10 +72,10 @@ type NewPacketReceiver = tokio::sync::mpsc::UnboundedReceiver<IcmpNatKey>;
#[derive(Debug)] #[derive(Debug)]
pub struct IcmpProxy { pub struct IcmpProxy {
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
peer_manager: Arc<PeerManager>, peer_manager: Weak<PeerManager>,
cidr_set: CidrSet, cidr_set: CidrSet,
socket: std::sync::Mutex<Option<socket2::Socket>>, socket: std::sync::Mutex<Option<Arc<socket2::Socket>>>,
nat_table: IcmpNatTable, nat_table: IcmpNatTable,
@@ -78,7 +85,10 @@ pub struct IcmpProxy {
icmp_sender: Arc<std::sync::Mutex<Option<UnboundedSender<ZCPacket>>>>, icmp_sender: Arc<std::sync::Mutex<Option<UnboundedSender<ZCPacket>>>>,
} }
fn socket_recv(socket: &Socket, buf: &mut [MaybeUninit<u8>]) -> Result<(usize, IpAddr), Error> { fn socket_recv(
socket: &Socket,
buf: &mut [MaybeUninit<u8>],
) -> Result<(usize, IpAddr), std::io::Error> {
let (size, addr) = socket.recv_from(buf)?; let (size, addr) = socket.recv_from(buf)?;
let addr = match addr.as_socket() { let addr = match addr.as_socket() {
None => IpAddr::V4(Ipv4Addr::UNSPECIFIED), None => IpAddr::V4(Ipv4Addr::UNSPECIFIED),
@@ -87,15 +97,32 @@ fn socket_recv(socket: &Socket, buf: &mut [MaybeUninit<u8>]) -> Result<(usize, I
Ok((size, addr)) Ok((size, addr))
} }
fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSender<ZCPacket>) { fn socket_recv_loop(
socket: Arc<Socket>,
nat_table: IcmpNatTable,
sender: UnboundedSender<ZCPacket>,
) {
let mut buf = [0u8; 8192]; let mut buf = [0u8; 8192];
let data: &mut [MaybeUninit<u8>] = unsafe { std::mem::transmute(&mut buf[..]) }; let data: &mut [MaybeUninit<u8>] = unsafe { std::mem::transmute(&mut buf[..]) };
loop { loop {
let Ok((len, peer_ip)) = socket_recv(&socket, data) else { let (len, peer_ip) = match socket_recv(&socket, data) {
continue; Ok((len, peer_ip)) => (len, peer_ip),
Err(e) => {
tracing::error!("recv icmp packet failed: {:?}", e);
if sender.is_closed() {
break;
} else {
continue;
}
}
}; };
if len <= 0 {
tracing::error!("recv empty packet, len: {}", len);
return;
}
if !peer_ip.is_ipv4() { if !peer_ip.is_ipv4() {
continue; continue;
} }
@@ -114,7 +141,7 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe
} }
let key = IcmpNatKey { let key = IcmpNatKey {
dst_ip: peer_ip, real_dst_ip: peer_ip,
icmp_id: icmp_packet.get_identifier(), icmp_id: icmp_packet.get_identifier(),
icmp_seq: icmp_packet.get_sequence_number(), icmp_seq: icmp_packet.get_sequence_number(),
}; };
@@ -128,12 +155,11 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe
continue; continue;
}; };
let src_v4 = ipv4_packet.get_source();
let payload_len = len - ipv4_packet.get_header_length() as usize * 4; let payload_len = len - ipv4_packet.get_header_length() as usize * 4;
let id = ipv4_packet.get_identification(); let id = ipv4_packet.get_identification();
let _ = compose_ipv4_packet( let _ = compose_ipv4_packet(
&mut buf[..], &mut buf[..],
&src_v4, &v.mapped_dst_ip,
&dest_ip, &dest_ip,
IpNextHeaderProtocols::Icmp, IpNextHeaderProtocols::Icmp,
payload_len, payload_len,
@@ -176,7 +202,7 @@ impl IcmpProxy {
let cidr_set = CidrSet::new(global_ctx.clone()); let cidr_set = CidrSet::new(global_ctx.clone());
let ret = Self { let ret = Self {
global_ctx, global_ctx,
peer_manager, peer_manager: Arc::downgrade(&peer_manager),
cidr_set, cidr_set,
socket: std::sync::Mutex::new(None), socket: std::sync::Mutex::new(None),
@@ -208,7 +234,7 @@ impl IcmpProxy {
let socket = self.create_raw_socket(); let socket = self.create_raw_socket();
match socket { match socket {
Ok(socket) => { Ok(socket) => {
self.socket.lock().unwrap().replace(socket); self.socket.lock().unwrap().replace(Arc::new(socket));
} }
Err(e) => { Err(e) => {
tracing::warn!("create icmp socket failed: {:?}", e); tracing::warn!("create icmp socket failed: {:?}", e);
@@ -241,7 +267,7 @@ impl IcmpProxy {
let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel(); let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel();
self.icmp_sender.lock().unwrap().replace(sender.clone()); self.icmp_sender.lock().unwrap().replace(sender.clone());
if let Some(socket) = self.socket.lock().unwrap().as_ref() { if let Some(socket) = self.socket.lock().unwrap().as_ref() {
let socket = socket.try_clone()?; let socket = socket.clone();
let nat_table = self.nat_table.clone(); let nat_table = self.nat_table.clone();
thread::spawn(|| { thread::spawn(|| {
socket_recv_loop(socket, nat_table, sender); socket_recv_loop(socket, nat_table, sender);
@@ -254,7 +280,11 @@ impl IcmpProxy {
while let Some(msg) = receiver.recv().await { while let Some(msg) = receiver.recv().await {
let hdr = msg.peer_manager_header().unwrap(); let hdr = msg.peer_manager_header().unwrap();
let to_peer_id = hdr.to_peer_id.into(); let to_peer_id = hdr.to_peer_id.into();
let ret = peer_manager.send_msg(msg, to_peer_id).await; let Some(pm) = peer_manager.upgrade() else {
tracing::warn!("peer manager is gone, icmp proxy send loop exit");
return;
};
let ret = pm.send_msg(msg, to_peer_id).await;
if ret.is_err() { if ret.is_err() {
tracing::error!("send icmp packet to peer failed: {:?}", ret); tracing::error!("send icmp packet to peer failed: {:?}", ret);
} }
@@ -271,9 +301,12 @@ impl IcmpProxy {
} }
}); });
self.peer_manager let Some(pm) = self.peer_manager.upgrade() else {
.add_packet_process_pipeline(Box::new(self.clone())) tracing::warn!("peer manager is gone, icmp proxy init failed");
.await; return Err(anyhow::anyhow!("peer manager is gone").into());
};
pm.add_packet_process_pipeline(Box::new(self.clone())).await;
Ok(()) Ok(())
} }
@@ -361,7 +394,11 @@ impl IcmpProxy {
return None; return None;
} }
if !self.cidr_set.contains_v4(ipv4.get_destination()) let mut real_dst_ip = ipv4.get_destination();
if !self
.cidr_set
.contains_v4(ipv4.get_destination(), &mut real_dst_ip)
&& !is_exit_node && !is_exit_node
&& !(self.global_ctx.no_tun() && !(self.global_ctx.no_tun()
&& Some(ipv4.get_destination()) && Some(ipv4.get_destination())
@@ -416,7 +453,7 @@ impl IcmpProxy {
let icmp_seq = icmp_packet.get_sequence_number(); let icmp_seq = icmp_packet.get_sequence_number();
let key = IcmpNatKey { let key = IcmpNatKey {
dst_ip: ipv4.get_destination().into(), real_dst_ip: real_dst_ip.into(),
icmp_id, icmp_id,
icmp_seq, icmp_seq,
}; };
@@ -425,6 +462,7 @@ impl IcmpProxy {
hdr.from_peer_id.into(), hdr.from_peer_id.into(),
hdr.to_peer_id.into(), hdr.to_peer_id.into(),
ipv4.get_source().into(), ipv4.get_source().into(),
ipv4.get_destination(),
) )
.ok()?; .ok()?;
@@ -432,10 +470,24 @@ impl IcmpProxy {
tracing::info!("icmp nat table entry replaced: {:?}", old); tracing::info!("icmp nat table entry replaced: {:?}", old);
} }
if let Err(e) = self.send_icmp_packet(ipv4.get_destination(), &icmp_packet) { if let Err(e) = self.send_icmp_packet(real_dst_ip, &icmp_packet) {
tracing::error!("send icmp packet failed: {:?}", e); tracing::error!("send icmp packet failed: {:?}", e);
} }
Some(()) Some(())
} }
} }
impl Drop for IcmpProxy {
fn drop(&mut self) {
tracing::info!(
"dropping icmp proxy, {:?}",
self.socket.lock().unwrap().as_ref()
);
self.socket.lock().unwrap().as_ref().and_then(|s| {
tracing::info!("shutting down icmp socket");
let _ = s.shutdown(std::net::Shutdown::Both);
Some(())
});
}
}
+123 -48
View File
@@ -20,7 +20,7 @@ use pnet::packet::{
Packet as _, Packet as _,
}; };
use prost::Message; use prost::Message;
use tokio::{io::copy_bidirectional, task::JoinSet}; use tokio::{io::copy_bidirectional, select, task::JoinSet};
use super::{ use super::{
tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy}, tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy},
@@ -107,7 +107,7 @@ async fn handle_kcp_output(
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct NatDstKcpConnector { pub struct NatDstKcpConnector {
pub(crate) kcp_endpoint: Arc<KcpEndpoint>, pub(crate) kcp_endpoint: Arc<KcpEndpoint>,
pub(crate) peer_mgr: Arc<PeerManager>, pub(crate) peer_mgr: Weak<PeerManager>,
} }
#[async_trait::async_trait] #[async_trait::async_trait]
@@ -120,35 +120,68 @@ impl NatDstConnector for NatDstKcpConnector {
dst: Some(nat_dst.into()), dst: Some(nat_dst.into()),
}; };
let (dst_peers, _) = match nat_dst { let Some(peer_mgr) = self.peer_mgr.upgrade() else {
SocketAddr::V4(addr) => { return Err(anyhow::anyhow!("peer manager is not available").into());
let ip = addr.ip(); };
self.peer_mgr.get_msg_dst_peer(&ip).await
} let dst_peer_id = match nat_dst {
SocketAddr::V4(addr) => peer_mgr.get_peer_map().get_peer_id_by_ipv4(addr.ip()).await,
SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()), SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()),
}; };
tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peers); let Some(dst_peer) = dst_peer_id else {
return Err(anyhow::anyhow!("no peer found for nat dst: {}", nat_dst).into());
};
if dst_peers.len() != 1 { tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peer);
return Err(anyhow::anyhow!("no dst peer found for nat dst: {}", nat_dst).into());
let mut connect_tasks: JoinSet<std::result::Result<ConnId, anyhow::Error>> = JoinSet::new();
let mut retry_remain = 5;
loop {
select! {
Some(Ok(Ok(ret))) = connect_tasks.join_next() => {
// just wait for the previous connection to finish
let stream = KcpStream::new(&self.kcp_endpoint, ret)
.ok_or(anyhow::anyhow!("failed to create kcp stream"))?;
return Ok(stream);
}
_ = tokio::time::sleep(Duration::from_millis(200)), if !connect_tasks.is_empty() && retry_remain > 0 => {
// no successful connection yet, trigger another connection attempt
}
else => {
// got error in connect_tasks, continue to retry
if retry_remain == 0 && connect_tasks.is_empty() {
break;
}
}
}
// create a new connection task
if retry_remain == 0 {
continue;
}
retry_remain -= 1;
let kcp_endpoint = self.kcp_endpoint.clone();
let my_peer_id = peer_mgr.my_peer_id();
let conn_data_clone = conn_data.clone();
connect_tasks.spawn(async move {
kcp_endpoint
.connect(
Duration::from_secs(10),
my_peer_id,
dst_peer,
Bytes::from(conn_data_clone.encode_to_vec()),
)
.await
.with_context(|| {
format!("failed to connect to nat dst: {}", nat_dst.to_string())
})
});
} }
let ret = self Err(anyhow::anyhow!("failed to connect to nat dst: {}", nat_dst).into())
.kcp_endpoint
.connect(
Duration::from_secs(10),
self.peer_mgr.my_peer_id(),
dst_peers[0],
Bytes::from(conn_data.encode_to_vec()),
)
.await
.with_context(|| format!("failed to connect to nat dst: {}", nat_dst.to_string()))?;
let stream = KcpStream::new(&self.kcp_endpoint, ret)
.ok_or(anyhow::anyhow!("failed to create kcp stream"))?;
Ok(stream)
} }
fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool { fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool {
@@ -161,8 +194,9 @@ impl NatDstConnector for NatDstKcpConnector {
_global_ctx: &GlobalCtx, _global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader, hdr: &PeerManagerHeader,
_ipv4: &Ipv4Packet, _ipv4: &Ipv4Packet,
_real_dst_ip: &mut Ipv4Addr,
) -> bool { ) -> bool {
return hdr.from_peer_id == hdr.to_peer_id; return hdr.from_peer_id == hdr.to_peer_id && hdr.is_kcp_src_modified();
} }
fn transport_type(&self) -> TcpProxyEntryTransportType { fn transport_type(&self) -> TcpProxyEntryTransportType {
@@ -173,32 +207,41 @@ impl NatDstConnector for NatDstKcpConnector {
#[derive(Clone)] #[derive(Clone)]
struct TcpProxyForKcpSrc(Arc<TcpProxy<NatDstKcpConnector>>); struct TcpProxyForKcpSrc(Arc<TcpProxy<NatDstKcpConnector>>);
pub struct KcpProxySrc { #[async_trait::async_trait]
kcp_endpoint: Arc<KcpEndpoint>, pub(crate) trait TcpProxyForKcpSrcTrait: Send + Sync + 'static {
peer_manager: Arc<PeerManager>, type Connector: NatDstConnector;
fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>>;
tcp_proxy: TcpProxyForKcpSrc, async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool;
tasks: JoinSet<()>,
} }
impl TcpProxyForKcpSrc { #[async_trait::async_trait]
impl TcpProxyForKcpSrcTrait for TcpProxyForKcpSrc {
type Connector = NatDstKcpConnector;
fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>> {
&self.0
}
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool { async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
let peer_map: Arc<crate::peers::peer_map::PeerMap> = let peer_map: Arc<crate::peers::peer_map::PeerMap> =
self.0.get_peer_manager().get_peer_map(); self.0.get_peer_manager().get_peer_map();
let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else { let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
return false; return false;
}; };
let Some(feature_flag) = peer_map.get_peer_feature_flag(dst_peer_id).await else { let Some(peer_info) = peer_map.get_route_peer_info(dst_peer_id).await else {
return false; return false;
}; };
feature_flag.kcp_input peer_info.feature_flag.map(|x| x.kcp_input).unwrap_or(false)
} }
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl NicPacketFilter for TcpProxyForKcpSrc { impl<C: NatDstConnector, T: TcpProxyForKcpSrcTrait<Connector = C>> NicPacketFilter for T {
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool { async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
let ret = self.0.try_process_packet_from_nic(zc_packet).await; let ret = self
.get_tcp_proxy()
.try_process_packet_from_nic(zc_packet)
.await;
if ret { if ret {
return true; return true;
} }
@@ -225,29 +268,45 @@ impl NicPacketFilter for TcpProxyForKcpSrc {
} }
} else { } else {
// if not syn packet, only allow established connection // if not syn packet, only allow established connection
if !self.0.is_tcp_proxy_connection(SocketAddr::new( if !self
IpAddr::V4(ip_packet.get_source()), .get_tcp_proxy()
tcp_packet.get_source(), .is_tcp_proxy_connection(SocketAddr::new(
)) { IpAddr::V4(ip_packet.get_source()),
tcp_packet.get_source(),
))
{
return false; return false;
} }
} }
if let Some(my_ipv4) = self.0.get_global_ctx().get_ipv4() { if let Some(my_ipv4) = self.get_tcp_proxy().get_global_ctx().get_ipv4() {
// this is a net-to-net packet, only allow it when smoltcp is enabled // this is a net-to-net packet, only allow it when smoltcp is enabled
// because the syn-ack packet will not be through and handled by the tun device when // because the syn-ack packet will not be through and handled by the tun device when
// the source ip is in the local network // the source ip is in the local network
if ip_packet.get_source() != my_ipv4.address() && !self.0.is_smoltcp_enabled() { if ip_packet.get_source() != my_ipv4.address()
&& !self.get_tcp_proxy().is_smoltcp_enabled()
{
return false; return false;
} }
}; };
zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.0.get_my_peer_id().into(); let hdr = zc_packet.mut_peer_manager_header().unwrap();
hdr.to_peer_id = self.get_tcp_proxy().get_my_peer_id().into();
if self.get_tcp_proxy().get_transport_type() == TcpProxyEntryTransportType::Kcp {
hdr.set_kcp_src_modified(true);
}
true true
} }
} }
pub struct KcpProxySrc {
kcp_endpoint: Arc<KcpEndpoint>,
peer_manager: Arc<PeerManager>,
tcp_proxy: TcpProxyForKcpSrc,
tasks: JoinSet<()>,
}
impl KcpProxySrc { impl KcpProxySrc {
pub async fn new(peer_manager: Arc<PeerManager>) -> Self { pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
let mut kcp_endpoint = create_kcp_endpoint(); let mut kcp_endpoint = create_kcp_endpoint();
@@ -268,7 +327,7 @@ impl KcpProxySrc {
peer_manager.clone(), peer_manager.clone(),
NatDstKcpConnector { NatDstKcpConnector {
kcp_endpoint: kcp_endpoint.clone(), kcp_endpoint: kcp_endpoint.clone(),
peer_mgr: peer_manager.clone(), peer_mgr: Arc::downgrade(&peer_manager),
}, },
); );
@@ -309,6 +368,7 @@ pub struct KcpProxyDst {
kcp_endpoint: Arc<KcpEndpoint>, kcp_endpoint: Arc<KcpEndpoint>,
peer_manager: Arc<PeerManager>, peer_manager: Arc<PeerManager>,
proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>, proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
cidr_set: Arc<CidrSet>,
tasks: JoinSet<()>, tasks: JoinSet<()>,
} }
@@ -324,11 +384,12 @@ impl KcpProxyDst {
output_receiver, output_receiver,
false, false,
)); ));
let cidr_set = CidrSet::new(peer_manager.get_global_ctx());
Self { Self {
kcp_endpoint: Arc::new(kcp_endpoint), kcp_endpoint: Arc::new(kcp_endpoint),
peer_manager, peer_manager,
proxy_entries: Arc::new(DashMap::new()), proxy_entries: Arc::new(DashMap::new()),
cidr_set: Arc::new(cidr_set),
tasks, tasks,
} }
} }
@@ -338,6 +399,7 @@ impl KcpProxyDst {
mut kcp_stream: KcpStream, mut kcp_stream: KcpStream,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>, proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
cidr_set: Arc<CidrSet>,
) -> Result<()> { ) -> Result<()> {
let mut conn_data = kcp_stream.conn_data().clone(); let mut conn_data = kcp_stream.conn_data().clone();
let parsed_conn_data = KcpConnData::decode(&mut conn_data) let parsed_conn_data = KcpConnData::decode(&mut conn_data)
@@ -350,6 +412,16 @@ impl KcpProxyDst {
))? ))?
.into(); .into();
match dst_socket.ip() {
IpAddr::V4(dst_v4_ip) => {
let mut real_ip = dst_v4_ip;
if cidr_set.contains_v4(dst_v4_ip, &mut real_ip) {
dst_socket.set_ip(real_ip.into());
}
}
_ => {}
};
let conn_id = kcp_stream.conn_id(); let conn_id = kcp_stream.conn_id();
proxy_entries.insert( proxy_entries.insert(
conn_id, conn_id,
@@ -391,6 +463,7 @@ impl KcpProxyDst {
let kcp_endpoint = self.kcp_endpoint.clone(); let kcp_endpoint = self.kcp_endpoint.clone();
let global_ctx = self.peer_manager.get_global_ctx().clone(); let global_ctx = self.peer_manager.get_global_ctx().clone();
let proxy_entries = self.proxy_entries.clone(); let proxy_entries = self.proxy_entries.clone();
let cidr_set = self.cidr_set.clone();
self.tasks.spawn(async move { self.tasks.spawn(async move {
while let Ok(conn) = kcp_endpoint.accept().await { while let Ok(conn) = kcp_endpoint.accept().await {
let stream = KcpStream::new(&kcp_endpoint, conn) let stream = KcpStream::new(&kcp_endpoint, conn)
@@ -399,8 +472,10 @@ impl KcpProxyDst {
let global_ctx = global_ctx.clone(); let global_ctx = global_ctx.clone();
let proxy_entries = proxy_entries.clone(); let proxy_entries = proxy_entries.clone();
let cidr_set = cidr_set.clone();
tokio::spawn(async move { tokio::spawn(async move {
let _ = Self::handle_one_in_stream(stream, global_ctx, proxy_entries).await; let _ = Self::handle_one_in_stream(stream, global_ctx, proxy_entries, cidr_set)
.await;
}); });
} }
}); });
+32 -5
View File
@@ -1,3 +1,4 @@
use dashmap::DashMap;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use tokio::task::JoinSet; use tokio::task::JoinSet;
@@ -17,11 +18,15 @@ pub mod socks5;
pub mod kcp_proxy; pub mod kcp_proxy;
pub mod quic_proxy;
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct CidrSet { pub(crate) struct CidrSet {
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
cidr_set: Arc<Mutex<Vec<cidr::IpCidr>>>, cidr_set: Arc<Mutex<Vec<cidr::Ipv4Cidr>>>,
tasks: JoinSet<()>, tasks: JoinSet<()>,
mapped_to_real: Arc<DashMap<cidr::Ipv4Cidr, cidr::Ipv4Cidr>>,
} }
impl CidrSet { impl CidrSet {
@@ -30,6 +35,8 @@ impl CidrSet {
global_ctx, global_ctx,
cidr_set: Arc::new(Mutex::new(vec![])), cidr_set: Arc::new(Mutex::new(vec![])),
tasks: JoinSet::new(), tasks: JoinSet::new(),
mapped_to_real: Arc::new(DashMap::new()),
}; };
ret.run_cidr_updater(); ret.run_cidr_updater();
ret ret
@@ -38,15 +45,23 @@ impl CidrSet {
fn run_cidr_updater(&mut self) { fn run_cidr_updater(&mut self) {
let global_ctx = self.global_ctx.clone(); let global_ctx = self.global_ctx.clone();
let cidr_set = self.cidr_set.clone(); let cidr_set = self.cidr_set.clone();
let mapped_to_real = self.mapped_to_real.clone();
self.tasks.spawn(async move { self.tasks.spawn(async move {
let mut last_cidrs = vec![]; let mut last_cidrs = vec![];
loop { loop {
let cidrs = global_ctx.get_proxy_cidrs(); let cidrs = global_ctx.config.get_proxy_cidrs();
if cidrs != last_cidrs { if cidrs != last_cidrs {
last_cidrs = cidrs.clone(); last_cidrs = cidrs.clone();
mapped_to_real.clear();
cidr_set.lock().unwrap().clear(); cidr_set.lock().unwrap().clear();
for cidr in cidrs.iter() { for cidr in cidrs.iter() {
cidr_set.lock().unwrap().push(cidr.clone()); let real_cidr = cidr.cidr;
let mapped = cidr.mapped_cidr.unwrap_or(real_cidr.clone());
cidr_set.lock().unwrap().push(mapped.clone());
if mapped != real_cidr {
mapped_to_real.insert(mapped.clone(), real_cidr.clone());
}
} }
} }
tokio::time::sleep(std::time::Duration::from_secs(1)).await; tokio::time::sleep(std::time::Duration::from_secs(1)).await;
@@ -54,11 +69,23 @@ impl CidrSet {
}); });
} }
pub fn contains_v4(&self, ip: std::net::Ipv4Addr) -> bool { pub fn contains_v4(&self, ipv4: std::net::Ipv4Addr, real_ip: &mut std::net::Ipv4Addr) -> bool {
let ip = ip.into(); let ip = ipv4.into();
let s = self.cidr_set.lock().unwrap(); let s = self.cidr_set.lock().unwrap();
for cidr in s.iter() { for cidr in s.iter() {
if cidr.contains(&ip) { if cidr.contains(&ip) {
if let Some(real_cidr) = self.mapped_to_real.get(&cidr).map(|v| v.value().clone()) {
let origin_network_bits = real_cidr.first().address().to_bits();
let network_mask = cidr.mask().to_bits();
let mut converted_ip = ipv4.to_bits();
converted_ip &= !network_mask;
converted_ip |= origin_network_bits;
*real_ip = std::net::Ipv4Addr::from(converted_ip);
} else {
*real_ip = ipv4;
}
return true; return true;
} }
} }
+443
View File
@@ -0,0 +1,443 @@
use std::net::{IpAddr, Ipv4Addr};
use std::sync::{Arc, Mutex, Weak};
use std::{net::SocketAddr, pin::Pin};
use anyhow::Context;
use dashmap::DashMap;
use pnet::packet::ipv4::Ipv4Packet;
use prost::Message as _;
use quinn::{Endpoint, Incoming};
use tokio::io::{copy_bidirectional, AsyncRead, AsyncReadExt, AsyncWrite};
use tokio::net::TcpStream;
use tokio::task::JoinSet;
use tokio::time::timeout;
use crate::common::error::Result;
use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx};
use crate::common::join_joinset_background;
use crate::defer;
use crate::gateway::kcp_proxy::TcpProxyForKcpSrcTrait;
use crate::gateway::tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy};
use crate::gateway::CidrSet;
use crate::peers::peer_manager::PeerManager;
use crate::proto::cli::{
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc,
};
use crate::proto::common::ProxyDstInfo;
use crate::proto::rpc_types;
use crate::proto::rpc_types::controller::BaseController;
use crate::tunnel::packet_def::PeerManagerHeader;
use crate::tunnel::quic::{configure_client, make_server_endpoint};
pub struct QUICStream {
endpoint: Option<quinn::Endpoint>,
connection: Option<quinn::Connection>,
sender: quinn::SendStream,
receiver: quinn::RecvStream,
}
impl AsyncRead for QUICStream {
fn poll_read(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> std::task::Poll<std::io::Result<()>> {
let this = self.get_mut();
Pin::new(&mut this.receiver).poll_read(cx, buf)
}
}
impl AsyncWrite for QUICStream {
fn poll_write(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> std::task::Poll<std::io::Result<usize>> {
let this = self.get_mut();
AsyncWrite::poll_write(Pin::new(&mut this.sender), cx, buf)
}
fn poll_flush(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<std::io::Result<()>> {
let this = self.get_mut();
Pin::new(&mut this.sender).poll_flush(cx)
}
fn poll_shutdown(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<std::io::Result<()>> {
let this = self.get_mut();
Pin::new(&mut this.sender).poll_shutdown(cx)
}
}
#[derive(Debug, Clone)]
pub struct NatDstQUICConnector {
pub(crate) peer_mgr: Weak<PeerManager>,
}
#[async_trait::async_trait]
impl NatDstConnector for NatDstQUICConnector {
type DstStream = QUICStream;
#[tracing::instrument(skip(self), level = "debug", name = "NatDstQUICConnector::connect")]
async fn connect(&self, src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> {
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
return Err(anyhow::anyhow!("peer manager is not available").into());
};
let IpAddr::V4(dst_ipv4) = nat_dst.ip() else {
return Err(anyhow::anyhow!("src must be an IPv4 address").into());
};
let Some(dst_peer) = peer_mgr.get_peer_map().get_peer_id_by_ipv4(&dst_ipv4).await else {
return Err(anyhow::anyhow!("no peer found for dst: {}", nat_dst).into());
};
let Some(dst_peer_info) = peer_mgr.get_peer_map().get_route_peer_info(dst_peer).await
else {
return Err(anyhow::anyhow!("no peer info found for dst peer: {}", dst_peer).into());
};
let Some(dst_ipv4): Option<Ipv4Addr> = dst_peer_info.ipv4_addr.map(Into::into) else {
return Err(anyhow::anyhow!("no ipv4 found for dst peer: {}", dst_peer).into());
};
let Some(quic_port) = dst_peer_info.quic_port else {
return Err(anyhow::anyhow!("no quic port found for dst peer: {}", dst_peer).into());
};
let mut endpoint = Endpoint::client("0.0.0.0:0".parse().unwrap())
.with_context(|| format!("failed to create QUIC endpoint for src: {}", src))?;
endpoint.set_default_client_config(configure_client());
// connect to server
let connection = {
let _g = peer_mgr.get_global_ctx().net_ns.guard();
endpoint
.connect(
SocketAddr::new(dst_ipv4.into(), quic_port as u16),
"localhost",
)
.unwrap()
.await
.with_context(|| {
format!(
"failed to connect to NAT destination {} from {}, real dst: {}",
nat_dst, src, dst_ipv4
)
})?
};
let (mut w, r) = connection
.open_bi()
.await
.with_context(|| "open_bi failed")?;
let proxy_dst_info = ProxyDstInfo {
dst_addr: Some(nat_dst.into()),
};
let proxy_dst_info_buf = proxy_dst_info.encode_to_vec();
let buf_len = proxy_dst_info_buf.len() as u8;
w.write(&buf_len.to_le_bytes())
.await
.with_context(|| "failed to write proxy dst info buf len to QUIC stream")?;
w.write(&proxy_dst_info_buf)
.await
.with_context(|| "failed to write proxy dst info to QUIC stream")?;
Ok(QUICStream {
endpoint: Some(endpoint),
connection: Some(connection),
sender: w,
receiver: r,
})
}
fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool {
true
}
fn check_packet_from_peer(
&self,
_cidr_set: &CidrSet,
_global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader,
_ipv4: &Ipv4Packet,
_real_dst_ip: &mut Ipv4Addr,
) -> bool {
return hdr.from_peer_id == hdr.to_peer_id && !hdr.is_kcp_src_modified();
}
fn transport_type(&self) -> TcpProxyEntryTransportType {
TcpProxyEntryTransportType::Quic
}
}
#[derive(Clone)]
struct TcpProxyForQUICSrc(Arc<TcpProxy<NatDstQUICConnector>>);
#[async_trait::async_trait]
impl TcpProxyForKcpSrcTrait for TcpProxyForQUICSrc {
    type Connector = NatDstQUICConnector;

    /// Borrow the wrapped proxy instance.
    fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>> {
        &self.0
    }

    /// A destination accepts proxied input only if it resolves to a known
    /// peer whose route info advertises a non-zero QUIC port.
    async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
        let peer_map = self.0.get_peer_manager().get_peer_map();
        let Some(peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
            return false;
        };
        match peer_map.get_route_peer_info(peer_id).await {
            Some(info) => matches!(info.quic_port, Some(port) if port > 0),
            None => false,
        }
    }
}
/// Source side of the QUIC TCP proxy: intercepts outbound TCP flows via the
/// packet pipelines and tunnels them to the destination peer over QUIC.
pub struct QUICProxySrc {
// Peer manager used to register packet pipelines and look up peers.
peer_manager: Arc<PeerManager>,
// The underlying smoltcp-based TCP proxy driven by `NatDstQUICConnector`.
tcp_proxy: TcpProxyForQUICSrc,
}
impl QUICProxySrc {
    /// Build a QUIC proxy source on top of the given peer manager.
    ///
    /// The connector only holds a `Weak` to the peer manager so the proxy
    /// does not extend its lifetime.
    pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
        let connector = NatDstQUICConnector {
            peer_mgr: Arc::downgrade(&peer_manager),
        };
        let proxy = TcpProxy::new(peer_manager.clone(), connector);
        Self {
            peer_manager,
            tcp_proxy: TcpProxyForQUICSrc(proxy),
        }
    }

    /// Register the proxy in both the NIC-side and peer-side packet
    /// pipelines, then start its accept loop (`false`: the listener is not
    /// bound to the TUN device).
    pub async fn start(&self) {
        let pm = &self.peer_manager;
        pm.add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone()))
            .await;
        pm.add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone()))
            .await;
        self.tcp_proxy.0.start(false).await.unwrap();
    }

    /// Handle to the inner `TcpProxy`, e.g. for the RPC listing service.
    pub fn get_tcp_proxy(&self) -> Arc<TcpProxy<NatDstQUICConnector>> {
        self.tcp_proxy.0.clone()
    }
}
/// Destination side of the QUIC TCP proxy: accepts QUIC connections from
/// source peers and relays their streams to the real NAT destination.
pub struct QUICProxyDst {
global_ctx: Arc<GlobalCtx>,
// QUIC server endpoint bound inside the instance's network namespace.
endpoint: Arc<quinn::Endpoint>,
// Active connections, keyed by the remote (source peer) address; exposed
// via `QUICProxyDstRpcService`.
proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
// Background tasks (accept loop + per-connection handlers), joined in the
// background by `join_joinset_background`.
tasks: Arc<Mutex<JoinSet<()>>>,
}
impl QUICProxyDst {
    /// Create the destination-side QUIC endpoint.
    ///
    /// Binds a QUIC server to an ephemeral UDP port inside the instance's
    /// network namespace and spawns a background joiner for the task set.
    /// `start()` must still be called to begin accepting connections.
    pub fn new(global_ctx: ArcGlobalCtx) -> Result<Self> {
        let _g = global_ctx.net_ns.guard();
        let (endpoint, _) = make_server_endpoint("0.0.0.0:0".parse().unwrap())
            .map_err(|e| anyhow::anyhow!("failed to create QUIC endpoint: {}", e))?;
        let tasks = Arc::new(Mutex::new(JoinSet::new()));
        join_joinset_background(tasks.clone(), "QUICProxyDst tasks".to_string());
        Ok(Self {
            global_ctx,
            endpoint: Arc::new(endpoint),
            proxy_entries: Arc::new(DashMap::new()),
            tasks,
        })
    }

    /// Spawn the accept loop as a background task.
    ///
    /// The loop holds only a `Weak` to the task set, so dropping the proxy
    /// lets the loop terminate instead of keeping itself alive.
    pub async fn start(&self) -> Result<()> {
        let endpoint = self.endpoint.clone();
        // FIX: was `Arc::downgrade(&self.tasks.clone())` — cloning the Arc
        // just to downgrade the temporary is redundant (clippy
        // `redundant_clone`); downgrade the field directly.
        let tasks = Arc::downgrade(&self.tasks);
        let ctx = self.global_ctx.clone();
        let cidr_set = Arc::new(CidrSet::new(ctx.clone()));
        let proxy_entries = self.proxy_entries.clone();
        let task = async move {
            loop {
                match endpoint.accept().await {
                    Some(conn) => {
                        let Some(tasks) = tasks.upgrade() else {
                            tracing::warn!(
                                "QUICProxyDst tasks is not available, stopping accept loop"
                            );
                            return;
                        };
                        tasks
                            .lock()
                            .unwrap()
                            .spawn(Self::handle_connection_with_timeout(
                                conn,
                                ctx.clone(),
                                cidr_set.clone(),
                                proxy_entries.clone(),
                            ));
                    }
                    None => {
                        // Endpoint was closed; nothing more to accept.
                        return;
                    }
                }
            }
        };
        self.tasks.lock().unwrap().spawn(task);
        Ok(())
    }

    /// The UDP address the QUIC endpoint is bound to (port is advertised to
    /// peers via the global context).
    pub fn local_addr(&self) -> Result<SocketAddr> {
        self.endpoint.local_addr().map_err(Into::into)
    }

    /// Run the connection handshake with a 10s deadline, then (on success)
    /// relay bytes bidirectionally until either side closes.
    ///
    /// The proxy-entry record for this remote address is removed when the
    /// handler exits, whatever the outcome.
    async fn handle_connection_with_timeout(
        conn: Incoming,
        ctx: Arc<GlobalCtx>,
        cidr_set: Arc<CidrSet>,
        proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
    ) {
        let remote_addr = conn.remote_address();
        defer!(
            proxy_entries.remove(&remote_addr);
        );
        // Only the handshake (accept_bi + dst connect) is bounded by the
        // timeout; the relay below may run for the connection's lifetime.
        let ret = timeout(
            std::time::Duration::from_secs(10),
            Self::handle_connection(conn, ctx, cidr_set, remote_addr, proxy_entries.clone()),
        )
        .await;
        match ret {
            Ok(Ok((mut quic_stream, mut tcp_stream))) => {
                let ret = copy_bidirectional(&mut quic_stream, &mut tcp_stream).await;
                tracing::info!(
                    "QUIC connection handled, result: {:?}, remote addr: {:?}",
                    ret,
                    quic_stream.connection.as_ref().map(|c| c.remote_address())
                );
            }
            Ok(Err(e)) => {
                tracing::error!("Failed to handle QUIC connection: {}", e);
            }
            Err(_) => {
                tracing::warn!("Timeout while handling QUIC connection");
            }
        }
    }

    /// Accept one QUIC connection, read the length-prefixed `ProxyDstInfo`
    /// from the first bi-stream, resolve/rewrite the NAT destination, and
    /// connect to it.
    ///
    /// Destination rewriting:
    /// - mapped-subnet CIDRs are translated back to the real IP;
    /// - when the dst is this node's own virtual IP and we run without a TUN
    ///   device, the connection is redirected to 127.0.0.1.
    ///
    /// A `TcpProxyEntry` keyed by the remote address tracks progress
    /// (ConnectingDst -> Connected) for the RPC listing.
    async fn handle_connection(
        incoming: Incoming,
        ctx: ArcGlobalCtx,
        cidr_set: Arc<CidrSet>,
        proxy_entry_key: SocketAddr,
        proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
    ) -> Result<(QUICStream, TcpStream)> {
        let conn = incoming.await.with_context(|| "accept failed")?;
        let addr = conn.remote_address();
        tracing::info!("Accepted QUIC connection from {}", addr);
        let (w, mut r) = conn.accept_bi().await.with_context(|| "accept_bi failed")?;
        // Wire format (written by NatDstQUICConnector): u8 length prefix,
        // then the protobuf-encoded ProxyDstInfo.
        let len = r
            .read_u8()
            .await
            .with_context(|| "failed to read proxy dst info buf len")?;
        let mut buf = vec![0u8; len as usize];
        r.read_exact(&mut buf)
            .await
            .with_context(|| "failed to read proxy dst info")?;
        let proxy_dst_info =
            ProxyDstInfo::decode(&buf[..]).with_context(|| "failed to decode proxy dst info")?;
        let dst_socket: SocketAddr = proxy_dst_info
            .dst_addr
            .map(Into::into)
            .ok_or_else(|| anyhow::anyhow!("no dst addr in proxy dst info"))?;
        let SocketAddr::V4(mut dst_socket) = dst_socket else {
            return Err(anyhow::anyhow!("NAT destination must be an IPv4 address").into());
        };
        // Translate mapped-subnet addresses back to the real destination IP.
        let mut real_ip = *dst_socket.ip();
        if cidr_set.contains_v4(*dst_socket.ip(), &mut real_ip) {
            dst_socket.set_ip(real_ip);
        }
        // no_tun mode: traffic to our own virtual IP must go to loopback.
        if Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address()) && ctx.no_tun() {
            dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
        }
        proxy_entries.insert(
            proxy_entry_key,
            TcpProxyEntry {
                src: Some(addr.into()),
                dst: Some(SocketAddr::V4(dst_socket).into()),
                start_time: chrono::Local::now().timestamp() as u64,
                state: TcpProxyEntryState::ConnectingDst.into(),
                transport_type: TcpProxyEntryTransportType::Quic.into(),
            },
        );
        let connector = NatDstTcpConnector {};
        let dst_stream = {
            // Connect from inside the instance's network namespace.
            let _g = ctx.net_ns.guard();
            connector
                .connect("0.0.0.0:0".parse().unwrap(), dst_socket.into())
                .await?
        };
        if let Some(mut e) = proxy_entries.get_mut(&proxy_entry_key) {
            e.state = TcpProxyEntryState::Connected.into();
        }
        let quic_stream = QUICStream {
            endpoint: None,
            connection: Some(conn),
            sender: w,
            receiver: r,
        };
        Ok((quic_stream, dst_stream))
    }
}
/// RPC service exposing the destination proxy's active connection table.
/// Holds only a `Weak` so the RPC registry never keeps proxy state alive.
#[derive(Clone)]
pub struct QUICProxyDstRpcService(Weak<DashMap<SocketAddr, TcpProxyEntry>>);
impl QUICProxyDstRpcService {
/// Build the service from a live `QUICProxyDst`, downgrading its entry map
/// so this handle does not extend the proxy's lifetime.
pub fn new(quic_proxy_dst: &QUICProxyDst) -> Self {
Self(Arc::downgrade(&quic_proxy_dst.proxy_entries))
}
}
#[async_trait::async_trait]
impl TcpProxyRpc for QUICProxyDstRpcService {
    type Controller = BaseController;

    /// List the currently tracked QUIC proxy connections.
    ///
    /// Returns an empty list when the proxy has already been dropped (the
    /// weak handle no longer upgrades).
    async fn list_tcp_proxy_entry(
        &self,
        _: BaseController,
        _request: ListTcpProxyEntryRequest,
    ) -> std::result::Result<ListTcpProxyEntryResponse, rpc_types::error::Error> {
        let mut reply = ListTcpProxyEntryResponse::default();
        if let Some(entries) = self.0.upgrade() {
            reply.entries = entries.iter().map(|e| e.value().clone()).collect();
        }
        Ok(reply)
    }
}
+1 -4
View File
@@ -237,12 +237,9 @@ impl AsyncTcpConnector for Socks5KcpConnector {
let Some(kcp_endpoint) = self.kcp_endpoint.upgrade() else { let Some(kcp_endpoint) = self.kcp_endpoint.upgrade() else {
return Err(anyhow::anyhow!("kcp endpoint is not ready").into()); return Err(anyhow::anyhow!("kcp endpoint is not ready").into());
}; };
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
return Err(anyhow::anyhow!("peer mgr is not ready").into());
};
let c = NatDstKcpConnector { let c = NatDstKcpConnector {
kcp_endpoint, kcp_endpoint,
peer_mgr, peer_mgr: self.peer_mgr.clone(),
}; };
println!("connect to kcp endpoint, addr = {:?}", addr); println!("connect to kcp endpoint, addr = {:?}", addr);
let ret = c let ret = c
+38 -21
View File
@@ -52,6 +52,7 @@ pub(crate) trait NatDstConnector: Send + Sync + Clone + 'static {
global_ctx: &GlobalCtx, global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader, hdr: &PeerManagerHeader,
ipv4: &Ipv4Packet, ipv4: &Ipv4Packet,
real_dst_ip: &mut Ipv4Addr,
) -> bool; ) -> bool;
fn transport_type(&self) -> TcpProxyEntryTransportType; fn transport_type(&self) -> TcpProxyEntryTransportType;
} }
@@ -99,10 +100,11 @@ impl NatDstConnector for NatDstTcpConnector {
global_ctx: &GlobalCtx, global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader, hdr: &PeerManagerHeader,
ipv4: &Ipv4Packet, ipv4: &Ipv4Packet,
real_dst_ip: &mut Ipv4Addr,
) -> bool { ) -> bool {
let is_exit_node = hdr.is_exit_node(); let is_exit_node = hdr.is_exit_node();
if !cidr_set.contains_v4(ipv4.get_destination()) if !cidr_set.contains_v4(ipv4.get_destination(), real_dst_ip)
&& !is_exit_node && !is_exit_node
&& !(global_ctx.no_tun() && !(global_ctx.no_tun()
&& Some(ipv4.get_destination()) && Some(ipv4.get_destination())
@@ -125,7 +127,8 @@ type NatDstEntryState = TcpProxyEntryState;
pub struct NatDstEntry { pub struct NatDstEntry {
id: uuid::Uuid, id: uuid::Uuid,
src: SocketAddr, src: SocketAddr,
dst: SocketAddr, real_dst: SocketAddr,
mapped_dst: SocketAddr,
start_time: Instant, start_time: Instant,
start_time_local: chrono::DateTime<chrono::Local>, start_time_local: chrono::DateTime<chrono::Local>,
tasks: Mutex<JoinSet<()>>, tasks: Mutex<JoinSet<()>>,
@@ -133,11 +136,12 @@ pub struct NatDstEntry {
} }
impl NatDstEntry { impl NatDstEntry {
pub fn new(src: SocketAddr, dst: SocketAddr) -> Self { pub fn new(src: SocketAddr, real_dst: SocketAddr, mapped_dst: SocketAddr) -> Self {
Self { Self {
id: uuid::Uuid::new_v4(), id: uuid::Uuid::new_v4(),
src, src,
dst, real_dst,
mapped_dst,
start_time: Instant::now(), start_time: Instant::now(),
start_time_local: chrono::Local::now(), start_time_local: chrono::Local::now(),
tasks: Mutex::new(JoinSet::new()), tasks: Mutex::new(JoinSet::new()),
@@ -148,7 +152,7 @@ impl NatDstEntry {
fn into_pb(&self, transport_type: TcpProxyEntryTransportType) -> TcpProxyEntry { fn into_pb(&self, transport_type: TcpProxyEntryTransportType) -> TcpProxyEntry {
TcpProxyEntry { TcpProxyEntry {
src: Some(self.src.clone().into()), src: Some(self.src.clone().into()),
dst: Some(self.dst.clone().into()), dst: Some(self.real_dst.clone().into()),
start_time: self.start_time_local.timestamp() as u64, start_time: self.start_time_local.timestamp() as u64,
state: self.state.load().into(), state: self.state.load().into(),
transport_type: transport_type.into(), transport_type: transport_type.into(),
@@ -396,7 +400,7 @@ impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
drop(entry); drop(entry);
assert_eq!(nat_entry.src, dst_addr); assert_eq!(nat_entry.src, dst_addr);
let IpAddr::V4(ip) = nat_entry.dst.ip() else { let IpAddr::V4(ip) = nat_entry.mapped_dst.ip() else {
panic!("v4 nat entry src ip is not v4"); panic!("v4 nat entry src ip is not v4");
}; };
@@ -416,7 +420,7 @@ impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
let dst = ip_packet.get_destination(); let dst = ip_packet.get_destination();
let mut tcp_packet = MutableTcpPacket::new(ip_packet.payload_mut()).unwrap(); let mut tcp_packet = MutableTcpPacket::new(ip_packet.payload_mut()).unwrap();
tcp_packet.set_source(nat_entry.dst.port()); tcp_packet.set_source(nat_entry.real_dst.port());
Self::update_tcp_packet_checksum(&mut tcp_packet, &ip, &dst); Self::update_tcp_packet_checksum(&mut tcp_packet, &ip, &dst);
drop(tcp_packet); drop(tcp_packet);
@@ -537,7 +541,6 @@ impl<C: NatDstConnector> TcpProxy<C> {
} }
} }
tracing::error!("smoltcp stack sink exited"); tracing::error!("smoltcp stack sink exited");
panic!("smoltcp stack sink exited");
}); });
let peer_mgr = self.peer_manager.clone(); let peer_mgr = self.peer_manager.clone();
@@ -559,7 +562,6 @@ impl<C: NatDstConnector> TcpProxy<C> {
} }
} }
tracing::error!("smoltcp stack stream exited"); tracing::error!("smoltcp stack stream exited");
panic!("smoltcp stack stream exited");
}); });
let interface_config = smoltcp::iface::Config::new(smoltcp::wire::HardwareAddress::Ip); let interface_config = smoltcp::iface::Config::new(smoltcp::wire::HardwareAddress::Ip);
@@ -607,7 +609,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
let mut tcp_listener = self.get_proxy_listener().await?; let mut tcp_listener = self.get_proxy_listener().await?;
let global_ctx = self.global_ctx.clone(); let global_ctx = self.global_ctx.clone();
let tasks = self.tasks.clone(); let tasks = Arc::downgrade(&self.tasks);
let syn_map = self.syn_map.clone(); let syn_map = self.syn_map.clone();
let conn_map = self.conn_map.clone(); let conn_map = self.conn_map.clone();
let addr_conn_map = self.addr_conn_map.clone(); let addr_conn_map = self.addr_conn_map.clone();
@@ -644,7 +646,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
tracing::info!( tracing::info!(
?socket_addr, ?socket_addr,
"tcp connection accepted for proxy, nat dst: {:?}", "tcp connection accepted for proxy, nat dst: {:?}",
entry.dst entry.real_dst
); );
assert_eq!(entry.state.load(), NatDstEntryState::SynReceived); assert_eq!(entry.state.load(), NatDstEntryState::SynReceived);
@@ -658,6 +660,11 @@ impl<C: NatDstConnector> TcpProxy<C> {
let old_nat_val = conn_map.insert(entry_clone.id, entry_clone.clone()); let old_nat_val = conn_map.insert(entry_clone.id, entry_clone.clone());
assert!(old_nat_val.is_none()); assert!(old_nat_val.is_none());
let Some(tasks) = tasks.upgrade() else {
tracing::error!("tcp proxy tasks is dropped, exit accept loop");
break;
};
tasks.lock().unwrap().spawn(Self::connect_to_nat_dst( tasks.lock().unwrap().spawn(Self::connect_to_nat_dst(
connector.clone(), connector.clone(),
global_ctx.clone(), global_ctx.clone(),
@@ -697,14 +704,14 @@ impl<C: NatDstConnector> TcpProxy<C> {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e); tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
} }
let nat_dst = if Some(nat_entry.dst.ip()) let nat_dst = if Some(nat_entry.real_dst.ip())
== global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address())) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
{ {
format!("127.0.0.1:{}", nat_entry.dst.port()) format!("127.0.0.1:{}", nat_entry.real_dst.port())
.parse() .parse()
.unwrap() .unwrap()
} else { } else {
nat_entry.dst nat_entry.real_dst
}; };
let _guard = global_ctx.net_ns.guard(); let _guard = global_ctx.net_ns.guard();
@@ -818,10 +825,15 @@ impl<C: NatDstConnector> TcpProxy<C> {
return None; return None;
} }
if !self let mut real_dst_ip = ipv4.get_destination();
.connector
.check_packet_from_peer(&self.cidr_set, &self.global_ctx, &hdr, &ipv4) if !self.connector.check_packet_from_peer(
{ &self.cidr_set,
&self.global_ctx,
&hdr,
&ipv4,
&mut real_dst_ip,
) {
return None; return None;
} }
@@ -839,12 +851,13 @@ impl<C: NatDstConnector> TcpProxy<C> {
if is_tcp_syn && !is_tcp_ack { if is_tcp_syn && !is_tcp_ack {
let dest_ip = ip_packet.get_destination(); let dest_ip = ip_packet.get_destination();
let dest_port = tcp_packet.get_destination(); let dest_port = tcp_packet.get_destination();
let dst = SocketAddr::V4(SocketAddrV4::new(dest_ip, dest_port)); let mapped_dst = SocketAddr::V4(SocketAddrV4::new(dest_ip, dest_port));
let real_dst = SocketAddr::V4(SocketAddrV4::new(real_dst_ip, dest_port));
let old_val = self let old_val = self
.syn_map .syn_map
.insert(src, Arc::new(NatDstEntry::new(src, dst))); .insert(src, Arc::new(NatDstEntry::new(src, real_dst, mapped_dst)));
tracing::info!(src = ?src, dst = ?dst, old_entry = ?old_val, "tcp syn received"); tracing::info!(src = ?src, ?real_dst, ?mapped_dst, old_entry = ?old_val, "tcp syn received");
} else if !self.addr_conn_map.contains_key(&src) && !self.syn_map.contains_key(&src) { } else if !self.addr_conn_map.contains_key(&src) && !self.syn_map.contains_key(&src) {
// if not in syn map and addr conn map, may forwarding n2n packet // if not in syn map and addr conn map, may forwarding n2n packet
return None; return None;
@@ -889,6 +902,10 @@ impl<C: NatDstConnector> TcpProxy<C> {
} }
entries entries
} }
pub fn get_transport_type(&self) -> TcpProxyEntryTransportType {
self.connector.transport_type()
}
} }
#[derive(Clone)] #[derive(Clone)]
+13 -2
View File
@@ -139,6 +139,8 @@ impl UdpNatEntry {
self: Arc<Self>, self: Arc<Self>,
mut packet_sender: Sender<ZCPacket>, mut packet_sender: Sender<ZCPacket>,
virtual_ipv4: Ipv4Addr, virtual_ipv4: Ipv4Addr,
real_ipv4: Ipv4Addr,
mapped_ipv4: Ipv4Addr,
) { ) {
let (s, mut r) = tachyonix::channel(128); let (s, mut r) = tachyonix::channel(128);
@@ -197,6 +199,10 @@ impl UdpNatEntry {
src_v4.set_ip(virtual_ipv4); src_v4.set_ip(virtual_ipv4);
} }
if *src_v4.ip() == real_ipv4 {
src_v4.set_ip(mapped_ipv4);
}
let Ok(_) = Self::compose_ipv4_packet( let Ok(_) = Self::compose_ipv4_packet(
&self_clone, &self_clone,
&mut packet_sender, &mut packet_sender,
@@ -266,7 +272,10 @@ impl UdpProxy {
return None; return None;
} }
if !self.cidr_set.contains_v4(ipv4.get_destination()) let mut real_dst_ip = ipv4.get_destination();
if !self
.cidr_set
.contains_v4(ipv4.get_destination(), &mut real_dst_ip)
&& !is_exit_node && !is_exit_node
&& !(self.global_ctx.no_tun() && !(self.global_ctx.no_tun()
&& Some(ipv4.get_destination()) && Some(ipv4.get_destination())
@@ -322,6 +331,8 @@ impl UdpProxy {
nat_entry.clone(), nat_entry.clone(),
self.sender.clone(), self.sender.clone(),
self.global_ctx.get_ipv4().map(|x| x.address())?, self.global_ctx.get_ipv4().map(|x| x.address())?,
real_dst_ip,
ipv4.get_destination(),
))); )));
} }
@@ -335,7 +346,7 @@ impl UdpProxy {
.parse() .parse()
.unwrap() .unwrap()
} else { } else {
SocketAddr::new(ipv4.get_destination().into(), udp_packet.get_destination()) SocketAddr::new(real_dst_ip.into(), udp_packet.get_destination())
}; };
let send_ret = { let send_ret = {
@@ -298,12 +298,13 @@ impl NicPacketFilter for MagicDnsServerInstanceData {
#[async_trait::async_trait] #[async_trait::async_trait]
impl RpcServerHook for MagicDnsServerInstanceData { impl RpcServerHook for MagicDnsServerInstanceData {
async fn on_new_client(&self, tunnel_info: Option<TunnelInfo>) { async fn on_new_client(&self, tunnel_info: Option<TunnelInfo>)-> Result<Option<TunnelInfo>, anyhow::Error> {
println!("New client connected: {:?}", tunnel_info); tracing::info!(?tunnel_info, "New client connected");
Ok(tunnel_info)
} }
async fn on_client_disconnected(&self, tunnel_info: Option<TunnelInfo>) { async fn on_client_disconnected(&self, tunnel_info: Option<TunnelInfo>) {
println!("Client disconnected: {:?}", tunnel_info); tracing::info!(?tunnel_info, "Client disconnected");
let Some(tunnel_info) = tunnel_info else { let Some(tunnel_info) = tunnel_info else {
return; return;
}; };
+280 -9
View File
@@ -1,25 +1,26 @@
use std::any::Any; use std::any::Any;
use std::collections::HashSet; use std::collections::HashSet;
use std::net::Ipv4Addr; use std::net::{IpAddr, Ipv4Addr};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use anyhow::Context; use anyhow::Context;
use cidr::Ipv4Inet; use cidr::{IpCidr, Ipv4Inet};
use tokio::task::JoinHandle;
use tokio::{sync::Mutex, task::JoinSet}; use tokio::{sync::Mutex, task::JoinSet};
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use crate::common::config::ConfigLoader; use crate::common::config::ConfigLoader;
use crate::common::error::Error; use crate::common::error::Error;
use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx, GlobalCtxEvent}; use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx, GlobalCtxEvent};
use crate::common::scoped_task::ScopedTask;
use crate::common::PeerId; use crate::common::PeerId;
use crate::connector::direct::DirectConnectorManager; use crate::connector::direct::DirectConnectorManager;
use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManager}; use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManager};
use crate::connector::udp_hole_punch::UdpHolePunchConnector; use crate::connector::udp_hole_punch::UdpHolePunchConnector;
use crate::gateway::icmp_proxy::IcmpProxy; use crate::gateway::icmp_proxy::IcmpProxy;
use crate::gateway::kcp_proxy::{KcpProxyDst, KcpProxyDstRpcService, KcpProxySrc}; use crate::gateway::kcp_proxy::{KcpProxyDst, KcpProxyDstRpcService, KcpProxySrc};
use crate::gateway::quic_proxy::{QUICProxyDst, QUICProxyDstRpcService, QUICProxySrc};
use crate::gateway::tcp_proxy::{NatDstTcpConnector, TcpProxy, TcpProxyRpcService}; use crate::gateway::tcp_proxy::{NatDstTcpConnector, TcpProxy, TcpProxyRpcService};
use crate::gateway::udp_proxy::UdpProxy; use crate::gateway::udp_proxy::UdpProxy;
use crate::peer_center::instance::PeerCenterInstance; use crate::peer_center::instance::PeerCenterInstance;
@@ -29,8 +30,9 @@ use crate::peers::rpc_service::PeerManagerRpcService;
use crate::peers::{create_packet_recv_chan, recv_packet_from_chan, PacketRecvChanReceiver}; use crate::peers::{create_packet_recv_chan, recv_packet_from_chan, PacketRecvChanReceiver};
use crate::proto::cli::VpnPortalRpc; use crate::proto::cli::VpnPortalRpc;
use crate::proto::cli::{GetVpnPortalInfoRequest, GetVpnPortalInfoResponse, VpnPortalInfo}; use crate::proto::cli::{GetVpnPortalInfoRequest, GetVpnPortalInfoResponse, VpnPortalInfo};
use crate::proto::common::TunnelInfo;
use crate::proto::peer_rpc::PeerCenterRpcServer; use crate::proto::peer_rpc::PeerCenterRpcServer;
use crate::proto::rpc_impl::standalone::StandAloneServer; use crate::proto::rpc_impl::standalone::{RpcServerHook, StandAloneServer};
use crate::proto::rpc_types; use crate::proto::rpc_types;
use crate::proto::rpc_types::controller::BaseController; use crate::proto::rpc_types::controller::BaseController;
use crate::tunnel::tcp::TcpTunnelListener; use crate::tunnel::tcp::TcpTunnelListener;
@@ -69,8 +71,7 @@ impl IpProxy {
} }
async fn start(&self) -> Result<(), Error> { async fn start(&self) -> Result<(), Error> {
if (self.global_ctx.get_proxy_cidrs().is_empty() if (self.global_ctx.config.get_proxy_cidrs().is_empty()
|| self.global_ctx.proxy_forward_by_system()
|| self.started.load(Ordering::Relaxed)) || self.started.load(Ordering::Relaxed))
&& !self.global_ctx.enable_exit_node() && !self.global_ctx.enable_exit_node()
&& !self.global_ctx.no_tun() && !self.global_ctx.no_tun()
@@ -78,6 +79,12 @@ impl IpProxy {
return Ok(()); return Ok(());
} }
// Actually, if this node is enabled as an exit node,
// we still can use the system stack to forward packets.
if self.global_ctx.proxy_forward_by_system() && !self.global_ctx.no_tun() {
return Ok(());
}
self.started.store(true, Ordering::Relaxed); self.started.store(true, Ordering::Relaxed);
self.tcp_proxy.start(true).await?; self.tcp_proxy.start(true).await?;
if let Err(e) = self.icmp_proxy.start().await { if let Err(e) = self.icmp_proxy.start().await {
@@ -112,7 +119,7 @@ impl NicCtx {
} }
struct MagicDnsContainer { struct MagicDnsContainer {
dns_runner_task: JoinHandle<()>, dns_runner_task: ScopedTask<()>,
dns_runner_cancel_token: CancellationToken, dns_runner_cancel_token: CancellationToken,
} }
@@ -133,7 +140,7 @@ impl NicCtxContainer {
Self { Self {
nic_ctx: Some(Box::new(nic_ctx)), nic_ctx: Some(Box::new(nic_ctx)),
magic_dns: Some(MagicDnsContainer { magic_dns: Some(MagicDnsContainer {
dns_runner_task: task, dns_runner_task: task.into(),
dns_runner_cancel_token: token, dns_runner_cancel_token: token,
}), }),
} }
@@ -155,6 +162,58 @@ impl NicCtxContainer {
type ArcNicCtx = Arc<Mutex<Option<NicCtxContainer>>>; type ArcNicCtx = Arc<Mutex<Option<NicCtxContainer>>>;
pub struct InstanceRpcServerHook {
rpc_portal_whitelist: Vec<IpCidr>,
}
impl InstanceRpcServerHook {
pub fn new(rpc_portal_whitelist: Option<Vec<IpCidr>>) -> Self {
let rpc_portal_whitelist = rpc_portal_whitelist
.unwrap_or_else(|| vec!["127.0.0.0/8".parse().unwrap(), "::1/128".parse().unwrap()]);
InstanceRpcServerHook {
rpc_portal_whitelist,
}
}
}
#[async_trait::async_trait]
impl RpcServerHook for InstanceRpcServerHook {
async fn on_new_client(
&self,
tunnel_info: Option<TunnelInfo>,
) -> Result<Option<TunnelInfo>, anyhow::Error> {
let tunnel_info = tunnel_info.ok_or_else(|| anyhow::anyhow!("tunnel info is None"))?;
let remote_url = tunnel_info
.remote_addr
.clone()
.ok_or_else(|| anyhow::anyhow!("remote_addr is None"))?;
let url_str = &remote_url.url;
let url = url::Url::parse(url_str)
.map_err(|e| anyhow::anyhow!("Failed to parse remote URL '{}': {}", url_str, e))?;
let host = url
.host_str()
.ok_or_else(|| anyhow::anyhow!("No host found in remote URL '{}'", url_str))?;
let ip_addr: IpAddr = host
.parse()
.map_err(|e| anyhow::anyhow!("Failed to parse IP address '{}': {}", host, e))?;
for cidr in &self.rpc_portal_whitelist {
if cidr.contains(&ip_addr) {
return Ok(Some(tunnel_info));
}
}
return Err(anyhow::anyhow!(
"Rpc portal client IP {} not in whitelist: {:?}, ignoring client.",
ip_addr,
self.rpc_portal_whitelist
));
}
}
pub struct Instance { pub struct Instance {
inst_name: String, inst_name: String,
@@ -174,6 +233,9 @@ pub struct Instance {
kcp_proxy_src: Option<KcpProxySrc>, kcp_proxy_src: Option<KcpProxySrc>,
kcp_proxy_dst: Option<KcpProxyDst>, kcp_proxy_dst: Option<KcpProxyDst>,
quic_proxy_src: Option<QUICProxySrc>,
quic_proxy_dst: Option<QUICProxyDst>,
peer_center: Arc<PeerCenterInstance>, peer_center: Arc<PeerCenterInstance>,
vpn_portal: Arc<Mutex<Box<dyn VpnPortal>>>, vpn_portal: Arc<Mutex<Box<dyn VpnPortal>>>,
@@ -254,6 +316,9 @@ impl Instance {
kcp_proxy_src: None, kcp_proxy_src: None,
kcp_proxy_dst: None, kcp_proxy_dst: None,
quic_proxy_src: None,
quic_proxy_dst: None,
peer_center, peer_center,
vpn_portal: Arc::new(Mutex::new(Box::new(vpn_portal_inst))), vpn_portal: Arc::new(Mutex::new(Box::new(vpn_portal_inst))),
@@ -341,7 +406,7 @@ impl Instance {
// Warning, if there is an IP conflict in the network when using DHCP, the IP will be automatically changed. // Warning, if there is an IP conflict in the network when using DHCP, the IP will be automatically changed.
fn check_dhcp_ip_conflict(&self) { fn check_dhcp_ip_conflict(&self) {
use rand::Rng; use rand::Rng;
let peer_manager_c = self.peer_manager.clone(); let peer_manager_c = Arc::downgrade(&self.peer_manager.clone());
let global_ctx_c = self.get_global_ctx(); let global_ctx_c = self.get_global_ctx();
let nic_ctx = self.nic_ctx.clone(); let nic_ctx = self.nic_ctx.clone();
let _peer_packet_receiver = self.peer_packet_receiver.clone(); let _peer_packet_receiver = self.peer_packet_receiver.clone();
@@ -352,6 +417,11 @@ impl Instance {
loop { loop {
tokio::time::sleep(std::time::Duration::from_secs(next_sleep_time)).await; tokio::time::sleep(std::time::Duration::from_secs(next_sleep_time)).await;
let Some(peer_manager_c) = peer_manager_c.upgrade() else {
tracing::warn!("peer manager is dropped, stop dhcp check.");
return;
};
// do not allocate ip if no peer connected // do not allocate ip if no peer connected
let routes = peer_manager_c.list_routes().await; let routes = peer_manager_c.list_routes().await;
if routes.is_empty() { if routes.is_empty() {
@@ -499,6 +569,20 @@ impl Instance {
self.kcp_proxy_dst = Some(dst_proxy); self.kcp_proxy_dst = Some(dst_proxy);
} }
if self.global_ctx.get_flags().enable_quic_proxy {
let quic_src = QUICProxySrc::new(self.get_peer_manager()).await;
quic_src.start().await;
self.quic_proxy_src = Some(quic_src);
}
if !self.global_ctx.get_flags().disable_quic_input {
let quic_dst = QUICProxyDst::new(self.global_ctx.clone())?;
quic_dst.start().await?;
self.global_ctx
.set_quic_proxy_port(Some(quic_dst.local_addr()?.port()));
self.quic_proxy_dst = Some(quic_dst);
}
// run after tun device created, so listener can bind to tun device, which may be required by win 10 // run after tun device created, so listener can bind to tun device, which may be required by win 10
self.ip_proxy = Some(IpProxy::new( self.ip_proxy = Some(IpProxy::new(
self.get_global_ctx(), self.get_global_ctx(),
@@ -674,6 +758,24 @@ impl Instance {
); );
} }
if let Some(quic_proxy) = self.quic_proxy_src.as_ref() {
s.registry().register(
TcpProxyRpcServer::new(TcpProxyRpcService::new(quic_proxy.get_tcp_proxy())),
"quic_src",
);
}
if let Some(quic_proxy) = self.quic_proxy_dst.as_ref() {
s.registry().register(
TcpProxyRpcServer::new(QUICProxyDstRpcService::new(quic_proxy)),
"quic_dst",
);
}
s.set_hook(Arc::new(InstanceRpcServerHook::new(
self.global_ctx.config.get_rpc_portal_whitelist(),
)));
let _g = self.global_ctx.net_ns.guard(); let _g = self.global_ctx.net_ns.guard();
Ok(s.serve().await.with_context(|| "rpc server start failed")?) Ok(s.serve().await.with_context(|| "rpc server start failed")?)
} }
@@ -725,4 +827,173 @@ impl Instance {
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx, magic_dns_runner).await; Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx, magic_dns_runner).await;
Ok(()) Ok(())
} }
pub async fn clear_resources(&mut self) {
self.peer_manager.clear_resources().await;
let _ = self.nic_ctx.lock().await.take();
if let Some(rpc_server) = self.rpc_server.take() {
rpc_server.registry().unregister_all();
};
}
}
impl Drop for Instance {
fn drop(&mut self) {
let my_peer_id = self.peer_manager.my_peer_id();
let pm = Arc::downgrade(&self.peer_manager);
let nic_ctx = self.nic_ctx.clone();
if let Some(rpc_server) = self.rpc_server.take() {
rpc_server.registry().unregister_all();
};
tokio::spawn(async move {
nic_ctx.lock().await.take();
if let Some(pm) = pm.upgrade() {
pm.clear_resources().await;
};
let now = std::time::Instant::now();
while now.elapsed().as_secs() < 1 {
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
if pm.strong_count() == 0 {
tracing::info!(
"Instance for peer {} dropped, all resources cleared.",
my_peer_id
);
return;
}
}
debug_assert!(
false,
"Instance for peer {} dropped, but resources not cleared in 1 seconds.",
my_peer_id
);
});
}
}
#[cfg(test)]
mod tests {
use crate::{
instance::instance::InstanceRpcServerHook, proto::rpc_impl::standalone::RpcServerHook,
};
#[tokio::test]
async fn test_rpc_portal_whitelist() {
use cidr::IpCidr;
struct TestCase {
remote_url: String,
whitelist: Option<Vec<IpCidr>>,
expected_result: bool,
}
let test_cases: Vec<TestCase> = vec![
// Test default whitelist (127.0.0.0/8, ::1/128)
TestCase {
remote_url: "tcp://127.0.0.1:15888".to_string(),
whitelist: None,
expected_result: true,
},
TestCase {
remote_url: "tcp://127.1.2.3:15888".to_string(),
whitelist: None,
expected_result: true,
},
TestCase {
remote_url: "tcp://192.168.1.1:15888".to_string(),
whitelist: None,
expected_result: false,
},
// Test custom whitelist
TestCase {
remote_url: "tcp://192.168.1.10:15888".to_string(),
whitelist: Some(vec![
"192.168.1.0/24".parse().unwrap(),
"10.0.0.0/8".parse().unwrap(),
]),
expected_result: true,
},
TestCase {
remote_url: "tcp://10.1.2.3:15888".to_string(),
whitelist: Some(vec![
"192.168.1.0/24".parse().unwrap(),
"10.0.0.0/8".parse().unwrap(),
]),
expected_result: true,
},
TestCase {
remote_url: "tcp://172.16.0.1:15888".to_string(),
whitelist: Some(vec![
"192.168.1.0/24".parse().unwrap(),
"10.0.0.0/8".parse().unwrap(),
]),
expected_result: false,
},
// Test empty whitelist (should reject all connections)
TestCase {
remote_url: "tcp://127.0.0.1:15888".to_string(),
whitelist: Some(vec![]),
expected_result: false,
},
// Test broad whitelist (0.0.0.0/0 and ::/0 accept all IP addresses)
TestCase {
remote_url: "tcp://8.8.8.8:15888".to_string(),
whitelist: Some(vec!["0.0.0.0/0".parse().unwrap()]),
expected_result: true,
},
// Test edge case: specific IP whitelist
TestCase {
remote_url: "tcp://192.168.1.5:15888".to_string(),
whitelist: Some(vec!["192.168.1.5/32".parse().unwrap()]),
expected_result: true,
},
TestCase {
remote_url: "tcp://192.168.1.6:15888".to_string(),
whitelist: Some(vec!["192.168.1.5/32".parse().unwrap()]),
expected_result: false,
},
// Test invalid URL (this case will fail during URL parsing)
TestCase {
remote_url: "invalid-url".to_string(),
whitelist: None,
expected_result: false,
},
// Test URL without IP address (this case will fail during IP parsing)
TestCase {
remote_url: "tcp://localhost:15888".to_string(),
whitelist: None,
expected_result: false,
},
];
for case in test_cases {
let hook = InstanceRpcServerHook::new(case.whitelist.clone());
let tunnel_info = Some(crate::proto::common::TunnelInfo {
remote_addr: Some(crate::proto::common::Url {
url: case.remote_url.clone(),
}),
..Default::default()
});
let result = hook.on_new_client(tunnel_info).await;
if case.expected_result {
assert!(
result.is_ok(),
"Expected success for remote_url:{},whitelist:{:?},but got: {:?}",
case.remote_url,
case.whitelist,
result
);
} else {
assert!(
result.is_err(),
"Expected failure for remote_url:{},whitelist:{:?},but got: {:?}",
case.remote_url,
case.whitelist,
result
);
}
}
}
} }
+15 -4
View File
@@ -1,4 +1,9 @@
use std::{fmt::Debug, net::IpAddr, str::FromStr, sync::Arc}; use std::{
fmt::Debug,
net::IpAddr,
str::FromStr,
sync::{Arc, Weak},
};
use anyhow::Context; use anyhow::Context;
use async_trait::async_trait; use async_trait::async_trait;
@@ -89,7 +94,7 @@ pub struct ListenerManager<H> {
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
net_ns: NetNS, net_ns: NetNS,
listeners: Vec<ListenerFactory>, listeners: Vec<ListenerFactory>,
peer_manager: Arc<H>, peer_manager: Weak<H>,
tasks: JoinSet<()>, tasks: JoinSet<()>,
} }
@@ -100,7 +105,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
global_ctx: global_ctx.clone(), global_ctx: global_ctx.clone(),
net_ns: global_ctx.net_ns.clone(), net_ns: global_ctx.net_ns.clone(),
listeners: Vec::new(), listeners: Vec::new(),
peer_manager, peer_manager: Arc::downgrade(&peer_manager),
tasks: JoinSet::new(), tasks: JoinSet::new(),
} }
} }
@@ -137,6 +142,8 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
if self.global_ctx.config.get_flags().enable_ipv6 if self.global_ctx.config.get_flags().enable_ipv6
&& !is_url_host_ipv6(&l) && !is_url_host_ipv6(&l)
&& is_url_host_unspecified(&l) && is_url_host_unspecified(&l)
// quic enables dual-stack by default, may conflict with v4 listener
&& l.scheme() != "quic"
{ {
let mut ipv6_listener = l.clone(); let mut ipv6_listener = l.clone();
ipv6_listener ipv6_listener
@@ -169,7 +176,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
#[tracing::instrument(skip(creator))] #[tracing::instrument(skip(creator))]
async fn run_listener( async fn run_listener(
creator: Arc<ListenerCreator>, creator: Arc<ListenerCreator>,
peer_manager: Arc<H>, peer_manager: Weak<H>,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
) { ) {
loop { loop {
@@ -221,6 +228,10 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
let peer_manager = peer_manager.clone(); let peer_manager = peer_manager.clone();
let global_ctx = global_ctx.clone(); let global_ctx = global_ctx.clone();
tokio::spawn(async move { tokio::spawn(async move {
let Some(peer_manager) = peer_manager.upgrade() else {
tracing::error!("peer manager is gone, cannot handle tunnel");
return;
};
let server_ret = peer_manager.handle_tunnel(ret).await; let server_ret = peer_manager.handle_tunnel(ret).await;
if let Err(e) = &server_ret { if let Err(e) = &server_ret {
global_ctx.issue_event(GlobalCtxEvent::ConnectionError( global_ctx.issue_event(GlobalCtxEvent::ConnectionError(
+15 -5
View File
@@ -1,4 +1,5 @@
use std::{ use std::{
collections::BTreeSet,
io, io,
net::Ipv4Addr, net::Ipv4Addr,
pin::Pin, pin::Pin,
@@ -569,26 +570,26 @@ impl NicCtx {
let ifname = nic.ifname().to_owned(); let ifname = nic.ifname().to_owned();
self.tasks.spawn(async move { self.tasks.spawn(async move {
let mut cur_proxy_cidrs = vec![]; let mut cur_proxy_cidrs = BTreeSet::new();
loop { loop {
let mut proxy_cidrs = vec![]; let mut proxy_cidrs = BTreeSet::new();
let routes = peer_mgr.list_routes().await; let routes = peer_mgr.list_routes().await;
for r in routes { for r in routes {
for cidr in r.proxy_cidrs { for cidr in r.proxy_cidrs {
let Ok(cidr) = cidr.parse::<cidr::Ipv4Cidr>() else { let Ok(cidr) = cidr.parse::<cidr::Ipv4Cidr>() else {
continue; continue;
}; };
proxy_cidrs.push(cidr); proxy_cidrs.insert(cidr);
} }
} }
// add vpn portal cidr to proxy_cidrs // add vpn portal cidr to proxy_cidrs
if let Some(vpn_cfg) = global_ctx.config.get_vpn_portal_config() { if let Some(vpn_cfg) = global_ctx.config.get_vpn_portal_config() {
proxy_cidrs.push(vpn_cfg.client_cidr); proxy_cidrs.insert(vpn_cfg.client_cidr);
} }
if let Some(routes) = global_ctx.config.get_routes() { if let Some(routes) = global_ctx.config.get_routes() {
// if has manual routes, just override entire proxy_cidrs // if has manual routes, just override entire proxy_cidrs
proxy_cidrs = routes; proxy_cidrs = routes.into_iter().collect();
} }
// if route is in cur_proxy_cidrs but not in proxy_cidrs, delete it. // if route is in cur_proxy_cidrs but not in proxy_cidrs, delete it.
@@ -657,6 +658,15 @@ impl NicCtx {
let _ = RegistryManager::reg_change_catrgory_in_profile(&dev_name); let _ = RegistryManager::reg_change_catrgory_in_profile(&dev_name);
} }
#[cfg(any(target_os = "macos", target_os = "freebsd"))]
{
// remove the 10.0.0.0/24 route (which is added by rust-tun by default)
let _ = nic
.ifcfg
.remove_ipv4_route(&nic.ifname(), "10.0.0.0".parse().unwrap(), 24)
.await;
}
self.global_ctx self.global_ctx
.issue_event(GlobalCtxEvent::TunDeviceReady(nic.ifname().to_string())); .issue_event(GlobalCtxEvent::TunDeviceReady(nic.ifname().to_string()));
ret ret
+564
View File
@@ -0,0 +1,564 @@
use std::{collections::BTreeMap, sync::Arc};
use dashmap::DashMap;
use crate::{
common::{
config::{ConfigLoader, TomlConfigLoader},
global_ctx::{EventBusSubscriber, GlobalCtxEvent},
scoped_task::ScopedTask,
},
launcher::{ConfigSource, NetworkInstance, NetworkInstanceRunningInfo},
proto,
};
/// Supervises a set of running `NetworkInstance`s, keyed by the UUID taken
/// from each instance's config (see `run_network_instance`).
pub struct NetworkInstanceManager {
    // All tracked instances, keyed by `cfg.get_id()`.
    instance_map: Arc<DashMap<uuid::Uuid, NetworkInstance>>,
    // One watchdog task per instance; `ScopedTask` presumably aborts the
    // spawned task when the entry is removed — TODO confirm against
    // scoped_task's contract.
    instance_stop_tasks: Arc<DashMap<uuid::Uuid, ScopedTask<()>>>,
    // Fired (notify_waiters) by a watchdog when its instance stops; wakes
    // `wait()`.
    stop_check_notifier: Arc<tokio::sync::Notify>,
}
impl NetworkInstanceManager {
pub fn new() -> Self {
NetworkInstanceManager {
instance_map: Arc::new(DashMap::new()),
instance_stop_tasks: Arc::new(DashMap::new()),
stop_check_notifier: Arc::new(tokio::sync::Notify::new()),
}
}
fn start_instance_task(&self, instance_id: uuid::Uuid) -> Result<(), anyhow::Error> {
let instance = self
.instance_map
.get(&instance_id)
.ok_or_else(|| anyhow::anyhow!("instance {} not found", instance_id))?;
match instance.get_config_source() {
ConfigSource::FFI | ConfigSource::GUI => {
// FFI and GUI have no tokio runtime, so we don't need to spawn a task
return Ok(());
}
_ => {
if tokio::runtime::Handle::try_current().is_err() {
return Err(anyhow::anyhow!(
"tokio runtime not found, cannot start instance task"
));
}
}
}
let instance_stop_notifier = instance.get_stop_notifier();
let instance_config_source = instance.get_config_source();
let instance_event_receiver = match instance.get_config_source() {
ConfigSource::Cli | ConfigSource::File | ConfigSource::Web => {
Some(instance.subscribe_event())
}
ConfigSource::GUI | ConfigSource::FFI => None,
};
let instance_map = self.instance_map.clone();
let instance_stop_tasks = self.instance_stop_tasks.clone();
let stop_check_notifier = self.stop_check_notifier.clone();
self.instance_stop_tasks.insert(
instance_id,
ScopedTask::from(tokio::spawn(async move {
let Some(instance_stop_notifier) = instance_stop_notifier else {
return;
};
let _t = if let Some(event) = instance_event_receiver.flatten() {
Some(ScopedTask::from(handle_event(instance_id, event)))
} else {
None
};
instance_stop_notifier.notified().await;
if let Some(instance) = instance_map.get(&instance_id) {
if let Some(e) = instance.get_latest_error_msg() {
tracing::error!(?e, ?instance_id, "instance stopped with error");
eprintln!("instance {} stopped with error: {}", instance_id, e);
}
}
match instance_config_source {
ConfigSource::Cli | ConfigSource::File => {
instance_map.remove(&instance_id);
}
ConfigSource::Web | ConfigSource::GUI | ConfigSource::FFI => {}
}
instance_stop_tasks.remove(&instance_id);
stop_check_notifier.notify_waiters();
})),
);
Ok(())
}
pub fn run_network_instance(
&self,
cfg: TomlConfigLoader,
source: ConfigSource,
) -> Result<uuid::Uuid, anyhow::Error> {
let instance_id = cfg.get_id();
if self.instance_map.contains_key(&instance_id) {
anyhow::bail!("instance {} already exists", instance_id);
}
let mut instance = NetworkInstance::new(cfg, source);
instance.start()?;
self.instance_map.insert(instance_id, instance);
self.start_instance_task(instance_id)?;
Ok(instance_id)
}
pub fn retain_network_instance(
&self,
instance_ids: Vec<uuid::Uuid>,
) -> Result<Vec<uuid::Uuid>, anyhow::Error> {
self.instance_map.retain(|k, _| instance_ids.contains(k));
Ok(self.list_network_instance_ids())
}
pub fn delete_network_instance(
&self,
instance_ids: Vec<uuid::Uuid>,
) -> Result<Vec<uuid::Uuid>, anyhow::Error> {
self.instance_map.retain(|k, _| !instance_ids.contains(k));
Ok(self.list_network_instance_ids())
}
pub fn collect_network_infos(
&self,
) -> Result<BTreeMap<uuid::Uuid, NetworkInstanceRunningInfo>, anyhow::Error> {
let mut ret = BTreeMap::new();
for instance in self.instance_map.iter() {
if let Some(info) = instance.get_running_info() {
ret.insert(instance.key().clone(), info);
}
}
Ok(ret)
}
pub fn list_network_instance_ids(&self) -> Vec<uuid::Uuid> {
self.instance_map
.iter()
.map(|item| item.key().clone())
.collect()
}
pub fn get_network_instance_name(&self, instance_id: &uuid::Uuid) -> Option<String> {
self.instance_map
.get(instance_id)
.map(|instance| instance.value().get_inst_name())
}
pub fn set_tun_fd(&self, instance_id: &uuid::Uuid, fd: i32) -> Result<(), anyhow::Error> {
let mut instance = self
.instance_map
.get_mut(instance_id)
.ok_or_else(|| anyhow::anyhow!("instance not found"))?;
instance.set_tun_fd(fd);
Ok(())
}
pub async fn wait(&self) {
while self.instance_map.len() > 0 {
self.stop_check_notifier.notified().await;
}
}
}
/// Spawns a task that forwards every event from the instance's event bus to
/// stdout via `print_event`, tagged with `instance_id`.
///
/// The task loops forever; the manager wraps the returned handle in a
/// `ScopedTask` so it is torn down together with the instance's watchdog.
#[tracing::instrument]
fn handle_event(
    instance_id: uuid::Uuid,
    mut events: EventBusSubscriber,
) -> tokio::task::JoinHandle<()> {
    tokio::spawn(async move {
        loop {
            if let Ok(e) = events.recv().await {
                // One human-readable line per event kind.
                match e {
                    GlobalCtxEvent::PeerAdded(p) => {
                        print_event(instance_id, format!("new peer added. peer_id: {}", p));
                    }
                    GlobalCtxEvent::PeerRemoved(p) => {
                        print_event(instance_id, format!("peer removed. peer_id: {}", p));
                    }
                    GlobalCtxEvent::PeerConnAdded(p) => {
                        print_event(
                            instance_id,
                            format!(
                                "new peer connection added. conn_info: {}",
                                peer_conn_info_to_string(p)
                            ),
                        );
                    }
                    GlobalCtxEvent::PeerConnRemoved(p) => {
                        print_event(
                            instance_id,
                            format!(
                                "peer connection removed. conn_info: {}",
                                peer_conn_info_to_string(p)
                            ),
                        );
                    }
                    GlobalCtxEvent::ListenerAddFailed(p, msg) => {
                        print_event(
                            instance_id,
                            format!("listener add failed. listener: {}, msg: {}", p, msg),
                        );
                    }
                    GlobalCtxEvent::ListenerAcceptFailed(p, msg) => {
                        print_event(
                            instance_id,
                            format!("listener accept failed. listener: {}, msg: {}", p, msg),
                        );
                    }
                    GlobalCtxEvent::ListenerAdded(p) => {
                        // Listeners with the "ring" scheme are deliberately
                        // not reported.
                        if p.scheme() == "ring" {
                            continue;
                        }
                        print_event(instance_id, format!("new listener added. listener: {}", p));
                    }
                    GlobalCtxEvent::ConnectionAccepted(local, remote) => {
                        print_event(
                            instance_id,
                            format!(
                                "new connection accepted. local: {}, remote: {}",
                                local, remote
                            ),
                        );
                    }
                    GlobalCtxEvent::ConnectionError(local, remote, err) => {
                        print_event(
                            instance_id,
                            format!(
                                "connection error. local: {}, remote: {}, err: {}",
                                local, remote, err
                            ),
                        );
                    }
                    GlobalCtxEvent::TunDeviceReady(dev) => {
                        print_event(instance_id, format!("tun device ready. dev: {}", dev));
                    }
                    GlobalCtxEvent::TunDeviceError(err) => {
                        print_event(instance_id, format!("tun device error. err: {}", err));
                    }
                    GlobalCtxEvent::Connecting(dst) => {
                        print_event(instance_id, format!("connecting to peer. dst: {}", dst));
                    }
                    GlobalCtxEvent::ConnectError(dst, ip_version, err) => {
                        print_event(
                            instance_id,
                            format!(
                                "connect to peer error. dst: {}, ip_version: {}, err: {}",
                                dst, ip_version, err
                            ),
                        );
                    }
                    GlobalCtxEvent::VpnPortalClientConnected(portal, client_addr) => {
                        print_event(
                            instance_id,
                            format!(
                                "vpn portal client connected. portal: {}, client_addr: {}",
                                portal, client_addr
                            ),
                        );
                    }
                    GlobalCtxEvent::VpnPortalClientDisconnected(portal, client_addr) => {
                        print_event(
                            instance_id,
                            format!(
                                "vpn portal client disconnected. portal: {}, client_addr: {}",
                                portal, client_addr
                            ),
                        );
                    }
                    GlobalCtxEvent::DhcpIpv4Changed(old, new) => {
                        print_event(
                            instance_id,
                            format!("dhcp ip changed. old: {:?}, new: {:?}", old, new),
                        );
                    }
                    GlobalCtxEvent::DhcpIpv4Conflicted(ip) => {
                        print_event(instance_id, format!("dhcp ip conflict. ip: {:?}", ip));
                    }
                    GlobalCtxEvent::PortForwardAdded(cfg) => {
                        print_event(
                            instance_id,
                            format!(
                                "port forward added. local: {}, remote: {}, proto: {}",
                                cfg.bind_addr.unwrap().to_string(),
                                cfg.dst_addr.unwrap().to_string(),
                                cfg.socket_type().as_str_name()
                            ),
                        );
                    }
                }
            } else {
                // recv() failed (e.g. this receiver lagged behind); start over
                // with a fresh subscription rather than exiting.
                // NOTE(review): if the sending side is closed, resubscribe()
                // likely yields another closed receiver and this loop would
                // spin — confirm the event bus outlives this task.
                events = events.resubscribe();
            }
        }
    })
}
/// Prints one event line to stdout: local timestamp, instance id, message.
fn print_event(instance_id: uuid::Uuid, msg: String) {
    let timestamp = chrono::Local::now().format("%Y-%m-%d %H:%M:%S");
    println!("{}: [{}] {}", timestamp, instance_id, msg);
}
/// Renders a peer connection info as a short one-line summary for logging.
fn peer_conn_info_to_string(p: proto::cli::PeerConnInfo) -> String {
    format!(
        "my_peer_id: {me}, dst_peer_id: {dst}, tunnel_info: {tunnel:?}",
        me = p.my_peer_id,
        dst = p.peer_id,
        tunnel = p.tunnel
    )
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::config::*;

    /// End-to-end smoke test: start five instances (one per ConfigSource),
    /// verify they are all tracked and that only headless-capable sources get
    /// a stop task, then delete three of them.
    #[tokio::test]
    async fn it_works() {
        let manager = NetworkInstanceManager::new();
        let cfg_str = r#"
listeners = []
"#;
        let port = crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
        // Only the CLI instance binds a real TCP listener on `port`.
        let instance_id1 = manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str)
                    .map(|c| {
                        c.set_listeners(vec![format!("tcp://0.0.0.0:{}", port).parse().unwrap()]);
                        c
                    })
                    .unwrap(),
                ConfigSource::Cli,
            )
            .unwrap();
        let instance_id2 = manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::File,
            )
            .unwrap();
        let instance_id3 = manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::GUI,
            )
            .unwrap();
        let instance_id4 = manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::Web,
            )
            .unwrap();
        let instance_id5 = manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::FFI,
            )
            .unwrap();
        tokio::time::sleep(std::time::Duration::from_secs(1)).await; // to make instance actually started
        // Port must now be occupied by the CLI instance's listener.
        assert!(!crate::utils::check_tcp_available(port));
        assert!(manager.instance_map.contains_key(&instance_id1));
        assert!(manager.instance_map.contains_key(&instance_id2));
        assert!(manager.instance_map.contains_key(&instance_id3));
        assert!(manager.instance_map.contains_key(&instance_id4));
        assert!(manager.instance_map.contains_key(&instance_id5));
        assert_eq!(manager.list_network_instance_ids().len(), 5);
        assert_eq!(manager.instance_stop_tasks.len(), 3); // FFI and GUI instance does not have a stop task
        manager
            .delete_network_instance(vec![instance_id3, instance_id4, instance_id5])
            .unwrap();
        assert!(!manager.instance_map.contains_key(&instance_id3));
        assert!(!manager.instance_map.contains_key(&instance_id4));
        assert!(!manager.instance_map.contains_key(&instance_id5));
        assert_eq!(manager.list_network_instance_ids().len(), 2);
    }

    /// Without a tokio runtime only GUI/FFI instances (which skip the stop
    /// task) can start; Cli/File/Web must fail, yet all five instances stay
    /// in the map and keep running.
    #[test]
    fn test_no_tokio_runtime() {
        let manager = NetworkInstanceManager::new();
        let cfg_str = r#"
listeners = []
"#;
        let port = crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
        assert!(manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::Cli,
            )
            .is_err());
        assert!(manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::File,
            )
            .is_err());
        assert!(manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str)
                    .map(|c| {
                        c.set_listeners(vec![format!("tcp://0.0.0.0:{}", port).parse().unwrap()]);
                        c
                    })
                    .unwrap(),
                ConfigSource::GUI,
            )
            .is_ok());
        assert!(manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::Web,
            )
            .is_err());
        assert!(manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str).unwrap(),
                ConfigSource::FFI,
            )
            .is_ok());
        std::thread::sleep(std::time::Duration::from_secs(1)); // wait instance actually started
        assert!(!crate::utils::check_tcp_available(port));
        assert_eq!(manager.list_network_instance_ids().len(), 5);
        assert_eq!(
            manager
                .instance_map
                .iter()
                .map(|item| item.is_easytier_running())
                .filter(|x| *x)
                .count(),
            5
        ); // stop tasks failed not affect instance running status
        assert_eq!(manager.instance_stop_tasks.len(), 0);
    }

    /// A single Cli/File instance that fails to bind its listener must be
    /// removed from the map so `wait()` returns; Web/GUI/FFI instances stay
    /// in the map even when they fail.
    #[tokio::test]
    async fn test_single_instance_failed() {
        let free_tcp_port =
            crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
        for config_source in [ConfigSource::Cli, ConfigSource::File] {
            // Occupy the port so the instance's listener fails to bind.
            let _port_holder =
                std::net::TcpListener::bind(format!("0.0.0.0:{}", free_tcp_port)).unwrap();
            let cfg_str = format!(
                r#"
listeners = ["tcp://0.0.0.0:{}"]
"#,
                free_tcp_port
            );
            let manager = NetworkInstanceManager::new();
            manager
                .run_network_instance(
                    TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
                    config_source.clone(),
                )
                .unwrap();
            tokio::select! {
                _ = manager.wait() => {
                    assert_eq!(manager.list_network_instance_ids().len(), 0);
                }
                _ = tokio::time::sleep(std::time::Duration::from_secs(5)) => {
                    panic!("instance manager with single failed instance({:?}) should not running", config_source);
                }
            }
        }
        for config_source in [ConfigSource::Web, ConfigSource::GUI, ConfigSource::FFI] {
            let _port_holder =
                std::net::TcpListener::bind(format!("0.0.0.0:{}", free_tcp_port)).unwrap();
            let cfg_str = format!(
                r#"
listeners = ["tcp://0.0.0.0:{}"]
"#,
                free_tcp_port
            );
            let manager = NetworkInstanceManager::new();
            manager
                .run_network_instance(
                    TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
                    config_source.clone(),
                )
                .unwrap();
            assert_eq!(manager.list_network_instance_ids().len(), 1);
        }
    }

    /// Two Cli instances competing for the same port: the second one fails
    /// and is evicted, but the first keeps running, so `wait()` must not
    /// resolve.
    #[tokio::test]
    async fn test_multiple_instances_one_failed() {
        let free_tcp_port =
            crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
        let manager = NetworkInstanceManager::new();
        let cfg_str = format!(
            r#"
listeners = ["tcp://0.0.0.0:{}"]
[flags]
enable_ipv6 = false
"#,
            free_tcp_port
        );
        manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
                ConfigSource::Cli,
            )
            .unwrap();
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        // Same listener config: this second instance cannot bind the port.
        manager
            .run_network_instance(
                TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
                ConfigSource::Cli,
            )
            .unwrap();
        tokio::select! {
            _ = manager.wait() => {
                panic!("instance manager with multiple instances one failed should still running");
            }
            _ = tokio::time::sleep(std::time::Duration::from_secs(2)) => {
                assert_eq!(manager.list_network_instance_ids().len(), 1);
            }
        }
    }
}
+491 -30
View File
@@ -1,6 +1,5 @@
use std::{ use std::{
collections::VecDeque, collections::VecDeque,
net::SocketAddr,
sync::{atomic::AtomicBool, Arc, RwLock}, sync::{atomic::AtomicBool, Arc, RwLock},
}; };
@@ -136,8 +135,6 @@ impl EasyTierLauncher {
fetch_node_info: bool, fetch_node_info: bool,
) -> Result<(), anyhow::Error> { ) -> Result<(), anyhow::Error> {
let mut instance = Instance::new(cfg); let mut instance = Instance::new(cfg);
let peer_mgr = instance.get_peer_manager();
let mut tasks = JoinSet::new(); let mut tasks = JoinSet::new();
// Subscribe to global context events // Subscribe to global context events
@@ -165,7 +162,7 @@ impl EasyTierLauncher {
if fetch_node_info { if fetch_node_info {
let data_c = data.clone(); let data_c = data.clone();
let global_ctx_c = instance.get_global_ctx(); let global_ctx_c = instance.get_global_ctx();
let peer_mgr_c = peer_mgr.clone(); let peer_mgr_c = instance.get_peer_manager().clone();
let vpn_portal = instance.get_vpn_portal_inst(); let vpn_portal = instance.get_vpn_portal_inst();
tasks.spawn(async move { tasks.spawn(async move {
loop { loop {
@@ -211,12 +208,10 @@ impl EasyTierLauncher {
tasks.abort_all(); tasks.abort_all();
drop(tasks); drop(tasks);
Ok(()) instance.clear_resources().await;
} drop(instance);
fn check_tcp_available(port: u16) -> bool { Ok(())
let s = format!("0.0.0.0:{}", port).parse::<SocketAddr>().unwrap();
std::net::TcpListener::bind(s).is_ok()
} }
fn select_proper_rpc_port(cfg: &TomlConfigLoader) { fn select_proper_rpc_port(cfg: &TomlConfigLoader) {
@@ -225,13 +220,12 @@ impl EasyTierLauncher {
}; };
if f.port() == 0 { if f.port() == 0 {
for i in 15888..15900 { let Some(port) = crate::utils::find_free_tcp_port(15888..15900) else {
if Self::check_tcp_available(i) { tracing::warn!("No free port found for RPC portal, skipping setting RPC portal");
f.set_port(i); return;
cfg.set_rpc_portal(f); };
break; f.set_port(port);
} cfg.set_rpc_portal(f);
}
} }
} }
@@ -343,25 +337,40 @@ impl Drop for EasyTierLauncher {
pub type NetworkInstanceRunningInfo = crate::proto::web::NetworkInstanceRunningInfo; pub type NetworkInstanceRunningInfo = crate::proto::web::NetworkInstanceRunningInfo;
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ConfigSource {
Cli,
File,
Web,
GUI,
FFI,
}
pub struct NetworkInstance { pub struct NetworkInstance {
config: TomlConfigLoader, config: TomlConfigLoader,
launcher: Option<EasyTierLauncher>, launcher: Option<EasyTierLauncher>,
fetch_node_info: bool, config_source: ConfigSource,
} }
impl NetworkInstance { impl NetworkInstance {
pub fn new(config: TomlConfigLoader) -> Self { pub fn new(config: TomlConfigLoader, source: ConfigSource) -> Self {
Self { Self {
config, config,
launcher: None, launcher: None,
fetch_node_info: true, config_source: source,
} }
} }
pub fn set_fetch_node_info(mut self, fetch_node_info: bool) -> Self { fn get_fetch_node_info(&self) -> bool {
self.fetch_node_info = fetch_node_info; match self.config_source {
self ConfigSource::Cli | ConfigSource::File => false,
ConfigSource::Web | ConfigSource::GUI | ConfigSource::FFI => true,
}
}
pub fn get_config_source(&self) -> ConfigSource {
self.config_source.clone()
} }
pub fn is_easytier_running(&self) -> bool { pub fn is_easytier_running(&self) -> bool {
@@ -395,6 +404,10 @@ impl NetworkInstance {
}) })
} }
pub fn get_inst_name(&self) -> String {
self.config.get_inst_name()
}
pub fn set_tun_fd(&mut self, tun_fd: i32) { pub fn set_tun_fd(&mut self, tun_fd: i32) {
if let Some(launcher) = self.launcher.as_ref() { if let Some(launcher) = self.launcher.as_ref() {
launcher.data.tun_fd.write().unwrap().replace(tun_fd); launcher.data.tun_fd.write().unwrap().replace(tun_fd);
@@ -406,7 +419,7 @@ impl NetworkInstance {
return Ok(self.subscribe_event().unwrap()); return Ok(self.subscribe_event().unwrap());
} }
let launcher = EasyTierLauncher::new(self.fetch_node_info); let launcher = EasyTierLauncher::new(self.get_fetch_node_info());
self.launcher = Some(launcher); self.launcher = Some(launcher);
let ev = self.subscribe_event().unwrap(); let ev = self.subscribe_event().unwrap();
@@ -418,7 +431,7 @@ impl NetworkInstance {
Ok(ev) Ok(ev)
} }
fn subscribe_event(&self) -> Option<broadcast::Receiver<GlobalCtxEvent>> { pub fn subscribe_event(&self) -> Option<broadcast::Receiver<GlobalCtxEvent>> {
if let Some(launcher) = self.launcher.as_ref() { if let Some(launcher) = self.launcher.as_ref() {
Some(launcher.data.event_subscriber.read().unwrap().subscribe()) Some(launcher.data.event_subscriber.read().unwrap().subscribe())
} else { } else {
@@ -426,9 +439,16 @@ impl NetworkInstance {
} }
} }
pub async fn wait(&self) -> Option<String> { pub fn get_stop_notifier(&self) -> Option<Arc<tokio::sync::Notify>> {
if let Some(launcher) = self.launcher.as_ref() {
Some(launcher.data.instance_stop_notifier.clone())
} else {
None
}
}
pub fn get_latest_error_msg(&self) -> Option<String> {
if let Some(launcher) = self.launcher.as_ref() { if let Some(launcher) = self.launcher.as_ref() {
launcher.data.instance_stop_notifier.notified().await;
launcher.error_msg.read().unwrap().clone() launcher.error_msg.read().unwrap().clone()
} else { } else {
None None
@@ -436,6 +456,36 @@ impl NetworkInstance {
} }
} }
/// Parses a proxy-network spec of the form `<real_cidr>` or
/// `<real_cidr>-><mapped_cidr>` and registers it on `cfg`.
///
/// # Errors
/// Returns an error when the spec has more than one `->`, or when either
/// CIDR fails to parse.
pub fn add_proxy_network_to_config(
    proxy_network: &str,
    cfg: &TomlConfigLoader,
) -> Result<(), anyhow::Error> {
    let parts: Vec<&str> = proxy_network.split("->").collect();
    // Validate the overall shape *before* parsing any piece, so malformed
    // input like "a->b->c" reports the format error instead of a CIDR
    // parse error on the first segment.
    if parts.len() > 2 {
        return Err(anyhow::anyhow!(
            "invalid proxy network format: {}, support format: <real_cidr> or <real_cidr>-><mapped_cidr>, example:
10.0.0.0/24 or 10.0.0.0/24->192.168.0.0/24",
            proxy_network
        ));
    }
    // split() always yields at least one element, so parts[0] is safe.
    let real_cidr = parts[0]
        .parse()
        .with_context(|| format!("failed to parse proxy network: {}", parts[0]))?;
    // Optional mapped CIDR; `transpose` turns Option<Result<..>> into
    // Result<Option<..>> so `?` can propagate the parse error.
    let mapped_cidr = parts
        .get(1)
        .map(|s| {
            s.parse()
                .with_context(|| format!("failed to parse mapped network: {}", s))
        })
        .transpose()?;
    cfg.add_proxy_cidr(real_cidr, mapped_cidr);
    Ok(())
}
pub type NetworkingMethod = crate::proto::web::NetworkingMethod; pub type NetworkingMethod = crate::proto::web::NetworkingMethod;
pub type NetworkConfig = crate::proto::web::NetworkConfig; pub type NetworkConfig = crate::proto::web::NetworkConfig;
@@ -515,10 +565,7 @@ impl NetworkConfig {
cfg.set_listeners(listener_urls); cfg.set_listeners(listener_urls);
for n in self.proxy_cidrs.iter() { for n in self.proxy_cidrs.iter() {
cfg.add_proxy_cidr( add_proxy_network_to_config(n, &cfg)?;
n.parse()
.with_context(|| format!("failed to parse proxy network: {}", n))?,
);
} }
cfg.set_rpc_portal( cfg.set_rpc_portal(
@@ -527,6 +574,20 @@ impl NetworkConfig {
.with_context(|| format!("failed to parse rpc portal port: {:?}", self.rpc_port))?, .with_context(|| format!("failed to parse rpc portal port: {:?}", self.rpc_port))?,
); );
if self.rpc_portal_whitelists.is_empty() {
cfg.set_rpc_portal_whitelist(None);
} else {
cfg.set_rpc_portal_whitelist(Some(
self.rpc_portal_whitelists
.iter()
.map(|s| {
s.parse()
.with_context(|| format!("failed to parse rpc portal whitelist: {}", s))
})
.collect::<Result<Vec<_>, _>>()?,
));
}
if self.enable_vpn_portal.unwrap_or_default() { if self.enable_vpn_portal.unwrap_or_default() {
let cidr = format!( let cidr = format!(
"{}/{}", "{}/{}",
@@ -624,6 +685,14 @@ impl NetworkConfig {
flags.disable_kcp_input = disable_kcp_input; flags.disable_kcp_input = disable_kcp_input;
} }
if let Some(enable_quic_proxy) = self.enable_quic_proxy {
flags.enable_quic_proxy = enable_quic_proxy;
}
if let Some(disable_quic_input) = self.disable_quic_input {
flags.disable_quic_input = disable_quic_input;
}
if let Some(disable_p2p) = self.disable_p2p { if let Some(disable_p2p) = self.disable_p2p {
flags.disable_p2p = disable_p2p; flags.disable_p2p = disable_p2p;
} }
@@ -683,4 +752,396 @@ impl NetworkConfig {
cfg.set_flags(flags); cfg.set_flags(flags);
Ok(cfg) Ok(cfg)
} }
/// Builds a web-form `NetworkConfig` from a full TOML config — the inverse of
/// `gen_config()` (round-trip equality is asserted by the conversion tests
/// below).
pub fn new_from_config(config: &TomlConfigLoader) -> Result<Self, anyhow::Error> {
    // Compared against to detect fields left at their default value.
    let default_config = TomlConfigLoader::default();
    let mut result = Self::default();
    result.instance_id = Some(config.get_id().to_string());
    if config.get_hostname() != default_config.get_hostname() {
        result.hostname = Some(config.get_hostname());
    }
    result.dhcp = Some(config.get_dhcp());
    let network_identity = config.get_network_identity();
    result.network_name = Some(network_identity.network_name.clone());
    result.network_secret = network_identity.network_secret.clone();
    if let Some(ipv4) = config.get_ipv4() {
        result.virtual_ipv4 = Some(ipv4.address().to_string());
        result.network_length = Some(ipv4.network_length() as i32);
    }
    // Networking method is inferred from the peer count:
    // exactly one peer -> public server; none -> standalone; several -> manual.
    let peers = config.get_peers();
    match peers.len() {
        1 => {
            result.networking_method = Some(NetworkingMethod::PublicServer as i32);
            result.public_server_url = Some(peers[0].uri.to_string());
        }
        0 => {
            result.networking_method = Some(NetworkingMethod::Standalone as i32);
        }
        _ => {
            result.networking_method = Some(NetworkingMethod::Manual as i32);
            result.peer_urls = peers.iter().map(|p| p.uri.to_string()).collect();
        }
    }
    result.listener_urls = config
        .get_listeners()
        .unwrap_or_else(|| vec![])
        .iter()
        .map(|l| l.to_string())
        .collect();
    // Proxy CIDRs are rendered as "<real>" or "<real>-><mapped>", the format
    // accepted by add_proxy_network_to_config.
    result.proxy_cidrs = config
        .get_proxy_cidrs()
        .iter()
        .map(|c| {
            if let Some(mapped) = c.mapped_cidr {
                format!("{}->{}", c.cidr, mapped)
            } else {
                c.cidr.to_string()
            }
        })
        .collect();
    if let Some(rpc_portal) = config.get_rpc_portal() {
        result.rpc_port = Some(rpc_portal.port() as i32);
    }
    if let Some(whitelist) = config.get_rpc_portal_whitelist() {
        result.rpc_portal_whitelists = whitelist.iter().map(|w| w.to_string()).collect();
    }
    if let Some(vpn_config) = config.get_vpn_portal_config() {
        result.enable_vpn_portal = Some(true);
        let cidr = vpn_config.client_cidr;
        result.vpn_portal_client_network_addr = Some(cidr.first_address().to_string());
        result.vpn_portal_client_network_len = Some(cidr.network_length() as i32);
        result.vpn_portal_listen_port = Some(vpn_config.wireguard_listen.port() as i32);
    }
    if let Some(routes) = config.get_routes() {
        if !routes.is_empty() {
            result.enable_manual_routes = Some(true);
            result.routes = routes.iter().map(|r| r.to_string()).collect();
        }
    }
    let exit_nodes = config.get_exit_nodes();
    if !exit_nodes.is_empty() {
        result.exit_nodes = exit_nodes.iter().map(|n| n.to_string()).collect();
    }
    if let Some(socks5_portal) = config.get_socks5_portal() {
        result.enable_socks5 = Some(true);
        result.socks5_port = socks5_portal.port().map(|p| p as i32);
    }
    let mapped_listeners = config.get_mapped_listeners();
    if !mapped_listeners.is_empty() {
        result.mapped_listeners = mapped_listeners.iter().map(|l| l.to_string()).collect();
    }
    // Flags map mostly 1:1; note the renames/inversions below
    // (disable_encryption <- !enable_encryption, enable_magic_dns <- accept_dns).
    let flags = config.get_flags();
    result.latency_first = Some(flags.latency_first);
    result.dev_name = Some(flags.dev_name.clone());
    result.use_smoltcp = Some(flags.use_smoltcp);
    result.enable_kcp_proxy = Some(flags.enable_kcp_proxy);
    result.disable_kcp_input = Some(flags.disable_kcp_input);
    result.enable_quic_proxy = Some(flags.enable_quic_proxy);
    result.disable_quic_input = Some(flags.disable_quic_input);
    result.disable_p2p = Some(flags.disable_p2p);
    result.bind_device = Some(flags.bind_device);
    result.no_tun = Some(flags.no_tun);
    result.enable_exit_node = Some(flags.enable_exit_node);
    result.relay_all_peer_rpc = Some(flags.relay_all_peer_rpc);
    result.multi_thread = Some(flags.multi_thread);
    result.proxy_forward_by_system = Some(flags.proxy_forward_by_system);
    result.disable_encryption = Some(!flags.enable_encryption);
    result.disable_udp_hole_punching = Some(flags.disable_udp_hole_punching);
    result.enable_magic_dns = Some(flags.accept_dns);
    result.mtu = Some(flags.mtu as i32);
    result.enable_private_mode = Some(flags.private_mode);
    // The whitelist is exported only when it is non-empty and not the "*"
    // wildcard; entries are whitespace-separated in the flag string.
    if !flags.relay_network_whitelist.is_empty() && flags.relay_network_whitelist != "*" {
        result.enable_relay_network_whitelist = Some(true);
        result.relay_network_whitelist = flags
            .relay_network_whitelist
            .split_whitespace()
            .map(|s| s.to_string())
            .collect();
    }
    Ok(result)
}
}
#[cfg(test)]
mod tests {
use crate::common::config::ConfigLoader;
use rand::Rng;
use std::net::Ipv4Addr;
/// Produces the baseline config the conversion tests start from: DHCP off,
/// fixed instance name, no listeners, wildcard RPC portal, fresh random id.
fn gen_default_config() -> crate::common::config::TomlConfigLoader {
    let cfg = crate::common::config::TomlConfigLoader::default();
    cfg.set_dhcp(false);
    cfg.set_id(uuid::Uuid::new_v4());
    cfg.set_inst_name(String::from("default"));
    cfg.set_listeners(Vec::new());
    cfg.set_rpc_portal(std::net::SocketAddr::from(([0, 0, 0, 0], 0)));
    cfg
}
/// Round-trips the default config through `NetworkConfig` and back, and
/// requires the TOML dump to be unchanged.
#[test]
fn test_network_config_conversion_basic() -> Result<(), anyhow::Error> {
    let original = gen_default_config();
    let web_form = super::NetworkConfig::new_from_config(&original)?;
    let round_tripped = web_form.gen_config()?;
    let original_dump = original.dump();
    let round_tripped_dump = round_tripped.dump();
    assert_eq!(
        original_dump, round_tripped_dump,
        "Generated config does not match original config:\nOriginal:\n{}\n\nGenerated:\n{}\nNetwork Config: {}\n",
        original_dump, round_tripped_dump, serde_json::to_string(&web_form).unwrap()
    );
    Ok(())
}
// Randomized round-trip property test for config conversion.
//
// For 100 iterations this builds a `TomlConfigLoader` with randomly chosen
// fields (identity, addressing, peers, listeners, proxies, portals, routes,
// exit nodes, flags), converts it to a `NetworkConfig` with
// `new_from_config`, regenerates a config with `gen_config`, and asserts the
// two `dump()` outputs are byte-identical. Any conversion error is
// propagated to the test harness via `?`.
#[test]
fn test_network_config_conversion_random() -> Result<(), anyhow::Error> {
let mut rng = rand::thread_rng();
for _ in 0..100 {
// NOTE(review): `config` is not `mut`, so the setters below presumably
// rely on interior mutability inside TomlConfigLoader — confirm before
// refactoring.
let config = gen_default_config();
config.set_id(uuid::Uuid::new_v4());
config.set_dhcp(rng.gen_bool(0.5));
// Optional hostname (~70% of iterations).
if rng.gen_bool(0.7) {
let hostname = format!("host-{}", rng.gen::<u16>());
config.set_hostname(Some(hostname));
}
// Fresh random network identity; the instance name mirrors the network name.
config.set_network_identity(crate::common::config::NetworkIdentity::new(
format!("network-{}", rng.gen::<u16>()),
format!("secret-{}", rng.gen::<u64>()),
));
config.set_inst_name(config.get_network_identity().network_name.clone());
// Static IPv4 only when DHCP is off. First/last octets avoid 0 and
// 255/254+ so the address is a plausible host address.
if !config.get_dhcp() {
let addr = Ipv4Addr::new(
rng.gen_range(1..254),
rng.gen_range(0..255),
rng.gen_range(0..255),
rng.gen_range(1..254),
);
let prefix_len = rng.gen_range(1..31);
let ipv4 = format!("{}/{}", addr, prefix_len).parse().unwrap();
config.set_ipv4(Some(ipv4));
}
// 0..2 random tcp/udp peers on loopback with unprivileged ports.
let peer_count = rng.gen_range(0..3);
let mut peers = Vec::new();
for _ in 0..peer_count {
let port = rng.gen_range(10000..60000);
let protocol = if rng.gen_bool(0.5) { "tcp" } else { "udp" };
let uri = format!("{}://127.0.0.1:{}", protocol, port)
.parse()
.unwrap();
peers.push(crate::common::config::PeerConfig { uri });
}
config.set_peers(peers);
// Optional listeners (0..2) on the wildcard address.
if rng.gen_bool(0.7) {
let listener_count = rng.gen_range(0..3);
let mut listeners = Vec::new();
for _ in 0..listener_count {
let port = rng.gen_range(10000..60000);
let protocol = if rng.gen_bool(0.5) { "tcp" } else { "udp" };
listeners.push(format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap());
}
config.set_listeners(listeners);
}
// Optional subnet-proxy CIDRs, each with an optional mapped network of
// the SAME prefix length (required for 1:1 subnet mapping). The last
// octet is always 0 and the prefix is >= 24, so host bits are zero and
// `Ipv4Cidr` parsing cannot fail.
if rng.gen_bool(0.6) {
let proxy_count = rng.gen_range(0..3);
for _ in 0..proxy_count {
let network = format!(
"{}.{}.{}.0/{}",
rng.gen_range(1..254),
rng.gen_range(0..255),
rng.gen_range(0..255),
rng.gen_range(24..30)
)
.parse::<cidr::Ipv4Cidr>()
.unwrap();
let mapped_network = if rng.gen_bool(0.5) {
Some(
format!(
"{}.{}.{}.0/{}",
rng.gen_range(1..254),
rng.gen_range(0..255),
rng.gen_range(0..255),
network.network_length()
)
.parse::<cidr::Ipv4Cidr>()
.unwrap(),
)
} else {
None
};
config.add_proxy_cidr(network, mapped_network);
}
}
// Optional RPC portal; note port 0 (OS-assigned) is a possible value.
if rng.gen_bool(0.8) {
let port = rng.gen_range(0..65535);
config.set_rpc_portal(std::net::SocketAddr::from(([0, 0, 0, 0], port)));
// Optional whitelist of 1..2 single-host (/32) CIDRs.
if rng.gen_bool(0.6) {
let whitelist_count = rng.gen_range(1..3);
let mut whitelist = Vec::new();
for _ in 0..whitelist_count {
let ip = Ipv4Addr::new(
rng.gen_range(1..254),
rng.gen_range(0..255),
rng.gen_range(0..255),
rng.gen_range(0..255),
);
let cidr = format!("{}/32", ip);
whitelist.push(cidr.parse().unwrap());
}
config.set_rpc_portal_whitelist(Some(whitelist));
}
}
// Optional WireGuard VPN portal with a random client CIDR and listen port.
if rng.gen_bool(0.5) {
let vpn_network = format!(
"{}.{}.{}.0/{}",
rng.gen_range(10..173),
rng.gen_range(0..255),
rng.gen_range(0..255),
rng.gen_range(24..30)
);
let vpn_port = rng.gen_range(10000..60000);
config.set_vpn_portal_config(crate::common::config::VpnPortalConfig {
client_cidr: vpn_network.parse().unwrap(),
wireguard_listen: format!("0.0.0.0:{}", vpn_port).parse().unwrap(),
});
}
// Optional manual routes (1..2 CIDRs).
if rng.gen_bool(0.6) {
let route_count = rng.gen_range(1..3);
let mut routes = Vec::new();
for _ in 0..route_count {
let route = format!(
"{}.{}.{}.0/{}",
rng.gen_range(1..254),
rng.gen_range(0..255),
rng.gen_range(0..255),
rng.gen_range(24..30)
);
routes.push(route.parse().unwrap());
}
config.set_routes(Some(routes));
}
// Optional exit nodes (1..2 random host addresses).
if rng.gen_bool(0.4) {
let node_count = rng.gen_range(1..3);
let mut nodes = Vec::new();
for _ in 0..node_count {
let ip = Ipv4Addr::new(
rng.gen_range(1..254),
rng.gen_range(0..255),
rng.gen_range(0..255),
rng.gen_range(1..254),
);
nodes.push(ip);
}
config.set_exit_nodes(nodes);
}
// Optional SOCKS5 portal on a random port.
if rng.gen_bool(0.5) {
let socks5_port = rng.gen_range(10000..60000);
config.set_socks5_portal(Some(
format!("socks5://0.0.0.0:{}", socks5_port).parse().unwrap(),
));
}
// Optional mapped (externally visible) listeners, tcp only.
if rng.gen_bool(0.4) {
let count = rng.gen_range(1..3);
let mut mapped_listeners = Vec::new();
for _ in 0..count {
let port = rng.gen_range(10000..60000);
mapped_listeners.push(format!("tcp://0.0.0.0:{}", port).parse().unwrap());
}
config.set_mapped_listeners(Some(mapped_listeners));
}
// Randomize most feature flags (90% of iterations) so conversion is
// exercised across flag combinations; probabilities roughly reflect
// how common each flag is expected to be in practice.
if rng.gen_bool(0.9) {
let mut flags = crate::common::config::gen_default_flags();
flags.latency_first = rng.gen_bool(0.5);
flags.dev_name = format!("etun{}", rng.gen_range(0..10));
flags.use_smoltcp = rng.gen_bool(0.3);
flags.enable_kcp_proxy = rng.gen_bool(0.5);
flags.disable_kcp_input = rng.gen_bool(0.3);
flags.enable_quic_proxy = rng.gen_bool(0.5);
flags.disable_quic_input = rng.gen_bool(0.3);
flags.disable_p2p = rng.gen_bool(0.2);
flags.bind_device = rng.gen_bool(0.3);
flags.no_tun = rng.gen_bool(0.1);
flags.enable_exit_node = rng.gen_bool(0.4);
flags.relay_all_peer_rpc = rng.gen_bool(0.5);
flags.multi_thread = rng.gen_bool(0.7);
flags.proxy_forward_by_system = rng.gen_bool(0.3);
flags.enable_encryption = rng.gen_bool(0.8);
flags.disable_udp_hole_punching = rng.gen_bool(0.2);
flags.accept_dns = rng.gen_bool(0.6);
flags.mtu = rng.gen_range(1200..1500);
flags.private_mode = rng.gen_bool(0.3);
// Optional whitelist: 1..2 /16 networks joined with spaces — the
// flag is apparently a space-separated string, not a list.
if rng.gen_bool(0.4) {
flags.relay_network_whitelist = (0..rng.gen_range(1..3))
.map(|_| {
format!(
"{}.{}.0.0/16",
rng.gen_range(10..192),
rng.gen_range(0..255)
)
})
.collect::<Vec<_>>()
.join(" ");
}
config.set_flags(flags);
}
// Round trip: TomlConfigLoader -> NetworkConfig -> TomlConfigLoader.
let network_config = super::NetworkConfig::new_from_config(&config)?;
let generated_config = network_config.gen_config()?;
generated_config.set_peers(generated_config.get_peers()); // Ensure peers field is not None
// Compare serialized forms; on mismatch the message includes both dumps
// plus the intermediate NetworkConfig as JSON for debugging.
let config_str = config.dump();
let generated_config_str = generated_config.dump();
assert_eq!(
config_str, generated_config_str,
"Generated config does not match original config:\nOriginal:\n{}\n\nGenerated:\n{}\nNetwork Config: {}\n",
config_str, generated_config_str, serde_json::to_string(&network_config).unwrap()
);
}
Ok(())
}
} }
+1
View File
@@ -9,6 +9,7 @@ mod vpn_portal;
pub mod common; pub mod common;
pub mod connector; pub mod connector;
pub mod launcher; pub mod launcher;
pub mod instance_manager;
pub mod peers; pub mod peers;
pub mod proto; pub mod proto;
pub mod tunnel; pub mod tunnel;
+22 -9
View File
@@ -1,6 +1,6 @@
use std::{ use std::{
collections::BTreeSet, collections::BTreeSet,
sync::Arc, sync::{Arc, Weak},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
@@ -31,7 +31,8 @@ use crate::{
use super::{server::PeerCenterServer, Digest, Error}; use super::{server::PeerCenterServer, Digest, Error};
struct PeerCenterBase { struct PeerCenterBase {
peer_mgr: Arc<PeerManager>, peer_mgr: Weak<PeerManager>,
my_peer_id: PeerId,
tasks: Mutex<JoinSet<()>>, tasks: Mutex<JoinSet<()>>,
lock: Arc<Mutex<()>>, lock: Arc<Mutex<()>>,
} }
@@ -40,20 +41,25 @@ struct PeerCenterBase {
static SERVICE_ID: u32 = 50; static SERVICE_ID: u32 = 50;
struct PeridicJobCtx<T> { struct PeridicJobCtx<T> {
peer_mgr: Arc<PeerManager>, peer_mgr: Weak<PeerManager>,
my_peer_id: PeerId,
center_peer: AtomicCell<PeerId>, center_peer: AtomicCell<PeerId>,
job_ctx: T, job_ctx: T,
} }
impl PeerCenterBase { impl PeerCenterBase {
pub async fn init(&self) -> Result<(), Error> { pub async fn init(&self) -> Result<(), Error> {
self.peer_mgr let Some(peer_mgr) = self.peer_mgr.upgrade() else {
return Err(Error::Shutdown);
};
peer_mgr
.get_peer_rpc_mgr() .get_peer_rpc_mgr()
.rpc_server() .rpc_server()
.registry() .registry()
.register( .register(
PeerCenterRpcServer::new(PeerCenterServer::new(self.peer_mgr.my_peer_id())), PeerCenterRpcServer::new(PeerCenterServer::new(peer_mgr.my_peer_id())),
&self.peer_mgr.get_global_ctx().get_network_name(), &peer_mgr.get_global_ctx().get_network_name(),
); );
Ok(()) Ok(())
} }
@@ -91,17 +97,23 @@ impl PeerCenterBase {
+ Sync + Sync
+ 'static), + 'static),
) -> () { ) -> () {
let my_peer_id = self.peer_mgr.my_peer_id(); let my_peer_id = self.my_peer_id;
let peer_mgr = self.peer_mgr.clone(); let peer_mgr = self.peer_mgr.clone();
let lock = self.lock.clone(); let lock = self.lock.clone();
self.tasks.lock().await.spawn( self.tasks.lock().await.spawn(
async move { async move {
let ctx = Arc::new(PeridicJobCtx { let ctx = Arc::new(PeridicJobCtx {
peer_mgr: peer_mgr.clone(), peer_mgr: peer_mgr.clone(),
my_peer_id,
center_peer: AtomicCell::new(PeerId::default()), center_peer: AtomicCell::new(PeerId::default()),
job_ctx, job_ctx,
}); });
loop { loop {
let Some(peer_mgr) = peer_mgr.upgrade() else {
tracing::error!("peer manager is shutdown, exit periodic job");
return;
};
let Some(center_peer) = Self::select_center_peer(&peer_mgr).await else { let Some(center_peer) = Self::select_center_peer(&peer_mgr).await else {
tracing::trace!("no center peer found, sleep 1 second"); tracing::trace!("no center peer found, sleep 1 second");
tokio::time::sleep(Duration::from_secs(1)).await; tokio::time::sleep(Duration::from_secs(1)).await;
@@ -138,7 +150,8 @@ impl PeerCenterBase {
pub fn new(peer_mgr: Arc<PeerManager>) -> Self { pub fn new(peer_mgr: Arc<PeerManager>) -> Self {
PeerCenterBase { PeerCenterBase {
peer_mgr, peer_mgr: Arc::downgrade(&peer_mgr),
my_peer_id: peer_mgr.my_peer_id(),
tasks: Mutex::new(JoinSet::new()), tasks: Mutex::new(JoinSet::new()),
lock: Arc::new(Mutex::new(())), lock: Arc::new(Mutex::new(())),
} }
@@ -289,7 +302,7 @@ impl PeerCenterInstance {
self.client self.client
.init_periodic_job(ctx, |client, ctx| async move { .init_periodic_job(ctx, |client, ctx| async move {
let my_node_id = ctx.peer_mgr.my_peer_id(); let my_node_id = ctx.my_peer_id;
let peers: PeerInfoForGlobalMap = ctx.job_ctx.service.list_peers().await.into(); let peers: PeerInfoForGlobalMap = ctx.job_ctx.service.list_peers().await.into();
let peer_list = peers.direct_peers.keys().map(|k| *k).collect(); let peer_list = peers.direct_peers.keys().map(|k| *k).collect();
let job_ctx = &ctx.job_ctx; let job_ctx = &ctx.job_ctx;
+2
View File
@@ -19,6 +19,8 @@ pub enum Error {
DigestMismatch, DigestMismatch,
#[error("Not center server")] #[error("Not center server")]
NotCenterServer, NotCenterServer,
#[error("Instance shutdown")]
Shutdown,
} }
pub type Digest = u64; pub type Digest = u64;
+153 -31
View File
@@ -26,12 +26,13 @@ use crate::{
global_ctx::{ArcGlobalCtx, GlobalCtx, GlobalCtxEvent, NetworkIdentity}, global_ctx::{ArcGlobalCtx, GlobalCtx, GlobalCtxEvent, NetworkIdentity},
join_joinset_background, join_joinset_background,
stun::MockStunInfoCollector, stun::MockStunInfoCollector,
token_bucket::TokenBucket,
PeerId, PeerId,
}, },
peers::route_trait::{Route, RouteInterface}, peers::route_trait::{Route, RouteInterface},
proto::{ proto::{
cli::{ForeignNetworkEntryPb, ListForeignNetworkResponse, PeerInfo}, cli::{ForeignNetworkEntryPb, ListForeignNetworkResponse, PeerInfo},
common::NatType, common::{LimiterConfig, NatType},
peer_rpc::DirectConnectorRpcServer, peer_rpc::DirectConnectorRpcServer,
}, },
tunnel::packet_def::{PacketType, ZCPacket}, tunnel::packet_def::{PacketType, ZCPacket},
@@ -69,14 +70,19 @@ struct ForeignNetworkEntry {
packet_recv: Mutex<Option<PacketRecvChanReceiver>>, packet_recv: Mutex<Option<PacketRecvChanReceiver>>,
bps_limiter: Arc<TokenBucket>,
tasks: Mutex<JoinSet<()>>, tasks: Mutex<JoinSet<()>>,
pub lock: Mutex<()>,
} }
impl ForeignNetworkEntry { impl ForeignNetworkEntry {
fn new( fn new(
network: NetworkIdentity, network: NetworkIdentity,
global_ctx: ArcGlobalCtx, // NOTICE: ospf route need my_peer_id be changed after restart.
my_peer_id: PeerId, my_peer_id: PeerId,
global_ctx: ArcGlobalCtx,
relay_data: bool, relay_data: bool,
pm_packet_sender: PacketRecvChan, pm_packet_sender: PacketRecvChan,
) -> Self { ) -> Self {
@@ -99,6 +105,16 @@ impl ForeignNetworkEntry {
&network.network_name, &network.network_name,
); );
let relay_bps_limit = global_ctx.config.get_flags().foreign_relay_bps_limit;
let limiter_config = LimiterConfig {
burst_rate: None,
bps: Some(relay_bps_limit),
fill_duration_ms: None,
};
let bps_limiter = global_ctx
.token_bucket_manager()
.get_or_create(&network.network_name, limiter_config.into());
Self { Self {
my_peer_id, my_peer_id,
@@ -113,7 +129,11 @@ impl ForeignNetworkEntry {
packet_recv: Mutex::new(Some(packet_recv)), packet_recv: Mutex::new(Some(packet_recv)),
bps_limiter,
tasks: Mutex::new(JoinSet::new()), tasks: Mutex::new(JoinSet::new()),
lock: Mutex::new(()),
} }
} }
@@ -202,11 +222,7 @@ impl ForeignNetworkEntry {
(peer_rpc, rpc_transport_sender) (peer_rpc, rpc_transport_sender)
} }
async fn prepare_route( async fn prepare_route(&self, accessor: Box<dyn GlobalForeignNetworkAccessor>) {
&self,
my_peer_id: PeerId,
accessor: Box<dyn GlobalForeignNetworkAccessor>,
) {
struct Interface { struct Interface {
my_peer_id: PeerId, my_peer_id: PeerId,
peer_map: Weak<PeerMap>, peer_map: Weak<PeerMap>,
@@ -238,10 +254,14 @@ impl ForeignNetworkEntry {
} }
} }
let route = PeerRoute::new(my_peer_id, self.global_ctx.clone(), self.peer_rpc.clone()); let route = PeerRoute::new(
self.my_peer_id,
self.global_ctx.clone(),
self.peer_rpc.clone(),
);
route route
.open(Box::new(Interface { .open(Box::new(Interface {
my_peer_id, my_peer_id: self.my_peer_id,
network_identity: self.network.clone(), network_identity: self.network.clone(),
peer_map: Arc::downgrade(&self.peer_map), peer_map: Arc::downgrade(&self.peer_map),
accessor, accessor,
@@ -260,6 +280,7 @@ impl ForeignNetworkEntry {
let relay_data = self.relay_data; let relay_data = self.relay_data;
let pm_sender = self.pm_packet_sender.lock().await.take().unwrap(); let pm_sender = self.pm_packet_sender.lock().await.take().unwrap();
let network_name = self.network.network_name.clone(); let network_name = self.network.network_name.clone();
let bps_limiter = self.bps_limiter.clone();
self.tasks.lock().await.spawn(async move { self.tasks.lock().await.spawn(async move {
while let Ok(zc_packet) = recv_packet_from_chan(&mut recv).await { while let Ok(zc_packet) = recv_packet_from_chan(&mut recv).await {
@@ -279,8 +300,16 @@ impl ForeignNetworkEntry {
} }
tracing::trace!(?hdr, "ignore packet in foreign network"); tracing::trace!(?hdr, "ignore packet in foreign network");
} else { } else {
if !relay_data && hdr.packet_type == PacketType::Data as u8 { if hdr.packet_type == PacketType::Data as u8
continue; || hdr.packet_type == PacketType::KcpSrc as u8
|| hdr.packet_type == PacketType::KcpDst as u8
{
if !relay_data {
continue;
}
if !bps_limiter.try_consume(hdr.len.into()) {
continue;
}
} }
let gateway_peer_id = peer_map let gateway_peer_id = peer_map
@@ -317,8 +346,8 @@ impl ForeignNetworkEntry {
}); });
} }
async fn prepare(&self, my_peer_id: PeerId, accessor: Box<dyn GlobalForeignNetworkAccessor>) { async fn prepare(&self, accessor: Box<dyn GlobalForeignNetworkAccessor>) {
self.prepare_route(my_peer_id, accessor).await; self.prepare_route(accessor).await;
self.start_packet_recv().await; self.start_packet_recv().await;
self.peer_rpc.run(); self.peer_rpc.run();
} }
@@ -400,8 +429,8 @@ impl ForeignNetworkManagerData {
new_added = true; new_added = true;
Arc::new(ForeignNetworkEntry::new( Arc::new(ForeignNetworkEntry::new(
network_identity.clone(), network_identity.clone(),
global_ctx.clone(),
my_peer_id, my_peer_id,
global_ctx.clone(),
relay_data, relay_data,
pm_packet_sender.clone(), pm_packet_sender.clone(),
)) ))
@@ -417,9 +446,7 @@ impl ForeignNetworkManagerData {
drop(l); drop(l);
if new_added { if new_added {
entry entry.prepare(Box::new(self.accessor.clone())).await;
.prepare(my_peer_id, Box::new(self.accessor.clone()))
.await;
} }
(entry, new_added) (entry, new_added)
@@ -467,6 +494,13 @@ impl ForeignNetworkManager {
} }
} }
pub fn get_network_peer_id(&self, network_name: &str) -> Option<PeerId> {
self.data
.network_peer_maps
.get(network_name)
.and_then(|v| Some(v.my_peer_id))
}
pub async fn add_peer_conn(&self, peer_conn: PeerConn) -> Result<(), Error> { pub async fn add_peer_conn(&self, peer_conn: PeerConn) -> Result<(), Error> {
tracing::info!(peer_conn = ?peer_conn.get_conn_info(), network = ?peer_conn.get_network_identity(), "add new peer conn in foreign network manager"); tracing::info!(peer_conn = ?peer_conn.get_conn_info(), network = ?peer_conn.get_network_identity(), "add new peer conn in foreign network manager");
@@ -483,7 +517,7 @@ impl ForeignNetworkManager {
.data .data
.get_or_insert_entry( .get_or_insert_entry(
&peer_conn.get_network_identity(), &peer_conn.get_network_identity(),
self.my_peer_id, peer_conn.get_my_peer_id(),
peer_conn.get_peer_id(), peer_conn.get_peer_id(),
!ret.is_err(), !ret.is_err(),
&self.global_ctx, &self.global_ctx,
@@ -491,17 +525,30 @@ impl ForeignNetworkManager {
) )
.await; .await;
if entry.network != peer_conn.get_network_identity() { let _g = entry.lock.lock().await;
if entry.network != peer_conn.get_network_identity()
|| entry.my_peer_id != peer_conn.get_my_peer_id()
{
if new_added { if new_added {
self.data self.data
.remove_network(&entry.network.network_name.clone()); .remove_network(&entry.network.network_name.clone());
} }
return Err(anyhow::anyhow!( let err = if entry.my_peer_id != peer_conn.get_my_peer_id() {
"network secret not match. exp: {:?} real: {:?}", anyhow::anyhow!(
entry.network, "my peer id not match. exp: {:?} real: {:?}, need retry connect",
peer_conn.get_network_identity() entry.my_peer_id,
) peer_conn.get_my_peer_id()
.into()); )
} else {
anyhow::anyhow!(
"network secret not match. exp: {:?} real: {:?}",
entry.network,
peer_conn.get_network_identity()
)
};
tracing::error!(?err, "foreign network entry not match, disconnect peer");
return Err(err.into());
} }
if new_added { if new_added {
@@ -567,7 +614,8 @@ impl ForeignNetworkManager {
.network_secret_digest .network_secret_digest
.unwrap_or_default() .unwrap_or_default()
.to_vec(), .to_vec(),
..Default::default() my_peer_id_for_this_network: item.my_peer_id,
peers: Default::default(),
}; };
for peer in item.peer_map.list_peers().await { for peer in item.peer_map.list_peers().await {
let mut peer_info = PeerInfo::default(); let mut peer_info = PeerInfo::default();
@@ -614,8 +662,6 @@ impl Drop for ForeignNetworkManager {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::time::Duration;
use crate::{ use crate::{
common::global_ctx::tests::get_mock_global_ctx_with_network, common::global_ctx::tests::get_mock_global_ctx_with_network,
connector::udp_hole_punch::tests::{ connector::udp_hole_punch::tests::{
@@ -629,6 +675,7 @@ mod tests {
set_global_var, set_global_var,
tunnel::common::tests::wait_for_condition, tunnel::common::tests::wait_for_condition,
}; };
use std::time::Duration;
use super::*; use super::*;
@@ -698,7 +745,7 @@ mod tests {
let s_ret = let s_ret =
tokio::spawn(async move { b_mgr_copy.add_tunnel_as_server(b_ring, true).await }); tokio::spawn(async move { b_mgr_copy.add_tunnel_as_server(b_ring, true).await });
pma_net1.add_client_tunnel(a_ring).await.unwrap(); pma_net1.add_client_tunnel(a_ring, false).await.unwrap();
s_ret.await.unwrap().unwrap(); s_ret.await.unwrap().unwrap();
} }
@@ -769,7 +816,10 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!( assert_eq!(
vec![pm_center.my_peer_id()], vec![pm_center
.get_foreign_network_manager()
.get_network_peer_id("net1")
.unwrap()],
pma_net1 pma_net1
.get_foreign_network_client() .get_foreign_network_client()
.get_peer_map() .get_peer_map()
@@ -777,7 +827,10 @@ mod tests {
.await .await
); );
assert_eq!( assert_eq!(
vec![pm_center.my_peer_id()], vec![pm_center
.get_foreign_network_manager()
.get_network_peer_id("net1")
.unwrap()],
pmb_net1 pmb_net1
.get_foreign_network_client() .get_foreign_network_client()
.get_peer_map() .get_peer_map()
@@ -894,6 +947,75 @@ mod tests {
.await; .await;
} }
#[tokio::test]
async fn test_foreign_network_manager_cluster_simple() {
set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
let pm_center1 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
let pm_center2 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
connect_peer_manager(pm_center1.clone(), pm_center2.clone()).await;
let pma_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
let pmb_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
connect_peer_manager(pma_net1.clone(), pm_center1.clone()).await;
connect_peer_manager(pmb_net1.clone(), pm_center2.clone()).await;
wait_route_appear(pma_net1.clone(), pmb_net1.clone())
.await
.unwrap();
let pma_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
let pmb_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
connect_peer_manager(pma_net2.clone(), pm_center1.clone()).await;
connect_peer_manager(pmb_net2.clone(), pm_center2.clone()).await;
wait_route_appear(pma_net2.clone(), pmb_net2.clone())
.await
.unwrap();
}
#[tokio::test]
async fn test_foreign_network_manager_cluster_multiple_hops() {
set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
let pm_center1 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
let pm_center2 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
let pm_center3 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
let pm_center4 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
connect_peer_manager(pm_center1.clone(), pm_center2.clone()).await;
connect_peer_manager(pm_center2.clone(), pm_center3.clone()).await;
connect_peer_manager(pm_center3.clone(), pm_center4.clone()).await;
let pma_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
let pmb_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
connect_peer_manager(pma_net1.clone(), pm_center1.clone()).await;
connect_peer_manager(pmb_net1.clone(), pm_center3.clone()).await;
wait_route_appear(pma_net1.clone(), pmb_net1.clone())
.await
.unwrap();
let pmc_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
connect_peer_manager(pmc_net1.clone(), pm_center4.clone()).await;
wait_route_appear(pma_net1.clone(), pmc_net1.clone())
.await
.unwrap();
let pma_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
let pmb_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
connect_peer_manager(pma_net2.clone(), pm_center1.clone()).await;
connect_peer_manager(pmb_net2.clone(), pm_center4.clone()).await;
wait_route_appear(pma_net2.clone(), pmb_net2.clone())
.await
.unwrap();
drop(pmb_net2);
wait_for_condition(
|| async { pma_net2.list_routes().await.len() == 1 },
Duration::from_secs(5),
)
.await;
}
#[tokio::test] #[tokio::test]
async fn test_foreign_network_manager_cluster() { async fn test_foreign_network_manager_cluster() {
set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1); set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
+12 -1
View File
@@ -1,7 +1,7 @@
use std::sync::Arc; use std::sync::Arc;
use crossbeam::atomic::AtomicCell; use crossbeam::atomic::AtomicCell;
use dashmap::DashMap; use dashmap::{DashMap, DashSet};
use tokio::{select, sync::mpsc}; use tokio::{select, sync::mpsc};
@@ -200,6 +200,17 @@ impl Peer {
ret ret
} }
pub fn has_directly_connected_conn(&self) -> bool {
self.conns.iter().any(|entry|!(entry.value()).is_hole_punched())
}
pub fn get_directly_connections(&self) -> DashSet<uuid::Uuid> {
self.conns.iter()
.filter(|entry| !(entry.value()).is_hole_punched())
.map(|entry|(entry.value()).get_conn_id())
.collect()
}
pub fn get_default_conn_id(&self) -> PeerConnId { pub fn get_default_conn_id(&self) -> PeerConnId {
self.default_conn_id.load() self.default_conn_id.load()
} }
+49
View File
@@ -101,6 +101,9 @@ pub struct PeerConn {
info: Option<HandshakeRequest>, info: Option<HandshakeRequest>,
is_client: Option<bool>, is_client: Option<bool>,
// remote or local
is_hole_punched: bool,
close_event_notifier: Arc<PeerConnCloseNotify>, close_event_notifier: Arc<PeerConnCloseNotify>,
ctrl_resp_sender: broadcast::Sender<ZCPacket>, ctrl_resp_sender: broadcast::Sender<ZCPacket>,
@@ -152,6 +155,8 @@ impl PeerConn {
info: None, info: None,
is_client: None, is_client: None,
is_hole_punched: true,
close_event_notifier: Arc::new(PeerConnCloseNotify::new(conn_id)), close_event_notifier: Arc::new(PeerConnCloseNotify::new(conn_id)),
ctrl_resp_sender: ctrl_sender, ctrl_resp_sender: ctrl_sender,
@@ -166,6 +171,14 @@ impl PeerConn {
self.conn_id self.conn_id
} }
pub fn set_is_hole_punched(&mut self, is_hole_punched: bool) {
self.is_hole_punched = is_hole_punched;
}
pub fn is_hole_punched(&self) -> bool {
self.is_hole_punched
}
async fn wait_handshake(&mut self, need_retry: &mut bool) -> Result<HandshakeRequest, Error> { async fn wait_handshake(&mut self, need_retry: &mut bool) -> Result<HandshakeRequest, Error> {
*need_retry = false; *need_retry = false;
@@ -266,6 +279,31 @@ impl PeerConn {
Ok(()) Ok(())
} }
#[tracing::instrument(skip(handshake_recved))]
pub async fn do_handshake_as_server_ext<Fn>(
&mut self,
mut handshake_recved: Fn,
) -> Result<(), Error>
where
Fn: FnMut(&mut Self, &HandshakeRequest) -> Result<(), Error> + Send,
{
let rsp = self.wait_handshake_loop().await?;
handshake_recved(self, &rsp)?;
tracing::info!("handshake request: {:?}", rsp);
self.info = Some(rsp);
self.is_client = Some(false);
self.send_handshake().await?;
if self.get_peer_id() == self.my_peer_id {
Err(Error::WaitRespError("peer id conflict".to_owned()))
} else {
Ok(())
}
}
#[tracing::instrument] #[tracing::instrument]
pub async fn do_handshake_as_server(&mut self) -> Result<(), Error> { pub async fn do_handshake_as_server(&mut self) -> Result<(), Error> {
let rsp = self.wait_handshake_loop().await?; let rsp = self.wait_handshake_loop().await?;
@@ -435,6 +473,17 @@ impl PeerConn {
is_closed: self.close_event_notifier.is_closed(), is_closed: self.close_event_notifier.is_closed(),
} }
} }
pub fn set_peer_id(&mut self, peer_id: PeerId) {
if self.info.is_some() {
panic!("set_peer_id should only be called before handshake");
}
self.my_peer_id = peer_id;
}
pub fn get_my_peer_id(&self) -> PeerId {
self.my_peer_id
}
} }
impl Drop for PeerConn { impl Drop for PeerConn {
+105 -101
View File
@@ -23,7 +23,7 @@ use crate::{
compressor::{Compressor as _, DefaultCompressor}, compressor::{Compressor as _, DefaultCompressor},
constants::EASYTIER_VERSION, constants::EASYTIER_VERSION,
error::Error, error::Error,
global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity}, global_ctx::{ArcGlobalCtx, NetworkIdentity},
stun::StunInfoCollectorTrait, stun::StunInfoCollectorTrait,
PeerId, PeerId,
}, },
@@ -142,8 +142,7 @@ pub struct PeerManager {
exit_nodes: Vec<Ipv4Addr>, exit_nodes: Vec<Ipv4Addr>,
// conns that are directly connected (which are not hole punched) reserved_my_peer_id_map: DashMap<String, PeerId>,
directly_connected_conn_map: Arc<DashMap<PeerId, DashSet<uuid::Uuid>>>,
} }
impl Debug for PeerManager { impl Debug for PeerManager {
@@ -271,7 +270,7 @@ impl PeerManager {
exit_nodes, exit_nodes,
directly_connected_conn_map: Arc::new(DashMap::new()), reserved_my_peer_id_map: DashMap::new(),
} }
} }
@@ -315,8 +314,10 @@ impl PeerManager {
pub async fn add_client_tunnel( pub async fn add_client_tunnel(
&self, &self,
tunnel: Box<dyn Tunnel>, tunnel: Box<dyn Tunnel>,
is_directly_connected: bool,
) -> Result<(PeerId, PeerConnId), Error> { ) -> Result<(PeerId, PeerConnId), Error> {
let mut peer = PeerConn::new(self.my_peer_id, self.global_ctx.clone(), tunnel); let mut peer = PeerConn::new(self.my_peer_id, self.global_ctx.clone(), tunnel);
peer.set_is_hole_punched(!is_directly_connected);
peer.do_handshake_as_client().await?; peer.do_handshake_as_client().await?;
let conn_id = peer.get_conn_id(); let conn_id = peer.get_conn_id();
let peer_id = peer.get_peer_id(); let peer_id = peer.get_peer_id();
@@ -330,72 +331,12 @@ impl PeerManager {
Ok((peer_id, conn_id)) Ok((peer_id, conn_id))
} }
fn add_directly_connected_conn(&self, peer_id: PeerId, conn_id: uuid::Uuid) {
let _ = self
.directly_connected_conn_map
.entry(peer_id)
.or_insert_with(DashSet::new)
.insert(conn_id);
}
pub fn has_directly_connected_conn(&self, peer_id: PeerId) -> bool { pub fn has_directly_connected_conn(&self, peer_id: PeerId) -> bool {
self.directly_connected_conn_map if let Some(peer) = self.peers.get_peer_by_id(peer_id) {
.get(&peer_id) peer.has_directly_connected_conn()
.map_or(false, |x| !x.is_empty()) } else {
} false
}
async fn start_peer_conn_close_event_handler(&self) {
let dmap = self.directly_connected_conn_map.clone();
let mut event_recv = self.global_ctx.subscribe();
let peer_map = self.peers.clone();
use tokio::sync::broadcast::error::RecvError;
self.tasks.lock().await.spawn(async move {
loop {
match event_recv.recv().await {
Err(RecvError::Closed) => {
tracing::error!("peer conn close event handler exit");
break;
}
Err(RecvError::Lagged(_)) => {
tracing::warn!("peer conn close event handler lagged");
event_recv = event_recv.resubscribe();
let alive_conns = peer_map.get_alive_conns();
for p in dmap.iter_mut() {
p.retain(|x| alive_conns.contains_key(&(*p.key(), *x)));
}
dmap.retain(|_, v| !v.is_empty());
}
Ok(event) => {
if let GlobalCtxEvent::PeerConnRemoved(info) = event {
let mut need_remove = false;
if let Some(set) = dmap.get_mut(&info.peer_id) {
let conn_id = info.conn_id.parse().unwrap();
let old = set.remove(&conn_id);
tracing::info!(
?old,
?info,
"try remove conn id from directly connected map"
);
need_remove = set.is_empty();
}
if need_remove {
dmap.remove(&info.peer_id);
}
}
}
}
}
});
}
pub async fn add_direct_tunnel(
&self,
t: Box<dyn Tunnel>,
) -> Result<(PeerId, PeerConnId), Error> {
let (peer_id, conn_id) = self.add_client_tunnel(t).await?;
self.add_directly_connected_conn(peer_id, conn_id);
Ok((peer_id, conn_id))
} }
#[tracing::instrument] #[tracing::instrument]
@@ -410,10 +351,10 @@ impl PeerManager {
let t = ns let t = ns
.run_async(|| async move { connector.connect().await }) .run_async(|| async move { connector.connect().await })
.await?; .await?;
self.add_direct_tunnel(t).await self.add_client_tunnel(t, true).await
} }
#[tracing::instrument] #[tracing::instrument(ret)]
pub async fn add_tunnel_as_server( pub async fn add_tunnel_as_server(
&self, &self,
tunnel: Box<dyn Tunnel>, tunnel: Box<dyn Tunnel>,
@@ -421,32 +362,57 @@ impl PeerManager {
) -> Result<(), Error> { ) -> Result<(), Error> {
tracing::info!("add tunnel as server start"); tracing::info!("add tunnel as server start");
let mut peer = PeerConn::new(self.my_peer_id, self.global_ctx.clone(), tunnel); let mut peer = PeerConn::new(self.my_peer_id, self.global_ctx.clone(), tunnel);
peer.do_handshake_as_server().await?; peer.do_handshake_as_server_ext(|peer, msg| {
if self.global_ctx.config.get_flags().private_mode if msg.network_name
&& peer.get_network_identity().network_name == self.global_ctx.get_network_identity().network_name
!= self.global_ctx.get_network_identity().network_name {
{ return Ok(());
return Err(Error::SecretKeyError(
"private mode is turned on, network identity not match".to_string(),
));
}
if peer.get_network_identity().network_name
== self.global_ctx.get_network_identity().network_name
{
let (peer_id, conn_id) = (peer.get_peer_id(), peer.get_conn_id());
self.add_new_peer_conn(peer).await?;
if is_directly_connected {
self.add_directly_connected_conn(peer_id, conn_id);
} }
if self.global_ctx.config.get_flags().private_mode {
return Err(Error::SecretKeyError(
"private mode is turned on, network identity not match".to_string(),
));
}
let mut peer_id = self
.foreign_network_manager
.get_network_peer_id(&msg.network_name);
if peer_id.is_none() {
peer_id = Some(*self.reserved_my_peer_id_map.entry(msg.network_name.clone()).or_insert_with(|| {
rand::random::<PeerId>()
}).value());
}
peer.set_peer_id(peer_id.clone().unwrap());
tracing::info!(
?peer_id,
?msg.network_name,
"handshake as server with foreign network, new peer id: {}, peer id in foreign manager: {:?}",
peer.get_my_peer_id(), peer_id
);
Ok(())
})
.await?;
let peer_network_name = peer.get_network_identity().network_name.clone();
if peer_network_name == self.global_ctx.get_network_identity().network_name {
peer.set_is_hole_punched(!is_directly_connected);
self.add_new_peer_conn(peer).await?;
} else { } else {
self.foreign_network_manager.add_peer_conn(peer).await?; self.foreign_network_manager.add_peer_conn(peer).await?;
} }
self.reserved_my_peer_id_map.remove(&peer_network_name);
tracing::info!("add tunnel as server done"); tracing::info!("add tunnel as server done");
Ok(()) Ok(())
} }
async fn try_handle_foreign_network_packet( async fn try_handle_foreign_network_packet(
packet: ZCPacket, mut packet: ZCPacket,
my_peer_id: PeerId, my_peer_id: PeerId,
peer_map: &PeerMap, peer_map: &PeerMap,
foreign_network_mgr: &ForeignNetworkManager, foreign_network_mgr: &ForeignNetworkManager,
@@ -463,6 +429,10 @@ impl PeerManager {
let foreign_network_name = foreign_hdr.get_network_name(packet.payload()); let foreign_network_name = foreign_hdr.get_network_name(packet.payload());
let foreign_peer_id = foreign_hdr.get_dst_peer_id(); let foreign_peer_id = foreign_hdr.get_dst_peer_id();
let foreign_network_my_peer_id =
foreign_network_mgr.get_network_peer_id(&foreign_network_name);
// NOTICE: the to peer id is modified by the src from foreign network my peer id to the origin my peer id
if to_peer_id == my_peer_id { if to_peer_id == my_peer_id {
// packet sent from other peer to me, extract the inner packet and forward it // packet sent from other peer to me, extract the inner packet and forward it
if let Err(e) = foreign_network_mgr if let Err(e) = foreign_network_mgr
@@ -481,7 +451,27 @@ impl PeerManager {
); );
} }
Ok(()) Ok(())
} else if from_peer_id == my_peer_id { } else if Some(from_peer_id) == foreign_network_my_peer_id {
// to_peer_id is my peer id for the foreign network, need to convert to the origin my_peer_id of dst
let Some(to_peer_id) = peer_map
.get_origin_my_peer_id(&foreign_network_name, to_peer_id)
.await
else {
tracing::debug!(
?foreign_network_name,
?to_peer_id,
"cannot find origin my peer id for foreign network."
);
return Err(packet);
};
// modify the to_peer id from foreign network my peer id to the origin my peer id
packet
.mut_peer_manager_header()
.unwrap()
.to_peer_id
.set(to_peer_id);
// packet is generated from foreign network mgr and should be forward to other peer // packet is generated from foreign network mgr and should be forward to other peer
if let Err(e) = peer_map if let Err(e) = peer_map
.send_msg(packet, to_peer_id, NextHopPolicy::LeastHop) .send_msg(packet, to_peer_id, NextHopPolicy::LeastHop)
@@ -496,7 +486,7 @@ impl PeerManager {
Ok(()) Ok(())
} else { } else {
// target is not me, forward it // target is not me, forward it. try get origin peer id
Err(packet) Err(packet)
} }
} }
@@ -717,6 +707,7 @@ impl PeerManager {
last_update: Some(last_update.into()), last_update: Some(last_update.into()),
version: 0, version: 0,
network_secret_digest: info.network_secret_digest.clone(), network_secret_digest: info.network_secret_digest.clone(),
my_peer_id_for_this_network: info.my_peer_id_for_this_network,
}, },
); );
} }
@@ -963,11 +954,9 @@ impl PeerManager {
async fn run_clean_peer_without_conn_routine(&self) { async fn run_clean_peer_without_conn_routine(&self) {
let peer_map = self.peers.clone(); let peer_map = self.peers.clone();
let dmap = self.directly_connected_conn_map.clone();
self.tasks.lock().await.spawn(async move { self.tasks.lock().await.spawn(async move {
loop { loop {
peer_map.clean_peer_without_conn().await; peer_map.clean_peer_without_conn().await;
dmap.retain(|p, v| peer_map.has_peer(*p) && !v.is_empty());
tokio::time::sleep(std::time::Duration::from_secs(3)).await; tokio::time::sleep(std::time::Duration::from_secs(3)).await;
} }
}); });
@@ -984,8 +973,6 @@ impl PeerManager {
} }
pub async fn run(&self) -> Result<(), Error> { pub async fn run(&self) -> Result<(), Error> {
self.start_peer_conn_close_event_handler().await;
match &self.route_algo_inst { match &self.route_algo_inst {
RouteAlgoInst::Ospf(route) => self.add_route(route.clone()).await, RouteAlgoInst::Ospf(route) => self.add_route(route.clone()).await,
RouteAlgoInst::None => {} RouteAlgoInst::None => {}
@@ -1044,9 +1031,16 @@ impl PeerManager {
.unwrap_or_default(), .unwrap_or_default(),
proxy_cidrs: self proxy_cidrs: self
.global_ctx .global_ctx
.config
.get_proxy_cidrs() .get_proxy_cidrs()
.into_iter() .into_iter()
.map(|x| x.to_string()) .map(|x| {
if x.mapped_cidr.is_none() {
x.cidr.to_string()
} else {
format!("{}->{}", x.cidr, x.mapped_cidr.unwrap())
}
})
.collect(), .collect(),
hostname: self.global_ctx.get_hostname(), hostname: self.global_ctx.get_hostname(),
stun_info: Some(self.global_ctx.get_stun_info_collector().get_stun_info()), stun_info: Some(self.global_ctx.get_stun_info_collector().get_stun_info()),
@@ -1071,10 +1065,20 @@ impl PeerManager {
} }
pub fn get_directly_connections(&self, peer_id: PeerId) -> DashSet<uuid::Uuid> { pub fn get_directly_connections(&self, peer_id: PeerId) -> DashSet<uuid::Uuid> {
self.directly_connected_conn_map if let Some(peer) = self.peers.get_peer_by_id(peer_id) {
.get(&peer_id) return peer.get_directly_connections();
.map(|x| x.clone()) }
.unwrap_or_default()
DashSet::new()
}
pub async fn clear_resources(&self) {
let mut peer_pipeline = self.peer_packet_process_pipeline.write().await;
peer_pipeline.clear();
let mut nic_pipeline = self.nic_packet_process_pipeline.write().await;
nic_pipeline.clear();
self.peer_rpc_mgr.rpc_server().registry().unregister_all();
} }
} }
@@ -1154,7 +1158,7 @@ mod tests {
}); });
server_mgr server_mgr
.add_client_tunnel(server.accept().await.unwrap()) .add_client_tunnel(server.accept().await.unwrap(), false)
.await .await
.unwrap(); .unwrap();
} }
@@ -1359,7 +1363,7 @@ mod tests {
let a_mgr_copy = peer_mgr_a.clone(); let a_mgr_copy = peer_mgr_a.clone();
tokio::spawn(async move { tokio::spawn(async move {
a_mgr_copy.add_client_tunnel(a_ring).await.unwrap(); a_mgr_copy.add_client_tunnel(a_ring, false).await.unwrap();
}); });
let b_mgr_copy = peer_mgr_b.clone(); let b_mgr_copy = peer_mgr_b.clone();
tokio::spawn(async move { tokio::spawn(async move {
+22 -7
View File
@@ -10,7 +10,7 @@ use crate::{
global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity}, global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity},
PeerId, PeerId,
}, },
proto::{cli::PeerConnInfo, common::PeerFeatureFlag}, proto::{cli::PeerConnInfo, peer_rpc::RoutePeerInfo},
tunnel::{packet_def::ZCPacket, TunnelError}, tunnel::{packet_def::ZCPacket, TunnelError},
}; };
@@ -87,7 +87,7 @@ impl PeerMap {
}); });
} }
fn get_peer_by_id(&self, peer_id: PeerId) -> Option<Arc<Peer>> { pub fn get_peer_by_id(&self, peer_id: PeerId) -> Option<Arc<Peer>> {
self.peer_map.get(&peer_id).map(|v| v.clone()) self.peer_map.get(&peer_id).map(|v| v.clone())
} }
@@ -194,12 +194,27 @@ impl PeerMap {
None None
} }
pub async fn get_peer_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag> { pub async fn get_route_peer_info(&self, peer_id: PeerId) -> Option<RoutePeerInfo> {
for route in self.routes.read().await.iter() { for route in self.routes.read().await.iter() {
let feature_flag = route.get_feature_flag(peer_id).await; if let Some(info) = route.get_peer_info(peer_id).await {
if feature_flag.is_some() { return Some(info);
return feature_flag; }
}; }
None
}
pub async fn get_origin_my_peer_id(
&self,
network_name: &str,
foreign_my_peer_id: PeerId,
) -> Option<PeerId> {
for route in self.routes.read().await.iter() {
let origin_peer_id = route
.get_origin_my_peer_id(network_name, foreign_my_peer_id)
.await;
if origin_peer_id.is_some() {
return origin_peer_id;
}
} }
None None
} }
+62 -13
View File
@@ -33,7 +33,7 @@ use crate::{
}, },
peers::route_trait::{Route, RouteInterfaceBox}, peers::route_trait::{Route, RouteInterfaceBox},
proto::{ proto::{
common::{Ipv4Inet, NatType, PeerFeatureFlag, StunInfo}, common::{Ipv4Inet, NatType, StunInfo},
peer_rpc::{ peer_rpc::{
route_foreign_network_infos, ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, route_foreign_network_infos, ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey,
OspfRouteRpc, OspfRouteRpcClientFactory, OspfRouteRpcServer, PeerIdVersion, OspfRouteRpc, OspfRouteRpcClientFactory, OspfRouteRpcServer, PeerIdVersion,
@@ -124,6 +124,7 @@ impl RoutePeerInfo {
feature_flag: None, feature_flag: None,
peer_route_id: 0, peer_route_id: 0,
network_length: 24, network_length: 24,
quic_port: None,
} }
} }
@@ -139,10 +140,12 @@ impl RoutePeerInfo {
cost: 0, cost: 0,
ipv4_addr: global_ctx.get_ipv4().map(|x| x.address().into()), ipv4_addr: global_ctx.get_ipv4().map(|x| x.address().into()),
proxy_cidrs: global_ctx proxy_cidrs: global_ctx
.config
.get_proxy_cidrs() .get_proxy_cidrs()
.iter() .iter()
.map(|x| x.mapped_cidr.unwrap_or(x.cidr))
.chain(global_ctx.get_vpn_portal_cidr())
.map(|x| x.to_string()) .map(|x| x.to_string())
.chain(global_ctx.get_vpn_portal_cidr().map(|x| x.to_string()))
.collect(), .collect(),
hostname: Some(global_ctx.get_hostname()), hostname: Some(global_ctx.get_hostname()),
udp_stun_info: global_ctx udp_stun_info: global_ctx
@@ -160,6 +163,8 @@ impl RoutePeerInfo {
.get_ipv4() .get_ipv4()
.map(|x| x.network_length() as u32) .map(|x| x.network_length() as u32)
.unwrap_or(24), .unwrap_or(24),
quic_port: global_ctx.get_quic_proxy_port().map(|x| x as u32),
}; };
let need_update_periodically = if let Ok(Ok(d)) = let need_update_periodically = if let Ok(Ok(d)) =
@@ -854,13 +859,33 @@ impl RouteTable {
self.peer_infos.insert(*peer_id, info.clone()); self.peer_infos.insert(*peer_id, info.clone());
let is_new_peer_better = |old_peer_id: PeerId| -> bool {
let old_next_hop = self.get_next_hop(old_peer_id);
let new_next_hop = item.value();
old_next_hop.is_none() || new_next_hop.path_len < old_next_hop.unwrap().path_len
};
if let Some(ipv4_addr) = info.ipv4_addr { if let Some(ipv4_addr) = info.ipv4_addr {
self.ipv4_peer_id_map.insert(ipv4_addr.into(), *peer_id); self.ipv4_peer_id_map
.entry(ipv4_addr.into())
.and_modify(|v| {
if *v != *peer_id && is_new_peer_better(*v) {
*v = *peer_id;
}
})
.or_insert(*peer_id);
} }
for cidr in info.proxy_cidrs.iter() { for cidr in info.proxy_cidrs.iter() {
self.cidr_peer_id_map self.cidr_peer_id_map
.insert(cidr.parse().unwrap(), *peer_id); .entry(cidr.parse().unwrap())
.and_modify(|v| {
if *v != *peer_id && is_new_peer_better(*v) {
// if the next hop is not set or the new next hop is better, update it.
*v = *peer_id;
}
})
.or_insert(*peer_id);
} }
} }
} }
@@ -1059,6 +1084,7 @@ struct PeerRouteServiceImpl {
route_table: RouteTable, route_table: RouteTable,
route_table_with_cost: RouteTable, route_table_with_cost: RouteTable,
foreign_network_owner_map: DashMap<NetworkIdentity, Vec<PeerId>>, foreign_network_owner_map: DashMap<NetworkIdentity, Vec<PeerId>>,
foreign_network_my_peer_id_map: DashMap<(String, PeerId), PeerId>,
synced_route_info: SyncedRouteInfo, synced_route_info: SyncedRouteInfo,
cached_local_conn_map: std::sync::Mutex<RouteConnBitmap>, cached_local_conn_map: std::sync::Mutex<RouteConnBitmap>,
cached_local_conn_map_version: AtomicVersion, cached_local_conn_map_version: AtomicVersion,
@@ -1079,6 +1105,10 @@ impl Debug for PeerRouteServiceImpl {
.field("route_table_with_cost", &self.route_table_with_cost) .field("route_table_with_cost", &self.route_table_with_cost)
.field("synced_route_info", &self.synced_route_info) .field("synced_route_info", &self.synced_route_info)
.field("foreign_network_owner_map", &self.foreign_network_owner_map) .field("foreign_network_owner_map", &self.foreign_network_owner_map)
.field(
"foreign_network_my_peer_id_map",
&self.foreign_network_my_peer_id_map,
)
.field( .field(
"cached_local_conn_map", "cached_local_conn_map",
&self.cached_local_conn_map.lock().unwrap(), &self.cached_local_conn_map.lock().unwrap(),
@@ -1102,6 +1132,7 @@ impl PeerRouteServiceImpl {
route_table: RouteTable::new(), route_table: RouteTable::new(),
route_table_with_cost: RouteTable::new(), route_table_with_cost: RouteTable::new(),
foreign_network_owner_map: DashMap::new(), foreign_network_owner_map: DashMap::new(),
foreign_network_my_peer_id_map: DashMap::new(),
synced_route_info: SyncedRouteInfo { synced_route_info: SyncedRouteInfo {
peer_infos: DashMap::new(), peer_infos: DashMap::new(),
@@ -1241,6 +1272,7 @@ impl PeerRouteServiceImpl {
} }
fn update_foreign_network_owner_map(&self) { fn update_foreign_network_owner_map(&self) {
self.foreign_network_my_peer_id_map.clear();
self.foreign_network_owner_map.clear(); self.foreign_network_owner_map.clear();
for item in self.synced_route_info.foreign_network.iter() { for item in self.synced_route_info.foreign_network.iter() {
let key = item.key(); let key = item.key();
@@ -1265,7 +1297,12 @@ impl PeerRouteServiceImpl {
self.foreign_network_owner_map self.foreign_network_owner_map
.entry(network_identity) .entry(network_identity)
.or_insert_with(|| Vec::new()) .or_insert_with(|| Vec::new())
.push(key.peer_id); .push(entry.my_peer_id_for_this_network);
self.foreign_network_my_peer_id_map.insert(
(key.network_name.clone(), entry.my_peer_id_for_this_network),
key.peer_id,
);
} }
} }
@@ -1363,7 +1400,7 @@ impl PeerRouteServiceImpl {
.dst_saved_conn_bitmap_version .dst_saved_conn_bitmap_version
.get(&peer_id) .get(&peer_id)
.map(|item| item.get()); .map(|item| item.get());
if Some(*local_version) != peer_version { if peer_version.is_none() || peer_version.unwrap() < *local_version {
need_update = true; need_update = true;
break; break;
} }
@@ -1504,8 +1541,6 @@ impl PeerRouteServiceImpl {
req_dynamic_msg.set_field_by_name("peer_infos", Value::Message(peer_infos)); req_dynamic_msg.set_field_by_name("peer_infos", Value::Message(peer_infos));
} }
tracing::trace!(?req_dynamic_msg, "build_sync_route_raw_req");
req_dynamic_msg req_dynamic_msg
} }
@@ -1621,7 +1656,12 @@ impl PeerRouteServiceImpl {
} }
fn update_peer_info_last_update(&self) { fn update_peer_info_last_update(&self) {
tracing::debug!(?self, "update_peer_info_last_update"); tracing::debug!(
"update_peer_info_last_update, my_peer_id: {:?}, prev: {:?}, new: {:?}",
self.my_peer_id,
self.peer_info_last_update.load(),
std::time::Instant::now()
);
self.peer_info_last_update.store(std::time::Instant::now()); self.peer_info_last_update.store(std::time::Instant::now());
} }
@@ -2064,7 +2104,6 @@ impl PeerRoute {
} }
} }
#[tracing::instrument(skip(session_mgr))]
async fn maintain_session_tasks( async fn maintain_session_tasks(
session_mgr: RouteSessionManager, session_mgr: RouteSessionManager,
service_impl: Arc<PeerRouteServiceImpl>, service_impl: Arc<PeerRouteServiceImpl>,
@@ -2072,7 +2111,6 @@ impl PeerRoute {
session_mgr.maintain_sessions(service_impl).await; session_mgr.maintain_sessions(service_impl).await;
} }
#[tracing::instrument(skip(session_mgr))]
async fn update_my_peer_info_routine( async fn update_my_peer_info_routine(
service_impl: Arc<PeerRouteServiceImpl>, service_impl: Arc<PeerRouteServiceImpl>,
session_mgr: RouteSessionManager, session_mgr: RouteSessionManager,
@@ -2271,12 +2309,23 @@ impl Route for PeerRoute {
.unwrap_or_default() .unwrap_or_default()
} }
async fn get_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag> { async fn get_origin_my_peer_id(
&self,
network_name: &str,
foreign_my_peer_id: PeerId,
) -> Option<PeerId> {
self.service_impl
.foreign_network_my_peer_id_map
.get(&(network_name.to_string(), foreign_my_peer_id))
.map(|x| *x)
}
async fn get_peer_info(&self, peer_id: PeerId) -> Option<RoutePeerInfo> {
self.service_impl self.service_impl
.route_table .route_table
.peer_infos .peer_infos
.get(&peer_id) .get(&peer_id)
.and_then(|x| x.feature_flag.clone()) .map(|x| x.clone())
} }
async fn get_peer_info_last_update_time(&self) -> Instant { async fn get_peer_info_last_update_time(&self) -> Instant {
+14 -6
View File
@@ -4,11 +4,9 @@ use dashmap::DashMap;
use crate::{ use crate::{
common::{global_ctx::NetworkIdentity, PeerId}, common::{global_ctx::NetworkIdentity, PeerId},
proto::{ proto::peer_rpc::{
common::PeerFeatureFlag, ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, RouteForeignNetworkInfos,
peer_rpc::{ RoutePeerInfo,
ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, RouteForeignNetworkInfos,
},
}, },
}; };
@@ -95,9 +93,19 @@ pub trait Route {
Default::default() Default::default()
} }
// my peer id in foreign network is different from the one in local network
// this function is used to get the peer id in local network
async fn get_origin_my_peer_id(
&self,
_network_name: &str,
_foreign_my_peer_id: PeerId,
) -> Option<PeerId> {
None
}
async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {} async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {}
async fn get_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag>; async fn get_peer_info(&self, peer_id: PeerId) -> Option<RoutePeerInfo>;
async fn get_peer_info_last_update_time(&self) -> std::time::Instant; async fn get_peer_info_last_update_time(&self) -> std::time::Instant;
+1 -1
View File
@@ -41,7 +41,7 @@ pub async fn connect_peer_manager(client: Arc<PeerManager>, server: Arc<PeerMana
let (a_ring, b_ring) = create_ring_tunnel_pair(); let (a_ring, b_ring) = create_ring_tunnel_pair();
let a_mgr_copy = client.clone(); let a_mgr_copy = client.clone();
tokio::spawn(async move { tokio::spawn(async move {
a_mgr_copy.add_client_tunnel(a_ring).await.unwrap(); a_mgr_copy.add_client_tunnel(a_ring, false).await.unwrap();
}); });
let b_mgr_copy = server.clone(); let b_mgr_copy = server.clone();
tokio::spawn(async move { tokio::spawn(async move {
+2
View File
@@ -103,6 +103,7 @@ message ListForeignNetworkRequest {}
message ForeignNetworkEntryPb { message ForeignNetworkEntryPb {
repeated PeerInfo peers = 1; repeated PeerInfo peers = 1;
bytes network_secret_digest = 2; bytes network_secret_digest = 2;
uint32 my_peer_id_for_this_network = 3;
} }
message ListForeignNetworkResponse { message ListForeignNetworkResponse {
@@ -186,6 +187,7 @@ service VpnPortalRpc {
enum TcpProxyEntryTransportType { enum TcpProxyEntryTransportType {
TCP = 0; TCP = 0;
KCP = 1; KCP = 1;
QUIC = 2;
} }
enum TcpProxyEntryState { enum TcpProxyEntryState {
+18
View File
@@ -35,6 +35,14 @@ message FlagsInConfig {
bool accept_dns = 22; bool accept_dns = 22;
// enable private mode // enable private mode
bool private_mode = 23; bool private_mode = 23;
// should we convert all tcp streams into quic streams
bool enable_quic_proxy = 24;
// does this peer allow quic input
bool disable_quic_input = 25;
// a global relay limit, only work for foreign network
uint64 foreign_relay_bps_limit = 26;
} }
message RpcDescriptor { message RpcDescriptor {
@@ -171,3 +179,13 @@ message PortForwardConfigPb {
SocketAddr dst_addr = 2; SocketAddr dst_addr = 2;
SocketType socket_type = 3; SocketType socket_type = 3;
} }
message ProxyDstInfo {
SocketAddr dst_addr = 1;
}
message LimiterConfig {
optional uint64 burst_rate = 1; // default 1 means no burst (capacity is same with bps)
optional uint64 bps = 2; // default 0 means no limit (unit is B/s)
optional uint64 fill_duration_ms = 3; // default 10ms, the period to fill the bucket
}
+3
View File
@@ -22,6 +22,8 @@ message RoutePeerInfo {
uint64 peer_route_id = 12; uint64 peer_route_id = 12;
uint32 network_length = 13; uint32 network_length = 13;
optional uint32 quic_port = 14;
} }
message PeerIdVersion { message PeerIdVersion {
@@ -46,6 +48,7 @@ message ForeignNetworkRouteInfoEntry {
google.protobuf.Timestamp last_update = 2; google.protobuf.Timestamp last_update = 2;
uint32 version = 3; uint32 version = 3;
bytes network_secret_digest = 4; bytes network_secret_digest = 4;
uint32 my_peer_id_for_this_network = 5;
} }
message RouteForeignNetworkInfos { message RouteForeignNetworkInfos {
+6 -3
View File
@@ -131,11 +131,14 @@ impl BidirectRpcManager {
} }
}; };
if o.peer_manager_header().unwrap().packet_type == PacketType::RpcReq as u8 { let Some(peer_manager_header) = o.peer_manager_header() else {
tracing::error!("peer manager header not found");
continue;
};
if peer_manager_header.packet_type == PacketType::RpcReq as u8 {
server_tx.send(o).await.unwrap(); server_tx.send(o).await.unwrap();
continue; continue;
} else if o.peer_manager_header().unwrap().packet_type == PacketType::RpcResp as u8 } else if peer_manager_header.packet_type == PacketType::RpcResp as u8 {
{
client_tx.send(o).await.unwrap(); client_tx.send(o).await.unwrap();
continue; continue;
} }
@@ -96,6 +96,10 @@ impl ServiceRegistry {
self.table.retain(|k, _| k.domain_name != domain_name); self.table.retain(|k, _| k.domain_name != domain_name);
} }
pub fn unregister_all(&self) {
self.table.clear();
}
pub async fn call_method( pub async fn call_method(
&self, &self,
rpc_desc: RpcDescriptor, rpc_desc: RpcDescriptor,
+13 -2
View File
@@ -21,7 +21,12 @@ use super::service_registry::ServiceRegistry;
#[async_trait::async_trait] #[async_trait::async_trait]
#[auto_impl::auto_impl(Arc, Box)] #[auto_impl::auto_impl(Arc, Box)]
pub trait RpcServerHook: Send + Sync { pub trait RpcServerHook: Send + Sync {
async fn on_new_client(&self, _tunnel_info: Option<TunnelInfo>) {} async fn on_new_client(
&self,
tunnel_info: Option<TunnelInfo>,
) -> Result<Option<TunnelInfo>, anyhow::Error> {
Ok(tunnel_info)
}
async fn on_client_disconnected(&self, _tunnel_info: Option<TunnelInfo>) {} async fn on_client_disconnected(&self, _tunnel_info: Option<TunnelInfo>) {}
} }
@@ -72,7 +77,13 @@ impl<L: TunnelListener + 'static> StandAloneServer<L> {
let inflight_server = inflight.clone(); let inflight_server = inflight.clone();
let hook = hook.clone(); let hook = hook.clone();
hook.on_new_client(tunnel_info.clone()).await; let tunnel_info = match hook.on_new_client(tunnel_info).await {
Ok(info) => info,
Err(e) => {
tracing::warn!(?e, "standalone hook.on_new_client failed");
continue;
}
};
inflight_server.fetch_add(1, std::sync::atomic::Ordering::Relaxed); inflight_server.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
tasks.lock().unwrap().spawn(async move { tasks.lock().unwrap().spawn(async move {
+5
View File
@@ -66,6 +66,11 @@ message NetworkConfig {
optional bool enable_magic_dns = 42; optional bool enable_magic_dns = 42;
optional bool enable_private_mode = 43; optional bool enable_private_mode = 43;
repeated string rpc_portal_whitelists = 44;
optional bool enable_quic_proxy = 45;
optional bool disable_quic_input = 46;
} }
message MyNodeInfo { message MyNodeInfo {
+270 -50
View File
@@ -17,7 +17,9 @@ use crate::{
instance::instance::Instance, instance::instance::Instance,
proto::common::CompressionAlgoPb, proto::common::CompressionAlgoPb,
tunnel::{ tunnel::{
common::tests::wait_for_condition, ring::RingTunnelConnector, tcp::TcpTunnelConnector, common::tests::{_tunnel_bench_netns, wait_for_condition},
ring::RingTunnelConnector,
tcp::{TcpTunnelConnector, TcpTunnelListener},
udp::UdpTunnelConnector, udp::UdpTunnelConnector,
}, },
}; };
@@ -188,6 +190,24 @@ pub async fn init_three_node_ex<F: Fn(TomlConfigLoader) -> TomlConfigLoader>(
vec![inst1, inst2, inst3] vec![inst1, inst2, inst3]
} }
pub async fn drop_insts(insts: Vec<Instance>) {
let mut set = JoinSet::new();
for mut inst in insts {
set.spawn(async move {
inst.clear_resources().await;
let pm = Arc::downgrade(&inst.get_peer_manager());
drop(inst);
let now = std::time::Instant::now();
while now.elapsed().as_secs() < 5 && pm.strong_count() > 0 {
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
}
debug_assert_eq!(pm.strong_count(), 0, "PeerManager should be dropped");
});
}
while let Some(_) = set.join_next().await {}
}
async fn ping_test(from_netns: &str, target_ip: &str, payload_size: Option<usize>) -> bool { async fn ping_test(from_netns: &str, target_ip: &str, payload_size: Option<usize>) -> bool {
let _g = NetNS::new(Some(ROOT_NETNS_NAME.to_owned())).guard(); let _g = NetNS::new(Some(ROOT_NETNS_NAME.to_owned())).guard();
let code = tokio::process::Command::new("ip") let code = tokio::process::Command::new("ip")
@@ -204,6 +224,8 @@ async fn ping_test(from_netns: &str, target_ip: &str, payload_size: Option<usize
"1", "1",
target_ip.to_string().as_str(), target_ip.to_string().as_str(),
]) ])
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status() .status()
.await .await
.unwrap(); .unwrap();
@@ -233,14 +255,17 @@ pub async fn basic_three_node_test(#[values("tcp", "udp", "wg", "ws", "wss")] pr
Duration::from_secs(5000), Duration::from_secs(5000),
) )
.await; .await;
drop_insts(insts).await;
} }
async fn subnet_proxy_test_udp() { async fn subnet_proxy_test_udp(target_ip: &str) {
use crate::tunnel::{common::tests::_tunnel_pingpong_netns, udp::UdpTunnelListener}; use crate::tunnel::{common::tests::_tunnel_pingpong_netns, udp::UdpTunnelListener};
use rand::Rng; use rand::Rng;
let udp_listener = UdpTunnelListener::new("udp://10.1.2.4:22233".parse().unwrap()); let udp_listener = UdpTunnelListener::new("udp://10.1.2.4:22233".parse().unwrap());
let udp_connector = UdpTunnelConnector::new("udp://10.1.2.4:22233".parse().unwrap()); let udp_connector =
UdpTunnelConnector::new(format!("udp://{}:22233", target_ip).parse().unwrap());
// NOTE: this should not excced udp tunnel max buffer size // NOTE: this should not excced udp tunnel max buffer size
let mut buf = vec![0; 7 * 1024]; let mut buf = vec![0; 7 * 1024];
@@ -257,7 +282,8 @@ async fn subnet_proxy_test_udp() {
// no fragment // no fragment
let udp_listener = UdpTunnelListener::new("udp://10.1.2.4:22233".parse().unwrap()); let udp_listener = UdpTunnelListener::new("udp://10.1.2.4:22233".parse().unwrap());
let udp_connector = UdpTunnelConnector::new("udp://10.1.2.4:22233".parse().unwrap()); let udp_connector =
UdpTunnelConnector::new(format!("udp://{}:22233", target_ip).parse().unwrap());
let mut buf = vec![0; 1 * 1024]; let mut buf = vec![0; 1 * 1024];
rand::thread_rng().fill(&mut buf[..]); rand::thread_rng().fill(&mut buf[..]);
@@ -305,12 +331,13 @@ async fn subnet_proxy_test_udp() {
.await; .await;
} }
async fn subnet_proxy_test_tcp() { async fn subnet_proxy_test_tcp(target_ip: &str) {
use crate::tunnel::{common::tests::_tunnel_pingpong_netns, tcp::TcpTunnelListener}; use crate::tunnel::{common::tests::_tunnel_pingpong_netns, tcp::TcpTunnelListener};
use rand::Rng; use rand::Rng;
let tcp_listener = TcpTunnelListener::new("tcp://10.1.2.4:22223".parse().unwrap()); let tcp_listener = TcpTunnelListener::new("tcp://10.1.2.4:22223".parse().unwrap());
let tcp_connector = TcpTunnelConnector::new("tcp://10.1.2.4:22223".parse().unwrap()); let tcp_connector =
TcpTunnelConnector::new(format!("tcp://{}:22223", target_ip).parse().unwrap());
let mut buf = vec![0; 32]; let mut buf = vec![0; 32];
rand::thread_rng().fill(&mut buf[..]); rand::thread_rng().fill(&mut buf[..]);
@@ -341,15 +368,15 @@ async fn subnet_proxy_test_tcp() {
.await; .await;
} }
async fn subnet_proxy_test_icmp() { async fn subnet_proxy_test_icmp(target_ip: &str) {
wait_for_condition( wait_for_condition(
|| async { ping_test("net_a", "10.1.2.4", None).await }, || async { ping_test("net_a", target_ip, None).await },
Duration::from_secs(5), Duration::from_secs(5),
) )
.await; .await;
wait_for_condition( wait_for_condition(
|| async { ping_test("net_a", "10.1.2.4", Some(5 * 1024)).await }, || async { ping_test("net_a", target_ip, Some(5 * 1024)).await },
Duration::from_secs(5), Duration::from_secs(5),
) )
.await; .await;
@@ -368,49 +395,21 @@ async fn subnet_proxy_test_icmp() {
.await; .await;
} }
#[rstest::rstest]
#[tokio::test] #[tokio::test]
#[serial_test::serial] pub async fn quic_proxy() {
pub async fn subnet_proxy_three_node_test(
#[values("tcp", "udp", "wg")] proto: &str,
#[values(true, false)] no_tun: bool,
#[values(true, false)] relay_by_public_server: bool,
#[values(true, false)] enable_kcp_proxy: bool,
#[values(true, false)] disable_kcp_input: bool,
#[values(true, false)] dst_enable_kcp_proxy: bool,
) {
let insts = init_three_node_ex( let insts = init_three_node_ex(
proto, "udp",
|cfg| { |cfg| {
if cfg.get_inst_name() == "inst3" { if cfg.get_inst_name() == "inst3" {
let mut flags = cfg.get_flags(); cfg.add_proxy_cidr("10.1.2.0/24".parse().unwrap(), None);
flags.no_tun = no_tun;
flags.disable_kcp_input = disable_kcp_input;
flags.enable_kcp_proxy = dst_enable_kcp_proxy;
cfg.set_flags(flags);
cfg.add_proxy_cidr("10.1.2.0/24".parse().unwrap());
} }
if cfg.get_inst_name() == "inst2" && relay_by_public_server {
cfg.set_network_identity(NetworkIdentity::new(
"public".to_string(),
"public".to_string(),
));
}
if cfg.get_inst_name() == "inst1" && enable_kcp_proxy {
let mut flags = cfg.get_flags();
flags.enable_kcp_proxy = true;
cfg.set_flags(flags);
}
cfg cfg
}, },
relay_by_public_server, false,
) )
.await; .await;
assert_eq!(insts[2].get_global_ctx().get_proxy_cidrs().len(), 1); assert_eq!(insts[2].get_global_ctx().config.get_proxy_cidrs().len(), 1);
wait_proxy_route_appear( wait_proxy_route_appear(
&insts[0].get_peer_manager(), &insts[0].get_peer_manager(),
@@ -420,9 +419,93 @@ pub async fn subnet_proxy_three_node_test(
) )
.await; .await;
subnet_proxy_test_icmp().await; let target_ip = "10.1.2.4";
subnet_proxy_test_tcp().await;
subnet_proxy_test_udp().await; subnet_proxy_test_icmp(target_ip).await;
subnet_proxy_test_tcp(target_ip).await;
drop_insts(insts).await;
}
#[rstest::rstest]
#[serial_test::serial]
#[tokio::test]
pub async fn subnet_proxy_three_node_test(
#[values(true, false)] no_tun: bool,
#[values(true, false)] relay_by_public_server: bool,
#[values(true, false)] enable_kcp_proxy: bool,
#[values(true, false)] enable_quic_proxy: bool,
#[values(true, false)] disable_kcp_input: bool,
#[values(true, false)] disable_quic_input: bool,
#[values(true, false)] dst_enable_kcp_proxy: bool,
#[values(true, false)] dst_enable_quic_proxy: bool,
) {
let insts = init_three_node_ex(
"udp",
|cfg| {
if cfg.get_inst_name() == "inst3" {
let mut flags = cfg.get_flags();
flags.no_tun = no_tun;
flags.disable_kcp_input = disable_kcp_input;
flags.enable_kcp_proxy = dst_enable_kcp_proxy;
flags.disable_quic_input = disable_quic_input;
flags.enable_quic_proxy = dst_enable_quic_proxy;
cfg.set_flags(flags);
cfg.add_proxy_cidr("10.1.2.0/24".parse().unwrap(), None);
cfg.add_proxy_cidr(
"10.1.2.0/24".parse().unwrap(),
Some("10.1.3.0/24".parse().unwrap()),
);
}
if cfg.get_inst_name() == "inst2" && relay_by_public_server {
cfg.set_network_identity(NetworkIdentity::new(
"public".to_string(),
"public".to_string(),
));
}
if cfg.get_inst_name() == "inst1" {
let mut flags = cfg.get_flags();
if enable_kcp_proxy {
flags.enable_kcp_proxy = true;
}
if enable_quic_proxy {
flags.enable_quic_proxy = true;
}
cfg.set_flags(flags);
}
cfg
},
relay_by_public_server,
)
.await;
assert_eq!(insts[2].get_global_ctx().config.get_proxy_cidrs().len(), 2);
wait_proxy_route_appear(
&insts[0].get_peer_manager(),
"10.144.144.3/24",
insts[2].peer_id(),
"10.1.2.0/24",
)
.await;
wait_proxy_route_appear(
&insts[0].get_peer_manager(),
"10.144.144.3/24",
insts[2].peer_id(),
"10.1.3.0/24",
)
.await;
for target_ip in ["10.1.3.4", "10.1.2.4"].iter() {
subnet_proxy_test_icmp(target_ip).await;
subnet_proxy_test_tcp(target_ip).await;
subnet_proxy_test_udp(target_ip).await;
}
drop_insts(insts).await;
} }
#[rstest::rstest] #[rstest::rstest]
@@ -464,6 +547,8 @@ pub async fn data_compress(
Duration::from_secs(5), Duration::from_secs(5),
) )
.await; .await;
drop_insts(_insts).await;
} }
#[cfg(feature = "wireguard")] #[cfg(feature = "wireguard")]
@@ -577,6 +662,8 @@ pub async fn proxy_three_node_disconnect_test(#[values("tcp", "wg")] proto: &str
set_link_status("net_d", true); set_link_status("net_d", true);
} }
drop_insts(insts).await;
}); });
let (ret,) = tokio::join!(task); let (ret,) = tokio::join!(task);
@@ -630,6 +717,8 @@ pub async fn udp_broadcast_test() {
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
assert_eq!(counter.load(std::sync::atomic::Ordering::Relaxed), 2); assert_eq!(counter.load(std::sync::atomic::Ordering::Relaxed), 2);
drop_insts(_insts).await;
} }
#[tokio::test] #[tokio::test]
@@ -678,6 +767,8 @@ pub async fn foreign_network_forward_nic_data() {
Duration::from_secs(5), Duration::from_secs(5),
) )
.await; .await;
drop_insts(vec![center_inst, inst1, inst2]).await;
} }
use std::{net::SocketAddr, str::FromStr}; use std::{net::SocketAddr, str::FromStr};
@@ -778,6 +869,8 @@ pub async fn wireguard_vpn_portal() {
Duration::from_secs(5), Duration::from_secs(5),
) )
.await; .await;
drop_insts(insts).await;
} }
#[cfg(feature = "wireguard")] #[cfg(feature = "wireguard")]
@@ -837,6 +930,79 @@ pub async fn socks5_vpn_portal(#[values("10.144.144.1", "10.144.144.3")] dst_add
drop(conn); drop(conn);
tokio::join!(task).0.unwrap(); tokio::join!(task).0.unwrap();
drop_insts(_insts).await;
}
#[tokio::test]
#[serial_test::serial]
// Verifies foreign-network relaying through a CLUSTER of two center nodes:
// two centers share the "center" network identity, while inst1 ("net_c") and
// inst2 ("net_d") attach to different centers and must reach each other via
// the foreign-network relay path.
pub async fn foreign_network_functional_cluster() {
// Speed up propagation of foreign-network info so the test converges
// within the short wait_for_condition timeouts below.
crate::set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
prepare_linux_namespaces();
// Center node 1 lives in netns "net_a"; both centers join the same
// network identity ("center", empty secret) to form the relay cluster.
let center_node_config1 = get_inst_config("inst1", Some("net_a"), "10.144.144.1");
center_node_config1
.set_network_identity(NetworkIdentity::new("center".to_string(), "".to_string()));
let mut center_inst1 = Instance::new(center_node_config1);
// Center node 2 lives in netns "net_b" with the same "center" identity.
let center_node_config2 = get_inst_config("inst2", Some("net_b"), "10.144.144.2");
center_node_config2
.set_network_identity(NetworkIdentity::new("center".to_string(), "".to_string()));
let mut center_inst2 = Instance::new(center_node_config2);
// inst1 has no listeners: it can only talk through connectors it creates,
// which forces traffic through the center relay.
let inst1_config = get_inst_config("inst1", Some("net_c"), "10.144.145.1");
inst1_config.set_listeners(vec![]);
let mut inst1 = Instance::new(inst1_config);
let mut inst2 = Instance::new(get_inst_config("inst2", Some("net_d"), "10.144.145.2"));
center_inst1.run().await.unwrap();
center_inst2.run().await.unwrap();
inst1.run().await.unwrap();
inst2.run().await.unwrap();
// Link the two centers together over an in-process ring tunnel.
center_inst1
.get_conn_manager()
.add_connector(RingTunnelConnector::new(
format!("ring://{}", center_inst2.id()).parse().unwrap(),
));
// inst1 attaches to center 1, inst2 to center 2 — so the ping below must
// traverse both centers (cross-center foreign-network forwarding).
inst1
.get_conn_manager()
.add_connector(RingTunnelConnector::new(
format!("ring://{}", center_inst1.id()).parse().unwrap(),
));
inst2
.get_conn_manager()
.add_connector(RingTunnelConnector::new(
format!("ring://{}", center_inst2.id()).parse().unwrap(),
));
let peer_map_inst1 = inst1.get_peer_manager();
println!("inst1 peer map: {:?}", peer_map_inst1.list_routes().await);
drop(peer_map_inst1);
// inst1 -> inst2 must become reachable within 5s via the center cluster.
wait_for_condition(
|| async { ping_test("net_c", "10.144.145.2", None).await },
Duration::from_secs(5),
)
.await;
// connect to two centers, ping should work
inst1
.get_conn_manager()
.add_connector(RingTunnelConnector::new(
format!("ring://{}", center_inst2.id()).parse().unwrap(),
));
// Give routing a moment to absorb the second (redundant) center link,
// then confirm connectivity still holds with dual attachments.
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
wait_for_condition(
|| async { ping_test("net_c", "10.144.145.2", None).await },
Duration::from_secs(5),
)
.await;
drop_insts(vec![center_inst1, center_inst2, inst1, inst2]).await;
} }
#[rstest::rstest] #[rstest::rstest]
@@ -887,11 +1053,17 @@ pub async fn manual_reconnector(#[values(true, false)] is_foreign: bool) {
.get_foreign_network_client() .get_foreign_network_client()
.get_peer_map() .get_peer_map()
}; };
let center_inst_peer_id = if !is_foreign {
center_inst.peer_id()
} else {
center_inst
.get_peer_manager()
.get_foreign_network_manager()
.get_network_peer_id(&inst1.get_global_ctx().get_network_identity().network_name)
.unwrap()
};
let conns = peer_map let conns = peer_map.list_peer_conns(center_inst_peer_id).await.unwrap();
.list_peer_conns(center_inst.peer_id())
.await
.unwrap();
assert!(conns.len() >= 1); assert!(conns.len() >= 1);
@@ -900,6 +1072,9 @@ pub async fn manual_reconnector(#[values(true, false)] is_foreign: bool) {
Duration::from_secs(5), Duration::from_secs(5),
) )
.await; .await;
drop(peer_map);
drop_insts(vec![center_inst, inst1, inst2]).await;
} }
#[rstest::rstest] #[rstest::rstest]
@@ -943,7 +1118,7 @@ pub async fn port_forward_test(
}, },
]); ]);
} else if cfg.get_inst_name() == "inst3" { } else if cfg.get_inst_name() == "inst3" {
cfg.add_proxy_cidr("10.1.2.0/24".parse().unwrap()); cfg.add_proxy_cidr("10.1.2.0/24".parse().unwrap(), None);
} }
let mut flags = cfg.get_flags(); let mut flags = cfg.get_flags();
flags.no_tun = no_tun; flags.no_tun = no_tun;
@@ -1019,4 +1194,49 @@ pub async fn port_forward_test(
buf, buf,
) )
.await; .await;
drop_insts(_insts).await;
}
#[rstest::rstest]
#[serial_test::serial]
#[tokio::test]
// Verifies that the foreign-relay bandwidth limiter on the relay node
// (inst2) actually throttles TCP throughput between inst1 and inst3 to
// approximately `bps_limit` KB/s (parameterized at 100/200/400/800).
pub async fn relay_bps_limit_test(#[values(100, 200, 400, 800)] bps_limit: u64) {
let insts = init_three_node_ex(
"udp",
|cfg| {
if cfg.get_inst_name() == "inst2" {
// Put the middle node on a different ("public") network so it acts
// as a foreign-network relay for inst1/inst3 rather than a peer.
cfg.set_network_identity(NetworkIdentity::new(
"public".to_string(),
"public".to_string(),
));
// Flag value is in bytes/sec; bps_limit is expressed in KB/s.
let mut f = cfg.get_flags();
f.foreign_relay_bps_limit = bps_limit * 1024;
cfg.set_flags(f);
}
cfg
},
true,
)
.await;
// connect to virtual ip (no tun mode)
let tcp_listener = TcpTunnelListener::new("tcp://0.0.0.0:22223".parse().unwrap());
let tcp_connector = TcpTunnelConnector::new("tcp://10.144.144.3:22223".parse().unwrap());
// Run the tunnel benchmark: listener in inst3's netns ("net_c"),
// connector in inst1's netns ("net_a"); returns measured bytes/sec.
let bps = _tunnel_bench_netns(
tcp_listener,
tcp_connector,
NetNS::new(Some("net_c".into())),
NetNS::new(Some("net_a".into())),
)
.await;
println!("bps: {}", bps);
// Convert to KB/s to compare against the configured limit.
let bps = bps as u64 / 1024;
// allow 50kb jitter
// NOTE(review): assumes bps_limit > 50, true for all #[values] above —
// a smaller parameter would underflow the unsigned subtraction.
assert!(bps >= bps_limit - 50 && bps <= bps_limit + 50);
drop_insts(insts).await;
} }
+46 -26
View File
@@ -436,9 +436,10 @@ pub fn reserve_buf(buf: &mut BytesMut, min_size: usize, max_size: usize) {
} }
pub mod tests { pub mod tests {
use std::time::Instant; use atomic_shim::AtomicU64;
use std::{sync::Arc, time::Instant};
use futures::{Future, SinkExt, StreamExt, TryStreamExt}; use futures::{Future, SinkExt, StreamExt};
use tokio_util::bytes::{BufMut, Bytes, BytesMut}; use tokio_util::bytes::{BufMut, Bytes, BytesMut};
use crate::{ use crate::{
@@ -554,21 +555,56 @@ pub mod tests {
} }
} }
pub(crate) async fn _tunnel_bench<L, C>(mut listener: L, mut connector: C) pub(crate) async fn _tunnel_bench<L, C>(listener: L, connector: C)
where where
L: TunnelListener + Send + Sync + 'static, L: TunnelListener + Send + Sync + 'static,
C: TunnelConnector + Send + Sync + 'static, C: TunnelConnector + Send + Sync + 'static,
{ {
listener.listen().await.unwrap(); _tunnel_bench_netns(listener, connector, NetNS::new(None), NetNS::new(None)).await;
}
pub(crate) async fn _tunnel_bench_netns<L, C>(
mut listener: L,
mut connector: C,
netns_l: NetNS,
netns_c: NetNS,
) -> usize
where
L: TunnelListener + Send + Sync + 'static,
C: TunnelConnector + Send + Sync + 'static,
{
{
let _g = netns_l.guard();
listener.listen().await.unwrap();
}
let bps = Arc::new(AtomicU64::new(0));
let bps_clone = bps.clone();
let lis = tokio::spawn(async move { let lis = tokio::spawn(async move {
let ret = listener.accept().await.unwrap(); let ret = listener.accept().await.unwrap();
_tunnel_echo_server(ret, false).await // _tunnel_echo_server(ret, false).await
let (mut r, _s) = ret.split();
let now = Instant::now();
let mut count = 0;
while let Some(Ok(p)) = r.next().await {
count += p.payload_len();
let elapsed_sec = now.elapsed().as_secs();
if elapsed_sec > 0 {
bps_clone.store(
count as u64 / now.elapsed().as_secs() as u64,
std::sync::atomic::Ordering::Relaxed,
);
}
}
}); });
let tunnel = connector.connect().await.unwrap(); let tunnel = {
let _g = netns_c.guard();
connector.connect().await.unwrap()
};
let (recv, mut send) = tunnel.split(); let (_recv, mut send) = tunnel.split();
// prepare a 4k buffer with random data // prepare a 4k buffer with random data
let mut send_buf = BytesMut::new(); let mut send_buf = BytesMut::new();
@@ -576,22 +612,6 @@ pub mod tests {
send_buf.put_i128(rand::random::<i128>()); send_buf.put_i128(rand::random::<i128>());
} }
let r = tokio::spawn(async move {
let now = Instant::now();
let count = recv
.try_fold(0usize, |mut ret, _| async move {
ret += 1;
Ok(ret)
})
.await
.unwrap();
println!(
"bps: {}",
(count / 1024) * 4 / now.elapsed().as_secs() as usize
);
});
let now = Instant::now(); let now = Instant::now();
while now.elapsed().as_secs() < 10 { while now.elapsed().as_secs() < 10 {
// send.feed(item) // send.feed(item)
@@ -605,11 +625,11 @@ pub mod tests {
drop(tunnel); drop(tunnel);
tracing::warn!("wait for recv to finish..."); tracing::warn!("wait for recv to finish...");
let bps = bps.load(std::sync::atomic::Ordering::Acquire);
let _ = tokio::join!(r); println!("bps: {}", bps);
lis.abort(); lis.abort();
let _ = tokio::join!(lis); bps as usize
} }
pub fn enable_log() { pub fn enable_log() {
+18
View File
@@ -81,6 +81,7 @@ bitflags::bitflags! {
const EXIT_NODE = 0b0000_0100; const EXIT_NODE = 0b0000_0100;
const NO_PROXY = 0b0000_1000; const NO_PROXY = 0b0000_1000;
const COMPRESSED = 0b0001_0000; const COMPRESSED = 0b0001_0000;
const KCP_SRC_MODIFIED = 0b0010_0000;
const _ = !0; const _ = !0;
} }
@@ -183,6 +184,23 @@ impl PeerManagerHeader {
self.flags = flags.bits(); self.flags = flags.bits();
self self
} }
/// Sets or clears the `KCP_SRC_MODIFIED` flag bit in this header.
///
/// Returns `&mut Self` so the call can be chained with the other
/// flag mutators on this header type.
pub fn set_kcp_src_modified(&mut self, modified: bool) -> &mut Self {
    // Every bit pattern is a valid flag set (the bitflags definition
    // includes `const _ = !0`), so `from_bits` cannot yield `None`.
    let mut parsed = PeerManagerHeaderFlags::from_bits(self.flags).unwrap();
    // `Flags::set` inserts the bit when `modified` is true and removes
    // it otherwise — same effect as an explicit insert/remove branch.
    parsed.set(PeerManagerHeaderFlags::KCP_SRC_MODIFIED, modified);
    self.flags = parsed.bits();
    self
}
/// Reports whether the `KCP_SRC_MODIFIED` flag bit is set in this header.
pub fn is_kcp_src_modified(&self) -> bool {
    // All bit patterns are valid for these flags, so the unwrap is safe.
    let parsed = PeerManagerHeaderFlags::from_bits(self.flags).unwrap();
    // For a single-bit flag, `intersects` and `contains` are equivalent.
    parsed.intersects(PeerManagerHeaderFlags::KCP_SRC_MODIFIED)
}
} }
#[repr(C, packed)] #[repr(C, packed)]
+21 -7
View File
@@ -2,14 +2,18 @@
//! //!
//! Checkout the `README.md` for guidance. //! Checkout the `README.md` for guidance.
use std::{error::Error, net::SocketAddr, sync::Arc}; use std::{error::Error, net::SocketAddr, sync::Arc, time::Duration};
use crate::tunnel::{ use crate::tunnel::{
common::{FramedReader, FramedWriter, TunnelWrapper}, common::{FramedReader, FramedWriter, TunnelWrapper},
TunnelInfo, TunnelInfo,
}; };
use anyhow::Context; use anyhow::Context;
use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Connection, Endpoint, ServerConfig};
use quinn::{
congestion::BbrConfig, crypto::rustls::QuicClientConfig, ClientConfig, Connection, Endpoint,
ServerConfig, TransportConfig,
};
use super::{ use super::{
check_scheme_and_get_socket_addr, check_scheme_and_get_socket_addr,
@@ -17,10 +21,18 @@ use super::{
IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelListener, IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelListener,
}; };
fn configure_client() -> ClientConfig { pub fn configure_client() -> ClientConfig {
ClientConfig::new(Arc::new( let client_crypto = QuicClientConfig::try_from(get_insecure_tls_client_config()).unwrap();
QuicClientConfig::try_from(get_insecure_tls_client_config()).unwrap(), let mut client_config = ClientConfig::new(Arc::new(client_crypto));
))
// // Create a new TransportConfig and set BBR
let mut transport_config = TransportConfig::default();
transport_config.congestion_controller_factory(Arc::new(BbrConfig::default()));
transport_config.keep_alive_interval(Some(Duration::from_secs(5)));
// Replace the default TransportConfig with the transport_config() method
client_config.transport_config(Arc::new(transport_config));
client_config
} }
/// Constructs a QUIC endpoint configured to listen for incoming connections on a certain address /// Constructs a QUIC endpoint configured to listen for incoming connections on a certain address
@@ -38,13 +50,15 @@ pub fn make_server_endpoint(bind_addr: SocketAddr) -> Result<(Endpoint, Vec<u8>)
} }
/// Returns default server configuration along with its certificate. /// Returns default server configuration along with its certificate.
fn configure_server() -> Result<(ServerConfig, Vec<u8>), Box<dyn Error>> { pub fn configure_server() -> Result<(ServerConfig, Vec<u8>), Box<dyn Error>> {
let (certs, key) = get_insecure_tls_cert(); let (certs, key) = get_insecure_tls_cert();
let mut server_config = ServerConfig::with_single_cert(certs.clone(), key.into())?; let mut server_config = ServerConfig::with_single_cert(certs.clone(), key.into())?;
let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); let transport_config = Arc::get_mut(&mut server_config.transport).unwrap();
transport_config.max_concurrent_uni_streams(10_u8.into()); transport_config.max_concurrent_uni_streams(10_u8.into());
transport_config.max_concurrent_bidi_streams(10_u8.into()); transport_config.max_concurrent_bidi_streams(10_u8.into());
// Setting BBR congestion control
transport_config.congestion_controller_factory(Arc::new(BbrConfig::default()));
Ok((server_config, certs[0].to_vec())) Ok((server_config, certs[0].to_vec()))
} }
+4 -2
View File
@@ -151,7 +151,7 @@ async fn respond_stun_packet(
use crate::common::stun_codec_ext::*; use crate::common::stun_codec_ext::*;
use bytecodec::DecodeExt as _; use bytecodec::DecodeExt as _;
use bytecodec::EncodeExt as _; use bytecodec::EncodeExt as _;
use stun_codec::rfc5389::attributes::MappedAddress; use stun_codec::rfc5389::attributes::XorMappedAddress;
use stun_codec::rfc5389::methods::BINDING; use stun_codec::rfc5389::methods::BINDING;
use stun_codec::{Message, MessageClass, MessageDecoder, MessageEncoder}; use stun_codec::{Message, MessageClass, MessageDecoder, MessageEncoder};
@@ -173,7 +173,9 @@ async fn respond_stun_packet(
// we discard the prefix, make sure our implementation is not compatible with other stun client // we discard the prefix, make sure our implementation is not compatible with other stun client
u32_to_tid(tid_to_u32(&tid)), u32_to_tid(tid_to_u32(&tid)),
); );
resp_msg.add_attribute(Attribute::MappedAddress(MappedAddress::new(addr.clone()))); resp_msg.add_attribute(Attribute::XorMappedAddress(XorMappedAddress::new(
addr.clone(),
)));
let mut encoder = MessageEncoder::new(); let mut encoder = MessageEncoder::new();
let rsp_buf = encoder let rsp_buf = encoder
+5 -2
View File
@@ -202,8 +202,11 @@ impl WSTunnelConnector {
init_crypto_provider(); init_crypto_provider();
let tls_conn = let tls_conn =
tokio_rustls::TlsConnector::from(Arc::new(get_insecure_tls_client_config())); tokio_rustls::TlsConnector::from(Arc::new(get_insecure_tls_client_config()));
// Modify SNI logic: always use "localhost" as SNI to avoid IP blocking. // Modify SNI logic: use "localhost" as SNI for url without domain to avoid IP blocking.
let sni = "localhost"; let sni = match addr.domain() {
None => "localhost".to_string(),
Some(domain) => domain.to_string(),
};
let server_name = rustls::pki_types::ServerName::try_from(sni) let server_name = rustls::pki_types::ServerName::try_from(sni)
.map_err(|_| TunnelError::InvalidProtocol("Invalid SNI".to_string()))?; .map_err(|_| TunnelError::InvalidProtocol("Invalid SNI".to_string()))?;
let stream = tls_conn.connect(server_name, stream).await?; let stream = tls_conn.connect(server_name, stream).await?;
+18 -3
View File
@@ -4,7 +4,7 @@ use anyhow::Context;
use tracing::level_filters::LevelFilter; use tracing::level_filters::LevelFilter;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer}; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer};
use crate::common::{config::ConfigLoader, get_logger_timer_rfc3339}; use crate::common::{config::LoggingConfigLoader, get_logger_timer_rfc3339};
pub type PeerRoutePair = crate::proto::cli::PeerRoutePair; pub type PeerRoutePair = crate::proto::cli::PeerRoutePair;
@@ -23,7 +23,7 @@ pub fn float_to_str(f: f64, precision: usize) -> String {
pub type NewFilterSender = std::sync::mpsc::Sender<String>; pub type NewFilterSender = std::sync::mpsc::Sender<String>;
pub fn init_logger( pub fn init_logger(
config: impl ConfigLoader, config: impl LoggingConfigLoader,
need_reload: bool, need_reload: bool,
) -> Result<Option<NewFilterSender>, anyhow::Error> { ) -> Result<Option<NewFilterSender>, anyhow::Error> {
let file_config = config.get_file_logger_config(); let file_config = config.get_file_logger_config();
@@ -211,6 +211,21 @@ pub fn setup_panic_handler() {
})); }));
} }
/// Returns `true` when TCP `port` can currently be bound on `0.0.0.0` (IPv4).
///
/// The check works by actually attempting the bind; the probe listener is
/// dropped immediately, so the port is released again before returning.
pub fn check_tcp_available(port: u16) -> bool {
    use std::net::{Ipv4Addr, SocketAddrV4, TcpListener};
    let probe_addr = SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port);
    TcpListener::bind(probe_addr).is_ok()
}
/// Returns the first port in `range` that can currently be bound for TCP
/// on `0.0.0.0`, or `None` when every candidate is taken.
///
/// Note: availability is inherently racy — another process may claim the
/// port between this probe and any subsequent bind by the caller.
pub fn find_free_tcp_port(range: std::ops::Range<u16>) -> Option<u16> {
    // Iterator form of the original manual scan loop (clippy: manual_find).
    range.into_iter().find(|&port| check_tcp_available(port))
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::common::config::{self}; use crate::common::config::{self};
@@ -219,7 +234,7 @@ mod tests {
async fn test_logger_reload() { async fn test_logger_reload() {
println!("current working dir: {:?}", std::env::current_dir()); println!("current working dir: {:?}", std::env::current_dir());
let config = config::TomlConfigLoader::default(); let config = config::LoggingConfigBuilder::default().build().unwrap();
let s = init_logger(&config, true).unwrap(); let s = init_logger(&config, true).unwrap();
tracing::debug!("test not display debug"); tracing::debug!("test not display debug");
s.unwrap().send(LevelFilter::DEBUG.to_string()).unwrap(); s.unwrap().send(LevelFilter::DEBUG.to_string()).unwrap();
+15 -3
View File
@@ -85,6 +85,7 @@ impl WireGuardImpl {
let mut ip_registered = false; let mut ip_registered = false;
let remote_addr = info.remote_addr.clone(); let remote_addr = info.remote_addr.clone();
let endpoint_addr = remote_addr.clone().map(Into::into);
peer_mgr peer_mgr
.get_global_ctx() .get_global_ctx()
.issue_event(GlobalCtxEvent::VpnPortalClientConnected( .issue_event(GlobalCtxEvent::VpnPortalClientConnected(
@@ -115,10 +116,12 @@ impl WireGuardImpl {
}; };
if !ip_registered { if !ip_registered {
let client_entry = Arc::new(ClientEntry { let client_entry = Arc::new(ClientEntry {
endpoint_addr: remote_addr.clone().map(Into::into), endpoint_addr: endpoint_addr.clone(),
sink: mpsc_tunnel.get_sink(), sink: mpsc_tunnel.get_sink(),
}); });
map_key = Some(i.get_source()); map_key = Some(i.get_source());
// Be careful here: we may overwrite an existing entry if the client IP is reused,
// which is common when clients are behind NAT.
wg_peer_ip_table.insert(i.get_source(), client_entry.clone()); wg_peer_ip_table.insert(i.get_source(), client_entry.clone());
ip_registered = true; ip_registered = true;
} }
@@ -130,8 +133,17 @@ impl WireGuardImpl {
} }
if map_key.is_some() { if map_key.is_some() {
tracing::info!(?map_key, "Removing wg client from table"); // Remove the client from the wg_peer_ip_table only when its endpoint address is unchanged,
wg_peer_ip_table.remove(&map_key.unwrap()); // or we may break clients behind NAT.
match wg_peer_ip_table.remove_if(&map_key.unwrap(), |_, entry| {
entry.endpoint_addr == endpoint_addr
}) {
Some(_) => tracing::info!(?map_key, "Removed wg client from table"),
None => tracing::info!(
?map_key,
"The wg client changed its endpoint address, not removing from table"
),
}
} }
peer_mgr peer_mgr
+28 -61
View File
@@ -1,11 +1,5 @@
use std::collections::BTreeMap;
use dashmap::DashMap;
use crate::{ use crate::{
common::config::{ConfigLoader, TomlConfigLoader}, common::config::ConfigLoader, launcher::ConfigSource, instance_manager::NetworkInstanceManager, proto::{
launcher::NetworkInstance,
proto::{
rpc_types::{self, controller::BaseController}, rpc_types::{self, controller::BaseController},
web::{ web::{
CollectNetworkInfoRequest, CollectNetworkInfoResponse, DeleteNetworkInstanceRequest, CollectNetworkInfoRequest, CollectNetworkInfoResponse, DeleteNetworkInstanceRequest,
@@ -14,13 +8,13 @@ use crate::{
RetainNetworkInstanceResponse, RunNetworkInstanceRequest, RunNetworkInstanceResponse, RetainNetworkInstanceResponse, RunNetworkInstanceRequest, RunNetworkInstanceResponse,
ValidateConfigRequest, ValidateConfigResponse, WebClientService, ValidateConfigRequest, ValidateConfigResponse, WebClientService,
}, },
}, }
}; };
pub struct Controller { pub struct Controller {
token: String, token: String,
hostname: String, hostname: String,
instance_map: DashMap<uuid::Uuid, NetworkInstance>, manager: NetworkInstanceManager,
} }
impl Controller { impl Controller {
@@ -28,55 +22,12 @@ impl Controller {
Controller { Controller {
token, token,
hostname, hostname,
instance_map: DashMap::new(), manager: NetworkInstanceManager::new(),
} }
} }
pub fn run_network_instance(&self, cfg: TomlConfigLoader) -> Result<(), anyhow::Error> {
let instance_id = cfg.get_id();
if self.instance_map.contains_key(&instance_id) {
anyhow::bail!("instance {} already exists", instance_id);
}
let mut instance = NetworkInstance::new(cfg);
instance.start()?;
println!("instance {} started", instance_id);
self.instance_map.insert(instance_id, instance);
Ok(())
}
pub fn retain_network_instance(
&self,
instance_ids: Vec<uuid::Uuid>,
) -> Result<RetainNetworkInstanceResponse, anyhow::Error> {
self.instance_map.retain(|k, _| instance_ids.contains(k));
let remain = self
.instance_map
.iter()
.map(|item| item.key().clone().into())
.collect::<Vec<_>>();
println!("instance {:?} retained", remain);
Ok(RetainNetworkInstanceResponse {
remain_inst_ids: remain,
})
}
pub fn collect_network_infos(&self) -> Result<NetworkInstanceRunningInfoMap, anyhow::Error> {
let mut map = BTreeMap::new();
for instance in self.instance_map.iter() {
if let Some(info) = instance.get_running_info() {
map.insert(instance.key().to_string(), info);
}
}
Ok(NetworkInstanceRunningInfoMap { map })
}
pub fn list_network_instance_ids(&self) -> Vec<uuid::Uuid> { pub fn list_network_instance_ids(&self) -> Vec<uuid::Uuid> {
self.instance_map self.manager.list_network_instance_ids()
.iter()
.map(|item| item.key().clone())
.collect()
} }
pub fn token(&self) -> String { pub fn token(&self) -> String {
@@ -114,7 +65,8 @@ impl WebClientService for Controller {
if let Some(inst_id) = req.inst_id { if let Some(inst_id) = req.inst_id {
cfg.set_id(inst_id.into()); cfg.set_id(inst_id.into());
} }
self.run_network_instance(cfg)?; self.manager.run_network_instance(cfg, ConfigSource::Web)?;
println!("instance {} started", id);
Ok(RunNetworkInstanceResponse { Ok(RunNetworkInstanceResponse {
inst_id: Some(id.into()), inst_id: Some(id.into()),
}) })
@@ -125,7 +77,13 @@ impl WebClientService for Controller {
_: BaseController, _: BaseController,
req: RetainNetworkInstanceRequest, req: RetainNetworkInstanceRequest,
) -> Result<RetainNetworkInstanceResponse, rpc_types::error::Error> { ) -> Result<RetainNetworkInstanceResponse, rpc_types::error::Error> {
Ok(self.retain_network_instance(req.inst_ids.into_iter().map(Into::into).collect())?) let remain = self
.manager
.retain_network_instance(req.inst_ids.into_iter().map(Into::into).collect())?;
println!("instance {:?} retained", remain);
Ok(RetainNetworkInstanceResponse {
remain_inst_ids: remain.iter().map(|item| (*item).into()).collect(),
})
} }
async fn collect_network_info( async fn collect_network_info(
@@ -133,7 +91,14 @@ impl WebClientService for Controller {
_: BaseController, _: BaseController,
req: CollectNetworkInfoRequest, req: CollectNetworkInfoRequest,
) -> Result<CollectNetworkInfoResponse, rpc_types::error::Error> { ) -> Result<CollectNetworkInfoResponse, rpc_types::error::Error> {
let mut ret = self.collect_network_infos()?; let mut ret = NetworkInstanceRunningInfoMap {
map: self
.manager
.collect_network_infos()?
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
};
let include_inst_ids = req let include_inst_ids = req
.inst_ids .inst_ids
.iter() .iter()
@@ -163,6 +128,7 @@ impl WebClientService for Controller {
) -> Result<ListNetworkInstanceResponse, rpc_types::error::Error> { ) -> Result<ListNetworkInstanceResponse, rpc_types::error::Error> {
Ok(ListNetworkInstanceResponse { Ok(ListNetworkInstanceResponse {
inst_ids: self inst_ids: self
.manager
.list_network_instance_ids() .list_network_instance_ids()
.into_iter() .into_iter()
.map(Into::into) .map(Into::into)
@@ -176,11 +142,12 @@ impl WebClientService for Controller {
_: BaseController, _: BaseController,
req: DeleteNetworkInstanceRequest, req: DeleteNetworkInstanceRequest,
) -> Result<DeleteNetworkInstanceResponse, rpc_types::error::Error> { ) -> Result<DeleteNetworkInstanceResponse, rpc_types::error::Error> {
let mut inst_ids = self.list_network_instance_ids(); let remain_inst_ids = self
inst_ids.retain(|id| !req.inst_ids.contains(&(id.clone().into()))); .manager
self.retain_network_instance(inst_ids.clone())?; .delete_network_instance(req.inst_ids.into_iter().map(Into::into).collect())?;
println!("instance {:?} retained", remain_inst_ids);
Ok(DeleteNetworkInstanceResponse { Ok(DeleteNetworkInstanceResponse {
remain_inst_ids: inst_ids.into_iter().map(Into::into).collect(), remain_inst_ids: remain_inst_ids.into_iter().map(Into::into).collect(),
}) })
} }
} }