Compare commits

...

28 Commits

Author SHA1 Message Date
sijie.sun dd5b00faf4 bump version to v2.2.2 2025-02-10 08:47:18 +08:00
sijie.sun 0caec3e4da fix label translate 2025-02-09 22:01:26 +08:00
sijie.sun e48e62cac0 fix tcp proxy not close properly 2025-02-09 22:01:09 +08:00
sijie.sun 06ebda2e2f update kcp-sys to fix unexpected disconnect 2025-02-09 00:30:27 +08:00
sijie.sun 53c449b9fb fix net2net kcp proxy 2025-02-08 23:11:10 +08:00
sijie.sun 51e0fac72c improve user experience
1. add config generator to easytier-web
2. add command to show tcp/kcp proxy entries
2025-02-07 23:59:36 +08:00
sijie.sun 32b1fe0893 netlink shoud remove route only when ifidx is same 2025-02-06 19:23:00 +08:00
sijie.sun 2af3b82e32 bump version to 2.2.1 2025-02-06 16:54:49 +08:00
sijie.sun eca1231831 fix help msg of kcp 2025-02-06 16:54:49 +08:00
sijie.sun e833c2a28b improve experience of subnet/kcp proxy
1. add self to windows firewall on windows
2. android always use smoltcp
2025-02-06 16:54:49 +08:00
Sijie.Sun 8b89a037e8 fix tcp incoming failure when kcp proxy is enabled (#601) 2025-02-06 09:08:34 +08:00
Sijie.Sun 1e821a03fe netlink route add should be exclusive (#596) 2025-02-04 23:01:13 +08:00
Sijie.Sun 66051967fe fix self peer route info not exist when starting (#595) 2025-02-04 21:35:14 +08:00
Sijie.Sun a63778854f use netlink instead of shell cmd to config ip (#593) 2025-02-03 15:13:50 +08:00
Sijie.Sun 4aea0821dd forward original peer info in ospf route (#589)
prost doesn't support unknown field, and these info may be lost when
they go through a old version node.
2025-01-27 20:38:22 +08:00
Sijie.Sun 08546925cc fix tests (#588)
fix proxy_three_node_disconnect_test and hole_punching_symmetric_only_random
2025-01-27 15:17:47 +08:00
Sijie.Sun d0f26d9303 bump version to 2.2.0 (#586) 2025-01-26 23:45:50 +08:00
Sijie.Sun 2a5d5ea4df make kcp proxy compitible with old version (#585)
* fix kcp not work with smoltcp
* check if dst kcp input is enabled
2025-01-26 16:22:10 +08:00
Sijie.Sun b69b122c8d add options to gui to enable kcp (#583)
* add test to kcp
* add options to gui to enable kcp
2025-01-26 13:31:20 +08:00
Sijie.Sun 55a39491cb feat/kcp (#580)
* support proxy tcp stream with kcp to improve experience of tcp over udp
* update rust version
* make subnet proxy route metrics lower in windows.
2025-01-26 00:41:15 +08:00
Sijie.Sun 1194ee1c2d fix peer manager stuck when sending large peer rpc (#572) 2025-01-17 06:50:21 +08:00
Sijie.Sun c23b544c34 tcp accept should retry when encoutering some kinds of error (#565)
* tcp accept should retry when encoutering some kinds of error

bump version to v2.1.2

* persistent temporary machine id
2025-01-14 08:55:48 +08:00
Sijie.Sun 9d76b86f49 fix bugs (#561)
1. if peers disconnected before stop session, may crash at the assert.
2. bind_device flag should take effect on manual connector.
2025-01-12 00:16:38 +08:00
Sijie.Sun bb0ccca3e5 allow manually specify public address of listeners (#556) 2025-01-10 09:25:14 +08:00
Sijie.Sun 306817ae9a allow listener retry listen (#554) 2025-01-09 00:01:41 +08:00
Sijie.Sun d2ec60e108 batch recv for udp proxy (#552) 2025-01-07 23:52:18 +08:00
Sijie.Sun e016aeddeb optimize pingpong conn close condition (#549)
if received some packets from peer, only close conn after enough
rounds of pingpong
2025-01-07 22:42:57 +08:00
Sijie.Sun a4419a31fd fix peer rpc compatibility issue (#548)
every rpc packet should contains descriptor if sent to old version et.
2025-01-06 23:30:56 +08:00
97 changed files with 3863 additions and 1120 deletions
+7 -3
View File
@@ -97,6 +97,7 @@ jobs:
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV
- name: Cargo cache - name: Cargo cache
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: | path: |
@@ -114,6 +115,9 @@ jobs:
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }} if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
run: | run: |
bash ./.github/workflows/install_rust.sh bash ./.github/workflows/install_rust.sh
# this dir is a soft link generated by install_rust.sh
# kcp-sys need this to gen ffi bindings. without this clang may fail to find some libc headers such as bits/libc-header-start.h
export KCP_SYS_EXTRA_HEADER_PATH=/usr/include/musl-cross
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips --package=easytier cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips --package=easytier
else else
@@ -142,14 +146,14 @@ jobs:
whoami whoami
env | sort env | sort
sudo pkg install -y git protobuf sudo pkg install -y git protobuf llvm-devel
curl --proto 'https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y curl --proto 'https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
source $HOME/.cargo/env source $HOME/.cargo/env
rustup set auto-self-update disable rustup set auto-self-update disable
rustup install 1.77 rustup install 1.84
rustup default 1.77 rustup default 1.84
export CC=clang export CC=clang
export CXX=clang++ export CXX=clang++
+10 -2
View File
@@ -11,7 +11,7 @@ on:
image_tag: image_tag:
description: 'Tag for this image build' description: 'Tag for this image build'
type: string type: string
default: 'v1.2.0' default: 'v2.2.2'
required: true required: true
mark_latest: mark_latest:
description: 'Mark this image as latest' description: 'Mark this image as latest'
@@ -39,6 +39,12 @@ jobs:
with: with:
username: ${{ secrets.DOCKERHUB_USERNAME }} username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: login github container registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Download artifact - name: Download artifact
id: download-artifact id: download-artifact
uses: dawidd6/action-download-artifact@v6 uses: dawidd6/action-download-artifact@v6
@@ -58,4 +64,6 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
file: .github/workflows/Dockerfile file: .github/workflows/Dockerfile
tags: easytier/easytier:${{ inputs.image_tag }}${{ inputs.mark_latest && ',easytier/easytier:latest' || '' }}, tags: |
easytier/easytier:${{ inputs.image_tag }}${{ inputs.mark_latest && ',easytier/easytier:latest' || '' }},
ghcr.io/${{ github.actor }}/easytier:${{ inputs.image_tag }}${{ inputs.mark_latest && ',easytier/easytier:latest' || '' }},
+14
View File
@@ -124,6 +124,20 @@ jobs:
# GitHub repo token to use to avoid rate limiter # GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install GUI dependencies (x86 only)
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
run: |
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf
- name: Install GUI cross compile (aarch64 only) - name: Install GUI cross compile (aarch64 only)
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }} if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
run: | run: |
+4 -16
View File
@@ -8,20 +8,7 @@
# dependencies are only needed on ubuntu as that's the only place where # dependencies are only needed on ubuntu as that's the only place where
# we make cross-compilation # we make cross-compilation
if [[ $OS =~ ^ubuntu.*$ ]]; then if [[ $OS =~ ^ubuntu.*$ ]]; then
sudo apt-get update && sudo apt-get install -qq crossbuild-essential-arm64 crossbuild-essential-armhf musl-tools libappindicator3-dev sudo apt-get update && sudo apt-get install -qq crossbuild-essential-arm64 crossbuild-essential-armhf musl-tools libappindicator3-dev llvm clang
# for easytier-gui
if [[ $GUI_TARGET != '' && $GUI_TARGET =~ ^x86_64.*$ ]]; then
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf
fi
# curl -s musl.cc | grep mipsel # curl -s musl.cc | grep mipsel
case $TARGET in case $TARGET in
mipsel-unknown-linux-musl) mipsel-unknown-linux-musl)
@@ -52,13 +39,14 @@ if [[ $OS =~ ^ubuntu.*$ ]]; then
wget -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/ wget -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/
tar zxf ./musl_gcc/${MUSL_URI}-cross.tgz -C ./musl_gcc/ tar zxf ./musl_gcc/${MUSL_URI}-cross.tgz -C ./musl_gcc/
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/bin/*gcc /usr/bin/ sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/bin/*gcc /usr/bin/
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/${MUSL_URI}/include/ /usr/include/musl-cross
fi fi
fi fi
# see https://github.com/rust-lang/rustup/issues/3709 # see https://github.com/rust-lang/rustup/issues/3709
rustup set auto-self-update disable rustup set auto-self-update disable
rustup install 1.77 rustup install 1.84
rustup default 1.77 rustup default 1.84
# mips/mipsel cannot add target from rustup, need compile by ourselves # mips/mipsel cannot add target from rustup, need compile by ourselves
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
+1 -1
View File
@@ -21,7 +21,7 @@ on:
version: version:
description: 'Version for this release' description: 'Version for this release'
type: string type: string
default: 'v2.1.1' default: 'v2.2.2'
required: true required: true
make_latest: make_latest:
description: 'Mark this release as latest' description: 'Mark this release as latest'
Generated
+261 -83
View File
@@ -183,9 +183,9 @@ dependencies = [
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.86" version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
[[package]] [[package]]
name = "approx" name = "approx"
@@ -506,9 +506,9 @@ dependencies = [
[[package]] [[package]]
name = "auto_impl" name = "auto_impl"
version = "1.2.0" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -670,6 +670,12 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
[[package]]
name = "beef"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1"
[[package]] [[package]]
name = "bigdecimal" name = "bigdecimal"
version = "0.4.6" version = "0.4.6"
@@ -684,6 +690,24 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "bindgen"
version = "0.71.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3"
dependencies = [
"bitflags 2.8.0",
"cexpr",
"clang-sys",
"itertools 0.12.1",
"proc-macro2",
"quote",
"regex",
"rustc-hash",
"shlex",
"syn 2.0.87",
]
[[package]] [[package]]
name = "bitflags" name = "bitflags"
version = "1.3.2" version = "1.3.2"
@@ -692,9 +716,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]] [[package]]
name = "bitflags" name = "bitflags"
version = "2.6.0" version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36"
dependencies = [ dependencies = [
"serde", "serde",
] ]
@@ -901,9 +925,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495"
[[package]] [[package]]
name = "bytes" name = "bytes"
version = "1.7.1" version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b"
dependencies = [ dependencies = [
"serde", "serde",
] ]
@@ -955,7 +979,7 @@ version = "0.18.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ca26ef0159422fb77631dc9d17b102f253b876fe1586b03b803e63a309b4ee2" checksum = "8ca26ef0159422fb77631dc9d17b102f253b876fe1586b03b803e63a309b4ee2"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"cairo-sys-rs", "cairo-sys-rs",
"glib", "glib",
"libc", "libc",
@@ -1018,9 +1042,9 @@ dependencies = [
[[package]] [[package]]
name = "cc" name = "cc"
version = "1.1.12" version = "1.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68064e60dbf1f17005c2fde4d07c16d8baa506fd7ffed8ccab702d93617975c7" checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229"
dependencies = [ dependencies = [
"jobserver", "jobserver",
"libc", "libc",
@@ -1033,6 +1057,15 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]]
name = "cexpr"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
dependencies = [
"nom",
]
[[package]] [[package]]
name = "cfb" name = "cfb"
version = "0.7.3" version = "0.7.3"
@@ -1125,6 +1158,17 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "clang-sys"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
dependencies = [
"glob",
"libc",
"libloading 0.8.5",
]
[[package]] [[package]]
name = "clap" name = "clap"
version = "4.5.15" version = "4.5.15"
@@ -1183,7 +1227,7 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f79398230a6e2c08f5c9760610eb6924b52aa9e7950a619602baba59dcbbdbb2" checksum = "f79398230a6e2c08f5c9760610eb6924b52aa9e7950a619602baba59dcbbdbb2"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block", "block",
"cocoa-foundation", "cocoa-foundation",
"core-foundation 0.10.0", "core-foundation 0.10.0",
@@ -1199,7 +1243,7 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e14045fb83be07b5acf1c0884b2180461635b433455fa35d1cd6f17f1450679d" checksum = "e14045fb83be07b5acf1c0884b2180461635b433455fa35d1cd6f17f1450679d"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block", "block",
"core-foundation 0.10.0", "core-foundation 0.10.0",
"core-graphics-types 0.2.0", "core-graphics-types 0.2.0",
@@ -1335,7 +1379,7 @@ version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa95a34622365fa5bbf40b20b75dba8dfa8c94c734aea8ac9a5ca38af14316f1" checksum = "fa95a34622365fa5bbf40b20b75dba8dfa8c94c734aea8ac9a5ca38af14316f1"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"core-foundation 0.10.0", "core-foundation 0.10.0",
"core-graphics-types 0.2.0", "core-graphics-types 0.2.0",
"foreign-types 0.5.0", "foreign-types 0.5.0",
@@ -1359,7 +1403,7 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d44a101f213f6c4cdc1853d4b78aef6db6bdfa3468798cc1d9912f4735013eb" checksum = "3d44a101f213f6c4cdc1853d4b78aef6db6bdfa3468798cc1d9912f4735013eb"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"core-foundation 0.10.0", "core-foundation 0.10.0",
"libc", "libc",
] ]
@@ -1608,7 +1652,7 @@ dependencies = [
"log", "log",
"netlink-packet-core", "netlink-packet-core",
"netlink-packet-generic", "netlink-packet-generic",
"netlink-packet-route", "netlink-packet-route 0.17.1",
"netlink-packet-utils", "netlink-packet-utils",
"netlink-packet-wireguard", "netlink-packet-wireguard",
"netlink-sys", "netlink-sys",
@@ -1830,7 +1874,7 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]] [[package]]
name = "easytier" name = "easytier"
version = "2.1.1" version = "2.2.2"
dependencies = [ dependencies = [
"aes-gcm", "aes-gcm",
"anyhow", "anyhow",
@@ -1843,7 +1887,7 @@ dependencies = [
"atomicbox", "atomicbox",
"auto_impl", "auto_impl",
"base64 0.22.1", "base64 0.22.1",
"bitflags 2.6.0", "bitflags 2.8.0",
"boringtun-easytier", "boringtun-easytier",
"bytecodec", "bytecodec",
"byteorder", "byteorder",
@@ -1866,10 +1910,15 @@ dependencies = [
"http 1.1.0", "http 1.1.0",
"humansize", "humansize",
"indexmap 1.9.3", "indexmap 1.9.3",
"kcp-sys",
"machine-uid", "machine-uid",
"mimalloc-rust", "mimalloc-rust",
"netlink-packet-core",
"netlink-packet-route 0.21.0",
"netlink-packet-utils",
"netlink-sys",
"network-interface", "network-interface",
"nix 0.27.1", "nix 0.29.0",
"once_cell", "once_cell",
"parking_lot", "parking_lot",
"percent-encoding", "percent-encoding",
@@ -1878,6 +1927,8 @@ dependencies = [
"pnet", "pnet",
"prost", "prost",
"prost-build", "prost-build",
"prost-reflect",
"prost-reflect-build",
"prost-types", "prost-types",
"quinn", "quinn",
"rand 0.8.5", "rand 0.8.5",
@@ -1917,8 +1968,8 @@ dependencies = [
"url", "url",
"uuid", "uuid",
"wildmatch", "wildmatch",
"windows 0.52.0",
"windows-service", "windows-service",
"windows-sys 0.52.0",
"winreg 0.52.0", "winreg 0.52.0",
"zerocopy", "zerocopy",
"zip", "zip",
@@ -1926,7 +1977,7 @@ dependencies = [
[[package]] [[package]]
name = "easytier-gui" name = "easytier-gui"
version = "2.1.1" version = "2.2.2"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"chrono", "chrono",
@@ -1971,7 +2022,7 @@ dependencies = [
[[package]] [[package]]
name = "easytier-web" name = "easytier-web"
version = "0.1.0" version = "2.2.2"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-trait", "async-trait",
@@ -2709,7 +2760,7 @@ version = "0.18.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "233daaf6e83ae6a12a52055f568f9d7cf4671dabb78ff9560ab6da230ce00ee5" checksum = "233daaf6e83ae6a12a52055f568f9d7cf4671dabb78ff9560ab6da230ce00ee5"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"futures-channel", "futures-channel",
"futures-core", "futures-core",
"futures-executor", "futures-executor",
@@ -3392,15 +3443,6 @@ dependencies = [
"either", "either",
] ]
[[package]]
name = "itertools"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
dependencies = [
"either",
]
[[package]] [[package]]
name = "itoa" name = "itoa"
version = "0.4.8" version = "0.4.8"
@@ -3518,13 +3560,35 @@ dependencies = [
"serde_json", "serde_json",
] ]
[[package]]
name = "kcp-sys"
version = "0.1.0"
source = "git+https://github.com/EasyTier/kcp-sys#0f0a0558391ba391c089806c23f369651f6c9eeb"
dependencies = [
"anyhow",
"auto_impl",
"bindgen",
"bitflags 2.8.0",
"bytes",
"cc",
"dashmap",
"parking_lot",
"rand 0.8.5",
"thiserror 2.0.11",
"tokio",
"tokio-util",
"tracing",
"tracing-subscriber",
"zerocopy",
]
[[package]] [[package]]
name = "keyboard-types" name = "keyboard-types"
version = "0.7.0" version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b750dcadc39a09dbadd74e118f6dd6598df77fa01df0cfcdc52c28dece74528a" checksum = "b750dcadc39a09dbadd74e118f6dd6598df77fa01df0cfcdc52c28dece74528a"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"serde", "serde",
"unicode-segmentation", "unicode-segmentation",
] ]
@@ -3577,9 +3641,9 @@ dependencies = [
[[package]] [[package]]
name = "libc" name = "libc"
version = "0.2.155" version = "0.2.169"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
[[package]] [[package]]
name = "libloading" name = "libloading"
@@ -3613,7 +3677,7 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"libc", "libc",
] ]
@@ -3657,6 +3721,39 @@ version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "logos"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7251356ef8cb7aec833ddf598c6cb24d17b689d20b993f9d11a3d764e34e6458"
dependencies = [
"logos-derive",
]
[[package]]
name = "logos-codegen"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59f80069600c0d66734f5ff52cc42f2dabd6b29d205f333d61fd7832e9e9963f"
dependencies = [
"beef",
"fnv",
"lazy_static",
"proc-macro2",
"quote",
"regex-syntax 0.8.4",
"syn 2.0.87",
]
[[package]]
name = "logos-derive"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24fb722b06a9dc12adb0963ed585f19fc61dc5413e6a9be9422ef92c091e731d"
dependencies = [
"logos-codegen",
]
[[package]] [[package]]
name = "loom" name = "loom"
version = "0.5.6" version = "0.5.6"
@@ -3895,7 +3992,7 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4" checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"jni-sys", "jni-sys",
"log", "log",
"ndk-sys", "ndk-sys",
@@ -3956,6 +4053,21 @@ dependencies = [
"netlink-packet-utils", "netlink-packet-utils",
] ]
[[package]]
name = "netlink-packet-route"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "483325d4bfef65699214858f097d504eb812c38ce7077d165f301ec406c3066e"
dependencies = [
"anyhow",
"bitflags 2.8.0",
"byteorder",
"libc",
"log",
"netlink-packet-core",
"netlink-packet-utils",
]
[[package]] [[package]]
name = "netlink-packet-utils" name = "netlink-packet-utils"
version = "0.5.2" version = "0.5.2"
@@ -3984,9 +4096,9 @@ dependencies = [
[[package]] [[package]]
name = "netlink-sys" name = "netlink-sys"
version = "0.8.6" version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23"
dependencies = [ dependencies = [
"bytes", "bytes",
"libc", "libc",
@@ -4029,7 +4141,7 @@ version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"cfg-if", "cfg-if",
"libc", "libc",
"memoffset", "memoffset",
@@ -4041,7 +4153,7 @@ version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"cfg-if", "cfg-if",
"cfg_aliases", "cfg_aliases",
"libc", "libc",
@@ -4250,7 +4362,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"libc", "libc",
"objc2", "objc2",
@@ -4266,7 +4378,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74dd3b56391c7a0596a295029734d3c1c5e7e510a4cb30245f8221ccea96b009" checksum = "74dd3b56391c7a0596a295029734d3c1c5e7e510a4cb30245f8221ccea96b009"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"objc2", "objc2",
"objc2-core-location", "objc2-core-location",
@@ -4290,7 +4402,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"objc2", "objc2",
"objc2-foundation", "objc2-foundation",
@@ -4332,7 +4444,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"dispatch", "dispatch",
"libc", "libc",
@@ -4357,7 +4469,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"objc2", "objc2",
"objc2-foundation", "objc2-foundation",
@@ -4369,7 +4481,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"objc2", "objc2",
"objc2-foundation", "objc2-foundation",
@@ -4392,7 +4504,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8bb46798b20cd6b91cbd113524c490f1686f4c4e8f49502431415f3512e2b6f" checksum = "b8bb46798b20cd6b91cbd113524c490f1686f4c4e8f49502431415f3512e2b6f"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"objc2", "objc2",
"objc2-cloud-kit", "objc2-cloud-kit",
@@ -4424,7 +4536,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76cfcbf642358e8689af64cee815d139339f3ed8ad05103ed5eaf73db8d84cb3" checksum = "76cfcbf642358e8689af64cee815d139339f3ed8ad05103ed5eaf73db8d84cb3"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"objc2", "objc2",
"objc2-core-location", "objc2-core-location",
@@ -4437,7 +4549,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68bc69301064cebefc6c4c90ce9cba69225239e4b8ff99d445a2b5563797da65" checksum = "68bc69301064cebefc6c4c90ce9cba69225239e4b8ff99d445a2b5563797da65"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"block2", "block2",
"objc2", "objc2",
"objc2-app-kit", "objc2-app-kit",
@@ -4482,7 +4594,7 @@ version = "0.10.66"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"cfg-if", "cfg-if",
"foreign-types 0.3.2", "foreign-types 0.3.2",
"libc", "libc",
@@ -4526,6 +4638,15 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "ordered-float"
version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c"
dependencies = [
"num-traits",
]
[[package]] [[package]]
name = "ordered-float" name = "ordered-float"
version = "3.9.2" version = "3.9.2"
@@ -5288,7 +5409,7 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302"
dependencies = [ dependencies = [
"bytes", "bytes",
"heck 0.5.0", "heck 0.5.0",
"itertools 0.13.0", "itertools 0.12.1",
"log", "log",
"multimap", "multimap",
"once_cell", "once_cell",
@@ -5308,7 +5429,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"itertools 0.13.0", "itertools 0.12.1",
"proc-macro2",
"quote",
"syn 2.0.87",
]
[[package]]
name = "prost-reflect"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e92b959d24e05a3e2da1d0beb55b48bc8a97059b8336ea617780bd6addbbfb5a"
dependencies = [
"base64 0.22.1",
"logos",
"once_cell",
"prost",
"prost-reflect-derive",
"prost-types",
"serde",
"serde-value",
]
[[package]]
name = "prost-reflect-build"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50e2537231d94dd2778920c2ada37dd9eb1ac0325bb3ee3ee651bd44c1134123"
dependencies = [
"prost-build",
"prost-reflect",
]
[[package]]
name = "prost-reflect-derive"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4fce6b22f15cc8d8d400a2b98ad29202b33bd56c7d9ddd815bc803a807ecb65"
dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn 2.0.87", "syn 2.0.87",
@@ -5556,7 +5714,7 @@ version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
] ]
[[package]] [[package]]
@@ -5942,9 +6100,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
[[package]] [[package]]
name = "rustc-hash" name = "rustc-hash"
version = "2.0.0" version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497"
[[package]] [[package]]
name = "rustc_version" name = "rustc_version"
@@ -5961,7 +6119,7 @@ version = "0.38.34"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"errno", "errno",
"libc", "libc",
"linux-raw-sys", "linux-raw-sys",
@@ -6259,7 +6417,7 @@ dependencies = [
"bigdecimal", "bigdecimal",
"chrono", "chrono",
"inherent", "inherent",
"ordered-float", "ordered-float 3.9.2",
"rust_decimal", "rust_decimal",
"sea-query-derive", "sea-query-derive",
"serde_json", "serde_json",
@@ -6332,7 +6490,7 @@ version = "2.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"core-foundation 0.9.4", "core-foundation 0.9.4",
"core-foundation-sys", "core-foundation-sys",
"libc", "libc",
@@ -6399,6 +6557,16 @@ dependencies = [
"typeid", "typeid",
] ]
[[package]]
name = "serde-value"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c"
dependencies = [
"ordered-float 2.10.1",
"serde",
]
[[package]] [[package]]
name = "serde_derive" name = "serde_derive"
version = "1.0.207" version = "1.0.207"
@@ -6716,9 +6884,9 @@ dependencies = [
[[package]] [[package]]
name = "smoltcp" name = "smoltcp"
version = "0.11.0" version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a1a996951e50b5971a2c8c0fa05a381480d70a933064245c4a223ddc87ccc97" checksum = "dad095989c1533c1c266d9b1e8d70a1329dd3723c3edac6d03bbd67e7bf6f4bb"
dependencies = [ dependencies = [
"bitflags 1.3.2", "bitflags 1.3.2",
"byteorder", "byteorder",
@@ -6924,7 +7092,7 @@ dependencies = [
"atoi", "atoi",
"base64 0.22.1", "base64 0.22.1",
"bigdecimal", "bigdecimal",
"bitflags 2.6.0", "bitflags 2.8.0",
"byteorder", "byteorder",
"bytes", "bytes",
"chrono", "chrono",
@@ -6971,7 +7139,7 @@ dependencies = [
"atoi", "atoi",
"base64 0.22.1", "base64 0.22.1",
"bigdecimal", "bigdecimal",
"bitflags 2.6.0", "bitflags 2.8.0",
"byteorder", "byteorder",
"chrono", "chrono",
"crc", "crc",
@@ -7257,7 +7425,7 @@ version = "0.30.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "833b4d43383d76d5078d72f3acd977f47eb5b6751eb40baa665d13828e7b79df" checksum = "833b4d43383d76d5078d72f3acd977f47eb5b6751eb40baa665d13828e7b79df"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"cocoa", "cocoa",
"core-foundation 0.10.0", "core-foundation 0.10.0",
"core-graphics 0.24.0", "core-graphics 0.24.0",
@@ -7406,7 +7574,7 @@ dependencies = [
"sha2", "sha2",
"syn 2.0.87", "syn 2.0.87",
"tauri-utils", "tauri-utils",
"thiserror 2.0.2", "thiserror 2.0.11",
"time", "time",
"url", "url",
"uuid", "uuid",
@@ -7637,7 +7805,7 @@ dependencies = [
"serde_json", "serde_json",
"serde_with", "serde_with",
"swift-rs", "swift-rs",
"thiserror 2.0.2", "thiserror 2.0.11",
"toml 0.8.19", "toml 0.8.19",
"url", "url",
"urlpattern", "urlpattern",
@@ -7706,11 +7874,11 @@ dependencies = [
[[package]] [[package]]
name = "thiserror" name = "thiserror"
version = "2.0.2" version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "037e29b009aa709f293b974da5cd33b15783c049e07f8435778ce8c4871525d8" checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc"
dependencies = [ dependencies = [
"thiserror-impl 2.0.2", "thiserror-impl 2.0.11",
] ]
[[package]] [[package]]
@@ -7726,9 +7894,9 @@ dependencies = [
[[package]] [[package]]
name = "thiserror-impl" name = "thiserror-impl"
version = "2.0.2" version = "2.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea4778c7e8ff768bdb32a58a2349903859fe719a320300d7d4ce8636f19a1e69" checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -7885,9 +8053,9 @@ dependencies = [
[[package]] [[package]]
name = "tokio-util" name = "tokio-util"
version = "0.7.11" version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078"
dependencies = [ dependencies = [
"bytes", "bytes",
"futures-core", "futures-core",
@@ -8067,7 +8235,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97"
dependencies = [ dependencies = [
"async-compression", "async-compression",
"bitflags 2.6.0", "bitflags 2.8.0",
"bytes", "bytes",
"futures-core", "futures-core",
"http 1.1.0", "http 1.1.0",
@@ -8158,9 +8326,9 @@ dependencies = [
[[package]] [[package]]
name = "tracing" name = "tracing"
version = "0.1.40" version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [ dependencies = [
"log", "log",
"pin-project-lite", "pin-project-lite",
@@ -8182,9 +8350,9 @@ dependencies = [
[[package]] [[package]]
name = "tracing-attributes" name = "tracing-attributes"
version = "0.1.27" version = "0.1.28"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -8193,9 +8361,9 @@ dependencies = [
[[package]] [[package]]
name = "tracing-core" name = "tracing-core"
version = "0.1.32" version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
dependencies = [ dependencies = [
"once_cell", "once_cell",
"valuable", "valuable",
@@ -8214,9 +8382,9 @@ dependencies = [
[[package]] [[package]]
name = "tracing-subscriber" name = "tracing-subscriber"
version = "0.3.18" version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
dependencies = [ dependencies = [
"matchers", "matchers",
"nu-ansi-term", "nu-ansi-term",
@@ -8893,6 +9061,16 @@ dependencies = [
"windows-targets 0.48.5", "windows-targets 0.48.5",
] ]
[[package]]
name = "windows"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
dependencies = [
"windows-core 0.52.0",
"windows-targets 0.52.6",
]
[[package]] [[package]]
name = "windows" name = "windows"
version = "0.58.0" version = "0.58.0"
@@ -8962,7 +9140,7 @@ version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d24d6bcc7f734a4091ecf8d7a64c5f7d7066f45585c1861eba06449909609c8a" checksum = "d24d6bcc7f734a4091ecf8d7a64c5f7d7066f45585c1861eba06449909609c8a"
dependencies = [ dependencies = [
"bitflags 2.6.0", "bitflags 2.8.0",
"widestring", "widestring",
"windows-sys 0.52.0", "windows-sys 0.52.0",
] ]
+7 -8
View File
@@ -31,6 +31,7 @@ EasyTier is a simple, safe and decentralized VPN networking solution implemented
- **High Availability**: Supports multi-path and switches to healthy paths when high packet loss or network errors are detected. - **High Availability**: Supports multi-path and switches to healthy paths when high packet loss or network errors are detected.
- **IPv6 Support**: Supports networking using IPv6. - **IPv6 Support**: Supports networking using IPv6.
- **Multiple Protocol Types**: Supports communication between nodes using protocols such as WebSocket and QUIC. - **Multiple Protocol Types**: Supports communication between nodes using protocols such as WebSocket and QUIC.
- **Web Management Interface**: Provides a [web-based management](https://easytier.cn/web) interface for easy configuration and monitoring.
## Installation ## Installation
@@ -52,7 +53,7 @@ EasyTier is a simple, safe and decentralized VPN networking solution implemented
4. **Install by Docker Compose** 4. **Install by Docker Compose**
Please visit the [EasyTier Official Website](https://www.easytier.top/en/) to view the full documentation. Please visit the [EasyTier Official Website](https://www.easytier.cn/en/) to view the full documentation.
5. **Install by script (For Linux Only)** 5. **Install by script (For Linux Only)**
@@ -200,20 +201,20 @@ Subnet proxy information will automatically sync to each node in the virtual net
### Networking without Public IP ### Networking without Public IP
EasyTier supports networking using shared public nodes. The currently deployed shared public node is ``tcp://public.easytier.top:11010``. EasyTier supports networking using shared public nodes. The currently deployed shared public node is ``tcp://public.easytier.cn:11010``.
When using shared nodes, each node entering the network needs to provide the same ``--network-name`` and ``--network-secret`` parameters as the unique identifier of the network. When using shared nodes, each node entering the network needs to provide the same ``--network-name`` and ``--network-secret`` parameters as the unique identifier of the network.
Taking two nodes as an example, Node A executes: Taking two nodes as an example, Node A executes:
```sh ```sh
sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010 sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
``` ```
Node B executes Node B executes
```sh ```sh
sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010 sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
``` ```
After the command is successfully executed, Node A can access Node B through the virtual IP 10.144.144.2. After the command is successfully executed, Node A can access Node B through the virtual IP 10.144.144.2.
@@ -286,7 +287,7 @@ Run you own public server cluster is exactly same as running an virtual network,
You can also join the official public server cluster with following command: You can also join the official public server cluster with following command:
``` ```
sudo easytier-core --network-name easytier --network-secret easytier -p tcp://public.easytier.top:11010 sudo easytier-core --network-name easytier --network-secret easytier -p tcp://public.easytier.cn:11010
``` ```
@@ -296,10 +297,8 @@ You can use ``easytier-core --help`` to view all configuration items
## Roadmap ## Roadmap
- [ ] Improve documentation and user guides. - [ ] Support features such TCP hole punching, KCP, FEC etc.
- [ ] Support features such as encryption, TCP hole punching, etc.
- [ ] Support iOS. - [ ] Support iOS.
- [ ] Support Web configuration management.
## Community and Contribution ## Community and Contribution
+8 -8
View File
@@ -8,7 +8,7 @@
[简体中文](/README_CN.md) | [English](/README.md) [简体中文](/README_CN.md) | [English](/README.md)
**请访问 [EasyTier 官网](https://www.easytier.top/) 以查看完整的文档。** **请访问 [EasyTier 官网](https://www.easytier.cn/) 以查看完整的文档。**
一个简单、安全、去中心化的内网穿透 VPN 组网方案,使用 Rust 语言和 Tokio 框架实现。 一个简单、安全、去中心化的内网穿透 VPN 组网方案,使用 Rust 语言和 Tokio 框架实现。
@@ -31,6 +31,7 @@
- **高可用性**:支持多路径和在检测到高丢包率或网络错误时切换到健康路径。 - **高可用性**:支持多路径和在检测到高丢包率或网络错误时切换到健康路径。
- **IPV6 支持**:支持利用 IPV6 组网。 - **IPV6 支持**:支持利用 IPV6 组网。
- **多协议类型**: 支持使用 WebSocket、QUIC 等协议进行节点间通信。 - **多协议类型**: 支持使用 WebSocket、QUIC 等协议进行节点间通信。
- **Web 管理界面**:支持通过 [Web 界面](https://easytier.cn)管理节点。
## 安装 ## 安装
@@ -52,7 +53,7 @@
4. **通过Docker Compose安装** 4. **通过Docker Compose安装**
请访问 [EasyTier 官网](https://www.easytier.top/) 以查看完整的文档。 请访问 [EasyTier 官网](https://www.easytier.cn/) 以查看完整的文档。
5. **使用一键脚本安装 (仅适用于 Linux)** 5. **使用一键脚本安装 (仅适用于 Linux)**
@@ -199,20 +200,20 @@ sudo easytier-core --ipv4 10.144.144.2 -n 10.1.1.0/24
### 无公网IP组网 ### 无公网IP组网
EasyTier 支持共享公网节点进行组网。目前已部署共享的公网节点 ``tcp://public.easytier.top:11010``。 EasyTier 支持共享公网节点进行组网。目前已部署共享的公网节点 ``tcp://public.easytier.cn:11010``。
使用共享节点时,需要每个入网节点提供相同的 ``--network-name`` 和 ``--network-secret`` 参数,作为网络的唯一标识。 使用共享节点时,需要每个入网节点提供相同的 ``--network-name`` 和 ``--network-secret`` 参数,作为网络的唯一标识。
以双节点为例,节点 A 执行: 以双节点为例,节点 A 执行:
```sh ```sh
sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010 sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
``` ```
节点 B 执行 节点 B 执行
```sh ```sh
sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010 sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
``` ```
命令执行成功后,节点 A 即可通过虚拟 IP 10.144.144.2 访问节点 B。 命令执行成功后,节点 A 即可通过虚拟 IP 10.144.144.2 访问节点 B。
@@ -289,7 +290,7 @@ connected_clients:
也可以使用以下命令加入官方公共服务器集群,后续将实现公共服务器集群的节点间负载均衡: 也可以使用以下命令加入官方公共服务器集群,后续将实现公共服务器集群的节点间负载均衡:
``` ```
sudo easytier-core --network-name easytier --network-secret easytier -p tcp://public.easytier.top:11010 sudo easytier-core --network-name easytier --network-secret easytier -p tcp://public.easytier.cn:11010
``` ```
### 其他配置 ### 其他配置
@@ -299,9 +300,8 @@ sudo easytier-core --network-name easytier --network-secret easytier -p tcp://pu
## 路线图 ## 路线图
- [ ] 完善文档和用户指南。 - [ ] 完善文档和用户指南。
- [ ] 支持 TCP 打洞等特性。 - [ ] 支持 TCP 打洞、KCP、FEC 等特性。
- [ ] 支持 iOS。 - [ ] 支持 iOS。
- [ ] 支持 Web 配置管理。
## 社区和贡献 ## 社区和贡献
+1 -1
View File
@@ -1,7 +1,7 @@
{ {
"name": "easytier-gui", "name": "easytier-gui",
"type": "module", "type": "module",
"version": "2.1.1", "version": "2.2.2",
"private": true, "private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4", "packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": { "scripts": {
+1 -1
View File
@@ -1,6 +1,6 @@
[package] [package]
name = "easytier-gui" name = "easytier-gui"
version = "2.1.1" version = "2.2.2"
description = "EasyTier GUI" description = "EasyTier GUI"
authors = ["you"] authors = ["you"]
edition = "2021" edition = "2021"
Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.4 KiB

After

Width:  |  Height:  |  Size: 5.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.4 KiB

After

Width:  |  Height:  |  Size: 5.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.3 KiB

After

Width:  |  Height:  |  Size: 5.1 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.9 KiB

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.3 KiB

After

Width:  |  Height:  |  Size: 5.1 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.8 KiB

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 18 KiB

After

Width:  |  Height:  |  Size: 39 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.8 KiB

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 16 KiB

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 40 KiB

After

Width:  |  Height:  |  Size: 85 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 16 KiB

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.6 KiB

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 59 KiB

After

Width:  |  Height:  |  Size: 59 KiB

Binary file not shown.
Binary file not shown.

Before

Width:  |  Height:  |  Size: 68 KiB

After

Width:  |  Height:  |  Size: 68 KiB

+5 -1
View File
@@ -89,6 +89,7 @@ fn get_os_hostname() -> Result<String, String> {
#[tauri::command] #[tauri::command]
fn set_logging_level(level: String) -> Result<(), String> { fn set_logging_level(level: String) -> Result<(), String> {
#[allow(static_mut_refs)]
let sender = unsafe { LOGGER_LEVEL_SENDER.as_ref().unwrap() }; let sender = unsafe { LOGGER_LEVEL_SENDER.as_ref().unwrap() };
sender.send(level).map_err(|e| e.to_string())?; sender.send(level).map_err(|e| e.to_string())?;
Ok(()) Ok(())
@@ -188,7 +189,10 @@ pub fn run() {
let Ok(Some(logger_reinit)) = utils::init_logger(config, true) else { let Ok(Some(logger_reinit)) = utils::init_logger(config, true) else {
return Ok(()); return Ok(());
}; };
unsafe { LOGGER_LEVEL_SENDER.replace(logger_reinit) }; #[allow(static_mut_refs)]
unsafe {
LOGGER_LEVEL_SENDER.replace(logger_reinit)
};
// for tray icon, menu need to be built in js // for tray icon, menu need to be built in js
#[cfg(not(target_os = "android"))] #[cfg(not(target_os = "android"))]
+1 -1
View File
@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false "createUpdaterArtifacts": false
}, },
"productName": "easytier-gui", "productName": "easytier-gui",
"version": "2.1.1", "version": "2.2.2",
"identifier": "com.kkrainbow.easytier", "identifier": "com.kkrainbow.easytier",
"plugins": {}, "plugins": {},
"app": { "app": {
+1 -1
View File
@@ -8,7 +8,7 @@ repository = "https://github.com/EasyTier/EasyTier"
authors = ["kkrainbow"] authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"] keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"] categories = ["network-programming", "command-line-utilities"]
rust-version = "1.77.0" rust-version = "1.84.0"
license-file = "LICENSE" license-file = "LICENSE"
readme = "README.md" readme = "README.md"
+1 -1
View File
@@ -1,6 +1,6 @@
[package] [package]
name = "easytier-web" name = "easytier-web"
version = "0.1.0" version = "2.2.2"
edition = "2021" edition = "2021"
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server." description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
@@ -120,6 +120,21 @@ function searchListenerSuggestions(e: { query: string }) {
listenerSuggestions.value = ret listenerSuggestions.value = ret
} }
interface BoolFlag {
field: keyof NetworkConfig
help: string
}
const bool_flags: BoolFlag[] = [
{ field: 'latency_first', help: 'latency_first_help' },
{ field: 'use_smoltcp', help: 'use_smoltcp_help' },
{ field: 'enable_kcp_proxy', help: 'enable_kcp_proxy_help' },
{ field: 'disable_kcp_input', help: 'disable_kcp_input_help' },
{ field: 'disable_p2p', help: 'disable_p2p_help' },
{ field: 'bind_device', help: 'bind_device_help' },
{ field: 'no_tun', help: 'no_tun_help' },
]
</script> </script>
<template> <template>
@@ -188,11 +203,18 @@ function searchListenerSuggestions(e: { query: string }) {
<Panel :header="t('advanced_settings')" toggleable collapsed> <Panel :header="t('advanced_settings')" toggleable collapsed>
<div class="flex flex-col gap-y-2"> <div class="flex flex-col gap-y-2">
<div class="flex flex-row gap-x-9 flex-wrap"> <div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow"> <div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex items-center"> <label> {{ t('flags_switch') }} </label>
<Checkbox v-model="curNetwork.latency_first" input-id="use_latency_first" :binary="true" /> <div class="flex flex-row flex-wrap">
<label for="use_latency_first" class="ml-2"> {{ t('use_latency_first') }} </label>
<div class="basis-64 flex" v-for="flag in bool_flags">
<Checkbox v-model="curNetwork[flag.field]" :input-id="flag.field" :binary="true" />
<label :for="flag.field" class="ml-2"> {{ t(flag.field) }} </label>
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t(flag.help)"></span>
</div>
</div> </div>
</div> </div>
</div> </div>
@@ -69,6 +69,29 @@ upload_bytes: 上传
download_bytes: 下载 download_bytes: 下载
loss_rate: 丢包率 loss_rate: 丢包率
flags_switch: 功能开关
latency_first: 开启延迟优先模式
latency_first_help: 忽略中转跳数,选择总延迟最低的路径
use_smoltcp: 使用用户态协议栈
use_smoltcp_help: 使用用户态 TCP/IP 协议栈,避免操作系统防火墙问题导致无法子网代理 / KCP代理。
enable_kcp_proxy: 启用 KCP 代理
enable_kcp_proxy_help: 将 TCP 流量转为 KCP 流量,降低传输延迟,提升传输速度。
disable_kcp_input: 禁用 KCP 输入
disable_kcp_input_help: 禁用 KCP 入站流量,其他开启 KCP 代理的节点仍然使用 TCP 连接到本节点。
disable_p2p: 禁用 P2P
disable_p2p_help: 禁用 P2P 模式,所有流量通过手动指定的服务器中转。
bind_device: 仅使用物理网卡
bind_device_help: 仅使用物理网卡,避免 EasyTier 通过其他虚拟网建立连接。
no_tun: 无 TUN 模式
no_tun_help: 不使用 TUN 网卡,适合无管理员权限时使用。本节点仅允许被访问。访问其他节点需要使用 SOCK5
status: status:
version: 内核版本 version: 内核版本
local: 本机 local: 本机
@@ -113,3 +136,4 @@ event:
VpnPortalClientDisconnected: VPN门户客户端已断开连接 VpnPortalClientDisconnected: VPN门户客户端已断开连接
DhcpIpv4Changed: DHCP IPv4地址更改 DhcpIpv4Changed: DHCP IPv4地址更改
DhcpIpv4Conflicted: DHCP IPv4地址冲突 DhcpIpv4Conflicted: DHCP IPv4地址冲突
@@ -68,6 +68,29 @@ upload_bytes: Upload
download_bytes: Download download_bytes: Download
loss_rate: Loss Rate loss_rate: Loss Rate
flags_switch: Feature Switch
latency_first: Enable Latency-First Mode
latency_first_help: Ignore hop count and select the path with the lowest total latency
use_smoltcp: Use User-Space Protocol Stack
use_smoltcp_help: Use a user-space TCP/IP stack to avoid issues with operating system firewalls blocking subnet or KCP proxy functionality.
enable_kcp_proxy: Enable KCP Proxy
enable_kcp_proxy_help: Convert TCP traffic to KCP traffic to reduce latency and boost transmission speed.
disable_kcp_input: Disable KCP Input
disable_kcp_input_help: Disable inbound KCP traffic, while nodes with KCP proxy enabled continue to connect using TCP.
disable_p2p: Disable P2P
disable_p2p_help: Disable P2P mode; route all traffic through a manually specified relay server.
bind_device: Bind to Physical Device Only
bind_device_help: Use only the physical network interface to prevent EasyTier from connecting via virtual networks.
no_tun: No TUN Mode
no_tun_help: Do not use a TUN interface, suitable for environments without administrator privileges. This node is only accessible; accessing other nodes requires SOCKS5.
status: status:
version: Version version: Version
local: Local local: Local
@@ -1,6 +1,7 @@
import axios, { AxiosError, AxiosInstance, AxiosResponse, InternalAxiosRequestConfig } from 'axios'; import axios, { AxiosError, AxiosInstance, AxiosResponse, InternalAxiosRequestConfig } from 'axios';
import { Md5 } from 'ts-md5' import { Md5 } from 'ts-md5'
import { UUID } from './utils'; import { UUID } from './utils';
import { NetworkConfig } from '../types/network';
export interface ValidateConfigResponse { export interface ValidateConfigResponse {
toml_config: string; toml_config: string;
@@ -37,6 +38,15 @@ export interface ListNetworkInstanceIdResponse {
disabled_inst_ids: Array<UUID>, disabled_inst_ids: Array<UUID>,
} }
export interface GenerateConfigRequest {
config: NetworkConfig;
}
export interface GenerateConfigResponse {
toml_config?: string;
error?: string;
}
export class ApiClient { export class ApiClient {
private client: AxiosInstance; private client: AxiosInstance;
private authFailedCb: Function | undefined; private authFailedCb: Function | undefined;
@@ -193,6 +203,18 @@ export class ApiClient {
public captcha_url() { public captcha_url() {
return this.client.defaults.baseURL + '/auth/captcha'; return this.client.defaults.baseURL + '/auth/captcha';
} }
public async generate_config(config: GenerateConfigRequest): Promise<GenerateConfigResponse> {
try {
const response = await this.client.post<any, GenerateConfigResponse>('/generate-config', config);
return response;
} catch (error) {
if (error instanceof AxiosError) {
return { error: error.response?.data };
}
return { error: 'Unknown error: ' + error };
}
}
} }
export default ApiClient; export default ApiClient;
+15 -1
View File
@@ -35,6 +35,13 @@ export interface NetworkConfig {
latency_first: boolean latency_first: boolean
dev_name: string dev_name: string
use_smoltcp?: boolean
enable_kcp_proxy?: boolean
disable_kcp_input?: boolean
disable_p2p?: boolean
bind_device?: boolean
no_tun?: boolean
} }
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
@@ -67,8 +74,15 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
'wg://0.0.0.0:11011', 'wg://0.0.0.0:11011',
], ],
rpc_port: 0, rpc_port: 0,
latency_first: true, latency_first: false,
dev_name: '', dev_name: '',
use_smoltcp: false,
enable_kcp_proxy: false,
disable_kcp_input: false,
disable_p2p: false,
bind_device: true,
no_tun: false,
} }
} }
@@ -0,0 +1,39 @@
<script setup lang="ts">
import { NetworkTypes } from 'easytier-frontend-lib';
import { ref } from 'vue';
import { Api } from 'easytier-frontend-lib'
const defaultApiHost = 'https://config-server.easytier.cn'
const api = new Api.ApiClient(defaultApiHost);
const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG());
const toml_config = ref<string>("Press 'Run Network' to generate TOML configuration");
const generateConfig = (config: NetworkTypes.NetworkConfig) => {
api.generate_config({
config: config
}).then((res) => {
if (res.error) {
toml_config.value = res.error;
} else if (res.toml_config) {
toml_config.value = res.toml_config;
} else {
toml_config.value = "Api server returned an unexpected response";
}
});
};
</script>
<template>
<div class="flex items-center justify-center m-5">
<div class="flex w-full">
<div class="w-1/2 p-4">
<Config :cur-network="newNetworkConfig" @run-network="generateConfig" />
</div>
<div class="w-1/2 p-4 bg-gray-100">
<pre class="whitespace-pre-wrap">{{ toml_config }}</pre>
</div>
</div>
</div>
</template>
+5
View File
@@ -15,6 +15,7 @@ import DeviceManagement from './components/DeviceManagement.vue'
import Dashboard from './components/Dashboard.vue' import Dashboard from './components/Dashboard.vue'
import DialogService from 'primevue/dialogservice'; import DialogService from 'primevue/dialogservice';
import ToastService from 'primevue/toastservice'; import ToastService from 'primevue/toastservice';
import ConfigGenerator from './components/ConfigGenerator.vue'
const routes = [ const routes = [
{ {
@@ -66,6 +67,10 @@ const routes = [
} }
} }
}, },
{
path: '/config_generator',
component: ConfigGenerator,
}
] ]
const router = createRouter({ const router = createRouter({
+36
View File
@@ -6,11 +6,14 @@ mod users;
use std::{net::SocketAddr, sync::Arc}; use std::{net::SocketAddr, sync::Arc};
use axum::http::StatusCode; use axum::http::StatusCode;
use axum::routing::post;
use axum::{extract::State, routing::get, Json, Router}; use axum::{extract::State, routing::get, Json, Router};
use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer}; use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer};
use axum_login::{login_required, AuthManagerLayerBuilder, AuthzBackend}; use axum_login::{login_required, AuthManagerLayerBuilder, AuthzBackend};
use axum_messages::MessagesManagerLayer; use axum_messages::MessagesManagerLayer;
use easytier::common::config::ConfigLoader;
use easytier::common::scoped_task::ScopedTask; use easytier::common::scoped_task::ScopedTask;
use easytier::launcher::NetworkConfig;
use easytier::proto::rpc_types; use easytier::proto::rpc_types;
use network::NetworkApi; use network::NetworkApi;
use sea_orm::DbErr; use sea_orm::DbErr;
@@ -48,6 +51,17 @@ struct GetSummaryJsonResp {
device_count: u32, device_count: u32,
} }
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct GenerateConfigRequest {
config: NetworkConfig,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct GenerateConfigResponse {
error: Option<String>,
toml_config: Option<String>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)] #[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Error { pub struct Error {
message: String, message: String,
@@ -131,6 +145,24 @@ impl RestfulServer {
.into()) .into())
} }
async fn handle_generate_config(
Json(req): Json<GenerateConfigRequest>,
) -> Result<Json<GenerateConfigResponse>, HttpHandleError> {
let config = req.config.gen_config();
match config {
Ok(c) => Ok(GenerateConfigResponse {
error: None,
toml_config: Some(c.dump()),
}
.into()),
Err(e) => Ok(GenerateConfigResponse {
error: Some(format!("{:?}", e)),
toml_config: None,
}
.into()),
}
}
pub async fn start(&mut self) -> Result<(), anyhow::Error> { pub async fn start(&mut self) -> Result<(), anyhow::Error> {
let listener = TcpListener::bind(self.bind_addr).await?; let listener = TcpListener::bind(self.bind_addr).await?;
@@ -178,6 +210,10 @@ impl RestfulServer {
.route_layer(login_required!(Backend)) .route_layer(login_required!(Backend))
.merge(auth::router()) .merge(auth::router())
.with_state(self.client_mgr.clone()) .with_state(self.client_mgr.clone())
.route(
"/api/v1/generate-config",
post(Self::handle_generate_config),
)
.layer(MessagesManagerLayer) .layer(MessagesManagerLayer)
.layer(auth_layer) .layer(auth_layer)
.layer(tower_http::cors::CorsLayer::very_permissive()) .layer(tower_http::cors::CorsLayer::very_permissive())
+27 -8
View File
@@ -3,12 +3,12 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command." description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier" homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier" repository = "https://github.com/EasyTier/EasyTier"
version = "2.1.1" version = "2.2.2"
edition = "2021" edition = "2021"
authors = ["kkrainbow"] authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"] keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"] categories = ["network-programming", "command-line-utilities"]
rust-version = "1.77.0" rust-version = "1.84.0"
license-file = "LICENSE" license-file = "LICENSE"
readme = "README.md" readme = "README.md"
@@ -89,7 +89,7 @@ tun = { package = "tun-easytier", version = "1.1.1", features = [
"async", "async",
], optional = true } ], optional = true }
# for net ns # for net ns
nix = { version = "0.27", features = ["sched", "socket", "ioctl"] } nix = { version = "0.29.0", features = ["sched", "socket", "ioctl", "net"] }
uuid = { version = "1.5.0", features = [ uuid = { version = "1.5.0", features = [
"v4", "v4",
@@ -163,12 +163,13 @@ indexmap = { version = "~1.9.3", optional = false, features = ["std"] }
atomic-shim = "0.2.0" atomic-shim = "0.2.0"
smoltcp = { version = "0.11.0", optional = true, default-features = false, features = [ smoltcp = { version = "0.12.0", optional = true, default-features = false, features = [
"std", "std",
"medium-ip", "medium-ip",
"proto-ipv4", "proto-ipv4",
"proto-ipv6", "proto-ipv6",
"socket-tcp", "socket-tcp",
"socket-tcp-cubic",
"async", "async",
] } ] }
parking_lot = { version = "0.12.0", optional = true } parking_lot = { version = "0.12.0", optional = true }
@@ -185,16 +186,33 @@ service-manager = {git = "https://github.com/chipsenkbeil/service-manager-rs.git
async-compression = { version = "0.4.17", default-features = false, features = ["zstd", "tokio"] } async-compression = { version = "0.4.17", default-features = false, features = ["zstd", "tokio"] }
kcp-sys = { git = "https://github.com/EasyTier/kcp-sys" }
prost-reflect = { version = "0.14.5", features = [
"serde",
"derive",
"text-format"
] }
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies] [target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies]
machine-uid = "0.5.3" machine-uid = "0.5.3"
[target.'cfg(any(target_os = "linux"))'.dependencies]
netlink-sys = "0.8.7"
netlink-packet-route = "0.21.0"
netlink-packet-core = { version = "0.7.0" }
netlink-packet-utils = "0.5.2"
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.52", features = [ windows = { version = "0.52.0", features = [
"Win32_Networking_WinSock",
"Win32_NetworkManagement_IpHelper",
"Win32_Foundation", "Win32_Foundation",
"Win32_NetworkManagement_WindowsFirewall",
"Win32_System_Com",
"Win32_Networking",
"Win32_System_Ole",
"Win32_Networking_WinSock",
"Win32_System_IO", "Win32_System_IO",
] } ]}
encoding = "0.2" encoding = "0.2"
winreg = "0.52" winreg = "0.52"
windows-service = "0.7.0" windows-service = "0.7.0"
@@ -205,6 +223,7 @@ globwalk = "0.8.1"
regex = "1" regex = "1"
prost-build = "0.13.2" prost-build = "0.13.2"
rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = ["internal-namespace"] } rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = ["internal-namespace"] }
prost-reflect-build = { version = "0.14.0" }
[target.'cfg(windows)'.build-dependencies] [target.'cfg(windows)'.build-dependencies]
reqwest = { version = "0.11", features = ["blocking"] } reqwest = { version = "0.11", features = ["blocking"] }
+7 -4
View File
@@ -141,7 +141,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("cargo:rerun-if-changed={}", proto_file); println!("cargo:rerun-if-changed={}", proto_file);
} }
prost_build::Config::new() let mut config = prost_build::Config::new();
config
.protoc_arg("--experimental_allow_proto3_optional") .protoc_arg("--experimental_allow_proto3_optional")
.type_attribute(".common", "#[derive(serde::Serialize, serde::Deserialize)]") .type_attribute(".common", "#[derive(serde::Serialize, serde::Deserialize)]")
.type_attribute(".error", "#[derive(serde::Serialize, serde::Deserialize)]") .type_attribute(".error", "#[derive(serde::Serialize, serde::Deserialize)]")
@@ -156,9 +157,11 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
.type_attribute("peer_rpc.ForeignNetworkRouteInfoKey", "#[derive(Hash, Eq)]") .type_attribute("peer_rpc.ForeignNetworkRouteInfoKey", "#[derive(Hash, Eq)]")
.type_attribute("common.RpcDescriptor", "#[derive(Hash, Eq)]") .type_attribute("common.RpcDescriptor", "#[derive(Hash, Eq)]")
.service_generator(Box::new(rpc_build::ServiceGenerator::new())) .service_generator(Box::new(rpc_build::ServiceGenerator::new()))
.btree_map(["."]) .btree_map(["."]);
.compile_protos(&proto_files, &["src/proto/"])
.unwrap(); prost_reflect_build::Builder::new()
.file_descriptor_set_bytes("crate::proto::DESCRIPTOR_POOL_BYTES")
.compile_protos_with_config(config, &proto_files, &["src/proto/"])?;
check_locale(); check_locale();
Ok(()) Ok(())
+14 -2
View File
@@ -100,8 +100,8 @@ core_clap:
en: "do not create TUN device, can use subnet proxy to access node" en: "do not create TUN device, can use subnet proxy to access node"
zh-CN: "不创建TUN设备,可以使用子网代理访问节点" zh-CN: "不创建TUN设备,可以使用子网代理访问节点"
use_smoltcp: use_smoltcp:
en: "enable smoltcp stack for subnet proxy" en: "enable smoltcp stack for subnet proxy and kcp proxy"
zh-CN: "为子网代理启用smoltcp堆栈" zh-CN: "为子网代理和 KCP 代理启用smoltcp堆栈"
manual_routes: manual_routes:
en: "assign routes cidr manually, will disable subnet proxy and wireguard routes propagated from peers. e.g.: 192.168.0.0/16" en: "assign routes cidr manually, will disable subnet proxy and wireguard routes propagated from peers. e.g.: 192.168.0.0/16"
zh-CN: "手动分配路由CIDR,将禁用子网代理和从对等节点传播的wireguard路由。例如:192.168.0.0/16" zh-CN: "手动分配路由CIDR,将禁用子网代理和从对等节点传播的wireguard路由。例如:192.168.0.0/16"
@@ -134,6 +134,18 @@ core_clap:
compression: compression:
en: "compression algorithm to use, support none, zstd. default is none" en: "compression algorithm to use, support none, zstd. default is none"
zh-CN: "要使用的压缩算法,支持 none、zstd。默认为 none" zh-CN: "要使用的压缩算法,支持 none、zstd。默认为 none"
mapped_listeners:
en: "manually specify the public address of the listener, other nodes can use this address to connect to this node. e.g.: tcp://123.123.123.123:11223, can specify multiple."
zh-CN: "手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。例如:tcp://123.123.123.123:11223,可以指定多个。"
bind_device:
en: "bind the connector socket to physical devices to avoid routing issues. e.g.: subnet proxy segment conflicts with a node's segment, after binding the physical device, it can communicate with the node normally."
zh-CN: "将连接器的套接字绑定到物理设备以避免路由问题。比如子网代理网段与某节点的网段冲突,绑定物理设备后可以与该节点正常通信。"
enable_kcp_proxy:
en: "proxy tcp streams with kcp, improving the latency and throughput on the network with udp packet loss."
zh-CN: "使用 KCP 代理 TCP 流,提高在 UDP 丢包网络上的延迟和吞吐量。"
disable_kcp_input:
en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved."
zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。"
core_app: core_app:
panic_backtrace_save: panic_backtrace_save:
+120 -38
View File
@@ -1,26 +1,27 @@
use std::{ use std::{io, net::SocketAddr, os::windows::io::AsRawSocket};
ffi::c_void,
io::{self, ErrorKind},
mem,
net::SocketAddr,
os::windows::io::AsRawSocket,
ptr,
};
use anyhow::Context;
use network_interface::NetworkInterfaceConfig; use network_interface::NetworkInterfaceConfig;
use windows_sys::{ use windows::{
core::PCSTR, core::BSTR,
Win32::{ Win32::{
Foundation::{BOOL, FALSE}, Foundation::{BOOL, FALSE},
NetworkManagement::WindowsFirewall::{
INetFwPolicy2, INetFwRule, NET_FW_ACTION_ALLOW, NET_FW_PROFILE2_PRIVATE,
NET_FW_PROFILE2_PUBLIC, NET_FW_RULE_DIR_IN, NET_FW_RULE_DIR_OUT,
},
Networking::WinSock::{ Networking::WinSock::{
htonl, setsockopt, WSAGetLastError, WSAIoctl, IPPROTO_IP, IPPROTO_IPV6, htonl, setsockopt, WSAGetLastError, WSAIoctl, IPPROTO_IP, IPPROTO_IPV6,
IPV6_UNICAST_IF, IP_UNICAST_IF, SIO_UDP_CONNRESET, SOCKET, SOCKET_ERROR, IPV6_UNICAST_IF, IP_UNICAST_IF, SIO_UDP_CONNRESET, SOCKET, SOCKET_ERROR,
}, },
System::Com::{
CoCreateInstance, CoInitializeEx, CoUninitialize, CLSCTX_ALL, COINIT_MULTITHREADED,
},
}, },
}; };
pub fn disable_connection_reset<S: AsRawSocket>(socket: &S) -> io::Result<()> { pub fn disable_connection_reset<S: AsRawSocket>(socket: &S) -> io::Result<()> {
let handle = socket.as_raw_socket() as SOCKET; let handle = SOCKET(socket.as_raw_socket() as usize);
unsafe { unsafe {
// Ignoring UdpSocket's WSAECONNRESET error // Ignoring UdpSocket's WSAECONNRESET error
@@ -39,21 +40,18 @@ pub fn disable_connection_reset<S: AsRawSocket>(socket: &S) -> io::Result<()> {
let ret = WSAIoctl( let ret = WSAIoctl(
handle, handle,
SIO_UDP_CONNRESET, SIO_UDP_CONNRESET,
&enable as *const _ as *const c_void, Some(&enable as *const _ as *const std::ffi::c_void),
mem::size_of_val(&enable) as u32, std::mem::size_of_val(&enable) as u32,
ptr::null_mut(), None,
0, 0,
&mut bytes_returned as *mut _, &mut bytes_returned as *mut _,
ptr::null_mut(), None,
None, None,
); );
if ret == SOCKET_ERROR { if ret == SOCKET_ERROR {
use std::io::Error;
// Error occurs
let err_code = WSAGetLastError(); let err_code = WSAGetLastError();
return Err(Error::from_raw_os_error(err_code)); return Err(std::io::Error::from_raw_os_error(err_code.0));
} }
} }
@@ -63,7 +61,7 @@ pub fn disable_connection_reset<S: AsRawSocket>(socket: &S) -> io::Result<()> {
pub fn interface_count() -> io::Result<usize> { pub fn interface_count() -> io::Result<usize> {
let ifaces = network_interface::NetworkInterface::show().map_err(|e| { let ifaces = network_interface::NetworkInterface::show().map_err(|e| {
io::Error::new( io::Error::new(
ErrorKind::NotFound, io::ErrorKind::NotFound,
format!("Failed to get interfaces. error: {}", e), format!("Failed to get interfaces. error: {}", e),
) )
})?; })?;
@@ -73,7 +71,7 @@ pub fn interface_count() -> io::Result<usize> {
pub fn find_interface_index(iface_name: &str) -> io::Result<u32> { pub fn find_interface_index(iface_name: &str) -> io::Result<u32> {
let ifaces = network_interface::NetworkInterface::show().map_err(|e| { let ifaces = network_interface::NetworkInterface::show().map_err(|e| {
io::Error::new( io::Error::new(
ErrorKind::NotFound, io::ErrorKind::NotFound,
format!("Failed to get interfaces. {}, error: {}", iface_name, e), format!("Failed to get interfaces. {}, error: {}", iface_name, e),
) )
})?; })?;
@@ -82,7 +80,7 @@ pub fn find_interface_index(iface_name: &str) -> io::Result<u32> {
} }
tracing::error!("Failed to find interface index for {}", iface_name); tracing::error!("Failed to find interface index for {}", iface_name);
Err(io::Error::new( Err(io::Error::new(
ErrorKind::NotFound, io::ErrorKind::NotFound,
format!("{}", iface_name), format!("{}", iface_name),
)) ))
} }
@@ -92,7 +90,7 @@ pub fn set_ip_unicast_if<S: AsRawSocket>(
addr: &SocketAddr, addr: &SocketAddr,
iface: &str, iface: &str,
) -> io::Result<()> { ) -> io::Result<()> {
let handle = socket.as_raw_socket() as SOCKET; let handle = SOCKET(socket.as_raw_socket() as usize);
let if_index = find_interface_index(iface)?; let if_index = find_interface_index(iface)?;
@@ -100,30 +98,23 @@ pub fn set_ip_unicast_if<S: AsRawSocket>(
// https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options // https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options
let ret = match addr { let ret = match addr {
SocketAddr::V4(..) => { SocketAddr::V4(..) => {
// Interface index is in network byte order for IPPROTO_IP.
let if_index = htonl(if_index); let if_index = htonl(if_index);
setsockopt( let if_index_bytes = if_index.to_ne_bytes();
handle, setsockopt(handle, IPPROTO_IP.0, IP_UNICAST_IF, Some(&if_index_bytes))
IPPROTO_IP as i32,
IP_UNICAST_IF as i32,
&if_index as *const _ as PCSTR,
mem::size_of_val(&if_index) as i32,
)
} }
SocketAddr::V6(..) => { SocketAddr::V6(..) => {
// Interface index is in host byte order for IPPROTO_IPV6. let if_index_bytes = if_index.to_ne_bytes();
setsockopt( setsockopt(
handle, handle,
IPPROTO_IPV6 as i32, IPPROTO_IPV6.0,
IPV6_UNICAST_IF as i32, IPV6_UNICAST_IF,
&if_index as *const _ as PCSTR, Some(&if_index_bytes),
mem::size_of_val(&if_index) as i32,
) )
} }
}; };
if ret == SOCKET_ERROR { if ret == SOCKET_ERROR {
let err = io::Error::from_raw_os_error(WSAGetLastError()); let err = std::io::Error::from_raw_os_error(WSAGetLastError().0);
tracing::error!( tracing::error!(
"set IP_UNICAST_IF / IPV6_UNICAST_IF interface: {}, index: {}, error: {}", "set IP_UNICAST_IF / IPV6_UNICAST_IF interface: {}, index: {}, error: {}",
iface, iface,
@@ -152,4 +143,95 @@ pub fn setup_socket_for_win<S: AsRawSocket>(
} }
Ok(()) Ok(())
} }
/// RAII guard for COM initialization on the current thread: `new`
/// calls `CoInitializeEx` and `Drop` balances it with
/// `CoUninitialize`, so COM stays initialized exactly for the guard's
/// lifetime.
struct ComInitializer;

impl ComInitializer {
    fn new() -> windows::core::Result<Self> {
        // SAFETY: plain FFI call; COINIT_MULTITHREADED joins the MTA.
        unsafe { CoInitializeEx(None, COINIT_MULTITHREADED)? };
        Ok(Self)
    }
}

impl Drop for ComInitializer {
    fn drop(&mut self) {
        // SAFETY: balances the successful CoInitializeEx from `new`.
        unsafe {
            CoUninitialize();
        }
    }
}
/// Install (or refresh) a Windows Defender Firewall rule allowing all
/// traffic of the current executable in one direction.
///
/// The rule name embeds the executable path and the direction; any
/// existing rule with that exact name is removed first, so repeated
/// calls do not accumulate duplicates.
///
/// NOTE(review): modifying firewall policy presumably requires
/// Administrator privileges — confirm callers tolerate failure here.
pub fn do_add_self_to_firewall_allowlist(inbound: bool) -> anyhow::Result<()> {
    // Keep COM initialized for the duration of this function.
    let _com = ComInitializer::new()?;
    // Create the firewall policy instance
    let policy: INetFwPolicy2 = unsafe {
        CoCreateInstance(
            &windows::Win32::NetworkManagement::WindowsFirewall::NetFwPolicy2,
            None,
            CLSCTX_ALL,
        )
    }?;
    // Create the firewall rule instance
    let rule: INetFwRule = unsafe {
        CoCreateInstance(
            &windows::Win32::NetworkManagement::WindowsFirewall::NetFwRule,
            None,
            CLSCTX_ALL,
        )
    }?;
    // Set rule attributes. The `\\?\` verbatim prefix is stripped so the
    // firewall sees a conventional path.
    let exe_path = std::env::current_exe()
        .with_context(|| "Failed to get current executable path when adding firewall rule")?
        .to_string_lossy()
        .replace(r"\\?\", "");
    let name = BSTR::from(format!(
        "EasyTier {} ({})",
        exe_path,
        if inbound { "Inbound" } else { "Outbound" }
    ));
    let desc = BSTR::from("Allow EasyTier to do subnet proxy and kcp proxy");
    let app_path = BSTR::from(&exe_path);
    unsafe {
        rule.SetName(&name)?;
        rule.SetDescription(&desc)?;
        rule.SetApplicationName(&app_path)?;
        rule.SetAction(NET_FW_ACTION_ALLOW)?;
        if inbound {
            rule.SetDirection(NET_FW_RULE_DIR_IN)?; // allow inbound connections
        } else {
            rule.SetDirection(NET_FW_RULE_DIR_OUT)?; // allow outbound connections
        }
        rule.SetEnabled(windows::Win32::Foundation::VARIANT_TRUE)?;
        // Apply to both the private and public firewall profiles.
        rule.SetProfiles(NET_FW_PROFILE2_PRIVATE.0 | NET_FW_PROFILE2_PUBLIC.0)?;
        rule.SetGrouping(&BSTR::from("EasyTier"))?;
        // Fetch the rule collection and add the new rule.
        let rules = policy.Rules()?;
        rules.Remove(&name)?; // remove any same-named rule first
        rules.Add(&rule)?;
    }
    Ok(())
}
/// Allow this executable through the Windows Firewall in both
/// directions (inbound first, then outbound).
pub fn add_self_to_firewall_allowlist() -> anyhow::Result<()> {
    for inbound in [true, false] {
        do_add_self_to_firewall_allowlist(inbound)?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): this test mutates the real Windows Firewall policy
    // and presumably only passes when run with Administrator rights —
    // consider marking it `#[ignore]` for regular CI runs.
    #[test]
    fn test_add_self_to_firewall_allowlist() {
        let res = add_self_to_firewall_allowlist();
        assert!(res.is_ok());
    }
}
+22 -1
View File
@@ -27,8 +27,12 @@ pub fn gen_default_flags() -> Flags {
relay_all_peer_rpc: false, relay_all_peer_rpc: false,
disable_udp_hole_punching: false, disable_udp_hole_punching: false,
ipv6_listener: "udp://[::]:0".to_string(), ipv6_listener: "udp://[::]:0".to_string(),
multi_thread: false, multi_thread: true,
data_compress_algo: CompressionAlgoPb::None.into(), data_compress_algo: CompressionAlgoPb::None.into(),
bind_device: true,
enable_kcp_proxy: false,
disable_kcp_input: false,
disable_relay_kcp: true,
} }
} }
@@ -72,6 +76,9 @@ pub trait ConfigLoader: Send + Sync {
fn get_listeners(&self) -> Vec<url::Url>; fn get_listeners(&self) -> Vec<url::Url>;
fn set_listeners(&self, listeners: Vec<url::Url>); fn set_listeners(&self, listeners: Vec<url::Url>);
fn get_mapped_listeners(&self) -> Vec<url::Url>;
fn set_mapped_listeners(&self, listeners: Option<Vec<url::Url>>);
fn get_rpc_portal(&self) -> Option<SocketAddr>; fn get_rpc_portal(&self) -> Option<SocketAddr>;
fn set_rpc_portal(&self, addr: SocketAddr); fn set_rpc_portal(&self, addr: SocketAddr);
@@ -183,6 +190,7 @@ struct Config {
dhcp: Option<bool>, dhcp: Option<bool>,
network_identity: Option<NetworkIdentity>, network_identity: Option<NetworkIdentity>,
listeners: Option<Vec<url::Url>>, listeners: Option<Vec<url::Url>>,
mapped_listeners: Option<Vec<url::Url>>,
exit_nodes: Option<Vec<Ipv4Addr>>, exit_nodes: Option<Vec<Ipv4Addr>>,
peer: Option<Vec<PeerConfig>>, peer: Option<Vec<PeerConfig>>,
@@ -472,6 +480,19 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().listeners = Some(listeners); self.config.lock().unwrap().listeners = Some(listeners);
} }
fn get_mapped_listeners(&self) -> Vec<url::Url> {
self.config
.lock()
.unwrap()
.mapped_listeners
.clone()
.unwrap_or_default()
}
fn set_mapped_listeners(&self, listeners: Option<Vec<url::Url>>) {
self.config.lock().unwrap().mapped_listeners = listeners;
}
fn get_rpc_portal(&self) -> Option<SocketAddr> { fn get_rpc_portal(&self) -> Option<SocketAddr> {
self.config.lock().unwrap().rpc_portal self.config.lock().unwrap().rpc_portal
} }
+9 -2
View File
@@ -101,6 +101,10 @@ impl GlobalCtx {
let enable_exit_node = config_fs.get_flags().enable_exit_node; let enable_exit_node = config_fs.get_flags().enable_exit_node;
let no_tun = config_fs.get_flags().no_tun; let no_tun = config_fs.get_flags().no_tun;
let mut feature_flags = PeerFeatureFlag::default();
feature_flags.kcp_input = !config_fs.get_flags().disable_kcp_input;
feature_flags.no_relay_kcp = config_fs.get_flags().disable_relay_kcp;
GlobalCtx { GlobalCtx {
inst_name: config_fs.get_inst_name(), inst_name: config_fs.get_inst_name(),
id, id,
@@ -123,7 +127,7 @@ impl GlobalCtx {
enable_exit_node, enable_exit_node,
no_tun, no_tun,
feature_flags: AtomicCell::new(PeerFeatureFlag::default()), feature_flags: AtomicCell::new(feature_flags),
} }
} }
@@ -230,7 +234,10 @@ impl GlobalCtx {
} }
pub fn add_running_listener(&self, url: url::Url) { pub fn add_running_listener(&self, url: url::Url) {
self.running_listeners.lock().unwrap().push(url); let mut l = self.running_listeners.lock().unwrap();
if !l.contains(&url) {
l.push(url);
}
} }
pub fn get_vpn_portal_cidr(&self) -> Option<cidr::Ipv4Cidr> { pub fn get_vpn_portal_cidr(&self) -> Option<cidr::Ipv4Cidr> {
-417
View File
@@ -1,417 +0,0 @@
use std::net::Ipv4Addr;
use async_trait::async_trait;
use tokio::process::Command;
use super::error::Error;
/// Platform abstraction for configuring a network interface
/// (addresses, routes, link state, MTU).
///
/// Every method defaults to a no-op `Ok(())`, so platform
/// implementations only override the operations they support.
#[async_trait]
pub trait IfConfiguerTrait: Send + Sync {
    /// Add a route for `_address/_cidr_prefix` via interface `_name`.
    async fn add_ipv4_route(
        &self,
        _name: &str,
        _address: Ipv4Addr,
        _cidr_prefix: u8,
    ) -> Result<(), Error> {
        Ok(())
    }
    /// Remove a previously added route.
    async fn remove_ipv4_route(
        &self,
        _name: &str,
        _address: Ipv4Addr,
        _cidr_prefix: u8,
    ) -> Result<(), Error> {
        Ok(())
    }
    /// Assign `_address/_cidr_prefix` to the interface.
    async fn add_ipv4_ip(
        &self,
        _name: &str,
        _address: Ipv4Addr,
        _cidr_prefix: u8,
    ) -> Result<(), Error> {
        Ok(())
    }
    /// Bring the interface up (`true`) or down (`false`).
    async fn set_link_status(&self, _name: &str, _up: bool) -> Result<(), Error> {
        Ok(())
    }
    /// Remove one address, or all IPv4 addresses when `_ip` is `None`.
    async fn remove_ip(&self, _name: &str, _ip: Option<Ipv4Addr>) -> Result<(), Error> {
        Ok(())
    }
    /// Wait until the interface shows up in the system interface list.
    async fn wait_interface_show(&self, _name: &str) -> Result<(), Error> {
        return Ok(());
    }
    /// Set the interface MTU.
    async fn set_mtu(&self, _name: &str, _mtu: u32) -> Result<(), Error> {
        Ok(())
    }
}
/// Convert a CIDR prefix length (0..=32) into a dotted-quad IPv4
/// subnet mask, e.g. `24` -> `255.255.255.0`.
///
/// # Panics
/// Panics if `prefix_length` is greater than 32.
fn cidr_to_subnet_mask(prefix_length: u8) -> Ipv4Addr {
    if prefix_length > 32 {
        panic!("Invalid CIDR prefix length");
    }
    // A prefix of 0 must yield 0.0.0.0; `checked_shl` catches the
    // shift-by-32 case that would otherwise overflow.
    let subnet_mask: u32 = (!0u32)
        .checked_shl(32 - u32::from(prefix_length))
        .unwrap_or(0);
    // `Ipv4Addr: From<u32>` builds the address from the big-endian
    // integer form, replacing the manual per-octet shifting.
    Ipv4Addr::from(subnet_mask)
}
/// Run `cmd` through the platform shell (`cmd /C` on Windows, `sh -c`
/// elsewhere); on a non-zero exit status, return an error carrying the
/// concatenated stdout+stderr.
async fn run_shell_cmd(cmd: &str) -> Result<(), Error> {
    let cmd_out: std::process::Output;
    let stdout: String;
    let stderr: String;
    #[cfg(target_os = "windows")]
    {
        // Keep the spawned cmd.exe from flashing a console window.
        const CREATE_NO_WINDOW: u32 = 0x08000000;
        cmd_out = Command::new("cmd")
            .stdin(std::process::Stdio::null())
            .arg("/C")
            .arg(cmd)
            .creation_flags(CREATE_NO_WINDOW)
            .output()
            .await?;
        // Shell output may not be UTF-8; the helper also handles GBK.
        stdout = crate::utils::utf8_or_gbk_to_string(cmd_out.stdout.as_slice());
        stderr = crate::utils::utf8_or_gbk_to_string(cmd_out.stderr.as_slice());
    };
    #[cfg(not(target_os = "windows"))]
    {
        cmd_out = Command::new("sh").arg("-c").arg(cmd).output().await?;
        stdout = String::from_utf8_lossy(cmd_out.stdout.as_slice()).to_string();
        stderr = String::from_utf8_lossy(cmd_out.stderr.as_slice()).to_string();
    };
    let ec = cmd_out.status.code();
    let succ = cmd_out.status.success();
    tracing::info!(?cmd, ?ec, ?succ, ?stdout, ?stderr, "run shell cmd");
    if !cmd_out.status.success() {
        return Err(Error::ShellCommandError(stdout + &stderr));
    }
    Ok(())
}
/// Interface configurer for macOS / FreeBSD, implemented by shelling
/// out to the BSD `route` and `ifconfig` utilities.
pub struct MacIfConfiger {}

#[async_trait]
impl IfConfiguerTrait for MacIfConfiger {
    async fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        // High hop count keeps this route from shadowing existing ones.
        let cmd = format!(
            "route -n add {} -netmask {} -interface {} -hopcount 7",
            address,
            cidr_to_subnet_mask(cidr_prefix),
            name
        );
        run_shell_cmd(&cmd).await
    }

    async fn remove_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let cmd = format!(
            "route -n delete {} -netmask {} -interface {}",
            address,
            cidr_to_subnet_mask(cidr_prefix),
            name
        );
        run_shell_cmd(&cmd).await
    }

    async fn add_ipv4_ip(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let cmd = format!(
            "ifconfig {} {:?}/{:?} 10.8.8.8 up",
            name, address, cidr_prefix,
        );
        run_shell_cmd(&cmd).await
    }

    async fn set_link_status(&self, name: &str, up: bool) -> Result<(), Error> {
        let state = if up { "up" } else { "down" };
        run_shell_cmd(&format!("ifconfig {} {}", name, state)).await
    }

    async fn remove_ip(&self, name: &str, ip: Option<Ipv4Addr>) -> Result<(), Error> {
        let cmd = match ip {
            // No address given: drop the whole inet configuration.
            None => format!("ifconfig {} inet delete", name),
            Some(addr) => format!("ifconfig {} inet {} delete", name, addr.to_string()),
        };
        run_shell_cmd(&cmd).await
    }

    async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
        run_shell_cmd(&format!("ifconfig {} mtu {}", name, mtu)).await
    }
}
/// Interface configurer for Linux that shells out to `ip` (iproute2).
pub struct LinuxIfConfiger {}

#[async_trait]
impl IfConfiguerTrait for LinuxIfConfiger {
    async fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        // Metric 65535 makes this the lowest-priority candidate route.
        let cmd = format!(
            "ip route add {}/{} dev {} metric 65535",
            address, cidr_prefix, name
        );
        run_shell_cmd(&cmd).await
    }

    async fn remove_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let cmd = format!("ip route del {}/{} dev {}", address, cidr_prefix, name);
        run_shell_cmd(&cmd).await
    }

    async fn add_ipv4_ip(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let cmd = format!("ip addr add {:?}/{:?} dev {}", address, cidr_prefix, name);
        run_shell_cmd(&cmd).await
    }

    async fn set_link_status(&self, name: &str, up: bool) -> Result<(), Error> {
        let state = if up { "up" } else { "down" };
        run_shell_cmd(&format!("ip link set {} {}", name, state)).await
    }

    async fn remove_ip(&self, name: &str, ip: Option<Ipv4Addr>) -> Result<(), Error> {
        let cmd = match ip {
            // No address given: flush every address on the device.
            None => format!("ip addr flush dev {}", name),
            // Keeps the original `{:?}`-on-String formatting (quoted addr).
            Some(addr) => format!("ip addr del {:?} dev {}", addr.to_string(), name),
        };
        run_shell_cmd(&cmd).await
    }

    async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
        run_shell_cmd(&format!("ip link set dev {} mtu {}", name, mtu)).await
    }
}
#[cfg(target_os = "windows")]
pub struct WindowsIfConfiger {}

#[cfg(target_os = "windows")]
impl WindowsIfConfiger {
    /// Resolve an interface name to its Windows interface index.
    pub fn get_interface_index(name: &str) -> Option<u32> {
        crate::arch::windows::find_interface_index(name).ok()
    }

    /// List every IPv4 address currently assigned to interface `name`.
    async fn list_ipv4(name: &str) -> Result<Vec<Ipv4Addr>, Error> {
        use anyhow::Context;
        use network_interface::NetworkInterfaceConfig;
        use std::net::IpAddr;
        let ret = network_interface::NetworkInterface::show().with_context(|| "show interface")?;
        let addrs = ret
            .iter()
            // Keep only entries for the requested interface.
            .filter(|x| x.name == name)
            // Each entry carries a list of addresses; flatten them.
            // (replaces the non-idiomatic `filter_map(..).flat_map(|x| x)`)
            .flat_map(|x| x.addr.clone())
            .map(|x| x.ip())
            // Keep IPv4 addresses only.
            .filter_map(|x| match x {
                IpAddr::V4(ipv4) => Some(ipv4),
                _ => None,
            })
            .collect::<Vec<_>>();
        Ok(addrs)
    }

    /// Remove a single IPv4 address via `netsh`.
    async fn remove_one_ipv4(name: &str, ip: Ipv4Addr) -> Result<(), Error> {
        run_shell_cmd(
            format!(
                "netsh interface ipv4 delete address {} address={}",
                name,
                ip.to_string()
            )
            .as_str(),
        )
        .await
    }
}
#[cfg(target_os = "windows")]
#[async_trait]
impl IfConfiguerTrait for WindowsIfConfiger {
    /// Add a route with `route ADD`, targeting the interface by index.
    async fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        // `route` addresses interfaces by index, not by name.
        let Some(idx) = Self::get_interface_index(name) else {
            return Err(Error::NotFound);
        };
        run_shell_cmd(
            format!(
                "route ADD {} MASK {} 10.1.1.1 IF {} METRIC 255",
                address,
                cidr_to_subnet_mask(cidr_prefix),
                idx
            )
            .as_str(),
        )
        .await
    }
    /// Remove a route previously installed by `add_ipv4_route`.
    async fn remove_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let Some(idx) = Self::get_interface_index(name) else {
            return Err(Error::NotFound);
        };
        run_shell_cmd(
            format!(
                "route DELETE {} MASK {} IF {}",
                address,
                cidr_to_subnet_mask(cidr_prefix),
                idx
            )
            .as_str(),
        )
        .await
    }
    /// Assign an IPv4 address with `netsh`.
    async fn add_ipv4_ip(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        run_shell_cmd(
            format!(
                "netsh interface ipv4 add address {} address={} mask={}",
                name,
                address,
                cidr_to_subnet_mask(cidr_prefix)
            )
            .as_str(),
        )
        .await
    }
    /// Enable or disable the interface with `netsh`.
    async fn set_link_status(&self, name: &str, up: bool) -> Result<(), Error> {
        run_shell_cmd(
            format!(
                "netsh interface set interface {} {}",
                name,
                if up { "enable" } else { "disable" }
            )
            .as_str(),
        )
        .await
    }
    /// Remove one address, or every IPv4 address when `ip` is `None`.
    async fn remove_ip(&self, name: &str, ip: Option<Ipv4Addr>) -> Result<(), Error> {
        if ip.is_none() {
            for ip in Self::list_ipv4(name).await?.iter() {
                Self::remove_one_ipv4(name, *ip).await?;
            }
            Ok(())
        } else {
            Self::remove_one_ipv4(name, ip.unwrap()).await
        }
    }
    /// Poll every 100ms (up to 10s) until the interface index resolves.
    async fn wait_interface_show(&self, name: &str) -> Result<(), Error> {
        Ok(
            tokio::time::timeout(std::time::Duration::from_secs(10), async move {
                loop {
                    if let Some(idx) = Self::get_interface_index(name) {
                        tracing::info!(?name, ?idx, "Interface found");
                        break;
                    }
                    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                }
                Ok::<(), Error>(())
            })
            .await??,
        )
    }
    /// Set the MTU. The IPv6 command is best-effort (result ignored);
    /// only the IPv4 command's result is returned.
    async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
        let _ = run_shell_cmd(
            format!("netsh interface ipv6 set subinterface {} mtu={}", name, mtu).as_str(),
        )
        .await;
        run_shell_cmd(
            format!("netsh interface ipv4 set subinterface {} mtu={}", name, mtu).as_str(),
        )
        .await
    }
}
/// No-op configurer for unsupported platforms; relies entirely on the
/// trait's default `Ok(())` implementations.
pub struct DummyIfConfiger {}
#[async_trait]
impl IfConfiguerTrait for DummyIfConfiger {}
// Select the platform-appropriate interface configurer at compile time.
#[cfg(any(target_os = "macos", target_os = "freebsd"))]
pub type IfConfiger = MacIfConfiger;

#[cfg(target_os = "linux")]
pub type IfConfiger = LinuxIfConfiger;

#[cfg(target_os = "windows")]
pub type IfConfiger = WindowsIfConfiger;

// Anything else (e.g. mobile targets) falls back to the no-op configurer.
#[cfg(not(any(
    target_os = "macos",
    target_os = "linux",
    target_os = "windows",
    target_os = "freebsd"
)))]
pub type IfConfiger = DummyIfConfiger;
+81
View File
@@ -0,0 +1,81 @@
use std::net::Ipv4Addr;
use async_trait::async_trait;
use super::{cidr_to_subnet_mask, run_shell_cmd, Error, IfConfiguerTrait};
/// Interface configurer for macOS / FreeBSD, implemented by shelling
/// out to the BSD `route` and `ifconfig` utilities.
pub struct MacIfConfiger {}

#[async_trait]
impl IfConfiguerTrait for MacIfConfiger {
    async fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        // High hop count keeps this route from shadowing existing ones.
        let cmd = format!(
            "route -n add {} -netmask {} -interface {} -hopcount 7",
            address,
            cidr_to_subnet_mask(cidr_prefix),
            name
        );
        run_shell_cmd(&cmd).await
    }

    async fn remove_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let cmd = format!(
            "route -n delete {} -netmask {} -interface {}",
            address,
            cidr_to_subnet_mask(cidr_prefix),
            name
        );
        run_shell_cmd(&cmd).await
    }

    async fn add_ipv4_ip(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let cmd = format!(
            "ifconfig {} {:?}/{:?} 10.8.8.8 up",
            name, address, cidr_prefix,
        );
        run_shell_cmd(&cmd).await
    }

    async fn set_link_status(&self, name: &str, up: bool) -> Result<(), Error> {
        let state = if up { "up" } else { "down" };
        run_shell_cmd(&format!("ifconfig {} {}", name, state)).await
    }

    async fn remove_ip(&self, name: &str, ip: Option<Ipv4Addr>) -> Result<(), Error> {
        let cmd = match ip {
            // No address given: drop the whole inet configuration.
            None => format!("ifconfig {} inet delete", name),
            Some(addr) => format!("ifconfig {} inet {} delete", name, addr.to_string()),
        };
        run_shell_cmd(&cmd).await
    }

    async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
        run_shell_cmd(&format!("ifconfig {} mtu {}", name, mtu)).await
    }
}
+127
View File
@@ -0,0 +1,127 @@
#[cfg(any(target_os = "macos", target_os = "freebsd"))]
mod darwin;
#[cfg(any(target_os = "linux"))]
mod netlink;
#[cfg(target_os = "windows")]
mod windows;
mod route;
use std::net::Ipv4Addr;
use async_trait::async_trait;
use tokio::process::Command;
use super::error::Error;
/// Platform abstraction for configuring a network interface
/// (addresses, routes, link state, MTU).
///
/// Every method defaults to a no-op `Ok(())`, so platform
/// implementations only override the operations they support.
#[async_trait]
pub trait IfConfiguerTrait: Send + Sync {
    /// Add a route for `_address/_cidr_prefix` via interface `_name`.
    async fn add_ipv4_route(
        &self,
        _name: &str,
        _address: Ipv4Addr,
        _cidr_prefix: u8,
    ) -> Result<(), Error> {
        Ok(())
    }
    /// Remove a previously added route.
    async fn remove_ipv4_route(
        &self,
        _name: &str,
        _address: Ipv4Addr,
        _cidr_prefix: u8,
    ) -> Result<(), Error> {
        Ok(())
    }
    /// Assign `_address/_cidr_prefix` to the interface.
    async fn add_ipv4_ip(
        &self,
        _name: &str,
        _address: Ipv4Addr,
        _cidr_prefix: u8,
    ) -> Result<(), Error> {
        Ok(())
    }
    /// Bring the interface up (`true`) or down (`false`).
    async fn set_link_status(&self, _name: &str, _up: bool) -> Result<(), Error> {
        Ok(())
    }
    /// Remove one address, or all IPv4 addresses when `_ip` is `None`.
    async fn remove_ip(&self, _name: &str, _ip: Option<Ipv4Addr>) -> Result<(), Error> {
        Ok(())
    }
    /// Wait until the interface shows up in the system interface list.
    async fn wait_interface_show(&self, _name: &str) -> Result<(), Error> {
        return Ok(());
    }
    /// Set the interface MTU.
    async fn set_mtu(&self, _name: &str, _mtu: u32) -> Result<(), Error> {
        Ok(())
    }
}
/// Convert a CIDR prefix length (0..=32) into a dotted-quad IPv4
/// subnet mask, e.g. `24` -> `255.255.255.0`.
///
/// # Panics
/// Panics if `prefix_length` is greater than 32.
fn cidr_to_subnet_mask(prefix_length: u8) -> Ipv4Addr {
    if prefix_length > 32 {
        panic!("Invalid CIDR prefix length");
    }
    // A prefix of 0 must yield 0.0.0.0; `checked_shl` catches the
    // shift-by-32 case that would otherwise overflow.
    let subnet_mask: u32 = (!0u32)
        .checked_shl(32 - u32::from(prefix_length))
        .unwrap_or(0);
    // `Ipv4Addr: From<u32>` builds the address from the big-endian
    // integer form, replacing the manual per-octet shifting.
    Ipv4Addr::from(subnet_mask)
}
/// Run `cmd` through the platform shell (`cmd /C` on Windows, `sh -c`
/// elsewhere); on a non-zero exit status, return an error carrying the
/// concatenated stdout+stderr.
async fn run_shell_cmd(cmd: &str) -> Result<(), Error> {
    let cmd_out: std::process::Output;
    let stdout: String;
    let stderr: String;
    #[cfg(target_os = "windows")]
    {
        // Keep the spawned cmd.exe from flashing a console window.
        const CREATE_NO_WINDOW: u32 = 0x08000000;
        cmd_out = Command::new("cmd")
            .stdin(std::process::Stdio::null())
            .arg("/C")
            .arg(cmd)
            .creation_flags(CREATE_NO_WINDOW)
            .output()
            .await?;
        // Shell output may not be UTF-8; the helper also handles GBK.
        stdout = crate::utils::utf8_or_gbk_to_string(cmd_out.stdout.as_slice());
        stderr = crate::utils::utf8_or_gbk_to_string(cmd_out.stderr.as_slice());
    };
    #[cfg(not(target_os = "windows"))]
    {
        cmd_out = Command::new("sh").arg("-c").arg(cmd).output().await?;
        stdout = String::from_utf8_lossy(cmd_out.stdout.as_slice()).to_string();
        stderr = String::from_utf8_lossy(cmd_out.stderr.as_slice()).to_string();
    };
    let ec = cmd_out.status.code();
    let succ = cmd_out.status.success();
    tracing::info!(?cmd, ?ec, ?succ, ?stdout, ?stderr, "run shell cmd");
    if !cmd_out.status.success() {
        return Err(Error::ShellCommandError(stdout + &stderr));
    }
    Ok(())
}
/// No-op configurer for unsupported platforms; relies entirely on the
/// trait's default `Ok(())` implementations.
pub struct DummyIfConfiger {}
#[async_trait]
impl IfConfiguerTrait for DummyIfConfiger {}
// Select the platform-appropriate interface configurer at compile time.
// Linux now uses the netlink-based implementation instead of shelling
// out to `ip`.
#[cfg(any(target_os = "linux"))]
pub type IfConfiger = netlink::NetlinkIfConfiger;

#[cfg(any(target_os = "macos", target_os = "freebsd"))]
pub type IfConfiger = darwin::MacIfConfiger;

#[cfg(target_os = "windows")]
pub type IfConfiger = windows::WindowsIfConfiger;

// Anything else falls back to the no-op configurer.
#[cfg(not(any(
    target_os = "macos",
    target_os = "linux",
    target_os = "windows",
    target_os = "freebsd",
)))]
pub type IfConfiger = DummyIfConfiger;
+577
View File
@@ -0,0 +1,577 @@
use std::{
ffi::CString,
fmt::Debug,
net::{IpAddr, Ipv4Addr, Ipv6Addr},
num::NonZero,
os::fd::AsRawFd,
};
use anyhow::Context;
use async_trait::async_trait;
use cidr::IpInet;
use netlink_packet_core::{
NetlinkDeserializable, NetlinkHeader, NetlinkMessage, NetlinkPayload, NetlinkSerializable,
NLM_F_ACK, NLM_F_CREATE, NLM_F_DUMP, NLM_F_EXCL, NLM_F_REQUEST,
};
use netlink_packet_route::{
address::{AddressAttribute, AddressMessage},
route::{
RouteAddress, RouteAttribute, RouteHeader, RouteMessage, RouteProtocol, RouteScope,
RouteType,
},
AddressFamily, RouteNetlinkMessage,
};
use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
use nix::{
ifaddrs::getifaddrs,
libc::{self, ifreq, ioctl, Ioctl, SIOCGIFFLAGS, SIOCGIFMTU, SIOCSIFFLAGS, SIOCSIFMTU},
net::if_::InterfaceFlags,
sys::socket::SockaddrLike as _,
};
use pnet::ipnetwork::ip_mask_to_prefix;
use super::{route::Route, Error, IfConfiguerTrait};
/// Create a throwaway UDP socket bound to an ephemeral port on all IPv4
/// interfaces.
///
/// The socket is only used as a file descriptor for `ioctl` calls elsewhere
/// in this module; no traffic is ever sent or received on it.
///
/// # Errors
/// Propagates the underlying `bind` I/O error.
pub(crate) fn dummy_socket() -> Result<std::net::UdpSocket, Error> {
    // Use the canonical "0.0.0.0:0" literal. The previous "0:0" form only
    // parsed through the `ToSocketAddrs` host-resolution fallback (host "0"
    // resolving to 0.0.0.0) rather than as a socket-address literal.
    Ok(std::net::UdpSocket::bind("0.0.0.0:0")?)
}
/// Build a zeroed `ifreq` whose `ifr_name` field holds `name`.
///
/// The copy is clamped to the size of `ifr_name` so an over-long interface
/// name can no longer write past the end of the fixed-size array (the
/// previous unbounded loop was undefined behavior for names of IFNAMSIZ
/// bytes or more). The final slot is always left as NUL — the struct is
/// zero-initialized — so the kernel sees a terminated string.
///
/// # Panics
/// Panics if `name` contains an interior NUL byte.
fn build_ifreq(name: &str) -> ifreq {
    let c_str = CString::new(name).unwrap();
    let mut ifr: ifreq = unsafe { std::mem::zeroed() };
    let name_bytes = c_str.as_bytes_with_nul();
    // Reserve the last byte as the NUL terminator.
    let max_copy = ifr.ifr_name.len().saturating_sub(1);
    for (i, &b) in name_bytes.iter().enumerate().take(max_copy) {
        ifr.ifr_name[i] = b as libc::c_char;
    }
    ifr
}
/// Open a NETLINK_ROUTE socket, serialize `req` with the given header
/// `flags`, send it to the kernel, and return the socket so the caller can
/// read the response(s).
///
/// The caller decides how many responses to consume (one ACK vs. a DUMP
/// stream), which is why the socket is returned rather than the reply.
fn send_netlink_req<T: NetlinkDeserializable + NetlinkSerializable + Debug>(
    req: T,
    flags: u16,
) -> Result<Socket, Error> {
    let mut socket = Socket::new(NETLINK_ROUTE)?;
    // Let the kernel pick our netlink port id, then target the kernel
    // (port 0, group 0).
    socket.bind_auto()?;
    socket.connect(&SocketAddr::new(0, 0))?;
    let mut req: NetlinkMessage<T> =
        NetlinkMessage::new(NetlinkHeader::default(), NetlinkPayload::InnerMessage(req));
    req.header.flags = flags;
    // finalize() fills in the length field used to size the buffer below.
    req.finalize();
    let mut buf = vec![0; req.header.length as _];
    req.serialize(&mut buf);
    tracing::debug!("net link request >>> {:?}", req);
    socket.send(&buf, 0)?;
    Ok(socket)
}
/// Send a single create/delete netlink request and wait for exactly one ACK.
///
/// For additions NLM_F_EXCL is set so the kernel rejects duplicates instead
/// of silently replacing an existing entry; deletions (`is_remove`) must not
/// carry NLM_F_EXCL or they would fail.
///
/// # Errors
/// Returns the kernel's errno (wrapped as an I/O error) on a non-zero ACK,
/// or a generic error for any unexpected payload type.
fn send_netlink_req_and_wait_one_resp<T: NetlinkDeserializable + NetlinkSerializable + Debug>(
    req: T,
    is_remove: bool,
) -> Result<(), Error> {
    let socket = send_netlink_req(
        req,
        NLM_F_ACK | NLM_F_CREATE | NLM_F_REQUEST | if !is_remove { NLM_F_EXCL } else { 0 },
    )?;
    let resp = socket.recv_from_full()?;
    let ret = NetlinkMessage::<T>::deserialize(&resp.0)
        .with_context(|| "Failed to deserialize netlink message")?;
    tracing::debug!("net link response <<< {:?}", ret);
    match ret.payload {
        NetlinkPayload::Error(e) => {
            // A NetlinkPayload::Error with code 0 is the success ACK
            // requested via NLM_F_ACK, not an actual failure.
            if e.code == NonZero::new(0) {
                return Ok(());
            } else {
                return Err(e.to_io().into());
            }
        }
        p => {
            tracing::error!("Unexpected netlink response: {:?}", p);
            return Err(anyhow::anyhow!("Unexpected netlink response").into());
        }
    }
}
/// Convert a netlink `RouteAddress` into a plain `IpAddr`.
///
/// Address kinds other than IPv4/IPv6 yield `None` and are skipped by
/// callers.
fn addr_to_ip(addr: RouteAddress) -> Option<IpAddr> {
    if let RouteAddress::Inet(v4) = addr {
        Some(IpAddr::V4(v4))
    } else if let RouteAddress::Inet6(v6) = addr {
        Some(IpAddr::V6(v6))
    } else {
        None
    }
}
/// Translate a raw rtnetlink `RouteMessage` into this crate's portable
/// `Route` representation.
impl From<RouteMessage> for Route {
    fn from(msg: RouteMessage) -> Self {
        let mut gateway = None;
        let mut source = None;
        let mut source_hint = None;
        let mut destination = None;
        let mut ifindex = None;
        let mut metric = None;
        // Walk the attribute TLVs; anything we do not model is ignored.
        for attr in msg.attributes {
            match attr {
                RouteAttribute::Source(addr) => {
                    source = addr_to_ip(addr);
                }
                RouteAttribute::PrefSource(addr) => {
                    source_hint = addr_to_ip(addr);
                }
                RouteAttribute::Destination(addr) => {
                    destination = addr_to_ip(addr);
                }
                RouteAttribute::Gateway(addr) => {
                    gateway = addr_to_ip(addr);
                }
                RouteAttribute::Oif(i) => {
                    ifindex = Some(i);
                }
                RouteAttribute::Priority(priority) => {
                    metric = Some(priority);
                }
                _ => {}
            }
        }
        // rtnetlink gives None instead of 0.0.0.0 for the default route, but we'll convert to 0 here to make it match the other platforms
        // NOTE: panics if the message's address family is neither IPv4 nor
        // IPv6; callers only request AF_INET/AF_INET6 dumps in this module.
        let destination = destination.unwrap_or_else(|| match msg.header.address_family {
            AddressFamily::Inet => Ipv4Addr::UNSPECIFIED.into(),
            AddressFamily::Inet6 => Ipv6Addr::UNSPECIFIED.into(),
            _ => panic!("invalid destination family"),
        });
        Self {
            destination,
            prefix: msg.header.destination_prefix_length,
            source,
            source_prefix: msg.header.source_prefix_length,
            source_hint,
            gateway,
            ifindex,
            table: msg.header.table,
            metric,
        }
    }
}
/// Linux interface configurer that talks rtnetlink directly for addresses
/// and routes, and uses `ioctl`s for MTU and link-flag operations — no
/// shelling out to `ip`/`ifconfig`.
pub struct NetlinkIfConfiger {}
impl NetlinkIfConfiger {
    /// Resolve an interface name to its kernel index via `if_nametoindex`.
    /// An index of 0 means "not found" and is mapped to the last OS error.
    fn get_interface_index(name: &str) -> Result<u32, Error> {
        let name = CString::new(name).with_context(|| "failed to convert interface name")?;
        match unsafe { libc::if_nametoindex(name.as_ptr()) } {
            0 => Err(std::io::Error::last_os_error().into()),
            n => Ok(n),
        }
    }
    /// Find the prefix length of the address `ip` currently assigned to
    /// interface `name`, or `Error::NotFound` if the address is not present.
    fn get_prefix_len(name: &str, ip: Ipv4Addr) -> Result<u8, Error> {
        let addrs = Self::list_addresses(name)?;
        for addr in addrs {
            if addr.address() == IpAddr::V4(ip) {
                return Ok(addr.network_length());
            }
        }
        Err(Error::NotFound)
    }
    /// Remove a single IPv4 address/prefix from interface `name` via a
    /// RTM_DELADDR request.
    fn remove_one_ip(name: &str, ip: Ipv4Addr, prefix_len: u8) -> Result<(), Error> {
        let mut message = AddressMessage::default();
        message.header.prefix_len = prefix_len;
        message.header.index = NetlinkIfConfiger::get_interface_index(name)?;
        message.header.family = AddressFamily::Inet;
        message
            .attributes
            .push(AddressAttribute::Address(std::net::IpAddr::V4(ip)));
        send_netlink_req_and_wait_one_resp::<RouteNetlinkMessage>(
            RouteNetlinkMessage::DelAddress(message),
            true,
        )
    }
    /// Shared get/set MTU helper: `op` is SIOCGIFMTU (read, `value` ignored
    /// by the kernel) or SIOCSIFMTU (write `value`). Returns the MTU left in
    /// the `ifreq` after the ioctl.
    pub(crate) fn mtu_op<T: TryInto<Ioctl>>(
        name: &str,
        op: T,
        value: libc::c_int,
    ) -> Result<u32, Error>
    where
        <T as TryInto<Ioctl>>::Error: Debug,
    {
        let dummy_socket = dummy_socket()?;
        let mut ifr: ifreq = build_ifreq(name);
        unsafe {
            ifr.ifr_ifru.ifru_mtu = value;
            // Issue the ioctl through the dummy socket's fd to read or
            // write the MTU.
            if ioctl(dummy_socket.as_raw_fd(), op.try_into().unwrap(), &ifr) != 0 {
                return Err(std::io::Error::last_os_error().into());
            }
        }
        Ok(unsafe { ifr.ifr_ifru.ifru_mtu as u32 })
    }
    /// Read the current MTU of interface `name`.
    fn mtu(name: &str) -> Result<u32, Error> {
        Self::mtu_op(name, SIOCGIFMTU, 0)
    }
    /// List all IPv4/IPv6 addresses (with prefix length) assigned to
    /// interface `name`, derived from `getifaddrs`. Entries without both an
    /// address and a netmask, or with mismatched families, are skipped.
    pub fn list_addresses(name: &str) -> Result<Vec<IpInet>, Error> {
        let mut result = vec![];
        for interface in getifaddrs()
            .with_context(|| "failed to call getifaddrs")?
            .filter(|x| x.interface_name == name)
        {
            let (Some(address), Some(netmask)) = (interface.address, interface.netmask) else {
                continue;
            };
            use nix::sys::socket::AddressFamily::{Inet, Inet6};
            let (address, netmask) = match (address.family(), netmask.family()) {
                (Some(Inet), Some(Inet)) => (
                    IpAddr::V4(address.as_sockaddr_in().unwrap().ip().into()),
                    IpAddr::V4(netmask.as_sockaddr_in().unwrap().ip().into()),
                ),
                (Some(Inet6), Some(Inet6)) => (
                    IpAddr::V6(address.as_sockaddr_in6().unwrap().ip()),
                    IpAddr::V6(netmask.as_sockaddr_in6().unwrap().ip()),
                ),
                (_, _) => continue,
            };
            let prefix = ip_mask_to_prefix(netmask).unwrap();
            result.push(IpInet::new(address, prefix).unwrap());
        }
        Ok(result)
    }
    /// Shared get/set flags helper: `op` is SIOCGIFFLAGS (read; pass empty
    /// flags) or SIOCSIFFLAGS (write `flags`). Returns the flags left in the
    /// `ifreq` after the ioctl.
    pub(crate) fn set_flags_op<T: TryInto<Ioctl>>(
        name: &str,
        op: T,
        flags: InterfaceFlags,
    ) -> Result<InterfaceFlags, Error>
    where
        <T as TryInto<Ioctl>>::Error: Debug,
    {
        let mut req = build_ifreq(name);
        req.ifr_ifru.ifru_flags = flags.bits() as _;
        let socket = dummy_socket()?;
        unsafe {
            if ioctl(socket.as_raw_fd(), op.try_into().unwrap(), &req) != 0 {
                return Err(std::io::Error::last_os_error().into());
            }
            Ok(InterfaceFlags::from_bits_truncate(
                req.ifr_ifru.ifru_flags as _,
            ))
        }
    }
    /// Overwrite the interface flags of `name` with `flags`.
    pub(crate) fn set_flags(name: &str, flags: InterfaceFlags) -> Result<InterfaceFlags, Error> {
        Self::set_flags_op(name, SIOCSIFFLAGS, flags)
    }
    /// Read the current interface flags of `name`.
    pub(crate) fn get_flags(name: &str) -> Result<InterfaceFlags, Error> {
        Self::set_flags_op(name, SIOCGIFFLAGS, InterfaceFlags::empty())
    }
    /// Dump all IPv4 routes from every routing table via RTM_GETROUTE with
    /// NLM_F_DUMP, returning the raw messages so callers can both inspect
    /// and replay (e.g. delete) them.
    fn list_routes() -> Result<Vec<RouteMessage>, Error> {
        let mut message = RouteMessage::default();
        message.header.table = RouteHeader::RT_TABLE_UNSPEC;
        message.header.protocol = RouteProtocol::Unspec;
        message.header.scope = RouteScope::Universe;
        message.header.kind = RouteType::Unicast;
        message.header.address_family = AddressFamily::Inet;
        message.header.destination_prefix_length = 0;
        message.header.source_prefix_length = 0;
        let s = send_netlink_req(
            RouteNetlinkMessage::GetRoute(message),
            NLM_F_REQUEST | NLM_F_DUMP,
        )?;
        let mut ret_vec = vec![];
        let mut resp = Vec::<u8>::new();
        // A dump may span several datagrams, each containing several
        // messages; keep reading until NLMSG_DONE arrives.
        loop {
            if resp.len() == 0 {
                let (new_resp, _) = s.recv_from_full()?;
                resp = new_resp;
            }
            let ret = NetlinkMessage::<RouteNetlinkMessage>::deserialize(&resp)
                .with_context(|| "Failed to deserialize netlink message")?;
            // Consume exactly one message from the front of the buffer.
            resp = resp.split_off(ret.buffer_len());
            tracing::debug!("net link response <<< {:?}", ret);
            match ret.payload {
                NetlinkPayload::Error(e) => {
                    // Error code 0 is a benign ACK; anything else aborts.
                    if e.code == NonZero::new(0) {
                        continue;
                    } else {
                        return Err(e.to_io().into());
                    }
                }
                NetlinkPayload::InnerMessage(RouteNetlinkMessage::NewRoute(m)) => {
                    tracing::debug!("net link response <<< {:?}", m);
                    ret_vec.push(m);
                }
                NetlinkPayload::Done(_) => {
                    break;
                }
                p => {
                    tracing::error!("Unexpected netlink response: {:?}", p);
                    return Err(anyhow::anyhow!("Unexpected netlink response").into());
                }
            }
        }
        Ok(ret_vec)
    }
}
#[async_trait]
impl IfConfiguerTrait for NetlinkIfConfiger {
    /// Add a static route for `address/cidr_prefix` out of interface `name`
    /// into the main routing table.
    async fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let mut message = RouteMessage::default();
        message.header.table = RouteHeader::RT_TABLE_MAIN;
        message.header.protocol = RouteProtocol::Static;
        message.header.scope = RouteScope::Universe;
        message.header.kind = RouteType::Unicast;
        message.header.address_family = AddressFamily::Inet;
        // metric: high priority value so existing routes win over ours
        message.attributes.push(RouteAttribute::Priority(65535));
        // output interface
        message
            .attributes
            .push(RouteAttribute::Oif(NetlinkIfConfiger::get_interface_index(
                name,
            )?));
        // destination network and prefix
        message.header.destination_prefix_length = cidr_prefix;
        message
            .attributes
            .push(RouteAttribute::Destination(RouteAddress::Inet(address)));
        send_netlink_req_and_wait_one_resp(RouteNetlinkMessage::NewRoute(message), false)
    }
    /// Remove the route matching destination, prefix AND interface index.
    /// Scanning the dump (instead of sending a bare RTM_DELROUTE) ensures we
    /// only delete a route that goes through our own interface; a missing
    /// match is not an error.
    async fn remove_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let routes = Self::list_routes()?;
        let ifidx = NetlinkIfConfiger::get_interface_index(name)?;
        for msg in routes {
            let other_route: Route = msg.clone().into();
            if other_route.destination == std::net::IpAddr::V4(address)
                && other_route.prefix == cidr_prefix
                && other_route.ifindex == Some(ifidx)
            {
                send_netlink_req_and_wait_one_resp(RouteNetlinkMessage::DelRoute(msg), true)?;
                return Ok(());
            }
        }
        Ok(())
    }
    /// Assign `address/cidr_prefix` to interface `name` via RTM_NEWADDR,
    /// filling in local and broadcast attributes.
    async fn add_ipv4_ip(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let mut message = AddressMessage::default();
        message.header.prefix_len = cidr_prefix;
        message.header.index = NetlinkIfConfiger::get_interface_index(name)?;
        message.header.family = AddressFamily::Inet;
        message
            .attributes
            .push(AddressAttribute::Address(std::net::IpAddr::V4(address)));
        // for IPv4 the IFA_LOCAL address can be set to the same value as
        // IFA_ADDRESS
        message
            .attributes
            .push(AddressAttribute::Local(std::net::IpAddr::V4(address)));
        // set the IFA_BROADCAST address as well
        if cidr_prefix == 32 {
            // /32 has no distinct broadcast; reuse the address itself.
            message
                .attributes
                .push(AddressAttribute::Broadcast(address));
        } else {
            // broadcast = address OR host-part mask (0xffffffff >> prefix);
            // `>>` binds tighter than `|`, so this is ((mask) | ip).
            let ip_addr = u32::from(address);
            let brd = Ipv4Addr::from((0xffff_ffff_u32) >> u32::from(cidr_prefix) | ip_addr);
            message.attributes.push(AddressAttribute::Broadcast(brd));
        };
        send_netlink_req_and_wait_one_resp::<RouteNetlinkMessage>(
            RouteNetlinkMessage::NewAddress(message),
            false,
        )
    }
    /// Bring the link up or down by toggling IFF_UP while preserving all
    /// other interface flags.
    async fn set_link_status(&self, name: &str, up: bool) -> Result<(), Error> {
        let mut flags = Self::get_flags(name)?;
        flags.set(InterfaceFlags::IFF_UP, up);
        Self::set_flags(name, flags)?;
        Ok(())
    }
    /// Remove one IPv4 address (if `ip` is given) or every IPv4 address
    /// (if `ip` is `None`) from interface `name`.
    async fn remove_ip(&self, name: &str, ip: Option<Ipv4Addr>) -> Result<(), Error> {
        if ip.is_none() {
            let addrs = Self::list_addresses(name)?;
            for addr in addrs {
                if let IpAddr::V4(ipv4) = addr.address() {
                    Self::remove_one_ip(name, ipv4, addr.network_length())?;
                }
            }
        } else {
            let ip = ip.unwrap();
            let prefix_len = Self::get_prefix_len(name, ip)?;
            Self::remove_one_ip(name, ip, prefix_len)?;
        }
        Ok(())
    }
    /// Set the interface MTU via SIOCSIFMTU.
    async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
        Self::mtu_op(name, SIOCSIFMTU, mtu as libc::c_int)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Name of the throwaway dummy link created for each test.
    const DUMMY_IFACE_NAME: &str = "dummy";
    /// Run `cmd` through `sh -c` and return its stdout as a String.
    ///
    /// NOTE(review): these tests shell out with `sudo` to create/delete a
    /// dummy link, so they require a Linux host with root (or passwordless
    /// sudo) and the `dummy` kernel module available.
    fn run_cmd(cmd: &str) -> String {
        let output = std::process::Command::new("sh")
            .arg("-c")
            .arg(cmd)
            .output()
            .expect("failed to execute process");
        String::from_utf8(output.stdout).unwrap()
    }
    // RAII guard: creates the dummy interface on construction and removes it
    // on drop, even if the test panics.
    struct PrepareEnv {}
    impl PrepareEnv {
        fn new() -> Self {
            let _ = run_cmd(&format!("sudo ip link add {} type dummy", DUMMY_IFACE_NAME));
            PrepareEnv {}
        }
    }
    impl Drop for PrepareEnv {
        fn drop(&mut self) {
            let _ = run_cmd(&format!("sudo ip link del {}", DUMMY_IFACE_NAME));
        }
    }
    // Exercises add/list/remove address, MTU get/set, and link up/down.
    #[serial_test::serial]
    #[tokio::test]
    async fn addr_test() {
        let _prepare_env = PrepareEnv::new();
        let ifcfg = NetlinkIfConfiger {};
        // give the kernel a moment to finish creating the dummy link
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        ifcfg
            .add_ipv4_ip(DUMMY_IFACE_NAME, "10.44.44.4".parse().unwrap(), 24)
            .await
            .unwrap();
        let addrs = NetlinkIfConfiger::list_addresses(DUMMY_IFACE_NAME).unwrap();
        assert_eq!(addrs.len(), 1);
        assert_eq!(
            addrs[0].address(),
            IpAddr::V4("10.44.44.4".parse().unwrap())
        );
        assert_eq!(addrs[0].network_length(), 24);
        NetlinkIfConfiger::remove_one_ip(DUMMY_IFACE_NAME, "10.44.44.4".parse().unwrap(), 24)
            .unwrap();
        let addrs = NetlinkIfConfiger::list_addresses(DUMMY_IFACE_NAME).unwrap();
        assert_eq!(addrs.len(), 0);
        let old_mtu = NetlinkIfConfiger::mtu(DUMMY_IFACE_NAME).unwrap();
        assert_ne!(old_mtu, 0);
        let new_mtu = old_mtu + 1;
        ifcfg.set_mtu(DUMMY_IFACE_NAME, new_mtu).await.unwrap();
        let mtu = NetlinkIfConfiger::mtu(DUMMY_IFACE_NAME).unwrap();
        assert_eq!(mtu, new_mtu);
        ifcfg
            .set_link_status(DUMMY_IFACE_NAME, false)
            .await
            .unwrap();
        ifcfg.set_link_status(DUMMY_IFACE_NAME, true).await.unwrap();
    }
    // Exercises route add/list/remove round-trip on the dummy link.
    #[serial_test::serial]
    #[tokio::test]
    async fn route_test() {
        let _prepare_env = PrepareEnv::new();
        let ret = NetlinkIfConfiger::list_routes().unwrap();
        let ifcfg = NetlinkIfConfiger {};
        println!("{:?}", ret);
        ifcfg.set_link_status(DUMMY_IFACE_NAME, true).await.unwrap();
        ifcfg
            .add_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24)
            .await
            .unwrap();
        let routes = NetlinkIfConfiger::list_routes()
            .unwrap()
            .into_iter()
            .map(Route::from)
            .map(|x| x.destination)
            .collect::<Vec<_>>();
        assert!(routes.contains(&IpAddr::V4("10.5.5.0".parse().unwrap())));
        ifcfg
            .remove_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24)
            .await
            .unwrap();
        let routes = NetlinkIfConfiger::list_routes()
            .unwrap()
            .into_iter()
            .map(Route::from)
            .map(|x| x.destination)
            .collect::<Vec<_>>();
        assert!(!routes.contains(&IpAddr::V4("10.5.5.0".parse().unwrap())));
    }
}
+133
View File
@@ -0,0 +1,133 @@
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
/// A single routing-table entry, normalized across platforms. Fields that
/// only exist in one OS's routing API are `cfg`-gated so each platform
/// carries exactly what it can express.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Route {
    /// Network address of the destination. `0.0.0.0` with a prefix of `0` is considered a default route.
    pub destination: IpAddr,
    /// Length of network prefix in the destination address.
    pub prefix: u8,
    /// The address of the next hop of this route.
    ///
    /// On macOS, this must be `Some` if ifindex is `None`
    pub gateway: Option<IpAddr>,
    /// The index of the local interface through which the next hop of this route may be reached.
    ///
    /// On macOS, this must be `Some` if gateway is `None`
    pub ifindex: Option<u32>,
    #[cfg(target_os = "linux")]
    /// The routing table this route belongs to.
    pub table: u8,
    /// Network address of the source.
    #[cfg(target_os = "linux")]
    pub source: Option<IpAddr>,
    /// Prefix length of the source address.
    #[cfg(target_os = "linux")]
    pub source_prefix: u8,
    /// Source address hint. Does not influence routing.
    #[cfg(target_os = "linux")]
    pub source_hint: Option<IpAddr>,
    #[cfg(any(target_os = "windows", target_os = "linux"))]
    /// The route metric offset value for this route.
    pub metric: Option<u32>,
    #[cfg(target_os = "windows")]
    /// Luid of the local interface through which the next hop of this route may be reached.
    ///
    /// If luid is specified, ifindex is optional.
    pub luid: Option<u64>,
}
impl Route {
    /// Create a route that matches a given destination network.
    ///
    /// Either the gateway or interface should be set before attempting to add to a routing table.
    pub fn new(destination: IpAddr, prefix: u8) -> Self {
        Self {
            destination,
            prefix,
            gateway: None,
            ifindex: None,
            #[cfg(target_os = "linux")]
            // default to main table
            table: 254,
            #[cfg(target_os = "linux")]
            source: None,
            #[cfg(target_os = "linux")]
            source_prefix: 0,
            #[cfg(target_os = "linux")]
            source_hint: None,
            #[cfg(any(target_os = "windows", target_os = "linux"))]
            metric: None,
            #[cfg(target_os = "windows")]
            luid: None,
        }
    }
    /// Set the next-hop gateway for this route.
    pub fn with_gateway(mut self, gateway: IpAddr) -> Self {
        self.gateway = Some(gateway);
        self
    }
    /// Set the index of the local interface through which the next hop of this route should be reached.
    pub fn with_ifindex(mut self, ifindex: u32) -> Self {
        self.ifindex = Some(ifindex);
        self
    }
    /// Set table the route will be installed in.
    #[cfg(target_os = "linux")]
    pub fn with_table(mut self, table: u8) -> Self {
        self.table = table;
        self
    }
    /// Set source.
    #[cfg(target_os = "linux")]
    pub fn with_source(mut self, source: IpAddr, prefix: u8) -> Self {
        self.source = Some(source);
        self.source_prefix = prefix;
        self
    }
    /// Set source hint.
    #[cfg(target_os = "linux")]
    pub fn with_source_hint(mut self, hint: IpAddr) -> Self {
        self.source_hint = Some(hint);
        self
    }
    /// Set route metric.
    #[cfg(any(target_os = "windows", target_os = "linux"))]
    pub fn with_metric(mut self, metric: u32) -> Self {
        self.metric = Some(metric);
        self
    }
    /// Set luid of the local interface through which the next hop of this route should be reached.
    #[cfg(target_os = "windows")]
    pub fn with_luid(mut self, luid: u64) -> Self {
        self.luid = Some(luid);
        self
    }
    /// Get the netmask covering the network portion of the destination address.
    ///
    /// For prefix 0 the shift amount equals the bit width, so `checked_shl`
    /// returns `None` and `unwrap_or(0)` yields the all-zero mask.
    /// NOTE(review): a prefix larger than the address width would make the
    /// subtraction underflow (panic in debug builds) — assumed never to
    /// happen for well-formed routes; confirm at call sites.
    pub fn mask(&self) -> IpAddr {
        match self.destination {
            IpAddr::V4(_) => IpAddr::V4(Ipv4Addr::from(
                u32::MAX.checked_shl(32 - self.prefix as u32).unwrap_or(0),
            )),
            IpAddr::V6(_) => IpAddr::V6(Ipv6Addr::from(
                u128::MAX.checked_shl(128 - self.prefix as u32).unwrap_or(0),
            )),
        }
    }
}
+166
View File
@@ -0,0 +1,166 @@
use std::net::Ipv4Addr;
use async_trait::async_trait;
use super::{cidr_to_subnet_mask, run_shell_cmd, Error, IfConfiguerTrait};
/// Windows interface configurer that drives `netsh` / `route` shell
/// commands via `run_shell_cmd`.
pub struct WindowsIfConfiger {}
impl WindowsIfConfiger {
    /// Look up the Windows interface index for `name`; `None` if not found.
    pub fn get_interface_index(name: &str) -> Option<u32> {
        crate::arch::windows::find_interface_index(name).ok()
    }

    /// List all IPv4 addresses currently assigned to interface `name`.
    ///
    /// # Errors
    /// Fails if the interface enumeration itself fails; an unknown `name`
    /// simply yields an empty list.
    async fn list_ipv4(name: &str) -> Result<Vec<Ipv4Addr>, Error> {
        use anyhow::Context;
        use network_interface::NetworkInterfaceConfig;
        use std::net::IpAddr;
        let ret = network_interface::NetworkInterface::show().with_context(|| "show interface")?;
        // filter + flat_map replaces the previous filter_map/flat_map(|x| x)
        // chain; behavior is identical but each step now does one thing.
        let addrs = ret
            .iter()
            .filter(|x| x.name == name)
            .flat_map(|x| x.addr.clone())
            .map(|x| x.ip())
            .filter_map(|x| match x {
                IpAddr::V4(ipv4) => Some(ipv4),
                _ => None,
            })
            .collect::<Vec<_>>();
        Ok(addrs)
    }

    /// Remove a single IPv4 address from interface `name` via `netsh`.
    async fn remove_one_ipv4(name: &str, ip: Ipv4Addr) -> Result<(), Error> {
        // `Ipv4Addr` implements Display, so no explicit to_string() needed.
        run_shell_cmd(
            format!(
                "netsh interface ipv4 delete address {} address={}",
                name, ip
            )
            .as_str(),
        )
        .await
    }
}
#[cfg(target_os = "windows")]
#[async_trait]
impl IfConfiguerTrait for WindowsIfConfiger {
    /// Add a route for `address/cidr_prefix` bound to interface `name` via
    /// the `route ADD` command.
    async fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let Some(idx) = Self::get_interface_index(name) else {
            return Err(Error::NotFound);
        };
        // NOTE(review): `route ADD` syntactically requires a gateway, and
        // "10.1.1.1" appears to be a placeholder satisfied by the explicit
        // `IF <idx>` binding — confirm this is intentional; it is not
        // derived from any configuration visible here.
        run_shell_cmd(
            format!(
                "route ADD {} MASK {} 10.1.1.1 IF {} METRIC 9000",
                address,
                cidr_to_subnet_mask(cidr_prefix),
                idx
            )
            .as_str(),
        )
        .await
    }
    /// Delete the route for `address/cidr_prefix` on interface `name`.
    async fn remove_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        let Some(idx) = Self::get_interface_index(name) else {
            return Err(Error::NotFound);
        };
        run_shell_cmd(
            format!(
                "route DELETE {} MASK {} IF {}",
                address,
                cidr_to_subnet_mask(cidr_prefix),
                idx
            )
            .as_str(),
        )
        .await
    }
    /// Assign `address/cidr_prefix` to interface `name` via `netsh`.
    async fn add_ipv4_ip(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
    ) -> Result<(), Error> {
        run_shell_cmd(
            format!(
                "netsh interface ipv4 add address {} address={} mask={}",
                name,
                address,
                cidr_to_subnet_mask(cidr_prefix)
            )
            .as_str(),
        )
        .await
    }
    /// Enable or disable the interface via `netsh`.
    async fn set_link_status(&self, name: &str, up: bool) -> Result<(), Error> {
        run_shell_cmd(
            format!(
                "netsh interface set interface {} {}",
                name,
                if up { "enable" } else { "disable" }
            )
            .as_str(),
        )
        .await
    }
    /// Remove one IPv4 address (if `ip` is given) or all IPv4 addresses
    /// (if `ip` is `None`) from interface `name`.
    async fn remove_ip(&self, name: &str, ip: Option<Ipv4Addr>) -> Result<(), Error> {
        if ip.is_none() {
            for ip in Self::list_ipv4(name).await?.iter() {
                Self::remove_one_ipv4(name, *ip).await?;
            }
            Ok(())
        } else {
            Self::remove_one_ipv4(name, ip.unwrap()).await
        }
    }
    /// Poll (every 100 ms, for up to 10 s) until the interface appears in
    /// the system; times out with an error otherwise.
    async fn wait_interface_show(&self, name: &str) -> Result<(), Error> {
        Ok(
            tokio::time::timeout(std::time::Duration::from_secs(10), async move {
                loop {
                    if let Some(idx) = Self::get_interface_index(name) {
                        tracing::info!(?name, ?idx, "Interface found");
                        break;
                    }
                    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                }
                Ok::<(), Error>(())
            })
            .await??,
        )
    }
    /// Set the MTU for both address families. The IPv6 result is ignored
    /// (best effort); only the IPv4 command's result is returned.
    async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
        let _ = run_shell_cmd(
            format!("netsh interface ipv6 set subinterface {} mtu={}", name, mtu).as_str(),
        )
        .await;
        run_shell_cmd(
            format!("netsh interface ipv4 set subinterface {} mtu={}", name, mtu).as_str(),
        )
        .await
    }
}
+23 -3
View File
@@ -1,6 +1,7 @@
use std::{ use std::{
fmt::Debug, fmt::Debug,
future, future,
io::Write as _,
sync::{Arc, Mutex}, sync::{Arc, Mutex},
}; };
use tokio::task::JoinSet; use tokio::task::JoinSet;
@@ -81,7 +82,17 @@ pub fn join_joinset_background<T: Debug + Send + Sync + 'static>(
} }
pub fn get_machine_id() -> uuid::Uuid { pub fn get_machine_id() -> uuid::Uuid {
// TODO: load from local file // a path same as the binary
let machine_id_file = std::env::current_exe()
.map(|x| x.with_file_name("et_machine_id"))
.unwrap_or_else(|_| std::path::PathBuf::from("et_machine_id"));
// try load from local file
if let Ok(mid) = std::fs::read_to_string(&machine_id_file) {
if let Ok(mid) = uuid::Uuid::parse_str(mid.trim()) {
return mid;
}
}
#[cfg(any( #[cfg(any(
target_os = "linux", target_os = "linux",
@@ -95,7 +106,7 @@ pub fn get_machine_id() -> uuid::Uuid {
crate::tunnel::generate_digest_from_str("", x.as_str(), &mut b); crate::tunnel::generate_digest_from_str("", x.as_str(), &mut b);
uuid::Uuid::from_bytes(b) uuid::Uuid::from_bytes(b)
}) })
.unwrap_or(uuid::Uuid::new_v4()); .ok();
#[cfg(not(any( #[cfg(not(any(
target_os = "linux", target_os = "linux",
@@ -103,9 +114,18 @@ pub fn get_machine_id() -> uuid::Uuid {
target_os = "windows", target_os = "windows",
target_os = "freebsd" target_os = "freebsd"
)))] )))]
let gen_mid = None;
if gen_mid.is_some() {
return gen_mid.unwrap();
}
let gen_mid = uuid::Uuid::new_v4(); let gen_mid = uuid::Uuid::new_v4();
// TODO: save to local file // try save to local file
if let Ok(mut file) = std::fs::File::create(machine_id_file) {
let _ = file.write_all(gen_mid.to_string().as_bytes());
}
gen_mid gen_mid
} }
+145 -77
View File
@@ -1,6 +1,13 @@
// try connect peers directly, with either its public ip or lan ip // try connect peers directly, with either its public ip or lan ip
use std::{net::SocketAddr, sync::Arc, time::Duration}; use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::Duration,
};
use crate::{ use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId}, common::{error::Error, global_ctx::ArcGlobalCtx, PeerId},
@@ -29,6 +36,8 @@ use super::create_connector_by_url;
pub const DIRECT_CONNECTOR_SERVICE_ID: u32 = 1; pub const DIRECT_CONNECTOR_SERVICE_ID: u32 = 1;
pub const DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC: u64 = 300; pub const DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC: u64 = 300;
static TESTING: AtomicBool = AtomicBool::new(false);
#[async_trait::async_trait] #[async_trait::async_trait]
pub trait PeerManagerForDirectConnector { pub trait PeerManagerForDirectConnector {
async fn list_peers(&self) -> Vec<PeerId>; async fn list_peers(&self) -> Vec<PeerId>;
@@ -182,7 +191,7 @@ impl DirectConnectorManager {
// let (peer_id, conn_id) = data.peer_manager.try_connect(connector).await?; // let (peer_id, conn_id) = data.peer_manager.try_connect(connector).await?;
if peer_id != dst_peer_id { if peer_id != dst_peer_id && !TESTING.load(Ordering::Relaxed) {
tracing::info!( tracing::info!(
"connect to ip succ: {}, but peer id mismatch, expect: {}, actual: {}", "connect to ip succ: {}, but peer id mismatch, expect: {}, actual: {}",
addr, addr,
@@ -279,87 +288,103 @@ impl DirectConnectorManager {
let listener_host = listener.socket_addrs(|| None).unwrap().pop(); let listener_host = listener.socket_addrs(|| None).unwrap().pop();
match listener_host { match listener_host {
Some(SocketAddr::V4(_)) => { Some(SocketAddr::V4(s_addr)) => {
ip_list.interface_ipv4s.iter().for_each(|ip| { if s_addr.ip().is_unspecified() {
let mut addr = (*listener).clone(); ip_list.interface_ipv4s.iter().for_each(|ip| {
if addr.set_host(Some(ip.to_string().as_str())).is_ok() { let mut addr = (*listener).clone();
tasks.spawn(Self::try_connect_to_ip( if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
data.clone(), tasks.spawn(Self::try_connect_to_ip(
dst_peer_id.clone(), data.clone(),
addr.to_string(), dst_peer_id.clone(),
)); addr.to_string(),
} else { ));
tracing::error!( } else {
?ip, tracing::error!(
?listener, ?ip,
?dst_peer_id, ?listener,
"failed to set host for interface ipv4" ?dst_peer_id,
); "failed to set host for interface ipv4"
} );
}); }
});
if let Some(public_ipv4) = ip_list.public_ipv4 { if let Some(public_ipv4) = ip_list.public_ipv4 {
let mut addr = (*listener).clone(); let mut addr = (*listener).clone();
if addr if addr
.set_host(Some(public_ipv4.to_string().as_str())) .set_host(Some(public_ipv4.to_string().as_str()))
.is_ok() .is_ok()
{ {
tasks.spawn(Self::try_connect_to_ip( tasks.spawn(Self::try_connect_to_ip(
data.clone(), data.clone(),
dst_peer_id.clone(), dst_peer_id.clone(),
addr.to_string(), addr.to_string(),
)); ));
} else { } else {
tracing::error!( tracing::error!(
?public_ipv4, ?public_ipv4,
?listener, ?listener,
?dst_peer_id, ?dst_peer_id,
"failed to set host for public ipv4" "failed to set host for public ipv4"
); );
}
} }
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip(
data.clone(),
dst_peer_id.clone(),
listener.to_string(),
));
} }
} }
Some(SocketAddr::V6(_)) => { Some(SocketAddr::V6(s_addr)) => {
ip_list.interface_ipv6s.iter().for_each(|ip| { if s_addr.ip().is_unspecified() {
let mut addr = (*listener).clone(); ip_list.interface_ipv6s.iter().for_each(|ip| {
if addr let mut addr = (*listener).clone();
.set_host(Some(format!("[{}]", ip.to_string()).as_str())) if addr
.is_ok() .set_host(Some(format!("[{}]", ip.to_string()).as_str()))
{ .is_ok()
tasks.spawn(Self::try_connect_to_ip( {
data.clone(), tasks.spawn(Self::try_connect_to_ip(
dst_peer_id.clone(), data.clone(),
addr.to_string(), dst_peer_id.clone(),
)); addr.to_string(),
} else { ));
tracing::error!( } else {
?ip, tracing::error!(
?listener, ?ip,
?dst_peer_id, ?listener,
"failed to set host for interface ipv6" ?dst_peer_id,
); "failed to set host for interface ipv6"
} );
}); }
});
if let Some(public_ipv6) = ip_list.public_ipv6 { if let Some(public_ipv6) = ip_list.public_ipv6 {
let mut addr = (*listener).clone(); let mut addr = (*listener).clone();
if addr if addr
.set_host(Some(format!("[{}]", public_ipv6.to_string()).as_str())) .set_host(Some(format!("[{}]", public_ipv6.to_string()).as_str()))
.is_ok() .is_ok()
{ {
tasks.spawn(Self::try_connect_to_ip( tasks.spawn(Self::try_connect_to_ip(
data.clone(), data.clone(),
dst_peer_id.clone(), dst_peer_id.clone(),
addr.to_string(), addr.to_string(),
)); ));
} else { } else {
tracing::error!( tracing::error!(
?public_ipv6, ?public_ipv6,
?listener, ?listener,
?dst_peer_id, ?dst_peer_id,
"failed to set host for public ipv6" "failed to set host for public ipv6"
); );
}
} }
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip(
data.clone(),
dst_peer_id.clone(),
listener.to_string(),
));
} }
} }
p => { p => {
@@ -452,6 +477,49 @@ mod tests {
proto::peer_rpc::GetIpListResponse, proto::peer_rpc::GetIpListResponse,
}; };
use super::TESTING;
#[tokio::test]
async fn direct_connector_mapped_listener() {
TESTING.store(true, std::sync::atomic::Ordering::Relaxed);
let p_a = create_mock_peer_manager().await;
let p_b = create_mock_peer_manager().await;
let p_c = create_mock_peer_manager().await;
let p_x = create_mock_peer_manager().await;
connect_peer_manager(p_a.clone(), p_b.clone()).await;
connect_peer_manager(p_b.clone(), p_c.clone()).await;
connect_peer_manager(p_c.clone(), p_x.clone()).await;
wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap();
wait_route_appear(p_a.clone(), p_x.clone()).await.unwrap();
let mut f = p_a.get_global_ctx().get_flags();
f.bind_device = false;
p_a.get_global_ctx().config.set_flags(f);
p_c.get_global_ctx()
.config
.set_mapped_listeners(Some(vec!["tcp://127.0.0.1:11334".parse().unwrap()]));
p_x.get_global_ctx()
.config
.set_listeners(vec!["tcp://0.0.0.0:11334".parse().unwrap()]);
let mut lis_x = ListenerManager::new(p_x.get_global_ctx(), p_x.clone());
lis_x.prepare_listeners().await.unwrap();
lis_x.run().await.unwrap();
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
let mut dm_a = DirectConnectorManager::new(p_a.get_global_ctx(), p_a.clone());
let mut dm_c = DirectConnectorManager::new(p_c.get_global_ctx(), p_c.clone());
dm_a.run_as_client();
dm_c.run_as_server();
// p_c's mapped listener is p_x's listener, so p_a should connect to p_x directly
wait_route_appear_with_cost(p_a.clone(), p_x.my_peer_id(), Some(1))
.await
.unwrap();
}
#[rstest::rstest] #[rstest::rstest]
#[tokio::test] #[tokio::test]
async fn direct_connector_basic_test( async fn direct_connector_basic_test(
+8 -6
View File
@@ -297,12 +297,14 @@ impl ManualConnectorManager {
connector.lock().await.set_ip_version(ip_version); connector.lock().await.set_ip_version(ip_version);
set_bind_addr_for_peer_connector( if data.global_ctx.config.get_flags().bind_device {
connector.lock().await.as_mut(), set_bind_addr_for_peer_connector(
ip_version == IpVersion::V4, connector.lock().await.as_mut(),
&ip_collector, ip_version == IpVersion::V4,
) &ip_collector,
.await; )
.await;
}
data.global_ctx.issue_event(GlobalCtxEvent::Connecting( data.global_ctx.issue_event(GlobalCtxEvent::Connecting(
connector.lock().await.remote_url().clone(), connector.lock().await.remote_url().clone(),
+40 -30
View File
@@ -56,23 +56,27 @@ pub async fn create_connector_by_url(
"tcp" => { "tcp" => {
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp")?; let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp")?;
let mut connector = TcpTunnelConnector::new(url); let mut connector = TcpTunnelConnector::new(url);
set_bind_addr_for_peer_connector( if global_ctx.config.get_flags().bind_device {
&mut connector, set_bind_addr_for_peer_connector(
dst_addr.is_ipv4(), &mut connector,
&global_ctx.get_ip_collector(), dst_addr.is_ipv4(),
) &global_ctx.get_ip_collector(),
.await; )
.await;
}
return Ok(Box::new(connector)); return Ok(Box::new(connector));
} }
"udp" => { "udp" => {
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp")?; let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp")?;
let mut connector = UdpTunnelConnector::new(url); let mut connector = UdpTunnelConnector::new(url);
set_bind_addr_for_peer_connector( if global_ctx.config.get_flags().bind_device {
&mut connector, set_bind_addr_for_peer_connector(
dst_addr.is_ipv4(), &mut connector,
&global_ctx.get_ip_collector(), dst_addr.is_ipv4(),
) &global_ctx.get_ip_collector(),
.await; )
.await;
}
return Ok(Box::new(connector)); return Ok(Box::new(connector));
} }
"ring" => { "ring" => {
@@ -84,12 +88,14 @@ pub async fn create_connector_by_url(
"quic" => { "quic" => {
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic")?; let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic")?;
let mut connector = QUICTunnelConnector::new(url); let mut connector = QUICTunnelConnector::new(url);
set_bind_addr_for_peer_connector( if global_ctx.config.get_flags().bind_device {
&mut connector, set_bind_addr_for_peer_connector(
dst_addr.is_ipv4(), &mut connector,
&global_ctx.get_ip_collector(), dst_addr.is_ipv4(),
) &global_ctx.get_ip_collector(),
.await; )
.await;
}
return Ok(Box::new(connector)); return Ok(Box::new(connector));
} }
#[cfg(feature = "wireguard")] #[cfg(feature = "wireguard")]
@@ -101,12 +107,14 @@ pub async fn create_connector_by_url(
&nid.network_secret.unwrap_or_default(), &nid.network_secret.unwrap_or_default(),
); );
let mut connector = WgTunnelConnector::new(url, wg_config); let mut connector = WgTunnelConnector::new(url, wg_config);
set_bind_addr_for_peer_connector( if global_ctx.config.get_flags().bind_device {
&mut connector, set_bind_addr_for_peer_connector(
dst_addr.is_ipv4(), &mut connector,
&global_ctx.get_ip_collector(), dst_addr.is_ipv4(),
) &global_ctx.get_ip_collector(),
.await; )
.await;
}
return Ok(Box::new(connector)); return Ok(Box::new(connector));
} }
#[cfg(feature = "websocket")] #[cfg(feature = "websocket")]
@@ -114,12 +122,14 @@ pub async fn create_connector_by_url(
use crate::tunnel::{FromUrl, IpVersion}; use crate::tunnel::{FromUrl, IpVersion};
let dst_addr = SocketAddr::from_url(url.clone(), IpVersion::Both)?; let dst_addr = SocketAddr::from_url(url.clone(), IpVersion::Both)?;
let mut connector = crate::tunnel::websocket::WSTunnelConnector::new(url); let mut connector = crate::tunnel::websocket::WSTunnelConnector::new(url);
set_bind_addr_for_peer_connector( if global_ctx.config.get_flags().bind_device {
&mut connector, set_bind_addr_for_peer_connector(
dst_addr.is_ipv4(), &mut connector,
&global_ctx.get_ip_collector(), dst_addr.is_ipv4(),
) &global_ctx.get_ip_collector(),
.await; )
.await;
}
return Ok(Box::new(connector)); return Ok(Box::new(connector));
} }
_ => { _ => {
@@ -284,6 +284,7 @@ impl PunchSymToConeHoleClient {
BaseController { BaseController {
timeout_ms: 4000, timeout_ms: 4000,
trace_id: 0, trace_id: 0,
..Default::default()
}, },
req, req,
) )
@@ -314,6 +315,7 @@ impl PunchSymToConeHoleClient {
BaseController { BaseController {
timeout_ms: 4000, timeout_ms: 4000,
trace_id: 0, trace_id: 0,
..Default::default()
}, },
req, req,
) )
@@ -529,6 +531,7 @@ pub mod tests {
}; };
#[tokio::test] #[tokio::test]
#[serial_test::serial]
#[serial_test::serial(hole_punch)] #[serial_test::serial(hole_punch)]
async fn hole_punching_symmetric_only_random() { async fn hole_punching_symmetric_only_random() {
RUN_TESTING.store(true, std::sync::atomic::Ordering::Relaxed); RUN_TESTING.store(true, std::sync::atomic::Ordering::Relaxed);
@@ -577,13 +580,15 @@ pub mod tests {
) )
.await; .await;
println!("start punching {:?}", p_a.list_routes().await);
wait_for_condition( wait_for_condition(
|| async { || async {
wait_route_appear_with_cost(p_a.clone(), p_c.my_peer_id(), Some(1)) wait_route_appear_with_cost(p_a.clone(), p_c.my_peer_id(), Some(1))
.await .await
.is_ok() .is_ok()
}, },
Duration::from_secs(5), Duration::from_secs(10),
) )
.await; .await;
println!("{:?}", p_a.list_routes().await); println!("{:?}", p_a.list_routes().await);
+104 -7
View File
@@ -20,7 +20,8 @@ use easytier::{
DumpRouteRequest, GetVpnPortalInfoRequest, ListConnectorRequest, DumpRouteRequest, GetVpnPortalInfoRequest, ListConnectorRequest,
ListForeignNetworkRequest, ListGlobalForeignNetworkRequest, ListPeerRequest, ListForeignNetworkRequest, ListGlobalForeignNetworkRequest, ListPeerRequest,
ListPeerResponse, ListRouteRequest, ListRouteResponse, NodeInfo, PeerManageRpc, ListPeerResponse, ListRouteRequest, ListRouteResponse, NodeInfo, PeerManageRpc,
PeerManageRpcClientFactory, ShowNodeInfoRequest, VpnPortalRpc, PeerManageRpcClientFactory, ShowNodeInfoRequest, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc, TcpProxyRpcClientFactory, VpnPortalRpc,
VpnPortalRpcClientFactory, VpnPortalRpcClientFactory,
}, },
common::NatType, common::NatType,
@@ -50,14 +51,24 @@ struct Cli {
#[derive(Subcommand, Debug)] #[derive(Subcommand, Debug)]
enum SubCommand { enum SubCommand {
#[command(about = "show peers info")]
Peer(PeerArgs), Peer(PeerArgs),
#[command(about = "manage connectors")]
Connector(ConnectorArgs), Connector(ConnectorArgs),
#[command(about = "do stun test")]
Stun, Stun,
#[command(about = "show route info")]
Route(RouteArgs), Route(RouteArgs),
#[command(about = "show global peers info")]
PeerCenter, PeerCenter,
#[command(about = "show vpn portal (wireguard) info")]
VpnPortal, VpnPortal,
#[command(about = "inspect self easytier-core status")]
Node(NodeArgs), Node(NodeArgs),
#[command(about = "manage easytier-core as a system service")]
Service(ServiceArgs), Service(ServiceArgs),
#[command(about = "show tcp/kcp proxy status")]
Proxy,
} }
#[derive(Args, Debug)] #[derive(Args, Debug)]
@@ -114,7 +125,9 @@ enum ConnectorSubCommand {
#[derive(Subcommand, Debug)] #[derive(Subcommand, Debug)]
enum NodeSubCommand { enum NodeSubCommand {
#[command(about = "show node info")]
Info, Info,
#[command(about = "show node config")]
Config, Config,
} }
@@ -135,10 +148,15 @@ struct ServiceArgs {
#[derive(Subcommand, Debug)] #[derive(Subcommand, Debug)]
enum ServiceSubCommand { enum ServiceSubCommand {
#[command(about = "register easytier-core as a system service")]
Install(InstallArgs), Install(InstallArgs),
#[command(about = "unregister easytier-core system service")]
Uninstall, Uninstall,
#[command(about = "check easytier-core system service status")]
Status, Status,
#[command(about = "start easytier-core system service")]
Start, Start,
#[command(about = "stop easytier-core system service")]
Stop, Stop,
} }
@@ -153,13 +171,17 @@ struct InstallArgs {
#[arg(long, default_value = "false")] #[arg(long, default_value = "false")]
disable_autostart: bool, disable_autostart: bool,
#[arg(long)] #[arg(long, help = "path to easytier-core binary")]
core_path: Option<PathBuf>, core_path: Option<PathBuf>,
#[arg(long)] #[arg(long)]
service_work_dir: Option<PathBuf>, service_work_dir: Option<PathBuf>,
#[arg(trailing_var_arg = true, allow_hyphen_values = true)] #[arg(
trailing_var_arg = true,
allow_hyphen_values = true,
help = "args to pass to easytier-core"
)]
core_args: Option<Vec<OsString>>, core_args: Option<Vec<OsString>>,
} }
@@ -221,6 +243,19 @@ impl CommandHandler {
.with_context(|| "failed to get vpn portal client")?) .with_context(|| "failed to get vpn portal client")?)
} }
async fn get_tcp_proxy_client(
&self,
transport_type: &str,
) -> Result<Box<dyn TcpProxyRpc<Controller = BaseController>>, Error> {
Ok(self
.client
.lock()
.unwrap()
.scoped_client::<TcpProxyRpcClientFactory<BaseController>>(transport_type.to_string())
.await
.with_context(|| "failed to get vpn portal client")?)
}
async fn list_peers(&self) -> Result<ListPeerResponse, Error> { async fn list_peers(&self) -> Result<ListPeerResponse, Error> {
let client = self.get_peer_manager_client().await?; let client = self.get_peer_manager_client().await?;
let request = ListPeerRequest::default(); let request = ListPeerRequest::default();
@@ -647,12 +682,22 @@ impl Service {
environment: None, environment: None,
}; };
if self.status()? != ServiceStatus::NotInstalled { if self.status()? != ServiceStatus::NotInstalled {
return Err(anyhow::anyhow!("Service is already installed")); return Err(anyhow::anyhow!(
"Service is already installed! Service Name: {}",
self.lable
));
} }
self.service_manager self.service_manager
.install(ctx) .install(ctx.clone())
.map_err(|e| anyhow::anyhow!("failed to install service: {}", e)) .map_err(|e| anyhow::anyhow!("failed to install service: {:?}", e))?;
println!(
"Service installed successfully! Service Name: {}",
self.lable
);
Ok(())
} }
pub fn uninstall(&self) -> Result<(), Error> { pub fn uninstall(&self) -> Result<(), Error> {
@@ -769,7 +814,8 @@ impl Service {
writeln!(unit_content, "Type=simple")?; writeln!(unit_content, "Type=simple")?;
writeln!(unit_content, "WorkingDirectory={work_dir}")?; writeln!(unit_content, "WorkingDirectory={work_dir}")?;
writeln!(unit_content, "ExecStart={target_app} {args}")?; writeln!(unit_content, "ExecStart={target_app} {args}")?;
writeln!(unit_content, "Restart=Always")?; writeln!(unit_content, "Restart=always")?;
writeln!(unit_content, "RestartSec=1")?;
writeln!(unit_content, "LimitNOFILE=infinity")?; writeln!(unit_content, "LimitNOFILE=infinity")?;
writeln!(unit_content)?; writeln!(unit_content)?;
writeln!(unit_content, "[Install]")?; writeln!(unit_content, "[Install]")?;
@@ -1088,6 +1134,57 @@ async fn main() -> Result<(), Error> {
} }
} }
} }
SubCommand::Proxy => {
let mut entries = vec![];
let client = handler.get_tcp_proxy_client("tcp").await?;
let ret = client
.list_tcp_proxy_entry(BaseController::default(), Default::default())
.await;
entries.extend(ret.unwrap_or_default().entries);
let client = handler.get_tcp_proxy_client("kcp_src").await?;
let ret = client
.list_tcp_proxy_entry(BaseController::default(), Default::default())
.await;
entries.extend(ret.unwrap_or_default().entries);
let client = handler.get_tcp_proxy_client("kcp_dst").await?;
let ret = client
.list_tcp_proxy_entry(BaseController::default(), Default::default())
.await;
entries.extend(ret.unwrap_or_default().entries);
#[derive(tabled::Tabled)]
struct TableItem {
src: String,
dst: String,
start_time: String,
state: String,
transport_type: String,
}
let table_rows = entries
.iter()
.map(|e| TableItem {
src: SocketAddr::from(e.src.unwrap_or_default()).to_string(),
dst: SocketAddr::from(e.dst.unwrap_or_default()).to_string(),
start_time: chrono::DateTime::<chrono::Utc>::from_timestamp_millis(
(e.start_time * 1000) as i64,
)
.unwrap()
.with_timezone(&chrono::Local)
.format("%Y-%m-%d %H:%M:%S")
.to_string(),
state: format!("{:?}", TcpProxyEntryState::try_from(e.state).unwrap()),
transport_type: format!(
"{:?}",
TcpProxyEntryTransportType::try_from(e.transport_type).unwrap()
),
})
.collect::<Vec<_>>();
println!("{}", tabled::Table::new(table_rows).with(Style::modern()));
}
} }
Ok(()) Ok(())
+51 -1
View File
@@ -123,6 +123,13 @@ struct Cli {
)] )]
listeners: Vec<String>, listeners: Vec<String>,
#[arg(
long,
help = t!("core_clap.mapped_listeners").to_string(),
num_args = 0..
)]
mapped_listeners: Vec<String>,
#[arg( #[arg(
long, long,
help = t!("core_clap.no_listener").to_string(), help = t!("core_clap.no_listener").to_string(),
@@ -185,7 +192,7 @@ struct Cli {
#[arg( #[arg(
long, long,
help = t!("core_clap.multi_thread").to_string(), help = t!("core_clap.multi_thread").to_string(),
default_value = "false" default_value = "true"
)] )]
multi_thread: bool, multi_thread: bool,
@@ -300,6 +307,26 @@ struct Cli {
default_value = "none", default_value = "none",
)] )]
compression: String, compression: String,
#[arg(
long,
help = t!("core_clap.bind_device").to_string()
)]
bind_device: Option<bool>,
#[arg(
long,
help = t!("core_clap.enable_kcp_proxy").to_string(),
default_value = "false"
)]
enable_kcp_proxy: bool,
#[arg(
long,
help = t!("core_clap.disable_kcp_input").to_string(),
default_value = "false"
)]
disable_kcp_input: bool,
} }
rust_i18n::i18n!("locales", fallback = "en"); rust_i18n::i18n!("locales", fallback = "en");
@@ -422,6 +449,23 @@ impl TryFrom<&Cli> for TomlConfigLoader {
.collect(), .collect(),
); );
cfg.set_mapped_listeners(Some(
cli.mapped_listeners
.iter()
.map(|s| {
s.parse()
.with_context(|| format!("mapped listener is not a valid url: {}", s))
.unwrap()
})
.map(|s: url::Url| {
if s.port().is_none() {
panic!("mapped listener port is missing: {}", s);
}
s
})
.collect(),
));
for n in cli.proxy_networks.iter() { for n in cli.proxy_networks.iter() {
cfg.add_proxy_cidr( cfg.add_proxy_cidr(
n.parse() n.parse()
@@ -518,6 +562,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
f.relay_network_whitelist = wl.join(" "); f.relay_network_whitelist = wl.join(" ");
} }
f.disable_p2p = cli.disable_p2p; f.disable_p2p = cli.disable_p2p;
f.disable_udp_hole_punching = cli.disable_udp_hole_punching;
f.relay_all_peer_rpc = cli.relay_all_peer_rpc; f.relay_all_peer_rpc = cli.relay_all_peer_rpc;
if let Some(ipv6_listener) = cli.ipv6_listener.as_ref() { if let Some(ipv6_listener) = cli.ipv6_listener.as_ref() {
f.ipv6_listener = ipv6_listener f.ipv6_listener = ipv6_listener
@@ -534,6 +579,11 @@ impl TryFrom<&Cli> for TomlConfigLoader {
), ),
} }
.into(); .into();
if let Some(bind_device) = cli.bind_device {
f.bind_device = bind_device;
}
f.enable_kcp_proxy = cli.enable_kcp_proxy;
f.disable_kcp_input = cli.disable_kcp_input;
cfg.set_flags(f); cfg.set_flags(f);
cfg.set_exit_nodes(cli.exit_nodes.clone()); cfg.set_exit_nodes(cli.exit_nodes.clone());
+11 -5
View File
@@ -6,6 +6,7 @@ use std::{
time::Duration, time::Duration,
}; };
use anyhow::Context;
use pnet::packet::{ use pnet::packet::{
icmp::{self, echo_reply::MutableEchoReplyPacket, IcmpCode, IcmpTypes, MutableIcmpPacket}, icmp::{self, echo_reply::MutableEchoReplyPacket, IcmpCode, IcmpTypes, MutableIcmpPacket},
ip::IpNextHeaderProtocols, ip::IpNextHeaderProtocols,
@@ -212,7 +213,7 @@ impl IcmpProxy {
Err(e) => { Err(e) => {
tracing::warn!("create icmp socket failed: {:?}", e); tracing::warn!("create icmp socket failed: {:?}", e);
if !self.global_ctx.no_tun() { if !self.global_ctx.no_tun() {
return Err(e); return Err(anyhow::anyhow!("create icmp socket failed: {:?}", e).into());
} }
} }
} }
@@ -281,10 +282,15 @@ impl IcmpProxy {
dst_ip: Ipv4Addr, dst_ip: Ipv4Addr,
icmp_packet: &icmp::echo_request::EchoRequestPacket, icmp_packet: &icmp::echo_request::EchoRequestPacket,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.socket.lock().unwrap().as_ref().unwrap().send_to( self.socket
icmp_packet.packet(), .lock()
&SocketAddrV4::new(dst_ip.into(), 0).into(), .unwrap()
)?; .as_ref()
.with_context(|| "icmp socket not created")?
.send_to(
icmp_packet.packet(),
&SocketAddrV4::new(dst_ip.into(), 0).into(),
)?;
Ok(()) Ok(())
} }
+439
View File
@@ -0,0 +1,439 @@
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::{Arc, Weak},
time::Duration,
};
use anyhow::Context;
use bytes::Bytes;
use dashmap::DashMap;
use kcp_sys::{
endpoint::{ConnId, KcpEndpoint, KcpPacketReceiver},
ffi_safe::KcpConfig,
packet_def::KcpPacket,
stream::KcpStream,
};
use pnet::packet::{
ip::IpNextHeaderProtocols,
ipv4::Ipv4Packet,
tcp::{TcpFlags, TcpPacket},
Packet as _,
};
use prost::Message;
use tokio::{io::copy_bidirectional, task::JoinSet};
use super::{
tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy},
CidrSet,
};
use crate::{
common::{
error::Result,
global_ctx::{ArcGlobalCtx, GlobalCtx},
},
peers::{peer_manager::PeerManager, NicPacketFilter, PeerPacketFilter},
proto::{
cli::{
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc,
},
peer_rpc::KcpConnData,
rpc_types::{self, controller::BaseController},
},
tunnel::packet_def::{PacketType, PeerManagerHeader, ZCPacket},
};
/// Build a `KcpEndpoint` tuned for low latency.
///
/// Every new connection gets a "turbo" KCP config with the update
/// interval forced down to 5 ms.
fn create_kcp_endpoint() -> KcpEndpoint {
    let mut endpoint = KcpEndpoint::new();
    let config_factory = Box::new(|conv| {
        let mut config = KcpConfig::new_turbo(conv);
        config.interval = Some(5);
        config
    });
    endpoint.set_kcp_config_factory(config_factory);
    endpoint
}
// Peer-packet filter that feeds KCP packets arriving from the peer network
// into a local `KcpEndpoint`.
struct KcpEndpointFilter {
    kcp_endpoint: Arc<KcpEndpoint>,
    // true on the proxy source side (consumes KcpDst packets),
    // false on the destination side (consumes KcpSrc packets).
    is_src: bool,
}
#[async_trait::async_trait]
impl PeerPacketFilter for KcpEndpointFilter {
    /// Intercept KCP packets addressed to this endpoint.
    ///
    /// The source side consumes `KcpDst` packets and the destination side
    /// consumes `KcpSrc` packets; every other packet is returned untouched
    /// for the next filter in the pipeline.
    async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> {
        let packet_type = packet.peer_manager_header().unwrap().packet_type;
        let for_this_endpoint = (packet_type == PacketType::KcpSrc as u8 && !self.is_src)
            || (packet_type == PacketType::KcpDst as u8 && self.is_src);
        if !for_this_endpoint {
            return Some(packet);
        }
        // Feed the raw payload into the KCP state machine; a failed send is
        // deliberately ignored (best-effort delivery, endpoint may be gone).
        let _ = self
            .kcp_endpoint
            .input_sender_ref()
            .send(KcpPacket::from(packet.payload_bytes()))
            .await;
        None
    }
}
/// Pump packets produced by a `KcpEndpoint` out to the peer network.
///
/// Runs until `output_receiver` is closed. The destination peer id travels
/// in the KCP dst-session field on the source side and in the src-session
/// field on the destination side.
#[tracing::instrument]
async fn handle_kcp_output(
    peer_mgr: Arc<PeerManager>,
    mut output_receiver: KcpPacketReceiver,
    is_src: bool,
) {
    while let Some(packet) = output_receiver.recv().await {
        // The session-id fields carry peer ids; which one holds the
        // destination depends on the direction of this endpoint.
        let dst_peer_id = if is_src {
            packet.header().dst_session_id()
        } else {
            packet.header().src_session_id()
        };
        let packet_type = if is_src {
            PacketType::KcpSrc as u8
        } else {
            PacketType::KcpDst as u8
        };
        let mut packet = ZCPacket::new_with_payload(&packet.inner().freeze());
        // `packet_type` is already a u8; the former `packet_type as u8`
        // cast was redundant.
        packet.fill_peer_manager_hdr(peer_mgr.my_peer_id(), dst_peer_id, packet_type);
        if let Err(e) = peer_mgr.send_msg(packet, dst_peer_id).await {
            tracing::error!("failed to send kcp packet to peer: {:?}", e);
        }
    }
}
/// `NatDstConnector` implementation that reaches the NAT destination over a
/// KCP stream relayed through the peer network instead of a direct TCP dial.
#[derive(Debug, Clone)]
pub struct NatDstKcpConnector {
    kcp_endpoint: Arc<KcpEndpoint>,
    peer_mgr: Arc<PeerManager>,
}
#[async_trait::async_trait]
impl NatDstConnector for NatDstKcpConnector {
    type DstStream = KcpStream;

    /// Open a KCP stream to the peer that owns `nat_dst`.
    ///
    /// The original (src, dst) socket pair is serialized into the KCP
    /// connection data so the destination side can perform the real TCP
    /// connect. Only IPv4 destinations are supported, and exactly one
    /// candidate peer must resolve for the destination address.
    async fn connect(&self, src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> {
        let conn_data = KcpConnData {
            src: Some(src.into()),
            dst: Some(nat_dst.into()),
        };
        let (dst_peers, _) = match nat_dst {
            SocketAddr::V4(addr) => {
                let ip = addr.ip();
                self.peer_mgr.get_msg_dst_peer(&ip).await
            }
            SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()),
        };
        tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peers);
        if dst_peers.len() != 1 {
            return Err(anyhow::anyhow!("no dst peer found for nat dst: {}", nat_dst).into());
        }
        let ret = self
            .kcp_endpoint
            .connect(
                Duration::from_secs(10),
                self.peer_mgr.my_peer_id(),
                dst_peers[0],
                Bytes::from(conn_data.encode_to_vec()),
            )
            .await
            // `SocketAddr` implements Display; the former `.to_string()`
            // here was an unnecessary extra allocation.
            .with_context(|| format!("failed to connect to nat dst: {}", nat_dst))?;
        let stream = KcpStream::new(&self.kcp_endpoint, ret)
            .ok_or(anyhow::anyhow!("failed to create kcp stream"))?;
        Ok(stream)
    }

    /// KCP proxying cannot be ruled out by CIDR membership alone, so always
    /// take the per-packet slow path.
    fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool {
        true
    }

    /// Accept only self-addressed packets (the KCP src-side NIC filter
    /// rewrites `to_peer_id` to our own id before forwarding — see
    /// `TcpProxyForKcpSrc::try_process_packet_from_nic`).
    fn check_packet_from_peer(
        &self,
        _cidr_set: &CidrSet,
        _global_ctx: &GlobalCtx,
        hdr: &PeerManagerHeader,
        _ipv4: &Ipv4Packet,
    ) -> bool {
        hdr.from_peer_id == hdr.to_peer_id
    }

    fn transport_type(&self) -> TcpProxyEntryTransportType {
        TcpProxyEntryTransportType::Kcp
    }
}
// Newtype over the KCP-backed `TcpProxy` so an additional `NicPacketFilter`
// implementation can be attached to it.
#[derive(Clone)]
struct TcpProxyForKcpSrc(Arc<TcpProxy<NatDstKcpConnector>>);
/// Source side of the KCP proxy: captures outgoing TCP flows from the local
/// NIC and tunnels them to the destination peer over KCP.
pub struct KcpProxySrc {
    kcp_endpoint: Arc<KcpEndpoint>,
    peer_manager: Arc<PeerManager>,
    tcp_proxy: TcpProxyForKcpSrc,
    // Background tasks (the KCP output pump); JoinSet aborts them on drop.
    tasks: JoinSet<()>,
}
impl TcpProxyForKcpSrc {
    /// Check whether the peer owning `dst_ip` advertises the `kcp_input`
    /// feature flag, i.e. whether it can accept KCP proxy traffic.
    /// Returns false when the peer or its feature flags cannot be resolved.
    async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
        let peer_map: Arc<crate::peers::peer_map::PeerMap> =
            self.0.get_peer_manager().get_peer_map();
        let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
            return false;
        };
        let Some(feature_flag) = peer_map.get_peer_feature_flag(dst_peer_id).await else {
            return false;
        };
        feature_flag.kcp_input
    }
}
#[async_trait::async_trait]
impl NicPacketFilter for TcpProxyForKcpSrc {
    /// Decide whether an outgoing NIC packet should be claimed by the KCP
    /// proxy. Returns true when the packet has been claimed (its
    /// `to_peer_id` is then rewritten to our own id so the peer-packet
    /// filter picks it up).
    async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
        // Give the wrapped TcpProxy the first chance to claim the packet.
        let ret = self.0.try_process_packet_from_nic(zc_packet).await;
        if ret {
            return true;
        }
        // Only IPv4/TCP traffic is eligible for KCP proxying.
        let data = zc_packet.payload();
        let ip_packet = Ipv4Packet::new(data).unwrap();
        if ip_packet.get_version() != 4
            || ip_packet.get_next_level_protocol() != IpNextHeaderProtocols::Tcp
        {
            return false;
        }
        // if no connection is established, only allow SYN packet
        let tcp_packet = TcpPacket::new(ip_packet.payload()).unwrap();
        let is_syn = tcp_packet.get_flags() & TcpFlags::SYN != 0
            && tcp_packet.get_flags() & TcpFlags::ACK == 0;
        if is_syn {
            // only check dst feature flag when SYN packet
            if !self
                .check_dst_allow_kcp_input(&ip_packet.get_destination())
                .await
            {
                return false;
            }
        } else {
            // if not syn packet, only allow established connection
            if !self.0.is_tcp_proxy_connection(SocketAddr::new(
                IpAddr::V4(ip_packet.get_source()),
                tcp_packet.get_source(),
            )) {
                return false;
            }
        }
        if let Some(my_ipv4) = self.0.get_global_ctx().get_ipv4() {
            // this is a net-to-net packet, only allow it when smoltcp is enabled
            // because the syn-ack packet will not be through and handled by the tun device when
            // the source ip is in the local network
            if ip_packet.get_source() != my_ipv4.address() && !self.0.is_smoltcp_enabled() {
                return false;
            }
        };
        // Claim the packet: redirect it to ourselves so the KCP proxy's
        // peer-packet filter (see check_packet_from_peer) processes it.
        zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.0.get_my_peer_id().into();
        true
    }
}
impl KcpProxySrc {
    /// Create the source side of the KCP proxy: a running KCP endpoint whose
    /// output is pumped into the peer network, fronted by a `TcpProxy` that
    /// dials destinations through `NatDstKcpConnector`.
    pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
        let mut kcp_endpoint = create_kcp_endpoint();
        kcp_endpoint.run().await;
        // Take the output channel before the endpoint is wrapped in Arc.
        let output_receiver = kcp_endpoint.output_receiver().unwrap();
        let mut tasks = JoinSet::new();
        tasks.spawn(handle_kcp_output(
            peer_manager.clone(),
            output_receiver,
            true,
        ));
        let kcp_endpoint = Arc::new(kcp_endpoint);
        let tcp_proxy = TcpProxy::new(
            peer_manager.clone(),
            NatDstKcpConnector {
                kcp_endpoint: kcp_endpoint.clone(),
                peer_mgr: peer_manager.clone(),
            },
        );
        Self {
            kcp_endpoint,
            peer_manager,
            tcp_proxy: TcpProxyForKcpSrc(tcp_proxy),
            tasks,
        }
    }

    /// Register the NIC and peer packet filters, then start the underlying
    /// TCP proxy. Panics if the proxy fails to start.
    pub async fn start(&self) {
        self.peer_manager
            .add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone()))
            .await;
        self.peer_manager
            .add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone()))
            .await;
        self.peer_manager
            .add_packet_process_pipeline(Box::new(KcpEndpointFilter {
                kcp_endpoint: self.kcp_endpoint.clone(),
                is_src: true,
            }))
            .await;
        self.tcp_proxy.0.start(false).await.unwrap();
    }

    /// Access the underlying TCP proxy (e.g. for RPC inspection).
    pub fn get_tcp_proxy(&self) -> Arc<TcpProxy<NatDstKcpConnector>> {
        self.tcp_proxy.0.clone()
    }
}
/// Destination side of the KCP proxy: accepts KCP streams from peers and
/// performs the real TCP connect to the NAT destination.
pub struct KcpProxyDst {
    kcp_endpoint: Arc<KcpEndpoint>,
    peer_manager: Arc<PeerManager>,
    // Live connections keyed by KCP conn id; exposed via the RPC service.
    proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
    tasks: JoinSet<()>,
}
impl KcpProxyDst {
pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
let mut kcp_endpoint = create_kcp_endpoint();
kcp_endpoint.run().await;
let mut tasks = JoinSet::new();
let output_receiver = kcp_endpoint.output_receiver().unwrap();
tasks.spawn(handle_kcp_output(
peer_manager.clone(),
output_receiver,
false,
));
Self {
kcp_endpoint: Arc::new(kcp_endpoint),
peer_manager,
proxy_entries: Arc::new(DashMap::new()),
tasks,
}
}
#[tracing::instrument(ret)]
async fn handle_one_in_stream(
mut kcp_stream: KcpStream,
global_ctx: ArcGlobalCtx,
proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
) -> Result<()> {
let mut conn_data = kcp_stream.conn_data().clone();
let parsed_conn_data = KcpConnData::decode(&mut conn_data)
.with_context(|| format!("failed to decode kcp conn data: {:?}", conn_data))?;
let mut dst_socket: SocketAddr = parsed_conn_data
.dst
.ok_or(anyhow::anyhow!(
"failed to get dst socket from kcp conn data: {:?}",
parsed_conn_data
))?
.into();
let conn_id = kcp_stream.conn_id();
proxy_entries.insert(
conn_id,
TcpProxyEntry {
src: parsed_conn_data.src,
dst: parsed_conn_data.dst,
start_time: chrono::Local::now().timestamp() as u64,
state: TcpProxyEntryState::ConnectingDst.into(),
transport_type: TcpProxyEntryTransportType::Kcp.into(),
},
);
crate::defer! {
proxy_entries.remove(&conn_id);
}
if Some(dst_socket.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address())) {
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
}
tracing::debug!("kcp connect to dst socket: {:?}", dst_socket);
let _g = global_ctx.net_ns.guard();
let connector = NatDstTcpConnector {};
let mut ret = connector
.connect("0.0.0.0:0".parse().unwrap(), dst_socket)
.await?;
if let Some(mut e) = proxy_entries.get_mut(&kcp_stream.conn_id()) {
e.state = TcpProxyEntryState::Connected.into();
}
copy_bidirectional(&mut ret, &mut kcp_stream).await?;
Ok(())
}
async fn run_accept_task(&mut self) {
let kcp_endpoint = self.kcp_endpoint.clone();
let global_ctx = self.peer_manager.get_global_ctx().clone();
let proxy_entries = self.proxy_entries.clone();
self.tasks.spawn(async move {
while let Ok(conn) = kcp_endpoint.accept().await {
let stream = KcpStream::new(&kcp_endpoint, conn)
.ok_or(anyhow::anyhow!("failed to create kcp stream"))
.unwrap();
let global_ctx = global_ctx.clone();
let proxy_entries = proxy_entries.clone();
tokio::spawn(async move {
let _ = Self::handle_one_in_stream(stream, global_ctx, proxy_entries).await;
});
}
});
}
pub async fn start(&mut self) {
self.run_accept_task().await;
self.peer_manager
.add_packet_process_pipeline(Box::new(KcpEndpointFilter {
kcp_endpoint: self.kcp_endpoint.clone(),
is_src: false,
}))
.await;
}
}
// RPC service exposing `KcpProxyDst`'s live connection table. Holds only a
// weak reference so it does not extend the proxy's lifetime.
#[derive(Clone)]
pub struct KcpProxyDstRpcService(Weak<DashMap<ConnId, TcpProxyEntry>>);
impl KcpProxyDstRpcService {
    /// Build the RPC service from a `KcpProxyDst`, downgrading the shared
    /// entry table to a weak reference.
    pub fn new(kcp_proxy_dst: &KcpProxyDst) -> Self {
        Self(Arc::downgrade(&kcp_proxy_dst.proxy_entries))
    }
}
#[async_trait::async_trait]
impl TcpProxyRpc for KcpProxyDstRpcService {
    type Controller = BaseController;

    /// Return a snapshot of all currently tracked KCP proxy connections.
    /// Yields an empty list when the proxy has already been dropped.
    async fn list_tcp_proxy_entry(
        &self,
        _: BaseController,
        _request: ListTcpProxyEntryRequest,
    ) -> std::result::Result<ListTcpProxyEntryResponse, rpc_types::error::Error> {
        let mut reply = ListTcpProxyEntryResponse::default();
        if let Some(entries) = self.0.upgrade() {
            reply
                .entries
                .extend(entries.iter().map(|item| item.value().clone()));
        }
        Ok(reply)
    }
}
+3 -1
View File
@@ -15,8 +15,10 @@ pub mod fast_socks5;
#[cfg(feature = "socks5")] #[cfg(feature = "socks5")]
pub mod socks5; pub mod socks5;
pub mod kcp_proxy;
#[derive(Debug)] #[derive(Debug)]
struct CidrSet { pub(crate) struct CidrSet {
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
cidr_set: Arc<Mutex<Vec<cidr::IpCidr>>>, cidr_set: Arc<Mutex<Vec<cidr::IpCidr>>>,
tasks: JoinSet<()>, tasks: JoinSet<()>,
+270 -64
View File
@@ -1,3 +1,4 @@
use anyhow::Context;
use cidr::Ipv4Inet; use cidr::Ipv4Inet;
use core::panic; use core::panic;
use crossbeam::atomic::AtomicCell; use crossbeam::atomic::AtomicCell;
@@ -9,9 +10,9 @@ use pnet::packet::MutablePacket;
use pnet::packet::Packet; use pnet::packet::Packet;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}; use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4};
use std::sync::atomic::{AtomicBool, AtomicU16}; use std::sync::atomic::{AtomicBool, AtomicU16};
use std::sync::Arc; use std::sync::{Arc, Weak};
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use tokio::io::copy_bidirectional; use tokio::io::{copy_bidirectional, AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio::net::{TcpListener, TcpSocket, TcpStream}; use tokio::net::{TcpListener, TcpSocket, TcpStream};
use tokio::sync::{mpsc, Mutex}; use tokio::sync::{mpsc, Mutex};
use tokio::task::JoinSet; use tokio::task::JoinSet;
@@ -23,31 +24,94 @@ use crate::common::join_joinset_background;
use crate::peers::peer_manager::PeerManager; use crate::peers::peer_manager::PeerManager;
use crate::peers::{NicPacketFilter, PeerPacketFilter}; use crate::peers::{NicPacketFilter, PeerPacketFilter};
use crate::tunnel::packet_def::{PacketType, ZCPacket}; use crate::proto::cli::{
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc,
};
use crate::proto::rpc_types;
use crate::proto::rpc_types::controller::BaseController;
use crate::tunnel::packet_def::{PacketType, PeerManagerHeader, ZCPacket};
use super::CidrSet; use super::CidrSet;
#[cfg(feature = "smoltcp")] #[cfg(feature = "smoltcp")]
use super::tokio_smoltcp::{self, channel_device, Net, NetConfig}; use super::tokio_smoltcp::{self, channel_device, Net, NetConfig};
#[derive(Debug, Clone, Copy, PartialEq)] #[async_trait::async_trait]
enum NatDstEntryState { pub(crate) trait NatDstConnector: Send + Sync + Clone + 'static {
// receive syn packet but not start connecting to dst type DstStream: AsyncRead + AsyncWrite + Unpin + Send;
SynReceived,
// connecting to dst async fn connect(&self, src: SocketAddr, dst: SocketAddr) -> Result<Self::DstStream>;
ConnectingDst, fn check_packet_from_peer_fast(&self, cidr_set: &CidrSet, global_ctx: &GlobalCtx) -> bool;
// connected to dst fn check_packet_from_peer(
Connected, &self,
// connection closed cidr_set: &CidrSet,
Closed, global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader,
ipv4: &Ipv4Packet,
) -> bool;
fn transport_type(&self) -> TcpProxyEntryTransportType;
} }
#[derive(Debug, Clone)]
pub struct NatDstTcpConnector;
#[async_trait::async_trait]
impl NatDstConnector for NatDstTcpConnector {
type DstStream = TcpStream;
async fn connect(&self, _src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> {
let socket = TcpSocket::new_v4().unwrap();
if let Err(e) = socket.set_nodelay(true) {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
}
Ok(
tokio::time::timeout(Duration::from_secs(10), socket.connect(nat_dst))
.await?
.with_context(|| format!("connect to nat dst failed: {:?}", nat_dst))?,
)
}
fn check_packet_from_peer_fast(&self, cidr_set: &CidrSet, global_ctx: &GlobalCtx) -> bool {
!cidr_set.is_empty() || global_ctx.enable_exit_node() || global_ctx.no_tun()
}
fn check_packet_from_peer(
&self,
cidr_set: &CidrSet,
global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader,
ipv4: &Ipv4Packet,
) -> bool {
let is_exit_node = hdr.is_exit_node();
if !cidr_set.contains_v4(ipv4.get_destination())
&& !is_exit_node
&& !(global_ctx.no_tun()
&& Some(ipv4.get_destination())
== global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address))
{
return false;
}
true
}
fn transport_type(&self) -> TcpProxyEntryTransportType {
TcpProxyEntryTransportType::Tcp
}
}
type NatDstEntryState = TcpProxyEntryState;
#[derive(Debug)] #[derive(Debug)]
pub struct NatDstEntry { pub struct NatDstEntry {
id: uuid::Uuid, id: uuid::Uuid,
src: SocketAddr, src: SocketAddr,
dst: SocketAddr, dst: SocketAddr,
start_time: Instant, start_time: Instant,
start_time_local: chrono::DateTime<chrono::Local>,
tasks: Mutex<JoinSet<()>>, tasks: Mutex<JoinSet<()>>,
state: AtomicCell<NatDstEntryState>, state: AtomicCell<NatDstEntryState>,
} }
@@ -59,10 +123,21 @@ impl NatDstEntry {
src, src,
dst, dst,
start_time: Instant::now(), start_time: Instant::now(),
start_time_local: chrono::Local::now(),
tasks: Mutex::new(JoinSet::new()), tasks: Mutex::new(JoinSet::new()),
state: AtomicCell::new(NatDstEntryState::SynReceived), state: AtomicCell::new(NatDstEntryState::SynReceived),
} }
} }
fn into_pb(&self, transport_type: TcpProxyEntryTransportType) -> TcpProxyEntry {
TcpProxyEntry {
src: Some(self.src.clone().into()),
dst: Some(self.dst.clone().into()),
start_time: self.start_time_local.timestamp() as u64,
state: self.state.load().into(),
transport_type: transport_type.into(),
}
}
} }
enum ProxyTcpStream { enum ProxyTcpStream {
@@ -83,7 +158,24 @@ impl ProxyTcpStream {
} }
} }
pub async fn copy_bidirectional(&mut self, dst: &mut TcpStream) -> Result<()> { pub async fn shutdown(&mut self) -> Result<()> {
match self {
Self::KernelTcpStream(stream) => {
stream.shutdown().await?;
Ok(())
}
#[cfg(feature = "smoltcp")]
Self::SmolTcpStream(stream) => {
stream.shutdown().await?;
Ok(())
}
}
}
pub async fn copy_bidirectional<D: AsyncRead + AsyncWrite + Unpin>(
&mut self,
dst: &mut D,
) -> Result<()> {
match self { match self {
Self::KernelTcpStream(stream) => { Self::KernelTcpStream(stream) => {
copy_bidirectional(stream, dst).await?; copy_bidirectional(stream, dst).await?;
@@ -176,7 +268,7 @@ type ConnSockMap = Arc<DashMap<uuid::Uuid, ArcNatDstEntry>>;
type AddrConnSockMap = Arc<DashMap<SocketAddr, ArcNatDstEntry>>; type AddrConnSockMap = Arc<DashMap<SocketAddr, ArcNatDstEntry>>;
#[derive(Debug)] #[derive(Debug)]
pub struct TcpProxy { pub struct TcpProxy<C: NatDstConnector> {
global_ctx: Arc<GlobalCtx>, global_ctx: Arc<GlobalCtx>,
peer_manager: Arc<PeerManager>, peer_manager: Arc<PeerManager>,
local_port: AtomicU16, local_port: AtomicU16,
@@ -194,10 +286,12 @@ pub struct TcpProxy {
#[cfg(feature = "smoltcp")] #[cfg(feature = "smoltcp")]
smoltcp_net: Arc<Mutex<Option<Net>>>, smoltcp_net: Arc<Mutex<Option<Net>>>,
enable_smoltcp: Arc<AtomicBool>, enable_smoltcp: Arc<AtomicBool>,
connector: C,
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl PeerPacketFilter for TcpProxy { impl<C: NatDstConnector> PeerPacketFilter for TcpProxy<C> {
async fn try_process_packet_from_peer(&self, mut packet: ZCPacket) -> Option<ZCPacket> { async fn try_process_packet_from_peer(&self, mut packet: ZCPacket) -> Option<ZCPacket> {
if let Some(_) = self.try_handle_peer_packet(&mut packet).await { if let Some(_) = self.try_handle_peer_packet(&mut packet).await {
if self if self
@@ -221,10 +315,10 @@ impl PeerPacketFilter for TcpProxy {
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl NicPacketFilter for TcpProxy { impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) { async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
let Some(my_ipv4) = self.get_local_ip() else { let Some(my_ipv4) = self.get_local_ip() else {
return; return false;
}; };
let data = zc_packet.payload(); let data = zc_packet.payload();
@@ -233,25 +327,33 @@ impl NicPacketFilter for TcpProxy {
|| ip_packet.get_source() != my_ipv4 || ip_packet.get_source() != my_ipv4
|| ip_packet.get_next_level_protocol() != IpNextHeaderProtocols::Tcp || ip_packet.get_next_level_protocol() != IpNextHeaderProtocols::Tcp
{ {
return; return false;
} }
let tcp_packet = TcpPacket::new(ip_packet.payload()).unwrap(); let tcp_packet = TcpPacket::new(ip_packet.payload()).unwrap();
if tcp_packet.get_source() != self.get_local_port() { if tcp_packet.get_source() != self.get_local_port() {
return; return false;
} }
let dst_addr = SocketAddr::V4(SocketAddrV4::new( let mut dst_addr = SocketAddr::V4(SocketAddrV4::new(
ip_packet.get_destination(), ip_packet.get_destination(),
tcp_packet.get_destination(), tcp_packet.get_destination(),
)); ));
let mut need_transform_dst = false;
// for kcp proxy, the src ip of nat entry will be converted from my ip to fake ip
// here we need to convert it back
if !self.is_smoltcp_enabled() && dst_addr.ip() == Self::get_fake_local_ipv4(my_ipv4) {
dst_addr.set_ip(IpAddr::V4(my_ipv4));
need_transform_dst = true;
}
tracing::trace!(dst_addr = ?dst_addr, "tcp packet try find entry"); tracing::trace!(dst_addr = ?dst_addr, "tcp packet try find entry");
let entry = if let Some(entry) = self.addr_conn_map.get(&dst_addr) { let entry = if let Some(entry) = self.addr_conn_map.get(&dst_addr) {
entry entry
} else { } else {
let Some(syn_entry) = self.syn_map.get(&dst_addr) else { let Some(syn_entry) = self.syn_map.get(&dst_addr) else {
return; return false;
}; };
syn_entry syn_entry
}; };
@@ -267,9 +369,15 @@ impl NicPacketFilter for TcpProxy {
.mut_peer_manager_header() .mut_peer_manager_header()
.unwrap() .unwrap()
.set_no_proxy(true); .set_no_proxy(true);
if need_transform_dst {
zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.get_my_peer_id().into();
}
let mut ip_packet = MutableIpv4Packet::new(zc_packet.mut_payload()).unwrap(); let mut ip_packet = MutableIpv4Packet::new(zc_packet.mut_payload()).unwrap();
ip_packet.set_source(ip); ip_packet.set_source(ip);
if need_transform_dst {
ip_packet.set_destination(my_ipv4);
}
let dst = ip_packet.get_destination(); let dst = ip_packet.get_destination();
let mut tcp_packet = MutableTcpPacket::new(ip_packet.payload_mut()).unwrap(); let mut tcp_packet = MutableTcpPacket::new(ip_packet.payload_mut()).unwrap();
@@ -280,12 +388,15 @@ impl NicPacketFilter for TcpProxy {
Self::update_ip_packet_checksum(&mut ip_packet); Self::update_ip_packet_checksum(&mut ip_packet);
tracing::trace!(dst_addr = ?dst_addr, nat_entry = ?nat_entry, packet = ?ip_packet, "tcp packet after modified"); tracing::trace!(dst_addr = ?dst_addr, nat_entry = ?nat_entry, packet = ?ip_packet, "tcp packet after modified");
true
} }
} }
impl TcpProxy { impl<C: NatDstConnector> TcpProxy<C> {
pub fn new(global_ctx: Arc<GlobalCtx>, peer_manager: Arc<PeerManager>) -> Arc<Self> { pub fn new(peer_manager: Arc<PeerManager>, connector: C) -> Arc<Self> {
let (smoltcp_stack_sender, smoltcp_stack_receiver) = mpsc::channel::<ZCPacket>(1000); let (smoltcp_stack_sender, smoltcp_stack_receiver) = mpsc::channel::<ZCPacket>(1000);
let global_ctx = peer_manager.get_global_ctx();
Arc::new(Self { Arc::new(Self {
global_ctx: global_ctx.clone(), global_ctx: global_ctx.clone(),
@@ -307,6 +418,8 @@ impl TcpProxy {
smoltcp_net: Arc::new(Mutex::new(None)), smoltcp_net: Arc::new(Mutex::new(None)),
enable_smoltcp: Arc::new(AtomicBool::new(true)), enable_smoltcp: Arc::new(AtomicBool::new(true)),
connector,
}) })
} }
@@ -326,15 +439,17 @@ impl TcpProxy {
ip_packet.set_checksum(pnet::packet::ipv4::checksum(&ip_packet.to_immutable())); ip_packet.set_checksum(pnet::packet::ipv4::checksum(&ip_packet.to_immutable()));
} }
pub async fn start(self: &Arc<Self>) -> Result<()> { pub async fn start(self: &Arc<Self>, add_pipeline: bool) -> Result<()> {
self.run_syn_map_cleaner().await?; self.run_syn_map_cleaner().await?;
self.run_listener().await?; self.run_listener().await?;
self.peer_manager if add_pipeline {
.add_packet_process_pipeline(Box::new(self.clone())) self.peer_manager
.await; .add_packet_process_pipeline(Box::new(self.clone()))
self.peer_manager .await;
.add_nic_packet_process_pipeline(Box::new(self.clone())) self.peer_manager
.await; .add_nic_packet_process_pipeline(Box::new(self.clone()))
.await;
}
join_joinset_background(self.tasks.clone(), "TcpProxy".to_owned()); join_joinset_background(self.tasks.clone(), "TcpProxy".to_owned());
Ok(()) Ok(())
@@ -364,7 +479,10 @@ impl TcpProxy {
async fn get_proxy_listener(&self) -> Result<ProxyTcpListener> { async fn get_proxy_listener(&self) -> Result<ProxyTcpListener> {
#[cfg(feature = "smoltcp")] #[cfg(feature = "smoltcp")]
if self.global_ctx.get_flags().use_smoltcp || self.global_ctx.no_tun() { if self.global_ctx.get_flags().use_smoltcp
|| self.global_ctx.no_tun()
|| cfg!(target_os = "android")
{
// use smoltcp network stack // use smoltcp network stack
self.local_port self.local_port
.store(8899, std::sync::atomic::Ordering::Relaxed); .store(8899, std::sync::atomic::Ordering::Relaxed);
@@ -458,11 +576,32 @@ impl TcpProxy {
let syn_map = self.syn_map.clone(); let syn_map = self.syn_map.clone();
let conn_map = self.conn_map.clone(); let conn_map = self.conn_map.clone();
let addr_conn_map = self.addr_conn_map.clone(); let addr_conn_map = self.addr_conn_map.clone();
let connector = self.connector.clone();
let accept_task = async move { let accept_task = async move {
let conn_map = conn_map.clone(); let conn_map = conn_map.clone();
while let Ok((tcp_stream, socket_addr)) = tcp_listener.accept().await { loop {
let accept_ret = tcp_listener.accept().await;
let Ok((tcp_stream, mut socket_addr)) = accept_ret else {
tracing::error!("nat tcp listener accept failed: {:?}", accept_ret.err());
continue;
};
let my_ip = global_ctx
.get_ipv4()
.as_ref()
.map(Ipv4Inet::address)
.unwrap_or(Ipv4Addr::UNSPECIFIED);
if socket_addr.ip() == Self::get_fake_local_ipv4(my_ip) {
socket_addr.set_ip(IpAddr::V4(my_ip));
}
let Some(entry) = syn_map.get(&socket_addr) else { let Some(entry) = syn_map.get(&socket_addr) else {
tracing::error!("tcp connection from unknown source: {:?}", socket_addr); tracing::error!(
?my_ip,
?socket_addr,
"tcp connection from unknown source, ignore it"
);
continue; continue;
}; };
tracing::info!( tracing::info!(
@@ -483,6 +622,7 @@ impl TcpProxy {
assert!(old_nat_val.is_none()); assert!(old_nat_val.is_none());
tasks.lock().unwrap().spawn(Self::connect_to_nat_dst( tasks.lock().unwrap().spawn(Self::connect_to_nat_dst(
connector.clone(),
global_ctx.clone(), global_ctx.clone(),
tcp_stream, tcp_stream,
conn_map.clone(), conn_map.clone(),
@@ -490,8 +630,6 @@ impl TcpProxy {
entry_clone, entry_clone,
)); ));
} }
tracing::error!("nat tcp listener exited");
panic!("nat tcp listener exited");
}; };
self.tasks self.tasks
.lock() .lock()
@@ -511,6 +649,7 @@ impl TcpProxy {
} }
async fn connect_to_nat_dst( async fn connect_to_nat_dst(
connector: C,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
src_tcp_stream: ProxyTcpStream, src_tcp_stream: ProxyTcpStream,
conn_map: ConnSockMap, conn_map: ConnSockMap,
@@ -521,12 +660,6 @@ impl TcpProxy {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e); tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
} }
let _guard = global_ctx.net_ns.guard();
let socket = TcpSocket::new_v4().unwrap();
if let Err(e) = socket.set_nodelay(true) {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
}
let nat_dst = if Some(nat_entry.dst.ip()) let nat_dst = if Some(nat_entry.dst.ip())
== global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address())) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
{ {
@@ -537,12 +670,8 @@ impl TcpProxy {
nat_entry.dst nat_entry.dst
}; };
let Ok(Ok(dst_tcp_stream)) = tokio::time::timeout( let _guard = global_ctx.net_ns.guard();
Duration::from_secs(10), let Ok(dst_tcp_stream) = connector.connect(nat_entry.src, nat_dst).await else {
TcpSocket::new_v4().unwrap().connect(nat_dst),
)
.await
else {
tracing::error!("connect to dst failed: {:?}", nat_entry); tracing::error!("connect to dst failed: {:?}", nat_entry);
nat_entry.state.store(NatDstEntryState::Closed); nat_entry.state.store(NatDstEntryState::Closed);
Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry); Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry);
@@ -567,7 +696,7 @@ impl TcpProxy {
async fn handle_nat_connection( async fn handle_nat_connection(
mut src_tcp_stream: ProxyTcpStream, mut src_tcp_stream: ProxyTcpStream,
mut dst_tcp_stream: TcpStream, mut dst_tcp_stream: C::DstStream,
conn_map: ConnSockMap, conn_map: ConnSockMap,
addr_conn_map: AddrConnSockMap, addr_conn_map: AddrConnSockMap,
nat_entry: ArcNatDstEntry, nat_entry: ArcNatDstEntry,
@@ -578,6 +707,18 @@ impl TcpProxy {
tracing::info!(nat_entry = ?nat_entry_clone, ret = ?ret, "nat tcp connection closed"); tracing::info!(nat_entry = ?nat_entry_clone, ret = ?ret, "nat tcp connection closed");
nat_entry_clone.state.store(NatDstEntryState::Closed); nat_entry_clone.state.store(NatDstEntryState::Closed);
let ret = src_tcp_stream.shutdown().await;
tracing::info!(nat_entry = ?nat_entry_clone, ret = ?ret, "src tcp stream shutdown");
let ret = dst_tcp_stream.shutdown().await;
tracing::info!(nat_entry = ?nat_entry_clone, ret = ?ret, "dst tcp stream shutdown");
drop(src_tcp_stream);
drop(dst_tcp_stream);
// sleep later so the fin packet can be processed
tokio::time::sleep(Duration::from_secs(10)).await;
Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry_clone); Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry_clone);
}); });
} }
@@ -586,11 +727,12 @@ impl TcpProxy {
self.local_port.load(std::sync::atomic::Ordering::Relaxed) self.local_port.load(std::sync::atomic::Ordering::Relaxed)
} }
pub fn get_my_peer_id(&self) -> u32 {
self.peer_manager.my_peer_id()
}
pub fn get_local_ip(&self) -> Option<Ipv4Addr> { pub fn get_local_ip(&self) -> Option<Ipv4Addr> {
if self if self.is_smoltcp_enabled() {
.enable_smoltcp
.load(std::sync::atomic::Ordering::Relaxed)
{
Some(Ipv4Addr::new(192, 88, 99, 254)) Some(Ipv4Addr::new(192, 88, 99, 254))
} else { } else {
self.global_ctx self.global_ctx
@@ -600,17 +742,30 @@ impl TcpProxy {
} }
} }
pub fn get_global_ctx(&self) -> &ArcGlobalCtx {
&self.global_ctx
}
pub fn is_smoltcp_enabled(&self) -> bool {
self.enable_smoltcp
.load(std::sync::atomic::Ordering::Relaxed)
}
pub fn get_fake_local_ipv4(local_ip: Ipv4Addr) -> Ipv4Addr {
let octets = local_ip.octets();
Ipv4Addr::new(octets[0], octets[1], octets[2], 0)
}
async fn try_handle_peer_packet(&self, packet: &mut ZCPacket) -> Option<()> { async fn try_handle_peer_packet(&self, packet: &mut ZCPacket) -> Option<()> {
if self.cidr_set.is_empty() if !self
&& !self.global_ctx.enable_exit_node() .connector
&& !self.global_ctx.no_tun() .check_packet_from_peer_fast(&self.cidr_set, &self.global_ctx)
{ {
return None; return None;
} }
let ipv4_addr = self.get_local_ip()?; let ipv4_addr = self.get_local_ip()?;
let hdr = packet.peer_manager_header().unwrap(); let hdr = packet.peer_manager_header().unwrap().clone();
let is_exit_node = hdr.is_exit_node();
if hdr.packet_type != PacketType::Data as u8 || hdr.is_no_proxy() { if hdr.packet_type != PacketType::Data as u8 || hdr.is_no_proxy() {
return None; return None;
@@ -623,11 +778,9 @@ impl TcpProxy {
return None; return None;
} }
if !self.cidr_set.contains_v4(ipv4.get_destination()) if !self
&& !is_exit_node .connector
&& !(self.global_ctx.no_tun() .check_packet_from_peer(&self.cidr_set, &self.global_ctx, &hdr, &ipv4)
&& Some(ipv4.get_destination())
== self.global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address))
{ {
return None; return None;
} }
@@ -658,6 +811,10 @@ impl TcpProxy {
} }
let mut ip_packet = MutableIpv4Packet::new(payload_bytes).unwrap(); let mut ip_packet = MutableIpv4Packet::new(payload_bytes).unwrap();
if !self.is_smoltcp_enabled() && source_ip == ipv4_addr {
// modify the source so the response packet can be handled by tun device
ip_packet.set_source(Self::get_fake_local_ipv4(ipv4_addr));
}
ip_packet.set_destination(ipv4_addr); ip_packet.set_destination(ipv4_addr);
let source = ip_packet.get_source(); let source = ip_packet.get_source();
@@ -672,4 +829,53 @@ impl TcpProxy {
Some(()) Some(())
} }
pub fn get_peer_manager(&self) -> &Arc<PeerManager> {
&self.peer_manager
}
pub fn is_tcp_proxy_connection(&self, src: SocketAddr) -> bool {
self.syn_map.contains_key(&src) || self.addr_conn_map.contains_key(&src)
}
pub fn list_proxy_entries(&self) -> Vec<TcpProxyEntry> {
let mut entries: Vec<TcpProxyEntry> = Vec::new();
let transport_type = self.connector.transport_type();
for entry in self.syn_map.iter() {
entries.push(entry.value().as_ref().into_pb(transport_type));
}
for entry in self.conn_map.iter() {
entries.push(entry.value().as_ref().into_pb(transport_type));
}
entries
}
}
#[derive(Clone)]
pub struct TcpProxyRpcService<C: NatDstConnector> {
tcp_proxy: Weak<TcpProxy<C>>,
}
#[async_trait::async_trait]
impl<C: NatDstConnector> TcpProxyRpc for TcpProxyRpcService<C> {
type Controller = BaseController;
async fn list_tcp_proxy_entry(
&self,
_: BaseController,
_request: ListTcpProxyEntryRequest, // Accept request of type HelloRequest
) -> std::result::Result<ListTcpProxyEntryResponse, rpc_types::error::Error> {
let mut reply = ListTcpProxyEntryResponse::default();
if let Some(tcp_proxy) = self.tcp_proxy.upgrade() {
reply.entries = tcp_proxy.list_proxy_entries();
}
Ok(reply)
}
}
impl<C: NatDstConnector> TcpProxyRpcService<C> {
pub fn new(tcp_proxy: Arc<TcpProxy<C>>) -> Self {
Self {
tcp_proxy: Arc::downgrade(&tcp_proxy),
}
}
} }
+1 -1
View File
@@ -43,7 +43,7 @@ pub struct BufferRxToken(Packet);
impl RxToken for BufferRxToken { impl RxToken for BufferRxToken {
fn consume<R, F>(mut self, f: F) -> R fn consume<R, F>(mut self, f: F) -> R
where where
F: FnOnce(&mut [u8]) -> R, F: FnOnce(&[u8]) -> R,
{ {
let p = &mut self.0; let p = &mut self.0;
let result = f(p); let result = f(p);
@@ -2,6 +2,7 @@ use parking_lot::Mutex;
use smoltcp::{ use smoltcp::{
iface::{SocketHandle as InnerSocketHandle, SocketSet}, iface::{SocketHandle as InnerSocketHandle, SocketSet},
socket::tcp, socket::tcp,
time::Duration,
}; };
use std::{ use std::{
ops::{Deref, DerefMut}, ops::{Deref, DerefMut},
@@ -53,6 +54,8 @@ impl SocketAlloctor {
let tx_buffer = tcp::SocketBuffer::new(vec![0; self.buffer_size.tcp_tx_size]); let tx_buffer = tcp::SocketBuffer::new(vec![0; self.buffer_size.tcp_tx_size]);
let mut tcp = tcp::Socket::new(rx_buffer, tx_buffer); let mut tcp = tcp::Socket::new(rx_buffer, tx_buffer);
tcp.set_nagle_enabled(false); tcp.set_nagle_enabled(false);
tcp.set_keep_alive(Some(Duration::from_secs(10)));
tcp.set_timeout(Some(Duration::from_secs(60)));
tcp tcp
} }
+66 -43
View File
@@ -4,6 +4,7 @@ use std::{
time::Duration, time::Duration,
}; };
use bytes::{BufMut, BytesMut};
use cidr::Ipv4Inet; use cidr::Ipv4Inet;
use crossbeam::atomic::AtomicCell; use crossbeam::atomic::AtomicCell;
use dashmap::DashMap; use dashmap::DashMap;
@@ -24,11 +25,11 @@ use tokio::{
use tracing::Level; use tracing::Level;
use crate::{ use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId}, common::{error::Error, global_ctx::ArcGlobalCtx, scoped_task::ScopedTask, PeerId},
gateway::ip_reassembler::compose_ipv4_packet, gateway::ip_reassembler::compose_ipv4_packet,
peers::{peer_manager::PeerManager, PeerPacketFilter}, peers::{peer_manager::PeerManager, PeerPacketFilter},
tunnel::{ tunnel::{
common::setup_sokcet2, common::{reserve_buf, setup_sokcet2},
packet_def::{PacketType, ZCPacket}, packet_def::{PacketType, ZCPacket},
}, },
}; };
@@ -139,59 +140,81 @@ impl UdpNatEntry {
mut packet_sender: Sender<ZCPacket>, mut packet_sender: Sender<ZCPacket>,
virtual_ipv4: Ipv4Addr, virtual_ipv4: Ipv4Addr,
) { ) {
let mut buf = [0u8; 65536]; let (s, mut r) = tachyonix::channel(128);
let mut udp_body: &mut [u8] = unsafe { std::mem::transmute(&mut buf[20 + 8..]) };
let mut ip_id = 1;
loop { let self_clone = self.clone();
let (len, src_socket) = match timeout( let recv_task = ScopedTask::from(tokio::spawn(async move {
Duration::from_secs(120), let mut cur_buf = BytesMut::new();
self.socket.recv_from(&mut udp_body), loop {
) if self_clone
.await .stopped
{ .load(std::sync::atomic::Ordering::Relaxed)
Ok(Ok(x)) => x, {
Ok(Err(err)) => {
tracing::error!(?err, "udp nat recv failed");
break; break;
} }
Err(err) => {
tracing::error!(?err, "udp nat recv timeout"); reserve_buf(&mut cur_buf, 64 * 1024 + 28, 128 * 1024 + 28);
break; assert_eq!(cur_buf.len(), 0);
unsafe {
cur_buf.advance_mut(28);
} }
};
tracing::trace!(?len, ?src_socket, "udp nat packet response received"); let (len, src_socket) = match timeout(
Duration::from_secs(120),
self_clone.socket.recv_buf_from(&mut cur_buf),
)
.await
{
Ok(Ok(x)) => x,
Ok(Err(err)) => {
tracing::error!(?err, "udp nat recv failed");
break;
}
Err(err) => {
tracing::error!(?err, "udp nat recv timeout");
break;
}
};
if self.stopped.load(std::sync::atomic::Ordering::Relaxed) { tracing::trace!(?len, ?src_socket, "udp nat packet response received");
break;
let ret_buf = cur_buf.split();
s.send((ret_buf, len, src_socket)).await.unwrap();
} }
}));
let SocketAddr::V4(mut src_v4) = src_socket else { let self_clone = self.clone();
continue; let send_task = ScopedTask::from(tokio::spawn(async move {
}; let mut ip_id = 1;
while let Ok((mut packet, len, src_socket)) = r.recv().await {
let SocketAddr::V4(mut src_v4) = src_socket else {
continue;
};
self.mark_active(); self_clone.mark_active();
if src_v4.ip().is_loopback() { if src_v4.ip().is_loopback() {
src_v4.set_ip(virtual_ipv4); src_v4.set_ip(virtual_ipv4);
}
let Ok(_) = Self::compose_ipv4_packet(
&self_clone,
&mut packet_sender,
&mut packet,
&src_v4,
len,
1280,
ip_id,
)
.await
else {
break;
};
ip_id = ip_id.wrapping_add(1);
} }
}));
let Ok(_) = Self::compose_ipv4_packet( let _ = tokio::join!(recv_task, send_task);
&self,
&mut packet_sender,
&mut buf,
&src_v4,
len,
1200,
ip_id,
)
.await
else {
break;
};
ip_id = ip_id.wrapping_add(1);
}
self.stop(); self.stop();
} }
+46 -8
View File
@@ -17,13 +17,14 @@ use crate::connector::direct::DirectConnectorManager;
use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManager}; use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManager};
use crate::connector::udp_hole_punch::UdpHolePunchConnector; use crate::connector::udp_hole_punch::UdpHolePunchConnector;
use crate::gateway::icmp_proxy::IcmpProxy; use crate::gateway::icmp_proxy::IcmpProxy;
use crate::gateway::tcp_proxy::TcpProxy; use crate::gateway::kcp_proxy::{KcpProxyDst, KcpProxyDstRpcService, KcpProxySrc};
use crate::gateway::tcp_proxy::{NatDstTcpConnector, TcpProxy, TcpProxyRpcService};
use crate::gateway::udp_proxy::UdpProxy; use crate::gateway::udp_proxy::UdpProxy;
use crate::peer_center::instance::PeerCenterInstance; use crate::peer_center::instance::PeerCenterInstance;
use crate::peers::peer_conn::PeerConnId; use crate::peers::peer_conn::PeerConnId;
use crate::peers::peer_manager::{PeerManager, RouteAlgoType}; use crate::peers::peer_manager::{PeerManager, RouteAlgoType};
use crate::peers::rpc_service::PeerManagerRpcService; use crate::peers::rpc_service::PeerManagerRpcService;
use crate::peers::PacketRecvChanReceiver; use crate::peers::{create_packet_recv_chan, recv_packet_from_chan, PacketRecvChanReceiver};
use crate::proto::cli::VpnPortalRpc; use crate::proto::cli::VpnPortalRpc;
use crate::proto::cli::{GetVpnPortalInfoRequest, GetVpnPortalInfoResponse, VpnPortalInfo}; use crate::proto::cli::{GetVpnPortalInfoRequest, GetVpnPortalInfoResponse, VpnPortalInfo};
use crate::proto::peer_rpc::PeerCenterRpcServer; use crate::proto::peer_rpc::PeerCenterRpcServer;
@@ -40,7 +41,7 @@ use crate::gateway::socks5::Socks5Server;
#[derive(Clone)] #[derive(Clone)]
struct IpProxy { struct IpProxy {
tcp_proxy: Arc<TcpProxy>, tcp_proxy: Arc<TcpProxy<NatDstTcpConnector>>,
icmp_proxy: Arc<IcmpProxy>, icmp_proxy: Arc<IcmpProxy>,
udp_proxy: Arc<UdpProxy>, udp_proxy: Arc<UdpProxy>,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
@@ -49,7 +50,7 @@ struct IpProxy {
impl IpProxy { impl IpProxy {
fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Result<Self, Error> { fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Result<Self, Error> {
let tcp_proxy = TcpProxy::new(global_ctx.clone(), peer_manager.clone()); let tcp_proxy = TcpProxy::new(peer_manager.clone(), NatDstTcpConnector {});
let icmp_proxy = IcmpProxy::new(global_ctx.clone(), peer_manager.clone()) let icmp_proxy = IcmpProxy::new(global_ctx.clone(), peer_manager.clone())
.with_context(|| "create icmp proxy failed")?; .with_context(|| "create icmp proxy failed")?;
let udp_proxy = UdpProxy::new(global_ctx.clone(), peer_manager.clone()) let udp_proxy = UdpProxy::new(global_ctx.clone(), peer_manager.clone())
@@ -72,7 +73,7 @@ impl IpProxy {
} }
self.started.store(true, Ordering::Relaxed); self.started.store(true, Ordering::Relaxed);
self.tcp_proxy.start().await?; self.tcp_proxy.start(true).await?;
self.icmp_proxy.start().await?; self.icmp_proxy.start().await?;
self.udp_proxy.start().await?; self.udp_proxy.start().await?;
Ok(()) Ok(())
@@ -116,6 +117,9 @@ pub struct Instance {
ip_proxy: Option<IpProxy>, ip_proxy: Option<IpProxy>,
kcp_proxy_src: Option<KcpProxySrc>,
kcp_proxy_dst: Option<KcpProxyDst>,
peer_center: Arc<PeerCenterInstance>, peer_center: Arc<PeerCenterInstance>,
vpn_portal: Arc<Mutex<Box<dyn VpnPortal>>>, vpn_portal: Arc<Mutex<Box<dyn VpnPortal>>>,
@@ -137,7 +141,7 @@ impl Instance {
global_ctx.config.dump() global_ctx.config.dump()
); );
let (peer_packet_sender, peer_packet_receiver) = tokio::sync::mpsc::channel(100); let (peer_packet_sender, peer_packet_receiver) = create_packet_recv_chan();
let id = global_ctx.get_id(); let id = global_ctx.get_id();
@@ -193,6 +197,8 @@ impl Instance {
udp_hole_puncher: Arc::new(Mutex::new(udp_hole_puncher)), udp_hole_puncher: Arc::new(Mutex::new(udp_hole_puncher)),
ip_proxy: None, ip_proxy: None,
kcp_proxy_src: None,
kcp_proxy_dst: None,
peer_center, peer_center,
@@ -230,7 +236,7 @@ impl Instance {
let mut tasks = JoinSet::new(); let mut tasks = JoinSet::new();
tasks.spawn(async move { tasks.spawn(async move {
let mut packet_recv = packet_recv.lock().await; let mut packet_recv = packet_recv.lock().await;
while let Some(packet) = packet_recv.recv().await { while let Ok(packet) = recv_packet_from_chan(&mut packet_recv).await {
tracing::trace!("packet consumed by mock nic ctx: {:?}", packet); tracing::trace!("packet consumed by mock nic ctx: {:?}", packet);
} }
}); });
@@ -374,7 +380,17 @@ impl Instance {
self.check_dhcp_ip_conflict(); self.check_dhcp_ip_conflict();
} }
self.run_rpc_server().await?; if self.global_ctx.get_flags().enable_kcp_proxy {
let src_proxy = KcpProxySrc::new(self.get_peer_manager()).await;
src_proxy.start().await;
self.kcp_proxy_src = Some(src_proxy);
}
if !self.global_ctx.get_flags().disable_kcp_input {
let mut dst_proxy = KcpProxyDst::new(self.get_peer_manager()).await;
dst_proxy.start().await;
self.kcp_proxy_dst = Some(dst_proxy);
}
// run after tun device created, so listener can bind to tun device, which may be required by win 10 // run after tun device created, so listener can bind to tun device, which may be required by win 10
self.ip_proxy = Some(IpProxy::new( self.ip_proxy = Some(IpProxy::new(
@@ -401,6 +417,8 @@ impl Instance {
#[cfg(feature = "socks5")] #[cfg(feature = "socks5")]
self.socks5_server.run().await?; self.socks5_server.run().await?;
self.run_rpc_server().await?;
Ok(()) Ok(())
} }
@@ -523,6 +541,26 @@ impl Instance {
s.registry() s.registry()
.register(VpnPortalRpcServer::new(vpn_portal_rpc), ""); .register(VpnPortalRpcServer::new(vpn_portal_rpc), "");
if let Some(ip_proxy) = self.ip_proxy.as_ref() {
s.registry().register(
TcpProxyRpcServer::new(TcpProxyRpcService::new(ip_proxy.tcp_proxy.clone())),
"tcp",
);
}
if let Some(kcp_proxy) = self.kcp_proxy_src.as_ref() {
s.registry().register(
TcpProxyRpcServer::new(TcpProxyRpcService::new(kcp_proxy.get_tcp_proxy())),
"kcp_src",
);
}
if let Some(kcp_proxy) = self.kcp_proxy_dst.as_ref() {
s.registry().register(
TcpProxyRpcServer::new(KcpProxyDstRpcService::new(kcp_proxy)),
"kcp_dst",
);
}
let _g = self.global_ctx.net_ns.guard(); let _g = self.global_ctx.net_ns.guard();
Ok(s.serve().await.with_context(|| "rpc server start failed")?) Ok(s.serve().await.with_context(|| "rpc server start failed")?)
} }
+153 -72
View File
@@ -1,8 +1,7 @@
use std::{fmt::Debug, sync::Arc}; use std::{fmt::Debug, sync::Arc};
use anyhow::Context;
use async_trait::async_trait; use async_trait::async_trait;
use tokio::{sync::Mutex, task::JoinSet}; use tokio::task::JoinSet;
#[cfg(feature = "quic")] #[cfg(feature = "quic")]
use crate::tunnel::quic::QUICTunnelListener; use crate::tunnel::quic::QUICTunnelListener;
@@ -63,16 +62,20 @@ impl TunnelHandlerForListener for PeerManager {
} }
} }
#[derive(Debug, Clone)] pub trait ListenerCreatorTrait: Fn() -> Box<dyn TunnelListener> + Send + Sync {}
struct Listener { impl<T: Send + Sync> ListenerCreatorTrait for T where T: Fn() -> Box<dyn TunnelListener> + Send {}
inner: Arc<Mutex<dyn TunnelListener>>, pub type ListenerCreator = Box<dyn ListenerCreatorTrait>;
#[derive(Clone)]
struct ListenerFactory {
creator_fn: Arc<ListenerCreator>,
must_succ: bool, must_succ: bool,
} }
pub struct ListenerManager<H> { pub struct ListenerManager<H> {
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
net_ns: NetNS, net_ns: NetNS,
listeners: Vec<Listener>, listeners: Vec<ListenerFactory>,
peer_manager: Arc<H>, peer_manager: Arc<H>,
tasks: JoinSet<()>, tasks: JoinSet<()>,
@@ -90,31 +93,39 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
} }
pub async fn prepare_listeners(&mut self) -> Result<(), Error> { pub async fn prepare_listeners(&mut self) -> Result<(), Error> {
let self_id = self.global_ctx.get_id();
self.add_listener( self.add_listener(
RingTunnelListener::new( move || {
format!("ring://{}", self.global_ctx.get_id()) Box::new(RingTunnelListener::new(
.parse() format!("ring://{}", self_id).parse().unwrap(),
.unwrap(), ))
), },
true, true,
) )
.await?; .await?;
for l in self.global_ctx.config.get_listener_uris().iter() { for l in self.global_ctx.config.get_listener_uris().iter() {
let Ok(lis) = get_listener_by_url(l, self.global_ctx.clone()) else { let l = l.clone();
let Ok(_) = get_listener_by_url(&l, self.global_ctx.clone()) else {
let msg = format!("failed to get listener by url: {}, maybe not supported", l); let msg = format!("failed to get listener by url: {}, maybe not supported", l);
self.global_ctx self.global_ctx
.issue_event(GlobalCtxEvent::ListenerAddFailed(l.clone(), msg)); .issue_event(GlobalCtxEvent::ListenerAddFailed(l.clone(), msg));
continue; continue;
}; };
self.add_listener(lis, true).await?; let ctx = self.global_ctx.clone();
self.add_listener(move || get_listener_by_url(&l, ctx.clone()).unwrap(), true)
.await?;
} }
if self.global_ctx.config.get_flags().enable_ipv6 { if self.global_ctx.config.get_flags().enable_ipv6 {
let ipv6_listener = self.global_ctx.config.get_flags().ipv6_listener.clone(); let ipv6_listener = self.global_ctx.config.get_flags().ipv6_listener.clone();
let _ = self let _ = self
.add_listener( .add_listener(
UdpTunnelListener::new(ipv6_listener.parse().unwrap()), move || {
Box::new(UdpTunnelListener::new(
ipv6_listener.clone().parse().unwrap(),
))
},
false, false,
) )
.await?; .await?;
@@ -123,85 +134,91 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
Ok(()) Ok(())
} }
pub async fn add_listener<L>(&mut self, listener: L, must_succ: bool) -> Result<(), Error> pub async fn add_listener<C: ListenerCreatorTrait + 'static>(
where &mut self,
L: TunnelListener + 'static, creator: C,
{ must_succ: bool,
let listener = Arc::new(Mutex::new(listener)); ) -> Result<(), Error> {
self.listeners.push(Listener { self.listeners.push(ListenerFactory {
inner: listener, creator_fn: Arc::new(Box::new(creator)),
must_succ, must_succ,
}); });
Ok(()) Ok(())
} }
#[tracing::instrument] #[tracing::instrument(skip(creator))]
async fn run_listener( async fn run_listener(
listener: Arc<Mutex<dyn TunnelListener>>, creator: Arc<ListenerCreator>,
peer_manager: Arc<H>, peer_manager: Arc<H>,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
) { ) {
let mut l = listener.lock().await;
global_ctx.add_running_listener(l.local_url());
global_ctx.issue_event(GlobalCtxEvent::ListenerAdded(l.local_url()));
loop { loop {
let ret = match l.accept().await { let mut l = (creator)();
Ok(ret) => ret, let _g = global_ctx.net_ns.guard();
match l.listen().await {
Ok(_) => {
global_ctx.add_running_listener(l.local_url());
global_ctx.issue_event(GlobalCtxEvent::ListenerAdded(l.local_url()));
}
Err(e) => { Err(e) => {
global_ctx.issue_event(GlobalCtxEvent::ListenerAcceptFailed( global_ctx.issue_event(GlobalCtxEvent::ListenerAddFailed(
l.local_url(), l.local_url(),
e.to_string(), format!("error: {:?}, retry listen later...", e),
)); ));
tracing::error!(?e, ?l, "listener accept error"); tracing::error!(?e, ?l, "listener listen error");
tokio::time::sleep(std::time::Duration::from_secs(1)).await; tokio::time::sleep(std::time::Duration::from_secs(1)).await;
continue; continue;
} }
}; }
loop {
let ret = match l.accept().await {
Ok(ret) => ret,
Err(e) => {
global_ctx.issue_event(GlobalCtxEvent::ListenerAcceptFailed(
l.local_url(),
format!("error: {:?}, retry listen later...", e),
));
tracing::error!(?e, ?l, "listener accept error");
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
break;
}
};
let tunnel_info = ret.info().unwrap(); let tunnel_info = ret.info().unwrap();
global_ctx.issue_event(GlobalCtxEvent::ConnectionAccepted( global_ctx.issue_event(GlobalCtxEvent::ConnectionAccepted(
tunnel_info tunnel_info
.local_addr .local_addr
.clone() .clone()
.unwrap_or_default() .unwrap_or_default()
.to_string(), .to_string(),
tunnel_info tunnel_info
.remote_addr .remote_addr
.clone() .clone()
.unwrap_or_default() .unwrap_or_default()
.to_string(), .to_string(),
)); ));
tracing::info!(ret = ?ret, "conn accepted"); tracing::info!(ret = ?ret, "conn accepted");
let peer_manager = peer_manager.clone(); let peer_manager = peer_manager.clone();
let global_ctx = global_ctx.clone(); let global_ctx = global_ctx.clone();
tokio::spawn(async move { tokio::spawn(async move {
let server_ret = peer_manager.handle_tunnel(ret).await; let server_ret = peer_manager.handle_tunnel(ret).await;
if let Err(e) = &server_ret { if let Err(e) = &server_ret {
global_ctx.issue_event(GlobalCtxEvent::ConnectionError( global_ctx.issue_event(GlobalCtxEvent::ConnectionError(
tunnel_info.local_addr.unwrap_or_default().to_string(), tunnel_info.local_addr.unwrap_or_default().to_string(),
tunnel_info.remote_addr.unwrap_or_default().to_string(), tunnel_info.remote_addr.unwrap_or_default().to_string(),
e.to_string(), e.to_string(),
)); ));
tracing::error!(error = ?e, "handle conn error"); tracing::error!(error = ?e, "handle conn error");
} }
}); });
}
} }
} }
pub async fn run(&mut self) -> Result<(), Error> { pub async fn run(&mut self) -> Result<(), Error> {
for listener in &self.listeners { for listener in &self.listeners {
let _guard = self.net_ns.guard();
let addr = listener.inner.lock().await.local_url();
tracing::warn!("run listener: {:?}", listener);
listener
.inner
.lock()
.await
.listen()
.await
.with_context(|| format!("failed to add listener {}", addr))?;
self.tasks.spawn(Self::run_listener( self.tasks.spawn(Self::run_listener(
listener.inner.clone(), listener.creator_fn.clone(),
self.peer_manager.clone(), self.peer_manager.clone(),
self.global_ctx.clone(), self.global_ctx.clone(),
)); ));
@@ -213,12 +230,14 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::sync::atomic::{AtomicI32, Ordering};
use futures::{SinkExt, StreamExt}; use futures::{SinkExt, StreamExt};
use tokio::time::timeout; use tokio::time::timeout;
use crate::{ use crate::{
common::global_ctx::tests::get_mock_global_ctx, common::global_ctx::tests::get_mock_global_ctx,
tunnel::{packet_def::ZCPacket, ring::RingTunnelConnector, TunnelConnector}, tunnel::{packet_def::ZCPacket, ring::RingTunnelConnector, TunnelConnector, TunnelError},
}; };
use super::*; use super::*;
@@ -245,12 +264,18 @@ mod tests {
let ring_id = format!("ring://{}", uuid::Uuid::new_v4()); let ring_id = format!("ring://{}", uuid::Uuid::new_v4());
let ring_id_clone = ring_id.clone();
listener_mgr listener_mgr
.add_listener(RingTunnelListener::new(ring_id.parse().unwrap()), true) .add_listener(
move || Box::new(RingTunnelListener::new(ring_id_clone.parse().unwrap())),
true,
)
.await .await
.unwrap(); .unwrap();
listener_mgr.run().await.unwrap(); listener_mgr.run().await.unwrap();
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
let connect_once = |ring_id| async move { let connect_once = |ring_id| async move {
let tunnel = RingTunnelConnector::new(ring_id).connect().await.unwrap(); let tunnel = RingTunnelConnector::new(ring_id).connect().await.unwrap();
let (mut recv, _send) = tunnel.split(); let (mut recv, _send) = tunnel.split();
@@ -269,4 +294,60 @@ mod tests {
.await .await
.unwrap(); .unwrap();
} }
#[tokio::test]
async fn retry_listen() {
let counter = Arc::new(AtomicI32::new(0));
let drop_counter = Arc::new(AtomicI32::new(0));
struct MockListener {
counter: Arc<AtomicI32>,
drop_counter: Arc<AtomicI32>,
}
#[async_trait::async_trait]
impl TunnelListener for MockListener {
fn local_url(&self) -> url::Url {
"mock://".parse().unwrap()
}
async fn listen(&mut self) -> Result<(), TunnelError> {
self.counter.fetch_add(1, Ordering::Relaxed);
Ok(())
}
async fn accept(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
Err(TunnelError::BufferFull)
}
}
impl Drop for MockListener {
fn drop(&mut self) {
self.drop_counter.fetch_add(1, Ordering::Relaxed);
}
}
let handler = Arc::new(MockListenerHandler {});
let mut listener_mgr = ListenerManager::new(get_mock_global_ctx(), handler.clone());
let counter_clone = counter.clone();
let drop_counter_clone = drop_counter.clone();
listener_mgr
.add_listener(
move || {
Box::new(MockListener {
counter: counter_clone.clone(),
drop_counter: drop_counter_clone.clone(),
})
},
true,
)
.await
.unwrap();
listener_mgr.run().await.unwrap();
tokio::time::sleep(std::time::Duration::from_secs(3)).await;
assert!(counter.load(Ordering::Relaxed) >= 2);
assert!(drop_counter.load(Ordering::Relaxed) >= 1);
}
} }
+11 -2
View File
@@ -12,7 +12,7 @@ use crate::{
global_ctx::{ArcGlobalCtx, GlobalCtxEvent}, global_ctx::{ArcGlobalCtx, GlobalCtxEvent},
ifcfg::{IfConfiger, IfConfiguerTrait}, ifcfg::{IfConfiger, IfConfiguerTrait},
}, },
peers::{peer_manager::PeerManager, PacketRecvChanReceiver}, peers::{peer_manager::PeerManager, recv_packet_from_chan, PacketRecvChanReceiver},
tunnel::{ tunnel::{
common::{reserve_buf, FramedWriter, TunnelWrapper, ZCPacketToBytes}, common::{reserve_buf, FramedWriter, TunnelWrapper, ZCPacketToBytes},
packet_def::{ZCPacket, ZCPacketType, TAIL_RESERVED_SIZE}, packet_def::{ZCPacket, ZCPacketType, TAIL_RESERVED_SIZE},
@@ -349,6 +349,15 @@ impl VirtualNic {
{ {
let dev_name = self.global_ctx.get_flags().dev_name; let dev_name = self.global_ctx.get_flags().dev_name;
match crate::arch::windows::add_self_to_firewall_allowlist() {
Ok(_) => tracing::info!("add_self_to_firewall_allowlist successful!"),
Err(e) => {
println!("Failed to add Easytier to firewall allowlist, Subnet proxy and KCP proxy may not work properly. error: {}", e);
println!("You can add firewall rules manually, or use --use-smoltcp to run with user-space TCP/IP stack.");
println!("");
}
}
match checkreg(&dev_name) { match checkreg(&dev_name) {
Ok(_) => tracing::trace!("delete successful!"), Ok(_) => tracing::trace!("delete successful!"),
Err(e) => tracing::error!("An error occurred: {}", e), Err(e) => tracing::error!("An error occurred: {}", e),
@@ -610,7 +619,7 @@ impl NicCtx {
self.tasks.spawn(async move { self.tasks.spawn(async move {
// unlock until coroutine finished // unlock until coroutine finished
let mut channel = channel.lock().await; let mut channel = channel.lock().await;
while let Some(packet) = channel.recv().await { while let Ok(packet) = recv_packet_from_chan(&mut channel).await {
tracing::trace!( tracing::trace!(
"[USER_PACKET] forward packet from peers to nic. packet: {:?}", "[USER_PACKET] forward packet from peers to nic. packet: {:?}",
packet packet
+26 -1
View File
@@ -256,7 +256,7 @@ impl EasyTierLauncher {
fetch_node_info, fetch_node_info,
)); ));
if let Err(e) = ret { if let Err(e) = ret {
error_msg.write().unwrap().replace(e.to_string()); error_msg.write().unwrap().replace(format!("{:?}", e));
} }
instance_alive.store(false, std::sync::atomic::Ordering::Relaxed); instance_alive.store(false, std::sync::atomic::Ordering::Relaxed);
notifier.notify_one(); notifier.notify_one();
@@ -525,6 +525,31 @@ impl NetworkConfig {
if let Some(dev_name) = self.dev_name.clone() { if let Some(dev_name) = self.dev_name.clone() {
flags.dev_name = dev_name; flags.dev_name = dev_name;
} }
if let Some(use_smoltcp) = self.use_smoltcp {
flags.use_smoltcp = use_smoltcp;
}
if let Some(enable_kcp_proxy) = self.enable_kcp_proxy {
flags.enable_kcp_proxy = enable_kcp_proxy;
}
if let Some(disable_kcp_input) = self.disable_kcp_input {
flags.disable_kcp_input = disable_kcp_input;
}
if let Some(disable_p2p) = self.disable_p2p {
flags.disable_p2p = disable_p2p;
}
if let Some(bind_device) = self.bind_device {
flags.bind_device = bind_device;
}
if let Some(no_tun) = self.no_tun {
flags.no_tun = no_tun;
}
cfg.set_flags(flags); cfg.set_flags(flags);
Ok(cfg) Ok(cfg)
} }
@@ -37,11 +37,13 @@ use crate::{
}; };
use super::{ use super::{
create_packet_recv_chan,
peer_conn::PeerConn, peer_conn::PeerConn,
peer_map::PeerMap, peer_map::PeerMap,
peer_ospf_route::PeerRoute, peer_ospf_route::PeerRoute,
peer_rpc::{PeerRpcManager, PeerRpcManagerTransport}, peer_rpc::{PeerRpcManager, PeerRpcManagerTransport},
peer_rpc_service::DirectConnectorManagerRpcServer, peer_rpc_service::DirectConnectorManagerRpcServer,
recv_packet_from_chan,
route_trait::NextHopPolicy, route_trait::NextHopPolicy,
PacketRecvChan, PacketRecvChanReceiver, PacketRecvChan, PacketRecvChanReceiver,
}; };
@@ -79,7 +81,7 @@ impl ForeignNetworkEntry {
) -> Self { ) -> Self {
let foreign_global_ctx = Self::build_foreign_global_ctx(&network, global_ctx.clone()); let foreign_global_ctx = Self::build_foreign_global_ctx(&network, global_ctx.clone());
let (packet_sender, packet_recv) = mpsc::channel(64); let (packet_sender, packet_recv) = create_packet_recv_chan();
let peer_map = Arc::new(PeerMap::new( let peer_map = Arc::new(PeerMap::new(
packet_sender, packet_sender,
@@ -251,7 +253,7 @@ impl ForeignNetworkEntry {
let network_name = self.network.network_name.clone(); let network_name = self.network.network_name.clone();
self.tasks.lock().await.spawn(async move { self.tasks.lock().await.spawn(async move {
while let Some(zc_packet) = recv.recv().await { while let Ok(zc_packet) = recv_packet_from_chan(&mut recv).await {
let Some(hdr) = zc_packet.peer_manager_header() else { let Some(hdr) = zc_packet.peer_manager_header() else {
tracing::warn!("invalid packet, skip"); tracing::warn!("invalid packet, skip");
continue; continue;
@@ -622,7 +624,7 @@ mod tests {
network: &str, network: &str,
secret: &str, secret: &str,
) -> Arc<PeerManager> { ) -> Arc<PeerManager> {
let (s, _r) = tokio::sync::mpsc::channel(1000); let (s, _r) = create_packet_recv_chan();
let peer_mgr = Arc::new(PeerManager::new( let peer_mgr = Arc::new(PeerManager::new(
RouteAlgoType::Ospf, RouteAlgoType::Ospf,
get_mock_global_ctx_with_network(Some(NetworkIdentity::new( get_mock_global_ctx_with_network(Some(NetworkIdentity::new(
+17 -1
View File
@@ -33,11 +33,27 @@ pub trait PeerPacketFilter {
#[async_trait::async_trait] #[async_trait::async_trait]
#[auto_impl::auto_impl(Arc)] #[auto_impl::auto_impl(Arc)]
pub trait NicPacketFilter { pub trait NicPacketFilter {
async fn try_process_packet_from_nic(&self, data: &mut ZCPacket); async fn try_process_packet_from_nic(&self, data: &mut ZCPacket) -> bool;
} }
type BoxPeerPacketFilter = Box<dyn PeerPacketFilter + Send + Sync>; type BoxPeerPacketFilter = Box<dyn PeerPacketFilter + Send + Sync>;
type BoxNicPacketFilter = Box<dyn NicPacketFilter + Send + Sync>; type BoxNicPacketFilter = Box<dyn NicPacketFilter + Send + Sync>;
// pub type PacketRecvChan = tachyonix::Sender<ZCPacket>;
// pub type PacketRecvChanReceiver = tachyonix::Receiver<ZCPacket>;
// pub fn create_packet_recv_chan() -> (PacketRecvChan, PacketRecvChanReceiver) {
// tachyonix::channel(128)
// }
pub type PacketRecvChan = tokio::sync::mpsc::Sender<ZCPacket>; pub type PacketRecvChan = tokio::sync::mpsc::Sender<ZCPacket>;
pub type PacketRecvChanReceiver = tokio::sync::mpsc::Receiver<ZCPacket>; pub type PacketRecvChanReceiver = tokio::sync::mpsc::Receiver<ZCPacket>;
pub fn create_packet_recv_chan() -> (PacketRecvChan, PacketRecvChanReceiver) {
tokio::sync::mpsc::channel(128)
}
pub async fn recv_packet_from_chan(
packet_recv_chan_receiver: &mut PacketRecvChanReceiver,
) -> Result<ZCPacket, anyhow::Error> {
packet_recv_chan_receiver
.recv()
.await
.ok_or(anyhow::anyhow!("recv_packet_from_chan failed"))
}
+4 -4
View File
@@ -171,11 +171,11 @@ impl Drop for Peer {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use tokio::{sync::mpsc, time::timeout}; use tokio::time::timeout;
use crate::{ use crate::{
common::{global_ctx::tests::get_mock_global_ctx, new_peer_id}, common::{global_ctx::tests::get_mock_global_ctx, new_peer_id},
peers::peer_conn::PeerConn, peers::{create_packet_recv_chan, peer_conn::PeerConn},
tunnel::ring::create_ring_tunnel_pair, tunnel::ring::create_ring_tunnel_pair,
}; };
@@ -183,8 +183,8 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn close_peer() { async fn close_peer() {
let (local_packet_send, _local_packet_recv) = mpsc::channel(10); let (local_packet_send, _local_packet_recv) = create_packet_recv_chan();
let (remote_packet_send, _remote_packet_recv) = mpsc::channel(10); let (remote_packet_send, _remote_packet_recv) = create_packet_recv_chan();
let global_ctx = get_mock_global_ctx(); let global_ctx = get_mock_global_ctx();
let local_peer = Peer::new(new_peer_id(), local_packet_send, global_ctx.clone()); let local_peer = Peer::new(new_peer_id(), local_packet_send, global_ctx.clone());
let remote_peer = Peer::new(new_peer_id(), remote_packet_send, global_ctx.clone()); let remote_peer = Peer::new(new_peer_id(), remote_packet_send, global_ctx.clone());
+5 -14
View File
@@ -413,6 +413,7 @@ mod tests {
use crate::common::global_ctx::tests::get_mock_global_ctx; use crate::common::global_ctx::tests::get_mock_global_ctx;
use crate::common::new_peer_id; use crate::common::new_peer_id;
use crate::common::scoped_task::ScopedTask; use crate::common::scoped_task::ScopedTask;
use crate::peers::create_packet_recv_chan;
use crate::tunnel::filter::tests::DropSendTunnelFilter; use crate::tunnel::filter::tests::DropSendTunnelFilter;
use crate::tunnel::filter::PacketRecorderTunnelFilter; use crate::tunnel::filter::PacketRecorderTunnelFilter;
use crate::tunnel::ring::create_ring_tunnel_pair; use crate::tunnel::ring::create_ring_tunnel_pair;
@@ -484,13 +485,6 @@ mod tests {
let c_recorder = Arc::new(DropSendTunnelFilter::new(drop_start, drop_end)); let c_recorder = Arc::new(DropSendTunnelFilter::new(drop_start, drop_end));
let c = TunnelWithFilter::new(c, c_recorder.clone()); let c = TunnelWithFilter::new(c, c_recorder.clone());
let s = if drop_both {
let s_recorder = Arc::new(DropSendTunnelFilter::new(drop_start, drop_end));
Box::new(TunnelWithFilter::new(s, s_recorder.clone()))
} else {
s
};
let c_peer_id = new_peer_id(); let c_peer_id = new_peer_id();
let s_peer_id = new_peer_id(); let s_peer_id = new_peer_id();
@@ -503,9 +497,8 @@ mod tests {
); );
s_peer.set_close_event_sender(tokio::sync::mpsc::channel(1).0); s_peer.set_close_event_sender(tokio::sync::mpsc::channel(1).0);
s_peer s_peer.start_recv_loop(create_packet_recv_chan().0).await;
.start_recv_loop(tokio::sync::mpsc::channel(200).0) // do not start ping for s, s only reponde to ping from c
.await;
assert!(c_ret.is_ok()); assert!(c_ret.is_ok());
assert!(s_ret.is_ok()); assert!(s_ret.is_ok());
@@ -513,9 +506,7 @@ mod tests {
let (close_send, mut close_recv) = tokio::sync::mpsc::channel(1); let (close_send, mut close_recv) = tokio::sync::mpsc::channel(1);
c_peer.set_close_event_sender(close_send); c_peer.set_close_event_sender(close_send);
c_peer.start_pingpong(); c_peer.start_pingpong();
c_peer c_peer.start_recv_loop(create_packet_recv_chan().0).await;
.start_recv_loop(tokio::sync::mpsc::channel(200).0)
.await;
let throughput = c_peer.throughput.clone(); let throughput = c_peer.throughput.clone();
let _t = ScopedTask::from(tokio::spawn(async move { let _t = ScopedTask::from(tokio::spawn(async move {
@@ -547,7 +538,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn peer_conn_pingpong_bothside_timeout() { async fn peer_conn_pingpong_bothside_timeout() {
peer_conn_pingpong_test_common(4, 12, true, true).await; peer_conn_pingpong_test_common(3, 14, true, true).await;
} }
#[tokio::test] #[tokio::test]
+94 -75
View File
@@ -12,6 +12,7 @@ use tokio::{
task::JoinSet, task::JoinSet,
time::{timeout, Interval}, time::{timeout, Interval},
}; };
use tracing::Instrument;
use crate::{ use crate::{
common::{error::Error, PeerId}, common::{error::Error, PeerId},
@@ -25,7 +26,7 @@ use crate::{
struct PingIntervalController { struct PingIntervalController {
throughput: Arc<Throughput>, throughput: Arc<Throughput>,
loss_rate_20: Arc<WindowLatency>, loss_counter: Arc<AtomicU32>,
interval: Interval, interval: Interval,
@@ -38,13 +39,27 @@ struct PingIntervalController {
last_throughput: Throughput, last_throughput: Throughput,
} }
impl std::fmt::Debug for PingIntervalController {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PingIntervalController")
.field("throughput", &self.throughput)
.field("loss_counter", &self.loss_counter)
.field("logic_time", &self.logic_time)
.field("last_send_logic_time", &self.last_send_logic_time)
.field("backoff_idx", &self.backoff_idx)
.field("max_backoff_idx", &self.max_backoff_idx)
.field("last_throughput", &self.last_throughput)
.finish()
}
}
impl PingIntervalController { impl PingIntervalController {
fn new(throughput: Arc<Throughput>, loss_rate_20: Arc<WindowLatency>) -> Self { fn new(throughput: Arc<Throughput>, loss_counter: Arc<AtomicU32>) -> Self {
let last_throughput = *throughput; let last_throughput = *throughput;
Self { Self {
throughput, throughput,
loss_rate_20, loss_counter,
interval: tokio::time::interval(Duration::from_secs(1)), interval: tokio::time::interval(Duration::from_secs(1)),
logic_time: 0, logic_time: 0,
last_send_logic_time: 0, last_send_logic_time: 0,
@@ -69,13 +84,12 @@ impl PingIntervalController {
self.throughput.rx_packets() > self.last_throughput.rx_packets() self.throughput.rx_packets() > self.last_throughput.rx_packets()
} }
#[tracing::instrument]
fn should_send_ping(&mut self) -> bool { fn should_send_ping(&mut self) -> bool {
if self.loss_rate_20.get_latency_us::<f64>() > 0.0 { tracing::trace!(?self, "check should_send_ping");
if self.loss_counter.load(Ordering::Relaxed) > 0 {
self.backoff_idx = 0; self.backoff_idx = 0;
} else if self.tx_increase() } else if self.tx_increase() && !self.rx_increase() {
&& !self.rx_increase()
&& self.logic_time - self.last_send_logic_time > 2
{
// if tx increase but rx not increase, we should do pingpong more frequently // if tx increase but rx not increase, we should do pingpong more frequently
self.backoff_idx = 0; self.backoff_idx = 0;
} }
@@ -210,8 +224,8 @@ impl PeerConnPinger {
// one with 1% precision // one with 1% precision
let loss_rate_stats_1 = WindowLatency::new(100); let loss_rate_stats_1 = WindowLatency::new(100);
// one with 20% precision, so we can fast fail this conn. // disconnect the connection if lost 5 pingpong consecutively
let loss_rate_stats_20 = Arc::new(WindowLatency::new(5)); let loss_counter = Arc::new(AtomicU32::new(0));
let stopped = Arc::new(AtomicU32::new(0)); let stopped = Arc::new(AtomicU32::new(0));
@@ -220,98 +234,103 @@ impl PeerConnPinger {
let ctrl_resp_sender = self.ctrl_sender.clone(); let ctrl_resp_sender = self.ctrl_sender.clone();
let stopped_clone = stopped.clone(); let stopped_clone = stopped.clone();
let mut controller = let mut controller =
PingIntervalController::new(self.throughput_stats.clone(), loss_rate_stats_20.clone()); PingIntervalController::new(self.throughput_stats.clone(), loss_counter.clone());
self.tasks.spawn(async move { self.tasks.spawn(
let mut req_seq = 0; async move {
loop { let mut req_seq = 0;
controller.tick().await; loop {
controller.tick().await;
if stopped_clone.load(Ordering::Relaxed) != 0 { if stopped_clone.load(Ordering::Relaxed) != 0 {
return Ok(()); return Ok(());
}
while pingpong_tasks.len() > 5 {
pingpong_tasks.join_next().await;
}
if !controller.should_send_ping() {
continue;
}
let mut sink = sink.clone();
let receiver = ctrl_resp_sender.subscribe();
let ping_res_sender = ping_res_sender.clone();
pingpong_tasks.spawn(async move {
let mut receiver = receiver.resubscribe();
let pingpong_once_ret = Self::do_pingpong_once(
my_node_id,
peer_id,
&mut sink,
&mut receiver,
req_seq,
)
.await;
if let Err(e) = ping_res_sender.send(pingpong_once_ret).await {
tracing::info!(?e, "pingpong task send result error, exit..");
};
});
req_seq = req_seq.wrapping_add(1);
} }
while pingpong_tasks.len() > 5 {
pingpong_tasks.join_next().await;
}
if !controller.should_send_ping() {
continue;
}
let mut sink = sink.clone();
let receiver = ctrl_resp_sender.subscribe();
let ping_res_sender = ping_res_sender.clone();
pingpong_tasks.spawn(async move {
let mut receiver = receiver.resubscribe();
let pingpong_once_ret = Self::do_pingpong_once(
my_node_id,
peer_id,
&mut sink,
&mut receiver,
req_seq,
)
.await;
if let Err(e) = ping_res_sender.send(pingpong_once_ret).await {
tracing::info!(?e, "pingpong task send result error, exit..");
};
});
req_seq = req_seq.wrapping_add(1);
} }
}); .instrument(tracing::info_span!(
"pingpong_controller",
?my_node_id,
?peer_id
)),
);
let mut counter: u64 = 0;
let throughput = self.throughput_stats.clone(); let throughput = self.throughput_stats.clone();
let mut last_rx_packets = throughput.rx_packets(); let mut last_rx_packets = throughput.rx_packets();
while let Some(ret) = ping_res_receiver.recv().await { while let Some(ret) = ping_res_receiver.recv().await {
counter += 1;
if let Ok(lat) = ret { if let Ok(lat) = ret {
latency_stats.record_latency(lat as u32); latency_stats.record_latency(lat as u32);
loss_rate_stats_1.record_latency(0); loss_rate_stats_1.record_latency(0);
loss_rate_stats_20.record_latency(0);
} else { } else {
loss_rate_stats_1.record_latency(1); loss_rate_stats_1.record_latency(1);
loss_rate_stats_20.record_latency(1); loss_counter.fetch_add(1, Ordering::Relaxed);
} }
let loss_rate_20: f64 = loss_rate_stats_20.get_latency_us();
let loss_rate_1: f64 = loss_rate_stats_1.get_latency_us(); let loss_rate_1: f64 = loss_rate_stats_1.get_latency_us();
tracing::trace!( tracing::trace!(
?ret, ?ret,
?self, ?self,
?loss_rate_1, ?loss_rate_1,
?loss_rate_20,
"pingpong task recv pingpong_once result" "pingpong task recv pingpong_once result"
); );
if (counter > 5 && loss_rate_20 > 0.74) || (counter > 150 && loss_rate_1 > 0.20) { let current_rx_packets = throughput.rx_packets();
let current_rx_packets = throughput.rx_packets(); if last_rx_packets != current_rx_packets {
let need_close = if last_rx_packets != current_rx_packets { // if we receive some packet from peers, reset the counter to avoid conn close.
// if we receive some packet from peers, we should relax the condition // conn will close only if we have 5 continous round pingpong loss after no packet received.
counter > 50 && loss_rate_1 > 0.5 loss_counter.store(0, Ordering::Relaxed);
}
// TODO: wait more time to see if the loss rate is still high after no rx tracing::debug!(
} else { "loss_counter: {:?}, loss_rate_1: {}, cur_rx_packets: {}, last_rx: {}, node_id: {}",
true loss_counter,
}; loss_rate_1,
current_rx_packets,
last_rx_packets,
my_node_id
);
if need_close { if loss_counter.load(Ordering::Relaxed) >= 5 {
tracing::warn!( tracing::warn!(
?ret, ?ret,
?self, ?self,
?loss_rate_1, ?loss_rate_1,
?loss_rate_20, ?loss_counter,
?last_rx_packets, ?last_rx_packets,
?current_rx_packets, ?current_rx_packets,
"pingpong loss rate too high, closing" "pingpong loss too much pingpong packet and no other ingress packets, closing the connection",
); );
break; break;
}
} }
last_rx_packets = throughput.rx_packets(); last_rx_packets = throughput.rx_packets();
+43 -23
View File
@@ -30,6 +30,7 @@ use crate::{
peers::{ peers::{
peer_conn::PeerConn, peer_conn::PeerConn,
peer_rpc::PeerRpcManagerTransport, peer_rpc::PeerRpcManagerTransport,
recv_packet_from_chan,
route_trait::{ForeignNetworkRouteInfoMap, NextHopPolicy, RouteInterface}, route_trait::{ForeignNetworkRouteInfoMap, NextHopPolicy, RouteInterface},
PeerPacketFilter, PeerPacketFilter,
}, },
@@ -43,11 +44,12 @@ use crate::{
tunnel::{ tunnel::{
self, self,
packet_def::{CompressorAlgo, PacketType, ZCPacket}, packet_def::{CompressorAlgo, PacketType, ZCPacket},
SinkItem, Tunnel, TunnelConnector, Tunnel, TunnelConnector,
}, },
}; };
use super::{ use super::{
create_packet_recv_chan,
encrypt::{Encryptor, NullCipher}, encrypt::{Encryptor, NullCipher},
foreign_network_client::ForeignNetworkClient, foreign_network_client::ForeignNetworkClient,
foreign_network_manager::{ForeignNetworkManager, GlobalForeignNetworkAccessor}, foreign_network_manager::{ForeignNetworkManager, GlobalForeignNetworkAccessor},
@@ -56,7 +58,7 @@ use super::{
peer_ospf_route::PeerRoute, peer_ospf_route::PeerRoute,
peer_rpc::PeerRpcManager, peer_rpc::PeerRpcManager,
route_trait::{ArcRoute, Route}, route_trait::{ArcRoute, Route},
BoxNicPacketFilter, BoxPeerPacketFilter, PacketRecvChanReceiver, BoxNicPacketFilter, BoxPeerPacketFilter, PacketRecvChan, PacketRecvChanReceiver,
}; };
struct RpcTransport { struct RpcTransport {
@@ -116,7 +118,7 @@ pub struct PeerManager {
my_peer_id: PeerId, my_peer_id: PeerId,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
nic_channel: mpsc::Sender<SinkItem>, nic_channel: PacketRecvChan,
tasks: Arc<Mutex<JoinSet<()>>>, tasks: Arc<Mutex<JoinSet<()>>>,
@@ -155,11 +157,11 @@ impl PeerManager {
pub fn new( pub fn new(
route_algo: RouteAlgoType, route_algo: RouteAlgoType,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
nic_channel: mpsc::Sender<SinkItem>, nic_channel: PacketRecvChan,
) -> Self { ) -> Self {
let my_peer_id = rand::random(); let my_peer_id = rand::random();
let (packet_send, packet_recv) = mpsc::channel(128); let (packet_send, packet_recv) = create_packet_recv_chan();
let peers = Arc::new(PeerMap::new( let peers = Arc::new(PeerMap::new(
packet_send.clone(), packet_send.clone(),
global_ctx.clone(), global_ctx.clone(),
@@ -417,7 +419,7 @@ impl PeerManager {
let encryptor = self.encryptor.clone(); let encryptor = self.encryptor.clone();
self.tasks.lock().await.spawn(async move { self.tasks.lock().await.spawn(async move {
tracing::trace!("start_peer_recv"); tracing::trace!("start_peer_recv");
while let Some(ret) = recv.recv().await { while let Ok(ret) = recv_packet_from_chan(&mut recv).await {
let Err(mut ret) = let Err(mut ret) =
Self::try_handle_foreign_network_packet(ret, my_peer_id, &peers, &foreign_mgr) Self::try_handle_foreign_network_packet(ret, my_peer_id, &peers, &foreign_mgr)
.await .await
@@ -505,7 +507,7 @@ impl PeerManager {
async fn init_packet_process_pipeline(&self) { async fn init_packet_process_pipeline(&self) {
// for tun/tap ip/eth packet. // for tun/tap ip/eth packet.
struct NicPacketProcessor { struct NicPacketProcessor {
nic_channel: mpsc::Sender<SinkItem>, nic_channel: PacketRecvChan,
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl PeerPacketFilter for NicPacketProcessor { impl PeerPacketFilter for NicPacketProcessor {
@@ -673,7 +675,7 @@ impl PeerManager {
async fn run_nic_packet_process_pipeline(&self, data: &mut ZCPacket) { async fn run_nic_packet_process_pipeline(&self, data: &mut ZCPacket) {
for pipeline in self.nic_packet_process_pipeline.read().await.iter().rev() { for pipeline in self.nic_packet_process_pipeline.read().await.iter().rev() {
pipeline.try_process_packet_from_nic(data).await; let _ = pipeline.try_process_packet_from_nic(data).await;
} }
} }
@@ -720,13 +722,7 @@ impl PeerManager {
} }
} }
pub async fn send_msg_ipv4(&self, mut msg: ZCPacket, ipv4_addr: Ipv4Addr) -> Result<(), Error> { pub async fn get_msg_dst_peer(&self, ipv4_addr: &Ipv4Addr) -> (Vec<PeerId>, bool) {
tracing::trace!(
"do send_msg in peer manager, msg: {:?}, ipv4_addr: {}",
msg,
ipv4_addr
);
let mut is_exit_node = false; let mut is_exit_node = false;
let mut dst_peers = vec![]; let mut dst_peers = vec![];
let network_length = self let network_length = self
@@ -734,10 +730,10 @@ impl PeerManager {
.get_ipv4() .get_ipv4()
.map(|x| x.network_length()) .map(|x| x.network_length())
.unwrap_or(24); .unwrap_or(24);
let ipv4_inet = cidr::Ipv4Inet::new(ipv4_addr, network_length).unwrap(); let ipv4_inet = cidr::Ipv4Inet::new(*ipv4_addr, network_length).unwrap();
if ipv4_addr.is_broadcast() if ipv4_addr.is_broadcast()
|| ipv4_addr.is_multicast() || ipv4_addr.is_multicast()
|| ipv4_addr == ipv4_inet.last_address() || *ipv4_addr == ipv4_inet.last_address()
{ {
dst_peers.extend( dst_peers.extend(
self.peers self.peers
@@ -758,10 +754,15 @@ impl PeerManager {
} }
} }
if dst_peers.is_empty() { (dst_peers, is_exit_node)
tracing::info!("no peer id for ipv4: {}", ipv4_addr); }
return Ok(());
} pub async fn send_msg_ipv4(&self, mut msg: ZCPacket, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
tracing::trace!(
"do send_msg in peer manager, msg: {:?}, ipv4_addr: {}",
msg,
ipv4_addr
);
msg.fill_peer_manager_hdr( msg.fill_peer_manager_hdr(
self.my_peer_id, self.my_peer_id,
@@ -769,6 +770,24 @@ impl PeerManager {
tunnel::packet_def::PacketType::Data as u8, tunnel::packet_def::PacketType::Data as u8,
); );
self.run_nic_packet_process_pipeline(&mut msg).await; self.run_nic_packet_process_pipeline(&mut msg).await;
let cur_to_peer_id = msg.peer_manager_header().unwrap().to_peer_id.into();
if cur_to_peer_id != 0 {
return Self::send_msg_internal(
&self.peers,
&self.foreign_network_client,
msg,
cur_to_peer_id,
)
.await;
}
let (dst_peers, is_exit_node) = self.get_msg_dst_peer(&ipv4_addr).await;
if dst_peers.is_empty() {
tracing::info!("no peer id for ipv4: {}", ipv4_addr);
return Ok(());
}
let compressor = DefaultCompressor {}; let compressor = DefaultCompressor {};
compressor compressor
.compress(&mut msg, self.data_compress_algo) .compress(&mut msg, self.data_compress_algo)
@@ -875,7 +894,7 @@ impl PeerManager {
self.global_ctx.clone() self.global_ctx.clone()
} }
pub fn get_nic_channel(&self) -> mpsc::Sender<SinkItem> { pub fn get_nic_channel(&self) -> PacketRecvChan {
self.nic_channel.clone() self.nic_channel.clone()
} }
@@ -935,6 +954,7 @@ mod tests {
}, },
instance::listeners::get_listener_by_url, instance::listeners::get_listener_by_url,
peers::{ peers::{
create_packet_recv_chan,
peer_manager::RouteAlgoType, peer_manager::RouteAlgoType,
peer_rpc::tests::register_service, peer_rpc::tests::register_service,
route_trait::NextHopPolicy, route_trait::NextHopPolicy,
@@ -1078,7 +1098,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn communicate_between_enc_and_non_enc() { async fn communicate_between_enc_and_non_enc() {
let create_mgr = |enable_encryption| async move { let create_mgr = |enable_encryption| async move {
let (s, _r) = tokio::sync::mpsc::channel(1000); let (s, _r) = create_packet_recv_chan();
let mock_global_ctx = get_mock_global_ctx(); let mock_global_ctx = get_mock_global_ctx();
mock_global_ctx.config.set_flags(Flags { mock_global_ctx.config.set_flags(Flags {
enable_encryption, enable_encryption,
+22 -6
View File
@@ -10,7 +10,7 @@ use crate::{
global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity}, global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity},
PeerId, PeerId,
}, },
proto::cli::PeerConnInfo, proto::{cli::PeerConnInfo, common::PeerFeatureFlag},
tunnel::{packet_def::ZCPacket, TunnelError}, tunnel::{packet_def::ZCPacket, TunnelError},
}; };
@@ -70,11 +70,17 @@ impl PeerMap {
pub async fn send_msg_directly(&self, msg: ZCPacket, dst_peer_id: PeerId) -> Result<(), Error> { pub async fn send_msg_directly(&self, msg: ZCPacket, dst_peer_id: PeerId) -> Result<(), Error> {
if dst_peer_id == self.my_peer_id { if dst_peer_id == self.my_peer_id {
return Ok(self let packet_send = self.packet_send.clone();
.packet_send tokio::spawn(async move {
.send(msg) let ret = packet_send
.await .send(msg)
.with_context(|| "send msg to self failed")?); .await
.with_context(|| "send msg to self failed");
if ret.is_err() {
tracing::error!("send msg to self failed: {:?}", ret);
}
});
return Ok(());
} }
match self.get_peer_by_id(dst_peer_id) { match self.get_peer_by_id(dst_peer_id) {
@@ -161,6 +167,16 @@ impl PeerMap {
None None
} }
pub async fn get_peer_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag> {
for route in self.routes.read().await.iter() {
let feature_flag = route.get_feature_flag(peer_id).await;
if feature_flag.is_some() {
return feature_flag;
};
}
None
}
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
self.peer_map.is_empty() self.peer_map.is_empty()
} }
+156 -25
View File
@@ -16,6 +16,8 @@ use petgraph::{
graph::NodeIndex, graph::NodeIndex,
Directed, Graph, Directed, Graph,
}; };
use prost::Message;
use prost_reflect::{DynamicMessage, ReflectMessage};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tokio::{ use tokio::{
select, select,
@@ -30,7 +32,7 @@ use crate::{
}, },
peers::route_trait::{Route, RouteInterfaceBox}, peers::route_trait::{Route, RouteInterfaceBox},
proto::{ proto::{
common::{Ipv4Inet, NatType, StunInfo}, common::{Ipv4Inet, NatType, PeerFeatureFlag, StunInfo},
peer_rpc::{ peer_rpc::{
route_foreign_network_infos, ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, route_foreign_network_infos, ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey,
OspfRouteRpc, OspfRouteRpcClientFactory, OspfRouteRpcServer, PeerIdVersion, OspfRouteRpc, OspfRouteRpcClientFactory, OspfRouteRpcServer, PeerIdVersion,
@@ -283,6 +285,8 @@ type Error = SyncRouteInfoError;
#[derive(Debug)] #[derive(Debug)]
struct SyncedRouteInfo { struct SyncedRouteInfo {
peer_infos: DashMap<PeerId, RoutePeerInfo>, peer_infos: DashMap<PeerId, RoutePeerInfo>,
// prost doesn't support unknown fields, so we use DynamicMessage to store raw infos and progate them to other peers.
raw_peer_infos: DashMap<PeerId, DynamicMessage>,
conn_map: DashMap<PeerId, (BTreeSet<PeerId>, AtomicVersion)>, conn_map: DashMap<PeerId, (BTreeSet<PeerId>, AtomicVersion)>,
foreign_network: DashMap<ForeignNetworkRouteInfoKey, ForeignNetworkRouteInfoEntry>, foreign_network: DashMap<ForeignNetworkRouteInfoKey, ForeignNetworkRouteInfoEntry>,
} }
@@ -297,6 +301,7 @@ impl SyncedRouteInfo {
fn remove_peer(&self, peer_id: PeerId) { fn remove_peer(&self, peer_id: PeerId) {
tracing::warn!(?peer_id, "remove_peer from synced_route_info"); tracing::warn!(?peer_id, "remove_peer from synced_route_info");
self.peer_infos.remove(&peer_id); self.peer_infos.remove(&peer_id);
self.raw_peer_infos.remove(&peer_id);
self.conn_map.remove(&peer_id); self.conn_map.remove(&peer_id);
self.foreign_network.retain(|k, _| k.peer_id != peer_id); self.foreign_network.retain(|k, _| k.peer_id != peer_id);
} }
@@ -369,8 +374,11 @@ impl SyncedRouteInfo {
my_peer_route_id: u64, my_peer_route_id: u64,
dst_peer_id: PeerId, dst_peer_id: PeerId,
peer_infos: &Vec<RoutePeerInfo>, peer_infos: &Vec<RoutePeerInfo>,
raw_peer_infos: &Vec<DynamicMessage>,
) -> Result<(), Error> { ) -> Result<(), Error> {
for mut route_info in peer_infos.iter().map(Clone::clone) { for (idx, route_info) in peer_infos.iter().enumerate() {
let mut route_info = route_info.clone();
let raw_route_info = &raw_peer_infos[idx];
self.check_duplicate_peer_id( self.check_duplicate_peer_id(
my_peer_id, my_peer_id,
my_peer_route_id, my_peer_route_id,
@@ -383,6 +391,13 @@ impl SyncedRouteInfo {
&route_info, &route_info,
)?; )?;
let peer_id_raw = raw_route_info
.get_field_by_name("peer_id")
.unwrap()
.as_u32()
.unwrap();
assert_eq!(peer_id_raw, route_info.peer_id);
// time between peers may not be synchronized, so update last_update to local now. // time between peers may not be synchronized, so update last_update to local now.
// note only last_update with larger version will be updated to local saved peer info. // note only last_update with larger version will be updated to local saved peer info.
route_info.last_update = Some(SystemTime::now().into()); route_info.last_update = Some(SystemTime::now().into());
@@ -391,10 +406,16 @@ impl SyncedRouteInfo {
.entry(route_info.peer_id) .entry(route_info.peer_id)
.and_modify(|old_entry| { .and_modify(|old_entry| {
if route_info.version > old_entry.version { if route_info.version > old_entry.version {
self.raw_peer_infos
.insert(route_info.peer_id, raw_route_info.clone());
*old_entry = route_info.clone(); *old_entry = route_info.clone();
} }
}) })
.or_insert_with(|| route_info.clone()); .or_insert_with(|| {
self.raw_peer_infos
.insert(route_info.peer_id, raw_route_info.clone());
route_info.clone()
});
} }
Ok(()) Ok(())
} }
@@ -1047,6 +1068,7 @@ impl PeerRouteServiceImpl {
synced_route_info: SyncedRouteInfo { synced_route_info: SyncedRouteInfo {
peer_infos: DashMap::new(), peer_infos: DashMap::new(),
raw_peer_infos: DashMap::new(),
conn_map: DashMap::new(), conn_map: DashMap::new(),
foreign_network: DashMap::new(), foreign_network: DashMap::new(),
}, },
@@ -1381,6 +1403,39 @@ impl PeerRouteServiceImpl {
} }
} }
fn build_sync_route_raw_req(
req: &SyncRouteInfoRequest,
raw_peer_infos: &DashMap<PeerId, DynamicMessage>,
) -> DynamicMessage {
use prost_reflect::Value;
let mut req_dynamic_msg = DynamicMessage::new(SyncRouteInfoRequest::default().descriptor());
req_dynamic_msg.transcode_from(req).unwrap();
let peer_infos = req.peer_infos.as_ref().map(|x| &x.items);
if let Some(peer_infos) = peer_infos {
let mut peer_info_raws = Vec::new();
for peer_info in peer_infos.iter() {
if let Some(info) = raw_peer_infos.get(&peer_info.peer_id) {
peer_info_raws.push(Value::Message(info.clone()));
} else {
let mut p = DynamicMessage::new(RoutePeerInfo::default().descriptor());
p.transcode_from(peer_info).unwrap();
peer_info_raws.push(Value::Message(p));
}
}
let mut peer_infos = DynamicMessage::new(RoutePeerInfos::default().descriptor());
peer_infos.set_field_by_name("items", Value::List(peer_info_raws));
req_dynamic_msg.set_field_by_name("peer_infos", Value::Message(peer_infos));
}
tracing::trace!(?req_dynamic_msg, "build_sync_route_raw_req");
req_dynamic_msg
}
async fn sync_route_with_peer( async fn sync_route_with_peer(
&self, &self,
dst_peer_id: PeerId, dst_peer_id: PeerId,
@@ -1419,20 +1474,27 @@ impl PeerRouteServiceImpl {
self.global_ctx.get_network_name(), self.global_ctx.get_network_name(),
); );
let sync_route_info_req = SyncRouteInfoRequest {
my_peer_id,
my_session_id: session.my_session_id.load(Ordering::Relaxed),
is_initiator: session.we_are_initiator.load(Ordering::Relaxed),
peer_infos: peer_infos.clone().map(|x| RoutePeerInfos { items: x }),
conn_bitmap: conn_bitmap.clone().map(Into::into),
foreign_network_infos: foreign_network.clone(),
};
let mut ctrl = BaseController::default(); let mut ctrl = BaseController::default();
ctrl.set_timeout_ms(3000); ctrl.set_timeout_ms(3000);
let ret = rpc_stub ctrl.set_raw_input(
.sync_route_info( Self::build_sync_route_raw_req(
ctrl, &sync_route_info_req,
SyncRouteInfoRequest { &self.synced_route_info.raw_peer_infos,
my_peer_id,
my_session_id: session.my_session_id.load(Ordering::Relaxed),
is_initiator: session.we_are_initiator.load(Ordering::Relaxed),
peer_infos: peer_infos.clone().map(|x| RoutePeerInfos { items: x }),
conn_bitmap: conn_bitmap.clone().map(Into::into),
foreign_network_infos: foreign_network.clone(),
},
) )
.encode_to_vec()
.into(),
);
let ret = rpc_stub
.sync_route_info(ctrl, SyncRouteInfoRequest::default())
.await; .await;
if let Err(e) = &ret { if let Err(e) = &ret {
@@ -1508,12 +1570,30 @@ impl Debug for RouteSessionManager {
} }
} }
fn get_raw_peer_infos(req_raw_input: &mut bytes::Bytes) -> Option<Vec<DynamicMessage>> {
let sync_req_dynamic_msg =
DynamicMessage::decode(SyncRouteInfoRequest::default().descriptor(), req_raw_input)
.unwrap();
let peer_infos = sync_req_dynamic_msg.get_field_by_name("peer_infos")?;
let infos = peer_infos
.as_message()?
.get_field_by_name("items")?
.as_list()?
.iter()
.map(|x| x.as_message().unwrap().clone())
.collect();
Some(infos)
}
#[async_trait::async_trait] #[async_trait::async_trait]
impl OspfRouteRpc for RouteSessionManager { impl OspfRouteRpc for RouteSessionManager {
type Controller = BaseController; type Controller = BaseController;
async fn sync_route_info( async fn sync_route_info(
&self, &self,
_ctrl: BaseController, ctrl: BaseController,
request: SyncRouteInfoRequest, request: SyncRouteInfoRequest,
) -> Result<SyncRouteInfoResponse, rpc_types::error::Error> { ) -> Result<SyncRouteInfoResponse, rpc_types::error::Error> {
let from_peer_id = request.my_peer_id; let from_peer_id = request.my_peer_id;
@@ -1522,6 +1602,13 @@ impl OspfRouteRpc for RouteSessionManager {
let peer_infos = request.peer_infos.map(|x| x.items); let peer_infos = request.peer_infos.map(|x| x.items);
let conn_bitmap = request.conn_bitmap.map(Into::into); let conn_bitmap = request.conn_bitmap.map(Into::into);
let foreign_network = request.foreign_network_infos; let foreign_network = request.foreign_network_infos;
let raw_peer_infos = if peer_infos.is_some() {
let r = get_raw_peer_infos(&mut ctrl.get_raw_input().unwrap()).unwrap();
assert_eq!(r.len(), peer_infos.as_ref().unwrap().len());
Some(r)
} else {
None
};
let ret = self let ret = self
.do_sync_route_info( .do_sync_route_info(
@@ -1529,6 +1616,7 @@ impl OspfRouteRpc for RouteSessionManager {
from_session_id, from_session_id,
is_initiator, is_initiator,
peer_infos, peer_infos,
raw_peer_infos,
conn_bitmap, conn_bitmap,
foreign_network, foreign_network,
) )
@@ -1563,8 +1651,6 @@ impl RouteSessionManager {
) { ) {
let mut last_sync = Instant::now(); let mut last_sync = Instant::now();
loop { loop {
let mut first_time = true;
loop { loop {
let Some(service_impl) = service_impl.clone().upgrade() else { let Some(service_impl) = service_impl.clone().upgrade() else {
return; return;
@@ -1574,11 +1660,6 @@ impl RouteSessionManager {
return; return;
}; };
if first_time {
first_time = false;
service_impl.update_my_infos().await;
}
// if we are initiator, we should ensure the dst has the session. // if we are initiator, we should ensure the dst has the session.
let sync_as_initiator = if last_sync.elapsed().as_secs() > 10 { let sync_as_initiator = if last_sync.elapsed().as_secs() > 10 {
last_sync = Instant::now(); last_sync = Instant::now();
@@ -1739,7 +1820,6 @@ impl RouteSessionManager {
continue; continue;
} }
let _ = self.stop_session(*peer_id); let _ = self.stop_session(*peer_id);
assert_ne!(Some(*peer_id), cur_dst_peer_id_to_initiate);
} }
} }
@@ -1784,6 +1864,7 @@ impl RouteSessionManager {
from_session_id: SessionId, from_session_id: SessionId,
is_initiator: bool, is_initiator: bool,
peer_infos: Option<Vec<RoutePeerInfo>>, peer_infos: Option<Vec<RoutePeerInfo>>,
raw_peer_infos: Option<Vec<DynamicMessage>>,
conn_bitmap: Option<RouteConnBitmap>, conn_bitmap: Option<RouteConnBitmap>,
foreign_network: Option<RouteForeignNetworkInfos>, foreign_network: Option<RouteForeignNetworkInfos>,
) -> Result<SyncRouteInfoResponse, Error> { ) -> Result<SyncRouteInfoResponse, Error> {
@@ -1806,6 +1887,7 @@ impl RouteSessionManager {
service_impl.my_peer_route_id, service_impl.my_peer_route_id,
from_peer_id, from_peer_id,
peer_infos, peer_infos,
raw_peer_infos.as_ref().unwrap(),
)?; )?;
session.update_dst_saved_peer_info_version(peer_infos); session.update_dst_saved_peer_info_version(peer_infos);
need_update_route_table = true; need_update_route_table = true;
@@ -1941,6 +2023,9 @@ impl PeerRoute {
return; return;
}; };
// make sure my_peer_id is in the peer_infos.
self.service_impl.update_my_infos().await;
peer_rpc.rpc_server().registry().register( peer_rpc.rpc_server().registry().register(
OspfRouteRpcServer::new(self.session_mgr.clone()), OspfRouteRpcServer::new(self.session_mgr.clone()),
&self.global_ctx.get_network_name(), &self.global_ctx.get_network_name(),
@@ -2043,6 +2128,8 @@ impl Route for PeerRoute {
route.cost_latency_first = next_hop_peer_latency_first.map(|x| x.path_latency); route.cost_latency_first = next_hop_peer_latency_first.map(|x| x.path_latency);
route.path_latency_latency_first = next_hop_peer_latency_first.map(|x| x.path_latency); route.path_latency_latency_first = next_hop_peer_latency_first.map(|x| x.path_latency);
route.feature_flag = item.feature_flag.clone();
routes.push(route); routes.push(route);
} }
routes routes
@@ -2102,6 +2189,14 @@ impl Route for PeerRoute {
.map(|x| x.clone()) .map(|x| x.clone())
.unwrap_or_default() .unwrap_or_default()
} }
async fn get_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag> {
self.service_impl
.route_table
.peer_infos
.get(&peer_id)
.and_then(|x| x.feature_flag.clone())
}
} }
impl PeerPacketFilter for Arc<PeerRoute> {} impl PeerPacketFilter for Arc<PeerRoute> {}
@@ -2114,17 +2209,26 @@ mod tests {
time::Duration, time::Duration,
}; };
use dashmap::DashMap;
use prost_reflect::{DynamicMessage, ReflectMessage};
use crate::{ use crate::{
common::{global_ctx::tests::get_mock_global_ctx, PeerId}, common::{global_ctx::tests::get_mock_global_ctx, PeerId},
connector::udp_hole_punch::tests::replace_stun_info_collector, connector::udp_hole_punch::tests::replace_stun_info_collector,
peers::{ peers::{
create_packet_recv_chan,
peer_manager::{PeerManager, RouteAlgoType}, peer_manager::{PeerManager, RouteAlgoType},
peer_ospf_route::PeerRouteServiceImpl,
route_trait::{NextHopPolicy, Route, RouteCostCalculatorInterface}, route_trait::{NextHopPolicy, Route, RouteCostCalculatorInterface},
tests::connect_peer_manager, tests::connect_peer_manager,
}, },
proto::common::NatType, proto::{
common::NatType,
peer_rpc::{RoutePeerInfo, RoutePeerInfos, SyncRouteInfoRequest},
},
tunnel::common::tests::wait_for_condition, tunnel::common::tests::wait_for_condition,
}; };
use prost::Message;
use super::PeerRoute; use super::PeerRoute;
@@ -2155,7 +2259,7 @@ mod tests {
} }
async fn create_mock_pmgr() -> Arc<PeerManager> { async fn create_mock_pmgr() -> Arc<PeerManager> {
let (s, _r) = tokio::sync::mpsc::channel(1000); let (s, _r) = create_packet_recv_chan();
let peer_mgr = Arc::new(PeerManager::new( let peer_mgr = Arc::new(PeerManager::new(
RouteAlgoType::None, RouteAlgoType::None,
get_mock_global_ctx(), get_mock_global_ctx(),
@@ -2544,4 +2648,31 @@ mod tests {
) )
.await; .await;
} }
#[tokio::test]
async fn test_raw_peer_info() {
let mut req = SyncRouteInfoRequest::default();
let raw_info_map: DashMap<PeerId, DynamicMessage> = DashMap::new();
req.peer_infos = Some(RoutePeerInfos {
items: vec![RoutePeerInfo {
peer_id: 1,
..Default::default()
}],
});
let mut raw_req = DynamicMessage::new(RoutePeerInfo::default().descriptor());
raw_req
.transcode_from(&req.peer_infos.as_ref().unwrap().items[0])
.unwrap();
raw_info_map.insert(1, raw_req);
let out = PeerRouteServiceImpl::build_sync_route_raw_req(&req, &raw_info_map);
let out_bytes = out.encode_to_vec();
let req2 = SyncRouteInfoRequest::decode(out_bytes.as_slice()).unwrap();
assert_eq!(req, req2);
}
} }
+3 -1
View File
@@ -24,8 +24,10 @@ impl DirectConnectorRpc for DirectConnectorManagerRpcServer {
let mut ret = self.global_ctx.get_ip_collector().collect_ip_addrs().await; let mut ret = self.global_ctx.get_ip_collector().collect_ip_addrs().await;
ret.listeners = self ret.listeners = self
.global_ctx .global_ctx
.get_running_listeners() .config
.get_mapped_listeners()
.into_iter() .into_iter()
.chain(self.global_ctx.get_running_listeners().into_iter())
.map(Into::into) .map(Into::into)
.collect(); .collect();
Ok(ret) Ok(ret)
+7 -2
View File
@@ -4,8 +4,11 @@ use dashmap::DashMap;
use crate::{ use crate::{
common::{global_ctx::NetworkIdentity, PeerId}, common::{global_ctx::NetworkIdentity, PeerId},
proto::peer_rpc::{ proto::{
ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, RouteForeignNetworkInfos, common::PeerFeatureFlag,
peer_rpc::{
ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, RouteForeignNetworkInfos,
},
}, },
}; };
@@ -94,6 +97,8 @@ pub trait Route {
async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {} async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {}
async fn get_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag>;
async fn dump(&self) -> String { async fn dump(&self) -> String {
"this route implementation does not support dump".to_string() "this route implementation does not support dump".to_string()
} }
+74 -3
View File
@@ -1,14 +1,24 @@
use std::sync::Arc; use std::sync::Arc;
use crate::{ use crate::{
common::{error::Error, global_ctx::tests::get_mock_global_ctx, PeerId}, common::{
error::Error,
global_ctx::{
tests::{get_mock_global_ctx, get_mock_global_ctx_with_network},
NetworkIdentity,
},
PeerId,
},
tunnel::ring::create_ring_tunnel_pair, tunnel::ring::create_ring_tunnel_pair,
}; };
use super::peer_manager::{PeerManager, RouteAlgoType}; use super::{
create_packet_recv_chan,
peer_manager::{PeerManager, RouteAlgoType},
};
pub async fn create_mock_peer_manager() -> Arc<PeerManager> { pub async fn create_mock_peer_manager() -> Arc<PeerManager> {
let (s, _r) = tokio::sync::mpsc::channel(1000); let (s, _r) = create_packet_recv_chan();
let peer_mgr = Arc::new(PeerManager::new( let peer_mgr = Arc::new(PeerManager::new(
RouteAlgoType::Ospf, RouteAlgoType::Ospf,
get_mock_global_ctx(), get_mock_global_ctx(),
@@ -18,6 +28,15 @@ pub async fn create_mock_peer_manager() -> Arc<PeerManager> {
peer_mgr peer_mgr
} }
pub async fn create_mock_peer_manager_with_name(network_name: String) -> Arc<PeerManager> {
let (s, _r) = create_packet_recv_chan();
let g =
get_mock_global_ctx_with_network(Some(NetworkIdentity::new(network_name, "".to_string())));
let peer_mgr = Arc::new(PeerManager::new(RouteAlgoType::Ospf, g, s));
peer_mgr.run().await.unwrap();
peer_mgr
}
pub async fn connect_peer_manager(client: Arc<PeerManager>, server: Arc<PeerManager>) { pub async fn connect_peer_manager(client: Arc<PeerManager>, server: Arc<PeerManager>) {
let (a_ring, b_ring) = create_ring_tunnel_pair(); let (a_ring, b_ring) = create_ring_tunnel_pair();
let a_mgr_copy = client.clone(); let a_mgr_copy = client.clone();
@@ -56,3 +75,55 @@ pub async fn wait_route_appear(
wait_route_appear_with_cost(peer_mgr.clone(), target_peer.my_peer_id(), None).await?; wait_route_appear_with_cost(peer_mgr.clone(), target_peer.my_peer_id(), None).await?;
wait_route_appear_with_cost(target_peer, peer_mgr.my_peer_id(), None).await wait_route_appear_with_cost(target_peer, peer_mgr.my_peer_id(), None).await
} }
#[tokio::test]
async fn foreign_mgr_stress_test() {
const FOREIGN_NETWORK_COUNT: i32 = 20;
const PEER_PER_NETWORK: i32 = 3;
const PUBLIC_PEER_COUNT: i32 = 3;
let mut public_peers = Vec::new();
for _ in 0..PUBLIC_PEER_COUNT {
public_peers.push(create_mock_peer_manager().await);
}
connect_peer_manager(public_peers[0].clone(), public_peers[1].clone()).await;
connect_peer_manager(public_peers[0].clone(), public_peers[2].clone()).await;
connect_peer_manager(public_peers[1].clone(), public_peers[2].clone()).await;
let mut foreigns = Vec::new();
for i in 0..FOREIGN_NETWORK_COUNT {
let mut peers = Vec::new();
let name = format!("foreign-network-test-{}", i);
for _ in 0..PEER_PER_NETWORK {
let mgr = create_mock_peer_manager_with_name(name.clone()).await;
let public_peer_idx = rand::random::<usize>() % public_peers.len();
connect_peer_manager(mgr.clone(), public_peers[public_peer_idx].clone()).await;
peers.push(mgr);
}
foreigns.push(peers);
}
for _ in 0..5 {
for i in 0..PUBLIC_PEER_COUNT {
let p = public_peers[i as usize].clone();
println!(
"public peer {} routes: {:?}, global_foreign_network: {:?}, peers: {:?}",
i,
p.list_routes().await,
p.list_global_foreign_network().await.foreign_networks.len(),
p.get_peer_map().list_peers().await
);
}
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
let new_peer = create_mock_peer_manager().await;
connect_peer_manager(new_peer.clone(), public_peers[0].clone()).await;
while let Err(e) = wait_route_appear(public_peers[1].clone(), new_peer.clone()).await {
println!("wait route ret: {:?}", e);
}
}
}
+36
View File
@@ -177,3 +177,39 @@ service VpnPortalRpc {
rpc GetVpnPortalInfo(GetVpnPortalInfoRequest) rpc GetVpnPortalInfo(GetVpnPortalInfoRequest)
returns (GetVpnPortalInfoResponse); returns (GetVpnPortalInfoResponse);
} }
enum TcpProxyEntryTransportType {
TCP = 0;
KCP = 1;
}
enum TcpProxyEntryState {
Unknown = 0;
// receive syn packet but not start connecting to dst
SynReceived = 1;
// connecting to dst
ConnectingDst = 2;
// connected to dst
Connected = 3;
// connection closed
Closed = 4;
}
message TcpProxyEntry {
common.SocketAddr src = 1;
common.SocketAddr dst = 2;
uint64 start_time = 3;
TcpProxyEntryState state = 4;
TcpProxyEntryTransportType transport_type = 5;
}
message ListTcpProxyEntryRequest {}
message ListTcpProxyEntryResponse {
repeated TcpProxyEntry entries = 1;
}
service TcpProxyRpc {
rpc ListTcpProxyEntry(ListTcpProxyEntryRequest)
returns (ListTcpProxyEntryResponse);
}
+10
View File
@@ -21,6 +21,14 @@ message FlagsInConfig {
string ipv6_listener = 14; string ipv6_listener = 14;
bool multi_thread = 15; bool multi_thread = 15;
CompressionAlgoPb data_compress_algo = 16; CompressionAlgoPb data_compress_algo = 16;
bool bind_device = 17;
// should we convert all tcp streams into kcp streams
bool enable_kcp_proxy = 18;
// does this peer allow kcp input
bool disable_kcp_input = 19;
// allow relay kcp packets (for public server, this can reduce the throughput)
bool disable_relay_kcp = 20;
} }
message RpcDescriptor { message RpcDescriptor {
@@ -143,4 +151,6 @@ message StunInfo {
message PeerFeatureFlag { message PeerFeatureFlag {
bool is_public_server = 1; bool is_public_server = 1;
bool avoid_relay_data = 2; bool avoid_relay_data = 2;
bool kcp_input = 3;
bool no_relay_kcp = 4;
} }
+8 -1
View File
@@ -101,7 +101,11 @@ impl From<cidr::Ipv4Inet> for Ipv4Inet {
impl From<Ipv4Inet> for cidr::Ipv4Inet { impl From<Ipv4Inet> for cidr::Ipv4Inet {
fn from(value: Ipv4Inet) -> Self { fn from(value: Ipv4Inet) -> Self {
cidr::Ipv4Inet::new(value.address.unwrap().into(), value.network_length as u8).unwrap() cidr::Ipv4Inet::new(
value.address.unwrap_or_default().into(),
value.network_length as u8,
)
.unwrap()
} }
} }
@@ -168,6 +172,9 @@ impl From<std::net::SocketAddr> for SocketAddr {
impl From<SocketAddr> for std::net::SocketAddr { impl From<SocketAddr> for std::net::SocketAddr {
fn from(value: SocketAddr) -> Self { fn from(value: SocketAddr) -> Self {
if value.ip.is_none() {
return "0.0.0.0:0".parse().unwrap();
}
match value.ip.unwrap() { match value.ip.unwrap() {
socket_addr::Ip::Ipv4(ip) => std::net::SocketAddr::V4(std::net::SocketAddrV4::new( socket_addr::Ip::Ipv4(ip) => std::net::SocketAddr::V4(std::net::SocketAddrV4::new(
std::net::Ipv4Addr::from(ip), std::net::Ipv4Addr::from(ip),
+3
View File
@@ -9,3 +9,6 @@ pub mod web;
#[cfg(test)] #[cfg(test)]
pub mod tests; pub mod tests;
const DESCRIPTOR_POOL_BYTES: &[u8] =
include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin"));
+5
View File
@@ -205,3 +205,8 @@ message HandshakeRequest {
string network_name = 5; string network_name = 5;
bytes network_secret_digrest = 6; bytes network_secret_digrest = 6;
} }
message KcpConnData {
common.SocketAddr src = 1;
common.SocketAddr dst = 4;
}
+10 -3
View File
@@ -192,7 +192,7 @@ impl Client {
async fn call( async fn call(
&self, &self,
ctrl: Self::Controller, mut ctrl: Self::Controller,
method: <Self::Descriptor as ServiceDescriptor>::Method, method: <Self::Descriptor as ServiceDescriptor>::Method,
input: bytes::Bytes, input: bytes::Bytes,
) -> Result<bytes::Bytes> { ) -> Result<bytes::Bytes> {
@@ -224,7 +224,11 @@ impl Client {
}; };
let rpc_req = RpcRequest { let rpc_req = RpcRequest {
request: input.into(), request: if let Some(raw_input) = ctrl.get_raw_input() {
raw_input.into()
} else {
input.into()
},
timeout_ms: ctrl.timeout_ms(), timeout_ms: ctrl.timeout_ms(),
..Default::default() ..Default::default()
}; };
@@ -280,7 +284,10 @@ impl Client {
return Err(err.into()); return Err(err.into());
} }
Ok(bytes::Bytes::from(rpc_resp.response)) let raw_output = Bytes::from(rpc_resp.response.clone());
ctrl.set_raw_output(raw_output.clone());
Ok(raw_output)
} }
} }
+4 -1
View File
@@ -159,7 +159,10 @@ pub fn build_rpc_packet(
let cur_packet = RpcPacket { let cur_packet = RpcPacket {
from_peer, from_peer,
to_peer, to_peer,
descriptor: if cur_offset == 0 { descriptor: if cur_offset == 0
|| compression_info.algo == CompressionAlgoPb::None as i32
{
// old version must have descriptor on every piece
Some(rpc_desc.clone()) Some(rpc_desc.clone())
} else { } else {
None None
+12 -9
View File
@@ -13,7 +13,7 @@ use crate::{
common::{join_joinset_background, PeerId}, common::{join_joinset_background, PeerId},
proto::{ proto::{
common::{self, CompressionAlgoPb, RpcCompressionInfo, RpcPacket, RpcRequest, RpcResponse}, common::{self, CompressionAlgoPb, RpcCompressionInfo, RpcPacket, RpcRequest, RpcResponse},
rpc_types::error::Result, rpc_types::{controller::Controller, error::Result},
}, },
tunnel::{ tunnel::{
mpsc::{MpscTunnel, MpscTunnelSender}, mpsc::{MpscTunnel, MpscTunnelSender},
@@ -155,16 +155,19 @@ impl Server {
}; };
let rpc_request = RpcRequest::decode(Bytes::from(body))?; let rpc_request = RpcRequest::decode(Bytes::from(body))?;
let timeout_duration = std::time::Duration::from_millis(rpc_request.timeout_ms as u64); let timeout_duration = std::time::Duration::from_millis(rpc_request.timeout_ms as u64);
let ctrl = RpcController::default(); let mut ctrl = RpcController::default();
Ok(timeout( let raw_req = Bytes::from(rpc_request.request);
ctrl.set_raw_input(raw_req.clone());
let ret = timeout(
timeout_duration, timeout_duration,
reg.call_method( reg.call_method(packet.descriptor.unwrap(), ctrl.clone(), raw_req),
packet.descriptor.unwrap(),
ctrl,
Bytes::from(rpc_request.request),
),
) )
.await??) .await??;
if let Some(raw_output) = ctrl.get_raw_output() {
Ok(raw_output)
} else {
Ok(ret)
}
} }
async fn handle_rpc(sender: MpscTunnelSender, packet: RpcPacket, reg: Arc<ServiceRegistry>) { async fn handle_rpc(sender: MpscTunnelSender, packet: RpcPacket, reg: Arc<ServiceRegistry>) {
+43 -1
View File
@@ -1,4 +1,9 @@
pub trait Controller: Send + Sync + 'static { use std::sync::{Arc, Mutex};
use bytes::Bytes;
// Controller must impl clone and all cloned controllers share the same data
pub trait Controller: Send + Sync + Clone + 'static {
fn timeout_ms(&self) -> i32 { fn timeout_ms(&self) -> i32 {
5000 5000
} }
@@ -10,12 +15,29 @@ pub trait Controller: Send + Sync + 'static {
fn trace_id(&self) -> i32 { fn trace_id(&self) -> i32 {
0 0
} }
fn set_raw_input(&mut self, _raw_input: Bytes) {}
fn get_raw_input(&self) -> Option<Bytes> {
None
}
fn set_raw_output(&mut self, _raw_output: Bytes) {}
fn get_raw_output(&self) -> Option<Bytes> {
None
}
} }
#[derive(Debug)] #[derive(Debug)]
pub struct BaseControllerRawData {
pub raw_input: Option<Bytes>,
pub raw_output: Option<Bytes>,
}
#[derive(Debug, Clone)]
pub struct BaseController { pub struct BaseController {
pub timeout_ms: i32, pub timeout_ms: i32,
pub trace_id: i32, pub trace_id: i32,
pub raw_data: Arc<Mutex<BaseControllerRawData>>,
} }
impl Controller for BaseController { impl Controller for BaseController {
@@ -34,6 +56,22 @@ impl Controller for BaseController {
fn trace_id(&self) -> i32 { fn trace_id(&self) -> i32 {
self.trace_id self.trace_id
} }
fn set_raw_input(&mut self, raw_input: Bytes) {
self.raw_data.lock().unwrap().raw_input = Some(raw_input);
}
fn get_raw_input(&self) -> Option<Bytes> {
self.raw_data.lock().unwrap().raw_input.clone()
}
fn set_raw_output(&mut self, raw_output: Bytes) {
self.raw_data.lock().unwrap().raw_output = Some(raw_output);
}
fn get_raw_output(&self) -> Option<Bytes> {
self.raw_data.lock().unwrap().raw_output.clone()
}
} }
impl Default for BaseController { impl Default for BaseController {
@@ -41,6 +79,10 @@ impl Default for BaseController {
Self { Self {
timeout_ms: 5000, timeout_ms: 5000,
trace_id: 0, trace_id: 0,
raw_data: Arc::new(Mutex::new(BaseControllerRawData {
raw_input: None,
raw_output: None,
})),
} }
} }
} }
+7
View File
@@ -40,6 +40,13 @@ message NetworkConfig {
optional bool latency_first = 19; optional bool latency_first = 19;
optional string dev_name = 20; optional string dev_name = 20;
optional bool use_smoltcp = 21;
optional bool enable_kcp_proxy = 22;
optional bool disable_kcp_input = 23;
optional bool disable_p2p = 24;
optional bool bind_device = 25;
optional bool no_tun = 26;
} }
message MyNodeInfo { message MyNodeInfo {
+2 -1
View File
@@ -167,7 +167,7 @@ async fn wait_proxy_route_appear(
} }
fn set_link_status(net_ns: &str, up: bool) { fn set_link_status(net_ns: &str, up: bool) {
let _ = std::process::Command::new("ip") let ret = std::process::Command::new("ip")
.args([ .args([
"netns", "netns",
"exec", "exec",
@@ -180,4 +180,5 @@ fn set_link_status(net_ns: &str, up: bool) {
]) ])
.output() .output()
.unwrap(); .unwrap();
tracing::info!("set link status: {:?}, net_ns: {}, up: {}", ret, net_ns, up);
} }
+36 -1
View File
@@ -364,6 +364,9 @@ pub async fn subnet_proxy_three_node_test(
#[values("tcp", "udp", "wg")] proto: &str, #[values("tcp", "udp", "wg")] proto: &str,
#[values(true, false)] no_tun: bool, #[values(true, false)] no_tun: bool,
#[values(true, false)] relay_by_public_server: bool, #[values(true, false)] relay_by_public_server: bool,
#[values(true, false)] enable_kcp_proxy: bool,
#[values(true, false)] disable_kcp_input: bool,
#[values(true, false)] dst_enable_kcp_proxy: bool,
) { ) {
let insts = init_three_node_ex( let insts = init_three_node_ex(
proto, proto,
@@ -371,6 +374,8 @@ pub async fn subnet_proxy_three_node_test(
if cfg.get_inst_name() == "inst3" { if cfg.get_inst_name() == "inst3" {
let mut flags = cfg.get_flags(); let mut flags = cfg.get_flags();
flags.no_tun = no_tun; flags.no_tun = no_tun;
flags.disable_kcp_input = disable_kcp_input;
flags.enable_kcp_proxy = dst_enable_kcp_proxy;
cfg.set_flags(flags); cfg.set_flags(flags);
cfg.add_proxy_cidr("10.1.2.0/24".parse().unwrap()); cfg.add_proxy_cidr("10.1.2.0/24".parse().unwrap());
} }
@@ -382,6 +387,12 @@ pub async fn subnet_proxy_three_node_test(
)); ));
} }
if cfg.get_inst_name() == "inst1" && enable_kcp_proxy {
let mut flags = cfg.get_flags();
flags.enable_kcp_proxy = true;
cfg.set_flags(flags);
}
cfg cfg
}, },
relay_by_public_server, relay_by_public_server,
@@ -481,6 +492,11 @@ pub async fn proxy_three_node_disconnect_test(#[values("tcp", "wg")] proto: &str
} }
inst4.run().await.unwrap(); inst4.run().await.unwrap();
tracing::info!("inst1 peer id: {:?}", insts[0].peer_id());
tracing::info!("inst2 peer id: {:?}", insts[1].peer_id());
tracing::info!("inst3 peer id: {:?}", insts[2].peer_id());
tracing::info!("inst4 peer id: {:?}", inst4.peer_id());
let task = tokio::spawn(async move { let task = tokio::spawn(async move {
for _ in 1..=2 { for _ in 1..=2 {
tokio::time::sleep(tokio::time::Duration::from_secs(8)).await; tokio::time::sleep(tokio::time::Duration::from_secs(8)).await;
@@ -502,6 +518,24 @@ pub async fn proxy_three_node_disconnect_test(#[values("tcp", "wg")] proto: &str
ping_test("net_a", "10.144.144.4", Some(1)).await; ping_test("net_a", "10.144.144.4", Some(1)).await;
} }
})); }));
wait_for_condition(
|| async {
insts[2]
.get_peer_manager()
.get_peer_map()
.list_peers_with_conn()
.await
.iter()
.find(|r| **r == inst4.peer_id())
.is_none()
},
// 0 down, assume last packet is recv in -0.01
// [2, 7) send ping
// [4, 9) ping fail and close connection
Duration::from_secs(11),
)
.await;
wait_for_condition( wait_for_condition(
|| async { || async {
insts[0] insts[0]
@@ -512,9 +546,10 @@ pub async fn proxy_three_node_disconnect_test(#[values("tcp", "wg")] proto: &str
.find(|r| r.peer_id == inst4.peer_id()) .find(|r| r.peer_id == inst4.peer_id())
.is_none() .is_none()
}, },
Duration::from_secs(15), Duration::from_secs(7),
) )
.await; .await;
set_link_status("net_d", true); set_link_status("net_d", true);
} }
}); });
+6
View File
@@ -61,6 +61,8 @@ pub enum PacketType {
RpcReq = 8, RpcReq = 8,
RpcResp = 9, RpcResp = 9,
ForeignNetworkPacket = 10, ForeignNetworkPacket = 10,
KcpSrc = 11,
KcpDst = 12,
} }
bitflags::bitflags! { bitflags::bitflags! {
@@ -494,6 +496,10 @@ impl ZCPacket {
&self.inner[self.payload_offset()..] &self.inner[self.payload_offset()..]
} }
pub fn payload_bytes(mut self) -> BytesMut {
self.inner.split_off(self.payload_offset())
}
pub fn peer_manager_header(&self) -> Option<&PeerManagerHeader> { pub fn peer_manager_header(&self) -> Option<&PeerManagerHeader> {
PeerManagerHeader::ref_from_prefix( PeerManagerHeader::ref_from_prefix(
&self.inner[self &self.inner[self
+11 -1
View File
@@ -9,6 +9,16 @@ pub struct WindowLatency {
count: AtomicU32, count: AtomicU32,
} }
impl std::fmt::Debug for WindowLatency {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("WindowLatency")
.field("count", &self.count)
.field("window_size", &self.latency_us_window_size)
.field("window_latency", &self.get_latency_us::<u32>())
.finish()
}
}
impl WindowLatency { impl WindowLatency {
pub fn new(window_size: u32) -> Self { pub fn new(window_size: u32) -> Self {
Self { Self {
@@ -48,7 +58,7 @@ impl WindowLatency {
} }
} }
#[derive(Default, Copy, Clone)] #[derive(Default, Copy, Clone, Debug)]
pub struct Throughput { pub struct Throughput {
tx_bytes: u64, tx_bytes: u64,
rx_bytes: u64, rx_bytes: u64,
+40 -20
View File
@@ -28,6 +28,30 @@ impl TcpTunnelListener {
listener: None, listener: None,
} }
} }
async fn do_accept(&mut self) -> Result<Box<dyn Tunnel>, std::io::Error> {
let listener = self.listener.as_ref().unwrap();
let (stream, _) = listener.accept().await?;
if let Err(e) = stream.set_nodelay(true) {
tracing::warn!(?e, "set_nodelay fail in accept");
}
let info = TunnelInfo {
tunnel_type: "tcp".to_owned(),
local_addr: Some(self.local_url().into()),
remote_addr: Some(
super::build_url_from_socket_addr(&stream.peer_addr()?.to_string(), "tcp").into(),
),
};
let (r, w) = stream.into_split();
Ok(Box::new(TunnelWrapper::new(
FramedReader::new(r, TCP_MTU_BYTES),
FramedWriter::new(w),
Some(info),
)))
}
} }
#[async_trait] #[async_trait]
@@ -57,27 +81,23 @@ impl TunnelListener for TcpTunnelListener {
} }
async fn accept(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> { async fn accept(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let listener = self.listener.as_ref().unwrap(); loop {
let (stream, _) = listener.accept().await?; match self.do_accept().await {
Ok(ret) => return Ok(ret),
if let Err(e) = stream.set_nodelay(true) { Err(e) => {
tracing::warn!(?e, "set_nodelay fail in accept"); use std::io::ErrorKind::*;
if matches!(
e.kind(),
NotConnected | ConnectionAborted | ConnectionRefused | ConnectionReset
) {
tracing::warn!(?e, "accept fail with retryable error: {:?}", e);
continue;
}
tracing::warn!(?e, "accept fail");
return Err(e.into());
}
}
} }
let info = TunnelInfo {
tunnel_type: "tcp".to_owned(),
local_addr: Some(self.local_url().into()),
remote_addr: Some(
super::build_url_from_socket_addr(&stream.peer_addr()?.to_string(), "tcp").into(),
),
};
let (r, w) = stream.into_split();
Ok(Box::new(TunnelWrapper::new(
FramedReader::new(r, TCP_MTU_BYTES),
FramedWriter::new(w),
Some(info),
)))
} }
fn local_url(&self) -> url::Url { fn local_url(&self) -> url::Url {