Compare commits

...

22 Commits

Author SHA1 Message Date
Luna Yao 79b562cdc9 drop peer_mgr in time (#2064) 2026-04-06 11:31:05 +08:00
fanyang e3f089251c fix(ospf): mitigate route sync storm under connection flapping (#2063)
Addresses issue #2016 where nodes behind unstable networks
(e.g. campus firewalls) cause excessive traffic that can freeze
the remote node.

Two changes in peer_ospf_route.rs:

- Make do_sync_route_info only trigger reverse sync_now when
  incoming data actually changed the route table or foreign
  network state.  The previous unconditional sync_now created
  an A->B->A->B ping-pong cycle on every RPC exchange.

- Add exponential backoff (50ms..5s) to session_task retry loop.
  The previous fixed 50ms retry produced ~20 RPCs/s during
  sustained network instability.
2026-04-06 11:26:20 +08:00
fanyang cf6dcbc054 Fix IPv6 TCP tunnel display formatting (#1980)
Normalize composite tunnel display values before rendering peer and
debug output so IPv6 tunnel types no longer append `6` to the port.

- Preserve prefixes like `txt-` while converting tunnel schemes to
  their IPv6 form.
- Recover malformed values such as `txt-tcp://...:110106` into
  `txt-tcp6://...:11010`.
- Reuse the normalized remote address display in CLI debug output.
2026-04-05 22:12:55 +08:00
fanyang 2cf2b0fcac feat(cli): implement connector add/remove, drop peer stubs (#2058)
Implement the previously stubbed connector add/remove CLI commands
using PatchConfig RPC with InstanceConfigPatch.connectors, and
remove the peer add/remove stubs that had incorrect semantics.
2026-04-05 13:56:17 +08:00
dependabot[bot] aa0cca3bb6 build(deps): bump quinn-proto in /easytier-contrib/easytier-ohrs (#2059)
Bumps [quinn-proto](https://github.com/quinn-rs/quinn) from 0.11.13 to 0.11.14.
- [Release notes](https://github.com/quinn-rs/quinn/releases)
- [Commits](https://github.com/quinn-rs/quinn/compare/quinn-proto-0.11.13...quinn-proto-0.11.14)

---
updated-dependencies:
- dependency-name: quinn-proto
  dependency-version: 0.11.14
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-04-05 13:16:33 +08:00
KKRainbow fb59f01058 fix: reconcile webhook-managed configs and make disable_p2p more intelligent (#2057)
* reconcile infra configs on webhook validate
* make disable_p2p more intelligent
* fix stats
2026-04-04 23:41:57 +08:00
Luna Yao e91a0da70a refactor: listener/connector protocol abstraction (#2026)
* fix listener protocol detection
* replace IpProtocol with IpNextHeaderProtocol
* use an enum to gather all listener schemes
* rename ListenerScheme to TunnelScheme; replace IpNextHeaderProtocols with socket2::Protocol
* move TunnelScheme to tunnel
* add IpScheme, simplify connector creation
* format; fix some typos; remove check_scheme_...;
* remove PROTO_PORT_OFFSET
* rename WSTunnel.. -> WsTunnel.., DNSTunnel.. -> DnsTunnel..
2026-04-04 10:55:58 +08:00
Luna Yao 9cc617ae4c ci: build rpm package (#2044)
* add rpm to ci
* rename build_filter to build-filter
* use prepare-pnpm action
2026-04-04 10:32:08 +08:00
韩嘉乐 e4b0f1f1bb Rename libeasytier_ohrs.so to libeasytier_release.so when build release package (#2056)
Rename shared library file for release.
2026-04-04 10:29:37 +08:00
Luna Yao 443c3ca0b3 fix: append address of reverse proxy to remote_addr (#2034)
* append address of reverse proxy to remote_addr
* validate proxy address in test
2026-03-30 16:48:23 +08:00
Luna Yao 55a0e5952c chore: use cfg_aliases for mobile (#2033) 2026-03-30 16:38:39 +08:00
KKRainbow 1dff388717 bump version to v2.6.0 (#2039) 2026-03-30 15:50:07 +08:00
Luna Yao 61c741f887 add BoxExt trait (#2036) 2026-03-30 13:25:53 +08:00
ParkGarden 01dd9a05c3 fix: 重构了 Magisk 模块的 easytier_core.sh, action.sh, uninstall.sh 三个脚本的逻辑,优化参数解析与进程管理,调整措辞 (#1964) 2026-03-30 13:18:42 +08:00
KKRainbow 8c19a2293c fix(windows): avoid pnet interface enumeration panic (#2031) 2026-03-29 23:16:44 +08:00
KKRainbow a1bec48dc9 fix android vpn permission grant (#2023)
* fix android vpn permission grant
* fix url input behaviour
2026-03-29 23:16:32 +08:00
KKRainbow 7e289865b2 fix(faketcp): avoid pnet interface lookup on windows (#2029) 2026-03-29 19:26:29 +08:00
fanyang 742c7edd57 fix: use default connection loss rate for peer stats (#2030) 2026-03-29 19:25:25 +08:00
Luna Yao b71a2889ef suppress clippy warnings when no feature flags are enabled (#2028) 2026-03-29 11:02:23 +08:00
KKRainbow bcd75d6ce3 Add instance recv limiter in peer conn (#2027) 2026-03-29 10:28:02 +08:00
Luna Yao d4c1b0e867 fix: read X-Forwarded-For from HTTP header of WS/WSS (#2019) 2026-03-28 22:20:46 +08:00
KKRainbow b037ea9c3f Relax private mode foreign network secret checks (#2022) 2026-03-28 22:19:23 +08:00
94 changed files with 3342 additions and 1224 deletions
+1 -1
View File
@@ -23,7 +23,7 @@ runs:
if: ${{ inputs.web == 'true' }}
uses: ./.github/actions/prepare-pnpm
with:
build_filter: './easytier-web/*'
build-filter: './easytier-web/*'
- name: Install GUI dependencies (Used by clippy)
if: ${{ inputs.gui == 'true' }}
+13 -7
View File
@@ -3,20 +3,21 @@ author: Luna
description: 'Setup Node.js, pnpm, and install dependencies'
inputs:
build_filter:
build-filter:
description: 'The filter argument for pnpm build (e.g. ./easytier-web/*)'
required: true
required: false
default: ''
runs:
using: "composite"
steps:
- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
node-version: 22
- name: Install pnpm
uses: pnpm/action-setup@v4
uses: pnpm/action-setup@v5
with:
version: 10
run_install: false
@@ -27,7 +28,7 @@ runs:
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
@@ -38,5 +39,10 @@ runs:
shell: bash
run: |
pnpm -r install
echo "Building with filter: ${{ inputs.build_filter }}"
pnpm -r --filter "${{ inputs.build_filter }}" build
if [ -n "${{ inputs.build-filter }}" ]; then
echo "Building with filter: ${{ inputs.build-filter }}"
pnpm -r --filter "${{ inputs.build-filter }}" build
else
echo "No build filter provided, building all packages"
pnpm -r build
fi
+9 -32
View File
@@ -36,38 +36,15 @@ jobs:
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v5
- uses: actions/setup-node@v4
- name: Setup Frontend Environment
uses: ./.github/actions/prepare-pnpm
with:
node-version: 22
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r --filter "./easytier-web/*" build
build-filter: './easytier-web/*'
- name: Archive artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: easytier-web-dashboard
path: |
@@ -142,7 +119,7 @@ jobs:
- build_web
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v5
- name: Set current ref as env variable
run: |
@@ -290,7 +267,7 @@ jobs:
rm -rf ./artifacts/objects/
- name: Archive artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: easytier-${{ matrix.ARTIFACT_NAME }}
path: |
@@ -317,7 +294,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v4 # 必须先检出代码才能获取模块配置
uses: actions/checkout@v5 # 必须先检出代码才能获取模块配置
# 下载二进制文件到独立目录
- name: Download Linux aarch64 binaries
@@ -337,7 +314,7 @@ jobs:
# 上传生成的模块
- name: Upload Magisk Module
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: Easytier-Magisk
path: |
+2 -2
View File
@@ -11,7 +11,7 @@ on:
image_tag:
description: 'Tag for this image build'
type: string
default: 'v2.5.0'
default: 'v2.6.0'
required: true
mark_latest:
description: 'Mark this image as latest'
@@ -31,7 +31,7 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
-
name: Validate inputs
run: |
+12 -30
View File
@@ -78,7 +78,7 @@ jobs:
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v5
- name: Install GUI dependencies (x86 only)
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
@@ -119,37 +119,18 @@ jobs:
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
- name: Install rpm package (Linux target only)
if: ${{ contains(matrix.TARGET, '-linux-') }}
run: |
sudo apt update
sudo apt install -y rpm
- name: Set current ref as env variable
run: |
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV
- uses: actions/setup-node@v4
with:
node-version: 22
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r build
- name: Setup Frontend Environment
uses: ./.github/actions/prepare-pnpm
- uses: Swatinem/rust-cache@v2
with:
@@ -184,7 +165,7 @@ jobs:
with:
projectPath: ./easytier-gui
# https://tauri.app/v1/guides/building/linux/#cross-compiling-tauri-applications-for-arm-based-devices
args: --verbose --target ${{ matrix.GUI_TARGET }} ${{ matrix.OS == 'ubuntu-22.04' && contains(matrix.TARGET, 'aarch64') && '--bundles deb' || '' }}
args: --verbose --target ${{ matrix.GUI_TARGET }} ${{ contains(matrix.TARGET, '-linux-') && contains(matrix.TARGET, 'aarch64') && '--bundles deb,rpm' || '' }}
- name: Compress
run: |
@@ -202,6 +183,7 @@ jobs:
mv ./target/$GUI_TARGET/release/bundle/dmg/*.dmg ./artifacts/objects/
elif [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^mips.*$ ]]; then
mv ./target/$GUI_TARGET/release/bundle/deb/*.deb ./artifacts/objects/
mv ./target/$GUI_TARGET/release/bundle/rpm/*.rpm ./artifacts/objects/
if [[ $GUI_TARGET =~ ^x86_64.*$ ]]; then
# currently only x86 appimage is supported
mv ./target/$GUI_TARGET/release/bundle/appimage/*.AppImage ./artifacts/objects/
@@ -212,7 +194,7 @@ jobs:
rm -rf ./artifacts/objects/
- name: Archive artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: easytier-gui-${{ matrix.ARTIFACT_NAME }}
path: |
+4 -29
View File
@@ -47,7 +47,7 @@ jobs:
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v5
- name: Set current ref as env variable
run: |
@@ -70,33 +70,8 @@ jobs:
echo "$ANDROID_HOME/ndk/26.0.10792818/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH
echo "NDK_HOME=$ANDROID_HOME/ndk/26.0.10792818/" > $GITHUB_ENV
- uses: actions/setup-node@v4
with:
node-version: 22
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r build
- name: Setup Frontend Environment
uses: ./.github/actions/prepare-pnpm
- uses: Swatinem/rust-cache@v2
with:
@@ -138,7 +113,7 @@ jobs:
rm -rf ./artifacts/objects/
- name: Archive artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: easytier-gui-${{ matrix.ARTIFACT_NAME }}
path: |
+1 -1
View File
@@ -16,7 +16,7 @@ jobs:
check-full-shell:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Install Nix
uses: cachix/install-nix-action@v27
+6 -3
View File
@@ -22,7 +22,7 @@ jobs:
cargo_fmt_check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: fmt check
working-directory: ./easytier-contrib/easytier-ohrs
run: |
@@ -52,7 +52,7 @@ jobs:
OHPM_PUBLISH_CODE: ${{ secrets.OHPM_PUBLISH_CODE }}
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Install dependencies
run: |
sudo apt-get update
@@ -174,11 +174,14 @@ jobs:
jq --arg v "$TAG_VERSION" '.name = "easytier-release" | .version = $v' oh-package.json5 > oh-package.tmp.json5 && mv oh-package.tmp.json5 oh-package.json5
cd ..
ohrs build --release --arch aarch
cd dist/arm64-v8a
mv libeasytier_ohrs.so libeasytier_release.so
cd ../..
ohrs artifact
mv package.har easytier-release.har
- name: Upload artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: easytier-ohos
path: |
+2 -2
View File
@@ -18,7 +18,7 @@ on:
version:
description: 'Version for this release'
type: string
default: 'v2.5.0'
default: 'v2.6.0'
required: true
make_latest:
description: 'Mark this release as latest'
@@ -35,7 +35,7 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Download Core Artifact
uses: dawidd6/action-download-artifact@v11
+12 -5
View File
@@ -38,7 +38,7 @@ jobs:
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v5
- name: Prepare build environment
uses: ./.github/actions/prepare-build
@@ -56,6 +56,13 @@ jobs:
- uses: taiki-e/install-action@cargo-hack
- name: Check Cargo.lock is up to date
run: |
if ! cargo metadata --format-version 1 --locked --no-deps > /dev/null; then
echo "::error::Cargo.lock is out of date. Run cargo generate-lockfile or cargo build locally, then commit Cargo.lock."
exit 1
fi
- name: Check formatting
run: cargo fmt --all -- --check
@@ -72,7 +79,7 @@ jobs:
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v5
- name: Prepare build environment
uses: ./.github/actions/prepare-build
@@ -88,7 +95,7 @@ jobs:
- name: Archive test
run: cargo nextest archive --archive-file tests.tar.zst --package easytier --features full
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v5
with:
name: tests
path: tests.tar.zst
@@ -112,7 +119,7 @@ jobs:
- name: "three_node::subnet_proxy_three_node_test"
opts: "-E 'test(subnet_proxy_three_node_test)' --test-threads 1 --no-fail-fast"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v5
- name: Setup tools for test
run: sudo apt install bridge-utils
@@ -144,4 +151,4 @@ jobs:
steps:
- name: Mark result as failed
if: needs.test_matrix.result != 'success'
run: exit 1
run: exit 1
Generated
+48 -10
View File
@@ -2156,7 +2156,7 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]]
name = "easytier"
version = "2.5.0"
version = "2.6.0"
dependencies = [
"aes-gcm",
"anyhow",
@@ -2175,6 +2175,7 @@ dependencies = [
"byteorder",
"bytes",
"cfg-if",
"cfg_aliases 0.2.1",
"chrono",
"cidr",
"clap",
@@ -2191,6 +2192,7 @@ dependencies = [
"easytier-rpc-build",
"encoding",
"flume 0.12.0",
"forwarded-header-value",
"futures",
"futures-util",
"gethostname 0.5.0",
@@ -2208,6 +2210,7 @@ dependencies = [
"humantime-serde",
"idna 1.0.3",
"indoc",
"itertools 0.14.0",
"kcp-sys",
"machine-uid",
"maplit",
@@ -2326,7 +2329,7 @@ dependencies = [
[[package]]
name = "easytier-gui"
version = "2.5.0"
version = "2.6.0"
dependencies = [
"anyhow",
"async-trait",
@@ -2406,7 +2409,7 @@ dependencies = [
[[package]]
name = "easytier-web"
version = "2.5.0"
version = "2.6.0"
dependencies = [
"anyhow",
"async-trait",
@@ -2920,6 +2923,16 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "forwarded-header-value"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9"
dependencies = [
"nonempty",
"thiserror 1.0.63",
]
[[package]]
name = "fragile"
version = "2.0.1"
@@ -3770,7 +3783,7 @@ dependencies = [
"rustls-pki-types",
"unicase",
"webpki",
"webpki-roots",
"webpki-roots 0.26.3",
"zeroize",
]
@@ -3847,7 +3860,7 @@ dependencies = [
"tokio",
"tokio-rustls",
"tower-service",
"webpki-roots",
"webpki-roots 0.26.3",
]
[[package]]
@@ -4319,6 +4332,15 @@ dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.11"
@@ -5205,6 +5227,12 @@ dependencies = [
"minimal-lexical",
]
[[package]]
name = "nonempty"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7"
[[package]]
name = "normpath"
version = "1.3.0"
@@ -7137,7 +7165,7 @@ dependencies = [
"wasm-bindgen-futures",
"wasm-streams",
"web-sys",
"webpki-roots",
"webpki-roots 0.26.3",
"windows-registry",
]
@@ -8460,7 +8488,7 @@ dependencies = [
"tracing",
"url",
"uuid",
"webpki-roots",
"webpki-roots 0.26.3",
]
[[package]]
@@ -9558,9 +9586,9 @@ dependencies = [
[[package]]
name = "tokio-websockets"
version = "0.8.3"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "842e11addde61da7c37ef205cd625ebcd7b607076ea62e4698f06bfd5fd01a03"
checksum = "dad543404f98bfc969aeb71994105c592acfc6c43323fddcd016bb208d1c65cb"
dependencies = [
"base64 0.22.1",
"bytes",
@@ -9571,10 +9599,11 @@ dependencies = [
"httparse",
"ring",
"rustls-pki-types",
"simdutf8",
"tokio",
"tokio-rustls",
"tokio-util",
"webpki-roots",
"webpki-roots 1.0.6",
]
[[package]]
@@ -10675,6 +10704,15 @@ dependencies = [
"rustls-pki-types",
]
[[package]]
name = "webpki-roots"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed"
dependencies = [
"rustls-pki-types",
]
[[package]]
name = "webview2-com"
version = "0.38.0"
+3 -3
View File
@@ -108,9 +108,9 @@ After successful execution, you can check the network status using `easytier-cli
```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.5.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.5.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.5.0-70e69a38~ |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.6.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.6.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.6.0-70e69a38~ |
```
You can test connectivity between nodes:
+3 -3
View File
@@ -108,9 +108,9 @@ sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<共享
```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.5.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.5.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.5.0-70e69a38~ |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.6.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.6.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.6.0-70e69a38~ |
```
您可以测试节点之间的连通性:
+57 -26
View File
@@ -1,43 +1,74 @@
#!/data/adb/magisk/busybox sh
MODDIR=${0%/*}
MODULE_PROP="${MODDIR}/module.prop"
IP_RULE_SCRIPT="${MODDIR}/hotspot_iprule.sh"
ET_STATUS=""
REDIR_STATUS=""
# 更新module.prop文件中的description
IS_RUNNING=false
# 确保辅助脚本有执行权限
chmod +x "${IP_RULE_SCRIPT}" 2>/dev/null
# 更新 module.prop 文件中的 description
update_module_description() {
local status_message=$1
sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP}
# 检查 module.prop 文件存在且 description 发生变化了再写入
if [ -f "${MODULE_PROP}" ]; then
local current_desc=$(grep "^description=" "${MODULE_PROP}")
local new_desc="description=[状态] ${status_message}"
if [ "${current_desc}" != "${new_desc}" ]; then
sed -i "s#^description=.*#${new_desc}#" "${MODULE_PROP}"
fi
fi
}
# 判断程序启动状态
if [ -f "${MODDIR}/disable" ]; then
ET_STATUS="已关闭"
elif pgrep -f 'easytier-core' >/dev/null; then
if [ -f "${MODDIR}/config/command_args"]; then
ET_STATUS="主程序已开启(启动参数模式)"
IS_RUNNING=false
ET_STATUS="主程序已关闭"
elif pgrep -f "${MODDIR}/easytier-core" >/dev/null; then
IS_RUNNING=true
if [ -f "${MODDIR}/config/command_args" ]; then
ET_STATUS="主程序正在运行(启动参数模式)"
else
ET_STATUS="主程序已开启(配置文件模式)"
        ET_STATUS="主程序正在运行(配置文件模式)"
fi
elif [ -z "$ET_STATUS" ]; then
# 既没 disable 也没运行,说明是异常停止或未启动
ET_STATUS="主程序启动失败或未运行"
fi
#ET_STATUS不存在说明开启模块未正常运行,不修改状态
if [ -n "$ET_STATUS" ]; then
if [ -f "${MODDIR}/enable_IP_rule" ]; then
rm -f "${MODDIR}/enable_IP_rule"
${MODDIR}/hotspot_iprule.sh del
REDIR_STATUS="转发已禁用"
echo "热点子网转发已禁用"
echo "[ET-NAT] IP rule disabled." >> "${MODDIR}/log.log"
else
touch "${MODDIR}/enable_IP_rule"
${MODDIR}/hotspot_iprule.sh del
${MODDIR}/hotspot_iprule.sh add_once
REDIR_STATUS="转发已激活"
echo "热点子网转发已激活,热点开启后将自动将热点加入转发网络(要求已配置本地网络cidr=参数)。转发规则将随着热点开关而自动开关。该状态将保持到转发被禁用为止。"
echo "[ET-NAT] IP rule enabled." >> "${MODDIR}/log.log"
fi
update_module_description "${ET_STATUS} | ${REDIR_STATUS}"
# 无论主程序是否运行,都允许切换“开关文件”的状态,以便下次生效
if [ -f "${MODDIR}/enable_IP_rule" ]; then
rm -f "${MODDIR}/enable_IP_rule"
"${IP_RULE_SCRIPT}" del >/dev/null 2>&1
REDIR_STATUS="转发已禁用"
echo "热点子网转发已禁用"
echo "[ET-NAT] Action: IP rule disabled." >> "${MODDIR}/log.log"
else
echo "主程序未正常启动,请先检查配置文件"
touch "${MODDIR}/enable_IP_rule"
if [ "$IS_RUNNING" = true ]; then
"${IP_RULE_SCRIPT}" del >/dev/null 2>&1
"${IP_RULE_SCRIPT}" add_once
echo "转发规则将立即生效,无需重启"
else
echo "主程序未运行,转发规则将在下次启动时生效"
fi
REDIR_STATUS="转发已激活"
echo "----------------------------------"
echo "热点子网转发已激活"
echo "热点开启后将自动将热点加入转发网络"
echo "需要在配置中提前配置好 cidr 参数"
echo "----------------------------------"
echo "[ET-NAT] Action: IP rule enabled." >> "${MODDIR}/log.log"
fi
sync
update_module_description "${ET_STATUS} | ${REDIR_STATUS}"
+12 -9
View File
@@ -5,12 +5,15 @@ LATESTARTSERVICE=true
set_perm_recursive $MODPATH 0 0 0777 0777
ui_print '安装完成'
ui_print '当前架构为' + $ARCH
ui_print '当前系统版本为' + $API
ui_print '安装目录为: /data/adb/modules/easytier_magisk'
ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.toml'
ui_print '如果需要自定义启动参数,可将 /data/adb/modules/easytier_magisk/config/command_args_sample 重命名为 command_args,并修改其中内容,使用自定义启动参数时会忽略配置文件'
ui_print '修改配置文件后在magisk app禁用应用再启动即可生效'
ui_print '点击操作按钮可启动/关闭热点子网转发,配合easytier的子网代理功能实现手机热点访问easytier网络'
ui_print '记得重启'
ui_print "系统架构为:$ARCH"
ui_print "系统 SDK 版本:$API"
ui_print "EasyTier 安装位置:/data/adb/modules/easytier_magisk"
ui_print "配置文件位置:/data/adb/modules/easytier_magisk/config/config.toml"
ui_print "如需使用启动参数模式,请将 /data/adb/modules/easytier_magisk/config/command_args_sample 重命名为 command_args,并修改其中的内容"
ui_print "config 目录中存在 command_args 文件时,模块会自动忽略 config.toml 文件"
ui_print "----------------------------------"
ui_print "注意!启动参数文件中不能存在 \" 和 ',配置文件则没有这个限制"
ui_print "----------------------------------"
ui_print "修改配置后无需重启设备,在 Magisk 中禁用 EasyTier 模块,等待 10 秒后重新启用即可让新配置生效"
ui_print "点击 Magisk 中模块左下角的“操作”按钮可以禁用或激活热点子网转发,使用该功能前需要在配置中提前配置好 cidr 参数"
ui_print "模块安装完成,重启设备生效"
@@ -2,64 +2,111 @@
MODDIR=${0%/*}
CONFIG_FILE="${MODDIR}/config/config.toml"
COMMAND_ARGS="${MODDIR}/config/command_args"
LOG_FILE="${MODDIR}/log.log"
MODULE_PROP="${MODDIR}/module.prop"
EASYTIER="${MODDIR}/easytier-core"
# 处理获取到的设备型号中可能出现的空格
BRAND=$(getprop ro.product.brand | tr ' ' '-')
MODEL=$(getprop ro.product.model | tr ' ' '-')
DEVICE_HOSTNAME="${BRAND}-${MODEL}"
REDIR_STATUS=""
# 更新module.prop文件中的description
# 更新 module.prop 文件中的 description
update_module_description() {
local status_message=$1
sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP}
# 检查 module.prop 文件存在且 description 发生变化了再写入
if [ -f "${MODULE_PROP}" ]; then
local current_desc=$(grep "^description=" "${MODULE_PROP}")
local new_desc="description=[状态] ${status_message}"
if [ "${current_desc}" != "${new_desc}" ]; then
sed -i "s#^description=.*#${new_desc}#" "${MODULE_PROP}"
fi
fi
}
if [ -f "${MODDIR}/enable_IP_rule" ]; then
REDIR_STATUS="转发已激活"
else
REDIR_STATUS="转发已禁用"
fi
# 检查并初始化 TUN 设备
if [ ! -e /dev/net/tun ]; then
if [ ! -d /dev/net ]; then
mkdir -p /dev/net
fi
ln -s /dev/tun /dev/net/tun
fi
while true; do
if ls $MODDIR | grep -q "disable"; then
update_module_description "关闭中 | ${REDIR_STATUS}"
if pgrep -f 'easytier-core' >/dev/null; then
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭 ..."
pkill easytier-core # 关闭进程
fi
# 获取子网转发激活状态
if [ -f "${MODDIR}/enable_IP_rule" ]; then
REDIR_STATUS="转发已激活"
else
if ! pgrep -f 'easytier-core' >/dev/null; then
if [ ! -f "$CONFIG_FILE" ]; then
update_module_description "config.toml不存在"
sleep 3s
continue
fi
REDIR_STATUS="转发已禁用"
fi
# 如果 config 目录下存在 command_args 文件,则读取其中的内容作为启动参数
if [ -f "${MODDIR}/config/command_args" ]; then
TZ=Asia/Shanghai ${EASYTIER} $(cat ${MODDIR}/config/command_args) --hostname "$(getprop ro.product.brand)-$(getprop ro.product.model)" > ${LOG_FILE} &
sleep 5s # 等待easytier-core启动完成
update_module_description "主程序已开启(启动参数模式) | ${REDIR_STATUS}"
else
TZ=Asia/Shanghai ${EASYTIER} -c ${CONFIG_FILE} --hostname "$(getprop ro.product.brand)-$(getprop ro.product.model)" > ${LOG_FILE} &
sleep 5s # 等待easytier-core启动完成
update_module_description "主程序已开启(配置文件模式) | ${REDIR_STATUS}"
fi
ip rule add from all lookup main
if ! pgrep -f 'easytier-core' >/dev/null; then
update_module_descriptio "主程序启动失败,请检查配置文件"
fi
else
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在"
# 检查模块是否被禁用
if [ -f "${MODDIR}/disable" ]; then
update_module_description "主程序已关闭 | ${REDIR_STATUS}"
if pgrep -f "${EASYTIER}" >/dev/null; then
echo "开关控制 $(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭"
pkill -f "${EASYTIER}"
fi
sleep 10s
continue
fi
sleep 3s # 暂停3秒后再次执行循环
done
# 检查进程是否已经在运行
if pgrep -f "${EASYTIER}" >/dev/null; then
sleep 10s
continue
fi
# 检查配置文件是否存在
if [ ! -f "${CONFIG_FILE}" ] && [ ! -f "${COMMAND_ARGS}" ]; then
update_module_description "缺少配置文件或启动参数文件"
sleep 10s
continue
fi
# 如果 config 目录下存在 command_args 文件,则读取其中的内容作为启动参数
if [ -f "${COMMAND_ARGS}" ]; then
# 启动参数模式
CMD_CONTENT=$(tr '\r\n' ' ' < "${COMMAND_ARGS}")
if echo "${CMD_CONTENT}" | grep -q "\-\-hostname"; then
FINAL_ARGS="${CMD_CONTENT}"
else
FINAL_ARGS="${CMD_CONTENT} --hostname ${DEVICE_HOSTNAME}"
fi
TZ=Asia/Shanghai "${EASYTIER}" ${FINAL_ARGS} > "${LOG_FILE}" 2>&1 &
STR_MODE="启动参数模式"
# 否则读取 config.toml 的内容作为启动参数
else
# 配置文件模式
if grep -q "^[[:space:]]*hostname[[:space:]]*=" "${CONFIG_FILE}"; then
TZ=Asia/Shanghai "${EASYTIER}" -c "${CONFIG_FILE}" > "${LOG_FILE}" 2>&1 &
else
TZ=Asia/Shanghai "${EASYTIER}" -c "${CONFIG_FILE}" --hostname "${DEVICE_HOSTNAME}" > "${LOG_FILE}" 2>&1 &
fi
STR_MODE="配置文件模式"
fi
# 等待进程启动
sleep 5s
# 启动后的扫尾工作
if pgrep -f "${EASYTIER}" >/dev/null; then
if ! ip rule show | grep -q "lookup main"; then
ip rule add from all lookup main
fi
        update_module_description "主程序正在运行(${STR_MODE}) | ${REDIR_STATUS}"
else
update_module_description "主程序启动失败,请检查配置文件或启动参数"
fi
sleep 10s
done
+1 -1
View File
@@ -1,6 +1,6 @@
id=easytier_magisk
name=EasyTier_Magisk
version=v2.5.0
version=v2.6.0
versionCode=1
author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
@@ -1,3 +1,5 @@
MODDIR=${0%/*}
pkill easytier-core # 结束 easytier-core 进程
rm -rf $MODDIR/*
pkill -f "${MODDIR}/easytier-core"
# 使用 ${MODDIR:?} 确保变量非空,避免执行 rm -rf /*
rm -rf "${MODDIR:?}/"*
+153 -7
View File
@@ -1083,7 +1083,7 @@ checksum = "7454e41ff9012c00d53cf7f475c5e3afa3b91b7c90568495495e8d9bf47a1055"
[[package]]
name = "easytier"
version = "2.5.0"
version = "2.6.0"
dependencies = [
"anyhow",
"arc-swap",
@@ -1101,6 +1101,7 @@ dependencies = [
"byteorder",
"bytes",
"cfg-if",
"cfg_aliases",
"chrono",
"cidr",
"clap",
@@ -1115,6 +1116,7 @@ dependencies = [
"easytier-rpc-build",
"encoding",
"flume",
"forwarded-header-value",
"futures",
"gethostname",
"git-version",
@@ -1131,6 +1133,7 @@ dependencies = [
"humantime-serde",
"idna",
"indoc",
"itertools 0.14.0",
"kcp-sys",
"machine-uid",
"multimap",
@@ -1153,7 +1156,9 @@ dependencies = [
"prost-build",
"prost-reflect",
"prost-reflect-build",
"prost-types",
"prost-wkt",
"prost-wkt-build",
"prost-wkt-types",
"quinn",
"quinn-plaintext",
"rand 0.8.5",
@@ -1173,6 +1178,7 @@ dependencies = [
"smoltcp",
"snow",
"socket2 0.5.10",
"strum",
"stun_codec",
"sys-locale",
"tabled",
@@ -1354,6 +1360,17 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "erased-serde"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2add8a07dd6a8d93ff627029c51de145e12686fbc36ecb298ac22e74cf02dec"
dependencies = [
"serde",
"serde_core",
"typeid",
]
[[package]]
name = "errno"
version = "0.3.14"
@@ -1471,6 +1488,16 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "forwarded-header-value"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9"
dependencies = [
"nonempty",
"thiserror 1.0.69",
]
[[package]]
name = "futures"
version = "0.3.31"
@@ -2217,6 +2244,15 @@ dependencies = [
"generic-array",
]
[[package]]
name = "inventory"
version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4f0c30c76f2f4ccee3fe55a2435f691ca00c0e4bd87abe4f4a851b1d4dac39b"
dependencies = [
"rustversion",
]
[[package]]
name = "io-uring"
version = "0.7.10"
@@ -2824,6 +2860,12 @@ dependencies = [
"minimal-lexical",
]
[[package]]
name = "nonempty"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7"
[[package]]
name = "normpath"
version = "1.5.0"
@@ -3413,6 +3455,52 @@ dependencies = [
"prost",
]
[[package]]
name = "prost-wkt"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497e1e938f0c09ef9cabe1d49437b4016e03e8f82fbbe5d1c62a9b61b9decae1"
dependencies = [
"chrono",
"inventory",
"prost",
"serde",
"serde_derive",
"serde_json",
"typetag",
]
[[package]]
name = "prost-wkt-build"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07b8bf115b70a7aa5af1fd5d6e9418492e9ccb6e4785e858c938e28d132a884b"
dependencies = [
"heck 0.5.0",
"prost",
"prost-build",
"prost-types",
"quote",
]
[[package]]
name = "prost-wkt-types"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8cdde6df0a98311c839392ca2f2f0bcecd545f86a62b4e3c6a49c336e970fe5"
dependencies = [
"chrono",
"prost",
"prost-build",
"prost-types",
"prost-wkt",
"prost-wkt-build",
"regex",
"serde",
"serde_derive",
"serde_json",
]
[[package]]
name = "quick-xml"
version = "0.38.3"
@@ -3456,9 +3544,9 @@ dependencies = [
[[package]]
name = "quinn-proto"
version = "0.11.13"
version = "0.11.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098"
dependencies = [
"bytes",
"fastbloom",
@@ -4136,6 +4224,12 @@ version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
[[package]]
name = "simdutf8"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
[[package]]
name = "siphasher"
version = "1.0.1"
@@ -4225,6 +4319,27 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "strum"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
dependencies = [
"strum_macros",
]
[[package]]
name = "strum_macros"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7"
dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "stun_codec"
version = "0.3.5"
@@ -4575,9 +4690,9 @@ dependencies = [
[[package]]
name = "tokio-websockets"
version = "0.8.3"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "842e11addde61da7c37ef205cd625ebcd7b607076ea62e4698f06bfd5fd01a03"
checksum = "dad543404f98bfc969aeb71994105c592acfc6c43323fddcd016bb208d1c65cb"
dependencies = [
"base64 0.22.1",
"bytes",
@@ -4588,10 +4703,11 @@ dependencies = [
"httparse",
"ring",
"rustls-pki-types",
"simdutf8",
"tokio",
"tokio-rustls",
"tokio-util",
"webpki-roots 0.26.11",
"webpki-roots 1.0.2",
]
[[package]]
@@ -4823,12 +4939,42 @@ dependencies = [
"wintun",
]
[[package]]
name = "typeid"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c"
[[package]]
name = "typenum"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
[[package]]
name = "typetag"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be2212c8a9b9bcfca32024de14998494cf9a5dfa59ea1b829de98bac374b86bf"
dependencies = [
"erased-serde",
"inventory",
"once_cell",
"serde",
"typetag-impl",
]
[[package]]
name = "typetag-impl"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27a7a9b72ba121f6f1f6c3632b85604cac41aedb5ddc70accbebb6cac83de846"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "unicase"
version = "2.8.1"
+1 -1
View File
@@ -1,7 +1,7 @@
{
"name": "easytier-gui",
"type": "module",
"version": "2.5.0",
"version": "2.6.0",
"private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": {
+1 -1
View File
@@ -1,6 +1,6 @@
[package]
name = "easytier-gui"
version = "2.5.0"
version = "2.6.0"
description = "EasyTier GUI"
authors = ["you"]
edition = "2021"
@@ -36,6 +36,7 @@
"core:tray:allow-set-show-menu-on-left-click",
"core:tray:allow-set-tooltip",
"vpnservice:allow-ping",
"vpnservice:allow-get-vpn-status",
"vpnservice:allow-prepare-vpn",
"vpnservice:allow-start-vpn",
"vpnservice:allow-stop-vpn",
@@ -47,4 +48,4 @@
"os:allow-platform",
"os:allow-locale"
]
}
}
+30 -9
View File
@@ -206,6 +206,16 @@ async fn update_network_config_state(
.parse()
.map_err(|e: uuid::Error| e.to_string())?;
let client_manager = get_client_manager!()?;
if !disabled {
let cfg = client_manager
.handle_get_network_config(app.clone(), instance_id)
.await
.map_err(|e| e.to_string())?;
let toml_config = cfg.gen_config().map_err(|e| e.to_string())?;
client_manager
.pre_run_network_instance_hook(&app, &toml_config)
.await?;
}
client_manager
.handle_update_network_state(app.clone(), instance_id, disabled)
.await
@@ -215,6 +225,10 @@ async fn update_network_config_state(
client_manager
.post_stop_network_instances_hook(&app)
.await?;
} else {
client_manager
.post_run_network_instance_hook(&app, &instance_id)
.await?;
}
Ok(())
@@ -830,7 +844,7 @@ mod manager {
cfg: &easytier::common::config::TomlConfigLoader,
) -> Result<(), String> {
let instance_id = cfg.get_id();
app.emit("pre_run_network_instance", instance_id)
app.emit("pre_run_network_instance", instance_id.to_string())
.map_err(|e| e.to_string())?;
#[cfg(target_os = "android")]
@@ -867,20 +881,21 @@ mod manager {
let app_clone = app.clone();
let instance_id_clone = *instance_id;
tokio::spawn(async move {
let instance_id_str = instance_id_clone.to_string();
loop {
match event_receiver.recv().await {
Ok(easytier::common::global_ctx::GlobalCtxEvent::DhcpIpv4Changed(_, _)) => {
let _ = app_clone.emit("dhcp_ip_changed", instance_id_clone);
let _ = app_clone.emit("dhcp_ip_changed", &instance_id_str);
}
Ok(easytier::common::global_ctx::GlobalCtxEvent::ProxyCidrsUpdated(_, _)) => {
let _ = app_clone.emit("proxy_cidrs_updated", instance_id_clone);
let _ = app_clone.emit("proxy_cidrs_updated", &instance_id_str);
}
Ok(_) => {}
Err(tokio::sync::broadcast::error::RecvError::Closed) => {
break;
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => {
let _ = app_clone.emit("event_lagged", instance_id_clone);
let _ = app_clone.emit("event_lagged", &instance_id_str);
event_receiver = event_receiver.resubscribe();
}
}
@@ -892,7 +907,7 @@ mod manager {
self.storage.enabled_networks.insert(*instance_id);
app.emit("post_run_network_instance", instance_id)
app.emit("post_run_network_instance", instance_id.to_string())
.map_err(|e| e.to_string())?;
Ok(())
@@ -971,20 +986,26 @@ mod manager {
.network_configs
.get(&uuid)
.map(|i| i.value().1.clone());
if config.is_none() {
let Some(config) = config else {
continue;
}
};
let toml_config = config.gen_config()?;
self.pre_run_network_instance_hook(&app, &toml_config)
.await
.map_err(|e| anyhow::anyhow!(e))?;
client
.run_network_instance(
BaseController::default(),
RunNetworkInstanceRequest {
inst_id: None,
config,
config: Some(config),
overwrite: false,
},
)
.await?;
self.storage.enabled_networks.insert(uuid);
self.post_run_network_instance_hook(&app, &uuid)
.await
.map_err(|e| anyhow::anyhow!(e))?;
}
}
}
+1 -1
View File
@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false
},
"productName": "easytier-gui",
"version": "2.5.0",
"version": "2.6.0",
"identifier": "com.kkrainbow.easytier",
"plugins": {
"shell": {
+2
View File
@@ -93,6 +93,7 @@ declare global {
const shallowReadonly: typeof import('vue')['shallowReadonly']
const shallowRef: typeof import('vue')['shallowRef']
const storeToRefs: typeof import('pinia')['storeToRefs']
const syncMobileVpnService: typeof import('./composables/mobile_vpn')['syncMobileVpnService']
const toRaw: typeof import('vue')['toRaw']
const toRef: typeof import('vue')['toRef']
const toRefs: typeof import('vue')['toRefs']
@@ -217,6 +218,7 @@ declare module 'vue' {
readonly shallowReadonly: UnwrapRef<typeof import('vue')['shallowReadonly']>
readonly shallowRef: UnwrapRef<typeof import('vue')['shallowRef']>
readonly storeToRefs: UnwrapRef<typeof import('pinia')['storeToRefs']>
readonly syncMobileVpnService: UnwrapRef<typeof import('./composables/mobile_vpn')['syncMobileVpnService']>
readonly toRaw: UnwrapRef<typeof import('vue')['toRaw']>
readonly toRef: UnwrapRef<typeof import('vue')['toRef']>
readonly toRefs: UnwrapRef<typeof import('vue')['toRefs']>
+47 -14
View File
@@ -1,6 +1,7 @@
import { Event, listen } from "@tauri-apps/api/event";
import { type } from "@tauri-apps/plugin-os";
import { NetworkTypes } from "easytier-frontend-lib"
import { Utils } from "easytier-frontend-lib";
const EVENTS = Object.freeze({
SAVE_CONFIGS: 'save_configs',
@@ -17,39 +18,71 @@ function onSaveConfigs(event: Event<NetworkTypes.NetworkConfig[]>) {
localStorage.setItem('networkList', JSON.stringify(event.payload.map((config) => NetworkTypes.normalizeNetworkConfig(config))));
}
async function onPreRunNetworkInstance(event: Event<string>) {
function normalizeInstanceIdPayload(payload: unknown): string {
if (typeof payload === 'string') {
return payload
}
if (payload && typeof payload === 'object') {
const uuid = payload as Partial<Utils.UUID>
if (
typeof uuid.part1 === 'number'
&& typeof uuid.part2 === 'number'
&& typeof uuid.part3 === 'number'
&& typeof uuid.part4 === 'number'
) {
return Utils.UuidToStr(uuid as Utils.UUID)
}
}
if (payload == null) {
return ''
}
const fallback = String(payload)
return fallback === '[object Object]' ? '' : fallback
}
async function onPreRunNetworkInstance(event: Event<unknown>) {
const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.PRE_RUN_NETWORK_INSTANCE}', raw payload:`, event.payload, 'normalized:', instanceId)
if (type() === 'android') {
await prepareVpnService(event.payload);
await prepareVpnService(instanceId);
}
}
async function onPostRunNetworkInstance(event: Event<string>) {
async function onPostRunNetworkInstance(event: Event<unknown>) {
const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.POST_RUN_NETWORK_INSTANCE}', raw payload:`, event.payload, 'normalized:', instanceId)
if (type() === 'android') {
await onNetworkInstanceChange(event.payload);
await onNetworkInstanceChange(instanceId);
}
}
async function onVpnServiceStop(event: Event<string>) {
await onNetworkInstanceChange(event.payload);
async function onVpnServiceStop(event: Event<unknown>) {
console.log(`Received event '${EVENTS.VPN_SERVICE_STOP}', raw payload:`, event.payload)
await syncMobileVpnService();
}
async function onDhcpIpChanged(event: Event<string>) {
console.log(`Received event '${EVENTS.DHCP_IP_CHANGED}' for instance: ${event.payload}`);
async function onDhcpIpChanged(event: Event<unknown>) {
const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.DHCP_IP_CHANGED}' for instance: ${instanceId}`);
if (type() === 'android') {
await onNetworkInstanceChange(event.payload);
await onNetworkInstanceChange(instanceId);
}
}
async function onProxyCidrsUpdated(event: Event<string>) {
console.log(`Received event '${EVENTS.PROXY_CIDRS_UPDATED}' for instance: ${event.payload}`);
async function onProxyCidrsUpdated(event: Event<unknown>) {
const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.PROXY_CIDRS_UPDATED}' for instance: ${instanceId}`);
if (type() === 'android') {
await onNetworkInstanceChange(event.payload);
await onNetworkInstanceChange(instanceId);
}
}
async function onEventLagged(event: Event<string>) {
async function onEventLagged(event: Event<unknown>) {
if (type() === 'android') {
await onNetworkInstanceChange(event.payload);
await onNetworkInstanceChange(normalizeInstanceIdPayload(event.payload));
}
}
+140 -26
View File
@@ -1,7 +1,7 @@
import type { NetworkTypes } from 'easytier-frontend-lib'
import { addPluginListener } from '@tauri-apps/api/core'
import { Utils } from 'easytier-frontend-lib'
import { prepare_vpn, start_vpn, stop_vpn } from 'tauri-plugin-vpnservice-api'
import { get_vpn_status, prepare_vpn, start_vpn, stop_vpn } from 'tauri-plugin-vpnservice-api'
type Route = NetworkTypes.Route
@@ -24,6 +24,53 @@ const curVpnStatus: vpnStatus = {
dns: undefined,
}
async function requestVpnPermission() {
console.log('prepare vpn')
const prepare_ret = await prepare_vpn()
console.log('prepare vpn', JSON.stringify((prepare_ret)))
if (prepare_ret?.errorMsg?.length) {
throw new Error(prepare_ret.errorMsg)
}
const granted = prepare_ret?.granted ?? true
if (!granted) {
console.info('vpn permission request was denied or dismissed')
}
return granted
}
function resetVpnConfigStatus() {
curVpnStatus.ipv4Addr = undefined
curVpnStatus.ipv4Cidr = undefined
curVpnStatus.routes = []
curVpnStatus.dns = undefined
}
function syncVpnStatusFromNative(status: Awaited<ReturnType<typeof get_vpn_status>>) {
curVpnStatus.running = status?.running ?? false
if (!curVpnStatus.running) {
resetVpnConfigStatus()
return
}
const ipv4WithCidr = status?.ipv4Addr
if (ipv4WithCidr?.length) {
const [ipv4Addr, cidr] = ipv4WithCidr.split('/')
curVpnStatus.ipv4Addr = ipv4Addr
const parsedCidr = Number(cidr)
curVpnStatus.ipv4Cidr = Number.isInteger(parsedCidr) ? parsedCidr : undefined
}
else {
curVpnStatus.ipv4Addr = undefined
curVpnStatus.ipv4Cidr = undefined
}
curVpnStatus.routes = [...(status?.routes ?? [])]
curVpnStatus.dns = status?.dns ?? undefined
}
async function waitVpnStatus(target_status: boolean, timeout_sec: number) {
const start_time = Date.now()
while (curVpnStatus.running !== target_status) {
@@ -34,18 +81,19 @@ async function waitVpnStatus(target_status: boolean, timeout_sec: number) {
}
}
async function doStopVpn() {
if (!curVpnStatus.running) {
async function doStopVpn(force = false) {
const wasRunning = curVpnStatus.running
if (!force && !wasRunning) {
return
}
console.log('stop vpn')
const stop_ret = await stop_vpn()
console.log('stop vpn', JSON.stringify((stop_ret)))
await waitVpnStatus(false, 3)
if (wasRunning) {
await waitVpnStatus(false, 3)
}
curVpnStatus.ipv4Addr = undefined
curVpnStatus.routes = []
curVpnStatus.dns = undefined
resetVpnConfigStatus()
}
async function doStartVpn(ipv4Addr: string, cidr: number, routes: string[], dns?: string) {
@@ -54,19 +102,32 @@ async function doStartVpn(ipv4Addr: string, cidr: number, routes: string[], dns?
}
console.log('start vpn service', ipv4Addr, cidr, routes, dns)
const start_ret = await start_vpn({
const request = {
ipv4Addr: `${ipv4Addr}/${cidr}`,
routes,
dns,
disallowedApplications: ['com.kkrainbow.easytier'],
mtu: 1300,
})
}
let start_ret = await start_vpn(request)
console.log('start vpn response', JSON.stringify(start_ret))
if (start_ret?.errorMsg === 'need_prepare') {
const granted = await requestVpnPermission()
if (!granted) {
throw new Error('vpn_permission_denied')
}
start_ret = await start_vpn(request)
console.log('start vpn retry response', JSON.stringify(start_ret))
}
if (start_ret?.errorMsg?.length) {
throw new Error(start_ret.errorMsg)
}
await waitVpnStatus(true, 3)
curVpnStatus.ipv4Addr = ipv4Addr
curVpnStatus.ipv4Cidr = cidr
curVpnStatus.routes = routes
curVpnStatus.dns = dns
}
@@ -75,13 +136,16 @@ async function onVpnServiceStart(payload: any) {
console.log('vpn service start', JSON.stringify(payload))
curVpnStatus.running = true
if (payload.fd) {
setTunFd(payload.fd)
await setTunFd(payload.fd).catch((e) => {
console.error('set tun fd failed', e)
})
}
}
async function onVpnServiceStop(payload: any) {
console.log('vpn service stop', JSON.stringify(payload))
curVpnStatus.running = false
resetVpnConfigStatus()
}
async function registerVpnServiceListener() {
@@ -135,15 +199,25 @@ export async function onNetworkInstanceChange(instanceId: string) {
}
if (!instanceId) {
await doStopVpn()
console.warn('vpn service skipped because instance id is empty')
if (curVpnStatus.running) {
await doStopVpn()
}
return
}
const config = await getConfig(instanceId)
console.log('vpn service loaded config', instanceId, JSON.stringify({
no_tun: config.no_tun,
dhcp: config.dhcp,
enable_magic_dns: config.enable_magic_dns,
}))
if (config.no_tun) {
console.log('vpn service skipped because no_tun is enabled', instanceId)
return
}
const curNetworkInfo = (await collectNetworkInfo(instanceId)).info.map[instanceId]
if (!curNetworkInfo || curNetworkInfo?.error_msg?.length) {
console.warn('vpn service skipped because network info is unavailable', instanceId, curNetworkInfo?.error_msg)
await doStopVpn()
return
}
@@ -170,27 +244,39 @@ export async function onNetworkInstanceChange(instanceId: string) {
const routes = getRoutesForVpn(curNetworkInfo?.routes, config)
const dns = config.enable_magic_dns ? '100.100.100.101' : undefined;
const dns = config.enable_magic_dns ? '100.100.100.101' : undefined
const ipChanged = virtual_ip !== curVpnStatus.ipv4Addr
const cidrChanged = network_length !== curVpnStatus.ipv4Cidr
const routesChanged = JSON.stringify(routes) !== JSON.stringify(curVpnStatus.routes)
const dnsChanged = dns != curVpnStatus.dns
const configChanged = ipChanged || cidrChanged || routesChanged || dnsChanged
const shouldStartVpn = !curVpnStatus.running
if (ipChanged || routesChanged || dnsChanged) {
if (shouldStartVpn || configChanged) {
console.info('vpn service virtual ip changed', JSON.stringify(curVpnStatus), virtual_ip)
try {
await doStopVpn()
}
catch (e) {
console.error(e)
if (curVpnStatus.running) {
try {
await doStopVpn()
}
catch (e) {
console.error(e)
}
}
try {
await doStartVpn(virtual_ip, network_length, routes, dns)
}
catch (e) {
console.error('start vpn service failed, stop all other network insts.', e)
await runNetworkInstance(config, true); //on android config should always be saved
if (e instanceof Error && e.message === 'need_prepare') {
console.info('vpn permission is required before starting the Android VPN service')
return
}
if (e instanceof Error && e.message === 'vpn_permission_denied') {
console.info('vpn permission request was denied or dismissed')
return
}
console.error('start vpn service failed', e)
}
}
}
@@ -202,6 +288,22 @@ async function isNoTunEnabled(instanceId: string | undefined) {
return (await getConfig(instanceId)).no_tun ?? false
}
async function findRunningTunInstanceId() {
const instanceIds = await listNetworkInstanceIds()
const runningIds = instanceIds.running_inst_ids.map(Utils.UuidToStr)
console.log('vpn service sync running instances', JSON.stringify(runningIds))
for (const instanceId of runningIds) {
if (await isNoTunEnabled(instanceId)) {
continue
}
return instanceId
}
return undefined
}
export async function initMobileVpnService() {
await registerVpnServiceListener()
}
@@ -210,10 +312,22 @@ export async function prepareVpnService(instanceId: string) {
if (await isNoTunEnabled(instanceId)) {
return
}
console.log('prepare vpn')
const prepare_ret = await prepare_vpn()
console.log('prepare vpn', JSON.stringify((prepare_ret)))
if (prepare_ret?.errorMsg?.length) {
throw new Error(prepare_ret.errorMsg)
}
await requestVpnPermission()
}
export async function syncMobileVpnService() {
syncVpnStatusFromNative(await get_vpn_status())
const instanceId = await findRunningTunInstanceId()
if (instanceId) {
console.log('vpn service sync selected instance', instanceId)
await onNetworkInstanceChange(instanceId)
return
}
if (dhcpPollingTimer) {
clearTimeout(dhcpPollingTimer)
dhcpPollingTimer = null
}
await doStopVpn(true)
}
+19 -18
View File
@@ -9,6 +9,7 @@ import { exit } from '@tauri-apps/plugin-process'
import { I18nUtils, RemoteManagement, Utils } from "easytier-frontend-lib"
import type { MenuItem } from 'primevue/menuitem'
import { useTray } from '~/composables/tray'
import { initMobileVpnService } from '~/composables/mobile_vpn'
import { GUIRemoteClient } from '~/modules/api'
import { useToast, useConfirm } from 'primevue'
@@ -189,9 +190,25 @@ async function initWithMode(mode: Mode) {
clientRunning.value = await isClientRunning()
}
onMounted(() => {
onMounted(async () => {
const cleanupFns: Array<() => void> = []
if (type() === 'android') {
try {
await initMobileVpnService()
console.error("easytier init vpn service done")
} catch (e: any) {
console.error("easytier init vpn service failed", e)
}
}
cleanupFns.push(await listenGlobalEvents())
currentMode.value = loadMode()
initWithMode(currentMode.value);
await initWithMode(currentMode.value);
onUnmounted(() => {
cleanupFns.forEach(unlisten => unlisten())
})
});
useTray(true)
@@ -347,22 +364,6 @@ async function connectRpcClient(isNormalMode: boolean, url?: string) {
console.log("easytier rpc connection established, isNormalMode: ", isNormalMode)
}
onMounted(async () => {
if (type() === 'android') {
try {
await initMobileVpnService()
console.error("easytier init vpn service done")
} catch (e: any) {
console.error("easytier init vpn service failed", e)
}
}
const unlisten = await listenGlobalEvents()
onUnmounted(() => {
unlisten()
})
})
async function openConfigServerDialog() {
editingMode.value = JSON.parse(JSON.stringify(loadMode()))
configServerDialogVisible.value = true
+1 -1
View File
@@ -1,6 +1,6 @@
[package]
name = "easytier-web"
version = "2.5.0"
version = "2.6.0"
edition = "2021"
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
@@ -209,7 +209,8 @@ watch(() => curNetwork.value, syncNormalizedNetwork, { immediate: true, deep: fa
</div>
<div class="items-center flex flex-col p-fluid gap-y-2">
<UrlListInput id="initial_nodes" v-model="curNetwork.peer_urls" :protos="protos"
:add-label="t('add_initial_node')" :placeholder="t('initial_node_placeholder')" />
defaultUrl="tcp://:11010" :add-label="t('add_initial_node')"
:placeholder="t('initial_node_placeholder')" />
</div>
</div>
</div>
@@ -305,6 +306,19 @@ watch(() => curNetwork.value, syncNormalizedNetwork, { immediate: true, deep: fa
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex">
<label for="instance_recv_bps_limit">{{ t('instance_recv_bps_limit') }}</label>
<span class="pi pi-question-circle ml-2 self-center"
v-tooltip="t('instance_recv_bps_limit_help')"></span>
</div>
<InputNumber id="instance_recv_bps_limit" v-model="curNetwork.instance_recv_bps_limit"
aria-describedby="instance_recv_bps_limit-help" :format="false"
:placeholder="t('instance_recv_bps_limit_placeholder')" :min="1" fluid />
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex">
@@ -15,6 +15,7 @@ const url = defineModel<string>({ required: true })
const editing = ref(false)
const container = ref<HTMLElement | null>(null)
const internalCompact = ref(false)
const hostFocused = ref(false)
onMounted(() => {
if (container.value) {
@@ -36,36 +37,86 @@ const parseUrl = (val: string | null | undefined) => {
const p = parseInt(portStr)
return isNaN(p) ? (props.protos[proto] ?? 11010) : p
}
const parseByPattern = (input: string) => {
const trimmed = input.trim()
if (!trimmed) {
return null
}
const match = trimmed.match(/^(\w+):\/\/(.*)$/)
const proto = match ? match[1] : 'tcp'
const rest = match ? match[2] : trimmed
const authority = rest.split(/[/?#]/)[0]
if (!authority) {
return null
}
const hostAndMaybePort = authority.includes('@') ? authority.slice(authority.lastIndexOf('@') + 1) : authority
if (hostAndMaybePort.startsWith('[')) {
const ipv6End = hostAndMaybePort.indexOf(']')
if (ipv6End > 0) {
const host = hostAndMaybePort.slice(0, ipv6End + 1)
const remain = hostAndMaybePort.slice(ipv6End + 1)
const port = remain.startsWith(':') ? getValidPort(remain.slice(1), proto) : (props.protos[proto] ?? 11010)
return { proto, host, port }
}
}
const portMatch = hostAndMaybePort.match(/^(.*):(\d+)$/)
const host = portMatch ? portMatch[1] : hostAndMaybePort
const port = portMatch ? parseInt(portMatch[2]) : (props.protos[proto] ?? 11010)
return { proto, host, port }
}
if (!val) {
return { proto: 'tcp', host: '', port: props.protos['tcp'] ?? 11010 }
}
try {
const urlObj = new URL(val)
const proto = urlObj.protocol.replace(':', '')
return {
proto: proto,
host: urlObj.hostname,
port: getValidPort(urlObj.port, proto)
}
} catch (e) {
// Fallback for incomplete or invalid URLs
const match = val.match(/^(\w+):\/\/(.*)$/)
if (match) {
const proto = match[1]
const rest = match[2]
const portMatch = rest.match(/:(\d+)$/)
return {
proto,
host: portMatch ? rest.slice(0, portMatch.index) : rest,
port: portMatch ? parseInt(portMatch[1]) : (props.protos[proto] ?? 11010)
}
}
return { proto: 'tcp', host: '', port: 11010 }
const parsedByPattern = parseByPattern(val)
if (parsedByPattern) {
return parsedByPattern
}
return { proto: 'tcp', host: '', port: 11010 }
}
const internalValue = ref(parseUrl(url.value))
const defaultHost = '0.0.0.0'
const buildUrlValue = (value: { proto: string, host: string, port: number }, forceDefaultHost = false) => {
const proto = value.proto || 'tcp'
const rawHost = (value.host ?? '').trim()
const host = rawHost || (forceDefaultHost ? defaultHost : '')
if (!host) {
return null
}
let port = value.port
if (isNaN(parseInt(port as any))) {
port = props.protos[proto] ?? 11010
}
if (props.protos[proto] === 0) {
return `${proto}://${host}`
}
return `${proto}://${host}:${port}`
}
const syncUrlFromInternal = (forceDefaultHost = false) => {
const nextUrl = buildUrlValue(internalValue.value, forceDefaultHost)
if (!nextUrl || nextUrl === url.value) {
return
}
url.value = nextUrl
}
const onHostBlur = () => {
hostFocused.value = false
syncUrlFromInternal(true)
}
const onHostFocus = () => {
hostFocused.value = true
}
const onDialogConfirm = () => {
syncUrlFromInternal(true)
editing.value = false
}
const isNoPortProto = computed(() => {
return props.protos[internalValue.value.proto] === 0
@@ -73,28 +124,22 @@ const isNoPortProto = computed(() => {
// Sync from external
watch(() => url.value, (newVal) => {
if (hostFocused.value) {
return
}
const parsed = parseUrl(newVal)
const internalHost = internalValue.value.host ?? ''
const sameHost = parsed.host === internalHost || (!internalHost.trim() && parsed.host === defaultHost)
if (parsed.proto !== internalValue.value.proto ||
parsed.host !== internalValue.value.host ||
!sameHost ||
parsed.port !== internalValue.value.port) {
internalValue.value = parsed
}
})
// Sync to external
watch(internalValue, (newVal) => {
const proto = newVal.proto || 'tcp'
const host = newVal.host || '0.0.0.0'
let port = newVal.port
if (isNaN(parseInt(port as any))) {
port = props.protos[proto] ?? 11010
}
if (props.protos[proto] === 0) {
url.value = `${proto}://${host}`
} else {
url.value = `${proto}://${host}:${port}`
}
watch(internalValue, () => {
syncUrlFromInternal(false)
}, { deep: true })
const protoOptions = computed(() => Object.keys(props.protos))
@@ -128,7 +173,8 @@ const onProtoChange = (newProto: string) => {
<AutoComplete :model-value="internalValue.proto" :suggestions="filteredProtos" dropdown
class="max-w-32 proto-autocomplete-in-group" @complete="searchProtos"
@update:model-value="onProtoChange" />
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="grow" />
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="grow"
@focus="onHostFocus" @blur="onHostBlur" />
<template v-if="!isNoPortProto">
<InputGroupAddon>
<span style="font-weight: bold">:</span>
@@ -156,7 +202,8 @@ const onProtoChange = (newProto: string) => {
</div>
<div class="flex flex-col gap-2">
<label>{{ t('web.common.address') || 'Address' }}</label>
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="w-full" />
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="w-full"
@focus="onHostFocus" @blur="onHostBlur" />
</div>
<div v-if="!isNoPortProto" class="flex flex-col gap-2">
<label>{{ t('port') }}</label>
@@ -164,7 +211,7 @@ const onProtoChange = (newProto: string) => {
</div>
</div>
<template #footer>
<Button :label="t('web.common.confirm') || 'Done'" icon="pi pi-check" @click="editing = false"
<Button :label="t('web.common.confirm') || 'Done'" icon="pi pi-check" @click="onDialogConfirm"
autofocus />
</template>
</Dialog>
@@ -10,7 +10,7 @@ initial_nodes_help: |
• 留空 = 节点独立启动,等别人来连,或你后续手动连。
• 无论直接还是间接连通(通过其他节点搭桥),都能组网互通。
初始节点可以用自己的,也可以用别人分享的。
initial_node_placeholder: 例如:tcp://node.example.com:11010
initial_node_placeholder: 例如:node.example.com
virtual_ipv4: 虚拟IPv4地址
virtual_ipv4_dhcp: DHCP
network_name: 网络名称
@@ -117,7 +117,7 @@ disable_quic_input: 禁用 QUIC 输入
disable_quic_input_help: 禁用 QUIC 入站流量,其他开启 QUIC 代理的节点仍然使用 TCP 连接到本节点。
disable_p2p: 禁用 P2P
disable_p2p_help: 禁用 P2P 模式,所有流量通过手动指定的服务器中转
disable_p2p_help: 禁用普通自动 P2P。开启 need-p2p 的节点仍可与当前节点建立 P2P
p2p_only: 仅 P2P
p2p_only_help: 仅与已经建立P2P连接的对等节点通信,不通过其他节点中转。
@@ -196,6 +196,12 @@ mtu_help: |
TUN设备的MTU,默认为非加密时为1380,加密时为1360。范围:400-1380
mtu_placeholder: 留空为默认值1380
instance_recv_bps_limit: 实例接收限速
instance_recv_bps_limit_help: |
限制当前实例整体入站流量的总接收速率,单位为字节每秒。
留空表示不限速。
instance_recv_bps_limit_placeholder: 留空表示不限速
mapped_listeners: 监听映射
mapped_listeners_help: |
手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。
@@ -10,7 +10,7 @@ initial_nodes_help: |
• Leaving it empty = the node starts alone until others connect to it, or you connect it later yourself.
• Direct or indirect connectivity, including through relay nodes, can form one network.
Initial nodes can be your own nodes or ones shared by others.
initial_node_placeholder: "Example: tcp://node.example.com:11010"
initial_node_placeholder: "Example: node.example.com"
virtual_ipv4: Virtual IPv4
virtual_ipv4_dhcp: DHCP
network_name: Network Name
@@ -116,7 +116,7 @@ disable_quic_input: Disable QUIC Input
disable_quic_input_help: Disable inbound QUIC traffic, while nodes with QUIC proxy enabled continue to connect using TCP.
disable_p2p: Disable P2P
disable_p2p_help: Disable P2P mode; route all traffic through a manually specified relay server.
disable_p2p_help: Disable ordinary automatic P2P. Nodes with need-p2p enabled can still establish P2P with this node.
p2p_only: P2P Only
p2p_only_help: Only communicate with peers that have already established P2P connections, do not relay through other nodes.
@@ -196,6 +196,12 @@ mtu_help: |
MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380
mtu_placeholder: Leave blank as default value 1380
instance_recv_bps_limit: Instance Receive Limit
instance_recv_bps_limit_help: |
Limit the total receive bandwidth for the whole instance. Unit: bytes per second.
Leave blank for no limit.
instance_recv_bps_limit_placeholder: Leave blank for no limit
mapped_listeners: Map Listeners
mapped_listeners_help: |
Manually specify the public address of the listener, other nodes can use this address to connect to this node.
@@ -78,6 +78,7 @@ export interface NetworkConfig {
socks5_port: number
mtu: number | null
instance_recv_bps_limit: number | null
mapped_listeners: string[]
enable_magic_dns?: boolean
@@ -146,6 +147,7 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
enable_socks5: false,
socks5_port: 1080,
mtu: null,
instance_recv_bps_limit: null,
mapped_listeners: [],
enable_magic_dns: false,
enable_private_mode: false,
+201 -56
View File
@@ -1,4 +1,9 @@
use std::{collections::HashSet, fmt::Debug, str::FromStr as _, sync::Arc};
use std::{
collections::{HashMap, HashSet},
fmt::Debug,
str::FromStr as _,
sync::Arc,
};
use anyhow::Context;
use easytier::{
@@ -37,6 +42,7 @@ pub struct SessionData {
storage_token: Option<StorageToken>,
binding_version: Option<u64>,
applied_config_revision: Option<String>,
notifier: broadcast::Sender<HeartbeatRequest>,
req: Option<HeartbeatRequest>,
location: Option<Location>,
@@ -59,6 +65,7 @@ impl SessionData {
client_url,
storage_token: None,
binding_version: None,
applied_config_revision: None,
notifier: tx,
req: None,
location,
@@ -117,37 +124,16 @@ struct SessionRpcService {
}
impl SessionRpcService {
async fn persist_webhook_network_config(
storage: &Storage,
user_id: i32,
machine_id: uuid::Uuid,
network_config: serde_json::Value,
) -> anyhow::Result<()> {
let mut network_config = network_config;
fn normalize_network_config(
mut network_config: serde_json::Value,
inst_id: uuid::Uuid,
) -> anyhow::Result<NetworkConfig> {
let network_name = network_config
.get("network_name")
.and_then(|v| v.as_str())
.filter(|v| !v.is_empty())
.ok_or_else(|| anyhow::anyhow!("webhook response missing network_name"))?
.to_string();
let existing_configs = storage
.db()
.list_network_configs((user_id, machine_id), ListNetworkProps::All)
.await
.map_err(|e| anyhow::anyhow!("failed to list existing network configs: {:?}", e))?;
let inst_id = existing_configs
.iter()
.find_map(|cfg| {
let value = serde_json::from_str::<serde_json::Value>(&cfg.network_config).ok()?;
let cfg_network_name = value.get("network_name")?.as_str()?;
if cfg_network_name == network_name {
uuid::Uuid::parse_str(&cfg.network_instance_id).ok()
} else {
None
}
})
.unwrap_or_else(uuid::Uuid::new_v4);
let config_obj = network_config
.as_object_mut()
.ok_or_else(|| anyhow::anyhow!("webhook network_config must be a JSON object"))?;
@@ -157,14 +143,66 @@ impl SessionRpcService {
);
config_obj
.entry("instance_name".to_string())
.or_insert_with(|| serde_json::Value::String(network_name.clone()));
.or_insert_with(|| serde_json::Value::String(network_name));
let config = serde_json::from_value::<NetworkConfig>(network_config)?;
storage
Ok(serde_json::from_value::<NetworkConfig>(network_config)?)
}
async fn reconcile_managed_network_configs(
storage: &Storage,
user_id: i32,
machine_id: uuid::Uuid,
desired_configs: Vec<crate::webhook::ManagedNetworkConfig>,
) -> anyhow::Result<()> {
let existing_configs = storage
.db()
.insert_or_update_user_network_config((user_id, machine_id), inst_id, config)
.list_network_configs((user_id, machine_id), ListNetworkProps::All)
.await
.map_err(|e| anyhow::anyhow!("failed to persist webhook network config: {:?}", e))?;
.map_err(|e| anyhow::anyhow!("failed to list existing network configs: {:?}", e))?;
let existing_ids = existing_configs
.iter()
.filter_map(|cfg| uuid::Uuid::parse_str(&cfg.network_instance_id).ok())
.collect::<HashSet<_>>();
let mut desired_ids = HashSet::with_capacity(desired_configs.len());
let mut normalized = HashMap::with_capacity(desired_configs.len());
for desired in desired_configs {
let inst_id = uuid::Uuid::parse_str(&desired.instance_id).with_context(|| {
format!(
"invalid desired managed instance id: {}",
desired.instance_id
)
})?;
let config = Self::normalize_network_config(desired.network_config, inst_id)?;
desired_ids.insert(inst_id);
normalized.insert(inst_id, config);
}
for (inst_id, config) in normalized {
storage
.db()
.insert_or_update_user_network_config((user_id, machine_id), inst_id, config)
.await
.map_err(|e| {
anyhow::anyhow!(
"failed to persist managed network config {}: {:?}",
inst_id,
e
)
})?;
}
let stale_ids = existing_ids
.difference(&desired_ids)
.copied()
.collect::<Vec<_>>();
if !stale_ids.is_empty() {
storage
.db()
.delete_network_configs((user_id, machine_id), &stale_ids)
.await
.map_err(|e| anyhow::anyhow!("failed to delete stale network configs: {:?}", e))?;
}
Ok(())
}
@@ -185,10 +223,13 @@ impl SessionRpcService {
req.machine_id
))?;
let (user_id, webhook_network_config, webhook_validated, binding_version) = if data
.webhook_config
.is_enabled()
{
let (
user_id,
webhook_managed_network_configs,
webhook_config_revision,
webhook_validated,
binding_version,
) = if data.webhook_config.is_enabled() {
let webhook_req = crate::webhook::ValidateTokenRequest {
token: req.user_token.clone(),
machine_id: machine_id.to_string(),
@@ -223,7 +264,8 @@ impl SessionRpcService {
};
(
user_id,
resp.network_config,
resp.managed_network_configs,
resp.config_revision,
true,
Some(resp.binding_version),
)
@@ -257,21 +299,21 @@ impl SessionRpcService {
);
}
};
(user_id, None, false, None)
(user_id, Vec::new(), String::new(), false, None)
};
if webhook_validated {
if let Some(network_config) = webhook_network_config {
Self::persist_webhook_network_config(&storage, user_id, machine_id, network_config)
.await
.map_err(rpc_types::error::Error::from)?;
}
} else if webhook_network_config.is_some() {
return Err(anyhow::anyhow!(
"unexpected webhook network_config for non-webhook token {:?}",
req.user_token
if webhook_validated
&& data.applied_config_revision.as_deref() != Some(webhook_config_revision.as_str())
{
Self::reconcile_managed_network_configs(
&storage,
user_id,
machine_id,
webhook_managed_network_configs,
)
.into());
.await
.map_err(rpc_types::error::Error::from)?;
data.applied_config_revision = Some(webhook_config_revision);
}
if data.req.replace(req.clone()).is_none() {
@@ -411,6 +453,7 @@ impl Session {
rpc_client: SessionRpcClient,
) {
let mut cleaned_web_managed_instances = false;
let mut last_desired_inst_ids: Option<HashSet<String>> = None;
loop {
heartbeat_waiter = heartbeat_waiter.resubscribe();
let req = heartbeat_waiter.recv().await;
@@ -467,8 +510,15 @@ impl Session {
};
let mut has_failed = false;
let should_be_alive_inst_ids = local_configs
.iter()
.map(|cfg| cfg.network_instance_id.clone())
.collect::<HashSet<_>>();
let desired_changed = last_desired_inst_ids
.as_ref()
.is_none_or(|last| last != &should_be_alive_inst_ids);
if !cleaned_web_managed_instances {
if !cleaned_web_managed_instances || desired_changed {
let all_local_configs = match storage
.db
.list_network_configs((user_id, machine_id.into()), ListNetworkProps::All)
@@ -486,11 +536,6 @@ impl Session {
.map(|cfg| cfg.network_instance_id.clone())
.collect::<HashSet<_>>();
let should_be_alive_inst_ids = local_configs
.iter()
.map(|cfg| cfg.network_instance_id.clone())
.collect::<HashSet<_>>();
let should_delete_ids = running_inst_ids
.iter()
.chain(all_inst_ids.iter())
@@ -519,6 +564,7 @@ impl Session {
if !has_failed {
cleaned_web_managed_instances = true;
last_desired_inst_ids = Some(should_be_alive_inst_ids.clone());
}
}
@@ -549,8 +595,7 @@ impl Session {
}
if !has_failed {
tracing::info!(?req, "All network instances are running");
break;
last_desired_inst_ids = Some(should_be_alive_inst_ids);
}
}
}
@@ -585,3 +630,103 @@ impl Session {
self.data.read().await.req()
}
}
#[cfg(test)]
mod tests {
use easytier::rpc_service::remote_client::{ListNetworkProps, Storage as _};
use serde_json::json;
use super::{super::storage::Storage, *};
#[tokio::test]
async fn reconcile_managed_network_configs_upserts_and_deletes_exact_set() {
let storage = Storage::new(crate::db::Db::memory_db().await);
let user_id = storage
.db()
.auto_create_user("webhook-user")
.await
.unwrap()
.id;
let machine_id = uuid::Uuid::new_v4();
let keep_id = uuid::Uuid::new_v4();
let stale_id = uuid::Uuid::new_v4();
let new_id = uuid::Uuid::new_v4();
storage
.db()
.insert_or_update_user_network_config(
(user_id, machine_id),
keep_id,
NetworkConfig {
network_name: Some("old-name".to_string()),
..Default::default()
},
)
.await
.unwrap();
storage
.db()
.insert_or_update_user_network_config(
(user_id, machine_id),
stale_id,
NetworkConfig {
network_name: Some("stale".to_string()),
..Default::default()
},
)
.await
.unwrap();
SessionRpcService::reconcile_managed_network_configs(
&storage,
user_id,
machine_id,
vec![
crate::webhook::ManagedNetworkConfig {
instance_id: keep_id.to_string(),
network_config: json!({
"instance_id": keep_id.to_string(),
"network_name": "updated-name"
}),
},
crate::webhook::ManagedNetworkConfig {
instance_id: new_id.to_string(),
network_config: json!({
"instance_id": new_id.to_string(),
"network_name": "new-name"
}),
},
],
)
.await
.unwrap();
let configs = storage
.db()
.list_network_configs((user_id, machine_id), ListNetworkProps::All)
.await
.unwrap();
let config_ids = configs
.iter()
.map(|cfg| cfg.network_instance_id.clone())
.collect::<HashSet<_>>();
assert_eq!(configs.len(), 2);
assert!(config_ids.contains(&keep_id.to_string()));
assert!(config_ids.contains(&new_id.to_string()));
assert!(!config_ids.contains(&stale_id.to_string()));
let updated_keep = storage
.db()
.get_network_config((user_id, machine_id), &keep_id.to_string())
.await
.unwrap()
.unwrap();
let updated_keep_config: NetworkConfig =
serde_json::from_str(&updated_keep.network_config).unwrap();
assert_eq!(
updated_keep_config.network_name.as_deref(),
Some("updated-name")
);
}
}
+71 -9
View File
@@ -154,13 +154,17 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
use entity::user_running_network_configs as urnc;
let on_conflict = OnConflict::column(urnc::Column::NetworkInstanceId)
.update_columns([
urnc::Column::NetworkConfig,
urnc::Column::Disabled,
urnc::Column::UpdateTime,
])
.to_owned();
let on_conflict = OnConflict::columns([
urnc::Column::UserId,
urnc::Column::DeviceId,
urnc::Column::NetworkInstanceId,
])
.update_columns([
urnc::Column::NetworkConfig,
urnc::Column::Disabled,
urnc::Column::UpdateTime,
])
.to_owned();
let insert_m = urnc::ActiveModel {
user_id: sea_orm::Set(user_id),
device_id: sea_orm::Set(device_id.to_string()),
@@ -184,13 +188,14 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
async fn delete_network_configs(
&self,
(user_id, _): (UserIdInDb, Uuid),
(user_id, device_id): (UserIdInDb, Uuid),
network_inst_ids: &[Uuid],
) -> Result<(), DbErr> {
use entity::user_running_network_configs as urnc;
urnc::Entity::delete_many()
.filter(urnc::Column::UserId.eq(user_id))
.filter(urnc::Column::DeviceId.eq(device_id.to_string()))
.filter(
urnc::Column::NetworkInstanceId
.is_in(network_inst_ids.iter().map(|id| id.to_string())),
@@ -203,7 +208,7 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
async fn update_network_config_state(
&self,
(user_id, _): (UserIdInDb, Uuid),
(user_id, device_id): (UserIdInDb, Uuid),
network_inst_id: Uuid,
disabled: bool,
) -> Result<(), DbErr> {
@@ -211,6 +216,7 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
urnc::Entity::update_many()
.filter(urnc::Column::UserId.eq(user_id))
.filter(urnc::Column::DeviceId.eq(device_id.to_string()))
.filter(urnc::Column::NetworkInstanceId.eq(network_inst_id.to_string()))
.col_expr(urnc::Column::Disabled, Expr::value(disabled))
.col_expr(
@@ -341,4 +347,60 @@ mod tests {
.unwrap();
assert!(result3.is_none());
}
#[tokio::test]
async fn test_user_network_config_same_instance_id_is_scoped_by_device() {
let db = Db::memory_db().await;
let user_id = db.auto_create_user("user-1").await.unwrap().id;
let device1 = uuid::Uuid::new_v4();
let device2 = uuid::Uuid::new_v4();
let inst_id = uuid::Uuid::new_v4();
db.insert_or_update_user_network_config(
(user_id, device1),
inst_id,
NetworkConfig {
network_name: Some("cfg-1".to_string()),
..Default::default()
},
)
.await
.unwrap();
db.insert_or_update_user_network_config(
(user_id, device2),
inst_id,
NetworkConfig {
network_name: Some("cfg-2".to_string()),
..Default::default()
},
)
.await
.unwrap();
let first = db
.get_network_config((user_id, device1), &inst_id.to_string())
.await
.unwrap()
.unwrap();
let second = db
.get_network_config((user_id, device2), &inst_id.to_string())
.await
.unwrap()
.unwrap();
assert_eq!(first.user_id, user_id);
assert_eq!(first.device_id, device1.to_string());
assert_eq!(second.user_id, user_id);
assert_eq!(second.device_id, device2.to_string());
let device1_configs = db
.list_network_configs((user_id, device1), ListNetworkProps::All)
.await
.unwrap();
let device2_configs = db
.list_network_configs((user_id, device2), ListNetworkProps::All)
.await
.unwrap();
assert_eq!(device1_configs.len(), 1);
assert_eq!(device2_configs.len(), 1);
}
}
+25 -17
View File
@@ -7,7 +7,7 @@ use std::net::IpAddr;
use std::sync::Arc;
use clap::Parser;
use easytier::tunnel::websocket::WSTunnelListener;
use easytier::tunnel::websocket::WsTunnelListener;
use easytier::{
common::{
config::{ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader},
@@ -20,6 +20,8 @@ use easytier::{
utils::setup_panic_handler,
};
use easytier::tunnel::IpScheme;
use easytier::utils::BoxExt;
use mimalloc::MiMalloc;
mod client_manager;
@@ -192,14 +194,12 @@ impl LoggingConfigLoader for &Cli {
}
}
pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> {
Ok(match l.scheme() {
"tcp" => Box::new(TcpTunnelListener::new(l.clone())),
"udp" => Box::new(UdpTunnelListener::new(l.clone())),
"ws" => Box::new(WSTunnelListener::new(l.clone())),
_ => {
return Err(Error::InvalidUrl(l.to_string()));
}
pub fn get_listener_by_url(scheme: IpScheme, l: &url::Url) -> Option<Box<dyn TunnelListener>> {
Some(match scheme {
IpScheme::Tcp => TcpTunnelListener::new(l.clone()).boxed(),
IpScheme::Udp => UdpTunnelListener::new(l.clone()).boxed(),
IpScheme::Ws => WsTunnelListener::new(l.clone()).boxed(),
_ => return None,
})
}
@@ -213,15 +213,23 @@ async fn get_dual_stack_listener(
),
Error,
> {
let is_protocol_support_dual_stack =
protocol.trim().to_lowercase() == "tcp" || protocol.trim().to_lowercase() == "udp";
let v6_listener = if is_protocol_support_dual_stack && local_ipv6().await.is_ok() {
get_listener_by_url(&format!("{}://[::0]:{}", protocol, port).parse().unwrap()).ok()
} else {
None
};
let scheme = protocol
.parse()
.map_err(|_| Error::InvalidUrl(protocol.to_string()))?;
let v6_listener =
if local_ipv6().await.is_ok() && matches!(scheme, IpScheme::Tcp | IpScheme::Udp) {
get_listener_by_url(
scheme,
&format!("{protocol}://[::]:{port}").parse().unwrap(),
)
} else {
None
};
let v4_listener = if local_ipv4().await.is_ok() {
get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok()
get_listener_by_url(
scheme,
&format!("{protocol}://0.0.0.0:{port}").parse().unwrap(),
)
} else {
None
};
@@ -0,0 +1,120 @@
use sea_orm_migration::prelude::*;
pub struct Migration;
impl MigrationName for Migration {
fn name(&self) -> &str {
"m20260403_000002_scope_network_config_unique"
}
}
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
let db = manager.get_connection();
db.execute_unprepared(
r#"
CREATE TABLE user_running_network_configs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
user_id INTEGER NOT NULL,
device_id TEXT NOT NULL,
network_instance_id TEXT NOT NULL,
network_config TEXT NOT NULL,
disabled BOOLEAN NOT NULL DEFAULT FALSE,
create_time TEXT NOT NULL,
update_time TEXT NOT NULL,
CONSTRAINT fk_user_running_network_configs_user_id_to_users_id
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
ON UPDATE CASCADE
);
INSERT INTO user_running_network_configs_new (
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
)
SELECT
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
FROM user_running_network_configs;
DROP TABLE user_running_network_configs;
ALTER TABLE user_running_network_configs_new RENAME TO user_running_network_configs;
CREATE INDEX idx_user_running_network_configs_user_id
ON user_running_network_configs(user_id);
CREATE UNIQUE INDEX idx_user_running_network_configs_scope_inst
ON user_running_network_configs(user_id, device_id, network_instance_id);
"#,
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
let db = manager.get_connection();
db.execute_unprepared(
r#"
CREATE TABLE user_running_network_configs_old (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
user_id INTEGER NOT NULL,
device_id TEXT NOT NULL,
network_instance_id TEXT NOT NULL UNIQUE,
network_config TEXT NOT NULL,
disabled BOOLEAN NOT NULL DEFAULT FALSE,
create_time TEXT NOT NULL,
update_time TEXT NOT NULL,
CONSTRAINT fk_user_running_network_configs_user_id_to_users_id
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
ON UPDATE CASCADE
);
INSERT INTO user_running_network_configs_old (
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
)
SELECT
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
FROM user_running_network_configs;
DROP TABLE user_running_network_configs;
ALTER TABLE user_running_network_configs_old RENAME TO user_running_network_configs;
CREATE INDEX idx_user_running_network_configs_user_id
ON user_running_network_configs(user_id);
"#,
)
.await?;
Ok(())
}
}
+5 -1
View File
@@ -1,12 +1,16 @@
use sea_orm_migration::prelude::*;
mod m20241029_000001_init;
mod m20260403_000002_scope_network_config_unique;
pub struct Migrator;
#[async_trait::async_trait]
impl MigratorTrait for Migrator {
fn migrations() -> Vec<Box<dyn MigrationTrait>> {
vec![Box::new(m20241029_000001_init::Migration)]
vec![
Box::new(m20241029_000001_init::Migration),
Box::new(m20260403_000002_scope_network_config_unique::Migration),
]
}
}
+8 -1
View File
@@ -65,7 +65,14 @@ pub struct ValidateTokenResponse {
pub pre_approved: bool,
#[serde(default)]
pub binding_version: u64,
pub network_config: Option<serde_json::Value>,
pub managed_network_configs: Vec<ManagedNetworkConfig>,
pub config_revision: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct ManagedNetworkConfig {
pub instance_id: String,
pub network_config: serde_json::Value,
}
#[derive(Debug, Serialize)]
+9 -5
View File
@@ -3,7 +3,7 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier"
version = "2.5.0"
version = "2.6.0"
edition = "2021"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
@@ -37,7 +37,7 @@ tracing-subscriber = { version = "0.3", features = [
"time",
] }
derivative = "2.2.0"
derive_more = {version = "2.1.1", features = ["full"]}
derive_more = { version = "2.1.1", features = ["full"] }
console-subscriber = { version = "0.4.1", optional = true }
indoc = "2.0.7"
regex = "1.8"
@@ -50,6 +50,8 @@ time = "0.3"
toml = "0.8.12"
chrono = { version = "0.4.37", features = ["serde"] }
cfg-if = "1.0"
itertools = "0.14.0"
strum = { version = "0.27.2", features = ["derive"] }
@@ -79,12 +81,12 @@ quinn = { version = "0.11.8", optional = true, features = ["ring"] }
quinn-plaintext = { version = "0.3.0", optional = true }
rustls = { version = "0.23.0", features = [
"ring","tls12"
"ring", "tls12"
], default-features = false, optional = true }
rcgen = { version = "0.12.1", optional = true }
# for websocket
tokio-websockets = { version = "0.8", optional = true, features = [
tokio-websockets = { version = "0.13.2", optional = true, features = [
"rustls-webpki-roots",
"client",
"server",
@@ -94,6 +96,7 @@ tokio-websockets = { version = "0.8", optional = true, features = [
http = { version = "1", default-features = false, features = [
"std",
], optional = true }
forwarded-header-value = { version = "0.1.1", optional = true }
tokio-rustls = { version = "0.26", default-features = false, optional = true }
# for tap device
@@ -249,7 +252,6 @@ shellexpand = "3.1.1"
# for fake tcp
flume = { version = "0.12", optional = true }
cfg-if = "1.0"
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies]
machine-uid = "0.5.3"
@@ -312,6 +314,7 @@ jemalloc-sys = { package = "tikv-jemalloc-sys", version = "0.6.0", features = [
], optional = true }
[build-dependencies]
cfg_aliases = "0.2.1"
tonic-build = "0.12"
globwalk = "0.8.1"
regex = "1"
@@ -387,6 +390,7 @@ tun = ["dep:tun"]
websocket = [
"dep:tokio-websockets",
"dep:http",
"dep:forwarded-header-value",
"dep:tokio-rustls",
"dep:rustls",
"dep:rcgen",
+13 -2
View File
@@ -1,9 +1,9 @@
use cfg_aliases::cfg_aliases;
use prost_wkt_build::{FileDescriptorSet, Message as _};
#[cfg(target_os = "windows")]
use std::io::Cursor;
use std::{env, path::PathBuf};
use prost_wkt_build::{FileDescriptorSet, Message as _};
#[cfg(target_os = "windows")]
struct WindowsBuild {}
@@ -130,6 +130,17 @@ fn check_locale() {
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
cfg_aliases! {
mobile: {
any(
target_os = "android",
target_os = "ios",
all(target_os = "macos", feature = "macos-ne"),
target_env = "ohos"
)
}
}
// enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")]
if !std::env::var("TARGET")
+7 -4
View File
@@ -152,8 +152,8 @@ core_clap:
如果该参数为空,则禁用转发。默认允许所有网络。
例如:'*'(所有网络),'def*'(以def为前缀的网络),'net1 net2'(只允许net1和net2"
disable_p2p:
en: "disable p2p communication, will only relay packets with peers specified by --peers"
zh-CN: "禁用P2P通信,只通过--peers指定的节点转发数据包"
en: "disable ordinary automatic p2p; still establish p2p with peers marked as need-p2p, and other peers should not proactively connect to this node"
zh-CN: "禁用普通自动P2P;仍会与标记为 need-p2p 的节点建立P2P连接,其他节点不应主动与当前节点建立P2P"
p2p_only:
en: "only communicate with peers that already establish p2p connection"
zh-CN: "仅与已经建立P2P连接的对等节点通信"
@@ -212,11 +212,14 @@ core_clap:
en: "specify the top-level domain zone for magic DNS. if not provided, defaults to the value from dns_server module (et.net.). only used when accept_dns is true."
zh-CN: "指定魔法DNS的顶级域名区域。如果未提供,默认使用dns_server模块中的值(et.net.)。仅在accept_dns为true时使用。"
private_mode:
en: "if true, nodes with different network names or passwords from this network are not allowed to perform handshake or relay through this node."
zh-CN: "如果为true,则允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转"
en: "if true, foreign networks are only allowed when this node can verify they use the same network secret, or when a foreign credential node is already trusted via admin-issued credential propagation; different or missing secrets are otherwise rejected."
zh-CN: "如果为true,则允许两类 foreign network 接入:本节点能验证其使用相同 network secret 的节点,或已通过 foreign network 管理节点传播而被信任的 credential 节点;否则 secret 不同或缺失时会被拒绝。"
foreign_relay_bps_limit:
en: "the maximum bps limit for foreign network relay, default is no limit. unit: BPS (bytes per second)"
zh-CN: "作为共享节点时,限制非本地网络的流量转发速率,默认无限制,单位 BPS (字节每秒)"
instance_recv_bps_limit:
en: "the maximum total receive bps limit for this instance, default is no limit. unit: BPS (bytes per second)"
zh-CN: "限制当前网络实例整体入站流量的总接收速率,默认无限制,单位 BPS (字节每秒)"
tcp_whitelist:
en: "tcp port whitelist. Supports single ports (80) and ranges (8000-9000)"
zh-CN: "TCP 端口白名单。支持单个端口(80)和范围(8000-9000"
+1
View File
@@ -69,6 +69,7 @@ pub fn gen_default_flags() -> Flags {
quic_listen_port: u32::MAX,
need_p2p: false,
instance_recv_bps_limit: u64::MAX,
}
}
+25 -24
View File
@@ -1,8 +1,7 @@
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::{
collections::{hash_map::DefaultHasher, HashMap},
hash::Hasher,
net::{IpAddr, SocketAddr},
sync::{Arc, Mutex},
time::{SystemTime, UNIX_EPOCH},
};
@@ -10,21 +9,6 @@ use std::{
use arc_swap::ArcSwap;
use dashmap::DashMap;
use crate::common::config::ProxyNetworkConfig;
use crate::common::shrink_dashmap;
use crate::common::stats_manager::StatsManager;
use crate::common::token_bucket::TokenBucketManager;
use crate::peers::acl_filter::AclFilter;
use crate::peers::credential_manager::CredentialManager;
use crate::proto::acl::GroupIdentity;
use crate::proto::api::config::InstanceConfigPatch;
use crate::proto::api::instance::PeerConnInfo;
use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb};
use crate::proto::peer_rpc::PeerGroupInfo;
use crossbeam::atomic::AtomicCell;
use hmac::{Hmac, Mac};
use sha2::Sha256;
use super::{
config::{ConfigLoader, Flags},
netns::NetNS,
@@ -32,6 +16,24 @@ use super::{
stun::{StunInfoCollector, StunInfoCollectorTrait},
PeerId,
};
use crate::{
common::{
config::ProxyNetworkConfig, shrink_dashmap, stats_manager::StatsManager,
token_bucket::TokenBucketManager,
},
peers::{acl_filter::AclFilter, credential_manager::CredentialManager},
proto::{
acl::GroupIdentity,
api::{config::InstanceConfigPatch, instance::PeerConnInfo},
common::{PeerFeatureFlag, PortForwardConfigPb},
peer_rpc::PeerGroupInfo,
},
tunnel::matches_protocol,
};
use crossbeam::atomic::AtomicCell;
use hmac::{Hmac, Mac};
use sha2::Sha256;
use socket2::Protocol;
pub type NetworkIdentity = crate::common::config::NetworkIdentity;
@@ -242,6 +244,7 @@ impl GlobalCtx {
feature_flags.quic_input = !flags.disable_quic_input;
feature_flags.no_relay_quic = flags.disable_relay_quic;
feature_flags.need_p2p = flags.need_p2p;
feature_flags.disable_p2p = flags.disable_p2p;
feature_flags
}
@@ -625,15 +628,11 @@ impl GlobalCtx {
}
fn is_port_in_running_listeners(&self, port: u16, is_udp: bool) -> bool {
let check_proto = |listener_proto: &str| {
let listener_is_udp = matches!(listener_proto, "udp" | "wg");
listener_is_udp == is_udp
};
self.running_listeners
.lock()
.unwrap()
.iter()
.any(|x| x.port() == Some(port) && check_proto(x.scheme()))
.any(|x| x.port() == Some(port) && matches_protocol!(x, Protocol::UDP) == is_udp)
}
#[tracing::instrument(ret, skip(self))]
@@ -745,12 +744,13 @@ pub mod tests {
feature_flags.is_public_server = true;
global_ctx.set_feature_flags(feature_flags);
let mut flags = global_ctx.get_flags();
let mut flags = global_ctx.get_flags().clone();
flags.disable_kcp_input = true;
flags.disable_relay_kcp = true;
flags.disable_quic_input = true;
flags.disable_relay_quic = true;
flags.need_p2p = true;
flags.disable_p2p = true;
global_ctx.set_flags(flags);
let feature_flags = global_ctx.get_feature_flags();
@@ -759,6 +759,7 @@ pub mod tests {
assert!(!feature_flags.quic_input);
assert!(feature_flags.no_relay_quic);
assert!(feature_flags.need_p2p);
assert!(feature_flags.disable_p2p);
assert!(feature_flags.support_conn_list_sync);
assert!(feature_flags.avoid_relay_data);
assert!(feature_flags.is_public_server);
+89
View File
@@ -1,6 +1,12 @@
use std::{net::IpAddr, ops::Deref, sync::Arc};
#[cfg(target_os = "windows")]
use network_interface::{
Addr as SystemAddr, NetworkInterface as SystemNetworkInterface, NetworkInterfaceConfig,
};
use pnet::datalink::NetworkInterface;
#[cfg(target_os = "windows")]
use pnet::{ipnetwork::IpNetwork, util::MacAddr};
use tokio::{
sync::{Mutex, RwLock},
task::JoinSet,
@@ -264,6 +270,9 @@ impl IPCollector {
pub async fn collect_interfaces(net_ns: NetNS, filter: bool) -> Vec<NetworkInterface> {
let _g = net_ns.guard();
#[cfg(target_os = "windows")]
let ifaces = Self::collect_interfaces_windows();
#[cfg(not(target_os = "windows"))]
let ifaces = pnet::datalink::interfaces();
let mut ret = vec![];
for iface in ifaces {
@@ -281,6 +290,86 @@ impl IPCollector {
ret
}
#[cfg(target_os = "windows")]
fn collect_interfaces_windows() -> Vec<NetworkInterface> {
match SystemNetworkInterface::show() {
Ok(ifaces) => ifaces
.into_iter()
.map(Self::convert_windows_interface)
.collect(),
Err(e) => {
tracing::warn!(
?e,
"failed to enumerate interfaces via network-interface, falling back to pnet"
);
match std::panic::catch_unwind(pnet::datalink::interfaces) {
Ok(ifaces) => ifaces,
Err(_) => {
tracing::error!(
"failed to enumerate interfaces via both network-interface and pnet"
);
Vec::new()
}
}
}
}
}
#[cfg(target_os = "windows")]
fn convert_windows_interface(iface: SystemNetworkInterface) -> NetworkInterface {
let mac = iface.mac_addr.as_deref().and_then(|mac| {
mac.parse::<MacAddr>()
.map_err(|e| {
tracing::debug!(iface = %iface.name, mac, ?e, "failed to parse interface mac")
})
.ok()
});
let ips = iface
.addr
.into_iter()
.filter_map(Self::convert_windows_interface_addr)
.collect();
NetworkInterface {
name: iface.name,
description: String::new(),
index: iface.index,
mac,
ips,
// pnet does not populate Windows flags either, so keep the existing semantics.
flags: 0,
}
}
#[cfg(target_os = "windows")]
fn convert_windows_interface_addr(addr: SystemAddr) -> Option<IpNetwork> {
match addr {
SystemAddr::V4(addr) => {
let netmask = addr
.netmask
.map(IpAddr::V4)
.unwrap_or(IpAddr::V4(std::net::Ipv4Addr::new(255, 255, 255, 255)));
IpNetwork::with_netmask(IpAddr::V4(addr.ip), netmask)
.map_err(|e| {
tracing::debug!(ip = %addr.ip, ?addr.netmask, ?e, "failed to convert ipv4")
})
.ok()
}
SystemAddr::V6(addr) => {
let netmask = addr
.netmask
.map(IpAddr::V6)
.unwrap_or(IpAddr::V6(std::net::Ipv6Addr::from(u128::MAX)));
IpNetwork::with_netmask(IpAddr::V6(addr.ip), netmask)
.map_err(|e| {
tracing::debug!(ip = %addr.ip, ?addr.netmask, ?e, "failed to convert ipv6")
})
.ok()
}
}
}
#[tracing::instrument(skip(net_ns))]
async fn do_collect_local_ip_addrs(net_ns: NetNS) -> GetIpListResponse {
let mut ret = GetIpListResponse::default();
+30 -3
View File
@@ -581,9 +581,9 @@ impl StatsManager {
break;
};
// Remove entries that haven't been updated for 3 minutes
counters.retain(|_, metric_data: &mut Arc<MetricData>| unsafe {
metric_data.get_last_updated() > cutoff_time
counters.retain(|_, metric_data: &mut Arc<MetricData>| {
Arc::strong_count(metric_data) > 1
|| unsafe { metric_data.get_last_updated() > cutoff_time }
});
counters.shrink_to_fit();
}
@@ -900,6 +900,33 @@ mod tests {
assert_eq!(counter2.get(), 25);
}
#[tokio::test]
async fn test_cleanup_keeps_metrics_with_live_handles() {
let stats = StatsManager::new();
let counter = stats.get_simple_counter(MetricName::TrafficBytesForwarded);
counter.set(1);
let cutoff_time = Instant::now().checked_add(Duration::from_secs(1)).unwrap();
stats
.counters
.retain(|_, metric_data: &mut Arc<MetricData>| {
Arc::strong_count(metric_data) > 1
|| unsafe { metric_data.get_last_updated() > cutoff_time }
});
assert_eq!(stats.metric_count(), 1);
assert_eq!(stats.get_all_metrics().len(), 1);
drop(counter);
stats
.counters
.retain(|_, metric_data: &mut Arc<MetricData>| {
Arc::strong_count(metric_data) > 1
|| unsafe { metric_data.get_last_updated() > cutoff_time }
});
assert_eq!(stats.metric_count(), 0);
}
#[tokio::test]
async fn test_stats_rpc_data_structures() {
// Test GetStatsRequest
+33 -31
View File
@@ -31,19 +31,20 @@ use crate::{
},
rpc_types::controller::BaseController,
},
tunnel::{udp::UdpTunnelConnector, IpVersion},
tunnel::{matches_protocol, udp::UdpTunnelConnector, IpVersion},
use_global_var,
};
use anyhow::Context;
use rand::Rng;
use tokio::{net::UdpSocket, task::JoinSet, time::timeout};
use url::Host;
use super::{
create_connector_by_url, should_background_p2p_with_peer, should_try_p2p_with_peer,
udp_hole_punch,
};
use crate::tunnel::{matches_scheme, FromUrl, IpScheme, TunnelScheme};
use anyhow::Context;
use rand::Rng;
use socket2::Protocol;
use tokio::{net::UdpSocket, task::JoinSet, time::timeout};
use url::Host;
pub const DIRECT_CONNECTOR_SERVICE_ID: u32 = 1;
pub const DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC: u64 = 300;
@@ -61,7 +62,8 @@ impl PeerManagerForDirectConnector for PeerManager {
async fn list_peers(&self) -> Vec<PeerId> {
let mut ret = vec![];
let allow_public_server = use_global_var!(DIRECT_CONNECT_TO_PUBLIC_SERVER);
let lazy_p2p = self.get_global_ctx().get_flags().lazy_p2p;
let flags = self.get_global_ctx().get_flags();
let lazy_p2p = flags.lazy_p2p;
let now = Instant::now();
let routes = self.list_routes().await;
@@ -70,10 +72,15 @@ impl PeerManagerForDirectConnector for PeerManager {
route.feature_flag.as_ref(),
allow_public_server,
lazy_p2p,
flags.disable_p2p,
flags.need_p2p,
);
let dynamic_allowed =
should_try_p2p_with_peer(route.feature_flag.as_ref(), allow_public_server)
&& self.has_recent_traffic(route.peer_id, now);
let dynamic_allowed = should_try_p2p_with_peer(
route.feature_flag.as_ref(),
allow_public_server,
flags.disable_p2p,
flags.need_p2p,
) && self.has_recent_traffic(route.peer_id, now);
if static_allowed || dynamic_allowed {
ret.push(route.peer_id);
}
@@ -189,9 +196,7 @@ impl DirectConnectorManagerData {
.await;
let udp_connector = UdpTunnelConnector::new(remote_url.clone());
let remote_addr =
super::check_scheme_and_get_socket_addr::<SocketAddr>(remote_url, "udp", IpVersion::V6)
.await?;
let remote_addr = SocketAddr::from_url(remote_url.clone(), IpVersion::V6).await?;
let ret = udp_connector
.try_connect_with_socket(local_socket, remote_addr)
.await?;
@@ -205,18 +210,19 @@ impl DirectConnectorManagerData {
async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> {
let connector = create_connector_by_url(&addr, &self.global_ctx, IpVersion::Both).await?;
let remote_url = connector.remote_url();
let (peer_id, conn_id) =
if remote_url.scheme() == "udp" && matches!(remote_url.host(), Some(Host::Ipv6(_))) {
self.connect_to_public_ipv6(dst_peer_id, &remote_url)
.await?
} else {
timeout(
std::time::Duration::from_secs(3),
self.peer_manager
.try_direct_connect_with_peer_id_hint(connector, Some(dst_peer_id)),
)
.await??
};
let (peer_id, conn_id) = if matches_scheme!(remote_url, TunnelScheme::Ip(IpScheme::Udp))
&& matches!(remote_url.host(), Some(Host::Ipv6(_)))
{
self.connect_to_public_ipv6(dst_peer_id, &remote_url)
.await?
} else {
timeout(
std::time::Duration::from_secs(3),
self.peer_manager
.try_direct_connect_with_peer_id_hint(connector, Some(dst_peer_id)),
)
.await??
};
if peer_id != dst_peer_id && !TESTING.load(Ordering::Relaxed) {
tracing::info!(
@@ -306,7 +312,7 @@ impl DirectConnectorManagerData {
let listener_host = addrs.pop();
tracing::info!(?listener_host, ?listener, "try direct connect to peer");
let is_udp = matches!(listener.scheme(), "udp" | "wg");
let is_udp = matches_protocol!(listener, Protocol::UDP);
// Snapshot running listeners once; used for cheap port pre-checks before the
// expensive should_deny_proxy call (which binds a socket per IP) in the
// unspecified-address expansion loops below.
@@ -314,7 +320,7 @@ impl DirectConnectorManagerData {
let port_has_local_listener = |port: u16| -> bool {
local_listeners
.iter()
.any(|l| l.port() == Some(port) && (matches!(l.scheme(), "udp" | "wg") == is_udp))
.any(|l| l.port() == Some(port) && matches_protocol!(l, Protocol::UDP) == is_udp)
};
match listener_host {
@@ -650,10 +656,6 @@ impl DirectConnectorManager {
}
pub fn run(&mut self) {
if self.global_ctx.get_flags().disable_p2p {
return;
}
self.run_as_server();
self.run_as_client();
}
+43 -45
View File
@@ -1,5 +1,6 @@
use std::{net::SocketAddr, sync::Arc};
use super::{create_connector_by_url, http_connector::TunnelWithInfo};
use crate::{
common::{
dns::{resolve_txt_record, RESOLVER},
@@ -7,16 +8,15 @@ use crate::{
global_ctx::ArcGlobalCtx,
log,
},
tunnel::{IpVersion, Tunnel, TunnelConnector, TunnelError, PROTO_PORT_OFFSET},
proto::common::TunnelInfo,
tunnel::{IpScheme, IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelScheme},
};
use anyhow::Context;
use dashmap::DashSet;
use hickory_resolver::proto::rr::rdata::SRV;
use itertools::Itertools;
use rand::{seq::SliceRandom, Rng as _};
use crate::proto::common::TunnelInfo;
use super::{create_connector_by_url, http_connector::TunnelWithInfo};
use strum::VariantArray;
fn weighted_choice<T>(options: &[(T, u64)]) -> Option<&T> {
let total_weight = options.iter().map(|(_, weight)| *weight).sum();
@@ -35,16 +35,18 @@ fn weighted_choice<T>(options: &[(T, u64)]) -> Option<&T> {
}
#[derive(Debug)]
pub struct DNSTunnelConnector {
pub struct DnsTunnelConnector {
scheme: TunnelScheme,
addr: url::Url,
bind_addrs: Vec<SocketAddr>,
global_ctx: ArcGlobalCtx,
ip_version: IpVersion,
}
impl DNSTunnelConnector {
impl DnsTunnelConnector {
pub fn new(addr: url::Url, global_ctx: ArcGlobalCtx) -> Self {
Self {
scheme: (&addr).try_into().unwrap(),
addr,
bind_addrs: Vec::new(),
global_ctx,
@@ -82,7 +84,7 @@ impl DNSTunnelConnector {
Ok(connector)
}
fn handle_one_srv_record(record: &SRV, protocol: &str) -> Result<(url::Url, u64), Error> {
fn handle_one_srv_record(record: &SRV, protocol: IpScheme) -> Result<(url::Url, u64), Error> {
// port must be non-zero
if record.port() == 0 {
return Err(anyhow::anyhow!("port must be non-zero").into());
@@ -112,15 +114,15 @@ impl DNSTunnelConnector {
) -> Result<Box<dyn TunnelConnector>, Error> {
tracing::info!("handle_srv_record: {}", domain_name);
let srv_domains = PROTO_PORT_OFFSET
let srv_domains = IpScheme::VARIANTS
.iter()
.map(|(p, _)| (format!("_easytier._{}.{}", p, domain_name), *p)) // _easytier._udp.{domain_name}
.collect::<Vec<_>>();
.map(|s| (s, format!("_easytier._{}.{}", s, domain_name)))
.collect_vec();
tracing::info!("build srv_domains: {:?}", srv_domains);
let responses = Arc::new(DashSet::new());
let srv_lookup_tasks = srv_domains
.iter()
.map(|(srv_domain, protocol)| {
.map(|(protocol, srv_domain)| {
let resolver = RESOLVER.clone();
let responses = responses.clone();
async move {
@@ -129,7 +131,7 @@ impl DNSTunnelConnector {
})?;
tracing::info!(?response, ?srv_domain, "srv_lookup response");
for record in response.iter() {
let parsed_record = Self::handle_one_srv_record(record, protocol);
let parsed_record = Self::handle_one_srv_record(record, **protocol);
tracing::info!(?parsed_record, ?srv_domain, "parsed_record");
if let Err(e) = &parsed_record {
log::warn!("got invalid srv record {:?}", e);
@@ -162,32 +164,28 @@ impl DNSTunnelConnector {
}
#[async_trait::async_trait]
impl super::TunnelConnector for DNSTunnelConnector {
impl super::TunnelConnector for DnsTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let mut conn = if self.addr.scheme() == "txt" {
self.handle_txt_record(
self.addr
.host_str()
.as_ref()
.ok_or(anyhow::anyhow!("host should not be empty in txt url"))?,
)
.await
.with_context(|| "get txt record url failed")?
} else if self.addr.scheme() == "srv" {
self.handle_srv_record(
self.addr
.host_str()
.as_ref()
.ok_or(anyhow::anyhow!("host should not be empty in srv url"))?,
)
.await
.with_context(|| "get srv record url failed")?
} else {
return Err(anyhow::anyhow!(
"unsupported dns scheme: {}, expecting txt or srv",
self.addr.scheme()
)
.into());
let mut conn = match self.scheme {
TunnelScheme::Txt => self
.handle_txt_record(
self.addr
.host_str()
.as_ref()
.ok_or(anyhow::anyhow!("host should not be empty in txt url"))?,
)
.await
.with_context(|| "get txt record url failed")?,
TunnelScheme::Srv => self
.handle_srv_record(
self.addr
.host_str()
.as_ref()
.ok_or(anyhow::anyhow!("host should not be empty in srv url"))?,
)
.await
.with_context(|| "get srv record url failed")?,
_ => return Err(anyhow::anyhow!("unsupported dns scheme: {:?}", self.scheme).into()),
};
let t = conn.connect().await?;
let info = t.info().unwrap_or_default();
@@ -196,11 +194,11 @@ impl super::TunnelConnector for DNSTunnelConnector {
TunnelInfo {
local_addr: info.local_addr.clone(),
remote_addr: Some(self.addr.clone().into()),
tunnel_type: format!(
"{}-{}",
self.addr.scheme(),
info.remote_addr.unwrap_or_default()
),
resolved_remote_addr: info
.resolved_remote_addr
.clone()
.or(info.remote_addr.clone()),
tunnel_type: format!("{}-{}", self.addr.scheme(), info.tunnel_type),
},
)))
}
@@ -227,7 +225,7 @@ mod tests {
async fn test_txt() {
let url = "txt://txt.easytier.cn";
let global_ctx = get_mock_global_ctx();
let mut connector = DNSTunnelConnector::new(url.parse().unwrap(), global_ctx);
let mut connector = DnsTunnelConnector::new(url.parse().unwrap(), global_ctx);
connector.set_ip_version(IpVersion::V4);
for _ in 0..5 {
match connector.connect().await {
@@ -246,7 +244,7 @@ mod tests {
async fn test_srv() {
let url = "srv://easytier.cn";
let global_ctx = get_mock_global_ctx();
let mut connector = DNSTunnelConnector::new(url.parse().unwrap(), global_ctx);
let mut connector = DnsTunnelConnector::new(url.parse().unwrap(), global_ctx);
connector.set_ip_version(IpVersion::V4);
for _ in 0..5 {
match connector.connect().await {
+7 -5
View File
@@ -229,11 +229,11 @@ impl super::TunnelConnector for HttpTunnelConnector {
TunnelInfo {
local_addr: info.local_addr.clone(),
remote_addr: Some(self.addr.clone().into()),
tunnel_type: format!(
"{:?}-{}",
self.redirect_type,
info.remote_addr.unwrap_or_default()
),
resolved_remote_addr: info
.resolved_remote_addr
.clone()
.or(info.remote_addr.clone()),
tunnel_type: format!("{}-{}", self.addr.scheme(), info.tunnel_type),
},
)))
}
@@ -353,6 +353,8 @@ mod tests {
let info = t.info().unwrap();
let remote_addr = info.remote_addr.unwrap();
assert_eq!(remote_addr, test_url.into());
let resolved_remote_addr = info.resolved_remote_addr.unwrap();
assert_eq!(resolved_remote_addr.url, "tcp://127.0.0.1:25888");
tokio::join!(task).0.unwrap();
}
+144 -124
View File
@@ -3,24 +3,17 @@ use std::{
sync::Arc,
};
use http_connector::HttpTunnelConnector;
#[cfg(feature = "faketcp")]
use crate::tunnel::fake_tcp::FakeTcpTunnelConnector;
#[cfg(feature = "quic")]
use crate::tunnel::quic::QUICTunnelConnector;
#[cfg(unix)]
use crate::tunnel::unix::UnixSocketTunnelConnector;
#[cfg(feature = "wireguard")]
use crate::tunnel::wireguard::{WgConfig, WgTunnelConnector};
use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, idn, network::IPCollector},
connector::dns_connector::DnsTunnelConnector,
proto::common::PeerFeatureFlag,
tunnel::{
check_scheme_and_get_socket_addr, ring::RingTunnelConnector, tcp::TcpTunnelConnector,
udp::UdpTunnelConnector, IpVersion, TunnelConnector,
self, ring::RingTunnelConnector, tcp::TcpTunnelConnector, udp::UdpTunnelConnector, FromUrl,
IpScheme, IpVersion, TunnelConnector, TunnelError, TunnelScheme,
},
utils::BoxExt,
};
use http_connector::HttpTunnelConnector;
pub mod direct;
pub mod manual;
@@ -33,19 +26,31 @@ pub mod http_connector;
pub(crate) fn should_try_p2p_with_peer(
feature_flag: Option<&PeerFeatureFlag>,
allow_public_server: bool,
local_disable_p2p: bool,
local_need_p2p: bool,
) -> bool {
feature_flag
.map(|flag| allow_public_server || !flag.is_public_server)
.unwrap_or(true)
.map(|flag| {
(allow_public_server || !flag.is_public_server)
&& (!local_disable_p2p || flag.need_p2p)
&& (!flag.disable_p2p || local_need_p2p)
})
.unwrap_or(!local_disable_p2p)
}
pub(crate) fn should_background_p2p_with_peer(
feature_flag: Option<&PeerFeatureFlag>,
allow_public_server: bool,
lazy_p2p: bool,
local_disable_p2p: bool,
local_need_p2p: bool,
) -> bool {
should_try_p2p_with_peer(feature_flag, allow_public_server)
&& (!lazy_p2p || feature_flag.map(|flag| flag.need_p2p).unwrap_or(false))
should_try_p2p_with_peer(
feature_flag,
allow_public_server,
local_disable_p2p,
local_need_p2p,
) && (!lazy_p2p || feature_flag.map(|flag| flag.need_p2p).unwrap_or(false))
}
async fn set_bind_addr_for_peer_connector(
@@ -90,84 +95,34 @@ pub async fn create_connector_by_url(
) -> Result<Box<dyn TunnelConnector + 'static>, Error> {
let url = url::Url::parse(url).map_err(|_| Error::InvalidUrl(url.to_owned()))?;
let url = idn::convert_idn_to_ascii(url)?;
let mut connector: Box<dyn TunnelConnector + 'static> = match url.scheme() {
"tcp" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp", ip_version).await?;
let mut connector = TcpTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
&mut connector,
dst_addr.is_ipv4(),
&global_ctx.get_ip_collector(),
)
.await;
}
Box::new(connector)
}
"udp" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp", ip_version).await?;
let mut connector = UdpTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
&mut connector,
dst_addr.is_ipv4(),
&global_ctx.get_ip_collector(),
)
.await;
}
Box::new(connector)
}
"http" | "https" => {
let connector = HttpTunnelConnector::new(url, global_ctx.clone());
Box::new(connector)
}
"ring" => {
check_scheme_and_get_socket_addr::<uuid::Uuid>(&url, "ring", IpVersion::Both).await?;
let connector = RingTunnelConnector::new(url);
Box::new(connector)
}
#[cfg(feature = "quic")]
"quic" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic", ip_version).await?;
let mut connector = QUICTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
&mut connector,
dst_addr.is_ipv4(),
&global_ctx.get_ip_collector(),
)
.await;
}
Box::new(connector)
}
#[cfg(feature = "wireguard")]
"wg" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg", ip_version).await?;
let nid = global_ctx.get_network_identity();
let wg_config = WgConfig::new_from_network_identity(
&nid.network_name,
&nid.network_secret.unwrap_or_default(),
);
let mut connector = WgTunnelConnector::new(url, wg_config);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
&mut connector,
dst_addr.is_ipv4(),
&global_ctx.get_ip_collector(),
)
.await;
}
Box::new(connector)
}
#[cfg(feature = "websocket")]
"ws" | "wss" => {
use crate::tunnel::FromUrl;
let scheme = (&url)
.try_into()
.map_err(|_| TunnelError::InvalidProtocol(url.scheme().to_owned()))?;
let mut connector: Box<dyn TunnelConnector + 'static> = match scheme {
TunnelScheme::Ip(scheme) => {
let dst_addr = SocketAddr::from_url(url.clone(), ip_version).await?;
let mut connector = crate::tunnel::websocket::WSTunnelConnector::new(url);
let mut connector: Box<dyn TunnelConnector> = match scheme {
IpScheme::Tcp => TcpTunnelConnector::new(url).boxed(),
IpScheme::Udp => UdpTunnelConnector::new(url).boxed(),
#[cfg(feature = "quic")]
IpScheme::Quic => tunnel::quic::QuicTunnelConnector::new(url).boxed(),
#[cfg(feature = "wireguard")]
IpScheme::Wg => {
use crate::tunnel::wireguard::{WgConfig, WgTunnelConnector};
let nid = global_ctx.get_network_identity();
let wg_config = WgConfig::new_from_network_identity(
&nid.network_name,
&nid.network_secret.unwrap_or_default(),
);
WgTunnelConnector::new(url, wg_config).boxed()
}
#[cfg(feature = "websocket")]
IpScheme::Ws | IpScheme::Wss => {
tunnel::websocket::WsTunnelConnector::new(url).boxed()
}
#[cfg(feature = "faketcp")]
IpScheme::FakeTcp => tunnel::fake_tcp::FakeTcpTunnelConnector::new(url).boxed(),
};
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
&mut connector,
@@ -176,40 +131,22 @@ pub async fn create_connector_by_url(
)
.await;
}
Box::new(connector)
connector
}
"txt" | "srv" => {
#[cfg(unix)]
TunnelScheme::Unix => tunnel::unix::UnixSocketTunnelConnector::new(url).boxed(),
TunnelScheme::Http | TunnelScheme::Https => {
HttpTunnelConnector::new(url, global_ctx.clone()).boxed()
}
TunnelScheme::Ring => RingTunnelConnector::new(url).boxed(),
TunnelScheme::Txt | TunnelScheme::Srv => {
if url.host_str().is_none() {
return Err(Error::InvalidUrl(format!(
"host should not be empty in txt or srv url: {}",
url
)));
}
let connector = dns_connector::DNSTunnelConnector::new(url, global_ctx.clone());
Box::new(connector)
}
#[cfg(feature = "faketcp")]
"faketcp" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "faketcp", ip_version).await?;
let mut connector = FakeTcpTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
&mut connector,
dst_addr.is_ipv4(),
&global_ctx.get_ip_collector(),
)
.await;
}
Box::new(connector)
}
#[cfg(unix)]
"unix" => {
let connector = UnixSocketTunnelConnector::new(url);
Box::new(connector)
}
_ => {
return Err(Error::InvalidUrl(url.into()));
DnsTunnelConnector::new(url, global_ctx.clone()).boxed()
}
};
connector.set_ip_version(ip_version);
@@ -237,17 +174,23 @@ mod tests {
assert!(should_background_p2p_with_peer(
Some(&no_need_p2p),
false,
false,
false,
false
));
assert!(!should_background_p2p_with_peer(
Some(&no_need_p2p),
false,
true
true,
false,
false
));
assert!(should_background_p2p_with_peer(
Some(&need_p2p),
false,
true
true,
false,
false
));
}
@@ -258,16 +201,93 @@ mod tests {
..Default::default()
};
assert!(!should_try_p2p_with_peer(Some(&public_server), false));
assert!(should_try_p2p_with_peer(Some(&public_server), true));
assert!(!should_try_p2p_with_peer(
Some(&public_server),
false,
false,
false
));
assert!(should_try_p2p_with_peer(
Some(&public_server),
true,
false,
false
));
assert!(!should_background_p2p_with_peer(
Some(&public_server),
false,
false,
false,
false
));
assert!(should_background_p2p_with_peer(
Some(&public_server),
true,
false,
false,
false
));
}
#[test]
fn disable_p2p_only_allows_need_p2p_exceptions() {
let normal_peer = PeerFeatureFlag::default();
let need_peer = PeerFeatureFlag {
need_p2p: true,
..Default::default()
};
let disable_peer = PeerFeatureFlag {
disable_p2p: true,
..Default::default()
};
let disable_need_peer = PeerFeatureFlag {
disable_p2p: true,
need_p2p: true,
..Default::default()
};
assert!(should_try_p2p_with_peer(
Some(&normal_peer),
false,
false,
false
));
assert!(should_try_p2p_with_peer(None, false, false, false));
assert!(!should_try_p2p_with_peer(None, false, true, false));
assert!(!should_try_p2p_with_peer(
Some(&normal_peer),
false,
true,
false
));
assert!(should_try_p2p_with_peer(
Some(&need_peer),
false,
true,
false
));
assert!(!should_try_p2p_with_peer(
Some(&disable_peer),
false,
false,
false
));
assert!(should_try_p2p_with_peer(
Some(&disable_peer),
false,
false,
true
));
assert!(should_try_p2p_with_peer(
Some(&disable_need_peer),
false,
true,
true
));
assert!(!should_try_p2p_with_peer(
Some(&disable_need_peer),
false,
true,
false
));
}
+17 -8
View File
@@ -420,7 +420,8 @@ impl PeerTaskLauncher for TcpHolePunchPeerTaskLauncher {
#[tracing::instrument(skip(self, data))]
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> {
let global_ctx = data.peer_mgr.get_global_ctx();
let lazy_p2p = global_ctx.get_flags().lazy_p2p;
let flags = global_ctx.get_flags();
let lazy_p2p = flags.lazy_p2p;
let my_tcp_nat_type = NatType::try_from(
global_ctx
.get_stun_info_collector()
@@ -443,10 +444,19 @@ impl PeerTaskLauncher for TcpHolePunchPeerTaskLauncher {
let mut peers_to_connect = Vec::new();
for route in data.peer_mgr.list_routes().await.iter() {
let static_allowed =
should_background_p2p_with_peer(route.feature_flag.as_ref(), false, lazy_p2p);
let dynamic_allowed = should_try_p2p_with_peer(route.feature_flag.as_ref(), false)
&& data.peer_mgr.has_recent_traffic(route.peer_id, now);
let static_allowed = should_background_p2p_with_peer(
route.feature_flag.as_ref(),
false,
lazy_p2p,
flags.disable_p2p,
flags.need_p2p,
);
let dynamic_allowed = should_try_p2p_with_peer(
route.feature_flag.as_ref(),
false,
flags.disable_p2p,
flags.need_p2p,
) && data.peer_mgr.has_recent_traffic(route.peer_id, now);
if !static_allowed && !dynamic_allowed {
continue;
}
@@ -554,10 +564,9 @@ impl TcpHolePunchConnector {
pub async fn run(&mut self) -> Result<(), Error> {
let flags = self.peer_mgr.get_global_ctx().get_flags();
if flags.disable_p2p || flags.disable_tcp_hole_punching {
if flags.disable_tcp_hole_punching {
tracing::debug!(
"tcp hole punch disabled by disable_p2p(={}) or disable_tcp_hole_punching(={});",
flags.disable_p2p,
"tcp hole punch disabled by disable_tcp_hole_punching(={});",
flags.disable_tcp_hole_punching
);
return Ok(());
+15 -8
View File
@@ -428,7 +428,8 @@ impl PeerTaskLauncher for UdpHolePunchPeerTaskLauncher {
}
let my_peer_id = data.peer_mgr.my_peer_id();
let lazy_p2p = data.peer_mgr.get_global_ctx().get_flags().lazy_p2p;
let flags = data.peer_mgr.get_global_ctx().get_flags();
let lazy_p2p = flags.lazy_p2p;
let now = Instant::now();
data.blacklist.cleanup();
@@ -438,10 +439,19 @@ impl PeerTaskLauncher for UdpHolePunchPeerTaskLauncher {
// 2. peers is full cone (any restricted type);
// 3. peers not in blacklist;
for route in data.peer_mgr.list_routes().await.iter() {
let static_allowed =
should_background_p2p_with_peer(route.feature_flag.as_ref(), false, lazy_p2p);
let dynamic_allowed = should_try_p2p_with_peer(route.feature_flag.as_ref(), false)
&& data.peer_mgr.has_recent_traffic(route.peer_id, now);
let static_allowed = should_background_p2p_with_peer(
route.feature_flag.as_ref(),
false,
lazy_p2p,
flags.disable_p2p,
flags.need_p2p,
);
let dynamic_allowed = should_try_p2p_with_peer(
route.feature_flag.as_ref(),
false,
flags.disable_p2p,
flags.need_p2p,
) && data.peer_mgr.has_recent_traffic(route.peer_id, now);
if !static_allowed && !dynamic_allowed {
continue;
}
@@ -565,9 +575,6 @@ impl UdpHolePunchConnector {
pub async fn run(&mut self) -> Result<(), Error> {
let global_ctx = self.peer_mgr.get_global_ctx();
if global_ctx.get_flags().disable_p2p {
return Ok(());
}
if global_ctx.get_flags().disable_udp_hole_punching {
return Ok(());
}
+24 -17
View File
@@ -22,7 +22,6 @@ use crate::{
launcher::add_proxy_network_to_config,
proto::common::{CompressionAlgoPb, SecureModeConfig},
rpc_service::ApiRpcServer,
tunnel::PROTO_PORT_OFFSET,
utils::setup_panic_handler,
web_client, ShellType,
};
@@ -30,8 +29,10 @@ use anyhow::Context;
use cidr::IpCidr;
use clap::{CommandFactory, Parser};
use rust_i18n::t;
use strum::VariantArray;
use tokio::io::AsyncReadExt;
use crate::tunnel::IpScheme;
#[cfg(feature = "jemalloc-prof")]
use jemalloc_ctl::{epoch, stats, Access as _, AsName as _};
@@ -560,6 +561,13 @@ struct NetworkOptions {
)]
foreign_relay_bps_limit: Option<u64>,
#[arg(
long,
env = "ET_INSTANCE_RECV_BPS_LIMIT",
help = t!("core_clap.instance_recv_bps_limit").to_string(),
)]
instance_recv_bps_limit: Option<u64>,
#[arg(
long,
value_delimiter = ',',
@@ -735,8 +743,12 @@ impl Cli {
let mut listeners: Vec<String> = Vec::new();
if origin_listeners.len() == 1 {
if let Ok(port) = origin_listeners[0].parse::<u16>() {
for (proto, offset) in PROTO_PORT_OFFSET {
listeners.push(format!("{}://0.0.0.0:{}", proto, port + *offset));
for proto in IpScheme::VARIANTS {
listeners.push(format!(
"{}://0.0.0.0:{}",
proto,
port + proto.port_offset()
));
}
return Ok(listeners);
}
@@ -751,20 +763,15 @@ impl Cli {
panic!("failed to parse listener: {}", l);
}
} else {
let Some((proto, offset)) = PROTO_PORT_OFFSET
.iter()
.find(|(proto, _)| *proto == proto_port[0])
else {
return Err(anyhow::anyhow!("unknown protocol: {}", proto_port[0]));
};
let scheme: IpScheme = proto_port[0].parse()?;
let port = if proto_port.len() == 2 {
proto_port[1].parse::<u16>().unwrap()
} else {
11010 + offset
11010 + scheme.port_offset()
};
listeners.push(format!("{}://0.0.0.0:{}", proto, port));
listeners.push(format!("{}://0.0.0.0:{}", scheme, port));
}
}
@@ -1060,6 +1067,9 @@ impl NetworkOptions {
f.foreign_relay_bps_limit = self
.foreign_relay_bps_limit
.unwrap_or(f.foreign_relay_bps_limit);
f.instance_recv_bps_limit = self
.instance_recv_bps_limit
.unwrap_or(f.instance_recv_bps_limit);
f.multi_thread_count = self.multi_thread_count.unwrap_or(f.multi_thread_count);
f.disable_relay_kcp = self.disable_relay_kcp.unwrap_or(f.disable_relay_kcp);
f.disable_relay_quic = self.disable_relay_quic.unwrap_or(f.disable_relay_quic);
@@ -1124,8 +1134,7 @@ impl LoggingConfigLoader for &LoggingOptions {
#[cfg(target_os = "windows")]
fn win_service_set_work_dir(service_name: &std::ffi::OsString) -> anyhow::Result<()> {
use crate::common::constants::WIN_SERVICE_WORK_DIR_REG_KEY;
use winreg::enums::*;
use winreg::RegKey;
use winreg::{enums::*, RegKey};
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
let key = hklm.open_subkey_with_flags(WIN_SERVICE_WORK_DIR_REG_KEY, KEY_READ)?;
@@ -1205,11 +1214,9 @@ fn parse_cli() -> Cli {
#[cfg(target_os = "windows")]
fn win_service_main(arg: Vec<std::ffi::OsString>) {
use std::sync::Arc;
use std::time::Duration;
use std::{sync::Arc, time::Duration};
use tokio::sync::Notify;
use windows_service::service::*;
use windows_service::service_control_handler::*;
use windows_service::{service::*, service_control_handler::*};
_ = win_service_set_work_dir(&arg[0]);
+73 -22
View File
@@ -75,7 +75,7 @@ use easytier::{
rpc_impl::standalone::StandAloneClient,
rpc_types::controller::BaseController,
},
tunnel::tcp::TcpTunnelConnector,
tunnel::{tcp::TcpTunnelConnector, TunnelScheme},
utils::{cost_to_str, PeerRoutePair},
};
@@ -192,8 +192,6 @@ struct PeerArgs {
#[derive(Subcommand, Debug)]
enum PeerSubCommand {
Add,
Remove,
List,
ListForeign {
#[arg(
@@ -232,8 +230,16 @@ struct ConnectorArgs {
#[derive(Subcommand, Debug)]
enum ConnectorSubCommand {
Add,
Remove,
/// Add a connector
Add {
#[arg(help = "connector url, e.g., tcp://1.2.3.4:11010")]
url: String,
},
/// Remove a connector
Remove {
#[arg(help = "connector url, e.g., tcp://1.2.3.4:11010")]
url: String,
},
List,
}
@@ -1152,14 +1158,59 @@ impl<'a> CommandHandler<'a> {
.prometheus_text)
}
#[allow(dead_code)]
fn handle_peer_add(&self, _args: PeerArgs) {
println!("add peer");
fn connector_validate_url(url: &str) -> Result<url::Url, Error> {
let url = url::Url::parse(url).map_err(|e| anyhow::anyhow!("invalid url ({url}): {e}"))?;
TunnelScheme::try_from(&url).map_err(|_| {
anyhow::anyhow!("unsupported scheme \"{}\" in url ({url})", url.scheme())
})?;
Ok(url)
}
#[allow(dead_code)]
fn handle_peer_remove(&self, _args: PeerArgs) {
println!("remove peer");
async fn apply_connector_modify(
&self,
url: &str,
action: ConfigPatchAction,
) -> Result<(), Error> {
let url = match action {
ConfigPatchAction::Add => Self::connector_validate_url(url)?,
ConfigPatchAction::Remove => {
url::Url::parse(url).map_err(|e| anyhow::anyhow!("invalid url ({url}): {e}"))?
}
ConfigPatchAction::Clear => {
return Err(anyhow::anyhow!(
"unsupported connector patch action: {:?}",
action
));
}
};
let client = self.get_config_client().await?;
let request = PatchConfigRequest {
instance: Some(self.instance_selector.clone()),
patch: Some(InstanceConfigPatch {
connectors: vec![UrlPatch {
action: action.into(),
url: Some(url.into()),
}],
..Default::default()
}),
};
let _response = client
.patch_config(BaseController::default(), request)
.await?;
Ok(())
}
async fn handle_connector_modify(
&self,
url: &str,
action: ConfigPatchAction,
) -> Result<(), Error> {
let url = url.to_string();
self.apply_to_instances(|handler| {
let url = url.clone();
Box::pin(async move { handler.apply_connector_modify(&url, action).await })
})
.await
}
async fn handle_peer_list(&self) -> Result<(), Error> {
@@ -1353,7 +1404,7 @@ impl<'a> CommandHandler<'a> {
"remote_addr: {}, rx_bytes: {}, tx_bytes: {}, latency_us: {}",
conn.tunnel
.as_ref()
.map(|t| t.remote_addr.clone().unwrap_or_default())
.and_then(|t| t.display_remote_addr())
.unwrap_or_default(),
conn.stats.as_ref().map(|s| s.rx_bytes).unwrap_or_default(),
conn.stats.as_ref().map(|s| s.tx_bytes).unwrap_or_default(),
@@ -2572,12 +2623,6 @@ async fn main() -> Result<(), Error> {
match cli.sub_command {
SubCommand::Peer(peer_args) => match &peer_args.sub_command {
Some(PeerSubCommand::Add) => {
println!("add peer");
}
Some(PeerSubCommand::Remove) => {
println!("remove peer");
}
Some(PeerSubCommand::List) => {
handler.handle_peer_list().await?;
}
@@ -2592,11 +2637,17 @@ async fn main() -> Result<(), Error> {
}
},
SubCommand::Connector(conn_args) => match conn_args.sub_command {
Some(ConnectorSubCommand::Add) => {
println!("add connector");
Some(ConnectorSubCommand::Add { url }) => {
handler
.handle_connector_modify(&url, ConfigPatchAction::Add)
.await?;
println!("connector add applied to selected instance(s): {url}");
}
Some(ConnectorSubCommand::Remove) => {
println!("remove connector");
Some(ConnectorSubCommand::Remove { url }) => {
handler
.handle_connector_modify(&url, ConfigPatchAction::Remove)
.await?;
println!("connector remove applied to selected instance(s): {url}");
}
Some(ConnectorSubCommand::List) => {
handler.handle_connector_list().await?;
@@ -232,6 +232,7 @@ async fn test_magic_dns_update_replaces_records_for_same_client() {
remote_addr: Some(crate::proto::common::Url {
url: "tcp://127.0.0.1:54321".to_string(),
}),
resolved_remote_addr: None,
}));
dns_server_inst
@@ -299,6 +300,7 @@ async fn test_magic_dns_update_replaces_records_for_same_client() {
remote_addr: Some(crate::proto::common::Url {
url: "tcp://127.0.0.1:54321".to_string(),
}),
resolved_remote_addr: None,
}));
dns_server_inst
+42 -62
View File
@@ -808,14 +808,7 @@ impl Instance {
continue;
}
#[cfg(all(
not(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
)),
feature = "tun"
))]
#[cfg(all(not(mobile), feature = "tun"))]
{
let mut new_nic_ctx = NicCtx::new(
global_ctx_c.clone(),
@@ -856,14 +849,7 @@ impl Instance {
});
}
#[cfg(all(
not(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
)),
feature = "tun"
))]
#[cfg(all(not(mobile), feature = "tun"))]
fn check_for_static_ip(&self, first_round_output: oneshot::Sender<Result<(), Error>>) {
let ipv4_addr = self.global_ctx.get_ipv4();
let ipv6_addr = self.global_ctx.get_ipv6();
@@ -881,46 +867,48 @@ impl Instance {
tokio::spawn(async move {
let mut output_tx = Some(first_round_output);
loop {
let Some(peer_manager) = peer_mgr.upgrade() else {
tracing::warn!("peer manager is dropped, stop static ip check.");
if let Some(output_tx) = output_tx.take() {
let _ = output_tx.send(Err(Error::Unknown));
return;
}
return;
};
let close_notifier = Arc::new(Notify::new());
let mut new_nic_ctx = NicCtx::new(
peer_manager.get_global_ctx(),
&peer_manager,
peer_packet_receiver.clone(),
close_notifier.clone(),
);
if let Err(e) = new_nic_ctx.run(ipv4_addr, ipv6_addr).await {
if let Some(output_tx) = output_tx.take() {
let _ = output_tx.send(Err(e));
return;
}
tracing::error!("failed to create new nic ctx, err: {:?}", e);
tokio::time::sleep(Duration::from_secs(1)).await;
continue;
}
// Create Magic DNS runner only if we have IPv4
#[cfg(feature = "magic-dns")]
{
let ifname = new_nic_ctx.ifname().await;
let dns_runner = if let Some(ipv4) = ipv4_addr {
Self::create_magic_dns_runner(peer_manager, ifname, ipv4)
} else {
None
let Some(peer_mgr) = peer_mgr.upgrade() else {
tracing::warn!("peer manager is dropped, stop static ip check.");
if let Some(output_tx) = output_tx.take() {
let _ = output_tx.send(Err(Error::Unknown));
return;
}
return;
};
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx, dns_runner).await;
let mut new_nic_ctx = NicCtx::new(
peer_mgr.get_global_ctx(),
&peer_mgr,
peer_packet_receiver.clone(),
close_notifier.clone(),
);
if let Err(e) = new_nic_ctx.run(ipv4_addr, ipv6_addr).await {
if let Some(output_tx) = output_tx.take() {
let _ = output_tx.send(Err(e));
return;
}
tracing::error!("failed to create new nic ctx, err: {:?}", e);
tokio::time::sleep(Duration::from_secs(1)).await;
continue;
}
// Create Magic DNS runner only if we have IPv4
#[cfg(feature = "magic-dns")]
{
let ifname = new_nic_ctx.ifname().await;
let dns_runner = if let Some(ipv4) = ipv4_addr {
Self::create_magic_dns_runner(peer_mgr, ifname, ipv4)
} else {
None
};
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx, dns_runner).await;
}
#[cfg(not(feature = "magic-dns"))]
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx).await;
}
#[cfg(not(feature = "magic-dns"))]
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx).await;
if let Some(output_tx) = output_tx.take() {
let _ = output_tx.send(Ok(()));
@@ -951,11 +939,7 @@ impl Instance {
{
Self::clear_nic_ctx(self.nic_ctx.clone(), self.peer_packet_receiver.clone()).await;
#[cfg(not(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
)))]
#[cfg(not(mobile))]
if !self.global_ctx.config.get_flags().no_tun {
let (output_tx, output_rx) = oneshot::channel();
self.check_for_static_ip(output_tx);
@@ -1475,11 +1459,7 @@ impl Instance {
self.peer_packet_receiver.clone()
}
#[cfg(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
))]
#[cfg(mobile)]
pub async fn setup_nic_ctx_for_mobile(
nic_ctx: ArcNicCtx,
global_ctx: ArcGlobalCtx,
+37 -45
View File
@@ -9,12 +9,6 @@ use anyhow::Context;
use async_trait::async_trait;
use tokio::task::JoinSet;
#[cfg(feature = "faketcp")]
use crate::tunnel::fake_tcp::FakeTcpTunnelListener;
#[cfg(feature = "quic")]
use crate::tunnel::quic::QUICTunnelListener;
#[cfg(feature = "wireguard")]
use crate::tunnel::wireguard::{WgConfig, WgTunnelListener};
use crate::{
common::{
error::Error,
@@ -23,44 +17,42 @@ use crate::{
},
peers::peer_manager::PeerManager,
tunnel::{
ring::RingTunnelListener, tcp::TcpTunnelListener, udp::UdpTunnelListener, Tunnel,
TunnelListener,
self, ring::RingTunnelListener, tcp::TcpTunnelListener, udp::UdpTunnelListener, IpScheme,
Tunnel, TunnelListener, TunnelScheme,
},
utils::BoxExt,
};
pub fn get_listener_by_url(
pub fn create_listener_by_url(
l: &url::Url,
_ctx: ArcGlobalCtx,
#[allow(unused_variables)] ctx: ArcGlobalCtx,
) -> Result<Box<dyn TunnelListener>, Error> {
Ok(match l.scheme() {
"tcp" => Box::new(TcpTunnelListener::new(l.clone())),
"udp" => Box::new(UdpTunnelListener::new(l.clone())),
#[cfg(feature = "wireguard")]
"wg" => {
let nid = _ctx.get_network_identity();
let wg_config = WgConfig::new_from_network_identity(
&nid.network_name,
&nid.network_secret.unwrap_or_default(),
);
Box::new(WgTunnelListener::new(l.clone(), wg_config))
}
#[cfg(feature = "quic")]
"quic" => Box::new(QUICTunnelListener::new(l.clone())),
#[cfg(feature = "websocket")]
"ws" | "wss" => {
use crate::tunnel::websocket::WSTunnelListener;
Box::new(WSTunnelListener::new(l.clone()))
}
#[cfg(feature = "faketcp")]
"faketcp" => Box::new(FakeTcpTunnelListener::new(l.clone())),
Ok(match l.try_into()? {
TunnelScheme::Ip(scheme) => match scheme {
IpScheme::Tcp => TcpTunnelListener::new(l.clone()).boxed(),
IpScheme::Udp => UdpTunnelListener::new(l.clone()).boxed(),
#[cfg(feature = "wireguard")]
IpScheme::Wg => {
use crate::tunnel::wireguard::{WgConfig, WgTunnelListener};
let nid = ctx.get_network_identity();
let wg_config = WgConfig::new_from_network_identity(
&nid.network_name,
&nid.network_secret.unwrap_or_default(),
);
WgTunnelListener::new(l.clone(), wg_config).boxed()
}
#[cfg(feature = "quic")]
IpScheme::Quic => tunnel::quic::QuicTunnelListener::new(l.clone()).boxed(),
#[cfg(feature = "websocket")]
IpScheme::Ws | IpScheme::Wss => {
tunnel::websocket::WsTunnelListener::new(l.clone()).boxed()
}
#[cfg(feature = "faketcp")]
IpScheme::FakeTcp => tunnel::fake_tcp::FakeTcpTunnelListener::new(l.clone()).boxed(),
},
#[cfg(unix)]
"unix" => {
use crate::tunnel::unix::UnixSocketTunnelListener;
Box::new(UnixSocketTunnelListener::new(l.clone()))
}
_ => {
return Err(Error::InvalidUrl(l.to_string()));
}
TunnelScheme::Unix => tunnel::unix::UnixSocketTunnelListener::new(l.clone()).boxed(),
_ => return Err(Error::InvalidUrl(l.to_string())),
})
}
@@ -133,7 +125,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
for l in self.global_ctx.config.get_listener_uris().iter() {
let l = l.clone();
let Ok(_) = get_listener_by_url(&l, self.global_ctx.clone()) else {
let Ok(_) = create_listener_by_url(&l, self.global_ctx.clone()) else {
let msg = format!("failed to get listener by url: {}, maybe not supported", l);
self.global_ctx
.issue_event(GlobalCtxEvent::ListenerAddFailed(l.clone(), msg));
@@ -143,7 +135,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
let listener = l.clone();
self.add_listener(
move || get_listener_by_url(&listener, ctx.clone()).unwrap(),
move || create_listener_by_url(&listener, ctx.clone()).unwrap(),
true,
)
.await?;
@@ -160,7 +152,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
.with_context(|| format!("failed to set ipv6 host for listener: {}", l))?;
let ctx = self.global_ctx.clone();
self.add_listener(
move || get_listener_by_url(&ipv6_listener, ctx.clone()).unwrap(),
move || create_listener_by_url(&ipv6_listener, ctx.clone()).unwrap(),
false,
)
.await?;
@@ -361,10 +353,6 @@ mod tests {
#[async_trait::async_trait]
impl TunnelListener for MockListener {
fn local_url(&self) -> url::Url {
"mock://".parse().unwrap()
}
async fn listen(&mut self) -> Result<(), TunnelError> {
self.counter.fetch_add(1, Ordering::Relaxed);
Ok(())
@@ -374,6 +362,10 @@ mod tests {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
Err(TunnelError::BufferFull)
}
fn local_url(&self) -> url::Url {
"mock://".parse().unwrap()
}
}
impl Drop for MockListener {
+2 -10
View File
@@ -575,11 +575,7 @@ impl VirtualNic {
Ok(tun::create(&config)?)
}
#[cfg(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
))]
#[cfg(mobile)]
pub async fn create_dev_for_mobile(
&mut self,
tun_fd: std::os::fd::RawFd,
@@ -1175,11 +1171,7 @@ impl NicCtx {
Ok(())
}
#[cfg(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
))]
#[cfg(mobile)]
pub async fn run_for_mobile(&mut self, tun_fd: std::os::fd::RawFd) -> Result<(), Error> {
let tunnel = {
let mut nic = self.nic.lock().await;
+10 -4
View File
@@ -227,11 +227,17 @@ impl NetworkInstanceManager {
}
pub fn set_tun_fd(&self, instance_id: &uuid::Uuid, fd: i32) -> Result<(), anyhow::Error> {
let mut instance = self
let sender = self
.instance_map
.get_mut(instance_id)
.ok_or_else(|| anyhow::anyhow!("instance not found"))?;
instance.set_tun_fd(fd);
.get(instance_id)
.ok_or_else(|| anyhow::anyhow!("instance not found"))?
.get_tun_fd_sender()
.ok_or_else(|| anyhow::anyhow!("tun fd sender not found"))?;
sender
.try_send(Some(fd))
.map_err(|e| anyhow::anyhow!("failed to send tun fd: {}", e))?;
Ok(())
}
+14 -30
View File
@@ -93,11 +93,7 @@ impl EasyTierLauncher {
}
}
#[cfg(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
))]
#[cfg(mobile)]
async fn run_routine_for_mobile(
instance: &Instance,
data: &EasyTierData,
@@ -156,11 +152,7 @@ impl EasyTierLauncher {
}
});
#[cfg(any(
target_os = "android",
any(target_os = "ios", all(target_os = "macos", feature = "macos-ne")),
target_env = "ohos"
))]
#[cfg(mobile)]
Self::run_routine_for_mobile(&instance, &data, &mut tasks).await;
instance.run().await?;
@@ -403,12 +395,6 @@ impl NetworkInstance {
self.config.get_network_identity().network_name
}
pub fn set_tun_fd(&mut self, tun_fd: i32) {
if let Some(launcher) = self.launcher.as_ref() {
let _ = launcher.data.tun_fd.0.blocking_send(Some(tun_fd));
}
}
pub fn get_tun_fd_sender(&self) -> Option<mpsc::Sender<TunFd>> {
self.launcher
.as_ref()
@@ -573,8 +559,9 @@ impl NetworkConfig {
peer_public_key: None,
});
}
cfg.set_peers(peers);
if !peers.is_empty() {
cfg.set_peers(peers);
}
}
NetworkingMethod::Standalone => {}
}
@@ -826,6 +813,10 @@ impl NetworkConfig {
flags.mtu = mtu as u32;
}
if let Some(instance_recv_bps_limit) = self.instance_recv_bps_limit {
flags.instance_recv_bps_limit = instance_recv_bps_limit;
}
if let Some(enable_private_mode) = self.enable_private_mode {
flags.private_mode = enable_private_mode;
}
@@ -870,18 +861,9 @@ impl NetworkConfig {
}
let peers = config.get_peers();
match peers.len() {
1 => {
result.networking_method = Some(NetworkingMethod::PublicServer as i32);
result.public_server_url = Some(peers[0].uri.to_string());
}
0 => {
result.networking_method = Some(NetworkingMethod::Standalone as i32);
}
_ => {
result.networking_method = Some(NetworkingMethod::Manual as i32);
result.peer_urls = peers.iter().map(|p| p.uri.to_string()).collect();
}
result.networking_method = Some(NetworkingMethod::Manual as i32);
if !peers.is_empty() {
result.peer_urls = peers.iter().map(|p| p.uri.to_string()).collect();
}
result.listener_urls = config
@@ -978,6 +960,8 @@ impl NetworkConfig {
result.disable_sym_hole_punching = Some(flags.disable_sym_hole_punching);
result.enable_magic_dns = Some(flags.accept_dns);
result.mtu = Some(flags.mtu as i32);
result.instance_recv_bps_limit =
(flags.instance_recv_bps_limit != u64::MAX).then_some(flags.instance_recv_bps_limit);
result.enable_private_mode = Some(flags.private_mode);
if flags.relay_network_whitelist == "*" {
+5 -2
View File
@@ -2,7 +2,6 @@ use crate::{
common::{config::EncryptionAlgorithm, log},
tunnel::packet_def::ZCPacket,
};
use cfg_if::cfg_if;
use std::sync::Arc;
#[cfg(feature = "wireguard")]
@@ -61,8 +60,11 @@ impl Encryptor for NullCipher {
pub fn create_encryptor(
algorithm: &str,
key_128: [u8; 16],
key_256: [u8; 32],
#[allow(unused_variables)] key_256: [u8; 32],
) -> Arc<dyn Encryptor> {
#[cfg(any(feature = "aes-gcm", feature = "wireguard", feature = "openssl-crypto"))]
use cfg_if::cfg_if;
let algorithm = match EncryptionAlgorithm::try_from(algorithm) {
Ok(algorithm) => algorithm,
Err(_) => {
@@ -75,6 +77,7 @@ pub fn create_encryptor(
default
}
};
match algorithm {
EncryptionAlgorithm::Xor => Arc::new(xor::XorCipher::new(&key_128)),
+39 -8
View File
@@ -730,18 +730,46 @@ impl ForeignNetworkManager {
matches!(identity_type, PeerIdentityType::Admin)
}
async fn is_credential_pubkey_trusted(
entry: &ForeignNetworkEntry,
fn credential_pubkey_is_trusted(
global_ctx: &ArcGlobalCtx,
network_name: &str,
remote_static_pubkey: &[u8],
) -> bool {
remote_static_pubkey.len() == 32
&& entry.global_ctx.is_pubkey_trusted_with_source(
&& global_ctx.is_pubkey_trusted_with_source(
remote_static_pubkey,
&entry.network.network_name,
network_name,
TrustedKeySource::OspfCredential,
)
}
fn is_credential_pubkey_trusted(
entry: &ForeignNetworkEntry,
remote_static_pubkey: &[u8],
) -> bool {
Self::credential_pubkey_is_trusted(
&entry.global_ctx,
&entry.network.network_name,
remote_static_pubkey,
)
}
pub(crate) fn is_existing_credential_pubkey_trusted(
&self,
network_name: &str,
remote_static_pubkey: &[u8],
) -> bool {
self.data
.get_network_entry(network_name)
.is_some_and(|entry| {
Self::credential_pubkey_is_trusted(
&entry.global_ctx,
&entry.network.network_name,
remote_static_pubkey,
)
})
}
fn build_trusted_key_items(entry: &ForeignNetworkEntry) -> Vec<TrustedKeyInfoPb> {
entry
.global_ctx
@@ -839,8 +867,7 @@ impl ForeignNetworkManager {
let same_identity = entry.network == peer_network;
let peer_identity_type = peer_conn.get_peer_identity_type();
let credential_peer_trusted = peer_digest_empty
&& Self::is_credential_pubkey_trusted(&entry, &conn_info.noise_remote_static_pubkey)
.await;
&& Self::is_credential_pubkey_trusted(&entry, &conn_info.noise_remote_static_pubkey);
let credential_identity_mismatch = credential_peer_trusted
&& Self::should_reject_credential_trust_path(peer_identity_type);
@@ -1483,7 +1510,9 @@ pub mod tests {
)]),
&foreign_network.network_name,
);
assert!(!ForeignNetworkManager::is_credential_pubkey_trusted(&entry, &pubkey).await);
assert!(!ForeignNetworkManager::is_credential_pubkey_trusted(
&entry, &pubkey
));
entry.global_ctx.update_trusted_keys(
HashMap::from([(
@@ -1495,7 +1524,9 @@ pub mod tests {
)]),
&foreign_network.network_name,
);
assert!(ForeignNetworkManager::is_credential_pubkey_trusted(&entry, &pubkey).await);
assert!(ForeignNetworkManager::is_credential_pubkey_trusted(
&entry, &pubkey
));
}
#[test]
+45
View File
@@ -1365,6 +1365,17 @@ impl PeerConn {
&format!("{}:recv", conn_info_for_instrument.network_name),
limiter_config.into(),
))
} else if self.global_ctx.get_flags().instance_recv_bps_limit != u64::MAX {
let limiter_config = LimiterConfig {
burst_rate: None,
bps: Some(self.global_ctx.get_flags().instance_recv_bps_limit),
fill_duration_ms: None,
};
Some(
self.global_ctx
.token_bucket_manager()
.get_or_create("instance:recv", limiter_config.into()),
)
} else {
None
};
@@ -1472,6 +1483,40 @@ impl PeerConn {
ret
}
fn network_secret_digest_is_empty(network: &NetworkIdentity) -> bool {
network
.network_secret_digest
.as_ref()
.is_none_or(|digest| digest.iter().all(|byte| *byte == 0))
}
fn matches_local_secret_proof(&self) -> bool {
let Some(secret_proof) = self
.noise_handshake_result
.as_ref()
.and_then(|noise| noise.client_secret_proof.as_ref())
else {
return false;
};
self.global_ctx
.get_secret_proof(&secret_proof.challenge)
.is_some_and(|mac| mac.verify_slice(&secret_proof.proof).is_ok())
}
pub(crate) fn matches_local_network_secret(&self) -> bool {
if self.matches_local_secret_proof() {
return true;
}
let my_identity = self.global_ctx.get_network_identity();
let peer_identity = self.get_network_identity();
!Self::network_secret_digest_is_empty(&my_identity)
&& !Self::network_secret_digest_is_empty(&peer_identity)
&& my_identity.network_secret_digest == peer_identity.network_secret_digest
}
pub fn get_close_notifier(&self) -> Arc<PeerConnCloseNotify> {
self.close_event_notifier.clone()
}
+25 -11
View File
@@ -697,12 +697,6 @@ impl PeerManager {
return Ok(());
}
if self.global_ctx.config.get_flags().private_mode {
return Err(Error::SecretKeyError(
"private mode is turned on, network identity not match".to_string(),
));
}
let mut peer_id = self
.foreign_network_manager
.get_network_peer_id(network_name);
@@ -724,11 +718,31 @@ impl PeerManager {
})
.await?;
let peer_network_name = conn.get_network_identity().network_name.clone();
let peer_identity = conn.get_network_identity();
let peer_network_name = peer_identity.network_name.clone();
let my_identity = self.global_ctx.get_network_identity();
let is_local_network = peer_network_name == my_identity.network_name;
let trusted_foreign_credential =
matches!(conn.get_peer_identity_type(), PeerIdentityType::Credential)
&& self
.foreign_network_manager
.is_existing_credential_pubkey_trusted(
&peer_network_name,
&conn.get_conn_info().noise_remote_static_pubkey,
);
let foreign_network_allowed =
conn.matches_local_network_secret() || trusted_foreign_credential;
if !is_local_network && self.global_ctx.get_flags().private_mode && !foreign_network_allowed
{
return Err(Error::SecretKeyError(
"private mode is turned on, foreign network secret mismatch".to_string(),
));
}
conn.set_is_hole_punched(!is_directly_connected);
if peer_network_name == self.global_ctx.get_network_identity().network_name {
if is_local_network {
self.add_new_peer_conn(conn).await?;
} else {
self.foreign_network_manager.add_peer_conn(conn).await?;
@@ -1989,7 +2003,7 @@ mod tests {
create_connector_by_url, direct::PeerManagerForDirectConnector,
udp_hole_punch::tests::create_mock_peer_manager_with_mock_stun,
},
instance::listeners::get_listener_by_url,
instance::listeners::create_listener_by_url,
peers::{
create_packet_recv_chan,
peer_conn::tests::set_secure_mode_cfg,
@@ -2769,7 +2783,7 @@ mod tests {
let peer_mgr_c = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
register_service(&peer_mgr_c.peer_rpc_mgr, "", 0, "hello c");
let mut listener1 = get_listener_by_url(
let mut listener1 = create_listener_by_url(
&format!("{}://0.0.0.0:31013", proto1).parse().unwrap(),
peer_mgr_b.get_global_ctx(),
)
@@ -2788,7 +2802,7 @@ mod tests {
.await
.unwrap();
let mut listener2 = get_listener_by_url(
let mut listener2 = create_listener_by_url(
&format!("{}://0.0.0.0:31014", proto2).parse().unwrap(),
peer_mgr_c.get_global_ctx(),
)
+30 -6
View File
@@ -659,7 +659,8 @@ impl SyncedRouteInfo {
}
}
fn update_foreign_network(&self, foreign_network: &RouteForeignNetworkInfos) {
fn update_foreign_network(&self, foreign_network: &RouteForeignNetworkInfos) -> bool {
let mut changed = false;
for item in foreign_network.infos.iter().map(Clone::clone) {
let Some(key) = item.key else {
continue;
@@ -675,10 +676,15 @@ impl SyncedRouteInfo {
.and_modify(|old_entry| {
if entry.version > old_entry.version {
*old_entry = entry.clone();
changed = true;
}
})
.or_insert_with(|| entry.clone());
.or_insert_with(|| {
changed = true;
entry.clone()
});
}
changed
}
fn update_my_peer_info(
@@ -2847,8 +2853,14 @@ impl RouteSessionManager {
dst_peer_id: PeerId,
mut sync_now: tokio::sync::broadcast::Receiver<()>,
) {
const RETRY_BASE_MS: u64 = 50;
const RETRY_MAX_MS: u64 = 5000;
let mut last_sync = Instant::now();
let mut last_clean_dst_saved_map = Instant::now();
// Keep retry_delay_ms across outer iterations so that rapid
// connect/disconnect flaps don't fully reset the backoff.
let mut retry_delay_ms = RETRY_BASE_MS;
loop {
loop {
let Some(service_impl) = service_impl.clone().upgrade() else {
@@ -2875,13 +2887,18 @@ impl RouteSessionManager {
last_clean_dst_saved_map = Instant::now();
service_impl.clean_dst_saved_map(dst_peer_id);
}
// Successful sync: decay backoff towards base so the next
// real failure still starts at a reasonable level, but
// don't fully reset to avoid 50ms bursts during flapping.
retry_delay_ms = (retry_delay_ms / 2).max(RETRY_BASE_MS);
break;
}
drop(service_impl);
drop(peer_rpc);
tokio::time::sleep(Duration::from_millis(50)).await;
tokio::time::sleep(Duration::from_millis(retry_delay_ms)).await;
retry_delay_ms = (retry_delay_ms * 2).min(RETRY_MAX_MS);
}
sync_now = sync_now.resubscribe();
@@ -3214,17 +3231,18 @@ impl RouteSessionManager {
service_impl.update_route_table_and_cached_local_conn_bitmap();
}
let mut foreign_network_changed = false;
if let Some(foreign_network) = &foreign_network {
// Step 9b: credential peers' foreign_network_infos are always ignored
if !from_is_credential {
service_impl
foreign_network_changed = service_impl
.synced_route_info
.update_foreign_network(foreign_network);
session.update_dst_saved_foreign_network_version(foreign_network, from_peer_id);
}
}
if need_update_route_table || foreign_network.is_some() {
if need_update_route_table || foreign_network_changed {
service_impl.update_foreign_network_owner_map();
}
@@ -3243,7 +3261,13 @@ impl RouteSessionManager {
.disconnect_untrusted_peers(&untrusted_peers)
.await;
self.sync_now("sync_route_info");
// Only trigger reverse sync when we actually received new data that
// needs to be propagated to other peers. Previously this was
// unconditional, which created an A→B→A→B ping-pong storm even when
// there was nothing new to propagate.
if need_update_route_table || foreign_network_changed {
self.sync_now("sync_route_info");
}
Ok(SyncRouteInfoResponse {
is_initiator,
+226
View File
@@ -13,6 +13,7 @@ use crate::{
stats_manager::{LabelSet, LabelType, MetricName},
PeerId,
},
proto::api::instance::TrustedKeySourcePb,
tunnel::{
common::tests::wait_for_condition,
packet_def::{PacketType, ZCPacket},
@@ -63,6 +64,92 @@ pub async fn create_mock_peer_manager_secure(
peer_mgr
}
fn set_private_mode(peer_mgr: &PeerManager, enabled: bool) {
let global_ctx = peer_mgr.get_global_ctx();
let mut flags = global_ctx.get_flags();
flags.private_mode = enabled;
global_ctx.set_flags(flags);
}
async fn connect_client_and_server(
client: Arc<PeerManager>,
server: Arc<PeerManager>,
) -> (Result<(), Error>, Result<(), Error>) {
let (client_ring, server_ring) = create_ring_tunnel_pair();
tokio::join!(
{
let client = client.clone();
async move {
client.add_client_tunnel(client_ring, false).await?;
Ok(())
}
},
{
let server = server.clone();
async move { server.add_tunnel_as_server(server_ring, true).await }
}
)
}
async fn wait_for_foreign_network(server: Arc<PeerManager>, network_name: &'static str) {
wait_for_condition(
|| {
let server = server.clone();
async move {
server
.get_foreign_network_manager()
.list_foreign_networks()
.await
.foreign_networks
.contains_key(network_name)
}
},
Duration::from_secs(10),
)
.await;
}
async fn wait_for_foreign_network_peer_count_at_least(
server: Arc<PeerManager>,
network_name: &'static str,
min_peer_count: usize,
) {
wait_for_condition(
|| {
let server = server.clone();
async move {
server
.get_foreign_network_manager()
.list_foreign_networks()
.await
.foreign_networks
.get(network_name)
.map(|entry| entry.peers.len() >= min_peer_count)
.unwrap_or(false)
}
},
Duration::from_secs(10),
)
.await;
}
async fn wait_for_public_peers_empty(client: Arc<PeerManager>) {
wait_for_condition(
|| {
let client = client.clone();
async move {
client
.get_foreign_network_client()
.list_public_peers()
.await
.is_empty()
}
},
Duration::from_secs(5),
)
.await;
}
pub async fn connect_peer_manager(client: Arc<PeerManager>, server: Arc<PeerManager>) {
let (a_ring, b_ring) = create_ring_tunnel_pair();
let a_mgr_copy = client;
@@ -205,6 +292,145 @@ async fn relay_peer_map_secure_session_decrypt() {
assert_eq!(packet.payload(), b"relay-hello");
}
#[tokio::test]
async fn private_mode_allows_foreign_network_with_same_secret() {
let server = create_mock_peer_manager_secure("public".to_string(), "shared".to_string()).await;
let client =
create_mock_peer_manager_secure("tenant-a".to_string(), "shared".to_string()).await;
set_private_mode(&server, true);
let (client_ret, server_ret) = connect_client_and_server(client, server.clone()).await;
assert!(client_ret.is_ok(), "client should connect in private mode");
assert!(
server_ret.is_ok(),
"server should accept foreign network with matching secret: {:?}",
server_ret
);
wait_for_foreign_network(server, "tenant-a").await;
}
#[tokio::test]
async fn private_mode_rejects_foreign_network_with_different_secret() {
let server = create_mock_peer_manager_secure("public".to_string(), "shared".to_string()).await;
let client = create_mock_peer_manager_secure("tenant-a".to_string(), "other".to_string()).await;
set_private_mode(&server, true);
let (client_ret, server_ret) = connect_client_and_server(client.clone(), server.clone()).await;
assert!(
server_ret.is_err(),
"server should reject foreign network with mismatched secret in private mode"
);
let _ = client_ret;
wait_for_public_peers_empty(client).await;
assert!(server
.get_foreign_network_manager()
.list_foreign_networks()
.await
.foreign_networks
.is_empty());
}
#[tokio::test]
async fn private_mode_allows_trusted_foreign_credential() {
let server = create_mock_peer_manager_secure("public".to_string(), "shared".to_string()).await;
let admin = create_mock_peer_manager_secure("tenant-a".to_string(), "shared".to_string()).await;
set_private_mode(&server, true);
let (_cred_id, cred_secret) = admin
.get_global_ctx()
.get_credential_manager()
.generate_credential(vec![], false, vec![], Duration::from_secs(3600));
let privkey_bytes: [u8; 32] = base64::engine::general_purpose::STANDARD
.decode(&cred_secret)
.unwrap()
.try_into()
.unwrap();
let private = x25519_dalek::StaticSecret::from(privkey_bytes);
let public = x25519_dalek::PublicKey::from(&private);
let credential = create_mock_peer_manager_credential("tenant-a".to_string(), &private).await;
connect_peer_manager(admin.clone(), server.clone()).await;
wait_for_condition(
|| {
let server = server.clone();
let pubkey = public.as_bytes().to_vec();
async move {
server
.get_foreign_network_manager()
.list_foreign_networks_with_options(true)
.await
.foreign_networks
.get("tenant-a")
.map(|entry| {
entry.trusted_keys.iter().any(|trusted_key| {
trusted_key.pubkey == pubkey
&& trusted_key.source == TrustedKeySourcePb::OspfCredential as i32
})
})
.unwrap_or(false)
}
},
Duration::from_secs(10),
)
.await;
let (client_ret, server_ret) = connect_client_and_server(credential, server.clone()).await;
assert!(
client_ret.is_ok(),
"trusted foreign credential client should connect in private mode"
);
assert!(
server_ret.is_ok(),
"server should allow trusted foreign credential in private mode: {:?}",
server_ret
);
wait_for_foreign_network_peer_count_at_least(server, "tenant-a", 2).await;
}
#[tokio::test]
async fn private_mode_rejects_untrusted_foreign_credential() {
let server = create_mock_peer_manager_secure("public".to_string(), "shared".to_string()).await;
let admin = create_mock_peer_manager_secure("tenant-a".to_string(), "shared".to_string()).await;
set_private_mode(&server, true);
let random_private = x25519_dalek::StaticSecret::random_from_rng(rand::rngs::OsRng);
let unknown_credential =
create_mock_peer_manager_credential("tenant-a".to_string(), &random_private).await;
connect_peer_manager(admin.clone(), server.clone()).await;
wait_for_foreign_network(server.clone(), "tenant-a").await;
let (client_ret, server_ret) =
connect_client_and_server(unknown_credential, server.clone()).await;
let _ = client_ret;
assert!(
server_ret.is_err(),
"server should reject untrusted foreign credential in private mode"
);
wait_for_condition(
|| {
let server = server.clone();
async move {
server
.get_foreign_network_manager()
.list_foreign_networks()
.await
.foreign_networks
.get("tenant-a")
.map(|entry| entry.peers.len() == 1)
.unwrap_or(false)
}
},
Duration::from_secs(10),
)
.await;
}
#[tokio::test]
async fn relay_peer_map_retry_backoff_and_evict() {
let (s, _r) = create_packet_recv_chan();
+62 -23
View File
@@ -135,36 +135,22 @@ pub mod instance {
}
pub fn get_loss_rate(&self) -> Option<f64> {
let mut ret = 0.0;
let p = self.peer.as_ref()?;
let default_conn_id = p.default_conn_id.map(|id| id.to_string());
let mut ret = None;
for conn in p.conns.iter() {
ret += conn.loss_rate;
if default_conn_id == Some(conn.conn_id.to_string()) {
return Some(conn.loss_rate as f64);
}
ret.get_or_insert(conn.loss_rate as f64);
}
if ret == 0.0 {
None
} else {
Some(ret as f64)
}
}
fn is_tunnel_ipv6(tunnel_info: &super::super::common::TunnelInfo) -> bool {
let Some(local_addr) = &tunnel_info.local_addr else {
return false;
};
let u: url::Url = local_addr.clone().into();
u.host()
.map(|h| matches!(h, url::Host::Ipv6(_)))
.unwrap_or(false)
ret
}
fn get_tunnel_proto_str(tunnel_info: &super::super::common::TunnelInfo) -> String {
if Self::is_tunnel_ipv6(tunnel_info) {
format!("{}6", tunnel_info.tunnel_type)
} else {
tunnel_info.tunnel_type.clone()
}
tunnel_info.display_tunnel_type()
}
pub fn get_conn_protos(&self) -> Option<Vec<String>> {
@@ -266,6 +252,7 @@ mod tests {
use bytes::Bytes;
use prost::Message;
use super::instance::{PeerConnInfo, PeerInfo, PeerRoutePair};
use super::manage::{
ListNetworkInstanceRequest, ListNetworkInstanceResponse, WebClientService,
WebClientServiceClient, WebClientServiceDescriptor, WebClientServiceMethodDescriptor,
@@ -355,4 +342,56 @@ mod tests {
.await;
assert!(ret.is_err());
}
#[test]
fn peer_route_pair_loss_rate_uses_default_conn() {
let default_conn_id = uuid::Uuid::new_v4();
let pair = PeerRoutePair {
peer: Some(PeerInfo {
default_conn_id: Some(default_conn_id.into()),
conns: vec![
PeerConnInfo {
conn_id: uuid::Uuid::new_v4().to_string(),
loss_rate: 0.8,
..Default::default()
},
PeerConnInfo {
conn_id: default_conn_id.to_string(),
loss_rate: 0.4,
..Default::default()
},
],
..Default::default()
}),
..Default::default()
};
assert!(pair
.get_loss_rate()
.is_some_and(|loss_rate| (loss_rate - 0.4).abs() < 1e-6));
}
#[test]
fn peer_route_pair_loss_rate_falls_back_to_first_conn() {
let pair = PeerRoutePair {
peer: Some(PeerInfo {
conns: vec![
PeerConnInfo {
conn_id: uuid::Uuid::new_v4().to_string(),
loss_rate: 0.0,
..Default::default()
},
PeerConnInfo {
conn_id: uuid::Uuid::new_v4().to_string(),
loss_rate: 0.7,
..Default::default()
},
],
..Default::default()
}),
..Default::default()
};
assert_eq!(pair.get_loss_rate(), Some(0.0));
}
}
+1
View File
@@ -87,6 +87,7 @@ message NetworkConfig {
optional string credential_file = 57;
optional bool lazy_p2p = 58;
optional bool need_p2p = 59;
optional uint64 instance_recv_bps_limit = 60;
}
message PortForwardConfig {
+3
View File
@@ -73,6 +73,7 @@ message FlagsInConfig {
bool lazy_p2p = 37;
bool need_p2p = 38;
uint64 instance_recv_bps_limit = 39;
}
message RpcDescriptor {
@@ -200,6 +201,7 @@ message TunnelInfo {
string tunnel_type = 1;
common.Url local_addr = 2;
common.Url remote_addr = 3;
common.Url resolved_remote_addr = 4;
}
message StunInfo {
@@ -221,6 +223,7 @@ message PeerFeatureFlag {
bool no_relay_quic = 7;
bool is_credential_peer = 8;
bool need_p2p = 9;
bool disable_p2p = 10;
}
enum SocketType {
+279 -1
View File
@@ -5,8 +5,9 @@ use std::{
use anyhow::Context;
use base64::{prelude::BASE64_STANDARD, Engine as _};
use strum::VariantArray;
use crate::tunnel::packet_def::CompressorAlgo;
use crate::tunnel::{packet_def::CompressorAlgo, IpScheme};
include!(concat!(env!("OUT_DIR"), "/common.rs"));
@@ -284,6 +285,105 @@ impl fmt::Display for Url {
}
}
fn split_tunnel_scheme(raw_scheme: &str) -> Option<(&str, &'static str, bool)> {
for scheme in IpScheme::VARIANTS {
let scheme: &'static str = scheme.into();
if let Some(base) = raw_scheme.strip_suffix('6') {
if let Some(prefix) = base.strip_suffix(scheme) {
if prefix.is_empty() || prefix.ends_with('-') {
return Some((prefix, scheme, true));
}
}
}
if let Some(prefix) = raw_scheme.strip_suffix(scheme) {
if prefix.is_empty() || prefix.ends_with('-') {
return Some((prefix, scheme, false));
}
}
}
None
}
fn normalize_tunnel_scheme(raw_scheme: &str, is_ipv6: bool) -> Option<String> {
let (prefix, scheme, had_ipv6_suffix) = split_tunnel_scheme(raw_scheme)?;
let suffix = if is_ipv6 || had_ipv6_suffix { "6" } else { "" };
Some(format!("{prefix}{scheme}{suffix}"))
}
fn infer_tunnel_ipv6(raw: &str) -> Option<bool> {
let (_, rest) = raw.split_once("://")?;
if rest.starts_with('[') {
return Some(true);
}
match url::Url::parse(raw).ok()?.host() {
Some(url::Host::Ipv4(_)) => Some(false),
Some(url::Host::Ipv6(_)) => Some(true),
Some(url::Host::Domain(_)) | None => None,
}
}
fn normalize_tunnel_port(raw_port: &str, is_ipv6: bool) -> Option<u16> {
if let Ok(port) = raw_port.parse::<u16>() {
return Some(port);
}
if is_ipv6 && raw_port.ends_with('6') {
return raw_port[..raw_port.len() - 1].parse::<u16>().ok();
}
None
}
fn normalize_tunnel_url(raw: &str, fallback_ipv6: Option<bool>) -> Option<String> {
let (raw_scheme, rest) = raw.split_once("://")?;
if let Some(rest) = rest.strip_prefix('[') {
let (host, remainder) = rest.split_once(']')?;
let scheme = normalize_tunnel_scheme(raw_scheme, true)?;
if remainder.is_empty() {
return Some(format!("{scheme}://[{host}]"));
}
let raw_port = remainder.strip_prefix(':')?;
let port = normalize_tunnel_port(raw_port, true)?;
return Some(format!("{scheme}://[{host}]:{port}"));
}
let is_ipv6 = infer_tunnel_ipv6(raw).or(fallback_ipv6).unwrap_or(false);
let scheme = normalize_tunnel_scheme(raw_scheme, is_ipv6)?;
if let Ok(url) = url::Url::parse(raw) {
let host = match url.host()? {
url::Host::Ipv4(host) => host.to_string(),
url::Host::Ipv6(host) => format!("[{host}]"),
url::Host::Domain(host) => host.to_string(),
};
return Some(match url.port_or_known_default() {
Some(port) => format!("{scheme}://{host}:{port}"),
None => format!("{scheme}://{host}"),
});
}
let (host, raw_port) = rest.rsplit_once(':')?;
let port = normalize_tunnel_port(raw_port, is_ipv6)?;
Some(format!("{scheme}://{host}:{port}"))
}
impl Url {
pub fn is_ipv6_tunnel_endpoint(&self) -> bool {
infer_tunnel_ipv6(&self.url).unwrap_or(false)
}
pub fn normalized_tunnel_display(&self) -> String {
normalize_tunnel_url(&self.url, None).unwrap_or_else(|| self.url.clone())
}
}
impl From<std::net::SocketAddr> for SocketAddr {
fn from(value: std::net::SocketAddr) -> Self {
match value {
@@ -325,6 +425,38 @@ impl Display for SocketAddr {
}
}
impl TunnelInfo {
pub fn effective_remote_addr(&self) -> Option<&Url> {
self.resolved_remote_addr
.as_ref()
.or(self.remote_addr.as_ref())
}
pub fn display_tunnel_type(&self) -> String {
let is_ipv6 = infer_tunnel_ipv6(&self.tunnel_type).or_else(|| {
self.resolved_remote_addr
.as_ref()
.or(self.local_addr.as_ref())
.or(self.remote_addr.as_ref())
.map(Url::is_ipv6_tunnel_endpoint)
});
if self.tunnel_type.contains("://") {
normalize_tunnel_url(&self.tunnel_type, is_ipv6)
.unwrap_or_else(|| self.tunnel_type.clone())
} else {
is_ipv6
.and_then(|is_ipv6| normalize_tunnel_scheme(&self.tunnel_type, is_ipv6))
.unwrap_or_else(|| self.tunnel_type.clone())
}
}
pub fn display_remote_addr(&self) -> Option<String> {
self.effective_remote_addr()
.map(Url::normalized_tunnel_display)
}
}
impl TryFrom<CompressionAlgoPb> for CompressorAlgo {
type Error = anyhow::Error;
@@ -397,3 +529,149 @@ impl SecureModeConfig {
Ok(x25519_dalek::PublicKey::from(k))
}
}
#[cfg(test)]
mod tests {
use super::{normalize_tunnel_url, TunnelInfo, Url};
fn assert_ipv6_tunnel_normalization(scheme: &str, port: u16) {
let expected = format!("{scheme}6://[2001:db8::1]:{port}");
assert_eq!(
normalize_tunnel_url(&format!("{scheme}://[2001:db8::1]:{port}"), None).as_deref(),
Some(expected.as_str())
);
}
#[test]
fn normalize_plain_ipv6_tunnel_url() {
let url = Url {
url: "tcp://[2001:db8::1]:11010".to_string(),
};
assert_eq!(
url.normalized_tunnel_display(),
"tcp6://[2001:db8::1]:11010"
);
assert!(url.is_ipv6_tunnel_endpoint());
}
#[test]
fn normalize_all_enabled_ipv6_tunnel_urls() {
assert_ipv6_tunnel_normalization("tcp", 11010);
assert_ipv6_tunnel_normalization("udp", 11010);
#[cfg(feature = "wireguard")]
assert_ipv6_tunnel_normalization("wg", 11011);
#[cfg(feature = "quic")]
assert_ipv6_tunnel_normalization("quic", 11012);
#[cfg(feature = "websocket")]
assert_ipv6_tunnel_normalization("ws", 80);
#[cfg(feature = "websocket")]
assert_ipv6_tunnel_normalization("wss", 443);
#[cfg(feature = "faketcp")]
assert_ipv6_tunnel_normalization("faketcp", 11013);
}
#[test]
fn normalize_composite_ipv6_tunnel_url() {
assert_eq!(
normalize_tunnel_url("txt-tcp://[2001:db8::1]:11010", None).as_deref(),
Some("txt-tcp6://[2001:db8::1]:11010")
);
}
#[test]
fn recover_malformed_composite_ipv6_tunnel_url() {
assert_eq!(
normalize_tunnel_url("txt-tcp://[2001:db8::1]:110106", None).as_deref(),
Some("txt-tcp6://[2001:db8::1]:11010")
);
}
#[test]
fn keep_normalized_ipv6_tunnel_url_stable() {
assert_eq!(
normalize_tunnel_url("tcp6://[2001:db8::1]:11010", None).as_deref(),
Some("tcp6://[2001:db8::1]:11010")
);
}
#[test]
fn normalize_ipv6_tunnel_url_without_explicit_port() {
assert_eq!(
normalize_tunnel_url("tcp://[2001:db8::1]", None).as_deref(),
Some("tcp6://[2001:db8::1]")
);
}
#[test]
fn keep_domain_host_unbracketed_when_ipv6_falls_back() {
assert_eq!(
normalize_tunnel_url("tcp://localhost:11010", Some(true)).as_deref(),
Some("tcp6://localhost:11010")
);
}
#[test]
fn tunnel_info_display_tunnel_type_preserves_composite_prefix() {
    // `tunnel_type` carries a malformed composite URL; the display helper must
    // both keep the `txt-` prefix and repair the trailing-`6` port artifact.
    let remote = Url {
        url: "txt://et.example.com".to_string(),
    };
    let tunnel = TunnelInfo {
        tunnel_type: "txt-tcp://[2001:db8::2]:110106".to_string(),
        local_addr: None,
        remote_addr: Some(remote),
        resolved_remote_addr: None,
    };
    assert_eq!(tunnel.display_tunnel_type(), "txt-tcp6://[2001:db8::2]:11010");
}
#[test]
fn tunnel_info_display_tunnel_type_uses_remote_addr_fallback() {
    // With a bare scheme in `tunnel_type`, the `remote_addr` URL decides
    // whether the `6` suffix applies; the remote address display is also
    // normalized.
    let tunnel = TunnelInfo {
        tunnel_type: String::from("tcp"),
        local_addr: None,
        remote_addr: Some(Url {
            url: String::from("tcp://[2001:db8::2]:11010"),
        }),
        resolved_remote_addr: None,
    };
    assert_eq!(tunnel.display_tunnel_type(), "tcp6");
    let shown = tunnel.display_remote_addr();
    assert_eq!(shown.as_deref(), Some("tcp6://[2001:db8::2]:11010"));
}
#[test]
fn tunnel_info_prefers_resolved_remote_addr() {
    // The resolved (post-DNS) address wins for both display helpers, while
    // `effective_remote_addr` still exposes the raw, un-normalized URL.
    let tunnel = TunnelInfo {
        tunnel_type: String::from("txt-tcp"),
        local_addr: None,
        remote_addr: Some(Url {
            url: String::from("txt://et.example.com"),
        }),
        resolved_remote_addr: Some(Url {
            url: String::from("tcp://[2001:db8::3]:11010"),
        }),
    };
    assert_eq!(tunnel.display_tunnel_type(), "txt-tcp6");
    assert_eq!(
        tunnel.display_remote_addr().as_deref(),
        Some("tcp6://[2001:db8::3]:11010")
    );
    let raw = tunnel.effective_remote_addr().map(|url| url.url.as_str());
    assert_eq!(raw, Some("tcp://[2001:db8::3]:11010"));
}
}
+111 -2
View File
@@ -156,14 +156,14 @@ async fn init_three_node_ex_with_inst3<F: Fn(TomlConfigLoader) -> TomlConfigLoad
#[cfg(feature = "websocket")]
inst1
.get_conn_manager()
.add_connector(crate::tunnel::websocket::WSTunnelConnector::new(
.add_connector(crate::tunnel::websocket::WsTunnelConnector::new(
"ws://10.1.1.2:11011".parse().unwrap(),
));
} else if proto == "wss" {
#[cfg(feature = "websocket")]
inst1
.get_conn_manager()
.add_connector(crate::tunnel::websocket::WSTunnelConnector::new(
.add_connector(crate::tunnel::websocket::WsTunnelConnector::new(
"wss://10.1.1.2:11012".parse().unwrap(),
));
}
@@ -1535,6 +1535,48 @@ pub async fn relay_bps_limit_test(#[values(100, 200, 400, 800)] bps_limit: u64)
drop_insts(insts).await;
}
// Verifies the per-instance receive bandwidth limit: inst2 is configured with
// `instance_recv_bps_limit` and traffic is benchmarked between net_c and
// net_a — presumably relayed through inst2 (TODO confirm topology in
// init_three_node_ex). The measured rate must land within +-50 KiB/s of the
// configured limit.
#[rstest::rstest]
#[serial_test::serial]
#[tokio::test]
pub async fn instance_recv_bps_limit_test(#[values(100, 800)] bps_limit: u64) {
    let insts = init_three_node_ex(
        "tcp",
        |cfg| {
            // Only the middle node gets the limit; the flag is in bytes/s,
            // the test parameter in KiB/s.
            if cfg.get_inst_name() == "inst2" {
                let mut f = cfg.get_flags();
                f.instance_recv_bps_limit = bps_limit * 1024;
                cfg.set_flags(f);
            }
            cfg
        },
        false,
    )
    .await;
    let tcp_listener = TcpTunnelListener::new("tcp://0.0.0.0:22223".parse().unwrap());
    let tcp_connector = TcpTunnelConnector::new("tcp://10.144.144.3:22223".parse().unwrap());
    let bps = _tunnel_bench_netns(
        tcp_listener,
        tcp_connector,
        NetNS::new(Some("net_c".into())),
        NetNS::new(Some("net_a".into())),
    )
    .await;
    println!("bps: {}", bps);
    // Convert to KiB/s and allow 50 KiB/s slack in either direction.
    let bps = bps as u64 / 1024;
    assert!(
        bps >= bps_limit - 50 && bps <= bps_limit + 50,
        "bps: {}, bps_limit: {}",
        bps,
        bps_limit
    );
    drop_insts(insts).await;
}
async fn assert_try_direct_connect_err<C>(inst: &Instance, connector: C)
where
C: crate::tunnel::TunnelConnector + std::fmt::Debug,
@@ -2523,6 +2565,73 @@ pub async fn need_p2p_overrides_lazy_p2p() {
drop_insts(insts).await;
}
// A node with `disable_p2p` (inst1) must still establish a direct connection
// to a peer that declares `need_p2p` (inst3): the peer's need_p2p overrides
// the local disable_p2p preference.
#[tokio::test]
#[serial_test::serial]
pub async fn disable_p2p_still_connects_to_need_p2p_peers() {
    let insts = init_lazy_p2p_three_node_ex("udp", |cfg| {
        let mut flags = cfg.get_flags();
        if cfg.get_inst_name() == "inst1" {
            flags.disable_p2p = true;
        }
        if cfg.get_inst_name() == "inst3" {
            flags.need_p2p = true;
        }
        cfg.set_flags(flags);
        cfg
    })
    .await;
    let inst3_peer_id = insts[2].peer_id();
    // Initially inst1 reaches inst3 only via relay (route cost 2).
    wait_route_cost(&insts[0], inst3_peer_id, 2, Duration::from_secs(5)).await;
    // Then a direct peer entry for inst3 must appear on inst1 ...
    wait_for_condition(
        || async {
            insts[0]
                .get_peer_manager()
                .get_peer_map()
                .has_peer(inst3_peer_id)
        },
        Duration::from_secs(10),
    )
    .await;
    // ... and the route cost drops to 1 once the direct link is used.
    wait_route_cost(&insts[0], inst3_peer_id, 1, Duration::from_secs(10)).await;
    drop_insts(insts).await;
}
// An ordinary node (inst1) must not proactively dial a peer that set
// `disable_p2p` (inst3); traffic to that peer keeps flowing via relay and the
// route cost stays at 2.
#[tokio::test]
#[serial_test::serial]
pub async fn ordinary_nodes_do_not_proactively_connect_to_disable_p2p_peers() {
    let insts = init_lazy_p2p_three_node_ex("udp", |cfg| {
        if cfg.get_inst_name() == "inst3" {
            let mut flags = cfg.get_flags();
            flags.disable_p2p = true;
            cfg.set_flags(flags);
        }
        cfg
    })
    .await;
    let inst3_peer_id = insts[2].peer_id();
    wait_route_cost(&insts[0], inst3_peer_id, 2, Duration::from_secs(5)).await;
    // Relay connectivity must still work even though no p2p link is formed.
    assert!(
        ping_test("net_a", "10.144.144.3", None).await,
        "relay traffic to disable-p2p peers should still succeed"
    );
    // Give the lazy-p2p machinery time to (incorrectly) warm up, then verify
    // no direct peer entry appeared and the relay cost is unchanged.
    tokio::time::sleep(Duration::from_secs(3)).await;
    assert!(
        !insts[0]
            .get_peer_manager()
            .get_peer_map()
            .has_peer(inst3_peer_id),
        "ordinary nodes should not proactively establish p2p with disable-p2p peers"
    );
    wait_route_cost(&insts[0], inst3_peer_id, 2, Duration::from_secs(3)).await;
    drop_insts(insts).await;
}
#[tokio::test]
#[serial_test::serial]
pub async fn lazy_p2p_warms_up_before_p2p_only_send() {
+44 -35
View File
@@ -2,20 +2,28 @@ mod netfilter;
mod packet;
mod stack;
use std::net::{IpAddr, Ipv4Addr, UdpSocket};
use std::sync::Arc;
use std::{net::SocketAddr, pin::Pin};
use bytes::BytesMut;
use pnet::datalink;
use futures::{Sink, Stream};
use network_interface::NetworkInterfaceConfig;
use pnet::util::MacAddr;
use tokio::io::AsyncReadExt;
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
pin::Pin,
sync::Arc,
task::{Context as TaskContext, Poll},
};
use tokio::{io::AsyncReadExt, net::TcpStream, sync::Mutex};
use crate::common::scoped_task::ScopedTask;
use crate::tunnel::fake_tcp::netfilter::create_tun;
use crate::tunnel::{common::TunnelWrapper, Tunnel, TunnelError, TunnelInfo, TunnelListener};
use crate::{
common::scoped_task::ScopedTask,
tunnel::{
common::TunnelWrapper,
fake_tcp::netfilter::create_tun,
packet_def::{ZCPacket, ZCPacketType, PEER_MANAGER_HEADER_SIZE, TCP_TUNNEL_HEADER_SIZE},
FromUrl, IpVersion, SinkError, SinkItem, StreamItem, Tunnel, TunnelConnector, TunnelError,
TunnelInfo, TunnelListener,
},
};
use futures::Future;
@@ -34,11 +42,18 @@ impl IpToIfNameCache {
fn reload_ip_to_ifname(&self) {
self.ip_to_ifname.clear();
let interfaces = datalink::interfaces();
let Ok(interfaces) = network_interface::NetworkInterface::show() else {
tracing::warn!("failed to enumerate interfaces when reloading faketcp ip cache");
return;
};
for iface in interfaces {
for ip in iface.ips.iter() {
self.ip_to_ifname
.insert(ip.ip(), (iface.name.clone(), iface.mac));
let mac = iface.mac_addr.as_deref().and_then(|mac| {
mac.parse::<MacAddr>().map_err(|e| {
tracing::debug!(iface = %iface.name, mac, ?e, "failed to parse interface mac")
}).ok()
});
for ip in iface.addr.iter() {
self.ip_to_ifname.insert(ip.ip(), (iface.name.clone(), mac));
}
}
}
@@ -200,12 +215,7 @@ struct AcceptResult {
impl TunnelListener for FakeTcpTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
let port = self.addr.port().unwrap_or(0);
let bind_addr = crate::tunnel::check_scheme_and_get_socket_addr::<SocketAddr>(
&self.addr,
"faketcp",
crate::tunnel::IpVersion::Both,
)
.await?;
let bind_addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let os_listener = tokio::net::TcpListener::bind(bind_addr).await?;
tracing::info!(port, "FakeTcpTunnelListener listening");
self.os_listener = Some(os_listener);
@@ -239,6 +249,13 @@ impl TunnelListener for FakeTcpTunnelListener {
)
.into(),
),
resolved_remote_addr: Some(
crate::tunnel::build_url_from_socket_addr(
&socket.remote_addr().to_string(),
"faketcp",
)
.into(),
),
};
// We treat the fake tcp socket as a datagram tunnel directly
@@ -299,14 +316,9 @@ fn get_local_ip_for_destination(destination: IpAddr) -> Option<IpAddr> {
}
#[async_trait::async_trait]
impl crate::tunnel::TunnelConnector for FakeTcpTunnelConnector {
impl TunnelConnector for FakeTcpTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let remote_addr = crate::tunnel::check_scheme_and_get_socket_addr::<SocketAddr>(
&self.addr,
"faketcp",
crate::tunnel::IpVersion::Both,
)
.await?;
let remote_addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let local_ip = get_local_ip_for_destination(remote_addr.ip())
.ok_or(TunnelError::InternalError("Failed to get local ip".into()))?;
@@ -361,6 +373,10 @@ impl crate::tunnel::TunnelConnector for FakeTcpTunnelConnector {
.into(),
),
remote_addr: Some(self.addr.clone().into()),
resolved_remote_addr: Some(
crate::tunnel::build_url_from_socket_addr(&remote_addr.to_string(), "faketcp")
.into(),
),
};
let socket = Arc::new(socket);
@@ -380,13 +396,6 @@ impl crate::tunnel::TunnelConnector for FakeTcpTunnelConnector {
}
}
use crate::tunnel::packet_def::{
ZCPacket, ZCPacketType, PEER_MANAGER_HEADER_SIZE, TCP_TUNNEL_HEADER_SIZE,
};
use crate::tunnel::{SinkError, SinkItem, StreamItem};
use futures::{Sink, Stream};
use std::task::{Context as TaskContext, Poll};
type RecvFut = Pin<Box<dyn Future<Output = Option<(BytesMut, usize)>> + Send + Sync>>;
enum FakeTcpStreamState {
@@ -221,7 +221,8 @@ fn get_or_create_worker(interface_name: &str) -> io::Result<Arc<InterfaceWorker>
// But creation is rare.
// Let's find interface first.
let interfaces = datalink::interfaces();
let interfaces = std::panic::catch_unwind(datalink::interfaces)
.map_err(|_| io::Error::other("failed to enumerate network interfaces: pnet panicked"))?;
let interface = interfaces
.into_iter()
.find(|iface| iface.name == interface_name)
+140 -52
View File
@@ -1,16 +1,19 @@
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::{net::SocketAddr, pin::Pin, sync::Arc};
use std::{
collections::hash_map::DefaultHasher, hash::Hasher, net::SocketAddr, pin::Pin, sync::Arc,
};
use crate::{
common::{dns::socket_addrs, error::Error},
proto::common::TunnelInfo,
};
use async_trait::async_trait;
use derive_more::{From, TryInto};
use futures::{Sink, Stream};
use socket2::Protocol;
use std::fmt::Debug;
use strum::{Display, EnumString, IntoStaticStr, VariantArray};
use tokio::time::error::Elapsed;
use crate::common::dns::socket_addrs;
use crate::proto::common::TunnelInfo;
use self::packet_def::ZCPacket;
pub mod buf;
@@ -23,15 +26,6 @@ pub mod stats;
pub mod tcp;
pub mod udp;
pub const PROTO_PORT_OFFSET: &[(&str, u16)] = &[
("tcp", 0),
("udp", 0),
("wg", 1),
("ws", 1),
("wss", 2),
("faketcp", 3),
];
#[cfg(feature = "faketcp")]
pub mod fake_tcp;
@@ -193,45 +187,23 @@ pub(crate) trait FromUrl {
Self: Sized;
}
pub(crate) async fn check_scheme_and_get_socket_addr<T>(
url: &url::Url,
scheme: &str,
ip_version: IpVersion,
) -> Result<T, TunnelError>
where
T: FromUrl,
{
if url.scheme() != scheme {
return Err(TunnelError::InvalidProtocol(url.scheme().to_string()));
}
T::from_url(url.clone(), ip_version).await
}
fn default_port(scheme: &str) -> Option<u16> {
match scheme {
"tcp" => Some(11010),
"udp" => Some(11010),
"ws" => Some(80),
"wss" => Some(443),
"faketcp" => Some(11013),
"quic" => Some(11012),
"wg" => Some(11011),
_ => None,
}
}
#[async_trait::async_trait]
impl FromUrl for SocketAddr {
async fn from_url(url: url::Url, ip_version: IpVersion) -> Result<Self, TunnelError> {
let addrs = socket_addrs(&url, || default_port(url.scheme()))
.await
.map_err(|e| {
TunnelError::InvalidAddr(format!(
"failed to resolve socket addr, url: {}, error: {}",
url, e
))
})?;
let addrs = socket_addrs(&url, || {
(&url)
.try_into()
.ok()
.and_then(|s: TunnelScheme| s.try_into().ok())
.map(IpScheme::default_port)
})
.await
.map_err(|e| {
TunnelError::InvalidAddr(format!(
"failed to resolve socket addr, url: {}, error: {}",
url, e
))
})?;
tracing::debug!(?addrs, ?ip_version, ?url, "convert url to socket addrs");
let addrs = addrs
.into_iter()
@@ -305,3 +277,119 @@ pub fn generate_digest_from_str(str1: &str, str2: &str, digest: &mut [u8]) {
hasher.write(&digest[..(i + 1) * 8]);
}
}
/// Per-scheme metadata bundle: the underlying socket protocol and the offset
/// added to the 11010 base port when deriving a scheme's default port
/// (see `IpScheme::default_port`).
#[derive(Debug, Clone, Copy)]
struct IpSchemeAttributes {
    protocol: Protocol,
    port_offset: u16,
}
/// Tunnel schemes carried directly over an IP socket address.
/// Variants serialize and parse as lowercase strings ("tcp", "udp", "wg",
/// "quic", "ws", "wss", "faketcp"); feature-gated variants exist only when
/// the matching cargo feature is enabled.
#[derive(Debug, Clone, Copy, PartialEq, Display, EnumString, IntoStaticStr, VariantArray)]
#[strum(serialize_all = "lowercase")]
pub enum IpScheme {
    Tcp,
    Udp,
    #[cfg(feature = "wireguard")]
    Wg,
    #[cfg(feature = "quic")]
    Quic,
    #[cfg(feature = "websocket")]
    Ws,
    #[cfg(feature = "websocket")]
    Wss,
    #[cfg(feature = "faketcp")]
    FakeTcp,
}
impl IpScheme {
    /// Single source of truth for per-variant metadata; `const` so the
    /// `const fn` accessors below can use it.
    const fn attributes(self) -> IpSchemeAttributes {
        let (protocol, port_offset) = match self {
            Self::Tcp => (Protocol::TCP, 0),
            Self::Udp => (Protocol::UDP, 0),
            #[cfg(feature = "wireguard")]
            Self::Wg => (Protocol::UDP, 1),
            #[cfg(feature = "quic")]
            Self::Quic => (Protocol::UDP, 2),
            #[cfg(feature = "websocket")]
            Self::Ws => (Protocol::TCP, 1),
            #[cfg(feature = "websocket")]
            Self::Wss => (Protocol::TCP, 2),
            #[cfg(feature = "faketcp")]
            Self::FakeTcp => (Protocol::TCP, 3),
        };
        IpSchemeAttributes {
            protocol,
            port_offset,
        }
    }

    /// Socket-level transport protocol (TCP or UDP) used by this scheme.
    pub const fn protocol(self) -> Protocol {
        self.attributes().protocol
    }

    /// Offset added to the 11010 base when deriving this scheme's default port.
    pub const fn port_offset(self) -> u16 {
        self.attributes().port_offset
    }

    /// Default port for this scheme: the standard HTTP(S) ports for ws/wss,
    /// otherwise 11010 plus the scheme's port offset.
    pub const fn default_port(self) -> u16 {
        match self {
            #[cfg(feature = "websocket")]
            Self::Ws => 80,
            #[cfg(feature = "websocket")]
            Self::Wss => 443,
            _ => 11010 + self.port_offset(),
        }
    }
}
/// Every URL scheme the tunnel layer understands.
/// `Ip` wraps the socket-based schemes and is `#[strum(disabled)]`, so string
/// parsing via `EnumString` only matches the non-IP variants; the
/// `TryFrom<&url::Url>` impl falls back to parsing `IpScheme` separately.
#[derive(Debug, Clone, Copy, PartialEq, EnumString, From, TryInto)]
#[strum(serialize_all = "lowercase")]
pub enum TunnelScheme {
    #[strum(disabled)]
    Ip(IpScheme),
    #[cfg(unix)]
    Unix,
    // Only for connector
    Http,
    Https,
    Ring,
    Txt,
    Srv,
}
impl TryFrom<&url::Url> for TunnelScheme {
type Error = Error;
fn try_from(value: &url::Url) -> Result<Self, Self::Error> {
let scheme = value.scheme();
scheme.parse().or_else(|_| {
Ok(TunnelScheme::Ip(
scheme
.parse()
.map_err(|_| Error::InvalidUrl(value.to_string()))?,
))
})
}
}
/// `matches_scheme!(url, Pattern | …)` — true when the URL's scheme parses
/// into a `TunnelScheme` matching any of the given patterns.
/// NOTE(review): `$url` must be an expression whose `.as_ref()` yields a
/// `&url::Url` — confirm at call sites.
macro_rules! __matches_scheme__ {
    ($url:expr, $( $pattern:pat_param )|+ ) => {
        matches!($crate::tunnel::TunnelScheme::try_from(($url).as_ref()), Ok($( $pattern )|+))
    };
}
pub(crate) use __matches_scheme__ as matches_scheme;
pub fn get_protocol_by_url(l: &url::Url) -> Result<Protocol, Error> {
let TunnelScheme::Ip(scheme) = l.try_into()? else {
return Err(Error::InvalidUrl(l.to_string()));
};
Ok(scheme.protocol())
}
/// `matches_protocol!(url, Pattern | …)` — true when `get_protocol_by_url`
/// resolves the URL's scheme to a `Protocol` matching any given pattern;
/// false for non-IP schemes (where it returns an error).
macro_rules! __matches_protocol__ {
    ($url:expr, $( $pattern:pat_param )|+ ) => {
        matches!($crate::tunnel::get_protocol_by_url($url), Ok($( $pattern )|+))
    };
}
pub(crate) use __matches_protocol__ as matches_protocol;
+33 -34
View File
@@ -8,20 +8,16 @@ use std::{
use crate::tunnel::{
common::{setup_sokcet2, FramedReader, FramedWriter, TunnelWrapper},
TunnelInfo,
FromUrl, TunnelInfo,
};
use anyhow::Context;
use super::{IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelListener};
use quinn::{
congestion::BbrConfig, udp::RecvMeta, AsyncUdpSocket, ClientConfig, Connection, Endpoint,
EndpointConfig, ServerConfig, TransportConfig, UdpPoller,
};
use super::{
check_scheme_and_get_socket_addr, IpVersion, Tunnel, TunnelConnector, TunnelError,
TunnelListener,
};
pub fn transport_config() -> Arc<TransportConfig> {
let mut config = TransportConfig::default();
@@ -145,14 +141,14 @@ impl Drop for ConnWrapper {
}
}
pub struct QUICTunnelListener {
pub struct QuicTunnelListener {
addr: url::Url,
endpoint: Option<Endpoint>,
}
impl QUICTunnelListener {
impl QuicTunnelListener {
pub fn new(addr: url::Url) -> Self {
QUICTunnelListener {
QuicTunnelListener {
addr,
endpoint: None,
}
@@ -179,6 +175,9 @@ impl QUICTunnelListener {
remote_addr: Some(
super::build_url_from_socket_addr(&remote_addr.to_string(), "quic").into(),
),
resolved_remote_addr: Some(
super::build_url_from_socket_addr(&remote_addr.to_string(), "quic").into(),
),
};
Ok(Box::new(TunnelWrapper::new(
@@ -190,11 +189,9 @@ impl QUICTunnelListener {
}
#[async_trait::async_trait]
impl TunnelListener for QUICTunnelListener {
impl TunnelListener for QuicTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
let addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&self.addr, "quic", IpVersion::Both)
.await?;
let addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let endpoint = make_server_endpoint(addr)
.map_err(|e| anyhow::anyhow!("make server endpoint error: {:?}", e))?;
self.endpoint = Some(endpoint);
@@ -223,15 +220,15 @@ impl TunnelListener for QUICTunnelListener {
}
}
pub struct QUICTunnelConnector {
pub struct QuicTunnelConnector {
addr: url::Url,
endpoint: Option<Endpoint>,
ip_version: IpVersion,
}
impl QUICTunnelConnector {
impl QuicTunnelConnector {
pub fn new(addr: url::Url) -> Self {
QUICTunnelConnector {
QuicTunnelConnector {
addr,
endpoint: None,
ip_version: IpVersion::Both,
@@ -240,11 +237,9 @@ impl QUICTunnelConnector {
}
#[async_trait::async_trait]
impl TunnelConnector for QUICTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&self.addr, "quic", self.ip_version)
.await?;
impl TunnelConnector for QuicTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
if addr.port() == 0 {
return Err(TunnelError::InvalidAddr(format!(
"invalid remote QUIC port 0 in url: {} (port 0 is not a valid QUIC port)",
@@ -288,6 +283,10 @@ impl TunnelConnector for QUICTunnelConnector {
super::build_url_from_socket_addr(&local_addr.to_string(), "quic").into(),
),
remote_addr: Some(self.addr.clone().into()),
resolved_remote_addr: Some(
super::build_url_from_socket_addr(&connection.remote_address().to_string(), "quic")
.into(),
),
};
let arc_conn = Arc::new(ConnWrapper { conn: connection });
@@ -318,36 +317,36 @@ mod tests {
#[tokio::test]
async fn quic_pingpong() {
let listener = QUICTunnelListener::new("quic://0.0.0.0:21011".parse().unwrap());
let connector = QUICTunnelConnector::new("quic://127.0.0.1:21011".parse().unwrap());
let listener = QuicTunnelListener::new("quic://0.0.0.0:21011".parse().unwrap());
let connector = QuicTunnelConnector::new("quic://127.0.0.1:21011".parse().unwrap());
_tunnel_pingpong(listener, connector).await
}
#[tokio::test]
async fn quic_bench() {
let listener = QUICTunnelListener::new("quic://0.0.0.0:21012".parse().unwrap());
let connector = QUICTunnelConnector::new("quic://127.0.0.1:21012".parse().unwrap());
let listener = QuicTunnelListener::new("quic://0.0.0.0:21012".parse().unwrap());
let connector = QuicTunnelConnector::new("quic://127.0.0.1:21012".parse().unwrap());
_tunnel_bench(listener, connector).await
}
#[tokio::test]
async fn ipv6_pingpong() {
let listener = QUICTunnelListener::new("quic://[::1]:31015".parse().unwrap());
let connector = QUICTunnelConnector::new("quic://[::1]:31015".parse().unwrap());
let listener = QuicTunnelListener::new("quic://[::1]:31015".parse().unwrap());
let connector = QuicTunnelConnector::new("quic://[::1]:31015".parse().unwrap());
_tunnel_pingpong(listener, connector).await
}
#[tokio::test]
async fn ipv6_domain_pingpong() {
let listener = QUICTunnelListener::new("quic://[::1]:31016".parse().unwrap());
let listener = QuicTunnelListener::new("quic://[::1]:31016".parse().unwrap());
let mut connector =
QUICTunnelConnector::new("quic://test.easytier.top:31016".parse().unwrap());
QuicTunnelConnector::new("quic://test.easytier.top:31016".parse().unwrap());
connector.set_ip_version(IpVersion::V6);
_tunnel_pingpong(listener, connector).await;
let listener = QUICTunnelListener::new("quic://127.0.0.1:31016".parse().unwrap());
let listener = QuicTunnelListener::new("quic://127.0.0.1:31016".parse().unwrap());
let mut connector =
QUICTunnelConnector::new("quic://test.easytier.top:31016".parse().unwrap());
QuicTunnelConnector::new("quic://test.easytier.top:31016".parse().unwrap());
connector.set_ip_version(IpVersion::V4);
_tunnel_pingpong(listener, connector).await;
}
@@ -355,13 +354,13 @@ mod tests {
#[tokio::test]
async fn test_alloc_port() {
// v4
let mut listener = QUICTunnelListener::new("quic://0.0.0.0:0".parse().unwrap());
let mut listener = QuicTunnelListener::new("quic://0.0.0.0:0".parse().unwrap());
listener.listen().await.unwrap();
let port = listener.local_url().port().unwrap();
assert!(port > 0);
// v6
let mut listener = QUICTunnelListener::new("quic://[::]:0".parse().unwrap());
let mut listener = QuicTunnelListener::new("quic://[::]:0".parse().unwrap());
listener.listen().await.unwrap();
let port = listener.local_url().port().unwrap();
assert!(port > 0);
@@ -369,7 +368,7 @@ mod tests {
#[tokio::test]
async fn quic_connector_reject_port_zero() {
let mut connector = QUICTunnelConnector::new("quic://127.0.0.1:0".parse().unwrap());
let mut connector = QuicTunnelConnector::new("quic://127.0.0.1:0".parse().unwrap());
let err = connector.connect().await.unwrap_err().to_string();
assert!(err.contains("port 0"), "unexpected error: {}", err);
}
+23 -28
View File
@@ -1,3 +1,5 @@
use async_ringbuf::{traits::*, AsyncHeapCons, AsyncHeapProd, AsyncHeapRb};
use crossbeam::atomic::AtomicCell;
use std::{
collections::HashMap,
fmt::Debug,
@@ -5,9 +7,6 @@ use std::{
task::{ready, Poll},
};
use async_ringbuf::{traits::*, AsyncHeapCons, AsyncHeapProd, AsyncHeapRb};
use crossbeam::atomic::AtomicCell;
use async_trait::async_trait;
use futures::{Sink, SinkExt, Stream, StreamExt};
use once_cell::sync::Lazy;
@@ -16,15 +15,15 @@ use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use uuid::Uuid;
use crate::tunnel::{SinkError, SinkItem};
use crate::tunnel::{FromUrl, IpVersion, SinkError, SinkItem};
use super::{
build_url_from_socket_addr, check_scheme_and_get_socket_addr, common::TunnelWrapper,
StreamItem, Tunnel, TunnelConnector, TunnelError, TunnelInfo, TunnelListener,
build_url_from_socket_addr, common::TunnelWrapper, StreamItem, Tunnel, TunnelConnector,
TunnelError, TunnelInfo, TunnelListener,
};
pub static RING_TUNNEL_CAP: usize = 128;
static RING_TUNNEL_RESERVERD_CAP: usize = 4;
static RING_TUNNEL_RESERVED_CAP: usize = 4;
type RingLock = parking_lot::Mutex<()>;
@@ -44,7 +43,7 @@ impl RingTunnel {
pub fn new(cap: usize) -> Self {
let id = Uuid::new_v4();
let ring_impl = AsyncHeapRb::new(std::cmp::max(RING_TUNNEL_RESERVERD_CAP * 2, cap));
let ring_impl = AsyncHeapRb::new(std::cmp::max(RING_TUNNEL_RESERVED_CAP * 2, cap));
let (ring_prod_impl, ring_cons_impl) = ring_impl.split();
Self {
id,
@@ -120,7 +119,7 @@ impl RingSink {
pub fn try_send(&mut self, item: RingItem) -> Result<(), RingItem> {
let base = self.ring_prod_impl.base();
if base.occupied_len() >= base.capacity().get() - RING_TUNNEL_RESERVERD_CAP {
if base.occupied_len() >= base.capacity().get() - RING_TUNNEL_RESERVED_CAP {
return Err(item);
}
self.ring_prod_impl.try_push(item)
@@ -188,7 +187,7 @@ static CONNECTION_MAP: Lazy<Arc<std::sync::Mutex<ConnectionMap>>> =
#[derive(Debug)]
pub struct RingTunnelListener {
listerner_addr: url::Url,
listener_addr: url::Url,
conn_sender: UnboundedSender<Arc<Connection>>,
conn_receiver: UnboundedReceiver<Arc<Connection>>,
@@ -199,7 +198,7 @@ impl RingTunnelListener {
pub fn new(key: url::Url) -> Self {
let (conn_sender, conn_receiver) = tokio::sync::mpsc::unbounded_channel();
RingTunnelListener {
listerner_addr: key,
listener_addr: key,
conn_sender,
conn_receiver,
key_in_conn_map: None,
@@ -215,6 +214,9 @@ fn get_tunnel_for_client(conn: Arc<Connection>) -> impl Tunnel {
tunnel_type: "ring".to_owned(),
local_addr: Some(build_url_from_socket_addr(&conn.client.id.into(), "ring").into()),
remote_addr: Some(build_url_from_socket_addr(&conn.server.id.into(), "ring").into()),
resolved_remote_addr: Some(
build_url_from_socket_addr(&conn.server.id.into(), "ring").into(),
),
}),
)
}
@@ -227,25 +229,23 @@ fn get_tunnel_for_server(conn: Arc<Connection>) -> impl Tunnel {
tunnel_type: "ring".to_owned(),
local_addr: Some(build_url_from_socket_addr(&conn.server.id.into(), "ring").into()),
remote_addr: Some(build_url_from_socket_addr(&conn.client.id.into(), "ring").into()),
resolved_remote_addr: Some(
build_url_from_socket_addr(&conn.client.id.into(), "ring").into(),
),
}),
)
}
impl RingTunnelListener {
async fn get_addr(&self) -> Result<uuid::Uuid, TunnelError> {
check_scheme_and_get_socket_addr::<Uuid>(
&self.listerner_addr,
"ring",
super::IpVersion::Both,
)
.await
async fn get_addr(&self) -> Result<Uuid, TunnelError> {
Uuid::from_url(self.listener_addr.clone(), IpVersion::Both).await
}
}
#[async_trait]
impl TunnelListener for RingTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
tracing::info!("listen new conn of key: {}", self.listerner_addr);
tracing::info!("listen new conn of key: {}", self.listener_addr);
let addr = self.get_addr().await?;
CONNECTION_MAP
.lock()
@@ -256,11 +256,11 @@ impl TunnelListener for RingTunnelListener {
}
async fn accept(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
tracing::info!("waiting accept new conn of key: {}", self.listerner_addr);
tracing::info!("waiting accept new conn of key: {}", self.listener_addr);
let my_addr = self.get_addr().await?;
if let Some(conn) = self.conn_receiver.recv().await {
if conn.server.id == my_addr {
tracing::info!("accept new conn of key: {}", self.listerner_addr);
tracing::info!("accept new conn of key: {}", self.listener_addr);
return Ok(Box::new(get_tunnel_for_server(conn)));
} else {
tracing::error!(?conn.server.id, ?my_addr, "got new conn with wrong id");
@@ -276,7 +276,7 @@ impl TunnelListener for RingTunnelListener {
}
fn local_url(&self) -> url::Url {
self.listerner_addr.clone()
self.listener_addr.clone()
}
}
@@ -301,12 +301,7 @@ impl RingTunnelConnector {
#[async_trait]
impl TunnelConnector for RingTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let remote_addr = check_scheme_and_get_socket_addr::<Uuid>(
&self.remote_addr,
"ring",
super::IpVersion::Both,
)
.await?;
let remote_addr = Uuid::from_url(self.remote_addr.clone(), IpVersion::Both).await?;
let entry = CONNECTION_MAP
.lock()
.unwrap()
+39 -11
View File
@@ -1,14 +1,12 @@
use std::net::SocketAddr;
use super::{FromUrl, TunnelInfo};
use crate::tunnel::common::setup_sokcet2;
use async_trait::async_trait;
use futures::stream::FuturesUnordered;
use tokio::net::{TcpListener, TcpSocket, TcpStream};
use super::TunnelInfo;
use crate::tunnel::common::setup_sokcet2;
use super::{
check_scheme_and_get_socket_addr,
common::{wait_for_connect_futures, FramedReader, FramedWriter, TunnelWrapper},
IpVersion, Tunnel, TunnelError, TunnelListener,
};
@@ -43,6 +41,9 @@ impl TcpTunnelListener {
remote_addr: Some(
super::build_url_from_socket_addr(&stream.peer_addr()?.to_string(), "tcp").into(),
),
resolved_remote_addr: Some(
super::build_url_from_socket_addr(&stream.peer_addr()?.to_string(), "tcp").into(),
),
};
let (r, w) = stream.into_split();
@@ -58,9 +59,7 @@ impl TcpTunnelListener {
impl TunnelListener for TcpTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
self.listener = None;
let addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&self.addr, "tcp", IpVersion::Both)
.await?;
let addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let socket2_socket = socket2::Socket::new(
socket2::Domain::for_address(addr),
@@ -121,6 +120,9 @@ fn get_tunnel_with_tcp_stream(
super::build_url_from_socket_addr(&stream.local_addr()?.to_string(), "tcp").into(),
),
remote_addr: Some(remote_url.into()),
resolved_remote_addr: Some(
super::build_url_from_socket_addr(&stream.peer_addr()?.to_string(), "tcp").into(),
),
};
let (r, w) = stream.into_split();
@@ -189,10 +191,8 @@ impl TcpTunnelConnector {
#[async_trait]
impl super::TunnelConnector for TcpTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&self.addr, "tcp", self.ip_version)
.await?;
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
if self.bind_addrs.is_empty() {
self.connect_with_default_bind(addr).await
} else {
@@ -283,6 +283,34 @@ mod tests {
_tunnel_pingpong(listener, connector).await;
}
// Connecting via a hostname must keep the original URL in `remote_addr` while
// `resolved_remote_addr` carries the DNS-resolved socket address; on the
// accepting side both fields hold the same (already concrete) peer address.
#[tokio::test]
async fn connector_keeps_source_addr_and_reports_resolved_addr() {
    // Port 0 lets the OS pick a free port; read it back for the connector URL.
    let mut listener = TcpTunnelListener::new("tcp://127.0.0.1:0".parse().unwrap());
    listener.listen().await.unwrap();
    let port = listener.local_url().port().unwrap();
    let source_url: url::Url = format!("tcp://localhost:{port}").parse().unwrap();
    let mut connector = TcpTunnelConnector::new(source_url.clone());
    connector.set_ip_version(IpVersion::V4);
    let accept_task = tokio::spawn(async move { listener.accept().await.unwrap() });
    let tunnel = connector.connect().await.unwrap();
    let accepted_tunnel = accept_task.await.unwrap();
    let info = tunnel.info().unwrap();
    // The user-supplied hostname URL is preserved verbatim ...
    assert_eq!(info.remote_addr.unwrap().url, source_url.to_string());
    // ... while the resolved address is the concrete loopback socket addr.
    let resolved_remote_addr: url::Url = info.resolved_remote_addr.unwrap().into();
    assert_eq!(resolved_remote_addr.host_str(), Some("127.0.0.1"));
    assert_eq!(resolved_remote_addr.port(), Some(port));
    let accepted_info = accepted_tunnel.info().unwrap();
    assert_eq!(
        accepted_info.remote_addr,
        accepted_info.resolved_remote_addr,
    );
}
#[tokio::test]
async fn test_alloc_port() {
// v4
+24 -31
View File
@@ -21,7 +21,13 @@ use tokio::{
use tracing::{instrument, Instrument};
use super::{packet_def::V6HolePunchPacket, TunnelInfo};
use super::{
common::{setup_sokcet2, setup_sokcet2_ext, wait_for_connect_futures},
packet_def::{UDPTunnelHeader, V6HolePunchPacket, UDP_TUNNEL_HEADER_SIZE},
ring::{RingSink, RingStream},
FromUrl, IpVersion, Tunnel, TunnelConnCounter, TunnelError, TunnelInfo, TunnelListener,
TunnelUrl,
};
use crate::{
common::{join_joinset_background, scoped_task::ScopedTask, shrink_dashmap},
tunnel::{
@@ -32,13 +38,6 @@ use crate::{
},
};
use super::{
common::{setup_sokcet2, setup_sokcet2_ext, wait_for_connect_futures},
packet_def::{UDPTunnelHeader, UDP_TUNNEL_HEADER_SIZE},
ring::{RingSink, RingStream},
IpVersion, Tunnel, TunnelConnCounter, TunnelError, TunnelListener, TunnelUrl,
};
pub const UDP_DATA_MTU: usize = 2000;
type UdpCloseEventSender = UnboundedSender<(SocketAddr, Option<TunnelError>)>;
@@ -149,11 +148,11 @@ async fn respond_stun_packet(
req_buf: Vec<u8>,
) -> Result<(), anyhow::Error> {
use crate::common::stun_codec_ext::*;
use bytecodec::DecodeExt as _;
use bytecodec::EncodeExt as _;
use stun_codec::rfc5389::attributes::XorMappedAddress;
use stun_codec::rfc5389::methods::BINDING;
use stun_codec::{Message, MessageClass, MessageDecoder, MessageEncoder};
use bytecodec::{DecodeExt as _, EncodeExt as _};
use stun_codec::{
rfc5389::{attributes::XorMappedAddress, methods::BINDING},
Message, MessageClass, MessageDecoder, MessageEncoder,
};
let mut decoder = MessageDecoder::<Attribute>::new();
let req_msg = decoder
@@ -429,6 +428,9 @@ impl UdpTunnelListenerData {
remote_addr: Some(
build_url_from_socket_addr(&remote_addr.to_string(), "udp").into(),
),
resolved_remote_addr: Some(
build_url_from_socket_addr(&remote_addr.to_string(), "udp").into(),
),
}),
));
@@ -532,13 +534,8 @@ impl UdpTunnelListener {
#[async_trait]
impl TunnelListener for UdpTunnelListener {
async fn listen(&mut self) -> Result<(), super::TunnelError> {
let addr = super::check_scheme_and_get_socket_addr::<SocketAddr>(
&self.addr,
"udp",
IpVersion::Both,
)
.await?;
async fn listen(&mut self) -> Result<(), TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let socket2_socket = socket2::Socket::new(
socket2::Domain::for_address(addr),
@@ -778,6 +775,9 @@ impl UdpTunnelConnector {
build_url_from_socket_addr(&socket.local_addr()?.to_string(), "udp").into(),
),
remote_addr: Some(self.addr.clone().into()),
resolved_remote_addr: Some(
build_url_from_socket_addr(&dst_addr.to_string(), "udp").into(),
),
}),
)))
}
@@ -851,13 +851,8 @@ impl UdpTunnelConnector {
#[async_trait]
impl super::TunnelConnector for UdpTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn super::Tunnel>, super::TunnelError> {
let addr = super::check_scheme_and_get_socket_addr::<SocketAddr>(
&self.addr,
"udp",
self.ip_version,
)
.await?;
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
if self.bind_addrs.is_empty() || addr.is_ipv6() {
self.connect_with_default_bind(addr).await
} else {
@@ -889,7 +884,6 @@ mod tests {
use crate::{
common::global_ctx::tests::get_mock_global_ctx,
tunnel::{
check_scheme_and_get_socket_addr,
common::{
get_interface_name_by_ip,
tests::{_tunnel_bench, _tunnel_echo_server, _tunnel_pingpong, wait_for_condition},
@@ -1034,9 +1028,8 @@ mod tests {
for ip in ips {
println!("bind to ip: {}, {:?}", ip, bind_dev);
let addr = check_scheme_and_get_socket_addr::<SocketAddr>(
&format!("udp://{}:11111", ip).parse().unwrap(),
"udp",
let addr = SocketAddr::from_url(
format!("udp://{}:11111", ip).parse().unwrap(),
IpVersion::Both,
)
.await
+3 -1
View File
@@ -43,7 +43,8 @@ impl UnixSocketTunnelListener {
let info = TunnelInfo {
tunnel_type: "unix".to_owned(),
local_addr: Some(self.local_url().into()),
remote_addr: remote_addr.map(Into::into),
remote_addr: remote_addr.clone().map(Into::into),
resolved_remote_addr: remote_addr.map(Into::into),
};
let (r, w) = stream.into_split();
@@ -122,6 +123,7 @@ impl super::TunnelConnector for UnixSocketTunnelConnector {
tunnel_type: "unix".to_owned(),
local_addr: local_addr.map(Into::into),
remote_addr: Some(self.addr.clone().into()),
resolved_remote_addr: Some(self.addr.clone().into()),
};
let (r, w) = stream.into_split();
+164 -64
View File
@@ -1,25 +1,28 @@
use std::{net::SocketAddr, sync::Arc, time::Duration};
use anyhow::Context;
use bytes::BytesMut;
use futures::{stream::FuturesUnordered, SinkExt, StreamExt};
use tokio::{
net::{TcpListener, TcpSocket, TcpStream},
time::timeout,
};
use tokio_rustls::TlsAcceptor;
use tokio_websockets::{ClientBuilder, Limits, MaybeTlsStream, Message};
use zerocopy::AsBytes;
use super::TunnelInfo;
use crate::tunnel::insecure_tls::get_insecure_tls_client_config;
use super::{
common::{setup_sokcet2, wait_for_connect_futures, TunnelWrapper},
insecure_tls::{get_insecure_tls_cert, init_crypto_provider},
packet_def::{ZCPacket, ZCPacketType},
FromUrl, IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelListener,
};
use crate::{proto::common::TunnelInfo, tunnel::insecure_tls::get_insecure_tls_client_config};
use anyhow::Context;
use bytes::BytesMut;
use forwarded_header_value::ForwardedHeaderValue;
use futures::{stream::FuturesUnordered, SinkExt, StreamExt};
use pnet::ipnetwork::IpNetwork;
use std::{
net::SocketAddr,
sync::{Arc, LazyLock},
time::Duration,
};
use tokio::{
net::{TcpListener, TcpSocket, TcpStream},
time::timeout,
};
use tokio_rustls::TlsAcceptor;
use tokio_util::either::Either;
use tokio_websockets::{ClientBuilder, Limits, MaybeTlsStream, Message, ServerBuilder};
use zerocopy::AsBytes;
fn is_wss(addr: &url::Url) -> Result<bool, TunnelError> {
match addr.scheme() {
@@ -59,67 +62,106 @@ async fn map_from_ws_message(
)))
}
static TRUSTED_PROXIES: LazyLock<Vec<IpNetwork>> = LazyLock::new(|| {
[
"127.0.0.0/8", // IPV4 Loopback
"10.0.0.0/8", // IPV4 Private Networks
"172.16.0.0/12",
"192.168.0.0/16",
"::1/128", // IPV6 Loopback
"fc00::/7", // IPV6 Private network
]
.into_iter()
.map(|s| s.parse().unwrap())
.collect()
});
#[derive(Debug)]
pub struct WSTunnelListener {
pub struct WsTunnelListener {
addr: url::Url,
listener: Option<TcpListener>,
}
impl WSTunnelListener {
impl WsTunnelListener {
pub fn new(addr: url::Url) -> Self {
WSTunnelListener {
WsTunnelListener {
addr,
listener: None,
}
}
async fn try_accept(&self, stream: TcpStream) -> Result<Box<dyn Tunnel>, TunnelError> {
let info = TunnelInfo {
tunnel_type: self.addr.scheme().to_owned(),
local_addr: Some(self.local_url().into()),
remote_addr: Some(
super::build_url_from_socket_addr(
&stream.peer_addr()?.to_string(),
self.addr.scheme().to_string().as_str(),
)
.into(),
),
};
let peer_addr = stream.peer_addr()?;
let mut remote_addr =
super::build_url_from_socket_addr(&peer_addr.to_string(), self.addr.scheme());
let server_bulder = tokio_websockets::ServerBuilder::new().limits(Limits::unlimited());
let ret: Box<dyn Tunnel> = if is_wss(&self.addr)? {
let stream = if is_wss(&self.addr)? {
init_crypto_provider();
let (certs, key) = get_insecure_tls_cert();
let config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(certs, key)
.with_context(|| "Failed to create server config")?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let stream = acceptor.accept(stream).await?;
let (write, read) = server_bulder.accept(stream).await?.split();
Box::new(TunnelWrapper::new(
read.filter_map(map_from_ws_message),
write.with(sink_from_zc_packet),
Some(info),
))
let stream = TlsAcceptor::from(Arc::new(config)).accept(stream).await?;
Either::Left(stream)
} else {
let (write, read) = server_bulder.accept(stream).await?.split();
Box::new(TunnelWrapper::new(
read.filter_map(map_from_ws_message),
write.with(sink_from_zc_packet),
Some(info),
))
Either::Right(stream)
};
Ok(ret)
let (request, stream) = ServerBuilder::new()
.limits(Limits::unlimited())
.accept(stream)
.await?;
if TRUSTED_PROXIES
.iter()
.any(|net| net.contains(peer_addr.ip()))
{
if let Some(forwarded) = request
.headers()
.get("Forwarded")
.and_then(|f| f.to_str().ok())
.and_then(|f| ForwardedHeaderValue::from_forwarded(f).ok())
.or_else(|| {
request
.headers()
.get("X-Forwarded-For")
.and_then(|f| f.to_str().ok())
.and_then(|f| ForwardedHeaderValue::from_x_forwarded_for(f).ok())
})
{
if let Some(ip) = forwarded.remotest_forwarded_for_ip() {
remote_addr.set_host(Some(&ip.to_string())).map_err(|_| {
TunnelError::InvalidAddr(format!("invalid forwarded ip {}", ip))
})?;
remote_addr
.query_pairs_mut()
.append_pair("proxy", &peer_addr.to_string());
}
}
}
let (write, read) = stream.split();
let remote_addr: crate::proto::common::Url = remote_addr.into();
let info = TunnelInfo {
tunnel_type: self.addr.scheme().to_owned(),
local_addr: Some(self.local_url().into()),
remote_addr: Some(remote_addr.clone()),
resolved_remote_addr: Some(remote_addr),
};
Ok(Box::new(TunnelWrapper::new(
read.filter_map(map_from_ws_message),
write.with(sink_from_zc_packet),
Some(info),
)))
}
}
#[async_trait::async_trait]
impl TunnelListener for WSTunnelListener {
impl TunnelListener for WsTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let socket2_socket = socket2::Socket::new(
@@ -159,16 +201,16 @@ impl TunnelListener for WSTunnelListener {
}
}
pub struct WSTunnelConnector {
pub struct WsTunnelConnector {
addr: url::Url,
ip_version: IpVersion,
bind_addrs: Vec<SocketAddr>,
}
impl WSTunnelConnector {
impl WsTunnelConnector {
pub fn new(addr: url::Url) -> Self {
WSTunnelConnector {
WsTunnelConnector {
addr,
ip_version: IpVersion::Both,
@@ -195,6 +237,9 @@ impl WSTunnelConnector {
.into(),
),
remote_addr: Some(addr.clone().into()),
resolved_remote_addr: Some(
super::build_url_from_socket_addr(&socket_addr.to_string(), addr.scheme()).into(),
),
};
let c = ClientBuilder::from_uri(http::Uri::try_from(addr.to_string()).unwrap());
@@ -267,8 +312,8 @@ impl WSTunnelConnector {
}
#[async_trait::async_trait]
impl TunnelConnector for WSTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
impl TunnelConnector for WsTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
if self.bind_addrs.is_empty() || addr.is_ipv6() {
self.connect_with_default_bind(addr).await
@@ -292,17 +337,17 @@ impl TunnelConnector for WSTunnelConnector {
#[cfg(test)]
pub mod tests {
use super::*;
use crate::tunnel::common::tests::_tunnel_pingpong;
use crate::tunnel::websocket::{WSTunnelConnector, WSTunnelListener};
use crate::tunnel::{TunnelConnector, TunnelListener};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[rstest::rstest]
#[tokio::test]
#[serial_test::serial]
async fn ws_pingpong(#[values("ws", "wss")] proto: &str) {
let listener = WSTunnelListener::new(format!("{}://0.0.0.0:25556", proto).parse().unwrap());
let listener = WsTunnelListener::new(format!("{}://0.0.0.0:25556", proto).parse().unwrap());
let connector =
WSTunnelConnector::new(format!("{}://127.0.0.1:25556", proto).parse().unwrap());
WsTunnelConnector::new(format!("{}://127.0.0.1:25556", proto).parse().unwrap());
_tunnel_pingpong(listener, connector).await
}
@@ -310,9 +355,9 @@ pub mod tests {
#[tokio::test]
#[serial_test::serial]
async fn ws_pingpong_bind(#[values("ws", "wss")] proto: &str) {
let listener = WSTunnelListener::new(format!("{}://0.0.0.0:25557", proto).parse().unwrap());
let listener = WsTunnelListener::new(format!("{}://0.0.0.0:25557", proto).parse().unwrap());
let mut connector =
WSTunnelConnector::new(format!("{}://127.0.0.1:25557", proto).parse().unwrap());
WsTunnelConnector::new(format!("{}://127.0.0.1:25557", proto).parse().unwrap());
connector.set_bind_addrs(vec!["127.0.0.1:0".parse().unwrap()]);
_tunnel_pingpong(listener, connector).await
}
@@ -331,18 +376,73 @@ pub mod tests {
#[tokio::test]
async fn ws_accept_wss() {
let mut listener = WSTunnelListener::new("wss://0.0.0.0:25558".parse().unwrap());
let mut listener = WsTunnelListener::new("wss://0.0.0.0:25558".parse().unwrap());
listener.listen().await.unwrap();
let j = tokio::spawn(async move {
let _ = listener.accept().await;
});
let mut connector = WSTunnelConnector::new("ws://127.0.0.1:25558".parse().unwrap());
let mut connector = WsTunnelConnector::new("ws://127.0.0.1:25558".parse().unwrap());
connector.connect().await.unwrap_err();
let mut connector = WSTunnelConnector::new("wss://127.0.0.1:25558".parse().unwrap());
let mut connector = WsTunnelConnector::new("wss://127.0.0.1:25558".parse().unwrap());
connector.connect().await.unwrap();
j.abort();
}
#[tokio::test]
async fn ws_forwarded() {
let mut listener = WsTunnelListener::new("ws://127.0.0.1:25559".parse().unwrap());
listener.listen().await.unwrap();
let server_task = tokio::spawn(async move {
let tunnel = listener.accept().await.unwrap();
let remote_addr = tunnel
.info()
.unwrap()
.remote_addr
.unwrap()
.url
.parse::<url::Url>()
.unwrap();
assert_eq!(remote_addr.host_str().unwrap(), "203.0.113.5");
let proxy_addr = remote_addr
.query_pairs()
.find(|(k, _)| k == "proxy")
.map(|(_, v)| v.into_owned())
.unwrap();
assert_eq!(proxy_addr, "127.0.0.1:25560");
tunnel
});
let socket = TcpSocket::new_v4().unwrap();
socket.bind("127.0.0.1:25560".parse().unwrap()).unwrap();
let mut stream = socket
.connect("127.0.0.1:25559".parse().unwrap())
.await
.unwrap();
let handshake = "GET / HTTP/1.1\r\n\
Host: 127.0.0.1:25559\r\n\
Upgrade: websocket\r\n\
Connection: Upgrade\r\n\
Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n\
Sec-WebSocket-Version: 13\r\n\
X-Forwarded-For: 203.0.113.5, 192.168.1.1\r\n\
\r\n";
stream.write_all(handshake.as_bytes()).await.unwrap();
let mut buf = [0u8; 1024];
let bytes_read = stream.read(&mut buf).await.unwrap();
let response = String::from_utf8_lossy(&buf[..bytes_read]);
assert!(response.contains("101 Switching Protocols"));
let _tunnel = server_task.await.unwrap();
}
}
+26 -23
View File
@@ -20,7 +20,14 @@ use futures::{stream::FuturesUnordered, SinkExt, StreamExt};
use rand::RngCore;
use tokio::{net::UdpSocket, sync::Mutex, task::JoinSet};
use super::TunnelInfo;
use super::{
common::{setup_sokcet2, setup_sokcet2_ext, wait_for_connect_futures},
generate_digest_from_str,
packet_def::{ZCPacketType, PEER_MANAGER_HEADER_SIZE},
ring::create_ring_tunnel_pair,
FromUrl, IpVersion, Tunnel, TunnelError, TunnelInfo, TunnelListener, TunnelUrl, ZCPacketSink,
ZCPacketStream,
};
use crate::{
common::shrink_dashmap,
tunnel::{
@@ -30,15 +37,6 @@ use crate::{
},
};
use super::{
check_scheme_and_get_socket_addr,
common::{setup_sokcet2, setup_sokcet2_ext, wait_for_connect_futures},
generate_digest_from_str,
packet_def::{ZCPacketType, PEER_MANAGER_HEADER_SIZE},
ring::create_ring_tunnel_pair,
IpVersion, Tunnel, TunnelError, TunnelListener, TunnelUrl, ZCPacketSink, ZCPacketStream,
};
const MAX_PACKET: usize = 2048;
#[derive(Debug, Clone)]
@@ -202,7 +200,10 @@ impl WgPeerData {
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
Err(e) => {
tracing::error!("Failed to send decapsulation-instructed packet to WireGuard endpoint: {:?}", e);
tracing::error!(
"Failed to send decapsulation-instructed packet to WireGuard endpoint: {:?}",
e
);
return;
}
};
@@ -214,7 +215,10 @@ impl WgPeerData {
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
Err(e) => {
tracing::error!("Failed to send decapsulation-instructed packet to WireGuard endpoint: {:?}", e);
tracing::error!(
"Failed to send decapsulation-instructed packet to WireGuard endpoint: {:?}",
e
);
break;
}
};
@@ -534,6 +538,9 @@ impl WgTunnelListener {
remote_addr: Some(
build_url_from_socket_addr(&addr.to_string(), "wg").into(),
),
resolved_remote_addr: Some(
build_url_from_socket_addr(&addr.to_string(), "wg").into(),
),
}),
));
if let Err(e) = conn_sender.send(tunnel) {
@@ -550,10 +557,8 @@ impl WgTunnelListener {
#[async_trait]
impl TunnelListener for WgTunnelListener {
async fn listen(&mut self) -> Result<(), super::TunnelError> {
let addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&self.addr, "wg", IpVersion::Both)
.await?;
async fn listen(&mut self) -> Result<(), TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both).await?;
let socket2_socket = socket2::Socket::new(
socket2::Domain::for_address(addr),
socket2::Type::DGRAM,
@@ -683,6 +688,9 @@ impl WgTunnelConnector {
tunnel_type: "wg".to_owned(),
local_addr: Some(super::build_url_from_socket_addr(&local_addr, "wg").into()),
remote_addr: Some(addr_url.into()),
resolved_remote_addr: Some(
super::build_url_from_socket_addr(&addr.to_string(), "wg").into(),
),
}),
Some(Box::new(wg_peer)),
));
@@ -705,13 +713,8 @@ impl WgTunnelConnector {
#[async_trait]
impl super::TunnelConnector for WgTunnelConnector {
#[tracing::instrument]
async fn connect(&mut self) -> Result<Box<dyn super::Tunnel>, super::TunnelError> {
let addr = super::check_scheme_and_get_socket_addr::<SocketAddr>(
&self.addr,
"wg",
self.ip_version,
)
.await?;
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version).await?;
if addr.is_ipv6() {
return self.connect_with_ipv6(addr).await;
+9 -2
View File
@@ -36,8 +36,7 @@ thread_local! {
}
pub fn setup_panic_handler() {
use std::backtrace;
use std::io::Write;
use std::{backtrace, io::Write};
std::panic::set_hook(Box::new(|info| {
let mut stderr = std::io::stderr();
let sep = format!("{}\n", "=======".repeat(10));
@@ -141,3 +140,11 @@ pub fn weak_upgrade<T>(weak: &std::sync::Weak<T>) -> anyhow::Result<std::sync::A
weak.upgrade()
.ok_or_else(|| anyhow::anyhow!("{} not available", std::any::type_name::<T>()))
}
pub trait BoxExt: Sized {
fn boxed(self) -> Box<Self> {
Box::new(self)
}
}
impl<T> BoxExt for T {}
+2 -2
View File
@@ -7,7 +7,7 @@
Copies binaries to the install directory and updates the system PATH.
.PARAMETER Version
Target version: "latest", "stable", or a specific tag like "v2.5.0".
Target version: "latest", "stable", or a specific tag like "v2.6.0".
Default: "latest"
.PARAMETER InstallDir
@@ -16,7 +16,7 @@
.EXAMPLE
.\install.ps1
.\install.ps1 -Version v2.5.0
.\install.ps1 -Version v2.6.0
.\install.ps1 -InstallDir "C:\EasyTier"
.NOTES
@@ -14,6 +14,9 @@ class TauriVpnService : VpnService() {
companion object {
@JvmField var triggerCallback: (String, JSObject) -> Unit = { _, _ -> }
@JvmField var self: TauriVpnService? = null
@JvmField var ipv4Addr: String? = null
@JvmField var routes: Array<String> = emptyArray()
@JvmField var dns: String? = null
const val IPV4_ADDR = "IPV4_ADDR"
const val ROUTES = "ROUTES"
@@ -27,6 +30,9 @@ class TauriVpnService : VpnService() {
override fun onStartCommand(intent: Intent?, flags: Int, startId: Int): Int {
println("vpn on start command ${intent?.getExtras()} $intent")
var args = intent?.getExtras()
ipv4Addr = args?.getString(IPV4_ADDR)
routes = args?.getStringArray(ROUTES) ?: emptyArray()
dns = args?.getString(DNS)
vpnInterface = createVpnInterface(args)
println("vpn created ${vpnInterface.fd}")
@@ -63,6 +69,13 @@ class TauriVpnService : VpnService() {
triggerCallback("vpn_service_stop", JSObject())
vpnInterface.close()
}
clearStatus()
}
private fun clearStatus() {
ipv4Addr = null
routes = emptyArray()
dns = null
}
private fun createVpnInterface(args: Bundle?): ParcelFileDescriptor {
@@ -3,7 +3,9 @@ package com.plugin.vpnservice
import android.app.Activity
import android.content.Intent
import android.net.VpnService
import androidx.activity.result.ActivityResult
import app.tauri.annotation.Command
import app.tauri.annotation.ActivityCallback
import app.tauri.annotation.InvokeArg
import app.tauri.annotation.TauriPlugin
import app.tauri.plugin.Invoke
@@ -48,46 +50,70 @@ class VpnServicePlugin(private val activity: Activity) : Plugin(activity) {
@Command
fun prepareVpn(invoke: Invoke) {
println("prepare vpn in plugin")
val it = VpnService.prepare(activity)
var ret = JSObject()
if (it != null) {
activity.startActivityForResult(it, 0x0f)
ret.put("errorMsg", "again")
activity.runOnUiThread {
println("prepare vpn in plugin")
val it = VpnService.prepare(activity)
if (it != null) {
startActivityForResult(invoke, it, "onPrepareVpnResult")
return@runOnUiThread
}
val ret = JSObject()
ret.put("granted", true)
invoke.resolve(ret)
}
}
@ActivityCallback
fun onPrepareVpnResult(invoke: Invoke, result: ActivityResult) {
val ret = JSObject()
ret.put("granted", result.resultCode == Activity.RESULT_OK)
invoke.resolve(ret)
}
@Command
fun startVpn(invoke: Invoke) {
val args = invoke.parseArgs(StartVpnArgs::class.java)
println("start vpn in plugin, args: $args")
activity.runOnUiThread {
println("start vpn in plugin, args: $args")
TauriVpnService.self?.onRevoke()
TauriVpnService.self?.onRevoke()
val it = VpnService.prepare(activity)
var ret = JSObject()
if (it != null) {
ret.put("errorMsg", "need_prepare")
} else {
var intent = Intent(activity, TauriVpnService::class.java)
intent.putExtra(TauriVpnService.IPV4_ADDR, args.ipv4Addr)
intent.putExtra(TauriVpnService.ROUTES, args.routes)
intent.putExtra(TauriVpnService.DNS, args.dns)
intent.putExtra(TauriVpnService.DISALLOWED_APPLICATIONS, args.disallowedApplications)
intent.putExtra(TauriVpnService.MTU, args.mtu)
val it = VpnService.prepare(activity)
val ret = JSObject()
if (it != null) {
ret.put("errorMsg", "need_prepare")
} else {
val intent = Intent(activity, TauriVpnService::class.java)
intent.putExtra(TauriVpnService.IPV4_ADDR, args.ipv4Addr)
intent.putExtra(TauriVpnService.ROUTES, args.routes)
intent.putExtra(TauriVpnService.DNS, args.dns)
intent.putExtra(TauriVpnService.DISALLOWED_APPLICATIONS, args.disallowedApplications)
intent.putExtra(TauriVpnService.MTU, args.mtu)
activity.startService(intent)
activity.startService(intent)
}
invoke.resolve(ret)
}
invoke.resolve(ret)
}
@Command
fun stopVpn(invoke: Invoke) {
println("stop vpn in plugin")
TauriVpnService.self?.onRevoke()
activity.stopService(Intent(activity, TauriVpnService::class.java))
println("stop vpn in plugin end")
invoke.resolve(JSObject())
activity.runOnUiThread {
println("stop vpn in plugin")
TauriVpnService.self?.onRevoke()
activity.stopService(Intent(activity, TauriVpnService::class.java))
println("stop vpn in plugin end")
invoke.resolve(JSObject())
}
}
@Command
fun getVpnStatus(invoke: Invoke) {
val ret = JSObject()
ret.put("running", TauriVpnService.self != null)
ret.put("ipv4Addr", TauriVpnService.ipv4Addr)
ret.put("routes", TauriVpnService.routes)
ret.put("dns", TauriVpnService.dns)
invoke.resolve(ret)
}
}
+1
View File
@@ -3,6 +3,7 @@ const COMMANDS: &[&str] = &[
"prepare_vpn",
"start_vpn",
"stop_vpn",
"get_vpn_status",
"registerListener",
];
+12
View File
@@ -10,6 +10,7 @@ export async function ping(value: string): Promise<string | null> {
export interface InvokeResponse {
errorMsg?: string;
granted?: boolean;
}
export interface StartVpnRequest {
@@ -20,6 +21,13 @@ export interface StartVpnRequest {
mtu?: number;
}
export interface VpnStatusResponse {
running: boolean;
ipv4Addr?: string;
routes?: string[];
dns?: string;
}
export async function prepare_vpn(): Promise<InvokeResponse | null> {
return await invoke<InvokeResponse>('plugin:vpnservice|prepare_vpn', {})
}
@@ -33,3 +41,7 @@ export async function start_vpn(request: StartVpnRequest): Promise<InvokeRespons
export async function stop_vpn(): Promise<InvokeResponse | null> {
return await invoke<InvokeResponse>('plugin:vpnservice|stop_vpn', {})
}
export async function get_vpn_status(): Promise<VpnStatusResponse | null> {
return await invoke<VpnStatusResponse>('plugin:vpnservice|get_vpn_status', {})
}
@@ -12,6 +12,10 @@ class ExamplePlugin: Plugin {
let args = try invoke.parseArgs(PingArgs.self)
invoke.resolve(["value": args.value ?? ""])
}
@objc public func getVpnStatus(_ invoke: Invoke) {
invoke.resolve(["running": false])
}
}
@_cdecl("init_plugin_vpnservice")
@@ -0,0 +1,13 @@
# Automatically generated - DO NOT EDIT!
"$schema" = "../../schemas/schema.json"
[[permission]]
identifier = "allow-get-vpn-status"
description = "Enables the get_vpn_status command without any pre-configured scope."
commands.allow = ["get_vpn_status"]
[[permission]]
identifier = "deny-get-vpn-status"
description = "Denies the get_vpn_status command without any pre-configured scope."
commands.deny = ["get_vpn_status"]
@@ -16,6 +16,32 @@ Default permissions for the plugin
</tr>
<tr>
<td>
`vpnservice:allow-get-vpn-status`
</td>
<td>
Enables the get_vpn_status command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
`vpnservice:deny-get-vpn-status`
</td>
<td>
Denies the get_vpn_status command without any pre-configured scope.
</td>
</tr>
<tr>
<td>
@@ -294,6 +294,18 @@
"PermissionKind": {
"type": "string",
"oneOf": [
{
"description": "Enables the get_vpn_status command without any pre-configured scope.",
"type": "string",
"const": "allow-get-vpn-status",
"markdownDescription": "Enables the get_vpn_status command without any pre-configured scope."
},
{
"description": "Denies the get_vpn_status command without any pre-configured scope.",
"type": "string",
"const": "deny-get-vpn-status",
"markdownDescription": "Denies the get_vpn_status command without any pre-configured scope."
},
{
"description": "Enables the ping command without any pre-configured scope.",
"type": "string",
+6
View File
@@ -51,4 +51,10 @@ impl<R: Runtime> Vpnservice<R> {
.run_mobile_plugin("stop_vpn", payload)
.map_err(Into::into)
}
pub fn get_vpn_status(&self, payload: VoidRequest) -> crate::Result<VpnStatus> {
self.0
.run_mobile_plugin("get_vpn_status", payload)
.map_err(Into::into)
}
}
+9
View File
@@ -33,3 +33,12 @@ pub struct StartVpnRequest {
pub struct Status {
pub error_msg: Option<String>,
}
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct VpnStatus {
pub running: bool,
pub ipv4_addr: Option<String>,
pub routes: Option<Vec<String>>,
pub dns: Option<String>,
}