Compare commits

...

53 Commits

Author SHA1 Message Date
Mg Pig 40c6de8e31 fix(core): restrict implicit config merge to explicit config files (#2127) 2026-04-19 10:39:04 +08:00
KKRainbow 2db655bd6d fix: refresh ACL groups and enable TCP_NODELAY for WebSocket (#2118)
* fix: refresh ACL groups and enable TCP_NODELAY for WebSocket
* add remove_peers to remove list of peer id in ospf route
* fix secure tunnel for unreliable udp tunnel
* fix(web-client): timeout secure tunnel handshake
* fix(web-server): tolerate delayed secure hello
* fix quic endpoint panic
* fix replay check
2026-04-19 10:37:39 +08:00
Mg Pig c49c56612b feat(ui): add ACL graphical configuration interface (#1815) 2026-04-18 20:23:53 +08:00
Mg Pig 6ca074abae feat(nix): 添加 rustfmt 和 clippy 到 Rust 工具链扩展 (#2126) 2026-04-18 20:23:26 +08:00
Luna Yao 84430055ab remove hashbrown (#2108) 2026-04-18 11:06:34 +08:00
Mg Pig 432fcb3fc3 build(nix): add mold to the flake dev shell (#2122) 2026-04-18 09:06:45 +08:00
Luna Yao fae32361f2 chore: update Rust to 1.95; replace cfg_if with cfg_select (#2121) 2026-04-17 23:41:31 +08:00
Luna Yao bcb2e512d4 utils: move code to a dedicated mod; add AsyncRuntime (#2072) 2026-04-16 23:32:07 +08:00
Luna Yao 82ca04a8a7 proto(utils): add MessageModel & RepeatedMessageModel (#2068)
* add FromIterator, Extend, AsRef, AsMut, TryFrom<[Message]>
2026-04-15 19:40:09 +08:00
Luna Yao 2ef3b72224 proto: add some conversion for Url (#2067) 2026-04-15 19:39:24 +08:00
Luna Yao 6d319cba1d tests(relay_peer_e2e_encryption): wait for the key of inst3 before ping test (#2069) 2026-04-15 19:39:00 +08:00
Luna Yao 3687519ef3 turn off ansi for file log (#2110)
Co-authored-by: KKRainbow <443152178@qq.com>
2026-04-15 19:38:27 +08:00
Luna Yao 3a4ac59467 log: change default log level of tests to WARNING (#2113) 2026-04-14 18:10:38 +08:00
Luna Yao 1cfc135df3 ci: remove -D warnings from test (#2109)
Co-authored-by: KKRainbow <443152178@qq.com>
2026-04-14 12:35:05 +08:00
KKRainbow 5b35c51da9 fix packet split on udp tunnel and avoid tcp proxy access rpc portal (#2107)
* distinct control / data when forward packets
* fix rpc split for udp tunnel
* feat(easytier-web): pass public ip in validate token webhook
* protect rpc port from subnet proxy
2026-04-13 11:03:09 +08:00
Luna Yao ec7ddd3bad fix: filter overlapped proxy cidrs in ProxyCidrsMonitor (#2079)
* feat(route): add async methods to list proxy CIDRs for IPv4 and IPv6
* refactor(ProxyCidrsMonitor): get proxy cidrs from list_proxy_cidrs
2026-04-12 22:18:54 +08:00
Luna Yao 6f3e708679 tunnel(bind): gather all bind logic to a single function (#2070)
* extract a Bindable trait for binding TcpSocket, TcpListener, and UdpSocket
2026-04-12 22:16:58 +08:00
Luna Yao 869e1b89f5 fix: remove log (file) when level is explicitly set to OFF (#2083)
* fix level filter for OFF
* remove unwrap of file appender creation
2026-04-12 22:16:30 +08:00
Luna Yao 9e0a3b6936 ci: rewrite build workflows (#2089) 2026-04-12 22:14:41 +08:00
Luna Yao c6cb1a77d0 chore: clippy fix some code on Windows (#2106) 2026-04-12 22:13:58 +08:00
deddey 83010861ba Optimize network interface configuration for macOS and FreeBSD to avoid hard-coded IP addresses (#1853)
Co-authored-by: KKRainbow <443152178@qq.com>
2026-04-12 21:00:59 +08:00
Luna Yao daa53e5168 log: auto-init log for tests (#2073) 2026-04-12 13:04:21 +08:00
fanyang 51befdbf87 fix(faketcp): harden packet parsing against malformed frames (#2103)
Discard malformed fake TCP frames instead of panicking so OpenWrt
nodes can survive unexpected or truncated packets.

Also emit the correct IPv6 ethertype and cover the parser with
round-trip and truncation regression tests.
2026-04-12 13:02:23 +08:00
Luna Yao 8311b11713 refactor: remove NoGroAsyncUdpSocket (#1867) 2026-04-10 23:22:08 +08:00
Luna Yao 19c80c7b9c cli: do not add offset when port = 0 (#2085) 2026-04-10 23:21:15 +08:00
Luna Yao a879dd1b14 chore: update Rust to 2024 edition (#2066) 2026-04-10 00:22:12 +08:00
Luna Yao a8feb9ac2b chore: use Debug to print errors (#2086) 2026-04-09 09:45:55 +08:00
Luna Yao c5fbd29c0e ci: fix skip condition for draft pull requests in CI workflows (#2088)
* ci: run xxx-result only when pre_job is run successfully
* fix get-result steps
2026-04-09 09:45:04 +08:00
Luna Yao 26b1794723 ci: accecelerate pipeline (#2078)
* enable concurrency

pr

* do not run build on draft PRs

pr

* enable fail-fast for build workflows
2026-04-08 08:43:03 +08:00
Luna Yao 371b4b70a3 proto(utils): add TransientDigest trait (#2071) 2026-04-08 00:06:48 +08:00
Luna Yao b2cc38ee63 chore(clippy): disallow some methods from itertools (#2075) 2026-04-07 16:27:33 +08:00
Luna Yao 79b562cdc9 drop peer_mgr in time (#2064) 2026-04-06 11:31:05 +08:00
fanyang e3f089251c fix(ospf): mitigate route sync storm under connection flapping (#2063)
Addresses issue #2016 where nodes behind unstable networks
(e.g. campus firewalls) cause excessive traffic that can freeze
the remote node.

Two changes in peer_ospf_route.rs:

- Make do_sync_route_info only trigger reverse sync_now when
  incoming data actually changed the route table or foreign
  network state.  The previous unconditional sync_now created
  an A->B->A->B ping-pong cycle on every RPC exchange.

- Add exponential backoff (50ms..5s) to session_task retry loop.
  The previous fixed 50ms retry produced ~20 RPCs/s during
  sustained network instability.
2026-04-06 11:26:20 +08:00
fanyang cf6dcbc054 Fix IPv6 TCP tunnel display formatting (#1980)
Normalize composite tunnel display values before rendering peer and
debug output so IPv6 tunnel types no longer append `6` to the port.

- Preserve prefixes like `txt-` while converting tunnel schemes to
  their IPv6 form.
- Recover malformed values such as `txt-tcp://...:110106` into
  `txt-tcp6://...:11010`.
- Reuse the normalized remote address display in CLI debug output.
2026-04-05 22:12:55 +08:00
fanyang 2cf2b0fcac feat(cli): implement connector add/remove, drop peer stubs (#2058)
Implement the previously stubbed connector add/remove CLI commands
using PatchConfig RPC with InstanceConfigPatch.connectors, and
remove the peer add/remove stubs that had incorrect semantics.
2026-04-05 13:56:17 +08:00
dependabot[bot] aa0cca3bb6 build(deps): bump quinn-proto in /easytier-contrib/easytier-ohrs (#2059)
Bumps [quinn-proto](https://github.com/quinn-rs/quinn) from 0.11.13 to 0.11.14.
- [Release notes](https://github.com/quinn-rs/quinn/releases)
- [Commits](https://github.com/quinn-rs/quinn/compare/quinn-proto-0.11.13...quinn-proto-0.11.14)

---
updated-dependencies:
- dependency-name: quinn-proto
  dependency-version: 0.11.14
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-04-05 13:16:33 +08:00
KKRainbow fb59f01058 fix: reconcile webhook-managed configs and make disable_p2p more intelligent (#2057)
* reconcile infra configs on webhook validate
* make disable_p2p more intelligent
* fix stats
2026-04-04 23:41:57 +08:00
Luna Yao e91a0da70a refactor: listener/connector protocol abstraction (#2026)
* fix listener protocol detection
* replace IpProtocol with IpNextHeaderProtocol
* use an enum to gather all listener schemes
* rename ListenerScheme to TunnelScheme; replace IpNextHeaderProtocols with socket2::Protocol
* move TunnelScheme to tunnel
* add IpScheme, simplify connector creation
* format; fix some typos; remove check_scheme_...;
* remove PROTO_PORT_OFFSET
* rename WSTunnel.. -> WsTunnel.., DNSTunnel.. -> DnsTunnel..
2026-04-04 10:55:58 +08:00
Luna Yao 9cc617ae4c ci: build rpm package (#2044)
* add rpm to ci
* rename build_filter to build-filter
* use prepare-pnpm action
2026-04-04 10:32:08 +08:00
韩嘉乐 e4b0f1f1bb Rename libeasytier_ohrs.so to libeasytier_release.so when build release package (#2056)
Rename shared library file for release.
2026-04-04 10:29:37 +08:00
Luna Yao 443c3ca0b3 fix: append address of reverse proxy to remote_addr (#2034)
* append address of reverse proxy to remote_addr
* validate proxy address in test
2026-03-30 16:48:23 +08:00
Luna Yao 55a0e5952c chore: use cfg_aliases for mobile (#2033) 2026-03-30 16:38:39 +08:00
KKRainbow 1dff388717 bump version to v2.6.0 (#2039) 2026-03-30 15:50:07 +08:00
Luna Yao 61c741f887 add BoxExt trait (#2036) 2026-03-30 13:25:53 +08:00
ParkGarden 01dd9a05c3 fix: 重构了 Magisk 模块的 easytier_core.sh, action.sh, uninstall.sh 三个脚本的逻辑,优化参数解析与进程管理,调整措辞 (#1964) 2026-03-30 13:18:42 +08:00
KKRainbow 8c19a2293c fix(windows): avoid pnet interface enumeration panic (#2031) 2026-03-29 23:16:44 +08:00
KKRainbow a1bec48dc9 fix android vpn permission grant (#2023)
* fix android vpn permission grant
* fix url input behaviour
2026-03-29 23:16:32 +08:00
KKRainbow 7e289865b2 fix(faketcp): avoid pnet interface lookup on windows (#2029) 2026-03-29 19:26:29 +08:00
fanyang 742c7edd57 fix: use default connection loss rate for peer stats (#2030) 2026-03-29 19:25:25 +08:00
Luna Yao b71a2889ef suppress clippy warnings when no feature flags are enabled (#2028) 2026-03-29 11:02:23 +08:00
KKRainbow bcd75d6ce3 Add instance recv limiter in peer conn (#2027) 2026-03-29 10:28:02 +08:00
Luna Yao d4c1b0e867 fix: read X-Forwarded-For from HTTP header of WS/WSS (#2019) 2026-03-28 22:20:46 +08:00
KKRainbow b037ea9c3f Relax private mode foreign network secret checks (#2022) 2026-03-28 22:19:23 +08:00
243 changed files with 11146 additions and 5628 deletions
+35 -54
View File
@@ -1,29 +1,40 @@
[target.x86_64-unknown-linux-musl] # region Native
linker = "rust-lld"
rustflags = ["-C", "linker-flavor=ld.lld"] [target.x86_64-unknown-linux-gnu]
rustflags = ["-C", "link-arg=-fuse-ld=mold"]
[target.aarch64-unknown-linux-gnu] [target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc" rustflags = ["-C", "link-arg=-fuse-ld=mold"]
[target.aarch64-unknown-linux-ohos] [target.'cfg(all(windows, target_env = "msvc"))']
ar = "/usr/local/ohos-sdk/linux/native/llvm/bin/llvm-ar" rustflags = ["-C", "target-feature=+crt-static"]
linker = "/home/runner/sdk/native/llvm/aarch64-unknown-linux-ohos-clang.sh"
[target.aarch64-unknown-linux-ohos.env] # region
PKG_CONFIG_PATH = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib/pkgconfig:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib/pkgconfig"
PKG_CONFIG_LIBDIR = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib" # region CI
PKG_CONFIG_SYSROOT_DIR = "/usr/local/ohos-sdk/linux/native/sysroot"
SYSROOT = "/usr/local/ohos-sdk/linux/native/sysroot" [target.x86_64-unknown-linux-musl]
rustflags = ["-C", "target-feature=+crt-static"]
[target.aarch64-unknown-linux-musl] [target.aarch64-unknown-linux-musl]
linker = "aarch64-unknown-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"] rustflags = ["-C", "target-feature=+crt-static"]
[target.riscv64gc-unknown-linux-musl] [target.riscv64gc-unknown-linux-musl]
linker = "riscv64-unknown-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"] rustflags = ["-C", "target-feature=+crt-static"]
[target.'cfg(all(windows, target_env = "msvc"))'] [target.armv7-unknown-linux-musleabihf]
rustflags = ["-C", "target-feature=+crt-static"]
[target.armv7-unknown-linux-musleabi]
rustflags = ["-C", "target-feature=+crt-static"]
[target.arm-unknown-linux-musleabihf]
rustflags = ["-C", "target-feature=+crt-static"]
[target.arm-unknown-linux-musleabi]
rustflags = ["-C", "target-feature=+crt-static"]
[target.loongarch64-unknown-linux-musl]
rustflags = ["-C", "target-feature=+crt-static"] rustflags = ["-C", "target-feature=+crt-static"]
[target.mipsel-unknown-linux-musl] [target.mipsel-unknown-linux-musl]
@@ -64,44 +75,14 @@ rustflags = [
"gcc", "gcc",
] ]
[target.armv7-unknown-linux-musleabihf] [target.aarch64-unknown-linux-ohos]
linker = "armv7-unknown-linux-musleabihf-gcc" ar = "/usr/local/ohos-sdk/linux/native/llvm/bin/llvm-ar"
rustflags = ["-C", "target-feature=+crt-static"] linker = "/home/runner/sdk/native/llvm/aarch64-unknown-linux-ohos-clang.sh"
[target.armv7-unknown-linux-musleabi] [target.aarch64-unknown-linux-ohos.env]
linker = "armv7-unknown-linux-musleabi-gcc" PKG_CONFIG_PATH = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib/pkgconfig:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib/pkgconfig"
rustflags = ["-C", "target-feature=+crt-static"] PKG_CONFIG_LIBDIR = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib"
PKG_CONFIG_SYSROOT_DIR = "/usr/local/ohos-sdk/linux/native/sysroot"
SYSROOT = "/usr/local/ohos-sdk/linux/native/sysroot"
[target.loongarch64-unknown-linux-musl] # endregion
linker = "loongarch64-unknown-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.arm-unknown-linux-musleabihf]
linker = "arm-unknown-linux-musleabihf-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/arm-unknown-linux-musleabihf/arm-unknown-linux-musleabihf/lib",
"-L",
"./musl_gcc/arm-unknown-linux-musleabihf/lib/gcc/arm-unknown-linux-musleabihf/15.1.0",
"-l",
"atomic",
"-l",
"gcc",
]
[target.arm-unknown-linux-musleabi]
linker = "arm-unknown-linux-musleabi-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/arm-unknown-linux-musleabi/arm-unknown-linux-musleabi/lib",
"-L",
"./musl_gcc/arm-unknown-linux-musleabi/lib/gcc/arm-unknown-linux-musleabi/15.1.0",
"-l",
"atomic",
"-l",
"gcc",
]
+60 -13
View File
@@ -2,10 +2,17 @@ name: prepare-build
author: Luna author: Luna
description: Prepare build environment description: Prepare build environment
inputs: inputs:
web: target:
description: 'Whether to prepare the web build environment' description: 'The target to build for'
required: false
pnpm:
description: 'Whether to run pnpm build'
required: true required: true
default: 'true' default: 'true'
pnpm-build-filter:
description: 'The filter argument for pnpm build (e.g. ./easytier-web/*)'
required: false
default: './easytier-web/*'
gui: gui:
description: 'Whether to prepare the GUI build environment' description: 'Whether to prepare the GUI build environment'
required: true required: true
@@ -19,21 +26,61 @@ runs:
- run: mkdir -p easytier-gui/dist - run: mkdir -p easytier-gui/dist
shell: bash shell: bash
- name: Setup Frontend Environment - name: Install dependencies
if: ${{ inputs.web == 'true' }} if: ${{ runner.os == 'Linux' }}
uses: ./.github/actions/prepare-pnpm
with:
build_filter: './easytier-web/*'
- name: Install GUI dependencies (Used by clippy)
if: ${{ inputs.gui == 'true' }}
run: | run: |
bash ./.github/workflows/install_gui_dep.sh sudo apt-get update
sudo apt-get install -qqy build-essential mold musl-tools
shell: bash shell: bash
- name: Install Rust - name: Setup Frontend Environment
if: ${{ inputs.pnpm == 'true' }}
uses: ./.github/actions/prepare-pnpm
with:
build-filter: ${{ inputs.pnpm-build-filter }}
- name: Install GUI dependencies (Linux)
if: ${{ inputs.gui == 'true' && runner.os == 'Linux' }}
run: | run: |
bash ./.github/workflows/install_rust.sh sudo apt-get install -qq xdg-utils \
libappindicator3-dev \
libgtk-3-dev \
librsvg2-dev \
libwebkit2gtk-4.1-dev \
libxdo-dev
shell: bash
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: 1.95
target: ${{ !contains(inputs.target, 'mips') && inputs.target || '' }}
components: ${{ contains(inputs.target, 'mips') && 'rust-src' || '' }}
cache: false
rustflags: ''
- name: Install Rust (MIPS)
if: ${{ contains(inputs.target, 'mips') }}
run: |
MUSL_TARGET=${{ inputs.target }}sf
mkdir -p ./musl_gcc
wget --inet4-only -c https://github.com/cross-tools/musl-cross/releases/download/20250520/${MUSL_TARGET}.tar.xz -P ./musl_gcc/
tar xf ./musl_gcc/${MUSL_TARGET}.tar.xz -C ./musl_gcc/
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/bin/*gcc /usr/bin/
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/include/ /usr/include/musl-cross
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/${MUSL_TARGET}/sysroot/ ./musl_gcc/sysroot
sudo chmod -R a+rwx ./musl_gcc
if [[ -d "./musl_gcc/sysroot" ]]; then
echo "BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$(readlink -f ./musl_gcc/sysroot)" >> $GITHUB_ENV
fi
cd "$PWD/musl_gcc/${MUSL_TARGET}/lib/gcc/${MUSL_TARGET}/15.1.0" || exit 255
# for panic-abort
cp libgcc_eh.a libunwind.a
# for mimalloc
ar x libgcc.a _ctzsi2.o _clz.o _bswapsi2.o
ar rcs libctz.a _ctzsi2.o _clz.o _bswapsi2.o
shell: bash shell: bash
- name: Setup protoc - name: Setup protoc
+13 -7
View File
@@ -3,20 +3,21 @@ author: Luna
description: 'Setup Node.js, pnpm, and install dependencies' description: 'Setup Node.js, pnpm, and install dependencies'
inputs: inputs:
build_filter: build-filter:
description: 'The filter argument for pnpm build (e.g. ./easytier-web/*)' description: 'The filter argument for pnpm build (e.g. ./easytier-web/*)'
required: true required: false
default: ''
runs: runs:
using: "composite" using: "composite"
steps: steps:
- name: Setup Node.js - name: Setup Node.js
uses: actions/setup-node@v4 uses: actions/setup-node@v5
with: with:
node-version: 22 node-version: 22
- name: Install pnpm - name: Install pnpm
uses: pnpm/action-setup@v4 uses: pnpm/action-setup@v5
with: with:
version: 10 version: 10
run_install: false run_install: false
@@ -27,7 +28,7 @@ runs:
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache - name: Setup pnpm cache
uses: actions/cache@v4 uses: actions/cache@v5
with: with:
path: ${{ env.STORE_PATH }} path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
@@ -38,5 +39,10 @@ runs:
shell: bash shell: bash
run: | run: |
pnpm -r install pnpm -r install
echo "Building with filter: ${{ inputs.build_filter }}" if [ -n "${{ inputs.build-filter }}" ]; then
pnpm -r --filter "${{ inputs.build_filter }}" build echo "Building with filter: ${{ inputs.build-filter }}"
pnpm -r --filter "${{ inputs.build-filter }}" build
else
echo "No build filter provided, building all packages"
pnpm -r build
fi
+132 -181
View File
@@ -2,9 +2,14 @@ name: EasyTier Core
on: on:
push: push:
branches: ["develop", "main", "releases/**"] branches: [ "develop", "main", "releases/**" ]
pull_request: pull_request:
branches: ["develop", "main"] branches: [ "develop", "main" ]
types: [ opened, synchronize, reopened, ready_for_review ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
@@ -18,6 +23,7 @@ jobs:
pre_job: pre_job:
# continue-on-error: true # Uncomment once integration is finished # continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
# Map a step output to a job output # Map a step output to a job output
outputs: outputs:
# do not skip push on branch starts with releases/ # do not skip push on branch starts with releases/
@@ -30,85 +36,69 @@ jobs:
concurrent_skipping: 'same_content_newer' concurrent_skipping: 'same_content_newer'
skip_after_successful_duplicate: 'true' skip_after_successful_duplicate: 'true'
cancel_others: 'true' cancel_others: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml", ".github/workflows/install_rust.sh", "easytier-web/**"]' paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml", ".github/actions/**", "easytier-web/**"]'
build_web: build_web:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: pre_job needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v5
- uses: actions/setup-node@v4 - name: Setup Frontend Environment
uses: ./.github/actions/prepare-pnpm
with: with:
node-version: 22 build-filter: './easytier-web/*'
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r --filter "./easytier-web/*" build
- name: Archive artifact - name: Archive artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v5
with: with:
name: easytier-web-dashboard name: easytier-web-dashboard
path: | path: |
easytier-web/frontend/dist/* easytier-web/frontend/dist/*
build: build:
strategy: strategy:
fail-fast: false fail-fast: true
matrix: matrix:
include: include:
- TARGET: aarch64-unknown-linux-musl
OS: ubuntu-22.04
ARTIFACT_NAME: linux-aarch64
- TARGET: x86_64-unknown-linux-musl - TARGET: x86_64-unknown-linux-musl
OS: ubuntu-22.04 OS: ubuntu-24.04
ARTIFACT_NAME: linux-x86_64 ARTIFACT_NAME: linux-x86_64
- TARGET: riscv64gc-unknown-linux-musl - TARGET: aarch64-unknown-linux-musl
OS: ubuntu-22.04 OS: ubuntu-24.04-arm
ARTIFACT_NAME: linux-riscv64 ARTIFACT_NAME: linux-aarch64
- TARGET: mips-unknown-linux-musl
OS: ubuntu-22.04
ARTIFACT_NAME: linux-mips
- TARGET: mipsel-unknown-linux-musl
OS: ubuntu-22.04
ARTIFACT_NAME: linux-mipsel
- TARGET: armv7-unknown-linux-musleabihf # raspberry pi 2-3-4, not tested
OS: ubuntu-22.04
ARTIFACT_NAME: linux-armv7hf
- TARGET: armv7-unknown-linux-musleabi # raspberry pi 2-3-4, not tested
OS: ubuntu-22.04
ARTIFACT_NAME: linux-armv7
- TARGET: arm-unknown-linux-musleabihf # raspberry pi 0-1, not tested
OS: ubuntu-22.04
ARTIFACT_NAME: linux-armhf
- TARGET: arm-unknown-linux-musleabi # raspberry pi 0-1, not tested
OS: ubuntu-22.04
ARTIFACT_NAME: linux-arm
- TARGET: riscv64gc-unknown-linux-musl
OS: ubuntu-24.04
ARTIFACT_NAME: linux-riscv64
- TARGET: loongarch64-unknown-linux-musl - TARGET: loongarch64-unknown-linux-musl
OS: ubuntu-24.04 OS: ubuntu-24.04
ARTIFACT_NAME: linux-loongarch64 ARTIFACT_NAME: linux-loongarch64
- TARGET: armv7-unknown-linux-musleabihf # raspberry pi 2-3-4, not tested
OS: ubuntu-24.04
ARTIFACT_NAME: linux-armv7hf
- TARGET: armv7-unknown-linux-musleabi # raspberry pi 2-3-4, not tested
OS: ubuntu-24.04
ARTIFACT_NAME: linux-armv7
- TARGET: arm-unknown-linux-musleabihf # raspberry pi 0-1, not tested
OS: ubuntu-24.04
ARTIFACT_NAME: linux-armhf
- TARGET: arm-unknown-linux-musleabi # raspberry pi 0-1, not tested
OS: ubuntu-24.04
ARTIFACT_NAME: linux-arm
- TARGET: mips-unknown-linux-musl
OS: ubuntu-24.04
ARTIFACT_NAME: linux-mips
- TARGET: mipsel-unknown-linux-musl
OS: ubuntu-24.04
ARTIFACT_NAME: linux-mipsel
- TARGET: x86_64-unknown-freebsd
OS: ubuntu-24.04
ARTIFACT_NAME: freebsd-13.2-x86_64
BSD_VERSION: 13.2
- TARGET: x86_64-apple-darwin - TARGET: x86_64-apple-darwin
OS: macos-latest OS: macos-latest
ARTIFACT_NAME: macos-x86_64 ARTIFACT_NAME: macos-x86_64
@@ -119,17 +109,12 @@ jobs:
- TARGET: x86_64-pc-windows-msvc - TARGET: x86_64-pc-windows-msvc
OS: windows-latest OS: windows-latest
ARTIFACT_NAME: windows-x86_64 ARTIFACT_NAME: windows-x86_64
- TARGET: aarch64-pc-windows-msvc
OS: windows-latest
ARTIFACT_NAME: windows-arm64
- TARGET: i686-pc-windows-msvc - TARGET: i686-pc-windows-msvc
OS: windows-latest OS: windows-latest
ARTIFACT_NAME: windows-i686 ARTIFACT_NAME: windows-i686
- TARGET: aarch64-pc-windows-msvc
- TARGET: x86_64-unknown-freebsd OS: windows-11-arm
OS: ubuntu-22.04 ARTIFACT_NAME: windows-arm64
ARTIFACT_NAME: freebsd-13.2-x86_64
BSD_VERSION: 13.2
runs-on: ${{ matrix.OS }} runs-on: ${{ matrix.OS }}
env: env:
@@ -142,7 +127,7 @@ jobs:
- build_web - build_web
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v5
- name: Set current ref as env variable - name: Set current ref as env variable
run: | run: |
@@ -154,8 +139,15 @@ jobs:
name: easytier-web-dashboard name: easytier-web-dashboard
path: easytier-web/frontend/dist/ path: easytier-web/frontend/dist/
- name: Prepare build environment
uses: ./.github/actions/prepare-build
with:
target: ${{ matrix.TARGET }}
gui: true
pnpm: true
token: ${{ secrets.GITHUB_TOKEN }}
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
with: with:
# The prefix cache key, this can be changed to start a new cache manually. # The prefix cache key, this can be changed to start a new cache manually.
# default: "v0-rust" # default: "v0-rust"
@@ -163,96 +155,51 @@ jobs:
shared-key: "core-registry" shared-key: "core-registry"
cache-targets: "false" cache-targets: "false"
- name: Setup protoc - uses: mlugg/setup-zig@v2
uses: arduino/setup-protoc@v3 if: ${{ contains(matrix.OS, 'ubuntu') }}
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Build Core & Cli - uses: taiki-e/install-action@v2
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }} if: ${{ contains(matrix.OS, 'ubuntu') }}
with:
tool: cargo-zigbuild
- name: Build
if: ${{ !contains(matrix.TARGET, 'mips') }}
run: | run: |
bash ./.github/workflows/install_rust.sh if [[ "$TARGET" == *windows* ]]; then
SUFFIX=.exe
# loongarch need llvm-18
if [[ $TARGET =~ ^loongarch.*$ ]]; then
sudo apt-get install -qq llvm-18 clang-18
export LLVM_CONFIG_PATH=/usr/lib/llvm-18/bin/llvm-config
fi
# we set the sysroot when sysroot is a dir
# this dir is a soft link generated by install_rust.sh
# kcp-sys need this to gen ffi bindings. without this clang may fail to find some libc headers such as bits/libc-header-start.h
if [[ -d "./musl_gcc/sysroot" ]]; then
export BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$(readlink -f ./musl_gcc/sysroot)
fi
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cargo +nightly-2026-02-02 build -r --target $TARGET -Z build-std=std,panic_abort --package=easytier --features=jemalloc
else else
if [[ $OS =~ ^windows.*$ ]]; then SUFFIX=""
SUFFIX=.exe
CORE_FEATURES="--features=mimalloc"
elif [[ $TARGET =~ ^riscv64.*$ || $TARGET =~ ^loongarch64.*$ || $TARGET =~ ^aarch64.*$ ]]; then
CORE_FEATURES="--features=mimalloc"
else
CORE_FEATURES="--features=jemalloc"
fi
cargo build --release --target $TARGET --package=easytier-web --features=embed
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
cargo build --release --target $TARGET $CORE_FEATURES
fi fi
# Copied and slightly modified from @lmq8267 (https://github.com/lmq8267) if [[ "$TARGET" =~ (x86_64-unknown-linux-musl|aarch64-unknown-linux-musl|windows|darwin) ]]; then
- name: Build Core & Cli (X86_64 FreeBSD) BUILD=build
uses: vmactions/freebsd-vm@670398e4236735b8b65805c3da44b7a511fb8b27 else
if: ${{ endsWith(matrix.TARGET, 'freebsd') }} BUILD=zigbuild
fi
if [[ "$TARGET" =~ ^(riscv64|loongarch64|aarch64).*$ || "$TARGET" =~ windows ]]; then
FEATURES="mimalloc"
else
FEATURES="jemalloc"
fi
cargo $BUILD --release --target $TARGET --package=easytier-web --features=embed
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
cargo $BUILD --release --target $TARGET --features=$FEATURES
- name: Build (MIPS)
if: ${{ contains(matrix.TARGET, 'mips') }}
env: env:
TARGET: ${{ matrix.TARGET }} RUSTC_BOOTSTRAP: 1
with: run: |
envs: TARGET cargo build -r --target $TARGET -Z build-std=std,panic_abort --package=easytier --features=jemalloc
release: ${{ matrix.BSD_VERSION }}
arch: x86_64
usesh: true
mem: 6144
cpu: 4
run: |
uname -a
echo $SHELL
pwd
ls -lah
whoami
env | sort
pkg install -y git protobuf llvm-devel sudo curl
curl --proto 'https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
. $HOME/.cargo/env
rustup set auto-self-update disable
rustup install 1.93
rustup default 1.93
export CC=clang
export CXX=clang++
export CARGO_TERM_COLOR=always
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed
mv ./target/$TARGET/release/easytier-web ./target/$TARGET/release/easytier-web-embed
cargo build --release --verbose --target $TARGET --features=mimalloc
mkdir -p built-bins/$TARGET/release/
mv ./target/$TARGET/release/easytier-web-embed ./built-bins/$TARGET/release/easytier-web-embed
mv ./target/$TARGET/release/easytier-web ./built-bins/$TARGET/release/easytier-web
mv ./target/$TARGET/release/easytier-core ./built-bins/$TARGET/release/easytier-core
mv ./target/$TARGET/release/easytier-cli ./built-bins/$TARGET/release/easytier-cli
# remove dirs to avoid copy many files back
rm -rf ./target ~/.cargo
mv ./built-bins ./target
- name: Compress - name: Compress
run: | run: |
mkdir -p ./artifacts/objects/ mkdir -p ./artifacts/objects/
# windows is the only OS using a different convention for executable file name # windows is the only OS using a different convention for executable file name
if [[ $OS =~ ^windows.*$ ]]; then if [[ $OS =~ ^windows.*$ ]]; then
SUFFIX=.exe SUFFIX=.exe
@@ -265,59 +212,55 @@ jobs:
find "easytier/third_party/${ARCH_DIR}" -maxdepth 1 -type f \( -name "*.dll" -o -name "*.sys" \) -exec cp {} ./artifacts/objects/ \; find "easytier/third_party/${ARCH_DIR}" -maxdepth 1 -type f \( -name "*.dll" -o -name "*.sys" \) -exec cp {} ./artifacts/objects/ \;
fi fi
fi fi
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
TAG=$GITHUB_REF_NAME TAG=$GITHUB_REF_NAME
else else
TAG=$GITHUB_SHA TAG=$GITHUB_SHA
fi fi
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ && ! $TARGET =~ ^loongarch.*$ && ! $TARGET =~ ^riscv64.*$ ]]; then if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ (loongarch|freebsd) ]]; then
UPX_VERSION=4.2.4 HOST_ARCH=$(uname -m)
curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf - case $HOST_ARCH in
cp upx-${UPX_VERSION}-amd64_linux/upx . x86_64) UPX_ARCH="amd64" ;;
./upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX" aarch64) UPX_ARCH="arm64" ;;
./upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX" *) UPX_ARCH="amd64" ;;
esac
UPX_VERSION=5.1.1
UPX_PKG="upx-${UPX_VERSION}-${UPX_ARCH}_linux"
curl -L "https://github.com/upx/upx/releases/download/v${UPX_VERSION}/${UPX_PKG}.tar.xz" -s | tar xJvf -
cp "${UPX_PKG}/upx" .
UPX_BIN=./upx
fi fi
mv ./target/$TARGET/release/easytier-core"$SUFFIX" ./artifacts/objects/ for BIN in ./target/$TARGET/release/easytier-{core,cli,web,web-embed}"$SUFFIX"; do
mv ./target/$TARGET/release/easytier-cli"$SUFFIX" ./artifacts/objects/ if [[ -f "$BIN" ]]; then
if [[ ! $TARGET =~ ^mips.*$ ]]; then if [[ -n "$UPX_BIN" ]]; then
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./artifacts/objects/ $UPX_BIN --lzma --best "$BIN" || true
mv ./target/$TARGET/release/easytier-web-embed"$SUFFIX" ./artifacts/objects/ fi
fi
mv "$BIN" ./artifacts/objects/
fi
done
mv ./artifacts/objects/* ./artifacts/ mv ./artifacts/objects/* ./artifacts/
rm -rf ./artifacts/objects/ rm -rf ./artifacts/objects/
- name: Archive artifact - name: Archive artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v5
with: with:
name: easytier-${{ matrix.ARTIFACT_NAME }} name: easytier-${{ matrix.ARTIFACT_NAME }}
path: | path: |
./artifacts/* ./artifacts/*
core-result: build_magisk:
if: needs.pre_job.outputs.should_skip != 'true' && always()
runs-on: ubuntu-latest
needs:
- pre_job
- build_web
- build
steps:
- name: Mark result as failed
if: needs.build.result != 'success'
run: exit 1
magisk_build:
needs:
- pre_job
- build_web
- build
if: needs.pre_job.outputs.should_skip != 'true' && always()
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: [ pre_job, build_web, build ]
if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
steps: steps:
- name: Checkout Code - name: Checkout Code
uses: actions/checkout@v4 # 必须先检出代码才能获取模块配置 uses: actions/checkout@v5 # 必须先检出代码才能获取模块配置
# 下载二进制文件到独立目录 # 下载二进制文件到独立目录
- name: Download Linux aarch64 binaries - name: Download Linux aarch64 binaries
@@ -334,10 +277,9 @@ jobs:
cp ./downloaded-binaries/easytier-cli ./easytier-contrib/easytier-magisk/ cp ./downloaded-binaries/easytier-cli ./easytier-contrib/easytier-magisk/
cp ./downloaded-binaries/easytier-web ./easytier-contrib/easytier-magisk/ cp ./downloaded-binaries/easytier-web ./easytier-contrib/easytier-magisk/
# 上传生成的模块 # 上传生成的模块
- name: Upload Magisk Module - name: Upload Magisk Module
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v5
with: with:
name: Easytier-Magisk name: Easytier-Magisk
path: | path: |
@@ -345,3 +287,12 @@ jobs:
!./easytier-contrib/easytier-magisk/build.sh !./easytier-contrib/easytier-magisk/build.sh
!./easytier-contrib/easytier-magisk/magisk_update.json !./easytier-contrib/easytier-magisk/magisk_update.json
if-no-files-found: error if-no-files-found: error
core-result:
runs-on: ubuntu-latest
needs: [ pre_job, build_web, build, build_magisk ]
if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
steps:
- name: Mark result as failed
if: contains(needs.*.result, 'failure')
run: exit 1
+2 -2
View File
@@ -11,7 +11,7 @@ on:
image_tag: image_tag:
description: 'Tag for this image build' description: 'Tag for this image build'
type: string type: string
default: 'v2.5.0' default: 'v2.6.0'
required: true required: true
mark_latest: mark_latest:
description: 'Mark this image as latest' description: 'Mark this image as latest'
@@ -31,7 +31,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
- -
name: Validate inputs name: Validate inputs
run: | run: |
+40 -105
View File
@@ -5,6 +5,11 @@ on:
branches: ["develop", "main", "releases/**"] branches: ["develop", "main", "releases/**"]
pull_request: pull_request:
branches: ["develop", "main"] branches: ["develop", "main"]
types: [opened, synchronize, reopened, ready_for_review]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
@@ -18,6 +23,7 @@ jobs:
pre_job: pre_job:
# continue-on-error: true # Uncomment once integration is finished # continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
# Map a step output to a job output # Map a step output to a job output
outputs: outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && !startsWith(github.ref_name, 'releases/') }} should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && !startsWith(github.ref_name, 'releases/') }}
@@ -29,20 +35,20 @@ jobs:
concurrent_skipping: 'same_content_newer' concurrent_skipping: 'same_content_newer'
skip_after_successful_duplicate: 'true' skip_after_successful_duplicate: 'true'
cancel_others: 'true' cancel_others: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/workflows/install_rust.sh", ".github/workflows/install_gui_dep.sh", "easytier-web/frontend-lib/**"]' paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/actions/**", "easytier-web/frontend-lib/**"]'
build-gui: build-gui:
strategy: strategy:
fail-fast: false fail-fast: true
matrix: matrix:
include: include:
- TARGET: aarch64-unknown-linux-musl
OS: ubuntu-22.04
GUI_TARGET: aarch64-unknown-linux-gnu
ARTIFACT_NAME: linux-aarch64
- TARGET: x86_64-unknown-linux-musl - TARGET: x86_64-unknown-linux-musl
OS: ubuntu-22.04 OS: ubuntu-24.04
GUI_TARGET: x86_64-unknown-linux-gnu GUI_TARGET: x86_64-unknown-linux-gnu
ARTIFACT_NAME: linux-x86_64 ARTIFACT_NAME: linux-x86_64
- TARGET: aarch64-unknown-linux-musl
OS: ubuntu-24.04-arm
GUI_TARGET: aarch64-unknown-linux-gnu
ARTIFACT_NAME: linux-aarch64
- TARGET: x86_64-apple-darwin - TARGET: x86_64-apple-darwin
OS: macos-latest OS: macos-latest
@@ -57,16 +63,14 @@ jobs:
OS: windows-latest OS: windows-latest
GUI_TARGET: x86_64-pc-windows-msvc GUI_TARGET: x86_64-pc-windows-msvc
ARTIFACT_NAME: windows-x86_64 ARTIFACT_NAME: windows-x86_64
- TARGET: aarch64-pc-windows-msvc
OS: windows-latest
GUI_TARGET: aarch64-pc-windows-msvc
ARTIFACT_NAME: windows-arm64
- TARGET: i686-pc-windows-msvc - TARGET: i686-pc-windows-msvc
OS: windows-latest OS: windows-latest
GUI_TARGET: i686-pc-windows-msvc GUI_TARGET: i686-pc-windows-msvc
ARTIFACT_NAME: windows-i686 ARTIFACT_NAME: windows-i686
- TARGET: aarch64-pc-windows-msvc
OS: windows-11-arm
GUI_TARGET: aarch64-pc-windows-msvc
ARTIFACT_NAME: windows-arm64
runs-on: ${{ matrix.OS }} runs-on: ${{ matrix.OS }}
env: env:
@@ -78,96 +82,31 @@ jobs:
needs: pre_job needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v5
- name: Install GUI dependencies (x86 only)
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
run: bash ./.github/workflows/install_gui_dep.sh
- name: Install GUI cross compile (aarch64 only)
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
run: |
# see https://tauri.app/v1/guides/building/linux/
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
sudo dpkg --add-architecture arm64
sudo apt update
sudo apt install aptitude
sudo aptitude install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64 \
libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64 \
libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu libsoup-3.0-dev:arm64 libjavascriptcoregtk-4.1-dev:arm64
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
- name: Set current ref as env variable - name: Set current ref as env variable
run: | run: |
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV
- uses: actions/setup-node@v4 - name: Prepare build environment
uses: ./.github/actions/prepare-build
with: with:
node-version: 22 target: ${{ matrix.TARGET }}
gui: true
- name: Install pnpm pnpm: true
uses: pnpm/action-setup@v4 pnpm-build-filter: ''
with: token: ${{ secrets.GITHUB_TOKEN }}
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r build
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
with: with:
# The prefix cache key, this can be changed to start a new cache manually. # The prefix cache key, this can be changed to start a new cache manually.
# default: "v0-rust" # default: "v0-rust"
prefix-key: "" prefix-key: ""
shared-key: "gui-registry"
- name: Install rust target cache-targets: "false"
run: bash ./.github/workflows/install_rust.sh
- name: Setup protoc
uses: arduino/setup-protoc@v3
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: copy correct DLLs - name: copy correct DLLs
if: ${{ matrix.OS == 'windows-latest' }} if: ${{ contains(matrix.GUI_TARGET, 'windows') }}
run: | run: |
case $TARGET in case $TARGET in
x86_64*) ARCH_DIR=x86_64 ;; x86_64*) ARCH_DIR=x86_64 ;;
@@ -183,10 +122,9 @@ jobs:
uses: tauri-apps/tauri-action@v0 uses: tauri-apps/tauri-action@v0
with: with:
projectPath: ./easytier-gui projectPath: ./easytier-gui
# https://tauri.app/v1/guides/building/linux/#cross-compiling-tauri-applications-for-arm-based-devices args: --verbose --target ${{ matrix.GUI_TARGET }}
args: --verbose --target ${{ matrix.GUI_TARGET }} ${{ matrix.OS == 'ubuntu-22.04' && contains(matrix.TARGET, 'aarch64') && '--bundles deb' || '' }}
- name: Compress - name: Collect artifact
run: | run: |
mkdir -p ./artifacts/objects/ mkdir -p ./artifacts/objects/
@@ -195,36 +133,33 @@ jobs:
else else
TAG=$GITHUB_SHA TAG=$GITHUB_SHA
fi fi
# copy gui bundle, gui is built without specific target # copy gui bundle, gui is built without specific target
if [[ $OS =~ ^windows.*$ ]]; then if [[ $GUI_TARGET =~ windows ]]; then
mv ./target/$GUI_TARGET/release/bundle/nsis/*.exe ./artifacts/objects/ mv ./target/$GUI_TARGET/release/bundle/nsis/*.exe ./artifacts/objects/
elif [[ $OS =~ ^macos.*$ ]]; then elif [[ $GUI_TARGET =~ darwin ]]; then
mv ./target/$GUI_TARGET/release/bundle/dmg/*.dmg ./artifacts/objects/ mv ./target/$GUI_TARGET/release/bundle/dmg/*.dmg ./artifacts/objects/
elif [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^mips.*$ ]]; then elif [[ $GUI_TARGET =~ linux ]]; then
mv ./target/$GUI_TARGET/release/bundle/deb/*.deb ./artifacts/objects/ mv ./target/$GUI_TARGET/release/bundle/deb/*.deb ./artifacts/objects/
if [[ $GUI_TARGET =~ ^x86_64.*$ ]]; then mv ./target/$GUI_TARGET/release/bundle/rpm/*.rpm ./artifacts/objects/
# currently only x86 appimage is supported mv ./target/$GUI_TARGET/release/bundle/appimage/*.AppImage ./artifacts/objects/
mv ./target/$GUI_TARGET/release/bundle/appimage/*.AppImage ./artifacts/objects/
fi
fi fi
mv ./artifacts/objects/* ./artifacts/ mv ./artifacts/objects/* ./artifacts/
rm -rf ./artifacts/objects/ rm -rf ./artifacts/objects/
- name: Archive artifact - name: Archive artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v5
with: with:
name: easytier-gui-${{ matrix.ARTIFACT_NAME }} name: easytier-gui-${{ matrix.ARTIFACT_NAME }}
path: | path: |
./artifacts/* ./artifacts/*
gui-result: gui-result:
if: needs.pre_job.outputs.should_skip != 'true' && always()
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: needs: [ pre_job, build-gui ]
- pre_job if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
- build-gui
steps: steps:
- name: Mark result as failed - name: Mark result as failed
if: needs.build-gui.result != 'success' if: contains(needs.*.result, 'failure')
run: exit 1 run: exit 1
-11
View File
@@ -1,11 +0,0 @@
sudo apt update
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf
-61
View File
@@ -1,61 +0,0 @@
#!/usr/bin/env bash
# env needed:
# - TARGET
# - GUI_TARGET
# - OS
# dependencies are only needed on ubuntu as that's the only place where
# we make cross-compilation
if [[ $OS =~ ^ubuntu.*$ ]]; then
sudo apt-get update && sudo apt-get install -qq musl-tools libappindicator3-dev llvm clang
# https://github.com/cross-tools/musl-cross/releases
# if "musl" is a substring of TARGET, we assume that we are using musl
MUSL_TARGET=$TARGET
# if target is mips or mipsel, we should use soft-float version of musl
if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then
MUSL_TARGET=${TARGET}sf
elif [[ $TARGET =~ ^riscv64gc-.*$ ]]; then
MUSL_TARGET=${TARGET/#riscv64gc-/riscv64-}
fi
if [[ $MUSL_TARGET =~ musl ]]; then
mkdir -p ./musl_gcc
wget --inet4-only -c https://github.com/cross-tools/musl-cross/releases/download/20250520/${MUSL_TARGET}.tar.xz -P ./musl_gcc/
tar xf ./musl_gcc/${MUSL_TARGET}.tar.xz -C ./musl_gcc/
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/bin/*gcc /usr/bin/
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/include/ /usr/include/musl-cross
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/${MUSL_TARGET}/sysroot/ ./musl_gcc/sysroot
sudo chmod -R a+rwx ./musl_gcc
fi
fi
# see https://github.com/rust-lang/rustup/issues/3709
rustup set auto-self-update disable
rustup install 1.93
rustup default 1.93
# mips/mipsel cannot add target from rustup, need compile by ourselves
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cd "$PWD/musl_gcc/${MUSL_TARGET}/lib/gcc/${MUSL_TARGET}/15.1.0" || exit 255
# for panic-abort
cp libgcc_eh.a libunwind.a
# for mimalloc
ar x libgcc.a _ctzsi2.o _clz.o _bswapsi2.o
ar rcs libctz.a _ctzsi2.o _clz.o _bswapsi2.o
rustup toolchain install nightly-2026-02-02-x86_64-unknown-linux-gnu
rustup component add rust-src --toolchain nightly-2026-02-02-x86_64-unknown-linux-gnu
# https://github.com/rust-lang/rust/issues/128808
# remove it after Cargo or rustc fix this.
RUST_LIB_SRC=$HOME/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/
if [[ -f $RUST_LIB_SRC/library/Cargo.lock && ! -f $RUST_LIB_SRC/Cargo.lock ]]; then
cp -f $RUST_LIB_SRC/library/Cargo.lock $RUST_LIB_SRC/Cargo.lock
fi
else
rustup target add $TARGET
if [[ $GUI_TARGET != '' ]]; then
rustup target add $GUI_TARGET
fi
fi
+41 -63
View File
@@ -5,6 +5,11 @@ on:
branches: ["develop", "main", "releases/**"] branches: ["develop", "main", "releases/**"]
pull_request: pull_request:
branches: ["develop", "main"] branches: ["develop", "main"]
types: [opened, synchronize, reopened, ready_for_review]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
@@ -18,6 +23,7 @@ jobs:
pre_job: pre_job:
# continue-on-error: true # Uncomment once integration is finished # continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
# Map a step output to a job output # Map a step output to a job output
outputs: outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && !startsWith(github.ref_name, 'releases/') }} should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && !startsWith(github.ref_name, 'releases/') }}
@@ -29,25 +35,30 @@ jobs:
concurrent_skipping: 'same_content_newer' concurrent_skipping: 'same_content_newer'
skip_after_successful_duplicate: 'true' skip_after_successful_duplicate: 'true'
cancel_others: 'true' cancel_others: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", "tauri-plugin-vpnservice/**", ".github/workflows/mobile.yml", ".github/workflows/install_rust.sh"]' paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", "tauri-plugin-vpnservice/**", ".github/workflows/mobile.yml", ".github/actions/**"]'
build-mobile: build-mobile:
strategy: strategy:
fail-fast: false fail-fast: true
matrix: matrix:
include: include:
- TARGET: android - TARGET: aarch64-linux-android
OS: ubuntu-22.04 ARCH: aarch64
ARTIFACT_NAME: android - TARGET: armv7-linux-androideabi
runs-on: ${{ matrix.OS }} ARCH: armv7
- TARGET: i686-linux-android
ARCH: i686
- TARGET: x86_64-linux-android
ARCH: x86_64
runs-on: ubuntu-latest
env: env:
NAME: easytier NAME: easytier
TARGET: ${{ matrix.TARGET }} TARGET: ${{ matrix.TARGET }}
OS: ${{ matrix.OS }} ARCH: ${{ matrix.ARCH }}
OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }} OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }}
needs: pre_job needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v5
- name: Set current ref as env variable - name: Set current ref as env variable
run: | run: |
@@ -61,72 +72,41 @@ jobs:
- name: Setup Android SDK - name: Setup Android SDK
uses: android-actions/setup-android@v3 uses: android-actions/setup-android@v3
with: with:
cmdline-tools-version: 11076708 cmdline-tools-version: 12.0
packages: 'build-tools;34.0.0 ndk;26.0.10792818 tools platform-tools platforms;android-34 ' packages: 'build-tools;34.0.0 ndk;26.0.10792818 platform-tools platforms;android-34 '
- name: Setup Android Environment - name: Setup Android Environment
run: | run: |
echo "$ANDROID_HOME/platform-tools" >> $GITHUB_PATH echo "$ANDROID_HOME/platform-tools" >> $GITHUB_PATH
echo "$ANDROID_HOME/ndk/26.0.10792818/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH echo "$ANDROID_HOME/ndk/26.0.10792818/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH
echo "NDK_HOME=$ANDROID_HOME/ndk/26.0.10792818/" > $GITHUB_ENV echo "NDK_HOME=$ANDROID_HOME/ndk/26.0.10792818/" >> $GITHUB_ENV
- uses: actions/setup-node@v4 - name: Prepare build environment
uses: ./.github/actions/prepare-build
with: with:
node-version: 22 target: ${{ matrix.TARGET }}
gui: false
- name: Install pnpm pnpm: true
uses: pnpm/action-setup@v4 pnpm-build-filter: ''
with: token: ${{ secrets.GITHUB_TOKEN }}
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r build
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
with: with:
# The prefix cache key, this can be changed to start a new cache manually. # The prefix cache key, this can be changed to start a new cache manually.
# default: "v0-rust" # default: "v0-rust"
prefix-key: "" prefix-key: ""
shared-key: "gui-registry"
cache-targets: "false"
- name: Install rust target - name: Build
run: |
bash ./.github/workflows/install_rust.sh
rustup target add aarch64-linux-android
rustup target add armv7-linux-androideabi
rustup target add i686-linux-android
rustup target add x86_64-linux-android
- name: Setup protoc
uses: arduino/setup-protoc@v3
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Build Android
run: | run: |
cd easytier-gui cd easytier-gui
pnpm tauri android build pnpm tauri android build --apk --target "$ARCH" --split-per-abi
- name: Compress - name: Collect artifact
run: | run: |
mkdir -p ./artifacts/objects/ mkdir -p ./artifacts/objects/
mv easytier-gui/src-tauri/gen/android/app/build/outputs/apk/universal/release/app-universal-release.apk ./artifacts/objects/ mv easytier-gui/src-tauri/gen/android/app/build/outputs/apk/*/release/*.apk ./artifacts/objects/
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
TAG=$GITHUB_REF_NAME TAG=$GITHUB_REF_NAME
@@ -134,23 +114,21 @@ jobs:
TAG=$GITHUB_SHA TAG=$GITHUB_SHA
fi fi
mv ./artifacts/objects/* ./artifacts mv ./artifacts/objects/* ./artifacts/
rm -rf ./artifacts/objects/ rm -rf ./artifacts/objects/
- name: Archive artifact - name: Archive artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v5
with: with:
name: easytier-gui-${{ matrix.ARTIFACT_NAME }} name: easytier-mobile-android-${{ matrix.ARCH }}
path: | path: |
./artifacts/* ./artifacts/*
mobile-result: mobile-result:
if: needs.pre_job.outputs.should_skip != 'true' && always()
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: needs: [ pre_job, build-mobile ]
- pre_job if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
- build-mobile
steps: steps:
- name: Mark result as failed - name: Mark result as failed
if: needs.build-mobile.result != 'success' if: contains(needs.*.result, 'failure')
run: exit 1 run: exit 1
+16 -2
View File
@@ -6,17 +6,25 @@ on:
paths: paths:
- "**/*.nix" - "**/*.nix"
- "flake.lock" - "flake.lock"
- "rust-toolchain.toml"
pull_request: pull_request:
branches: ["main", "develop"] branches: ["main", "develop"]
types: [opened, synchronize, reopened, ready_for_review]
paths: paths:
- "**/*.nix" - "**/*.nix"
- "flake.lock" - "flake.lock"
- "rust-toolchain.toml"
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs: jobs:
check-full-shell: check-full-shell:
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: Install Nix - name: Install Nix
uses: cachix/install-nix-action@v27 uses: cachix/install-nix-action@v27
@@ -26,5 +34,11 @@ jobs:
- name: Magic Nix Cache - name: Magic Nix Cache
uses: DeterminateSystems/magic-nix-cache-action@v6 uses: DeterminateSystems/magic-nix-cache-action@v6
- name: Check full devShell - name: Warm up full devShell
run: nix develop .#full --command true run: nix develop .#full --command true
- name: Cargo check in flake environment
run: nix develop .#full --command cargo check
- name: Cargo build in flake environment
run: nix develop .#full --command cargo build
+39 -15
View File
@@ -8,8 +8,13 @@ on:
- '!*-pre' - '!*-pre'
pull_request: pull_request:
branches: ["develop", "main"] branches: ["develop", "main"]
types: [opened, synchronize, reopened, ready_for_review]
workflow_dispatch: workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
@@ -20,18 +25,29 @@ defaults:
jobs: jobs:
cargo_fmt_check: cargo_fmt_check:
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: fmt check
- name: Prepare build environment
uses: ./.github/actions/prepare-build
with:
gui: false
pnpm: false
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: rustfmt
- name: Check formatting
working-directory: ./easytier-contrib/easytier-ohrs working-directory: ./easytier-contrib/easytier-ohrs
run: | run: cargo fmt --all -- --check
bash ../../.github/workflows/install_rust.sh
rustup component add rustfmt
cargo fmt --all -- --check
pre_job: pre_job:
# continue-on-error: true # Uncomment once integration is finished # continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
# Map a step output to a job output # Map a step output to a job output
outputs: outputs:
# do not skip push on branch starts with releases/ # do not skip push on branch starts with releases/
@@ -44,7 +60,8 @@ jobs:
concurrent_skipping: "same_content_newer" concurrent_skipping: "same_content_newer"
skip_after_successful_duplicate: "true" skip_after_successful_duplicate: "true"
cancel_others: "true" cancel_others: "true"
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-contrib/easytier-ohrs/**", ".github/workflows/ohos.yml", ".github/workflows/install_rust.sh"]' paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-contrib/easytier-ohrs/**", ".github/workflows/ohos.yml", ".github/actions/**"]'
build-ohos: build-ohos:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: pre_job needs: pre_job
@@ -52,17 +69,16 @@ jobs:
OHPM_PUBLISH_CODE: ${{ secrets.OHPM_PUBLISH_CODE }} OHPM_PUBLISH_CODE: ${{ secrets.OHPM_PUBLISH_CODE }}
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: Install dependencies - name: Install dependencies
run: | run: |
sudo apt-get update sudo apt-get update
sudo apt-get install -y \ sudo apt-get install -qq \
build-essential \ build-essential \
wget \ wget \
unzip \ unzip \
git \ git \
pkg-config curl libgl1-mesa-dev expect pkg-config curl libgl1-mesa-dev expect
sudo apt-get clean
- name: Resolve easytier version - name: Resolve easytier version
run: | run: |
@@ -134,6 +150,15 @@ jobs:
run: | run: |
echo "TARGET_ARCH=aarch64-linux-ohos" >> $GITHUB_ENV echo "TARGET_ARCH=aarch64-linux-ohos" >> $GITHUB_ENV
rustup install stable
rustup default stable
rustup target add aarch64-unknown-linux-ohos
- uses: taiki-e/install-action@v2
with:
tool: ohrs
- name: Create clang wrapper script - name: Create clang wrapper script
run: | run: |
sudo mkdir -p $OHOS_NDK_HOME/native/llvm sudo mkdir -p $OHOS_NDK_HOME/native/llvm
@@ -152,11 +177,7 @@ jobs:
run: | run: |
sudo apt-get install -y llvm clang lldb lld sudo apt-get install -y llvm clang lldb lld
sudo apt-get install -y protobuf-compiler sudo apt-get install -y protobuf-compiler
bash ../../.github/workflows/install_rust.sh
source env.sh source env.sh
cargo install ohrs
rustup target add aarch64-unknown-linux-ohos
cargo update easytier
ohrs doctor ohrs doctor
ohrs build --release --arch aarch ohrs build --release --arch aarch
ohrs artifact ohrs artifact
@@ -174,11 +195,14 @@ jobs:
jq --arg v "$TAG_VERSION" '.name = "easytier-release" | .version = $v' oh-package.json5 > oh-package.tmp.json5 && mv oh-package.tmp.json5 oh-package.json5 jq --arg v "$TAG_VERSION" '.name = "easytier-release" | .version = $v' oh-package.json5 > oh-package.tmp.json5 && mv oh-package.tmp.json5 oh-package.json5
cd .. cd ..
ohrs build --release --arch aarch ohrs build --release --arch aarch
cd dist/arm64-v8a
mv libeasytier_ohrs.so libeasytier_release.so
cd ../..
ohrs artifact ohrs artifact
mv package.har easytier-release.har mv package.har easytier-release.har
- name: Upload artifact - name: Upload artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v5
with: with:
name: easytier-ohos name: easytier-ohos
path: | path: |
+2 -2
View File
@@ -18,7 +18,7 @@ on:
version: version:
description: 'Version for this release' description: 'Version for this release'
type: string type: string
default: 'v2.5.0' default: 'v2.6.0'
required: true required: true
make_latest: make_latest:
description: 'Mark this release as latest' description: 'Mark this release as latest'
@@ -35,7 +35,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
- name: Download Core Artifact - name: Download Core Artifact
uses: dawidd6/action-download-artifact@v11 uses: dawidd6/action-download-artifact@v11
+25 -16
View File
@@ -6,6 +6,10 @@ on:
pull_request: pull_request:
branches: [ "develop", "main" ] branches: [ "develop", "main" ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
# RUSTC_WRAPPER: "sccache" # RUSTC_WRAPPER: "sccache"
@@ -30,7 +34,7 @@ jobs:
# All of these options are optional, so you can remove them if you are happy with the defaults # All of these options are optional, so you can remove them if you are happy with the defaults
concurrent_skipping: 'never' concurrent_skipping: 'never'
skip_after_successful_duplicate: 'true' skip_after_successful_duplicate: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml", ".github/workflows/install_gui_dep.sh", ".github/workflows/install_rust.sh"]' paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml", ".github/actions/**"]'
check: check:
name: Run linters & check name: Run linters & check
@@ -38,24 +42,29 @@ jobs:
needs: pre_job needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v5
- name: Prepare build environment - name: Prepare build environment
uses: ./.github/actions/prepare-build uses: ./.github/actions/prepare-build
with: with:
gui: true gui: true
web: true pnpm: true
token: ${{ secrets.GITHUB_TOKEN }} token: ${{ secrets.GITHUB_TOKEN }}
- uses: Swatinem/rust-cache@v2 - uses: actions-rust-lang/setup-rust-toolchain@v1
with:
- name: Install rustfmt and clippy components: rustfmt,clippy
run: | rustflags: ''
rustup component add rustfmt
rustup component add clippy
- uses: taiki-e/install-action@cargo-hack - uses: taiki-e/install-action@cargo-hack
- name: Check Cargo.lock is up to date
run: |
if ! cargo metadata --format-version 1 --locked --no-deps > /dev/null; then
echo "::error::Cargo.lock is out of date. Run cargo generate-lockfile or cargo build locally, then commit Cargo.lock."
exit 1
fi
- name: Check formatting - name: Check formatting
run: cargo fmt --all -- --check run: cargo fmt --all -- --check
@@ -72,13 +81,13 @@ jobs:
needs: pre_job needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v5
- name: Prepare build environment - name: Prepare build environment
uses: ./.github/actions/prepare-build uses: ./.github/actions/prepare-build
with: with:
gui: true gui: true
web: true pnpm: true
token: ${{ secrets.GITHUB_TOKEN }} token: ${{ secrets.GITHUB_TOKEN }}
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
@@ -88,7 +97,7 @@ jobs:
- name: Archive test - name: Archive test
run: cargo nextest archive --archive-file tests.tar.zst --package easytier --features full run: cargo nextest archive --archive-file tests.tar.zst --package easytier --features full
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v5
with: with:
name: tests name: tests
path: tests.tar.zst path: tests.tar.zst
@@ -112,7 +121,7 @@ jobs:
- name: "three_node::subnet_proxy_three_node_test" - name: "three_node::subnet_proxy_three_node_test"
opts: "-E 'test(subnet_proxy_three_node_test)' --test-threads 1 --no-fail-fast" opts: "-E 'test(subnet_proxy_three_node_test)' --test-threads 1 --no-fail-fast"
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v5
- name: Setup tools for test - name: Setup tools for test
run: sudo apt install bridge-utils run: sudo apt install bridge-utils
@@ -139,9 +148,9 @@ jobs:
test: test:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: [ pre_job, test_matrix ] needs: [ pre_job, check, test_matrix ]
if: needs.pre_job.outputs.should_skip != 'true' && always() if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
steps: steps:
- name: Mark result as failed - name: Mark result as failed
if: needs.test_matrix.result != 'success' if: contains(needs.*.result, 'failure')
run: exit 1 run: exit 1
+3 -3
View File
@@ -26,7 +26,7 @@ Thank you for your interest in contributing to EasyTier! This document provides
#### Required Tools #### Required Tools
- Node.js v21 or higher - Node.js v21 or higher
- pnpm v9 or higher - pnpm v9 or higher
- Rust toolchain (version 1.93) - Rust toolchain (version 1.95)
- LLVM and Clang - LLVM and Clang
- Protoc (Protocol Buffers compiler) - Protoc (Protocol Buffers compiler)
@@ -79,8 +79,8 @@ sudo apt install -y bridge-utils
2. Install dependencies: 2. Install dependencies:
```bash ```bash
# Install Rust toolchain # Install Rust toolchain
rustup install 1.93 rustup install 1.95
rustup default 1.93 rustup default 1.95
# Install project dependencies # Install project dependencies
pnpm -r install pnpm -r install
+3 -3
View File
@@ -34,7 +34,7 @@
#### 必需工具 #### 必需工具
- Node.js v21 或更高版本 - Node.js v21 或更高版本
- pnpm v9 或更高版本 - pnpm v9 或更高版本
- Rust 工具链(版本 1.93 - Rust 工具链(版本 1.95
- LLVM 和 Clang - LLVM 和 Clang
- ProtocProtocol Buffers 编译器) - ProtocProtocol Buffers 编译器)
@@ -87,8 +87,8 @@ sudo apt install -y bridge-utils
2. 安装依赖: 2. 安装依赖:
```bash ```bash
# 安装 Rust 工具链 # 安装 Rust 工具链
rustup install 1.93 rustup install 1.95
rustup default 1.93 rustup default 1.95
# 安装项目依赖 # 安装项目依赖
pnpm -r install pnpm -r install
Generated
+1187 -978
View File
File diff suppressed because it is too large Load Diff
+4
View File
@@ -14,6 +14,10 @@ exclude = [
"easytier-contrib/easytier-ohrs", # it needs ohrs sdk "easytier-contrib/easytier-ohrs", # it needs ohrs sdk
] ]
[workspace.package]
edition = "2024"
rust-version = "1.95"
[profile.dev] [profile.dev]
panic = "unwind" panic = "unwind"
debug = 2 debug = 2
+3 -3
View File
@@ -108,9 +108,9 @@ After successful execution, you can check the network status using `easytier-cli
```text ```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version | | ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- | | ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.5.0-70e69a38~ | | 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.6.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.5.0-70e69a38~ | | 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.6.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.5.0-70e69a38~ | | | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.6.0-70e69a38~ |
``` ```
You can test connectivity between nodes: You can test connectivity between nodes:
+3 -3
View File
@@ -108,9 +108,9 @@ sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<共享
```text ```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version | | ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- | | ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.5.0-70e69a38~ | | 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.6.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.5.0-70e69a38~ | | 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.6.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.5.0-70e69a38~ | | | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.6.0-70e69a38~ |
``` ```
您可以测试节点之间的连通性: 您可以测试节点之间的连通性:
@@ -1,7 +1,7 @@
[package] [package]
name = "easytier-android-jni" name = "easytier-android-jni"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition.workspace = true
[lib] [lib]
crate-type = ["cdylib"] crate-type = ["cdylib"]
@@ -1,7 +1,7 @@
use easytier::proto::api::manage::{NetworkInstanceRunningInfo, NetworkInstanceRunningInfoMap}; use easytier::proto::api::manage::{NetworkInstanceRunningInfo, NetworkInstanceRunningInfoMap};
use jni::JNIEnv;
use jni::objects::{JClass, JObjectArray, JString}; use jni::objects::{JClass, JObjectArray, JString};
use jni::sys::{jint, jstring}; use jni::sys::{jint, jstring};
use jni::JNIEnv;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use std::ffi::{CStr, CString}; use std::ffi::{CStr, CString};
use std::ptr; use std::ptr;
@@ -15,7 +15,7 @@ pub struct KeyValuePair {
} }
// 声明外部 C 函数 // 声明外部 C 函数
extern "C" { unsafe extern "C" {
fn set_tun_fd(inst_name: *const std::ffi::c_char, fd: std::ffi::c_int) -> std::ffi::c_int; fn set_tun_fd(inst_name: *const std::ffi::c_char, fd: std::ffi::c_int) -> std::ffi::c_int;
fn get_error_msg(out: *mut *const std::ffi::c_char); fn get_error_msg(out: *mut *const std::ffi::c_char);
fn free_string(s: *const std::ffi::c_char); fn free_string(s: *const std::ffi::c_char);
@@ -68,7 +68,7 @@ fn throw_exception(env: &mut JNIEnv, message: &str) {
} }
/// 设置 TUN 文件描述符 /// 设置 TUN 文件描述符
#[no_mangle] #[unsafe(no_mangle)]
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_setTunFd( pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_setTunFd(
mut env: JNIEnv, mut env: JNIEnv,
_class: JClass, _class: JClass,
@@ -87,17 +87,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_setTunFd(
unsafe { unsafe {
let result = set_tun_fd(inst_name_cstr.as_ptr(), fd); let result = set_tun_fd(inst_name_cstr.as_ptr(), fd);
if result != 0 { if result != 0
if let Some(error) = get_last_error() { && let Some(error) = get_last_error()
throw_exception(&mut env, &error); {
} throw_exception(&mut env, &error);
} }
result result
} }
} }
/// 解析配置 /// 解析配置
#[no_mangle] #[unsafe(no_mangle)]
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_parseConfig( pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_parseConfig(
mut env: JNIEnv, mut env: JNIEnv,
_class: JClass, _class: JClass,
@@ -115,17 +115,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_parseConfig(
unsafe { unsafe {
let result = parse_config(config_cstr.as_ptr()); let result = parse_config(config_cstr.as_ptr());
if result != 0 { if result != 0
if let Some(error) = get_last_error() { && let Some(error) = get_last_error()
throw_exception(&mut env, &error); {
} throw_exception(&mut env, &error);
} }
result result
} }
} }
/// 运行网络实例 /// 运行网络实例
#[no_mangle] #[unsafe(no_mangle)]
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_runNetworkInstance( pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_runNetworkInstance(
mut env: JNIEnv, mut env: JNIEnv,
_class: JClass, _class: JClass,
@@ -143,17 +143,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_runNetworkInstance(
unsafe { unsafe {
let result = run_network_instance(config_cstr.as_ptr()); let result = run_network_instance(config_cstr.as_ptr());
if result != 0 { if result != 0
if let Some(error) = get_last_error() { && let Some(error) = get_last_error()
throw_exception(&mut env, &error); {
} throw_exception(&mut env, &error);
} }
result result
} }
} }
/// 保持网络实例 /// 保持网络实例
#[no_mangle] #[unsafe(no_mangle)]
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance( pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
mut env: JNIEnv, mut env: JNIEnv,
_class: JClass, _class: JClass,
@@ -165,10 +165,10 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
if instance_names.is_null() { if instance_names.is_null() {
unsafe { unsafe {
let result = retain_network_instance(ptr::null(), 0); let result = retain_network_instance(ptr::null(), 0);
if result != 0 { if result != 0
if let Some(error) = get_last_error() { && let Some(error) = get_last_error()
throw_exception(&mut env, &error); {
} throw_exception(&mut env, &error);
} }
return result; return result;
} }
@@ -187,10 +187,10 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
if array_length == 0 { if array_length == 0 {
unsafe { unsafe {
let result = retain_network_instance(ptr::null(), 0); let result = retain_network_instance(ptr::null(), 0);
if result != 0 { if result != 0
if let Some(error) = get_last_error() { && let Some(error) = get_last_error()
throw_exception(&mut env, &error); {
} throw_exception(&mut env, &error);
} }
return result; return result;
} }
@@ -234,17 +234,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
unsafe { unsafe {
let result = retain_network_instance(c_string_ptrs.as_ptr(), c_string_ptrs.len()); let result = retain_network_instance(c_string_ptrs.as_ptr(), c_string_ptrs.len());
if result != 0 { if result != 0
if let Some(error) = get_last_error() { && let Some(error) = get_last_error()
throw_exception(&mut env, &error); {
} throw_exception(&mut env, &error);
} }
result result
} }
} }
/// 收集网络信息 /// 收集网络信息
#[no_mangle] #[unsafe(no_mangle)]
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_collectNetworkInfos( pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_collectNetworkInfos(
mut env: JNIEnv, mut env: JNIEnv,
_class: JClass, _class: JClass,
@@ -304,7 +304,7 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_collectNetworkInfos(
} }
/// 获取最后的错误信息 /// 获取最后的错误信息
#[no_mangle] #[unsafe(no_mangle)]
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_getLastError( pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_getLastError(
env: JNIEnv, env: JNIEnv,
_class: JClass, _class: JClass,
+1 -1
View File
@@ -1,7 +1,7 @@
[package] [package]
name = "easytier-ffi" name = "easytier-ffi"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition.workspace = true
[lib] [lib]
crate-type = ["cdylib"] crate-type = ["cdylib"]
+7 -7
View File
@@ -30,7 +30,7 @@ fn set_error_msg(msg: &str) {
/// # Safety /// # Safety
/// Set the tun fd /// Set the tun fd
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn set_tun_fd( pub unsafe extern "C" fn set_tun_fd(
inst_name: *const std::ffi::c_char, inst_name: *const std::ffi::c_char,
fd: std::ffi::c_int, fd: std::ffi::c_int,
@@ -59,7 +59,7 @@ pub unsafe extern "C" fn set_tun_fd(
/// # Safety /// # Safety
/// Get the last error message /// Get the last error message
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) { pub unsafe extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
let msg_buf = ERROR_MSG.lock().unwrap(); let msg_buf = ERROR_MSG.lock().unwrap();
if msg_buf.is_empty() { if msg_buf.is_empty() {
@@ -74,7 +74,7 @@ pub unsafe extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
} }
} }
#[no_mangle] #[unsafe(no_mangle)]
pub extern "C" fn free_string(s: *const std::ffi::c_char) { pub extern "C" fn free_string(s: *const std::ffi::c_char) {
if s.is_null() { if s.is_null() {
return; return;
@@ -86,7 +86,7 @@ pub extern "C" fn free_string(s: *const std::ffi::c_char) {
/// # Safety /// # Safety
/// Parse the config /// Parse the config
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int { pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe { let cfg_str = unsafe {
assert!(!cfg_str.is_null()); assert!(!cfg_str.is_null());
@@ -105,7 +105,7 @@ pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::
/// # Safety /// # Safety
/// Run the network instance /// Run the network instance
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int { pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe { let cfg_str = unsafe {
assert!(!cfg_str.is_null()); assert!(!cfg_str.is_null());
@@ -144,7 +144,7 @@ pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char)
/// # Safety /// # Safety
/// Retain the network instance /// Retain the network instance
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn retain_network_instance( pub unsafe extern "C" fn retain_network_instance(
inst_names: *const *const std::ffi::c_char, inst_names: *const *const std::ffi::c_char,
length: usize, length: usize,
@@ -188,7 +188,7 @@ pub unsafe extern "C" fn retain_network_instance(
/// # Safety /// # Safety
/// Collect the network infos /// Collect the network infos
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn collect_network_infos( pub unsafe extern "C" fn collect_network_infos(
infos: *mut KeyValuePair, infos: *mut KeyValuePair,
max_length: usize, max_length: usize,
+57 -26
View File
@@ -1,43 +1,74 @@
#!/data/adb/magisk/busybox sh #!/data/adb/magisk/busybox sh
MODDIR=${0%/*} MODDIR=${0%/*}
MODULE_PROP="${MODDIR}/module.prop" MODULE_PROP="${MODDIR}/module.prop"
IP_RULE_SCRIPT="${MODDIR}/hotspot_iprule.sh"
ET_STATUS="" ET_STATUS=""
REDIR_STATUS="" REDIR_STATUS=""
# 更新module.prop文件中的description IS_RUNNING=false
# 确保辅助脚本有执行权限
chmod +x "${IP_RULE_SCRIPT}" 2>/dev/null
# 更新 module.prop 文件中的 description
update_module_description() { update_module_description() {
local status_message=$1 local status_message=$1
sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP} # 检查 module.prop 文件存在且 description 发生变化了再写入
if [ -f "${MODULE_PROP}" ]; then
local current_desc=$(grep "^description=" "${MODULE_PROP}")
local new_desc="description=[状态] ${status_message}"
if [ "${current_desc}" != "${new_desc}" ]; then
sed -i "s#^description=.*#${new_desc}#" "${MODULE_PROP}"
fi
fi
} }
# 判断程序启动状态
if [ -f "${MODDIR}/disable" ]; then if [ -f "${MODDIR}/disable" ]; then
ET_STATUS="已关闭" IS_RUNNING=false
elif pgrep -f 'easytier-core' >/dev/null; then ET_STATUS="主程序已关闭"
if [ -f "${MODDIR}/config/command_args"]; then
ET_STATUS="主程序已开启(启动参数模式)" elif pgrep -f "${MODDIR}/easytier-core" >/dev/null; then
IS_RUNNING=true
if [ -f "${MODDIR}/config/command_args" ]; then
ET_STATUS="主程序正在运行(启动参数模式)"
else else
ET_STATUS="主程序已开启(配置文件模式)" ET_STATUS="主程序正在运行(配置文件模式"
fi fi
elif [ -z "$ET_STATUS" ]; then
# 既没 disable 也没运行,说明是异常停止或未启动
ET_STATUS="主程序启动失败或未运行"
fi fi
#ET_STATUS不存在说明开启模块未正常运行,不修改状态 # 无论主程序是否运行,都允许切换“开关文件”的状态,以便下次生效
if [ -n "$ET_STATUS" ]; then if [ -f "${MODDIR}/enable_IP_rule" ]; then
if [ -f "${MODDIR}/enable_IP_rule" ]; then rm -f "${MODDIR}/enable_IP_rule"
rm -f "${MODDIR}/enable_IP_rule"
${MODDIR}/hotspot_iprule.sh del "${IP_RULE_SCRIPT}" del >/dev/null 2>&1
REDIR_STATUS="转发已禁用"
echo "热点子网转发已禁用" REDIR_STATUS="转发已禁用"
echo "[ET-NAT] IP rule disabled." >> "${MODDIR}/log.log" echo "热点子网转发已禁用"
else echo "[ET-NAT] Action: IP rule disabled." >> "${MODDIR}/log.log"
touch "${MODDIR}/enable_IP_rule"
${MODDIR}/hotspot_iprule.sh del
${MODDIR}/hotspot_iprule.sh add_once
REDIR_STATUS="转发已激活"
echo "热点子网转发已激活,热点开启后将自动将热点加入转发网络(要求已配置本地网络cidr=参数)。转发规则将随着热点开关而自动开关。该状态将保持到转发被禁用为止。"
echo "[ET-NAT] IP rule enabled." >> "${MODDIR}/log.log"
fi
update_module_description "${ET_STATUS} | ${REDIR_STATUS}"
else else
echo "主程序未正常启动,请先检查配置文件" touch "${MODDIR}/enable_IP_rule"
if [ "$IS_RUNNING" = true ]; then
"${IP_RULE_SCRIPT}" del >/dev/null 2>&1
"${IP_RULE_SCRIPT}" add_once
echo "转发规则将立即生效,无需重启"
else
echo "主程序未运行,转发规则将在下次启动时生效"
fi
REDIR_STATUS="转发已激活"
echo "----------------------------------"
echo "热点子网转发已激活"
echo "热点开启后将自动将热点加入转发网络"
echo "需要在配置中提前配置好 cidr 参数"
echo "----------------------------------"
echo "[ET-NAT] Action: IP rule enabled." >> "${MODDIR}/log.log"
fi fi
sync
update_module_description "${ET_STATUS}| ${REDIR_STATUS}"
+12 -9
View File
@@ -5,12 +5,15 @@ LATESTARTSERVICE=true
set_perm_recursive $MODPATH 0 0 0777 0777 set_perm_recursive $MODPATH 0 0 0777 0777
ui_print '安装完成' ui_print "系统架构为:$ARCH"
ui_print '当前架构为' + $ARCH ui_print "系统 SDK 版本:$API"
ui_print '当前系统版本为' + $API ui_print "EasyTier 安装位置:/data/adb/modules/easytier_magisk"
ui_print '安装目录为: /data/adb/modules/easytier_magisk' ui_print "配置文件位置:/data/adb/modules/easytier_magisk/config/config.toml"
ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.toml' ui_print "如需使用启动参数模式,请将 /data/adb/modules/easytier_magisk/config/command_args_sample 重命名为 command_args,并修改其中的内容"
ui_print '如果需要自定义启动参数,可将 /data/adb/modules/easytier_magisk/config/command_args_sample 重命名为 command_args,并修改其中内容,使用自定义启动参数时会忽略配置文件' ui_print "config 目录中存在 command_args 文件时,模块会自动忽略 config.toml 文件"
ui_print '修改配置文件后在magisk app禁用应用再启动即可生效' ui_print "----------------------------------"
ui_print '点击操作按钮可启动/关闭热点子网转发,配合easytier的子网代理功能实现手机热点访问easytier网络' ui_print "注意!启动参数文件中不能存在 \" 和 ',配置文件则没有这个限制"
ui_print '记得重启' ui_print "----------------------------------"
ui_print "修改配置后无需重启设备,在 Magisk 中禁用 EasyTier 模块,等待 10 秒后重新启用即可让新配置生效"
ui_print "点击 Magisk 中模块左下角的“操作”按钮可以禁用或激活热点子网转发,使用该功能前需要在配置中提前配置好 cidr 参数"
ui_print "模块安装完成,重启设备生效"
@@ -2,23 +2,31 @@
MODDIR=${0%/*} MODDIR=${0%/*}
CONFIG_FILE="${MODDIR}/config/config.toml" CONFIG_FILE="${MODDIR}/config/config.toml"
COMMAND_ARGS="${MODDIR}/config/command_args"
LOG_FILE="${MODDIR}/log.log" LOG_FILE="${MODDIR}/log.log"
MODULE_PROP="${MODDIR}/module.prop" MODULE_PROP="${MODDIR}/module.prop"
EASYTIER="${MODDIR}/easytier-core" EASYTIER="${MODDIR}/easytier-core"
# 处理获取到的设备型号中可能出现的空格
BRAND=$(getprop ro.product.brand | tr ' ' '-')
MODEL=$(getprop ro.product.model | tr ' ' '-')
DEVICE_HOSTNAME="${BRAND}-${MODEL}"
REDIR_STATUS="" REDIR_STATUS=""
# 更新module.prop文件中的description # 更新 module.prop 文件中的 description
update_module_description() { update_module_description() {
local status_message=$1 local status_message=$1
sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP} # 检查 module.prop 文件存在且 description 发生变化了再写入
if [ -f "${MODULE_PROP}" ]; then
local current_desc=$(grep "^description=" "${MODULE_PROP}")
local new_desc="description=[状态] ${status_message}"
if [ "${current_desc}" != "${new_desc}" ]; then
sed -i "s#^description=.*#${new_desc}#" "${MODULE_PROP}"
fi
fi
} }
if [ -f "${MODDIR}/enable_IP_rule" ]; then # 检查并初始化 TUN 设备
REDIR_STATUS="转发已激活"
else
REDIR_STATUS="转发已禁用"
fi
if [ ! -e /dev/net/tun ]; then if [ ! -e /dev/net/tun ]; then
if [ ! -d /dev/net ]; then if [ ! -d /dev/net ]; then
mkdir -p /dev/net mkdir -p /dev/net
@@ -28,38 +36,77 @@ if [ ! -e /dev/net/tun ]; then
fi fi
while true; do while true; do
if ls $MODDIR | grep -q "disable"; then # 获取子网转发激活状态
update_module_description "关闭中 | ${REDIR_STATUS}" if [ -f "${MODDIR}/enable_IP_rule" ]; then
if pgrep -f 'easytier-core' >/dev/null; then REDIR_STATUS="转发已激活"
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭 ..."
pkill easytier-core # 关闭进程
fi
else else
if ! pgrep -f 'easytier-core' >/dev/null; then REDIR_STATUS="转发已禁用"
if [ ! -f "$CONFIG_FILE" ]; then
update_module_description "config.toml不存在"
sleep 3s
continue
fi
# 如果 config 目录下存在 command_args 文件,则读取其中的内容作为启动参数
if [ -f "${MODDIR}/config/command_args" ]; then
TZ=Asia/Shanghai ${EASYTIER} $(cat ${MODDIR}/config/command_args) --hostname "$(getprop ro.product.brand)-$(getprop ro.product.model)" > ${LOG_FILE} &
sleep 5s # 等待easytier-core启动完成
update_module_description "主程序已开启(启动参数模式) | ${REDIR_STATUS}"
else
TZ=Asia/Shanghai ${EASYTIER} -c ${CONFIG_FILE} --hostname "$(getprop ro.product.brand)-$(getprop ro.product.model)" > ${LOG_FILE} &
sleep 5s # 等待easytier-core启动完成
update_module_description "主程序已开启(配置文件模式) | ${REDIR_STATUS}"
fi
ip rule add from all lookup main
if ! pgrep -f 'easytier-core' >/dev/null; then
update_module_descriptio "主程序启动失败,请检查配置文件"
fi
else
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在"
fi
fi fi
sleep 3s # 暂停3秒后再次执行循环 # 检查模块是否被禁用
if [ -f "${MODDIR}/disable" ]; then
update_module_description "主程序已关闭 | ${REDIR_STATUS}"
if pgrep -f "${EASYTIER}" >/dev/null; then
echo "开关控制 $(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭"
pkill -f "${EASYTIER}"
fi
sleep 10s
continue
fi
# 检查进程是否已经在运行
if pgrep -f "${EASYTIER}" >/dev/null; then
sleep 10s
continue
fi
# 检查配置文件是否存在
if [ ! -f "${CONFIG_FILE}" ] && [ ! -f "${COMMAND_ARGS}" ]; then
update_module_description "缺少配置文件或启动参数文件"
sleep 10s
continue
fi
# 如果 config 目录下存在 command_args 文件,则读取其中的内容作为启动参数
if [ -f "${COMMAND_ARGS}" ]; then
# 启动参数模式
CMD_CONTENT=$(tr '\r\n' ' ' < "${COMMAND_ARGS}")
if echo "${CMD_CONTENT}" | grep -q "\-\-hostname"; then
FINAL_ARGS="${CMD_CONTENT}"
else
FINAL_ARGS="${CMD_CONTENT} --hostname ${DEVICE_HOSTNAME}"
fi
TZ=Asia/Shanghai "${EASYTIER}" ${FINAL_ARGS} > "${LOG_FILE}" 2>&1 &
STR_MODE="启动参数模式"
# 否则读取 config.toml 的内容作为启动参数
else
# 配置文件模式
if grep -q "^[[:space:]]*hostname[[:space:]]*=" "${CONFIG_FILE}"; then
TZ=Asia/Shanghai "${EASYTIER}" -c "${CONFIG_FILE}" > "${LOG_FILE}" 2>&1 &
else
TZ=Asia/Shanghai "${EASYTIER}" -c "${CONFIG_FILE}" --hostname "${DEVICE_HOSTNAME}" > "${LOG_FILE}" 2>&1 &
fi
STR_MODE="配置文件模式"
fi
# 等待进程启动
sleep 5s
# 启动后的扫尾工作
if pgrep -f "${EASYTIER}" >/dev/null; then
if ! ip rule show | grep -q "lookup main"; then
ip rule add from all lookup main
fi
update_module_description "主程序正在运行(${STR_MODE}| ${REDIR_STATUS}"
else
update_module_description "主程序启动失败,请检查配置文件或启动参数"
fi
sleep 10s
done done
+1 -1
View File
@@ -1,6 +1,6 @@
id=easytier_magisk id=easytier_magisk
name=EasyTier_Magisk name=EasyTier_Magisk
version=v2.5.0 version=v2.6.0
versionCode=1 versionCode=1
author=EasyTier author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier) description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
@@ -1,3 +1,5 @@
MODDIR=${0%/*} MODDIR=${0%/*}
pkill easytier-core # 结束 easytier-core 进程 pkill -f "${MODDIR}/easytier-core"
rm -rf $MODDIR/*
# 使用 ${MODDIR:?} 确保变量非空,避免执行 rm -rf /*
rm -rf "${MODDIR:?}/"*
+153 -7
View File
@@ -1083,7 +1083,7 @@ checksum = "7454e41ff9012c00d53cf7f475c5e3afa3b91b7c90568495495e8d9bf47a1055"
[[package]] [[package]]
name = "easytier" name = "easytier"
version = "2.5.0" version = "2.6.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"arc-swap", "arc-swap",
@@ -1101,6 +1101,7 @@ dependencies = [
"byteorder", "byteorder",
"bytes", "bytes",
"cfg-if", "cfg-if",
"cfg_aliases",
"chrono", "chrono",
"cidr", "cidr",
"clap", "clap",
@@ -1115,6 +1116,7 @@ dependencies = [
"easytier-rpc-build", "easytier-rpc-build",
"encoding", "encoding",
"flume", "flume",
"forwarded-header-value",
"futures", "futures",
"gethostname", "gethostname",
"git-version", "git-version",
@@ -1131,6 +1133,7 @@ dependencies = [
"humantime-serde", "humantime-serde",
"idna", "idna",
"indoc", "indoc",
"itertools 0.14.0",
"kcp-sys", "kcp-sys",
"machine-uid", "machine-uid",
"multimap", "multimap",
@@ -1153,7 +1156,9 @@ dependencies = [
"prost-build", "prost-build",
"prost-reflect", "prost-reflect",
"prost-reflect-build", "prost-reflect-build",
"prost-types", "prost-wkt",
"prost-wkt-build",
"prost-wkt-types",
"quinn", "quinn",
"quinn-plaintext", "quinn-plaintext",
"rand 0.8.5", "rand 0.8.5",
@@ -1173,6 +1178,7 @@ dependencies = [
"smoltcp", "smoltcp",
"snow", "snow",
"socket2 0.5.10", "socket2 0.5.10",
"strum",
"stun_codec", "stun_codec",
"sys-locale", "sys-locale",
"tabled", "tabled",
@@ -1354,6 +1360,17 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "erased-serde"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2add8a07dd6a8d93ff627029c51de145e12686fbc36ecb298ac22e74cf02dec"
dependencies = [
"serde",
"serde_core",
"typeid",
]
[[package]] [[package]]
name = "errno" name = "errno"
version = "0.3.14" version = "0.3.14"
@@ -1471,6 +1488,16 @@ dependencies = [
"percent-encoding", "percent-encoding",
] ]
[[package]]
name = "forwarded-header-value"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9"
dependencies = [
"nonempty",
"thiserror 1.0.69",
]
[[package]] [[package]]
name = "futures" name = "futures"
version = "0.3.31" version = "0.3.31"
@@ -2217,6 +2244,15 @@ dependencies = [
"generic-array", "generic-array",
] ]
[[package]]
name = "inventory"
version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4f0c30c76f2f4ccee3fe55a2435f691ca00c0e4bd87abe4f4a851b1d4dac39b"
dependencies = [
"rustversion",
]
[[package]] [[package]]
name = "io-uring" name = "io-uring"
version = "0.7.10" version = "0.7.10"
@@ -2824,6 +2860,12 @@ dependencies = [
"minimal-lexical", "minimal-lexical",
] ]
[[package]]
name = "nonempty"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7"
[[package]] [[package]]
name = "normpath" name = "normpath"
version = "1.5.0" version = "1.5.0"
@@ -3413,6 +3455,52 @@ dependencies = [
"prost", "prost",
] ]
[[package]]
name = "prost-wkt"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497e1e938f0c09ef9cabe1d49437b4016e03e8f82fbbe5d1c62a9b61b9decae1"
dependencies = [
"chrono",
"inventory",
"prost",
"serde",
"serde_derive",
"serde_json",
"typetag",
]
[[package]]
name = "prost-wkt-build"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07b8bf115b70a7aa5af1fd5d6e9418492e9ccb6e4785e858c938e28d132a884b"
dependencies = [
"heck 0.5.0",
"prost",
"prost-build",
"prost-types",
"quote",
]
[[package]]
name = "prost-wkt-types"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8cdde6df0a98311c839392ca2f2f0bcecd545f86a62b4e3c6a49c336e970fe5"
dependencies = [
"chrono",
"prost",
"prost-build",
"prost-types",
"prost-wkt",
"prost-wkt-build",
"regex",
"serde",
"serde_derive",
"serde_json",
]
[[package]] [[package]]
name = "quick-xml" name = "quick-xml"
version = "0.38.3" version = "0.38.3"
@@ -3456,9 +3544,9 @@ dependencies = [
[[package]] [[package]]
name = "quinn-proto" name = "quinn-proto"
version = "0.11.13" version = "0.11.14"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098"
dependencies = [ dependencies = [
"bytes", "bytes",
"fastbloom", "fastbloom",
@@ -4136,6 +4224,12 @@ version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
[[package]]
name = "simdutf8"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
[[package]] [[package]]
name = "siphasher" name = "siphasher"
version = "1.0.1" version = "1.0.1"
@@ -4225,6 +4319,27 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "strum"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
dependencies = [
"strum_macros",
]
[[package]]
name = "strum_macros"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7"
dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]] [[package]]
name = "stun_codec" name = "stun_codec"
version = "0.3.5" version = "0.3.5"
@@ -4575,9 +4690,9 @@ dependencies = [
[[package]] [[package]]
name = "tokio-websockets" name = "tokio-websockets"
version = "0.8.3" version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "842e11addde61da7c37ef205cd625ebcd7b607076ea62e4698f06bfd5fd01a03" checksum = "dad543404f98bfc969aeb71994105c592acfc6c43323fddcd016bb208d1c65cb"
dependencies = [ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"bytes", "bytes",
@@ -4588,10 +4703,11 @@ dependencies = [
"httparse", "httparse",
"ring", "ring",
"rustls-pki-types", "rustls-pki-types",
"simdutf8",
"tokio", "tokio",
"tokio-rustls", "tokio-rustls",
"tokio-util", "tokio-util",
"webpki-roots 0.26.11", "webpki-roots 1.0.2",
] ]
[[package]] [[package]]
@@ -4823,12 +4939,42 @@ dependencies = [
"wintun", "wintun",
] ]
[[package]]
name = "typeid"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c"
[[package]] [[package]]
name = "typenum" name = "typenum"
version = "1.18.0" version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
[[package]]
name = "typetag"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be2212c8a9b9bcfca32024de14998494cf9a5dfa59ea1b829de98bac374b86bf"
dependencies = [
"erased-serde",
"inventory",
"once_cell",
"serde",
"typetag-impl",
]
[[package]]
name = "typetag-impl"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27a7a9b72ba121f6f1f6c3632b85604cac41aedb5ddc70accbebb6cac83de846"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]] [[package]]
name = "unicase" name = "unicase"
version = "2.8.1" version = "2.8.1"
+1 -1
View File
@@ -1,7 +1,7 @@
[package] [package]
name = "easytier-uptime" name = "easytier-uptime"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition.workspace = true
[dependencies] [dependencies]
tokio = { version = "1.0", features = ["full"] } tokio = { version = "1.0", features = ["full"] }
@@ -1,7 +1,7 @@
use std::ops::{Div, Mul}; use std::ops::{Div, Mul};
use axum::extract::{Path, State};
use axum::Json; use axum::Json;
use axum::extract::{Path, State};
use sea_orm::{ use sea_orm::{
ColumnTrait, Condition, EntityTrait, IntoActiveModel, ModelTrait, Order, PaginatorTrait, ColumnTrait, Condition, EntityTrait, IntoActiveModel, ModelTrait, Order, PaginatorTrait,
QueryFilter, QueryOrder, QuerySelect, Set, TryIntoModel, QueryFilter, QueryOrder, QuerySelect, Set, TryIntoModel,
@@ -14,7 +14,7 @@ use crate::api::{
models::*, models::*,
}; };
use crate::db::entity::{self, health_records, shared_nodes}; use crate::db::entity::{self, health_records, shared_nodes};
use crate::db::{operations::*, Db}; use crate::db::{Db, operations::*};
use crate::health_checker_manager::HealthCheckerManager; use crate::health_checker_manager::HealthCheckerManager;
use axum_extra::extract::Query; use axum_extra::extract::Query;
use std::sync::Arc; use std::sync::Arc;
@@ -273,7 +273,7 @@ pub struct InstanceFilterParams {
use crate::config::AppConfig; use crate::config::AppConfig;
use axum::http::{HeaderMap, StatusCode}; use axum::http::{HeaderMap, StatusCode};
use chrono::{Duration, Utc}; use chrono::{Duration, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation}; use jsonwebtoken::{DecodingKey, EncodingKey, Header, Validation, decode, encode};
use serde::Serialize; use serde::Serialize;
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
@@ -370,19 +370,19 @@ pub async fn admin_get_nodes(
let ids = NodeOperations::filter_node_ids_by_tag(&app_state.db, &tag).await?; let ids = NodeOperations::filter_node_ids_by_tag(&app_state.db, &tag).await?;
filtered_ids = Some(ids); filtered_ids = Some(ids);
} }
if let Some(tags) = filters.tags { if let Some(tags) = filters.tags
if !tags.is_empty() { && !tags.is_empty()
let ids_any = NodeOperations::filter_node_ids_by_tags_any(&app_state.db, &tags).await?; {
filtered_ids = match filtered_ids { let ids_any = NodeOperations::filter_node_ids_by_tags_any(&app_state.db, &tags).await?;
Some(mut existing) => { filtered_ids = match filtered_ids {
existing.extend(ids_any); Some(mut existing) => {
existing.sort(); existing.extend(ids_any);
existing.dedup(); existing.sort();
Some(existing) existing.dedup();
} Some(existing)
None => Some(ids_any), }
}; None => Some(ids_any),
} };
} }
if let Some(ids) = filtered_ids { if let Some(ids) = filtered_ids {
if ids.is_empty() { if ids.is_empty() {
@@ -1,5 +1,5 @@
use axum::routing::{delete, get, post, put};
use axum::Router; use axum::Router;
use axum::routing::{delete, get, post, put};
use tower_http::compression::CompressionLayer; use tower_http::compression::CompressionLayer;
use tower_http::cors::CorsLayer; use tower_http::cors::CorsLayer;
@@ -1,7 +1,7 @@
use crate::db::entity::*;
use crate::db::Db; use crate::db::Db;
use crate::db::entity::*;
use sea_orm::*; use sea_orm::*;
use tokio::time::{sleep, Duration}; use tokio::time::{Duration, sleep};
use tracing::{error, info, warn}; use tracing::{error, info, warn};
/// 数据清理策略配置 /// 数据清理策略配置
@@ -5,12 +5,12 @@ pub mod operations;
use std::fmt; use std::fmt;
use sea_orm::{ use sea_orm::{
prelude::*, sea_query::OnConflict, ColumnTrait as _, DatabaseConnection, DbErr, EntityTrait, ColumnTrait as _, DatabaseConnection, DbErr, EntityTrait, QueryFilter as _, Set,
QueryFilter as _, Set, SqlxSqliteConnector, Statement, TransactionTrait as _, SqlxSqliteConnector, Statement, TransactionTrait as _, prelude::*, sea_query::OnConflict,
}; };
use sea_orm_migration::MigratorTrait as _; use sea_orm_migration::MigratorTrait as _;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sqlx::{migrate::MigrateDatabase as _, Sqlite, SqlitePool}; use sqlx::{Sqlite, SqlitePool, migrate::MigrateDatabase as _};
use crate::migrator; use crate::migrator;
@@ -1,8 +1,8 @@
use crate::api::CreateNodeRequest; use crate::api::CreateNodeRequest;
use crate::db::entity::*;
use crate::db::Db; use crate::db::Db;
use crate::db::HealthStats; use crate::db::HealthStats;
use crate::db::HealthStatus; use crate::db::HealthStatus;
use crate::db::entity::*;
use sea_orm::*; use sea_orm::*;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
@@ -19,9 +19,9 @@ use sqlx::any;
use tracing::{debug, error, info, instrument, warn}; use tracing::{debug, error, info, instrument, warn};
use crate::db::{ use crate::db::{
Db, HealthStatus,
entity::shared_nodes, entity::shared_nodes,
operations::{HealthOperations, NodeOperations}, operations::{HealthOperations, NodeOperations},
Db, HealthStatus,
}; };
pub struct HealthCheckOneNode { pub struct HealthCheckOneNode {
@@ -1,11 +1,11 @@
use std::{collections::HashSet, sync::Arc, time::Duration}; use std::{collections::HashSet, sync::Arc, time::Duration};
use anyhow::Context as _; use anyhow::Context as _;
use tokio::time::{interval, Interval}; use tokio::time::{Interval, interval};
use tracing::{error, info}; use tracing::{error, info};
use crate::{ use crate::{
db::{entity::shared_nodes, operations::NodeOperations, Db}, db::{Db, entity::shared_nodes, operations::NodeOperations},
health_checker::HealthChecker, health_checker::HealthChecker,
}; };
+4 -2
View File
@@ -10,7 +10,7 @@ mod migrator;
use api::routes::create_routes; use api::routes::create_routes;
use clap::Parser; use clap::Parser;
use config::AppConfig; use config::AppConfig;
use db::{operations::NodeOperations, Db}; use db::{Db, operations::NodeOperations};
use easytier::common::log; use easytier::common::log;
use health_checker::HealthChecker; use health_checker::HealthChecker;
use health_checker_manager::HealthCheckerManager; use health_checker_manager::HealthCheckerManager;
@@ -49,7 +49,9 @@ async fn main() -> anyhow::Result<()> {
// 如果提供了管理员密码,设置环境变量 // 如果提供了管理员密码,设置环境变量
if let Some(password) = args.admin_password { if let Some(password) = args.admin_password {
env::set_var("ADMIN_PASSWORD", password); unsafe {
env::set_var("ADMIN_PASSWORD", password);
}
} }
tracing::info!( tracing::info!(
+1 -1
View File
@@ -1,7 +1,7 @@
{ {
"name": "easytier-gui", "name": "easytier-gui",
"type": "module", "type": "module",
"version": "2.5.0", "version": "2.6.0",
"private": true, "private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4", "packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": { "scripts": {
+10 -11
View File
@@ -1,9 +1,9 @@
[package] [package]
name = "easytier-gui" name = "easytier-gui"
version = "2.5.0" version = "2.6.0"
description = "EasyTier GUI" description = "EasyTier GUI"
authors = ["you"] authors = ["you"]
edition = "2021" edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,15 +11,6 @@ edition = "2021"
name = "app_lib" name = "app_lib"
crate-type = ["staticlib", "cdylib", "rlib"] crate-type = ["staticlib", "cdylib", "rlib"]
[build-dependencies]
tauri-build = { version = "2.0.0-rc", features = [] }
# enable thunk-rs when compiling for x86_64 or i686 windows
[target.x86_64-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
[target.i686-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
[dependencies] [dependencies]
# wry 0.47 may crash on android, see https://github.com/EasyTier/EasyTier/issues/527 # wry 0.47 may crash on android, see https://github.com/EasyTier/EasyTier/issues/527
@@ -66,6 +57,14 @@ libc = "0.2"
[target.'cfg(target_os = "macos")'.dependencies] [target.'cfg(target_os = "macos")'.dependencies]
security-framework-sys = "2.9.0" security-framework-sys = "2.9.0"
[build-dependencies]
tauri-build = { version = "2.0.0-rc", features = [] }
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = [
"win7",
] }
[features] [features]
# This feature is used for production builds or when a dev server is not specified, DO NOT REMOVE!! # This feature is used for production builds or when a dev server is not specified, DO NOT REMOVE!!
custom-protocol = ["tauri/custom-protocol"] custom-protocol = ["tauri/custom-protocol"]
+5 -5
View File
@@ -1,10 +1,10 @@
use std::env;
fn main() { fn main() {
let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap_or_default();
let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap_or_default();
// enable thunk-rs when target os is windows and arch is x86_64 or i686 // enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")] if target_os == "windows" && (target_arch == "x86" || target_arch == "x86_64") {
if !std::env::var("TARGET")
.unwrap_or_default()
.contains("aarch64")
{
thunk::thunk(); thunk::thunk();
} }
@@ -36,6 +36,7 @@
"core:tray:allow-set-show-menu-on-left-click", "core:tray:allow-set-show-menu-on-left-click",
"core:tray:allow-set-tooltip", "core:tray:allow-set-tooltip",
"vpnservice:allow-ping", "vpnservice:allow-ping",
"vpnservice:allow-get-vpn-status",
"vpnservice:allow-prepare-vpn", "vpnservice:allow-prepare-vpn",
"vpnservice:allow-start-vpn", "vpnservice:allow-start-vpn",
"vpnservice:allow-stop-vpn", "vpnservice:allow-stop-vpn",
+1 -1
View File
@@ -4,7 +4,7 @@
*--------------------------------------------------------------------------------------------*/ *--------------------------------------------------------------------------------------------*/
use super::Command; use super::Command;
use anyhow::{anyhow, Result}; use anyhow::{Result, anyhow};
use std::env; use std::env;
use std::ffi::OsStr; use std::ffi::OsStr;
use std::process::{Command as StdCommand, Output}; use std::process::{Command as StdCommand, Output};
+2 -2
View File
@@ -30,10 +30,10 @@ use std::os::unix::process::ExitStatusExt;
use std::path::Path; use std::path::Path;
use std::ptr; use std::ptr;
use libc::{fileno, wait, EINTR, SHUT_WR}; use libc::{EINTR, SHUT_WR, fileno, wait};
use security_framework_sys::authorization::{ use security_framework_sys::authorization::{
errAuthorizationSuccess, kAuthorizationFlagDefaults, kAuthorizationFlagDestroyRights,
AuthorizationCreate, AuthorizationExecuteWithPrivileges, AuthorizationFree, AuthorizationRef, AuthorizationCreate, AuthorizationExecuteWithPrivileges, AuthorizationFree, AuthorizationRef,
errAuthorizationSuccess, kAuthorizationFlagDefaults, kAuthorizationFlagDestroyRights,
}; };
const ENV_PATH: &str = "PATH"; const ENV_PATH: &str = "PATH";
@@ -11,11 +11,11 @@ use std::process::{ExitStatus, Output};
use winapi::shared::minwindef::{DWORD, LPVOID}; use winapi::shared::minwindef::{DWORD, LPVOID};
use winapi::um::processthreadsapi::{GetCurrentProcess, OpenProcessToken}; use winapi::um::processthreadsapi::{GetCurrentProcess, OpenProcessToken};
use winapi::um::securitybaseapi::GetTokenInformation; use winapi::um::securitybaseapi::GetTokenInformation;
use winapi::um::winnt::{TokenElevation, HANDLE, TOKEN_ELEVATION, TOKEN_QUERY}; use winapi::um::winnt::{HANDLE, TOKEN_ELEVATION, TOKEN_QUERY, TokenElevation};
use windows::core::{w, HSTRING, PCWSTR};
use windows::Win32::Foundation::HWND; use windows::Win32::Foundation::HWND;
use windows::Win32::UI::Shell::ShellExecuteW; use windows::Win32::UI::Shell::ShellExecuteW;
use windows::Win32::UI::WindowsAndMessaging::SW_HIDE; use windows::Win32::UI::WindowsAndMessaging::SW_HIDE;
use windows::core::{HSTRING, PCWSTR, w};
/// The implementation of state check and elevated executing varies on each platform /// The implementation of state check and elevated executing varies on each platform
impl Command { impl Command {
+56 -35
View File
@@ -21,10 +21,10 @@ use easytier::{
instance_manager::NetworkInstanceManager, instance_manager::NetworkInstanceManager,
launcher::NetworkConfig, launcher::NetworkConfig,
rpc_service::ApiRpcServer, rpc_service::ApiRpcServer,
tunnel::TunnelListener,
tunnel::ring::RingTunnelListener, tunnel::ring::RingTunnelListener,
tunnel::tcp::TcpTunnelListener, tunnel::tcp::TcpTunnelListener,
tunnel::TunnelListener, utils::panic::setup_panic_handler,
utils::{self},
}; };
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
@@ -206,6 +206,16 @@ async fn update_network_config_state(
.parse() .parse()
.map_err(|e: uuid::Error| e.to_string())?; .map_err(|e: uuid::Error| e.to_string())?;
let client_manager = get_client_manager!()?; let client_manager = get_client_manager!()?;
if !disabled {
let cfg = client_manager
.handle_get_network_config(app.clone(), instance_id)
.await
.map_err(|e| e.to_string())?;
let toml_config = cfg.gen_config().map_err(|e| e.to_string())?;
client_manager
.pre_run_network_instance_hook(&app, &toml_config)
.await?;
}
client_manager client_manager
.handle_update_network_state(app.clone(), instance_id, disabled) .handle_update_network_state(app.clone(), instance_id, disabled)
.await .await
@@ -215,6 +225,10 @@ async fn update_network_config_state(
client_manager client_manager
.post_stop_network_instances_hook(&app) .post_stop_network_instances_hook(&app)
.await?; .await?;
} else {
client_manager
.post_run_network_instance_hook(&app, &instance_id)
.await?;
} }
Ok(()) Ok(())
@@ -545,10 +559,10 @@ fn toggle_window_visibility(app: &tauri::AppHandle) {
} }
fn get_exe_path() -> String { fn get_exe_path() -> String {
if let Ok(appimage_path) = std::env::var("APPIMAGE") { if let Ok(appimage_path) = std::env::var("APPIMAGE")
if !appimage_path.is_empty() { && !appimage_path.is_empty()
return appimage_path; {
} return appimage_path;
} }
std::env::current_exe() std::env::current_exe()
.map(|p| p.to_string_lossy().to_string()) .map(|p| p.to_string_lossy().to_string())
@@ -582,8 +596,8 @@ mod manager {
use easytier::proto::rpc_types::controller::BaseController; use easytier::proto::rpc_types::controller::BaseController;
use easytier::rpc_service::logger::LoggerRpcService; use easytier::rpc_service::logger::LoggerRpcService;
use easytier::rpc_service::remote_client::PersistentConfig; use easytier::rpc_service::remote_client::PersistentConfig;
use easytier::tunnel::ring::RingTunnelConnector;
use easytier::tunnel::TunnelConnector; use easytier::tunnel::TunnelConnector;
use easytier::tunnel::ring::RingTunnelConnector;
use easytier::web_client::WebClientHooks; use easytier::web_client::WebClientHooks;
pub(super) struct GuiHooks { pub(super) struct GuiHooks {
@@ -830,7 +844,7 @@ mod manager {
cfg: &easytier::common::config::TomlConfigLoader, cfg: &easytier::common::config::TomlConfigLoader,
) -> Result<(), String> { ) -> Result<(), String> {
let instance_id = cfg.get_id(); let instance_id = cfg.get_id();
app.emit("pre_run_network_instance", instance_id) app.emit("pre_run_network_instance", instance_id.to_string())
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
#[cfg(target_os = "android")] #[cfg(target_os = "android")]
@@ -867,20 +881,21 @@ mod manager {
let app_clone = app.clone(); let app_clone = app.clone();
let instance_id_clone = *instance_id; let instance_id_clone = *instance_id;
tokio::spawn(async move { tokio::spawn(async move {
let instance_id_str = instance_id_clone.to_string();
loop { loop {
match event_receiver.recv().await { match event_receiver.recv().await {
Ok(easytier::common::global_ctx::GlobalCtxEvent::DhcpIpv4Changed(_, _)) => { Ok(easytier::common::global_ctx::GlobalCtxEvent::DhcpIpv4Changed(_, _)) => {
let _ = app_clone.emit("dhcp_ip_changed", instance_id_clone); let _ = app_clone.emit("dhcp_ip_changed", &instance_id_str);
} }
Ok(easytier::common::global_ctx::GlobalCtxEvent::ProxyCidrsUpdated(_, _)) => { Ok(easytier::common::global_ctx::GlobalCtxEvent::ProxyCidrsUpdated(_, _)) => {
let _ = app_clone.emit("proxy_cidrs_updated", instance_id_clone); let _ = app_clone.emit("proxy_cidrs_updated", &instance_id_str);
} }
Ok(_) => {} Ok(_) => {}
Err(tokio::sync::broadcast::error::RecvError::Closed) => { Err(tokio::sync::broadcast::error::RecvError::Closed) => {
break; break;
} }
Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => { Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => {
let _ = app_clone.emit("event_lagged", instance_id_clone); let _ = app_clone.emit("event_lagged", &instance_id_str);
event_receiver = event_receiver.resubscribe(); event_receiver = event_receiver.resubscribe();
} }
} }
@@ -892,7 +907,7 @@ mod manager {
self.storage.enabled_networks.insert(*instance_id); self.storage.enabled_networks.insert(*instance_id);
app.emit("post_run_network_instance", instance_id) app.emit("post_run_network_instance", instance_id.to_string())
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
Ok(()) Ok(())
@@ -964,28 +979,34 @@ mod manager {
.get_rpc_client(app.clone()) .get_rpc_client(app.clone())
.ok_or_else(|| anyhow::anyhow!("RPC client not found"))?; .ok_or_else(|| anyhow::anyhow!("RPC client not found"))?;
for id in enabled_networks { for id in enabled_networks {
if let Ok(uuid) = id.parse() { if let Ok(uuid) = id.parse()
if !self.storage.enabled_networks.contains(&uuid) { && !self.storage.enabled_networks.contains(&uuid)
let config = self {
.storage let config = self
.network_configs .storage
.get(&uuid) .network_configs
.map(|i| i.value().1.clone()); .get(&uuid)
if config.is_none() { .map(|i| i.value().1.clone());
continue; let Some(config) = config else {
} continue;
client };
.run_network_instance( let toml_config = config.gen_config()?;
BaseController::default(), self.pre_run_network_instance_hook(&app, &toml_config)
RunNetworkInstanceRequest { .await
inst_id: None, .map_err(|e| anyhow::anyhow!(e))?;
config, client
overwrite: false, .run_network_instance(
}, BaseController::default(),
) RunNetworkInstanceRequest {
.await?; inst_id: None,
self.storage.enabled_networks.insert(uuid); config: Some(config),
} overwrite: false,
},
)
.await?;
self.post_run_network_instance_hook(&app, &uuid)
.await
.map_err(|e| anyhow::anyhow!(e))?;
} }
} }
Ok(()) Ok(())
@@ -1099,7 +1120,7 @@ pub fn run_gui() -> std::process::ExitCode {
process::exit(0); process::exit(0);
} }
utils::setup_panic_handler(); setup_panic_handler();
let mut builder = tauri::Builder::default(); let mut builder = tauri::Builder::default();
+1 -1
View File
@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false "createUpdaterArtifacts": false
}, },
"productName": "easytier-gui", "productName": "easytier-gui",
"version": "2.5.0", "version": "2.6.0",
"identifier": "com.kkrainbow.easytier", "identifier": "com.kkrainbow.easytier",
"plugins": { "plugins": {
"shell": { "shell": {
+2
View File
@@ -93,6 +93,7 @@ declare global {
const shallowReadonly: typeof import('vue')['shallowReadonly'] const shallowReadonly: typeof import('vue')['shallowReadonly']
const shallowRef: typeof import('vue')['shallowRef'] const shallowRef: typeof import('vue')['shallowRef']
const storeToRefs: typeof import('pinia')['storeToRefs'] const storeToRefs: typeof import('pinia')['storeToRefs']
const syncMobileVpnService: typeof import('./composables/mobile_vpn')['syncMobileVpnService']
const toRaw: typeof import('vue')['toRaw'] const toRaw: typeof import('vue')['toRaw']
const toRef: typeof import('vue')['toRef'] const toRef: typeof import('vue')['toRef']
const toRefs: typeof import('vue')['toRefs'] const toRefs: typeof import('vue')['toRefs']
@@ -217,6 +218,7 @@ declare module 'vue' {
readonly shallowReadonly: UnwrapRef<typeof import('vue')['shallowReadonly']> readonly shallowReadonly: UnwrapRef<typeof import('vue')['shallowReadonly']>
readonly shallowRef: UnwrapRef<typeof import('vue')['shallowRef']> readonly shallowRef: UnwrapRef<typeof import('vue')['shallowRef']>
readonly storeToRefs: UnwrapRef<typeof import('pinia')['storeToRefs']> readonly storeToRefs: UnwrapRef<typeof import('pinia')['storeToRefs']>
readonly syncMobileVpnService: UnwrapRef<typeof import('./composables/mobile_vpn')['syncMobileVpnService']>
readonly toRaw: UnwrapRef<typeof import('vue')['toRaw']> readonly toRaw: UnwrapRef<typeof import('vue')['toRaw']>
readonly toRef: UnwrapRef<typeof import('vue')['toRef']> readonly toRef: UnwrapRef<typeof import('vue')['toRef']>
readonly toRefs: UnwrapRef<typeof import('vue')['toRefs']> readonly toRefs: UnwrapRef<typeof import('vue')['toRefs']>
+47 -14
View File
@@ -1,6 +1,7 @@
import { Event, listen } from "@tauri-apps/api/event"; import { Event, listen } from "@tauri-apps/api/event";
import { type } from "@tauri-apps/plugin-os"; import { type } from "@tauri-apps/plugin-os";
import { NetworkTypes } from "easytier-frontend-lib" import { NetworkTypes } from "easytier-frontend-lib"
import { Utils } from "easytier-frontend-lib";
const EVENTS = Object.freeze({ const EVENTS = Object.freeze({
SAVE_CONFIGS: 'save_configs', SAVE_CONFIGS: 'save_configs',
@@ -17,39 +18,71 @@ function onSaveConfigs(event: Event<NetworkTypes.NetworkConfig[]>) {
localStorage.setItem('networkList', JSON.stringify(event.payload.map((config) => NetworkTypes.normalizeNetworkConfig(config)))); localStorage.setItem('networkList', JSON.stringify(event.payload.map((config) => NetworkTypes.normalizeNetworkConfig(config))));
} }
async function onPreRunNetworkInstance(event: Event<string>) { function normalizeInstanceIdPayload(payload: unknown): string {
if (typeof payload === 'string') {
return payload
}
if (payload && typeof payload === 'object') {
const uuid = payload as Partial<Utils.UUID>
if (
typeof uuid.part1 === 'number'
&& typeof uuid.part2 === 'number'
&& typeof uuid.part3 === 'number'
&& typeof uuid.part4 === 'number'
) {
return Utils.UuidToStr(uuid as Utils.UUID)
}
}
if (payload == null) {
return ''
}
const fallback = String(payload)
return fallback === '[object Object]' ? '' : fallback
}
async function onPreRunNetworkInstance(event: Event<unknown>) {
const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.PRE_RUN_NETWORK_INSTANCE}', raw payload:`, event.payload, 'normalized:', instanceId)
if (type() === 'android') { if (type() === 'android') {
await prepareVpnService(event.payload); await prepareVpnService(instanceId);
} }
} }
async function onPostRunNetworkInstance(event: Event<string>) { async function onPostRunNetworkInstance(event: Event<unknown>) {
const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.POST_RUN_NETWORK_INSTANCE}', raw payload:`, event.payload, 'normalized:', instanceId)
if (type() === 'android') { if (type() === 'android') {
await onNetworkInstanceChange(event.payload); await onNetworkInstanceChange(instanceId);
} }
} }
async function onVpnServiceStop(event: Event<string>) { async function onVpnServiceStop(event: Event<unknown>) {
await onNetworkInstanceChange(event.payload); console.log(`Received event '${EVENTS.VPN_SERVICE_STOP}', raw payload:`, event.payload)
await syncMobileVpnService();
} }
async function onDhcpIpChanged(event: Event<string>) { async function onDhcpIpChanged(event: Event<unknown>) {
console.log(`Received event '${EVENTS.DHCP_IP_CHANGED}' for instance: ${event.payload}`); const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.DHCP_IP_CHANGED}' for instance: ${instanceId}`);
if (type() === 'android') { if (type() === 'android') {
await onNetworkInstanceChange(event.payload); await onNetworkInstanceChange(instanceId);
} }
} }
async function onProxyCidrsUpdated(event: Event<string>) { async function onProxyCidrsUpdated(event: Event<unknown>) {
console.log(`Received event '${EVENTS.PROXY_CIDRS_UPDATED}' for instance: ${event.payload}`); const instanceId = normalizeInstanceIdPayload(event.payload)
console.log(`Received event '${EVENTS.PROXY_CIDRS_UPDATED}' for instance: ${instanceId}`);
if (type() === 'android') { if (type() === 'android') {
await onNetworkInstanceChange(event.payload); await onNetworkInstanceChange(instanceId);
} }
} }
async function onEventLagged(event: Event<string>) { async function onEventLagged(event: Event<unknown>) {
if (type() === 'android') { if (type() === 'android') {
await onNetworkInstanceChange(event.payload); await onNetworkInstanceChange(normalizeInstanceIdPayload(event.payload));
} }
} }
+140 -26
View File
@@ -1,7 +1,7 @@
import type { NetworkTypes } from 'easytier-frontend-lib' import type { NetworkTypes } from 'easytier-frontend-lib'
import { addPluginListener } from '@tauri-apps/api/core' import { addPluginListener } from '@tauri-apps/api/core'
import { Utils } from 'easytier-frontend-lib' import { Utils } from 'easytier-frontend-lib'
import { prepare_vpn, start_vpn, stop_vpn } from 'tauri-plugin-vpnservice-api' import { get_vpn_status, prepare_vpn, start_vpn, stop_vpn } from 'tauri-plugin-vpnservice-api'
type Route = NetworkTypes.Route type Route = NetworkTypes.Route
@@ -24,6 +24,53 @@ const curVpnStatus: vpnStatus = {
dns: undefined, dns: undefined,
} }
async function requestVpnPermission() {
console.log('prepare vpn')
const prepare_ret = await prepare_vpn()
console.log('prepare vpn', JSON.stringify((prepare_ret)))
if (prepare_ret?.errorMsg?.length) {
throw new Error(prepare_ret.errorMsg)
}
const granted = prepare_ret?.granted ?? true
if (!granted) {
console.info('vpn permission request was denied or dismissed')
}
return granted
}
function resetVpnConfigStatus() {
curVpnStatus.ipv4Addr = undefined
curVpnStatus.ipv4Cidr = undefined
curVpnStatus.routes = []
curVpnStatus.dns = undefined
}
function syncVpnStatusFromNative(status: Awaited<ReturnType<typeof get_vpn_status>>) {
curVpnStatus.running = status?.running ?? false
if (!curVpnStatus.running) {
resetVpnConfigStatus()
return
}
const ipv4WithCidr = status?.ipv4Addr
if (ipv4WithCidr?.length) {
const [ipv4Addr, cidr] = ipv4WithCidr.split('/')
curVpnStatus.ipv4Addr = ipv4Addr
const parsedCidr = Number(cidr)
curVpnStatus.ipv4Cidr = Number.isInteger(parsedCidr) ? parsedCidr : undefined
}
else {
curVpnStatus.ipv4Addr = undefined
curVpnStatus.ipv4Cidr = undefined
}
curVpnStatus.routes = [...(status?.routes ?? [])]
curVpnStatus.dns = status?.dns ?? undefined
}
async function waitVpnStatus(target_status: boolean, timeout_sec: number) { async function waitVpnStatus(target_status: boolean, timeout_sec: number) {
const start_time = Date.now() const start_time = Date.now()
while (curVpnStatus.running !== target_status) { while (curVpnStatus.running !== target_status) {
@@ -34,18 +81,19 @@ async function waitVpnStatus(target_status: boolean, timeout_sec: number) {
} }
} }
async function doStopVpn() { async function doStopVpn(force = false) {
if (!curVpnStatus.running) { const wasRunning = curVpnStatus.running
if (!force && !wasRunning) {
return return
} }
console.log('stop vpn') console.log('stop vpn')
const stop_ret = await stop_vpn() const stop_ret = await stop_vpn()
console.log('stop vpn', JSON.stringify((stop_ret))) console.log('stop vpn', JSON.stringify((stop_ret)))
await waitVpnStatus(false, 3) if (wasRunning) {
await waitVpnStatus(false, 3)
}
curVpnStatus.ipv4Addr = undefined resetVpnConfigStatus()
curVpnStatus.routes = []
curVpnStatus.dns = undefined
} }
async function doStartVpn(ipv4Addr: string, cidr: number, routes: string[], dns?: string) { async function doStartVpn(ipv4Addr: string, cidr: number, routes: string[], dns?: string) {
@@ -54,19 +102,32 @@ async function doStartVpn(ipv4Addr: string, cidr: number, routes: string[], dns?
} }
console.log('start vpn service', ipv4Addr, cidr, routes, dns) console.log('start vpn service', ipv4Addr, cidr, routes, dns)
const start_ret = await start_vpn({ const request = {
ipv4Addr: `${ipv4Addr}/${cidr}`, ipv4Addr: `${ipv4Addr}/${cidr}`,
routes, routes,
dns, dns,
disallowedApplications: ['com.kkrainbow.easytier'], disallowedApplications: ['com.kkrainbow.easytier'],
mtu: 1300, mtu: 1300,
}) }
let start_ret = await start_vpn(request)
console.log('start vpn response', JSON.stringify(start_ret))
if (start_ret?.errorMsg === 'need_prepare') {
const granted = await requestVpnPermission()
if (!granted) {
throw new Error('vpn_permission_denied')
}
start_ret = await start_vpn(request)
console.log('start vpn retry response', JSON.stringify(start_ret))
}
if (start_ret?.errorMsg?.length) { if (start_ret?.errorMsg?.length) {
throw new Error(start_ret.errorMsg) throw new Error(start_ret.errorMsg)
} }
await waitVpnStatus(true, 3) await waitVpnStatus(true, 3)
curVpnStatus.ipv4Addr = ipv4Addr curVpnStatus.ipv4Addr = ipv4Addr
curVpnStatus.ipv4Cidr = cidr
curVpnStatus.routes = routes curVpnStatus.routes = routes
curVpnStatus.dns = dns curVpnStatus.dns = dns
} }
@@ -75,13 +136,16 @@ async function onVpnServiceStart(payload: any) {
console.log('vpn service start', JSON.stringify(payload)) console.log('vpn service start', JSON.stringify(payload))
curVpnStatus.running = true curVpnStatus.running = true
if (payload.fd) { if (payload.fd) {
setTunFd(payload.fd) await setTunFd(payload.fd).catch((e) => {
console.error('set tun fd failed', e)
})
} }
} }
async function onVpnServiceStop(payload: any) { async function onVpnServiceStop(payload: any) {
console.log('vpn service stop', JSON.stringify(payload)) console.log('vpn service stop', JSON.stringify(payload))
curVpnStatus.running = false curVpnStatus.running = false
resetVpnConfigStatus()
} }
async function registerVpnServiceListener() { async function registerVpnServiceListener() {
@@ -135,15 +199,25 @@ export async function onNetworkInstanceChange(instanceId: string) {
} }
if (!instanceId) { if (!instanceId) {
await doStopVpn() console.warn('vpn service skipped because instance id is empty')
if (curVpnStatus.running) {
await doStopVpn()
}
return return
} }
const config = await getConfig(instanceId) const config = await getConfig(instanceId)
console.log('vpn service loaded config', instanceId, JSON.stringify({
no_tun: config.no_tun,
dhcp: config.dhcp,
enable_magic_dns: config.enable_magic_dns,
}))
if (config.no_tun) { if (config.no_tun) {
console.log('vpn service skipped because no_tun is enabled', instanceId)
return return
} }
const curNetworkInfo = (await collectNetworkInfo(instanceId)).info.map[instanceId] const curNetworkInfo = (await collectNetworkInfo(instanceId)).info.map[instanceId]
if (!curNetworkInfo || curNetworkInfo?.error_msg?.length) { if (!curNetworkInfo || curNetworkInfo?.error_msg?.length) {
console.warn('vpn service skipped because network info is unavailable', instanceId, curNetworkInfo?.error_msg)
await doStopVpn() await doStopVpn()
return return
} }
@@ -170,27 +244,39 @@ export async function onNetworkInstanceChange(instanceId: string) {
const routes = getRoutesForVpn(curNetworkInfo?.routes, config) const routes = getRoutesForVpn(curNetworkInfo?.routes, config)
const dns = config.enable_magic_dns ? '100.100.100.101' : undefined; const dns = config.enable_magic_dns ? '100.100.100.101' : undefined
const ipChanged = virtual_ip !== curVpnStatus.ipv4Addr const ipChanged = virtual_ip !== curVpnStatus.ipv4Addr
const cidrChanged = network_length !== curVpnStatus.ipv4Cidr
const routesChanged = JSON.stringify(routes) !== JSON.stringify(curVpnStatus.routes) const routesChanged = JSON.stringify(routes) !== JSON.stringify(curVpnStatus.routes)
const dnsChanged = dns != curVpnStatus.dns const dnsChanged = dns != curVpnStatus.dns
const configChanged = ipChanged || cidrChanged || routesChanged || dnsChanged
const shouldStartVpn = !curVpnStatus.running
if (ipChanged || routesChanged || dnsChanged) { if (shouldStartVpn || configChanged) {
console.info('vpn service virtual ip changed', JSON.stringify(curVpnStatus), virtual_ip) console.info('vpn service virtual ip changed', JSON.stringify(curVpnStatus), virtual_ip)
try { if (curVpnStatus.running) {
await doStopVpn() try {
} await doStopVpn()
catch (e) { }
console.error(e) catch (e) {
console.error(e)
}
} }
try { try {
await doStartVpn(virtual_ip, network_length, routes, dns) await doStartVpn(virtual_ip, network_length, routes, dns)
} }
catch (e) { catch (e) {
console.error('start vpn service failed, stop all other network insts.', e) if (e instanceof Error && e.message === 'need_prepare') {
await runNetworkInstance(config, true); //on android config should always be saved console.info('vpn permission is required before starting the Android VPN service')
return
}
if (e instanceof Error && e.message === 'vpn_permission_denied') {
console.info('vpn permission request was denied or dismissed')
return
}
console.error('start vpn service failed', e)
} }
} }
} }
@@ -202,6 +288,22 @@ async function isNoTunEnabled(instanceId: string | undefined) {
return (await getConfig(instanceId)).no_tun ?? false return (await getConfig(instanceId)).no_tun ?? false
} }
async function findRunningTunInstanceId() {
const instanceIds = await listNetworkInstanceIds()
const runningIds = instanceIds.running_inst_ids.map(Utils.UuidToStr)
console.log('vpn service sync running instances', JSON.stringify(runningIds))
for (const instanceId of runningIds) {
if (await isNoTunEnabled(instanceId)) {
continue
}
return instanceId
}
return undefined
}
export async function initMobileVpnService() { export async function initMobileVpnService() {
await registerVpnServiceListener() await registerVpnServiceListener()
} }
@@ -210,10 +312,22 @@ export async function prepareVpnService(instanceId: string) {
if (await isNoTunEnabled(instanceId)) { if (await isNoTunEnabled(instanceId)) {
return return
} }
console.log('prepare vpn') await requestVpnPermission()
const prepare_ret = await prepare_vpn() }
console.log('prepare vpn', JSON.stringify((prepare_ret)))
if (prepare_ret?.errorMsg?.length) { export async function syncMobileVpnService() {
throw new Error(prepare_ret.errorMsg) syncVpnStatusFromNative(await get_vpn_status())
} const instanceId = await findRunningTunInstanceId()
if (instanceId) {
console.log('vpn service sync selected instance', instanceId)
await onNetworkInstanceChange(instanceId)
return
}
if (dhcpPollingTimer) {
clearTimeout(dhcpPollingTimer)
dhcpPollingTimer = null
}
await doStopVpn(true)
} }
+19 -18
View File
@@ -9,6 +9,7 @@ import { exit } from '@tauri-apps/plugin-process'
import { I18nUtils, RemoteManagement, Utils } from "easytier-frontend-lib" import { I18nUtils, RemoteManagement, Utils } from "easytier-frontend-lib"
import type { MenuItem } from 'primevue/menuitem' import type { MenuItem } from 'primevue/menuitem'
import { useTray } from '~/composables/tray' import { useTray } from '~/composables/tray'
import { initMobileVpnService } from '~/composables/mobile_vpn'
import { GUIRemoteClient } from '~/modules/api' import { GUIRemoteClient } from '~/modules/api'
import { useToast, useConfirm } from 'primevue' import { useToast, useConfirm } from 'primevue'
@@ -189,9 +190,25 @@ async function initWithMode(mode: Mode) {
clientRunning.value = await isClientRunning() clientRunning.value = await isClientRunning()
} }
onMounted(() => { onMounted(async () => {
const cleanupFns: Array<() => void> = []
if (type() === 'android') {
try {
await initMobileVpnService()
console.error("easytier init vpn service done")
} catch (e: any) {
console.error("easytier init vpn service failed", e)
}
}
cleanupFns.push(await listenGlobalEvents())
currentMode.value = loadMode() currentMode.value = loadMode()
initWithMode(currentMode.value); await initWithMode(currentMode.value);
onUnmounted(() => {
cleanupFns.forEach(unlisten => unlisten())
})
}); });
useTray(true) useTray(true)
@@ -347,22 +364,6 @@ async function connectRpcClient(isNormalMode: boolean, url?: string) {
console.log("easytier rpc connection established, isNormalMode: ", isNormalMode) console.log("easytier rpc connection established, isNormalMode: ", isNormalMode)
} }
onMounted(async () => {
if (type() === 'android') {
try {
await initMobileVpnService()
console.error("easytier init vpn service done")
} catch (e: any) {
console.error("easytier init vpn service failed", e)
}
}
const unlisten = await listenGlobalEvents()
onUnmounted(() => {
unlisten()
})
})
async function openConfigServerDialog() { async function openConfigServerDialog() {
editingMode.value = JSON.parse(JSON.stringify(loadMode())) editingMode.value = JSON.parse(JSON.stringify(loadMode()))
configServerDialogVisible.value = true configServerDialogVisible.value = true
+1 -2
View File
@@ -2,13 +2,12 @@
name = "easytier-rpc-build" name = "easytier-rpc-build"
description = "Protobuf RPC Service Generator for EasyTier" description = "Protobuf RPC Service Generator for EasyTier"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition.workspace = true
homepage = "https://github.com/EasyTier/EasyTier" homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier" repository = "https://github.com/EasyTier/EasyTier"
authors = ["kkrainbow"] authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"] keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"] categories = ["network-programming", "command-line-utilities"]
rust-version = "1.93.0"
license-file = "LICENSE" license-file = "LICENSE"
readme = "README.md" readme = "README.md"
+7 -9
View File
@@ -1,7 +1,7 @@
[package] [package]
name = "easytier-web" name = "easytier-web"
version = "2.5.0" version = "2.6.0"
edition = "2021" edition.workspace = true
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server." description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
[dependencies] [dependencies]
@@ -69,13 +69,11 @@ subtle = "2.6"
mimalloc = { version = "*" } mimalloc = { version = "*" }
[build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = [
"win7",
] }
[features] [features]
default = [] default = []
embed = ["dep:axum-embed"] embed = ["dep:axum-embed"]
# enable thunk-rs when compiling for x86_64 or i686 windows
[target.x86_64-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
[target.i686-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
+5 -5
View File
@@ -1,10 +1,10 @@
use std::env;
fn main() { fn main() {
let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap_or_default();
let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap_or_default();
// enable thunk-rs when target os is windows and arch is x86_64 or i686 // enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")] if target_os == "windows" && (target_arch == "x86" || target_arch == "x86_64") {
if !std::env::var("TARGET")
.unwrap_or_default()
.contains("aarch64")
{
thunk::thunk(); thunk::thunk();
} }
} }
@@ -1,7 +1,7 @@
<script setup lang="ts"> <script setup lang="ts">
import { AutoComplete, Button, Checkbox, Dialog, Divider, InputNumber, InputText, Panel, Password, SelectButton, ToggleButton } from 'primevue'
import InputGroup from 'primevue/inputgroup' import InputGroup from 'primevue/inputgroup'
import InputGroupAddon from 'primevue/inputgroupaddon' import InputGroupAddon from 'primevue/inputgroupaddon'
import { Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button, Password, Dialog } from 'primevue'
import { import {
addRow, addRow,
DEFAULT_NETWORK_CONFIG, DEFAULT_NETWORK_CONFIG,
@@ -11,6 +11,7 @@ import {
} from '../types/network' } from '../types/network'
import { ref, onMounted, onUnmounted, watch } from 'vue' import { ref, onMounted, onUnmounted, watch } from 'vue'
import { useI18n } from 'vue-i18n' import { useI18n } from 'vue-i18n'
import AclManager from './acl/AclManager.vue'
import UrlListInput from './UrlListInput.vue' import UrlListInput from './UrlListInput.vue'
const props = defineProps<{ const props = defineProps<{
@@ -209,7 +210,8 @@ watch(() => curNetwork.value, syncNormalizedNetwork, { immediate: true, deep: fa
</div> </div>
<div class="items-center flex flex-col p-fluid gap-y-2"> <div class="items-center flex flex-col p-fluid gap-y-2">
<UrlListInput id="initial_nodes" v-model="curNetwork.peer_urls" :protos="protos" <UrlListInput id="initial_nodes" v-model="curNetwork.peer_urls" :protos="protos"
:add-label="t('add_initial_node')" :placeholder="t('initial_node_placeholder')" /> defaultUrl="tcp://:11010" :add-label="t('add_initial_node')"
:placeholder="t('initial_node_placeholder')" />
</div> </div>
</div> </div>
</div> </div>
@@ -305,6 +307,19 @@ watch(() => curNetwork.value, syncNormalizedNetwork, { immediate: true, deep: fa
</div> </div>
</div> </div>
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex">
<label for="instance_recv_bps_limit">{{ t('instance_recv_bps_limit') }}</label>
<span class="pi pi-question-circle ml-2 self-center"
v-tooltip="t('instance_recv_bps_limit_help')"></span>
</div>
<InputNumber id="instance_recv_bps_limit" v-model="curNetwork.instance_recv_bps_limit"
aria-describedby="instance_recv_bps_limit-help" :format="false"
:placeholder="t('instance_recv_bps_limit_placeholder')" :min="1" fluid />
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap"> <div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow"> <div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex"> <div class="flex">
@@ -474,6 +489,18 @@ watch(() => curNetwork.value, syncNormalizedNetwork, { immediate: true, deep: fa
</div> </div>
</Panel> </Panel>
<Divider />
<Panel :header="t('acl.title')" toggleable collapsed>
<div v-if="curNetwork.acl" class="flex flex-col gap-y-2">
<AclManager v-model="curNetwork.acl" />
</div>
<div v-else class="flex justify-center p-4">
<Button :label="t('acl.enabled')"
@click="curNetwork.acl = { acl_v1: { chains: [], group: { declares: [], members: [] } } }" />
</div>
</Panel>
<div class="flex pt-6 justify-center"> <div class="flex pt-6 justify-center">
<Button :label="t('run_network')" icon="pi pi-arrow-right" icon-pos="right" :disabled="configInvalid" <Button :label="t('run_network')" icon="pi pi-arrow-right" icon-pos="right" :disabled="configInvalid"
@click="$emit('runNetwork', curNetwork)" /> @click="$emit('runNetwork', curNetwork)" />
@@ -15,6 +15,7 @@ const url = defineModel<string>({ required: true })
const editing = ref(false) const editing = ref(false)
const container = ref<HTMLElement | null>(null) const container = ref<HTMLElement | null>(null)
const internalCompact = ref(false) const internalCompact = ref(false)
const hostFocused = ref(false)
onMounted(() => { onMounted(() => {
if (container.value) { if (container.value) {
@@ -36,36 +37,86 @@ const parseUrl = (val: string | null | undefined) => {
const p = parseInt(portStr) const p = parseInt(portStr)
return isNaN(p) ? (props.protos[proto] ?? 11010) : p return isNaN(p) ? (props.protos[proto] ?? 11010) : p
} }
const parseByPattern = (input: string) => {
const trimmed = input.trim()
if (!trimmed) {
return null
}
const match = trimmed.match(/^(\w+):\/\/(.*)$/)
const proto = match ? match[1] : 'tcp'
const rest = match ? match[2] : trimmed
const authority = rest.split(/[/?#]/)[0]
if (!authority) {
return null
}
const hostAndMaybePort = authority.includes('@') ? authority.slice(authority.lastIndexOf('@') + 1) : authority
if (hostAndMaybePort.startsWith('[')) {
const ipv6End = hostAndMaybePort.indexOf(']')
if (ipv6End > 0) {
const host = hostAndMaybePort.slice(0, ipv6End + 1)
const remain = hostAndMaybePort.slice(ipv6End + 1)
const port = remain.startsWith(':') ? getValidPort(remain.slice(1), proto) : (props.protos[proto] ?? 11010)
return { proto, host, port }
}
}
const portMatch = hostAndMaybePort.match(/^(.*):(\d+)$/)
const host = portMatch ? portMatch[1] : hostAndMaybePort
const port = portMatch ? parseInt(portMatch[2]) : (props.protos[proto] ?? 11010)
return { proto, host, port }
}
if (!val) { if (!val) {
return { proto: 'tcp', host: '', port: props.protos['tcp'] ?? 11010 } return { proto: 'tcp', host: '', port: props.protos['tcp'] ?? 11010 }
} }
try { const parsedByPattern = parseByPattern(val)
const urlObj = new URL(val) if (parsedByPattern) {
const proto = urlObj.protocol.replace(':', '') return parsedByPattern
return {
proto: proto,
host: urlObj.hostname,
port: getValidPort(urlObj.port, proto)
}
} catch (e) {
// Fallback for incomplete or invalid URLs
const match = val.match(/^(\w+):\/\/(.*)$/)
if (match) {
const proto = match[1]
const rest = match[2]
const portMatch = rest.match(/:(\d+)$/)
return {
proto,
host: portMatch ? rest.slice(0, portMatch.index) : rest,
port: portMatch ? parseInt(portMatch[1]) : (props.protos[proto] ?? 11010)
}
}
return { proto: 'tcp', host: '', port: 11010 }
} }
return { proto: 'tcp', host: '', port: 11010 }
} }
const internalValue = ref(parseUrl(url.value)) const internalValue = ref(parseUrl(url.value))
const defaultHost = '0.0.0.0'
const buildUrlValue = (value: { proto: string, host: string, port: number }, forceDefaultHost = false) => {
const proto = value.proto || 'tcp'
const rawHost = (value.host ?? '').trim()
const host = rawHost || (forceDefaultHost ? defaultHost : '')
if (!host) {
return null
}
let port = value.port
if (isNaN(parseInt(port as any))) {
port = props.protos[proto] ?? 11010
}
if (props.protos[proto] === 0) {
return `${proto}://${host}`
}
return `${proto}://${host}:${port}`
}
const syncUrlFromInternal = (forceDefaultHost = false) => {
const nextUrl = buildUrlValue(internalValue.value, forceDefaultHost)
if (!nextUrl || nextUrl === url.value) {
return
}
url.value = nextUrl
}
const onHostBlur = () => {
hostFocused.value = false
syncUrlFromInternal(true)
}
const onHostFocus = () => {
hostFocused.value = true
}
const onDialogConfirm = () => {
syncUrlFromInternal(true)
editing.value = false
}
const isNoPortProto = computed(() => { const isNoPortProto = computed(() => {
return props.protos[internalValue.value.proto] === 0 return props.protos[internalValue.value.proto] === 0
@@ -73,28 +124,22 @@ const isNoPortProto = computed(() => {
// Sync from external // Sync from external
watch(() => url.value, (newVal) => { watch(() => url.value, (newVal) => {
if (hostFocused.value) {
return
}
const parsed = parseUrl(newVal) const parsed = parseUrl(newVal)
const internalHost = internalValue.value.host ?? ''
const sameHost = parsed.host === internalHost || (!internalHost.trim() && parsed.host === defaultHost)
if (parsed.proto !== internalValue.value.proto || if (parsed.proto !== internalValue.value.proto ||
parsed.host !== internalValue.value.host || !sameHost ||
parsed.port !== internalValue.value.port) { parsed.port !== internalValue.value.port) {
internalValue.value = parsed internalValue.value = parsed
} }
}) })
// Sync to external // Sync to external
watch(internalValue, (newVal) => { watch(internalValue, () => {
const proto = newVal.proto || 'tcp' syncUrlFromInternal(false)
const host = newVal.host || '0.0.0.0'
let port = newVal.port
if (isNaN(parseInt(port as any))) {
port = props.protos[proto] ?? 11010
}
if (props.protos[proto] === 0) {
url.value = `${proto}://${host}`
} else {
url.value = `${proto}://${host}:${port}`
}
}, { deep: true }) }, { deep: true })
const protoOptions = computed(() => Object.keys(props.protos)) const protoOptions = computed(() => Object.keys(props.protos))
@@ -128,7 +173,8 @@ const onProtoChange = (newProto: string) => {
<AutoComplete :model-value="internalValue.proto" :suggestions="filteredProtos" dropdown <AutoComplete :model-value="internalValue.proto" :suggestions="filteredProtos" dropdown
class="max-w-32 proto-autocomplete-in-group" @complete="searchProtos" class="max-w-32 proto-autocomplete-in-group" @complete="searchProtos"
@update:model-value="onProtoChange" /> @update:model-value="onProtoChange" />
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="grow" /> <InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="grow"
@focus="onHostFocus" @blur="onHostBlur" />
<template v-if="!isNoPortProto"> <template v-if="!isNoPortProto">
<InputGroupAddon> <InputGroupAddon>
<span style="font-weight: bold">:</span> <span style="font-weight: bold">:</span>
@@ -156,7 +202,8 @@ const onProtoChange = (newProto: string) => {
</div> </div>
<div class="flex flex-col gap-2"> <div class="flex flex-col gap-2">
<label>{{ t('web.common.address') || 'Address' }}</label> <label>{{ t('web.common.address') || 'Address' }}</label>
<InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="w-full" /> <InputText v-model="internalValue.host" :placeholder="placeholder || '0.0.0.0'" class="w-full"
@focus="onHostFocus" @blur="onHostBlur" />
</div> </div>
<div v-if="!isNoPortProto" class="flex flex-col gap-2"> <div v-if="!isNoPortProto" class="flex flex-col gap-2">
<label>{{ t('port') }}</label> <label>{{ t('port') }}</label>
@@ -164,7 +211,7 @@ const onProtoChange = (newProto: string) => {
</div> </div>
</div> </div>
<template #footer> <template #footer>
<Button :label="t('web.common.confirm') || 'Done'" icon="pi pi-check" @click="editing = false" <Button :label="t('web.common.confirm') || 'Done'" icon="pi pi-check" @click="onDialogConfirm"
autofocus /> autofocus />
</template> </template>
</Dialog> </Dialog>
@@ -0,0 +1,218 @@
<script setup lang="ts">
import { Button, Column, DataTable, Divider, InputText, Select, SelectButton, ToggleButton } from 'primevue'
import { ref, watch } from 'vue'
import { useI18n } from 'vue-i18n'
import { AclAction, AclChain, AclChainType, AclProtocol, AclRule } from '../../types/network'
import AclRuleDialog from './AclRuleDialog.vue'
const props = defineProps<{
groupNames?: string[]
}>()
const chain = defineModel<AclChain>({ required: true })
const { t } = useI18n()
watch(() => chain.value.rules, (newRules) => {
if (!newRules) return
const isSorted = newRules.every((rule, i) => i === 0 || (rule.priority || 0) <= (newRules[i - 1].priority || 0))
if (!isSorted) {
chain.value.rules.sort((a, b) => (b.priority || 0) - (a.priority || 0))
}
}, { deep: true, immediate: true })
const actionOptions = [
{ label: () => t('acl.allow'), value: AclAction.Allow },
{ label: () => t('acl.drop'), value: AclAction.Drop },
]
const chainTypeOptions = [
{ label: () => t('acl.inbound'), value: AclChainType.Inbound },
{ label: () => t('acl.outbound'), value: AclChainType.Outbound },
{ label: () => t('acl.forward'), value: AclChainType.Forward },
]
const editingRule = ref<AclRule | null>(null)
const editingRuleIndex = ref(-1)
const showRuleDialog = ref(false)
function getProtocolLabel(proto: AclProtocol) {
switch (proto) {
case AclProtocol.Any: return t('acl.any')
case AclProtocol.TCP: return 'TCP'
case AclProtocol.UDP: return 'UDP'
case AclProtocol.ICMP: return 'ICMP'
case AclProtocol.ICMPv6: return 'ICMPv6'
default: return t('event.Unknown')
}
}
function getActionLabel(action: AclAction) {
switch (action) {
case AclAction.Allow: return t('acl.allow')
case AclAction.Drop: return t('acl.drop')
default: return t('event.Unknown')
}
}
function addRule() {
editingRuleIndex.value = -1
editingRule.value = {
name: '',
description: '',
priority: chain.value.rules.length,
enabled: true,
protocol: AclProtocol.Any,
ports: [],
source_ips: [],
destination_ips: [],
source_ports: [],
action: AclAction.Allow,
rate_limit: 0,
burst_limit: 0,
stateful: false,
source_groups: [],
destination_groups: [],
}
showRuleDialog.value = true
}
function editRule(index: number) {
editingRuleIndex.value = index
editingRule.value = JSON.parse(JSON.stringify(chain.value.rules[index]))
showRuleDialog.value = true
}
function deleteRule(index: number) {
chain.value.rules.splice(index, 1)
}
function saveRule(rule: AclRule) {
if (editingRuleIndex.value === -1) {
chain.value.rules.push(rule)
} else {
chain.value.rules[editingRuleIndex.value] = rule
}
chain.value.rules.sort((a, b) => (b.priority || 0) - (a.priority || 0))
}
function onRowReorder(event: any) {
chain.value.rules = event.value
// Update priorities based on new order (higher priority at top)
chain.value.rules.forEach((rule, index) => {
rule.priority = chain.value.rules.length - index - 1
})
}
</script>
<template>
<div class="flex flex-col gap-6">
<!-- Chain Metadata Section -->
<div
class="grid grid-cols-1 md:grid-cols-2 gap-4 p-4 bg-gray-50 rounded-lg border border-gray-200 dark:bg-gray-900 dark:border-gray-700">
<div class="flex flex-col gap-2">
<label class="font-bold text-sm">{{ t('acl.chain.name') }}</label>
<InputText v-model="chain.name" size="small" />
</div>
<div class="flex flex-col gap-2">
<label class="font-bold text-sm">{{ t('acl.rule.description') }}</label>
<InputText v-model="chain.description" size="small" />
</div>
<div class="flex items-center gap-6 col-span-full border-t pt-2 mt-2 dark:border-gray-700">
<div class="flex items-center gap-2">
<label class="font-bold text-sm">{{ t('acl.rule.enabled') }}</label>
<ToggleButton v-model="chain.enabled" on-icon="pi pi-check" off-icon="pi pi-times"
:on-label="t('web.common.enable')" :off-label="t('web.common.disable')" class="w-24" />
</div>
<div class="flex items-center gap-2">
<label class="font-bold text-sm">{{ t('acl.chain.type') }}</label>
<Select v-model="chain.chain_type" :options="chainTypeOptions" :option-label="opt => opt.label()"
option-value="value" size="small" class="w-40" />
</div>
<div class="flex items-center gap-2 ml-auto">
<label class="font-bold text-sm">{{ t('acl.default_action') }}</label>
<SelectButton v-model="chain.default_action" :options="actionOptions" :option-label="opt => opt.label()"
option-value="value" :allow-empty="false" />
</div>
</div>
</div>
<div class="flex flex-row items-center gap-4 justify-between">
<h4 class="text-md font-bold">{{ t('acl.rules') }}</h4>
<Button icon="pi pi-plus" :label="t('acl.add_rule')" severity="success" size="small" @click="addRule" />
</div>
<DataTable :value="chain.rules" @row-reorder="onRowReorder" responsiveLayout="scroll">
<Column rowReorder headerStyle="width: 3rem" />
<Column field="enabled" :header="t('acl.rule.enabled')">
<template #body="{ data }">
<i class="pi" :class="data.enabled ? 'pi-check-circle text-green-500' : 'pi-times-circle text-red-500'"></i>
</template>
</Column>
<Column field="name" :header="t('acl.rule.name')" />
<Column :header="t('acl.match')">
<template #body="{ data }">
<div class="flex flex-col gap-2 py-1">
<div class="flex items-center gap-2">
<span
class="px-2 py-0.5 bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-400 rounded-md text-[10px] font-bold uppercase tracking-wider">
{{ getProtocolLabel(data.protocol) }}
</span>
</div>
<div class="flex flex-col sm:flex-row sm:items-center gap-1 sm:gap-3">
<div class="flex items-center gap-1.5 min-w-0">
<span class="text-[10px] font-bold text-gray-400 uppercase w-7">Src</span>
<div class="flex flex-wrap gap-1 items-center overflow-hidden">
<span v-for="ip in data.source_ips" :key="ip"
class="font-mono text-xs bg-surface-100 dark:bg-surface-800 px-1.5 py-0.5 rounded">{{ ip }}</span>
<span v-for="grp in data.source_groups" :key="grp"
class="text-xs font-bold text-purple-600 dark:text-purple-400">@{{ grp }}</span>
<span v-if="data.source_ports.length" class="text-xs text-blue-600 dark:text-blue-400 font-mono">:{{
data.source_ports.join(',') }}</span>
<span v-if="!data.source_ips.length && !data.source_groups.length" class="text-gray-400">*</span>
</div>
</div>
<i class="pi pi-arrow-right hidden sm:block text-gray-300 text-xs"></i>
<Divider layout="horizontal" class="sm:hidden my-1" />
<div class="flex items-center gap-1.5 min-w-0">
<span class="text-[10px] font-bold text-gray-400 uppercase w-7">Dst</span>
<div class="flex flex-wrap gap-1 items-center overflow-hidden">
<span v-for="ip in data.destination_ips" :key="ip"
class="font-mono text-xs bg-surface-100 dark:bg-surface-800 px-1.5 py-0.5 rounded">{{ ip }}</span>
<span v-for="grp in data.destination_groups" :key="grp"
class="text-xs font-bold text-purple-600 dark:text-purple-400">@{{ grp }}</span>
<span v-if="data.ports.length" class="text-xs text-blue-600 dark:text-blue-400 font-mono">:{{
data.ports.join(',') }}</span>
<span v-if="!data.destination_ips.length && !data.destination_groups.length"
class="text-gray-400">*</span>
</div>
</div>
</div>
</div>
</template>
</Column>
<Column field="action" :header="t('acl.rule.action')">
<template #body="{ data }">
<span :class="data.action === AclAction.Allow ? 'text-green-600' : 'text-red-600 font-bold'">
{{ getActionLabel(data.action) }}
</span>
</template>
</Column>
<Column :header="t('web.common.edit')">
<template #body="{ index }">
<div class="flex gap-2">
<Button icon="pi pi-pencil" text rounded @click="editRule(index)" />
<Button icon="pi pi-trash" severity="danger" text rounded @click="deleteRule(index)" />
</div>
</template>
</Column>
</DataTable>
<AclRuleDialog v-if="showRuleDialog && editingRule" v-model:visible="showRuleDialog" v-model:rule="editingRule"
:group-names="props.groupNames" @save="saveRule" />
</div>
</template>
@@ -0,0 +1,115 @@
<script setup lang="ts">
import { Button, Column, DataTable, Dialog, InputText, MultiSelect, Password } from 'primevue';
import { ref } from 'vue';
import { useI18n } from 'vue-i18n';
import { GroupIdentity, GroupInfo } from '../../types/network';
const props = defineProps<{
groupNames?: string[]
}>()
const group = defineModel<GroupInfo>({ required: true })
const emit = defineEmits(['rename-group'])
const { t } = useI18n()
const editingGroup = ref<GroupIdentity | null>(null)
const editingGroupIndex = ref(-1)
const showGroupDialog = ref(false)
const oldGroupName = ref('')
function addGroup() {
editingGroupIndex.value = -1
editingGroup.value = {
group_name: '',
group_secret: '',
}
oldGroupName.value = ''
showGroupDialog.value = true
}
function editGroup(index: number) {
editingGroupIndex.value = index
editingGroup.value = JSON.parse(JSON.stringify(group.value.declares[index]))
oldGroupName.value = editingGroup.value?.group_name || ''
showGroupDialog.value = true
}
function deleteGroup(index: number) {
group.value.declares.splice(index, 1)
}
function saveGroup() {
if (!editingGroup.value) return
const newName = editingGroup.value.group_name
if (editingGroupIndex.value === -1) {
group.value.declares.push(editingGroup.value)
} else {
if (oldGroupName.value && oldGroupName.value !== newName) {
// Sync in members
group.value.members = group.value.members.map(m => m === oldGroupName.value ? newName : m)
// Notify parent to sync in rules
emit('rename-group', { oldName: oldGroupName.value, newName })
}
group.value.declares[editingGroupIndex.value] = editingGroup.value
}
showGroupDialog.value = false
}
</script>
<template>
<div class="flex flex-col gap-6">
<div class="flex flex-col gap-2">
<div class="flex justify-between items-center">
<div class="flex flex-col">
<label class="font-bold text-lg">{{ t('acl.group.declares') }}</label>
<small class="text-gray-500">{{ t('acl.group.help') }}</small>
</div>
<Button icon="pi pi-plus" :label="t('web.common.add')" severity="success" @click="addGroup" />
</div>
<DataTable :value="group.declares" responsiveLayout="scroll">
<Column field="group_name" :header="t('acl.group.name')" />
<Column field="group_secret" :header="t('acl.group.secret')">
<template #body="{ data }">
<Password v-model="data.group_secret" :feedback="false" toggleMask readonly plain class="w-full" />
</template>
</Column>
<Column :header="t('web.common.edit')" headerStyle="width: 8rem">
<template #body="{ index }">
<div class="flex gap-2">
<Button icon="pi pi-pencil" text rounded @click="editGroup(index)" />
<Button icon="pi pi-trash" severity="danger" text rounded @click="deleteGroup(index)" />
</div>
</template>
</Column>
</DataTable>
</div>
<div class="flex flex-col gap-2">
<label class="font-bold text-lg">{{ t('acl.group.members') }}</label>
<MultiSelect v-model="group.members" :options="props.groupNames" multiple fluid filter
:placeholder="t('acl.group.members')" />
</div>
<!-- Group Identity Dialog -->
<Dialog v-model:visible="showGroupDialog" modal :header="t('acl.groups')" :style="{ width: '400px' }">
<div v-if="editingGroup" class="flex flex-col gap-4 pt-2">
<div class="flex flex-col gap-2">
<label class="font-bold">{{ t('acl.group.name') }}</label>
<InputText v-model="editingGroup.group_name" fluid />
</div>
<div class="flex flex-col gap-2">
<label class="font-bold">{{ t('acl.group.secret') }}</label>
<Password v-model="editingGroup.group_secret" :feedback="false" toggleMask fluid />
</div>
</div>
<template #footer>
<Button :label="t('web.common.cancel')" icon="pi pi-times" @click="showGroupDialog = false" text />
<Button :label="t('web.common.save')" icon="pi pi-save" @click="saveGroup" />
</template>
</Dialog>
</div>
</template>
@@ -0,0 +1,150 @@
<script setup lang="ts">
import { Button, Menu, Tab, TabList, TabPanel, TabPanels, Tabs } from 'primevue'
import { computed, ref } from 'vue'
import { useI18n } from 'vue-i18n'
import { Acl, AclAction, AclChainType } from '../../types/network'
import AclChainEditor from './AclChainEditor.vue'
import AclGroupEditor from './AclGroupEditor.vue'
const acl = defineModel<Acl>({ required: true })
const { t } = useI18n()
const activeTab = ref(0)
const menu = ref()
const addMenuModel = ref([
{ label: () => t('acl.inbound'), command: () => addChain(AclChainType.Inbound) },
{ label: () => t('acl.outbound'), command: () => addChain(AclChainType.Outbound) },
{ label: () => t('acl.forward'), command: () => addChain(AclChainType.Forward) },
])
function addChain(type: AclChainType) {
if (!acl.value.acl_v1) {
acl.value.acl_v1 = { chains: [], group: { declares: [], members: [] } }
}
let defaultName = ''
switch (type) {
case AclChainType.Inbound: defaultName = 'Inbound'; break;
case AclChainType.Outbound: defaultName = 'Outbound'; break;
case AclChainType.Forward: defaultName = 'Forward'; break;
}
acl.value.acl_v1.chains.push({
name: defaultName,
chain_type: type,
description: '',
enabled: true,
rules: [],
default_action: AclAction.Allow
})
activeTab.value = acl.value.acl_v1.chains.length - 1
}
function removeChain(index: number) {
if (confirm(t('acl.delete_chain_confirm'))) {
acl.value.acl_v1?.chains.splice(index, 1)
if (activeTab.value >= (acl.value.acl_v1?.chains.length || 0)) {
activeTab.value = Math.max(0, (acl.value.acl_v1?.chains.length || 0))
}
}
}
function handleRenameGroup({ oldName, newName }: { oldName: string, newName: string }) {
if (!acl.value.acl_v1) return
acl.value.acl_v1.chains.forEach(chain => {
chain.rules.forEach(rule => {
rule.source_groups = rule.source_groups.map(g => g === oldName ? newName : g)
rule.destination_groups = rule.destination_groups.map(g => g === oldName ? newName : g)
})
})
}
const groupNames = computed(() => {
return acl.value.acl_v1?.group?.declares.map(g => g.group_name) || []
})
const tabs = computed(() => {
const chains = acl.value.acl_v1?.chains || []
const result: { type: string, label: string, index: number }[] = []
if (chains.length === 0) {
result.push({ type: 'empty', label: t('acl.chains'), index: 0 })
}
else {
chains.forEach((c, index) => {
result.push({
type: 'chain',
label: c.name || `Chain ${index}`,
index
})
})
}
result.push({ type: 'groups', label: t('acl.groups'), index: result.length })
return result
})
</script>
<template>
  <div class="flex flex-col gap-4">
    <Tabs v-model:value="activeTab">
      <!-- Tab strip: one closable tab per chain, plus an "add chain" menu button -->
      <div class="flex items-center border-b border-surface-200 dark:border-surface-700">
        <TabList class="flex-grow min-w-0 overflow-x-auto" style="border-bottom: none;">
          <Tab v-for="tab in tabs" :key="tab.type + tab.index" :value="tab.index">
            <div class="flex items-center gap-2 whitespace-nowrap">
              {{ tab.label }}
              <!-- Delete button on chain tabs only; @click.stop keeps the click
                   from also activating the tab being removed -->
              <Button v-if="tab.type === 'chain'" icon="pi pi-times" severity="danger" text rounded size="small"
                class="w-6 h-6 p-0" @click.stop="removeChain(tab.index)" />
            </div>
          </Tab>
        </TabList>
        <div
          class="flex-shrink-0 flex items-center px-2 bg-white dark:bg-gray-900 border-l border-surface-100 dark:border-surface-800">
          <Button icon="pi pi-plus" text rounded size="small" class="w-8 h-8 p-0"
            @click="(event) => menu.toggle(event)" />
          <Menu ref="menu" :model="addMenuModel" :popup="true" />
        </div>
      </div>
      <TabPanels>
        <TabPanel v-for="tab in tabs" :key="'panel' + tab.type + tab.index" :value="tab.index">
          <!-- Empty State within TabPanel: no chains yet, offer one button per chain type -->
          <div v-if="tab.type === 'empty'"
            class="py-8 flex flex-col items-center justify-center border-2 border-dashed border-surface-200 rounded-lg bg-surface-50 dark:bg-surface-900 dark:border-surface-700">
            <i class="pi pi-shield text-5xl mb-4 text-primary" />
            <div class="text-xl font-bold mb-2">{{ t('acl.chains') }}</div>
            <p class="text-surface-500 mb-8 text-center max-w-sm px-4">{{ t('acl.help') }}</p>
            <div class="flex flex-wrap gap-3 justify-center">
              <Button :label="t('acl.inbound')" icon="pi pi-arrow-down-left" @click="addChain(AclChainType.Inbound)" />
              <Button :label="t('acl.outbound')" icon="pi pi-arrow-up-right" @click="addChain(AclChainType.Outbound)" />
              <Button :label="t('acl.forward')" icon="pi pi-directions" @click="addChain(AclChainType.Forward)" />
            </div>
          </div>
          <!-- Rule Chains: guard against a stale tab index after removals -->
          <div v-if="tab.type === 'chain' && acl.acl_v1 && acl.acl_v1.chains[tab.index]" class="py-4">
            <AclChainEditor v-model="acl.acl_v1.chains[tab.index]" :group-names="groupNames" />
          </div>
          <!-- Group Management -->
          <div v-if="tab.type === 'groups'" class="py-4">
            <template v-if="acl.acl_v1">
              <AclGroupEditor v-if="acl.acl_v1.group" v-model="acl.acl_v1.group" :group-names="groupNames"
                @rename-group="handleRenameGroup" />
              <!-- ACL v1 exists but has no group section yet: offer to create an empty one -->
              <div v-else class="flex justify-center p-4">
                <Button :label="t('web.common.add') + ' ' + t('acl.groups')"
                  @click="acl.acl_v1.group = { declares: [], members: [] }" />
              </div>
            </template>
            <!-- No acl_v1 at all (ACL disabled): offer to initialize an empty config -->
            <div v-else class="flex justify-center p-4">
              <Button :label="t('acl.enabled')"
                @click="acl.acl_v1 = { chains: [], group: { declares: [], members: [] } }" />
            </div>
          </div>
        </TabPanel>
      </TabPanels>
    </Tabs>
  </div>
</template>
@@ -0,0 +1,150 @@
<script setup lang="ts">
import { AutoComplete, Button, Checkbox, Dialog, InputNumber, InputText, MultiSelect, Panel, SelectButton, ToggleButton } from 'primevue';
import { computed, ref } from 'vue';
import { useI18n } from 'vue-i18n';
import { AclAction, AclProtocol, AclRule } from '../../types/network';
// Props: dialog visibility flag and the known group names offered in the
// group MultiSelects below.
const props = defineProps<{
  visible: boolean
  groupNames?: string[]
}>()
// Emitted to ask the parent to toggle visibility, and to hand back the rule.
const emit = defineEmits(['update:visible', 'save'])
// Two-way bound ACL rule being edited in this dialog.
const rule = defineModel<AclRule>('rule', { required: true })
const { t } = useI18n()
// Protocol choices. Translated labels are functions so the template resolves
// them at render time; fixed protocol names are plain strings.
const protocolOptions = [
  { label: () => t('acl.any'), value: AclProtocol.Any },
  { label: 'TCP', value: AclProtocol.TCP },
  { label: 'UDP', value: AclProtocol.UDP },
  { label: 'ICMP', value: AclProtocol.ICMP },
  { label: 'ICMPv6', value: AclProtocol.ICMPv6 },
]
// Rule verdict choices (allow / drop).
const actionOptions = [
  { label: () => t('acl.allow'), value: AclAction.Allow },
  { label: () => t('acl.drop'), value: AclAction.Drop },
]
// Port inputs are only shown for protocols that can carry ports: TCP, UDP,
// and the catch-all "Any".
const showPorts = computed(() => {
  const portProtocols = [AclProtocol.TCP, AclProtocol.UDP, AclProtocol.Any]
  return portProtocols.includes(rule.value.protocol)
})
// Ask the parent to hide the dialog.
function close() {
  emit('update:visible', false)
}
// Hand the edited rule back to the parent, then close the dialog.
function save() {
  emit('save', rule.value)
  close()
}
// Suggestions for IP/Port AutoComplete.
// The AutoComplete widgets echo the typed query back as the only suggestion
// (see the @complete handlers in the template) so free-form values can be
// committed as chips.
const genericSuggestions = ref<string[]>([])
</script>
<template>
  <Dialog :visible="visible" @update:visible="emit('update:visible', $event)" modal :header="t('acl.edit_rule')"
    :style="{ width: '90vw', maxWidth: '600px' }">
    <div class="flex flex-col gap-4">
      <!-- Rule name + enabled toggle -->
      <div class="flex flex-row gap-4 items-center">
        <div class="flex flex-col gap-2 grow">
          <label class="font-bold">{{ t('acl.rule.name') }}</label>
          <InputText v-model="rule.name" fluid />
        </div>
        <div class="flex flex-col gap-2">
          <label class="font-bold">{{ t('acl.rule.enabled') }}</label>
          <ToggleButton v-model="rule.enabled" on-icon="pi pi-check" off-icon="pi pi-times"
            :on-label="t('web.common.enable')" :off-label="t('web.common.disable')" class="w-24" />
        </div>
      </div>
      <div class="flex flex-col gap-2">
        <label class="font-bold">{{ t('acl.rule.description') }}</label>
        <InputText v-model="rule.description" fluid />
      </div>
      <!-- Verdict (allow/drop) and protocol; protocol labels may be i18n thunks -->
      <div class="flex flex-row gap-4 flex-wrap">
        <div class="flex flex-col gap-2 grow">
          <label class="font-bold">{{ t('acl.rule.action') }}</label>
          <SelectButton v-model="rule.action" :options="actionOptions" :option-label="opt => opt.label()"
            option-value="value" :allow-empty="false" />
        </div>
        <div class="flex flex-col gap-2 grow">
          <label class="font-bold">{{ t('acl.rule.protocol') }}</label>
          <SelectButton v-model="rule.protocol" :options="protocolOptions"
            :option-label="opt => typeof opt.label === 'function' ? opt.label() : opt.label" option-value="value"
            :allow-empty="false" />
        </div>
      </div>
      <!-- Match criteria: IP lists always shown, port lists only when showPorts -->
      <Panel :header="t('acl.rules')" toggleable>
        <div class="flex flex-col gap-4">
          <div class="flex flex-col gap-2">
            <label class="font-bold">{{ t('acl.rule.src_ips') }}</label>
            <AutoComplete v-model="rule.source_ips" multiple fluid :suggestions="genericSuggestions"
              @complete="genericSuggestions = [$event.query]"
              :placeholder="t('chips_placeholder', ['10.126.126.0/24'])" />
          </div>
          <div class="flex flex-col gap-2">
            <label class="font-bold">{{ t('acl.rule.dst_ips') }}</label>
            <AutoComplete v-model="rule.destination_ips" multiple fluid :suggestions="genericSuggestions"
              @complete="genericSuggestions = [$event.query]"
              :placeholder="t('chips_placeholder', ['10.126.126.2/32'])" />
          </div>
          <div v-if="showPorts" class="flex flex-row gap-4 flex-wrap">
            <div class="flex flex-col gap-2 grow">
              <label class="font-bold">{{ t('acl.rule.src_ports') }}</label>
              <AutoComplete v-model="rule.source_ports" multiple fluid :suggestions="genericSuggestions"
                @complete="genericSuggestions = [$event.query]" placeholder="e.g. 80, 1000-2000" />
            </div>
            <div class="flex flex-col gap-2 grow">
              <label class="font-bold">{{ t('acl.rule.dst_ports') }}</label>
              <!-- NOTE: destination ports are stored in `rule.ports`, not a
                   `destination_ports` field -->
              <AutoComplete v-model="rule.ports" multiple fluid :suggestions="genericSuggestions"
                @complete="genericSuggestions = [$event.query]" placeholder="e.g. 80, 1000-2000" />
            </div>
          </div>
        </div>
      </Panel>
      <!-- Advanced: stateful tracking, rate limits (0 = unlimited), group matching -->
      <Panel :header="t('advanced_settings')" toggleable collapsed>
        <div class="flex flex-col gap-4">
          <div class="flex items-center gap-2">
            <Checkbox v-model="rule.stateful" :binary="true" inputId="rule-stateful" />
            <label for="rule-stateful" class="font-bold">{{ t('acl.rule.stateful') }}</label>
          </div>
          <div class="flex flex-row gap-4 flex-wrap">
            <div class="flex flex-col gap-2 grow">
              <label class="font-bold">{{ t('acl.rule.rate_limit') }}</label>
              <InputNumber v-model="rule.rate_limit" :min="0" placeholder="0 = no limit" fluid />
            </div>
            <div class="flex flex-col gap-2 grow">
              <label class="font-bold">{{ t('acl.rule.burst_limit') }}</label>
              <InputNumber v-model="rule.burst_limit" :min="0" placeholder="0 = no limit" fluid />
            </div>
          </div>
          <div class="flex flex-col gap-2">
            <label class="font-bold">{{ t('acl.rule.src_groups') }}</label>
            <MultiSelect v-model="rule.source_groups" :options="props.groupNames" multiple fluid filter
              :placeholder="t('acl.rule.src_groups')" />
          </div>
          <div class="flex flex-col gap-2">
            <label class="font-bold">{{ t('acl.rule.dst_groups') }}</label>
            <MultiSelect v-model="rule.destination_groups" :options="props.groupNames" multiple fluid filter
              :placeholder="t('acl.rule.dst_groups')" />
          </div>
        </div>
      </Panel>
    </div>
    <template #footer>
      <Button :label="t('web.common.cancel')" icon="pi pi-times" @click="close" text />
      <Button :label="t('web.common.save')" icon="pi pi-save" @click="save" />
    </template>
  </Dialog>
</template>
+52 -2
View File
@@ -10,7 +10,7 @@ initial_nodes_help: |
• 留空 = 节点独立启动,等别人来连,或你后续手动连。 • 留空 = 节点独立启动,等别人来连,或你后续手动连。
• 无论直接还是间接连通(通过其他节点搭桥),都能组网互通。 • 无论直接还是间接连通(通过其他节点搭桥),都能组网互通。
初始节点可以用自己的,也可以用别人分享的。 初始节点可以用自己的,也可以用别人分享的。
initial_node_placeholder: 例如:tcp://node.example.com:11010 initial_node_placeholder: 例如:node.example.com
virtual_ipv4: 虚拟IPv4地址 virtual_ipv4: 虚拟IPv4地址
virtual_ipv4_dhcp: DHCP virtual_ipv4_dhcp: DHCP
network_name: 网络名称 network_name: 网络名称
@@ -117,7 +117,7 @@ disable_quic_input: 禁用 QUIC 输入
disable_quic_input_help: 禁用 QUIC 入站流量,其他开启 QUIC 代理的节点仍然使用 TCP 连接到本节点。 disable_quic_input_help: 禁用 QUIC 入站流量,其他开启 QUIC 代理的节点仍然使用 TCP 连接到本节点。
disable_p2p: 禁用 P2P disable_p2p: 禁用 P2P
disable_p2p_help: 禁用 P2P 模式,所有流量通过手动指定的服务器中转 disable_p2p_help: 禁用普通自动 P2P。开启 need-p2p 的节点仍可与当前节点建立 P2P
p2p_only: 仅 P2P p2p_only: 仅 P2P
p2p_only_help: 仅与已经建立P2P连接的对等节点通信,不通过其他节点中转。 p2p_only_help: 仅与已经建立P2P连接的对等节点通信,不通过其他节点中转。
@@ -196,6 +196,12 @@ mtu_help: |
TUN设备的MTU,默认为非加密时为1380,加密时为1360。范围:400-1380 TUN设备的MTU,默认为非加密时为1380,加密时为1360。范围:400-1380
mtu_placeholder: 留空为默认值1380 mtu_placeholder: 留空为默认值1380
instance_recv_bps_limit: 实例接收限速
instance_recv_bps_limit_help: |
限制当前实例整体入站流量的总接收速率,单位为字节每秒。
留空表示不限速。
instance_recv_bps_limit_placeholder: 留空表示不限速
mapped_listeners: 监听映射 mapped_listeners: 监听映射
mapped_listeners_help: | mapped_listeners_help: |
手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。 手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。
@@ -349,6 +355,7 @@ web:
delete: 删除 delete: 删除
edit: 编辑 edit: 编辑
refresh: 刷新 refresh: 刷新
add: 添加
loading: 加载中... loading: 加载中...
error: 错误 error: 错误
success: 成功 success: 成功
@@ -416,3 +423,46 @@ config-server:
client: client:
not_running: 无法连接至远程客户端 not_running: 无法连接至远程客户端
retry: 重试 retry: 重试
acl:
title: 访问控制
help: 访问控制列表,用于限制节点间的通信。
enabled: 启用 ACL
default_action: 默认动作
chains: 规则链
inbound: 入站
outbound: 出站
forward: 转发
rules: 规则
add_rule: 添加规则
edit_rule: 编辑规则
rule:
name: 规则名称
description: 描述
enabled: 启用
protocol: 协议
action: 动作
src_ips: 来源 IP
dst_ips: 目的 IP
src_ports: 来源端口
dst_ports: 目的端口
rate_limit: 速率限制 (pps)
burst_limit: 爆发限制
stateful: 状态追踪
src_groups: 来源组
dst_groups: 目的组
groups: 组管理
group:
declares: 声明组
members: 加入组
name: 组名
secret: 密钥
help: 在此处定义网络中的组身份,以便在规则中使用。
any: 任意
allow: 允许
drop: 丢弃
delete_chain_confirm: 确定要删除此规则链及其所有规则吗?
chain:
name: 名称
type: 类型
match: 匹配
+52 -2
View File
@@ -10,7 +10,7 @@ initial_nodes_help: |
• Leaving it empty = the node starts alone until others connect to it, or you connect it later yourself. • Leaving it empty = the node starts alone until others connect to it, or you connect it later yourself.
• Direct or indirect connectivity, including through relay nodes, can form one network. • Direct or indirect connectivity, including through relay nodes, can form one network.
Initial nodes can be your own nodes or ones shared by others. Initial nodes can be your own nodes or ones shared by others.
initial_node_placeholder: "Example: tcp://node.example.com:11010" initial_node_placeholder: "Example: node.example.com"
virtual_ipv4: Virtual IPv4 virtual_ipv4: Virtual IPv4
virtual_ipv4_dhcp: DHCP virtual_ipv4_dhcp: DHCP
network_name: Network Name network_name: Network Name
@@ -116,7 +116,7 @@ disable_quic_input: Disable QUIC Input
disable_quic_input_help: Disable inbound QUIC traffic, while nodes with QUIC proxy enabled continue to connect using TCP. disable_quic_input_help: Disable inbound QUIC traffic, while nodes with QUIC proxy enabled continue to connect using TCP.
disable_p2p: Disable P2P disable_p2p: Disable P2P
disable_p2p_help: Disable P2P mode; route all traffic through a manually specified relay server. disable_p2p_help: Disable ordinary automatic P2P. Nodes with need-p2p enabled can still establish P2P with this node.
p2p_only: P2P Only p2p_only: P2P Only
p2p_only_help: Only communicate with peers that have already established P2P connections, do not relay through other nodes. p2p_only_help: Only communicate with peers that have already established P2P connections, do not relay through other nodes.
@@ -196,6 +196,12 @@ mtu_help: |
MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380 MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380
mtu_placeholder: Leave blank as default value 1380 mtu_placeholder: Leave blank as default value 1380
instance_recv_bps_limit: Instance Receive Limit
instance_recv_bps_limit_help: |
Limit the total receive bandwidth for the whole instance. Unit: bytes per second.
Leave blank for no limit.
instance_recv_bps_limit_placeholder: Leave blank for no limit
mapped_listeners: Map Listeners mapped_listeners: Map Listeners
mapped_listeners_help: | mapped_listeners_help: |
Manually specify the public address of the listener, other nodes can use this address to connect to this node. Manually specify the public address of the listener, other nodes can use this address to connect to this node.
@@ -349,6 +355,7 @@ web:
delete: Delete delete: Delete
edit: Edit edit: Edit
refresh: Refresh refresh: Refresh
add: Add
loading: Loading... loading: Loading...
error: Error error: Error
success: Success success: Success
@@ -416,3 +423,46 @@ config-server:
client: client:
not_running: Unable to connect to remote client. not_running: Unable to connect to remote client.
retry: Retry retry: Retry
acl:
title: Access Control (ACL)
help: Access control list to restrict communication between nodes.
enabled: Enable ACL
default_action: Default Action
chains: Rule Chains
inbound: Inbound
outbound: Outbound
forward: Forward
rules: Rules
add_rule: Add Rule
edit_rule: Edit Rule
rule:
name: Rule Name
description: Description
enabled: Enabled
protocol: Protocol
action: Action
src_ips: Source IPs
dst_ips: Destination IPs
src_ports: Source Ports
dst_ports: Destination Ports
rate_limit: Rate Limit (pps)
burst_limit: Burst Limit
stateful: Stateful
src_groups: Source Groups
dst_groups: Destination Groups
groups: Groups
group:
declares: Declared Groups
members: Node Memberships
name: Group Name
secret: Group Secret
help: Define group identities in the network to use them in rules.
any: Any
allow: Allow
drop: Drop
delete_chain_confirm: Are you sure you want to delete this rule chain and all its rules?
chain:
name: Name
type: Type
match: Match
@@ -14,6 +14,74 @@ export interface SecureModeConfig {
local_public_key?: string local_public_key?: string
} }
/** Wire protocol matched by an ACL rule. */
export enum AclProtocol {
  Unspecified = 0,
  TCP = 1,
  UDP = 2,
  ICMP = 3,
  ICMPv6 = 4,
  // Matches any protocol; port fields still apply in the rule editor.
  Any = 5,
}

/** Verdict applied when a rule matches. */
export enum AclAction {
  Noop = 0,
  Allow = 1,
  Drop = 2,
}

/** Which traffic direction a rule chain applies to. */
export enum AclChainType {
  UnspecifiedChain = 0,
  Inbound = 1,
  Outbound = 2,
  Forward = 3,
}
/**
 * One ACL rule. The string-list fields hold user-entered expressions
 * (IP/CIDR strings, port or port-range strings) as produced by the rule
 * editor dialog.
 */
export interface AclRule {
  name: string
  description: string
  priority: number
  enabled: boolean
  protocol: AclProtocol
  // Destination ports. NOTE(review): the rule editor binds its "dst_ports"
  // input to this field, while source ports have their own `source_ports`
  // field — the asymmetry is intentional but easy to misread.
  ports: string[]
  source_ips: string[]
  destination_ips: string[]
  source_ports: string[]
  action: AclAction
  // 0 means unlimited (the editor shows "0 = no limit" as placeholder).
  rate_limit: number
  burst_limit: number
  stateful: boolean
  source_groups: string[]
  destination_groups: string[]
}

/** An ordered set of rules with a default action for unmatched traffic. */
export interface AclChain {
  name: string
  chain_type: AclChainType
  description: string
  enabled: boolean
  rules: AclRule[]
  default_action: AclAction
}

/** A group declared by this node: a name plus its shared secret. */
export interface GroupIdentity {
  group_name: string
  group_secret: string
}

/** Group configuration: groups this node declares plus memberships it joins. */
export interface GroupInfo {
  declares: GroupIdentity[]
  // NOTE(review): membership entry format is not visible here — confirm
  // against the group editor component before relying on it.
  members: string[]
}

/** Version 1 ACL payload: rule chains plus optional group configuration. */
export interface AclV1 {
  chains: AclChain[]
  group?: GroupInfo
}

/** Top-level ACL wrapper; an absent `acl_v1` means ACL is disabled in the UI. */
export interface Acl {
  acl_v1?: AclV1
}
export interface NetworkConfig { export interface NetworkConfig {
instance_id: string instance_id: string
@@ -78,12 +146,14 @@ export interface NetworkConfig {
socks5_port: number socks5_port: number
mtu: number | null mtu: number | null
instance_recv_bps_limit: number | null
mapped_listeners: string[] mapped_listeners: string[]
enable_magic_dns?: boolean enable_magic_dns?: boolean
enable_private_mode?: boolean enable_private_mode?: boolean
port_forwards: PortForwardConfig[] port_forwards: PortForwardConfig[]
acl?: Acl
} }
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
@@ -146,10 +216,20 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
enable_socks5: false, enable_socks5: false,
socks5_port: 1080, socks5_port: 1080,
mtu: null, mtu: null,
instance_recv_bps_limit: null,
mapped_listeners: [], mapped_listeners: [],
enable_magic_dns: false, enable_magic_dns: false,
enable_private_mode: false, enable_private_mode: false,
port_forwards: [], port_forwards: [],
acl: {
acl_v1: {
group: {
declares: [],
members: [],
},
chains: [],
},
},
} }
} }
+18 -11
View File
@@ -2,8 +2,8 @@ pub mod session;
pub mod storage; pub mod storage;
use std::sync::{ use std::sync::{
atomic::{AtomicU32, Ordering},
Arc, Arc,
atomic::{AtomicU32, Ordering},
}; };
use dashmap::DashMap; use dashmap::DashMap;
@@ -19,11 +19,11 @@ use maxminddb::geoip2;
use session::{Location, Session}; use session::{Location, Session};
use storage::{Storage, StorageToken}; use storage::{Storage, StorageToken};
use crate::webhook::SharedWebhookConfig;
use crate::FeatureFlags; use crate::FeatureFlags;
use crate::webhook::SharedWebhookConfig;
use tokio::task::JoinSet; use tokio::task::JoinSet;
use crate::db::{entity::user_running_network_configs, Db, UserIdInDb}; use crate::db::{Db, UserIdInDb, entity::user_running_network_configs};
#[derive(rust_embed::Embed)] #[derive(rust_embed::Embed)]
#[folder = "resources/"] #[folder = "resources/"]
@@ -340,7 +340,7 @@ mod tests {
}; };
use sqlx::Executor; use sqlx::Executor;
use crate::{client_manager::ClientManager, db::Db, FeatureFlags}; use crate::{FeatureFlags, client_manager::ClientManager, db::Db};
#[tokio::test] #[tokio::test]
async fn test_client() { async fn test_client() {
@@ -379,19 +379,26 @@ mod tests {
let req = tokio::time::timeout(Duration::from_secs(12), async { let req = tokio::time::timeout(Duration::from_secs(12), async {
loop { loop {
let session = mgr let sessions = mgr
.client_sessions .client_sessions
.iter() .iter()
.next() .map(|item| item.value().clone())
.map(|item| item.value().clone()); .collect::<Vec<_>>();
let Some(session) = session else { if sessions.is_empty() {
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_millis(100)).await;
continue; continue;
}; }
let mut waiter = session.data().read().await.heartbeat_waiter(); let mut found_req = None;
if let Ok(req) = waiter.recv().await { for session in sessions {
if let Some(req) = session.data().read().await.req() {
found_req = Some(req);
break;
}
}
if let Some(req) = found_req {
break req; break req;
} }
tokio::time::sleep(Duration::from_millis(100)).await;
} }
}) })
.await .await
+228 -82
View File
@@ -1,4 +1,9 @@
use std::{collections::HashSet, fmt::Debug, str::FromStr as _, sync::Arc}; use std::{
collections::{HashMap, HashSet},
fmt::Debug,
str::FromStr as _,
sync::Arc,
};
use anyhow::Context; use anyhow::Context;
use easytier::{ use easytier::{
@@ -15,11 +20,11 @@ use easytier::{
rpc_service::remote_client::{ListNetworkProps, Storage as _}, rpc_service::remote_client::{ListNetworkProps, Storage as _},
tunnel::Tunnel, tunnel::Tunnel,
}; };
use tokio::sync::{broadcast, RwLock}; use tokio::sync::{RwLock, broadcast};
use super::storage::{Storage, StorageToken, WeakRefStorage}; use super::storage::{Storage, StorageToken, WeakRefStorage};
use crate::webhook::SharedWebhookConfig;
use crate::FeatureFlags; use crate::FeatureFlags;
use crate::webhook::SharedWebhookConfig;
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Location { pub struct Location {
@@ -37,6 +42,7 @@ pub struct SessionData {
storage_token: Option<StorageToken>, storage_token: Option<StorageToken>,
binding_version: Option<u64>, binding_version: Option<u64>,
applied_config_revision: Option<String>,
notifier: broadcast::Sender<HeartbeatRequest>, notifier: broadcast::Sender<HeartbeatRequest>,
req: Option<HeartbeatRequest>, req: Option<HeartbeatRequest>,
location: Option<Location>, location: Option<Location>,
@@ -59,6 +65,7 @@ impl SessionData {
client_url, client_url,
storage_token: None, storage_token: None,
binding_version: None, binding_version: None,
applied_config_revision: None,
notifier: tx, notifier: tx,
req: None, req: None,
location, location,
@@ -80,30 +87,30 @@ impl SessionData {
impl Drop for SessionData { impl Drop for SessionData {
fn drop(&mut self) { fn drop(&mut self) {
if let Ok(storage) = Storage::try_from(self.storage.clone()) { if let Ok(storage) = Storage::try_from(self.storage.clone())
if let Some(token) = self.storage_token.as_ref() { && let Some(token) = self.storage_token.as_ref()
storage.remove_client(token); {
storage.remove_client(token);
// Notify the webhook receiver when a node disconnects. // Notify the webhook receiver when a node disconnects.
if self.webhook_config.is_enabled() { if self.webhook_config.is_enabled() {
let webhook = self.webhook_config.clone(); let webhook = self.webhook_config.clone();
let machine_id = token.machine_id.to_string(); let machine_id = token.machine_id.to_string();
let user_id = Some(token.user_id); let user_id = Some(token.user_id);
let token_value = token.token.clone(); let token_value = token.token.clone();
let web_instance_id = webhook.web_instance_id.clone(); let web_instance_id = webhook.web_instance_id.clone();
let binding_version = self.binding_version; let binding_version = self.binding_version;
tokio::spawn(async move { tokio::spawn(async move {
webhook webhook
.notify_node_disconnected(&crate::webhook::NodeDisconnectedRequest { .notify_node_disconnected(&crate::webhook::NodeDisconnectedRequest {
machine_id, machine_id,
token: token_value, token: token_value,
user_id, user_id,
web_instance_id, web_instance_id,
binding_version, binding_version,
}) })
.await; .await;
}); });
}
} }
} }
} }
@@ -117,37 +124,16 @@ struct SessionRpcService {
} }
impl SessionRpcService { impl SessionRpcService {
async fn persist_webhook_network_config( fn normalize_network_config(
storage: &Storage, mut network_config: serde_json::Value,
user_id: i32, inst_id: uuid::Uuid,
machine_id: uuid::Uuid, ) -> anyhow::Result<NetworkConfig> {
network_config: serde_json::Value,
) -> anyhow::Result<()> {
let mut network_config = network_config;
let network_name = network_config let network_name = network_config
.get("network_name") .get("network_name")
.and_then(|v| v.as_str()) .and_then(|v| v.as_str())
.filter(|v| !v.is_empty()) .filter(|v| !v.is_empty())
.ok_or_else(|| anyhow::anyhow!("webhook response missing network_name"))? .ok_or_else(|| anyhow::anyhow!("webhook response missing network_name"))?
.to_string(); .to_string();
let existing_configs = storage
.db()
.list_network_configs((user_id, machine_id), ListNetworkProps::All)
.await
.map_err(|e| anyhow::anyhow!("failed to list existing network configs: {:?}", e))?;
let inst_id = existing_configs
.iter()
.find_map(|cfg| {
let value = serde_json::from_str::<serde_json::Value>(&cfg.network_config).ok()?;
let cfg_network_name = value.get("network_name")?.as_str()?;
if cfg_network_name == network_name {
uuid::Uuid::parse_str(&cfg.network_instance_id).ok()
} else {
None
}
})
.unwrap_or_else(uuid::Uuid::new_v4);
let config_obj = network_config let config_obj = network_config
.as_object_mut() .as_object_mut()
.ok_or_else(|| anyhow::anyhow!("webhook network_config must be a JSON object"))?; .ok_or_else(|| anyhow::anyhow!("webhook network_config must be a JSON object"))?;
@@ -157,14 +143,66 @@ impl SessionRpcService {
); );
config_obj config_obj
.entry("instance_name".to_string()) .entry("instance_name".to_string())
.or_insert_with(|| serde_json::Value::String(network_name.clone())); .or_insert_with(|| serde_json::Value::String(network_name));
let config = serde_json::from_value::<NetworkConfig>(network_config)?; Ok(serde_json::from_value::<NetworkConfig>(network_config)?)
storage }
async fn reconcile_managed_network_configs(
storage: &Storage,
user_id: i32,
machine_id: uuid::Uuid,
desired_configs: Vec<crate::webhook::ManagedNetworkConfig>,
) -> anyhow::Result<()> {
let existing_configs = storage
.db() .db()
.insert_or_update_user_network_config((user_id, machine_id), inst_id, config) .list_network_configs((user_id, machine_id), ListNetworkProps::All)
.await .await
.map_err(|e| anyhow::anyhow!("failed to persist webhook network config: {:?}", e))?; .map_err(|e| anyhow::anyhow!("failed to list existing network configs: {:?}", e))?;
let existing_ids = existing_configs
.iter()
.filter_map(|cfg| uuid::Uuid::parse_str(&cfg.network_instance_id).ok())
.collect::<HashSet<_>>();
let mut desired_ids = HashSet::with_capacity(desired_configs.len());
let mut normalized = HashMap::with_capacity(desired_configs.len());
for desired in desired_configs {
let inst_id = uuid::Uuid::parse_str(&desired.instance_id).with_context(|| {
format!(
"invalid desired managed instance id: {}",
desired.instance_id
)
})?;
let config = Self::normalize_network_config(desired.network_config, inst_id)?;
desired_ids.insert(inst_id);
normalized.insert(inst_id, config);
}
for (inst_id, config) in normalized {
storage
.db()
.insert_or_update_user_network_config((user_id, machine_id), inst_id, config)
.await
.map_err(|e| {
anyhow::anyhow!(
"failed to persist managed network config {}: {:?}",
inst_id,
e
)
})?;
}
let stale_ids = existing_ids
.difference(&desired_ids)
.copied()
.collect::<Vec<_>>();
if !stale_ids.is_empty() {
storage
.db()
.delete_network_configs((user_id, machine_id), &stale_ids)
.await
.map_err(|e| anyhow::anyhow!("failed to delete stale network configs: {:?}", e))?;
}
Ok(()) Ok(())
} }
@@ -185,13 +223,17 @@ impl SessionRpcService {
req.machine_id req.machine_id
))?; ))?;
let (user_id, webhook_network_config, webhook_validated, binding_version) = if data let (
.webhook_config user_id,
.is_enabled() webhook_managed_network_configs,
{ webhook_config_revision,
webhook_validated,
binding_version,
) = if data.webhook_config.is_enabled() {
let webhook_req = crate::webhook::ValidateTokenRequest { let webhook_req = crate::webhook::ValidateTokenRequest {
token: req.user_token.clone(), token: req.user_token.clone(),
machine_id: machine_id.to_string(), machine_id: machine_id.to_string(),
public_ip: data.client_url.host_str().map(str::to_string),
hostname: req.hostname.clone(), hostname: req.hostname.clone(),
version: req.easytier_version.clone(), version: req.easytier_version.clone(),
os_type: req.device_os.as_ref().map(|info| info.os_type.clone()), os_type: req.device_os.as_ref().map(|info| info.os_type.clone()),
@@ -223,7 +265,8 @@ impl SessionRpcService {
}; };
( (
user_id, user_id,
resp.network_config, resp.managed_network_configs,
resp.config_revision,
true, true,
Some(resp.binding_version), Some(resp.binding_version),
) )
@@ -257,21 +300,21 @@ impl SessionRpcService {
); );
} }
}; };
(user_id, None, false, None) (user_id, Vec::new(), String::new(), false, None)
}; };
if webhook_validated { if webhook_validated
if let Some(network_config) = webhook_network_config { && data.applied_config_revision.as_deref() != Some(webhook_config_revision.as_str())
Self::persist_webhook_network_config(&storage, user_id, machine_id, network_config) {
.await Self::reconcile_managed_network_configs(
.map_err(rpc_types::error::Error::from)?; &storage,
} user_id,
} else if webhook_network_config.is_some() { machine_id,
return Err(anyhow::anyhow!( webhook_managed_network_configs,
"unexpected webhook network_config for non-webhook token {:?}",
req.user_token
) )
.into()); .await
.map_err(rpc_types::error::Error::from)?;
data.applied_config_revision = Some(webhook_config_revision);
} }
if data.req.replace(req.clone()).is_none() { if data.req.replace(req.clone()).is_none() {
@@ -343,7 +386,7 @@ impl WebServerService for SessionRpcService {
_: easytier::proto::web::GetFeatureRequest, _: easytier::proto::web::GetFeatureRequest,
) -> rpc_types::error::Result<easytier::proto::web::GetFeatureResponse> { ) -> rpc_types::error::Result<easytier::proto::web::GetFeatureResponse> {
Ok(easytier::proto::web::GetFeatureResponse { Ok(easytier::proto::web::GetFeatureResponse {
support_encryption: true, support_encryption: easytier::web_client::security::web_secure_tunnel_supported(),
}) })
} }
} }
@@ -411,6 +454,7 @@ impl Session {
rpc_client: SessionRpcClient, rpc_client: SessionRpcClient,
) { ) {
let mut cleaned_web_managed_instances = false; let mut cleaned_web_managed_instances = false;
let mut last_desired_inst_ids: Option<HashSet<String>> = None;
loop { loop {
heartbeat_waiter = heartbeat_waiter.resubscribe(); heartbeat_waiter = heartbeat_waiter.resubscribe();
let req = heartbeat_waiter.recv().await; let req = heartbeat_waiter.recv().await;
@@ -467,8 +511,15 @@ impl Session {
}; };
let mut has_failed = false; let mut has_failed = false;
let should_be_alive_inst_ids = local_configs
.iter()
.map(|cfg| cfg.network_instance_id.clone())
.collect::<HashSet<_>>();
let desired_changed = last_desired_inst_ids
.as_ref()
.is_none_or(|last| last != &should_be_alive_inst_ids);
if !cleaned_web_managed_instances { if !cleaned_web_managed_instances || desired_changed {
let all_local_configs = match storage let all_local_configs = match storage
.db .db
.list_network_configs((user_id, machine_id.into()), ListNetworkProps::All) .list_network_configs((user_id, machine_id.into()), ListNetworkProps::All)
@@ -486,11 +537,6 @@ impl Session {
.map(|cfg| cfg.network_instance_id.clone()) .map(|cfg| cfg.network_instance_id.clone())
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
let should_be_alive_inst_ids = local_configs
.iter()
.map(|cfg| cfg.network_instance_id.clone())
.collect::<HashSet<_>>();
let should_delete_ids = running_inst_ids let should_delete_ids = running_inst_ids
.iter() .iter()
.chain(all_inst_ids.iter()) .chain(all_inst_ids.iter())
@@ -519,6 +565,7 @@ impl Session {
if !has_failed { if !has_failed {
cleaned_web_managed_instances = true; cleaned_web_managed_instances = true;
last_desired_inst_ids = Some(should_be_alive_inst_ids.clone());
} }
} }
@@ -549,8 +596,7 @@ impl Session {
} }
if !has_failed { if !has_failed {
tracing::info!(?req, "All network instances are running"); last_desired_inst_ids = Some(should_be_alive_inst_ids);
break;
} }
} }
} }
@@ -585,3 +631,103 @@ impl Session {
self.data.read().await.req() self.data.read().await.req()
} }
} }
#[cfg(test)]
mod tests {
    use easytier::rpc_service::remote_client::{ListNetworkProps, Storage as _};
    use serde_json::json;

    use super::{super::storage::Storage, *};

    /// Reconciliation must make the stored config set exactly match the
    /// desired set: existing entries are updated in place, new entries are
    /// inserted, and entries absent from the desired list are deleted.
    #[tokio::test]
    async fn reconcile_managed_network_configs_upserts_and_deletes_exact_set() {
        // Fresh in-memory DB plus a new user/machine pair for isolation.
        let storage = Storage::new(crate::db::Db::memory_db().await);
        let user_id = storage
            .db()
            .auto_create_user("webhook-user")
            .await
            .unwrap()
            .id;
        let machine_id = uuid::Uuid::new_v4();
        let keep_id = uuid::Uuid::new_v4();
        let stale_id = uuid::Uuid::new_v4();
        let new_id = uuid::Uuid::new_v4();

        // Pre-seed one config that will be kept (and renamed) ...
        storage
            .db()
            .insert_or_update_user_network_config(
                (user_id, machine_id),
                keep_id,
                NetworkConfig {
                    network_name: Some("old-name".to_string()),
                    ..Default::default()
                },
            )
            .await
            .unwrap();
        // ... and one that is absent from the desired set, so it must go.
        storage
            .db()
            .insert_or_update_user_network_config(
                (user_id, machine_id),
                stale_id,
                NetworkConfig {
                    network_name: Some("stale".to_string()),
                    ..Default::default()
                },
            )
            .await
            .unwrap();

        // Desired set: keep_id (with a new name) and a brand-new new_id.
        SessionRpcService::reconcile_managed_network_configs(
            &storage,
            user_id,
            machine_id,
            vec![
                crate::webhook::ManagedNetworkConfig {
                    instance_id: keep_id.to_string(),
                    network_config: json!({
                        "instance_id": keep_id.to_string(),
                        "network_name": "updated-name"
                    }),
                },
                crate::webhook::ManagedNetworkConfig {
                    instance_id: new_id.to_string(),
                    network_config: json!({
                        "instance_id": new_id.to_string(),
                        "network_name": "new-name"
                    }),
                },
            ],
        )
        .await
        .unwrap();

        // Exactly keep_id and new_id must remain; stale_id must be gone.
        let configs = storage
            .db()
            .list_network_configs((user_id, machine_id), ListNetworkProps::All)
            .await
            .unwrap();
        let config_ids = configs
            .iter()
            .map(|cfg| cfg.network_instance_id.clone())
            .collect::<HashSet<_>>();
        assert_eq!(configs.len(), 2);
        assert!(config_ids.contains(&keep_id.to_string()));
        assert!(config_ids.contains(&new_id.to_string()));
        assert!(!config_ids.contains(&stale_id.to_string()));

        // The kept config must reflect the desired (updated) network name.
        let updated_keep = storage
            .db()
            .get_network_config((user_id, machine_id), &keep_id.to_string())
            .await
            .unwrap()
            .unwrap();
        let updated_keep_config: NetworkConfig =
            serde_json::from_str(&updated_keep.network_config).unwrap();
        assert_eq!(
            updated_keep_config.network_name.as_deref(),
            Some("updated-name")
        );
    }
}
+75 -13
View File
@@ -8,11 +8,11 @@ use easytier::{
}; };
use entity::user_running_network_configs; use entity::user_running_network_configs;
use sea_orm::{ use sea_orm::{
prelude::Expr, sea_query::OnConflict, ColumnTrait as _, DatabaseConnection, DbErr, EntityTrait, ColumnTrait as _, DatabaseConnection, DbErr, EntityTrait, QueryFilter as _, Set,
QueryFilter as _, Set, SqlxSqliteConnector, TransactionTrait as _, SqlxSqliteConnector, TransactionTrait as _, prelude::Expr, sea_query::OnConflict,
}; };
use sea_orm_migration::MigratorTrait as _; use sea_orm_migration::MigratorTrait as _;
use sqlx::{migrate::MigrateDatabase as _, types::chrono, Sqlite, SqlitePool}; use sqlx::{Sqlite, SqlitePool, migrate::MigrateDatabase as _, types::chrono};
use uuid::Uuid; use uuid::Uuid;
use crate::migrator; use crate::migrator;
@@ -154,13 +154,17 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
use entity::user_running_network_configs as urnc; use entity::user_running_network_configs as urnc;
let on_conflict = OnConflict::column(urnc::Column::NetworkInstanceId) let on_conflict = OnConflict::columns([
.update_columns([ urnc::Column::UserId,
urnc::Column::NetworkConfig, urnc::Column::DeviceId,
urnc::Column::Disabled, urnc::Column::NetworkInstanceId,
urnc::Column::UpdateTime, ])
]) .update_columns([
.to_owned(); urnc::Column::NetworkConfig,
urnc::Column::Disabled,
urnc::Column::UpdateTime,
])
.to_owned();
let insert_m = urnc::ActiveModel { let insert_m = urnc::ActiveModel {
user_id: sea_orm::Set(user_id), user_id: sea_orm::Set(user_id),
device_id: sea_orm::Set(device_id.to_string()), device_id: sea_orm::Set(device_id.to_string()),
@@ -184,13 +188,14 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
async fn delete_network_configs( async fn delete_network_configs(
&self, &self,
(user_id, _): (UserIdInDb, Uuid), (user_id, device_id): (UserIdInDb, Uuid),
network_inst_ids: &[Uuid], network_inst_ids: &[Uuid],
) -> Result<(), DbErr> { ) -> Result<(), DbErr> {
use entity::user_running_network_configs as urnc; use entity::user_running_network_configs as urnc;
urnc::Entity::delete_many() urnc::Entity::delete_many()
.filter(urnc::Column::UserId.eq(user_id)) .filter(urnc::Column::UserId.eq(user_id))
.filter(urnc::Column::DeviceId.eq(device_id.to_string()))
.filter( .filter(
urnc::Column::NetworkInstanceId urnc::Column::NetworkInstanceId
.is_in(network_inst_ids.iter().map(|id| id.to_string())), .is_in(network_inst_ids.iter().map(|id| id.to_string())),
@@ -203,7 +208,7 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
async fn update_network_config_state( async fn update_network_config_state(
&self, &self,
(user_id, _): (UserIdInDb, Uuid), (user_id, device_id): (UserIdInDb, Uuid),
network_inst_id: Uuid, network_inst_id: Uuid,
disabled: bool, disabled: bool,
) -> Result<(), DbErr> { ) -> Result<(), DbErr> {
@@ -211,6 +216,7 @@ impl Storage<(UserIdInDb, Uuid), user_running_network_configs::Model, DbErr> for
urnc::Entity::update_many() urnc::Entity::update_many()
.filter(urnc::Column::UserId.eq(user_id)) .filter(urnc::Column::UserId.eq(user_id))
.filter(urnc::Column::DeviceId.eq(device_id.to_string()))
.filter(urnc::Column::NetworkInstanceId.eq(network_inst_id.to_string())) .filter(urnc::Column::NetworkInstanceId.eq(network_inst_id.to_string()))
.col_expr(urnc::Column::Disabled, Expr::value(disabled)) .col_expr(urnc::Column::Disabled, Expr::value(disabled))
.col_expr( .col_expr(
@@ -274,7 +280,7 @@ mod tests {
use easytier::{proto::api::manage::NetworkConfig, rpc_service::remote_client::Storage}; use easytier::{proto::api::manage::NetworkConfig, rpc_service::remote_client::Storage};
use sea_orm::{ColumnTrait, EntityTrait, QueryFilter as _}; use sea_orm::{ColumnTrait, EntityTrait, QueryFilter as _};
use crate::db::{entity::user_running_network_configs, Db, ListNetworkProps}; use crate::db::{Db, ListNetworkProps, entity::user_running_network_configs};
#[tokio::test] #[tokio::test]
async fn test_user_network_config_management() { async fn test_user_network_config_management() {
@@ -341,4 +347,60 @@ mod tests {
.unwrap(); .unwrap();
assert!(result3.is_none()); assert!(result3.is_none());
} }
#[tokio::test]
async fn test_user_network_config_same_instance_id_is_scoped_by_device() {
let db = Db::memory_db().await;
let user_id = db.auto_create_user("user-1").await.unwrap().id;
let device1 = uuid::Uuid::new_v4();
let device2 = uuid::Uuid::new_v4();
let inst_id = uuid::Uuid::new_v4();
db.insert_or_update_user_network_config(
(user_id, device1),
inst_id,
NetworkConfig {
network_name: Some("cfg-1".to_string()),
..Default::default()
},
)
.await
.unwrap();
db.insert_or_update_user_network_config(
(user_id, device2),
inst_id,
NetworkConfig {
network_name: Some("cfg-2".to_string()),
..Default::default()
},
)
.await
.unwrap();
let first = db
.get_network_config((user_id, device1), &inst_id.to_string())
.await
.unwrap()
.unwrap();
let second = db
.get_network_config((user_id, device2), &inst_id.to_string())
.await
.unwrap()
.unwrap();
assert_eq!(first.user_id, user_id);
assert_eq!(first.device_id, device1.to_string());
assert_eq!(second.user_id, user_id);
assert_eq!(second.device_id, device2.to_string());
let device1_configs = db
.list_network_configs((user_id, device1), ListNetworkProps::All)
.await
.unwrap();
let device2_configs = db
.list_network_configs((user_id, device2), ListNetworkProps::All)
.await
.unwrap();
assert_eq!(device1_configs.len(), 1);
assert_eq!(device2_configs.len(), 1);
}
} }
+27 -19
View File
@@ -7,7 +7,7 @@ use std::net::IpAddr;
use std::sync::Arc; use std::sync::Arc;
use clap::Parser; use clap::Parser;
use easytier::tunnel::websocket::WSTunnelListener; use easytier::tunnel::websocket::WsTunnelListener;
use easytier::{ use easytier::{
common::{ common::{
config::{ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader}, config::{ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader},
@@ -16,10 +16,12 @@ use easytier::{
log, log,
network::{local_ipv4, local_ipv6}, network::{local_ipv4, local_ipv6},
}, },
tunnel::{tcp::TcpTunnelListener, udp::UdpTunnelListener, TunnelListener}, tunnel::{TunnelListener, tcp::TcpTunnelListener, udp::UdpTunnelListener},
utils::setup_panic_handler, utils::panic::setup_panic_handler,
}; };
use easytier::tunnel::IpScheme;
use easytier::utils::BoxExt;
use mimalloc::MiMalloc; use mimalloc::MiMalloc;
mod client_manager; mod client_manager;
@@ -192,14 +194,12 @@ impl LoggingConfigLoader for &Cli {
} }
} }
pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> { pub fn get_listener_by_url(scheme: IpScheme, l: &url::Url) -> Option<Box<dyn TunnelListener>> {
Ok(match l.scheme() { Some(match scheme {
"tcp" => Box::new(TcpTunnelListener::new(l.clone())), IpScheme::Tcp => TcpTunnelListener::new(l.clone()).boxed(),
"udp" => Box::new(UdpTunnelListener::new(l.clone())), IpScheme::Udp => UdpTunnelListener::new(l.clone()).boxed(),
"ws" => Box::new(WSTunnelListener::new(l.clone())), IpScheme::Ws => WsTunnelListener::new(l.clone()).boxed(),
_ => { _ => return None,
return Err(Error::InvalidUrl(l.to_string()));
}
}) })
} }
@@ -213,15 +213,23 @@ async fn get_dual_stack_listener(
), ),
Error, Error,
> { > {
let is_protocol_support_dual_stack = let scheme = protocol
protocol.trim().to_lowercase() == "tcp" || protocol.trim().to_lowercase() == "udp"; .parse()
let v6_listener = if is_protocol_support_dual_stack && local_ipv6().await.is_ok() { .map_err(|_| Error::InvalidUrl(protocol.to_string()))?;
get_listener_by_url(&format!("{}://[::0]:{}", protocol, port).parse().unwrap()).ok() let v6_listener =
} else { if local_ipv6().await.is_ok() && matches!(scheme, IpScheme::Tcp | IpScheme::Udp) {
None get_listener_by_url(
}; scheme,
&format!("{protocol}://[::]:{port}").parse().unwrap(),
)
} else {
None
};
let v4_listener = if local_ipv4().await.is_ok() { let v4_listener = if local_ipv4().await.is_ok() {
get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok() get_listener_by_url(
scheme,
&format!("{protocol}://0.0.0.0:{port}").parse().unwrap(),
)
} else { } else {
None None
}; };
@@ -0,0 +1,120 @@
use sea_orm_migration::prelude::*;
pub struct Migration;
impl MigrationName for Migration {
fn name(&self) -> &str {
"m20260403_000002_scope_network_config_unique"
}
}
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
let db = manager.get_connection();
db.execute_unprepared(
r#"
CREATE TABLE user_running_network_configs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
user_id INTEGER NOT NULL,
device_id TEXT NOT NULL,
network_instance_id TEXT NOT NULL,
network_config TEXT NOT NULL,
disabled BOOLEAN NOT NULL DEFAULT FALSE,
create_time TEXT NOT NULL,
update_time TEXT NOT NULL,
CONSTRAINT fk_user_running_network_configs_user_id_to_users_id
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
ON UPDATE CASCADE
);
INSERT INTO user_running_network_configs_new (
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
)
SELECT
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
FROM user_running_network_configs;
DROP TABLE user_running_network_configs;
ALTER TABLE user_running_network_configs_new RENAME TO user_running_network_configs;
CREATE INDEX idx_user_running_network_configs_user_id
ON user_running_network_configs(user_id);
CREATE UNIQUE INDEX idx_user_running_network_configs_scope_inst
ON user_running_network_configs(user_id, device_id, network_instance_id);
"#,
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
let db = manager.get_connection();
db.execute_unprepared(
r#"
CREATE TABLE user_running_network_configs_old (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
user_id INTEGER NOT NULL,
device_id TEXT NOT NULL,
network_instance_id TEXT NOT NULL UNIQUE,
network_config TEXT NOT NULL,
disabled BOOLEAN NOT NULL DEFAULT FALSE,
create_time TEXT NOT NULL,
update_time TEXT NOT NULL,
CONSTRAINT fk_user_running_network_configs_user_id_to_users_id
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
ON UPDATE CASCADE
);
INSERT INTO user_running_network_configs_old (
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
)
SELECT
id,
user_id,
device_id,
network_instance_id,
network_config,
disabled,
create_time,
update_time
FROM user_running_network_configs;
DROP TABLE user_running_network_configs;
ALTER TABLE user_running_network_configs_old RENAME TO user_running_network_configs;
CREATE INDEX idx_user_running_network_configs_user_id
ON user_running_network_configs(user_id);
"#,
)
.await?;
Ok(())
}
}
+5 -1
View File
@@ -1,12 +1,16 @@
use sea_orm_migration::prelude::*; use sea_orm_migration::prelude::*;
mod m20241029_000001_init; mod m20241029_000001_init;
mod m20260403_000002_scope_network_config_unique;
pub struct Migrator; pub struct Migrator;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigratorTrait for Migrator { impl MigratorTrait for Migrator {
fn migrations() -> Vec<Box<dyn MigrationTrait>> { fn migrations() -> Vec<Box<dyn MigrationTrait>> {
vec![Box::new(m20241029_000001_init::Migration)] vec![
Box::new(m20241029_000001_init::Migration),
Box::new(m20260403_000002_scope_network_config_unique::Migration),
]
} }
} }
+12 -11
View File
@@ -1,7 +1,7 @@
use axum::{ use axum::{
Router,
http::StatusCode, http::StatusCode,
routing::{get, post, put}, routing::{get, post, put},
Router,
}; };
use axum_login::login_required; use axum_login::login_required;
use axum_messages::Message; use axum_messages::Message;
@@ -14,8 +14,8 @@ use std::sync::Arc;
use crate::FeatureFlags; use crate::FeatureFlags;
use super::{ use super::{
users::{AuthSession, Credentials},
AppStateInner, AppStateInner,
users::{AuthSession, Credentials},
}; };
#[derive(Debug, Deserialize, Serialize)] #[derive(Debug, Deserialize, Serialize)]
@@ -44,7 +44,7 @@ mod put {
use axum_login::AuthUser; use axum_login::AuthUser;
use easytier::proto::common::Void; use easytier::proto::common::Void;
use crate::restful::{other_error, users::ChangePassword, HttpHandleError}; use crate::restful::{HttpHandleError, other_error, users::ChangePassword};
use super::*; use super::*;
@@ -71,14 +71,14 @@ mod put {
} }
mod post { mod post {
use axum::{extract::Extension, Json}; use axum::{Json, extract::Extension};
use easytier::proto::common::Void; use easytier::proto::common::Void;
use crate::restful::{ use crate::restful::{
captcha::extension::{axum_tower_sessions::CaptchaAxumTowerSessionStaticExt, CaptchaUtil}, HttpHandleError,
captcha::extension::{CaptchaUtil, axum_tower_sessions::CaptchaAxumTowerSessionStaticExt},
other_error, other_error,
users::RegisterNewUser, users::RegisterNewUser,
HttpHandleError,
}; };
use super::*; use super::*;
@@ -99,7 +99,7 @@ mod post {
return Err(( return Err((
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
Json::from(other_error(format!("{:?}", e))), Json::from(other_error(format!("{:?}", e))),
)) ));
} }
}; };
@@ -150,14 +150,15 @@ mod post {
mod get { mod get {
use crate::restful::{ use crate::restful::{
HttpHandleError,
captcha::{ captcha::{
builder::spec::SpecCaptcha,
extension::{axum_tower_sessions::CaptchaAxumTowerSessionExt as _, CaptchaUtil},
NewCaptcha as _, NewCaptcha as _,
builder::spec::SpecCaptcha,
extension::{CaptchaUtil, axum_tower_sessions::CaptchaAxumTowerSessionExt as _},
}, },
other_error, HttpHandleError, other_error,
}; };
use axum::{response::Response, Json}; use axum::{Json, response::Response};
use easytier::proto::common::Void; use easytier::proto::common::Void;
use tower_sessions::Session; use tower_sessions::Session;
@@ -2,8 +2,8 @@ use super::super::base::randoms::Randoms;
use super::super::utils::color::Color; use super::super::utils::color::Color;
use super::super::utils::font; use super::super::utils::font;
use base64::prelude::BASE64_STANDARD;
use base64::Engine; use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use rusttype::Font; use rusttype::Font;
use std::fmt::Debug; use std::fmt::Debug;
@@ -9,14 +9,14 @@ use super::super::{CaptchaFont, NewCaptcha};
use image::{ImageBuffer, Rgba}; use image::{ImageBuffer, Rgba};
use imageproc::drawing; use imageproc::drawing;
use rand::{rngs::ThreadRng, Rng}; use rand::{Rng, rngs::ThreadRng};
use rusttype::{Font, Scale}; use rusttype::{Font, Scale};
use std::io::{Cursor, Write}; use std::io::{Cursor, Write};
use std::sync::Arc; use std::sync::Arc;
mod color { mod color {
use image::Rgba; use image::Rgba;
use rand::{rngs::ThreadRng, Rng}; use rand::{Rng, rngs::ThreadRng};
pub fn gen_background_color(rng: &mut ThreadRng) -> Rgba<u8> { pub fn gen_background_color(rng: &mut ThreadRng) -> Rgba<u8> {
let red = rng.gen_range(200..=255); let red = rng.gen_range(200..=255);
let green = rng.gen_range(200..=255); let green = rng.gen_range(200..=255);
@@ -133,7 +133,7 @@ impl<'a, 'b> CaptchaBuilder<'a, 'b> {
fn draw_line(&self, image: &mut ImageBuffer<Rgba<u8>, Vec<u8>>, rng: &mut ThreadRng) { fn draw_line(&self, image: &mut ImageBuffer<Rgba<u8>, Vec<u8>>, rng: &mut ThreadRng) {
let line_color = color::gen_line_color(rng); let line_color = color::gen_line_color(rng);
let is_h = rng.gen(); let is_h = rng.r#gen();
let (start, end) = if is_h { let (start, end) = if is_h {
let xa = rng.gen_range(0.0..(self.width as f32) / 2.0); let xa = rng.gen_range(0.0..(self.width as f32) / 2.0);
let ya = rng.gen_range(0.0..(self.height as f32)); let ya = rng.gen_range(0.0..(self.height as f32));
+6 -6
View File
@@ -8,13 +8,13 @@ mod users;
use std::{net::SocketAddr, sync::Arc}; use std::{net::SocketAddr, sync::Arc};
use axum::extract::Path; use axum::extract::Path;
use axum::http::{header, Request, StatusCode}; use axum::http::{Request, StatusCode, header};
use axum::middleware::{self as axum_mw, Next}; use axum::middleware::{self as axum_mw, Next};
use axum::response::Response; use axum::response::Response;
use axum::routing::{delete, post}; use axum::routing::{delete, post};
use axum::{extract::State, routing::get, Extension, Json, Router}; use axum::{Extension, Json, Router, extract::State, routing::get};
use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer}; use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer};
use axum_login::{login_required, AuthManagerLayerBuilder, AuthUser, AuthzBackend}; use axum_login::{AuthManagerLayerBuilder, AuthUser, AuthzBackend, login_required};
use axum_messages::MessagesManagerLayer; use axum_messages::MessagesManagerLayer;
use easytier::common::config::{ConfigLoader, TomlConfigLoader}; use easytier::common::config::{ConfigLoader, TomlConfigLoader};
use easytier::common::scoped_task::ScopedTask; use easytier::common::scoped_task::ScopedTask;
@@ -23,17 +23,17 @@ use easytier::proto::rpc_types;
use network::NetworkApi; use network::NetworkApi;
use sea_orm::DbErr; use sea_orm::DbErr;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use tower_sessions::Expiry;
use tower_sessions::cookie::time::Duration; use tower_sessions::cookie::time::Duration;
use tower_sessions::cookie::{Key, SameSite}; use tower_sessions::cookie::{Key, SameSite};
use tower_sessions::Expiry;
use tower_sessions_sqlx_store::SqliteStore; use tower_sessions_sqlx_store::SqliteStore;
use users::{AuthSession, Backend}; use users::{AuthSession, Backend};
use crate::client_manager::storage::StorageToken; use crate::FeatureFlags;
use crate::client_manager::ClientManager; use crate::client_manager::ClientManager;
use crate::client_manager::storage::StorageToken;
use crate::db::{Db, UserIdInDb}; use crate::db::{Db, UserIdInDb};
use crate::webhook::SharedWebhookConfig; use crate::webhook::SharedWebhookConfig;
use crate::FeatureFlags;
/// Embed assets for web dashboard, build frontend first /// Embed assets for web dashboard, build frontend first
#[cfg(feature = "embed")] #[cfg(feature = "embed")]
+2 -2
View File
@@ -1,7 +1,7 @@
use axum::extract::Path; use axum::extract::Path;
use axum::http::StatusCode; use axum::http::StatusCode;
use axum::routing::{delete, post}; use axum::routing::{delete, post};
use axum::{extract::State, routing::get, Json, Router}; use axum::{Json, Router, extract::State, routing::get};
use axum_login::AuthUser; use axum_login::AuthUser;
use easytier::launcher::NetworkConfig; use easytier::launcher::NetworkConfig;
use easytier::proto::common::Void; use easytier::proto::common::Void;
@@ -16,7 +16,7 @@ use crate::db::UserIdInDb;
use super::users::AuthSession; use super::users::AuthSession;
use super::{ use super::{
convert_db_error, other_error, AppState, AppStateInner, Error, HttpHandleError, RpcError, AppState, AppStateInner, Error, HttpHandleError, RpcError, convert_db_error, other_error,
}; };
fn convert_rpc_error(e: RpcError) -> (StatusCode, Json<Error>) { fn convert_rpc_error(e: RpcError) -> (StatusCode, Json<Error>) {
+13 -12
View File
@@ -4,8 +4,8 @@ use std::time::Duration;
use subtle::ConstantTimeEq; use subtle::ConstantTimeEq;
use axum::routing::get;
use axum::Router; use axum::Router;
use axum::routing::get;
use openidconnect::core::{ use openidconnect::core::{
CoreAuthDisplay, CoreAuthPrompt, CoreErrorResponseType, CoreGenderClaim, CoreJsonWebKey, CoreAuthDisplay, CoreAuthPrompt, CoreErrorResponseType, CoreGenderClaim, CoreJsonWebKey,
CoreJweContentEncryptionAlgorithm, CoreJwsSigningAlgorithm, CoreProviderMetadata, CoreJweContentEncryptionAlgorithm, CoreJwsSigningAlgorithm, CoreProviderMetadata,
@@ -216,7 +216,9 @@ impl OidcConfig {
} = opts; } = opts;
if oidc_issuer_url.is_none() || oidc_client_id.is_none() || oidc_redirect_url.is_none() { if oidc_issuer_url.is_none() || oidc_client_id.is_none() || oidc_redirect_url.is_none() {
return Err(anyhow::anyhow!("--oidc-issuer-url, --oidc-client-id and --oidc-redirect-url are required when using OIDC authentication")); return Err(anyhow::anyhow!(
"--oidc-issuer-url, --oidc-client-id and --oidc-redirect-url are required when using OIDC authentication"
));
} }
if oidc_username_claim.trim().is_empty() { if oidc_username_claim.trim().is_empty() {
return Err(anyhow::anyhow!("--oidc-username-claim cannot be empty")); return Err(anyhow::anyhow!("--oidc-username-claim cannot be empty"));
@@ -373,18 +375,17 @@ mod route {
) )
.into_response(); .into_response();
} }
if let Some(verifier) = pkce_verifier { if let Some(verifier) = pkce_verifier
if let Err(e) = session && let Err(e) = session
.insert("oidc_pkce_verifier", verifier.secret().clone()) .insert("oidc_pkce_verifier", verifier.secret().clone())
.await .await
{ {
tracing::error!("Failed to store pkce_verifier in session: {:?}", e); tracing::error!("Failed to store pkce_verifier in session: {:?}", e);
return ( return (
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
Json(other_error("Session error")), Json(other_error("Session error")),
) )
.into_response(); .into_response();
}
} }
if let Err(e) = session.insert("oidc_pkce_used", pkce_enabled).await { if let Err(e) = session.insert("oidc_pkce_used", pkce_enabled).await {
tracing::error!("Failed to store pkce_used in session: {:?}", e); tracing::error!("Failed to store pkce_used in session: {:?}", e);
+3 -3
View File
@@ -1,15 +1,15 @@
use axum::{ use axum::{
Json, Router,
extract::{Path, State}, extract::{Path, State},
http::StatusCode, http::StatusCode,
routing::post, routing::post,
Json, Router,
}; };
use axum_login::AuthUser as _; use axum_login::AuthUser as _;
use easytier::proto::rpc_types::controller::BaseController; use easytier::proto::rpc_types::controller::BaseController;
use crate::db::UserIdInDb; use crate::db::UserIdInDb;
use super::{other_error, AppState, HttpHandleError}; use super::{AppState, HttpHandleError, other_error};
#[derive(Debug, serde::Deserialize)] #[derive(Debug, serde::Deserialize)]
pub struct ProxyRpcRequest { pub struct ProxyRpcRequest {
@@ -120,7 +120,7 @@ async fn handle_proxy_rpc_by_session(
return Err(( return Err((
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
other_error(format!("Unknown service: {}", service_name)).into(), other_error(format!("Unknown service: {}", service_name)).into(),
)) ));
} }
}; };
+3 -3
View File
@@ -39,9 +39,9 @@ impl AuthUser for User {
fn session_auth_hash(&self) -> &[u8] { fn session_auth_hash(&self) -> &[u8] {
self.db_user.password.as_bytes() // We use the password hash as the auth self.db_user.password.as_bytes() // We use the password hash as the auth
// hash--what this means // hash--what this means
// is when the user changes their password the // is when the user changes their password the
// auth session becomes invalid. // auth session becomes invalid.
} }
} }
+2 -1
View File
@@ -1,8 +1,9 @@
use axum::{ use axum::{
Router,
extract::State, extract::State,
http::header, http::header,
response::{IntoResponse, Response}, response::{IntoResponse, Response},
routing, Router, routing,
}; };
use axum_embed::ServeEmbed; use axum_embed::ServeEmbed;
use easytier::common::scoped_task::ScopedTask; use easytier::common::scoped_task::ScopedTask;
+9 -1
View File
@@ -49,6 +49,7 @@ impl WebhookConfig {
pub struct ValidateTokenRequest { pub struct ValidateTokenRequest {
pub token: String, pub token: String,
pub machine_id: String, pub machine_id: String,
pub public_ip: Option<String>,
pub hostname: String, pub hostname: String,
pub version: String, pub version: String,
pub os_type: Option<String>, pub os_type: Option<String>,
@@ -65,7 +66,14 @@ pub struct ValidateTokenResponse {
pub pre_approved: bool, pub pre_approved: bool,
#[serde(default)] #[serde(default)]
pub binding_version: u64, pub binding_version: u64,
pub network_config: Option<serde_json::Value>, pub managed_network_configs: Vec<ManagedNetworkConfig>,
pub config_revision: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct ManagedNetworkConfig {
pub instance_id: String,
pub network_config: serde_json::Value,
} }
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
+11
View File
@@ -0,0 +1,11 @@
disallowed-methods = [
{ path = "itertools::Itertools::map_into", reason = "Blocks underlying iterator optimizations. Use the native `.map(Into::into)` instead." },
{ path = "itertools::Itertools::map_ok", reason = "Blocks underlying iterator optimizations. Use the native `.map(|r| r.map(f))` instead." },
{ path = "itertools::Itertools::filter_ok", reason = "Blocks underlying iterator optimizations. Use a native approach, e.g., `.filter(|r| r.as_ref().map_or(true, condition))`." },
{ path = "itertools::Itertools::filter_map_ok", reason = "Blocks underlying iterator optimizations. Use native `.map()` and `.flatten()`, or extract logic into a standard `.filter_map()`." },
{ path = "itertools::Itertools::collect_vec", reason = "Non-standard idiom. Directly use the standard library's `.collect::<Vec<_>>()`." },
{ path = "itertools::Itertools::try_collect", reason = "Non-standard idiom. Standard `collect()` already supports Result/Option inversion; use `.collect::<Result<_, _>>()`." },
{ path = "itertools::Itertools::set_from", reason = "Non-standard idiom. Directly use the `.extend()` method provided by the standard library's `Extend` trait." },
{ path = "itertools::Itertools::concat", reason = "Non-standard idiom. Use native `.flatten().collect()` or a slice's `.concat()` instead." }
]
+16 -19
View File
@@ -3,12 +3,12 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command." description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier" homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier" repository = "https://github.com/EasyTier/EasyTier"
version = "2.5.0" version = "2.6.0"
edition = "2021" edition.workspace = true
rust-version.workspace = true
authors = ["kkrainbow"] authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"] keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"] categories = ["network-programming", "command-line-utilities"]
rust-version = "1.93.0"
license-file = "LICENSE" license-file = "LICENSE"
readme = "README.md" readme = "README.md"
@@ -37,7 +37,7 @@ tracing-subscriber = { version = "0.3", features = [
"time", "time",
] } ] }
derivative = "2.2.0" derivative = "2.2.0"
derive_more = {version = "2.1.1", features = ["full"]} derive_more = { version = "2.1.1", features = ["full"] }
console-subscriber = { version = "0.4.1", optional = true } console-subscriber = { version = "0.4.1", optional = true }
indoc = "2.0.7" indoc = "2.0.7"
regex = "1.8" regex = "1.8"
@@ -50,6 +50,8 @@ time = "0.3"
toml = "0.8.12" toml = "0.8.12"
chrono = { version = "0.4.37", features = ["serde"] } chrono = { version = "0.4.37", features = ["serde"] }
delegate = "0.13.5"
itertools = "0.14.0" itertools = "0.14.0"
strum = { version = "0.27.2", features = ["derive"] } strum = { version = "0.27.2", features = ["derive"] }
@@ -79,12 +81,12 @@ quinn = { version = "0.11.8", optional = true, features = ["ring"] }
quinn-plaintext = { version = "0.3.0", optional = true } quinn-plaintext = { version = "0.3.0", optional = true }
rustls = { version = "0.23.0", features = [ rustls = { version = "0.23.0", features = [
"ring","tls12" "ring", "tls12"
], default-features = false, optional = true } ], default-features = false, optional = true }
rcgen = { version = "0.12.1", optional = true } rcgen = { version = "0.12.1", optional = true }
# for websocket # for websocket
tokio-websockets = { version = "0.8", optional = true, features = [ tokio-websockets = { version = "0.13.2", optional = true, features = [
"rustls-webpki-roots", "rustls-webpki-roots",
"client", "client",
"server", "server",
@@ -94,6 +96,7 @@ tokio-websockets = { version = "0.8", optional = true, features = [
http = { version = "1", default-features = false, features = [ http = { version = "1", default-features = false, features = [
"std", "std",
], optional = true } ], optional = true }
forwarded-header-value = { version = "0.1.1", optional = true }
tokio-rustls = { version = "0.26", default-features = false, optional = true } tokio-rustls = { version = "0.26", default-features = false, optional = true }
# for tap device # for tap device
@@ -162,7 +165,6 @@ network-interface = "2.0"
# for ospf route # for ospf route
petgraph = "0.8.1" petgraph = "0.8.1"
hashbrown = "0.15.3"
ordered_hash_map = "0.5.0" ordered_hash_map = "0.5.0"
# for wireguard # for wireguard
@@ -239,6 +241,7 @@ hickory-server = { version = "0.25.2", features = [
"resolver", "resolver",
], optional = true } ], optional = true }
bon = "3.9.1"
derive_builder = "0.20.2" derive_builder = "0.20.2"
humantime-serde = "1.1.1" humantime-serde = "1.1.1"
multimap = "0.10.1" multimap = "0.10.1"
@@ -249,7 +252,6 @@ shellexpand = "3.1.1"
# for fake tcp # for fake tcp
flume = { version = "0.12", optional = true } flume = { version = "0.12", optional = true }
cfg-if = "1.0"
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies] [target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies]
machine-uid = "0.5.3" machine-uid = "0.5.3"
@@ -312,6 +314,7 @@ jemalloc-sys = { package = "tikv-jemalloc-sys", version = "0.6.0", features = [
], optional = true } ], optional = true }
[build-dependencies] [build-dependencies]
cfg_aliases = "0.2.1"
tonic-build = "0.12" tonic-build = "0.12"
globwalk = "0.8.1" globwalk = "0.8.1"
regex = "1" regex = "1"
@@ -321,22 +324,14 @@ easytier-rpc-build = { path = "../easytier-rpc-build", features = [
"internal-namespace", "internal-namespace",
] } ] }
prost-reflect-build = { version = "0.14.0" } prost-reflect-build = { version = "0.14.0" }
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = [
"win7",
] }
[target.'cfg(windows)'.build-dependencies] [target.'cfg(windows)'.build-dependencies]
reqwest = { version = "0.12.12", features = ["blocking"] } reqwest = { version = "0.12.12", features = ["blocking"] }
zip = "4.0.0" zip = "4.0.0"
# enable thunk-rs when compiling for x86_64 or i686 windows
[target.x86_64-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = [
"win7",
] }
[target.i686-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = [
"win7",
] }
[dev-dependencies] [dev-dependencies]
serial_test = "3.0.0" serial_test = "3.0.0"
@@ -344,6 +339,7 @@ rstest = "0.25.0"
futures-util = "0.3.31" futures-util = "0.3.31"
maplit = "1.0.2" maplit = "1.0.2"
tempfile = "3.22.0" tempfile = "3.22.0"
ctor = "0.8.0"
[target.'cfg(target_os = "linux")'.dev-dependencies] [target.'cfg(target_os = "linux")'.dev-dependencies]
defguard_wireguard_rs = "0.4.2" defguard_wireguard_rs = "0.4.2"
@@ -387,6 +383,7 @@ tun = ["dep:tun"]
websocket = [ websocket = [
"dep:tokio-websockets", "dep:tokio-websockets",
"dep:http", "dep:http",
"dep:forwarded-header-value",
"dep:tokio-rustls", "dep:tokio-rustls",
"dep:rustls", "dep:rustls",
"dep:rcgen", "dep:rcgen",
+19 -8
View File
@@ -1,9 +1,9 @@
use cfg_aliases::cfg_aliases;
use prost_wkt_build::{FileDescriptorSet, Message as _};
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
use std::io::Cursor; use std::io::Cursor;
use std::{env, path::PathBuf}; use std::{env, path::PathBuf};
use prost_wkt_build::{FileDescriptorSet, Message as _};
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
struct WindowsBuild {} struct WindowsBuild {}
@@ -86,7 +86,9 @@ impl WindowsBuild {
} else { } else {
Self::download_protoc() Self::download_protoc()
}; };
std::env::set_var("PROTOC", protoc_path); unsafe {
std::env::set_var("PROTOC", protoc_path);
}
} }
} }
@@ -130,12 +132,21 @@ fn check_locale() {
} }
fn main() -> Result<(), Box<dyn std::error::Error>> { fn main() -> Result<(), Box<dyn std::error::Error>> {
cfg_aliases! {
mobile: {
any(
target_os = "android",
target_os = "ios",
all(target_os = "macos", feature = "macos-ne"),
target_env = "ohos"
)
}
}
let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap_or_default();
let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap_or_default();
// enable thunk-rs when target os is windows and arch is x86_64 or i686 // enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")] if target_os == "windows" && (target_arch == "x86" || target_arch == "x86_64") {
if !std::env::var("TARGET")
.unwrap_or_default()
.contains("aarch64")
{
thunk::thunk(); thunk::thunk();
} }
+7 -4
View File
@@ -152,8 +152,8 @@ core_clap:
如果该参数为空,则禁用转发。默认允许所有网络。 如果该参数为空,则禁用转发。默认允许所有网络。
例如:'*'(所有网络),'def*'(以def为前缀的网络),'net1 net2'(只允许net1和net2" 例如:'*'(所有网络),'def*'(以def为前缀的网络),'net1 net2'(只允许net1和net2"
disable_p2p: disable_p2p:
en: "disable p2p communication, will only relay packets with peers specified by --peers" en: "disable ordinary automatic p2p; still establish p2p with peers marked as need-p2p, and other peers should not proactively connect to this node"
zh-CN: "禁用P2P通信,只通过--peers指定的节点转发数据包" zh-CN: "禁用普通自动P2P;仍会与标记为 need-p2p 的节点建立P2P连接,其他节点不应主动与当前节点建立P2P"
p2p_only: p2p_only:
en: "only communicate with peers that already establish p2p connection" en: "only communicate with peers that already establish p2p connection"
zh-CN: "仅与已经建立P2P连接的对等节点通信" zh-CN: "仅与已经建立P2P连接的对等节点通信"
@@ -212,11 +212,14 @@ core_clap:
en: "specify the top-level domain zone for magic DNS. if not provided, defaults to the value from dns_server module (et.net.). only used when accept_dns is true." en: "specify the top-level domain zone for magic DNS. if not provided, defaults to the value from dns_server module (et.net.). only used when accept_dns is true."
zh-CN: "指定魔法DNS的顶级域名区域。如果未提供,默认使用dns_server模块中的值(et.net.)。仅在accept_dns为true时使用。" zh-CN: "指定魔法DNS的顶级域名区域。如果未提供,默认使用dns_server模块中的值(et.net.)。仅在accept_dns为true时使用。"
private_mode: private_mode:
en: "if true, nodes with different network names or passwords from this network are not allowed to perform handshake or relay through this node." en: "if true, foreign networks are only allowed when this node can verify they use the same network secret, or when a foreign credential node is already trusted via admin-issued credential propagation; different or missing secrets are otherwise rejected."
zh-CN: "如果为true,则允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转" zh-CN: "如果为true,则允许两类 foreign network 接入:本节点能验证其使用相同 network secret 的节点,或已通过 foreign network 管理节点传播而被信任的 credential 节点;否则 secret 不同或缺失时会被拒绝。"
foreign_relay_bps_limit: foreign_relay_bps_limit:
en: "the maximum bps limit for foreign network relay, default is no limit. unit: BPS (bytes per second)" en: "the maximum bps limit for foreign network relay, default is no limit. unit: BPS (bytes per second)"
zh-CN: "作为共享节点时,限制非本地网络的流量转发速率,默认无限制,单位 BPS (字节每秒)" zh-CN: "作为共享节点时,限制非本地网络的流量转发速率,默认无限制,单位 BPS (字节每秒)"
instance_recv_bps_limit:
en: "the maximum total receive bps limit for this instance, default is no limit. unit: BPS (bytes per second)"
zh-CN: "限制当前网络实例整体入站流量的总接收速率,默认无限制,单位 BPS (字节每秒)"
tcp_whitelist: tcp_whitelist:
en: "tcp port whitelist. Supports single ports (80) and ranges (8000-9000)" en: "tcp port whitelist. Supports single ports (80) and ranges (8000-9000)"
zh-CN: "TCP 端口白名单。支持单个端口(80)和范围(8000-9000" zh-CN: "TCP 端口白名单。支持单个端口(80)和范围(8000-9000"
+5 -5
View File
@@ -3,7 +3,6 @@ use std::{io, mem::ManuallyDrop, net::SocketAddr, os::windows::io::AsRawSocket};
use anyhow::Context; use anyhow::Context;
use network_interface::NetworkInterfaceConfig; use network_interface::NetworkInterfaceConfig;
use windows::{ use windows::{
core::BSTR,
Win32::{ Win32::{
Foundation::{BOOL, FALSE}, Foundation::{BOOL, FALSE},
NetworkManagement::WindowsFirewall::{ NetworkManagement::WindowsFirewall::{
@@ -12,15 +11,16 @@ use windows::{
NET_FW_RULE_DIR_OUT, NET_FW_RULE_DIR_OUT,
}, },
Networking::WinSock::{ Networking::WinSock::{
htonl, setsockopt, WSAGetLastError, WSAIoctl, IPPROTO_IP, IPPROTO_IPV6, IP_UNICAST_IF, IPPROTO_IP, IPPROTO_IPV6, IPV6_UNICAST_IF, SIO_UDP_CONNRESET, SOCKET,
IPV6_UNICAST_IF, IP_UNICAST_IF, SIO_UDP_CONNRESET, SOCKET, SOCKET_ERROR, SOCKET_ERROR, WSAGetLastError, WSAIoctl, htonl, setsockopt,
}, },
System::Com::{ System::Com::{
CoCreateInstance, CoInitializeEx, CoUninitialize, CLSCTX_ALL, COINIT_MULTITHREADED, CLSCTX_ALL, COINIT_MULTITHREADED, CoCreateInstance, CoInitializeEx, CoUninitialize,
}, },
System::Ole::{SafeArrayCreateVector, SafeArrayPutElement}, System::Ole::{SafeArrayCreateVector, SafeArrayPutElement},
System::Variant::{VARENUM, VARIANT, VT_ARRAY, VT_BSTR, VT_VARIANT}, System::Variant::{VARENUM, VARIANT, VT_ARRAY, VT_BSTR, VT_VARIANT},
}, },
core::BSTR,
}; };
pub fn disable_connection_reset<S: AsRawSocket>(socket: &S) -> io::Result<()> { pub fn disable_connection_reset<S: AsRawSocket>(socket: &S) -> io::Result<()> {
@@ -345,7 +345,7 @@ fn add_protocol_firewall_rules(
SafeArrayPutElement( SafeArrayPutElement(
interface_array, interface_array,
&index as *const _ as *const i32, &index as *const _,
&variant_interface as *const _ as *const std::ffi::c_void, &variant_interface as *const _ as *const std::ffi::c_void,
)?; )?;
+20 -20
View File
@@ -345,7 +345,7 @@ impl AclProcessor {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Sort by priority (higher priority first) // Sort by priority (higher priority first)
rules.sort_by(|a, b| b.priority.cmp(&a.priority)); rules.sort_by_key(|r| std::cmp::Reverse(r.priority));
match chain.chain_type() { match chain.chain_type() {
ChainType::Inbound => inbound_rules.extend(rules), ChainType::Inbound => inbound_rules.extend(rules),
@@ -507,7 +507,7 @@ impl AclProcessor {
matched_rule: Some(RuleId::Default), matched_rule: Some(RuleId::Default),
should_log: false, should_log: false,
log_context: Some(AclLogContext::UnsupportedChainType), log_context: Some(AclLogContext::UnsupportedChainType),
} };
} }
}; };
@@ -679,28 +679,28 @@ impl AclProcessor {
} }
// Source port check // Source port check
if let Some(src_port) = packet_info.src_port { if let Some(src_port) = packet_info.src_port
if !rule.src_port_ranges.is_empty() { && !rule.src_port_ranges.is_empty()
let matches = rule {
.src_port_ranges let matches = rule
.iter() .src_port_ranges
.any(|(start, end)| src_port >= *start && src_port <= *end); .iter()
if !matches { .any(|(start, end)| src_port >= *start && src_port <= *end);
return false; if !matches {
} return false;
} }
} }
// Destination port check // Destination port check
if let Some(dst_port) = packet_info.dst_port { if let Some(dst_port) = packet_info.dst_port
if !rule.dst_port_ranges.is_empty() { && !rule.dst_port_ranges.is_empty()
let matches = rule {
.dst_port_ranges let matches = rule
.iter() .dst_port_ranges
.any(|(start, end)| dst_port >= *start && dst_port <= *end); .iter()
if !matches { .any(|(start, end)| dst_port >= *start && dst_port <= *end);
return false; if !matches {
} return false;
} }
} }
+1 -1
View File
@@ -9,7 +9,7 @@ use zstd::bulk;
use zerocopy::{AsBytes as _, FromBytes as _}; use zerocopy::{AsBytes as _, FromBytes as _};
use crate::tunnel::packet_def::{CompressorAlgo, CompressorTail, ZCPacket, COMPRESSOR_TAIL_SIZE}; use crate::tunnel::packet_def::{COMPRESSOR_TAIL_SIZE, CompressorAlgo, CompressorTail, ZCPacket};
type Error = anyhow::Error; type Error = anyhow::Error;
+70 -68
View File
@@ -6,10 +6,9 @@ use std::{
}; };
use anyhow::Context; use anyhow::Context;
use base64::{prelude::BASE64_STANDARD, Engine as _}; use base64::{Engine as _, prelude::BASE64_STANDARD};
use cfg_if::cfg_if;
use clap::builder::PossibleValue;
use clap::ValueEnum; use clap::ValueEnum;
use clap::builder::PossibleValue;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use strum::{Display, EnumString, VariantArray}; use strum::{Display, EnumString, VariantArray};
use tokio::io::AsyncReadExt as _; use tokio::io::AsyncReadExt as _;
@@ -69,6 +68,7 @@ pub fn gen_default_flags() -> Flags {
quic_listen_port: u32::MAX, quic_listen_port: u32::MAX,
need_p2p: false, need_p2p: false,
instance_recv_bps_limit: u64::MAX,
} }
} }
@@ -108,10 +108,9 @@ impl ValueEnum for EncryptionAlgorithm {
#[allow(clippy::derivable_impls)] #[allow(clippy::derivable_impls)]
impl Default for EncryptionAlgorithm { impl Default for EncryptionAlgorithm {
fn default() -> Self { fn default() -> Self {
cfg_if! { cfg_select! {
if #[cfg(any(feature = "aes-gcm", feature = "wireguard", feature = "openssl-crypto"))] { any(feature = "aes-gcm", feature = "wireguard", feature = "openssl-crypto") => EncryptionAlgorithm::AesGcm,
EncryptionAlgorithm::AesGcm _ => {
} else {
crate::common::log::warn!("no AEAD encryption algorithm is available, using INSECURE XOR"); crate::common::log::warn!("no AEAD encryption algorithm is available, using INSECURE XOR");
EncryptionAlgorithm::Xor EncryptionAlgorithm::Xor
} }
@@ -620,14 +619,14 @@ impl ConfigLoader for TomlConfigLoader {
if locked_config.proxy_network.is_none() { if locked_config.proxy_network.is_none() {
locked_config.proxy_network = Some(vec![]); locked_config.proxy_network = Some(vec![]);
} }
if let Some(mapped_cidr) = mapped_cidr.as_ref() { if let Some(mapped_cidr) = mapped_cidr.as_ref()
if cidr.network_length() != mapped_cidr.network_length() { && cidr.network_length() != mapped_cidr.network_length()
return Err(anyhow::anyhow!( {
"Mapped CIDR must have the same network length as the original CIDR: {} != {}", return Err(anyhow::anyhow!(
cidr.network_length(), "Mapped CIDR must have the same network length as the original CIDR: {} != {}",
mapped_cidr.network_length() cidr.network_length(),
)); mapped_cidr.network_length()
} ));
} }
// insert if no duplicate // insert if no duplicate
if !locked_config if !locked_config
@@ -880,10 +879,10 @@ impl ConfigLoader for TomlConfigLoader {
let mut flag_map: serde_json::Map<String, serde_json::Value> = Default::default(); let mut flag_map: serde_json::Map<String, serde_json::Value> = Default::default();
for (key, value) in default_flags_hashmap { for (key, value) in default_flags_hashmap {
if let Some(v) = cur_flags_hashmap.get(&key) { if let Some(v) = cur_flags_hashmap.get(&key)
if *v != value { && *v != value
flag_map.insert(key, v.clone()); {
} flag_map.insert(key, v.clone());
} }
} }
@@ -1088,6 +1087,7 @@ pub async fn load_config_from_file(
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::tests::{remove_env_var, set_env_var};
use std::io::Write; use std::io::Write;
use std::path::PathBuf; use std::path::PathBuf;
use tempfile::NamedTempFile; use tempfile::NamedTempFile;
@@ -1211,8 +1211,8 @@ proto = "tcp"
#[tokio::test] #[tokio::test]
async fn test_env_var_expansion_and_readonly_flag() { async fn test_env_var_expansion_and_readonly_flag() {
// 设置测试环境变量 // 设置测试环境变量
std::env::set_var("TEST_SECRET", "my-test-secret-123"); set_env_var("TEST_SECRET", "my-test-secret-123");
std::env::set_var("TEST_NETWORK", "test-network"); set_env_var("TEST_NETWORK", "test-network");
// 创建临时配置文件,包含环境变量占位符 // 创建临时配置文件,包含环境变量占位符
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
@@ -1252,8 +1252,8 @@ network_secret = "${TEST_SECRET}"
); );
// 清理环境变量 // 清理环境变量
std::env::remove_var("TEST_SECRET"); remove_env_var("TEST_SECRET");
std::env::remove_var("TEST_NETWORK"); remove_env_var("TEST_NETWORK");
} }
/// RPC API 安全测试(只读配置保护) /// RPC API 安全测试(只读配置保护)
@@ -1266,7 +1266,7 @@ network_secret = "${TEST_SECRET}"
/// `easytier/src/rpc_service/instance_manage.rs` 中实现 /// `easytier/src/rpc_service/instance_manage.rs` 中实现
#[tokio::test] #[tokio::test]
async fn test_readonly_config_api_protection() { async fn test_readonly_config_api_protection() {
std::env::set_var("API_TEST_SECRET", "secret-value"); set_env_var("API_TEST_SECRET", "secret-value");
// 创建包含环境变量的配置 // 创建包含环境变量的配置
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
@@ -1297,7 +1297,7 @@ network_secret = "${API_TEST_SECRET}"
"Permission flag should be set correctly" "Permission flag should be set correctly"
); );
std::env::remove_var("API_TEST_SECRET"); remove_env_var("API_TEST_SECRET");
} }
/// CLI 参数测试(--disable-env-parsing 开关) /// CLI 参数测试(--disable-env-parsing 开关)
@@ -1307,7 +1307,7 @@ network_secret = "${API_TEST_SECRET}"
/// - 配置不会被标记为只读 /// - 配置不会被标记为只读
#[tokio::test] #[tokio::test]
async fn test_disable_env_parsing_flag() { async fn test_disable_env_parsing_flag() {
std::env::set_var("DISABLED_TEST_VAR", "should-not-expand"); set_env_var("DISABLED_TEST_VAR", "should-not-expand");
// 创建包含环境变量占位符的配置 // 创建包含环境变量占位符的配置
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
@@ -1345,7 +1345,7 @@ network_secret = "${DISABLED_TEST_VAR}"
"Config should be NO_DELETE due to no config_dir, not env vars" "Config should be NO_DELETE due to no config_dir, not env vars"
); );
std::env::remove_var("DISABLED_TEST_VAR"); remove_env_var("DISABLED_TEST_VAR");
} }
/// 多实例隔离测试 /// 多实例隔离测试
@@ -1356,8 +1356,8 @@ network_secret = "${DISABLED_TEST_VAR}"
#[tokio::test] #[tokio::test]
async fn test_multiple_instances_with_different_env_vars() { async fn test_multiple_instances_with_different_env_vars() {
// 实例1:使用第一组环境变量 // 实例1:使用第一组环境变量
std::env::set_var("INSTANCE_SECRET", "instance1-secret"); set_env_var("INSTANCE_SECRET", "instance1-secret");
std::env::set_var("INSTANCE_NAME", "instance-one"); set_env_var("INSTANCE_NAME", "instance-one");
let mut temp_file1 = NamedTempFile::new().unwrap(); let mut temp_file1 = NamedTempFile::new().unwrap();
let config_content = r#" let config_content = r#"
@@ -1387,8 +1387,8 @@ network_secret = "${INSTANCE_SECRET}"
); );
// 实例2:修改环境变量后加载同一模板 // 实例2:修改环境变量后加载同一模板
std::env::set_var("INSTANCE_SECRET", "instance2-secret"); set_env_var("INSTANCE_SECRET", "instance2-secret");
std::env::set_var("INSTANCE_NAME", "instance-two"); set_env_var("INSTANCE_NAME", "instance-two");
let mut temp_file2 = NamedTempFile::new().unwrap(); let mut temp_file2 = NamedTempFile::new().unwrap();
temp_file2.write_all(config_content.as_bytes()).unwrap(); temp_file2.write_all(config_content.as_bytes()).unwrap();
@@ -1418,8 +1418,8 @@ network_secret = "${INSTANCE_SECRET}"
); );
// 清理 // 清理
std::env::remove_var("INSTANCE_SECRET"); remove_env_var("INSTANCE_SECRET");
std::env::remove_var("INSTANCE_NAME"); remove_env_var("INSTANCE_NAME");
} }
/// 实际配置字段测试(network_secret、peer.uri 等) /// 实际配置字段测试(network_secret、peer.uri 等)
@@ -1432,11 +1432,11 @@ network_secret = "${INSTANCE_SECRET}"
#[tokio::test] #[tokio::test]
async fn test_real_config_fields_expansion() { async fn test_real_config_fields_expansion() {
// 设置各种实际场景的环境变量 // 设置各种实际场景的环境变量
std::env::set_var("ET_SECRET", "production-secret-key"); set_env_var("ET_SECRET", "production-secret-key");
std::env::set_var("PEER_HOST", "peer.example.com"); set_env_var("PEER_HOST", "peer.example.com");
std::env::set_var("PEER_PORT", "11011"); set_env_var("PEER_PORT", "11011");
std::env::set_var("LISTEN_PORT", "11010"); set_env_var("LISTEN_PORT", "11010");
std::env::set_var("NETWORK_NAME", "prod-network"); set_env_var("NETWORK_NAME", "prod-network");
// 创建包含多个实际字段的完整配置 // 创建包含多个实际字段的完整配置
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
@@ -1484,11 +1484,11 @@ uri = "tcp://${PEER_HOST}:${PEER_PORT}"
assert!(control.is_no_delete()); assert!(control.is_no_delete());
// 清理环境变量 // 清理环境变量
std::env::remove_var("ET_SECRET"); remove_env_var("ET_SECRET");
std::env::remove_var("PEER_HOST"); remove_env_var("PEER_HOST");
std::env::remove_var("PEER_PORT"); remove_env_var("PEER_PORT");
std::env::remove_var("LISTEN_PORT"); remove_env_var("LISTEN_PORT");
std::env::remove_var("NETWORK_NAME"); remove_env_var("NETWORK_NAME");
} }
/// 带默认值的环境变量 /// 带默认值的环境变量
@@ -1498,8 +1498,8 @@ uri = "tcp://${PEER_HOST}:${PEER_PORT}"
#[tokio::test] #[tokio::test]
async fn test_env_var_with_default_value() { async fn test_env_var_with_default_value() {
// 确保变量未定义 // 确保变量未定义
std::env::remove_var("UNDEFINED_PORT"); remove_env_var("UNDEFINED_PORT");
std::env::remove_var("UNDEFINED_SECRET"); remove_env_var("UNDEFINED_SECRET");
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
let config_content = r#" let config_content = r#"
@@ -1540,7 +1540,7 @@ network_secret = "${UNDEFINED_SECRET:-default-secret}"
/// - 未定义的环境变量保持原样(shellexpand 的默认行为) /// - 未定义的环境变量保持原样(shellexpand 的默认行为)
#[tokio::test] #[tokio::test]
async fn test_undefined_env_var_without_default() { async fn test_undefined_env_var_without_default() {
std::env::remove_var("COMPLETELY_UNDEFINED"); remove_env_var("COMPLETELY_UNDEFINED");
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
let config_content = r#" let config_content = r#"
@@ -1570,6 +1570,8 @@ network_secret = "${COMPLETELY_UNDEFINED}"
// 注意:由于没有实际替换发生,控制标记不应因环境变量而设置 // 注意:由于没有实际替换发生,控制标记不应因环境变量而设置
// 但会因为其他原因(如没有 config_dir)被标记为 NO_DELETE // 但会因为其他原因(如没有 config_dir)被标记为 NO_DELETE
// 这里我们主要验证 NO_DELETE 标记的逻辑
// 由于没有 config_dir,文件会被标记为 NO_DELETE,但不是因为环境变量
assert!(control.is_no_delete()); assert!(control.is_no_delete());
} }
@@ -1581,9 +1583,9 @@ network_secret = "${COMPLETELY_UNDEFINED}"
#[tokio::test] #[tokio::test]
async fn test_boolean_type_env_vars() { async fn test_boolean_type_env_vars() {
// 设置布尔类型的环境变量 // 设置布尔类型的环境变量
std::env::set_var("ENABLE_DHCP", "true"); set_env_var("ENABLE_DHCP", "true");
std::env::set_var("ENABLE_ENCRYPTION", "false"); set_env_var("ENABLE_ENCRYPTION", "false");
std::env::set_var("ENABLE_IPV6", "true"); set_env_var("ENABLE_IPV6", "true");
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
let config_content = r#" let config_content = r#"
@@ -1621,9 +1623,9 @@ enable_ipv6 = ${ENABLE_IPV6}
assert!(control.is_no_delete()); assert!(control.is_no_delete());
// 清理 // 清理
std::env::remove_var("ENABLE_DHCP"); remove_env_var("ENABLE_DHCP");
std::env::remove_var("ENABLE_ENCRYPTION"); remove_env_var("ENABLE_ENCRYPTION");
std::env::remove_var("ENABLE_IPV6"); remove_env_var("ENABLE_IPV6");
} }
/// 数字类型环境变量 /// 数字类型环境变量
@@ -1634,8 +1636,8 @@ enable_ipv6 = ${ENABLE_IPV6}
#[tokio::test] #[tokio::test]
async fn test_numeric_type_env_vars() { async fn test_numeric_type_env_vars() {
// 设置数字类型的环境变量 // 设置数字类型的环境变量
std::env::set_var("MTU_VALUE", "1400"); set_env_var("MTU_VALUE", "1400");
std::env::set_var("THREAD_COUNT", "4"); set_env_var("THREAD_COUNT", "4");
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
let config_content = r#" let config_content = r#"
@@ -1670,8 +1672,8 @@ multi_thread_count = ${THREAD_COUNT}
assert!(control.is_no_delete()); assert!(control.is_no_delete());
// 清理 // 清理
std::env::remove_var("MTU_VALUE"); remove_env_var("MTU_VALUE");
std::env::remove_var("THREAD_COUNT"); remove_env_var("THREAD_COUNT");
} }
/// 混合类型环境变量 /// 混合类型环境变量
@@ -1683,12 +1685,12 @@ multi_thread_count = ${THREAD_COUNT}
#[tokio::test] #[tokio::test]
async fn test_mixed_type_env_vars() { async fn test_mixed_type_env_vars() {
// 设置不同类型的环境变量 // 设置不同类型的环境变量
std::env::set_var("MIXED_SECRET", "mixed-secret-key"); set_env_var("MIXED_SECRET", "mixed-secret-key");
std::env::set_var("MIXED_NETWORK", "production"); set_env_var("MIXED_NETWORK", "production");
std::env::set_var("MIXED_DHCP", "true"); set_env_var("MIXED_DHCP", "true");
std::env::set_var("MIXED_MTU", "1500"); set_env_var("MIXED_MTU", "1500");
std::env::set_var("MIXED_ENCRYPTION", "false"); set_env_var("MIXED_ENCRYPTION", "false");
std::env::set_var("MIXED_LISTEN_PORT", "12345"); set_env_var("MIXED_LISTEN_PORT", "12345");
let mut temp_file = NamedTempFile::new().unwrap(); let mut temp_file = NamedTempFile::new().unwrap();
let config_content = r#" let config_content = r#"
@@ -1740,11 +1742,11 @@ enable_encryption = ${MIXED_ENCRYPTION}
assert!(control.is_no_delete()); assert!(control.is_no_delete());
// 清理 // 清理
std::env::remove_var("MIXED_SECRET"); remove_env_var("MIXED_SECRET");
std::env::remove_var("MIXED_NETWORK"); remove_env_var("MIXED_NETWORK");
std::env::remove_var("MIXED_DHCP"); remove_env_var("MIXED_DHCP");
std::env::remove_var("MIXED_MTU"); remove_env_var("MIXED_MTU");
std::env::remove_var("MIXED_ENCRYPTION"); remove_env_var("MIXED_ENCRYPTION");
std::env::remove_var("MIXED_LISTEN_PORT"); remove_env_var("MIXED_LISTEN_PORT");
} }
} }
+1 -1
View File
@@ -1,6 +1,6 @@
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use anyhow::Context; use anyhow::Context;
use hickory_proto::runtime::TokioRuntimeProvider; use hickory_proto::runtime::TokioRuntimeProvider;
+21 -20
View File
@@ -42,10 +42,11 @@ pub fn expand_env_vars(text: &str) -> (String, bool) {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::tests::{remove_env_var, set_env_var};
#[test] #[test]
fn test_expand_standard_syntax() { fn test_expand_standard_syntax() {
std::env::set_var("TEST_VAR_STANDARD", "test_value"); set_env_var("TEST_VAR_STANDARD", "test_value");
let (result, changed) = expand_env_vars("secret=${TEST_VAR_STANDARD}"); let (result, changed) = expand_env_vars("secret=${TEST_VAR_STANDARD}");
assert_eq!(result, "secret=test_value"); assert_eq!(result, "secret=test_value");
assert!(changed); assert!(changed);
@@ -53,7 +54,7 @@ mod tests {
#[test] #[test]
fn test_expand_short_syntax() { fn test_expand_short_syntax() {
std::env::set_var("TEST_VAR_SHORT", "short_value"); set_env_var("TEST_VAR_SHORT", "short_value");
let (result, changed) = expand_env_vars("key=$TEST_VAR_SHORT"); let (result, changed) = expand_env_vars("key=$TEST_VAR_SHORT");
assert_eq!(result, "key=short_value"); assert_eq!(result, "key=short_value");
assert!(changed); assert!(changed);
@@ -62,7 +63,7 @@ mod tests {
#[test] #[test]
fn test_expand_with_default() { fn test_expand_with_default() {
// 确保变量未定义 // 确保变量未定义
std::env::remove_var("UNDEFINED_VAR_WITH_DEFAULT"); remove_env_var("UNDEFINED_VAR_WITH_DEFAULT");
let (result, changed) = expand_env_vars("port=${UNDEFINED_VAR_WITH_DEFAULT:-8080}"); let (result, changed) = expand_env_vars("port=${UNDEFINED_VAR_WITH_DEFAULT:-8080}");
assert_eq!(result, "port=8080"); assert_eq!(result, "port=8080");
assert!(changed); assert!(changed);
@@ -84,8 +85,8 @@ mod tests {
#[test] #[test]
fn test_multiple_vars() { fn test_multiple_vars() {
std::env::set_var("VAR1", "value1"); set_env_var("VAR1", "value1");
std::env::set_var("VAR2", "value2"); set_env_var("VAR2", "value2");
let (result, changed) = expand_env_vars("${VAR1} and ${VAR2}"); let (result, changed) = expand_env_vars("${VAR1} and ${VAR2}");
assert_eq!(result, "value1 and value2"); assert_eq!(result, "value1 and value2");
assert!(changed); assert!(changed);
@@ -94,7 +95,7 @@ mod tests {
#[test] #[test]
fn test_undefined_var_without_default() { fn test_undefined_var_without_default() {
// 确保变量未定义 // 确保变量未定义
std::env::remove_var("COMPLETELY_UNDEFINED_VAR"); remove_env_var("COMPLETELY_UNDEFINED_VAR");
let (result, changed) = expand_env_vars("value=${COMPLETELY_UNDEFINED_VAR}"); let (result, changed) = expand_env_vars("value=${COMPLETELY_UNDEFINED_VAR}");
// shellexpand::env 对未定义的变量会保持原样 // shellexpand::env 对未定义的变量会保持原样
assert_eq!(result, "value=${COMPLETELY_UNDEFINED_VAR}"); assert_eq!(result, "value=${COMPLETELY_UNDEFINED_VAR}");
@@ -103,8 +104,8 @@ mod tests {
#[test] #[test]
fn test_complex_toml_config() { fn test_complex_toml_config() {
std::env::set_var("ET_SECRET", "my-secret-key"); set_env_var("ET_SECRET", "my-secret-key");
std::env::set_var("ET_PORT", "11010"); set_env_var("ET_PORT", "11010");
let config = r#" let config = r#"
[network_identity] [network_identity]
@@ -123,7 +124,7 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_escape_syntax_double_dollar() { fn test_escape_syntax_double_dollar() {
std::env::set_var("ESCAPED_VAR", "should_not_expand"); set_env_var("ESCAPED_VAR", "should_not_expand");
// shellexpand 使用 $$ 作为转义序列,表示字面量的单个 $ // shellexpand 使用 $$ 作为转义序列,表示字面量的单个 $
// $$ 会被转义为单个 $,不会触发变量扩展 // $$ 会被转义为单个 $,不会触发变量扩展
let (result, changed) = expand_env_vars("value=$${ESCAPED_VAR}"); let (result, changed) = expand_env_vars("value=$${ESCAPED_VAR}");
@@ -133,7 +134,7 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_escape_syntax_backslash() { fn test_escape_syntax_backslash() {
std::env::set_var("ESCAPED_VAR", "should_not_expand"); set_env_var("ESCAPED_VAR", "should_not_expand");
// shellexpand 中反斜杠转义的行为:\$ 会展开为 \<变量值> // shellexpand 中反斜杠转义的行为:\$ 会展开为 \<变量值>
// 这不是推荐的转义方式,此测试仅为记录实际行为 // 这不是推荐的转义方式,此测试仅为记录实际行为
let (result, changed) = expand_env_vars(r"value=\${ESCAPED_VAR}"); let (result, changed) = expand_env_vars(r"value=\${ESCAPED_VAR}");
@@ -143,7 +144,7 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_multiple_dollar_signs() { fn test_multiple_dollar_signs() {
std::env::set_var("TEST_VAR", "value"); set_env_var("TEST_VAR", "value");
// 测试多个连续的 $ 符号 // 测试多个连续的 $ 符号
let (result1, changed1) = expand_env_vars("$$"); let (result1, changed1) = expand_env_vars("$$");
assert_eq!(result1, "$"); assert_eq!(result1, "$");
@@ -161,7 +162,7 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_empty_var_value() { fn test_empty_var_value() {
std::env::set_var("EMPTY_VAR", ""); set_env_var("EMPTY_VAR", "");
let (result, changed) = expand_env_vars("value=${EMPTY_VAR}"); let (result, changed) = expand_env_vars("value=${EMPTY_VAR}");
// 变量存在但值为空 // 变量存在但值为空
assert_eq!(result, "value="); assert_eq!(result, "value=");
@@ -170,7 +171,7 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_default_with_special_chars() { fn test_default_with_special_chars() {
std::env::remove_var("UNDEFINED_SPECIAL"); remove_env_var("UNDEFINED_SPECIAL");
// 测试默认值包含冒号、等号、空格等特殊字符 // 测试默认值包含冒号、等号、空格等特殊字符
let (result, changed) = expand_env_vars("url=${UNDEFINED_SPECIAL:-http://localhost:8080}"); let (result, changed) = expand_env_vars("url=${UNDEFINED_SPECIAL:-http://localhost:8080}");
assert_eq!(result, "url=http://localhost:8080"); assert_eq!(result, "url=http://localhost:8080");
@@ -187,9 +188,9 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_var_name_with_numbers_underscores() { fn test_var_name_with_numbers_underscores() {
std::env::set_var("VAR_123", "num_value"); set_env_var("VAR_123", "num_value");
std::env::set_var("_VAR", "underscore_prefix"); set_env_var("_VAR", "underscore_prefix");
std::env::set_var("VAR_", "underscore_suffix"); set_env_var("VAR_", "underscore_suffix");
let (result1, changed1) = expand_env_vars("${VAR_123}"); let (result1, changed1) = expand_env_vars("${VAR_123}");
assert_eq!(result1, "num_value"); assert_eq!(result1, "num_value");
@@ -214,7 +215,7 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
// 注意:未闭合的 ${VAR 实际上 shellexpand 会当作普通文本处理 // 注意:未闭合的 ${VAR 实际上 shellexpand 会当作普通文本处理
// 它会尝试查找名为 "VAR" 的环境变量(到字符串末尾) // 它会尝试查找名为 "VAR" 的环境变量(到字符串末尾)
std::env::remove_var("VAR"); remove_env_var("VAR");
let (result2, _changed2) = expand_env_vars("incomplete ${VAR"); let (result2, _changed2) = expand_env_vars("incomplete ${VAR");
// 如果 VAR 未定义,shellexpand 会返回错误或保持原样 // 如果 VAR 未定义,shellexpand 会返回错误或保持原样
assert_eq!(result2, "incomplete ${VAR"); assert_eq!(result2, "incomplete ${VAR");
@@ -224,8 +225,8 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_mixed_defined_undefined_vars() { fn test_mixed_defined_undefined_vars() {
std::env::set_var("DEFINED_VAR", "defined"); set_env_var("DEFINED_VAR", "defined");
std::env::remove_var("UNDEFINED_VAR"); remove_env_var("UNDEFINED_VAR");
// 混合已定义和未定义的变量 // 混合已定义和未定义的变量
// shellexpand::env 在遇到未定义变量时会返回错误(默认行为) // shellexpand::env 在遇到未定义变量时会返回错误(默认行为)
@@ -237,7 +238,7 @@ uri = "tcp://127.0.0.1:${ET_PORT}"
#[test] #[test]
fn test_nested_braces() { fn test_nested_braces() {
std::env::set_var("OUTER", "outer_value"); set_env_var("OUTER", "outer_value");
// 嵌套的大括号是无效语法,shellexpand::env 会返回错误 // 嵌套的大括号是无效语法,shellexpand::env 会返回错误
let (result, changed) = expand_env_vars("${OUTER} and ${{INNER}}"); let (result, changed) = expand_env_vars("${OUTER} and ${{INNER}}");
// 由于语法错误,整个字符串保持不变 // 由于语法错误,整个字符串保持不变
+45 -25
View File
@@ -1,8 +1,7 @@
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::{ use std::{
collections::{HashMap, hash_map::DefaultHasher},
hash::Hasher, hash::Hasher,
net::{IpAddr, SocketAddr},
sync::{Arc, Mutex}, sync::{Arc, Mutex},
time::{SystemTime, UNIX_EPOCH}, time::{SystemTime, UNIX_EPOCH},
}; };
@@ -10,28 +9,32 @@ use std::{
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use dashmap::DashMap; use dashmap::DashMap;
use crate::common::config::ProxyNetworkConfig;
use crate::common::shrink_dashmap;
use crate::common::stats_manager::StatsManager;
use crate::common::token_bucket::TokenBucketManager;
use crate::peers::acl_filter::AclFilter;
use crate::peers::credential_manager::CredentialManager;
use crate::proto::acl::GroupIdentity;
use crate::proto::api::config::InstanceConfigPatch;
use crate::proto::api::instance::PeerConnInfo;
use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb};
use crate::proto::peer_rpc::PeerGroupInfo;
use crossbeam::atomic::AtomicCell;
use hmac::{Hmac, Mac};
use sha2::Sha256;
use super::{ use super::{
PeerId,
config::{ConfigLoader, Flags}, config::{ConfigLoader, Flags},
netns::NetNS, netns::NetNS,
network::IPCollector, network::IPCollector,
stun::{StunInfoCollector, StunInfoCollectorTrait}, stun::{StunInfoCollector, StunInfoCollectorTrait},
PeerId,
}; };
use crate::{
common::{
config::ProxyNetworkConfig, shrink_dashmap, stats_manager::StatsManager,
token_bucket::TokenBucketManager,
},
peers::{acl_filter::AclFilter, credential_manager::CredentialManager},
proto::{
acl::GroupIdentity,
api::{config::InstanceConfigPatch, instance::PeerConnInfo},
common::{PeerFeatureFlag, PortForwardConfigPb},
peer_rpc::PeerGroupInfo,
},
rpc_service::protected_port,
tunnel::matches_protocol,
};
use crossbeam::atomic::AtomicCell;
use hmac::{Hmac, Mac};
use sha2::Sha256;
use socket2::Protocol;
pub type NetworkIdentity = crate::common::config::NetworkIdentity; pub type NetworkIdentity = crate::common::config::NetworkIdentity;
@@ -242,6 +245,7 @@ impl GlobalCtx {
feature_flags.quic_input = !flags.disable_quic_input; feature_flags.quic_input = !flags.disable_quic_input;
feature_flags.no_relay_quic = flags.disable_relay_quic; feature_flags.no_relay_quic = flags.disable_relay_quic;
feature_flags.need_p2p = flags.need_p2p; feature_flags.need_p2p = flags.need_p2p;
feature_flags.disable_p2p = flags.disable_p2p;
feature_flags feature_flags
} }
@@ -625,15 +629,11 @@ impl GlobalCtx {
} }
fn is_port_in_running_listeners(&self, port: u16, is_udp: bool) -> bool { fn is_port_in_running_listeners(&self, port: u16, is_udp: bool) -> bool {
let check_proto = |listener_proto: &str| {
let listener_is_udp = matches!(listener_proto, "udp" | "wg");
listener_is_udp == is_udp
};
self.running_listeners self.running_listeners
.lock() .lock()
.unwrap() .unwrap()
.iter() .iter()
.any(|x| x.port() == Some(port) && check_proto(x.scheme())) .any(|x| x.port() == Some(port) && matches_protocol!(x, Protocol::UDP) == is_udp)
} }
#[tracing::instrument(ret, skip(self))] #[tracing::instrument(ret, skip(self))]
@@ -659,6 +659,7 @@ impl GlobalCtx {
if dst_is_local_virtual_ip || dst_is_local_phy_ip { if dst_is_local_virtual_ip || dst_is_local_phy_ip {
// if is local ip, make sure the port is not one of the listening ports // if is local ip, make sure the port is not one of the listening ports
self.is_port_in_running_listeners(dst_addr.port(), is_udp) self.is_port_in_running_listeners(dst_addr.port(), is_udp)
|| (!is_udp && protected_port::is_protected_tcp_port(dst_addr.port()))
} else { } else {
false false
} }
@@ -745,12 +746,13 @@ pub mod tests {
feature_flags.is_public_server = true; feature_flags.is_public_server = true;
global_ctx.set_feature_flags(feature_flags); global_ctx.set_feature_flags(feature_flags);
let mut flags = global_ctx.get_flags(); let mut flags = global_ctx.get_flags().clone();
flags.disable_kcp_input = true; flags.disable_kcp_input = true;
flags.disable_relay_kcp = true; flags.disable_relay_kcp = true;
flags.disable_quic_input = true; flags.disable_quic_input = true;
flags.disable_relay_quic = true; flags.disable_relay_quic = true;
flags.need_p2p = true; flags.need_p2p = true;
flags.disable_p2p = true;
global_ctx.set_flags(flags); global_ctx.set_flags(flags);
let feature_flags = global_ctx.get_feature_flags(); let feature_flags = global_ctx.get_feature_flags();
@@ -759,11 +761,29 @@ pub mod tests {
assert!(!feature_flags.quic_input); assert!(!feature_flags.quic_input);
assert!(feature_flags.no_relay_quic); assert!(feature_flags.no_relay_quic);
assert!(feature_flags.need_p2p); assert!(feature_flags.need_p2p);
assert!(feature_flags.disable_p2p);
assert!(feature_flags.support_conn_list_sync); assert!(feature_flags.support_conn_list_sync);
assert!(feature_flags.avoid_relay_data); assert!(feature_flags.avoid_relay_data);
assert!(feature_flags.is_public_server); assert!(feature_flags.is_public_server);
} }
#[tokio::test]
async fn should_deny_proxy_for_process_wide_rpc_port() {
protected_port::clear_protected_tcp_ports_for_test();
protected_port::register_protected_tcp_port(15888);
let config = TomlConfigLoader::default();
let global_ctx = GlobalCtx::new(config);
let rpc_addr = SocketAddr::from(([127, 0, 0, 1], 15888));
let other_tcp_addr = SocketAddr::from(([127, 0, 0, 1], 15889));
assert!(global_ctx.should_deny_proxy(&rpc_addr, false));
assert!(!global_ctx.should_deny_proxy(&rpc_addr, true));
assert!(!global_ctx.should_deny_proxy(&other_tcp_addr, false));
protected_port::clear_protected_tcp_ports_for_test();
}
pub fn get_mock_global_ctx_with_network( pub fn get_mock_global_ctx_with_network(
network_identy: Option<NetworkIdentity>, network_identy: Option<NetworkIdentity>,
) -> ArcGlobalCtx { ) -> ArcGlobalCtx {
+3 -3
View File
@@ -1,6 +1,6 @@
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
use super::{cidr_to_subnet_mask, run_shell_cmd, Error, IfConfiguerTrait}; use super::{Error, IfConfiguerTrait, cidr_to_subnet_mask, run_shell_cmd};
use async_trait::async_trait; use async_trait::async_trait;
use cidr::{Ipv4Inet, Ipv6Inet}; use cidr::{Ipv4Inet, Ipv6Inet};
@@ -53,8 +53,8 @@ impl IfConfiguerTrait for MacIfConfiger {
) -> Result<(), Error> { ) -> Result<(), Error> {
run_shell_cmd( run_shell_cmd(
format!( format!(
"ifconfig {} {:?}/{:?} 10.8.8.8 up", "ifconfig {} {:?}/{:?} {:?} up",
name, address, cidr_prefix, name, address, cidr_prefix, address,
) )
.as_str(), .as_str(),
) )
+2 -2
View File
@@ -119,8 +119,8 @@ async fn run_shell_cmd(cmd: &str) -> Result<(), Error> {
.creation_flags(CREATE_NO_WINDOW) .creation_flags(CREATE_NO_WINDOW)
.output() .output()
.await?; .await?;
stdout = crate::utils::utf8_or_gbk_to_string(cmd_out.stdout.as_slice()); stdout = crate::utils::string::utf8_or_gbk_to_string(cmd_out.stdout.as_slice());
stderr = crate::utils::utf8_or_gbk_to_string(cmd_out.stderr.as_slice()); stderr = crate::utils::string::utf8_or_gbk_to_string(cmd_out.stderr.as_slice());
}; };
#[cfg(not(target_os = "windows"))] #[cfg(not(target_os = "windows"))]
+6 -6
View File
@@ -10,27 +10,27 @@ use anyhow::Context;
use async_trait::async_trait; use async_trait::async_trait;
use cidr::{IpInet, Ipv4Inet, Ipv6Inet}; use cidr::{IpInet, Ipv4Inet, Ipv6Inet};
use netlink_packet_core::{ use netlink_packet_core::{
NetlinkDeserializable, NetlinkHeader, NetlinkMessage, NetlinkPayload, NetlinkSerializable, NLM_F_ACK, NLM_F_CREATE, NLM_F_DUMP, NLM_F_EXCL, NLM_F_REQUEST, NetlinkDeserializable,
NLM_F_ACK, NLM_F_CREATE, NLM_F_DUMP, NLM_F_EXCL, NLM_F_REQUEST, NetlinkHeader, NetlinkMessage, NetlinkPayload, NetlinkSerializable,
}; };
use netlink_packet_route::{ use netlink_packet_route::{
AddressFamily, RouteNetlinkMessage,
address::{AddressAttribute, AddressMessage}, address::{AddressAttribute, AddressMessage},
route::{ route::{
RouteAddress, RouteAttribute, RouteHeader, RouteMessage, RouteProtocol, RouteScope, RouteAddress, RouteAttribute, RouteHeader, RouteMessage, RouteProtocol, RouteScope,
RouteType, RouteType,
}, },
AddressFamily, RouteNetlinkMessage,
}; };
use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr}; use netlink_sys::{Socket, SocketAddr, protocols::NETLINK_ROUTE};
use nix::{ use nix::{
ifaddrs::getifaddrs, ifaddrs::getifaddrs,
libc::{self, ifreq, ioctl, Ioctl, SIOCGIFFLAGS, SIOCGIFMTU, SIOCSIFFLAGS, SIOCSIFMTU}, libc::{self, Ioctl, SIOCGIFFLAGS, SIOCGIFMTU, SIOCSIFFLAGS, SIOCSIFMTU, ifreq, ioctl},
net::if_::InterfaceFlags, net::if_::InterfaceFlags,
sys::socket::SockaddrLike as _, sys::socket::SockaddrLike as _,
}; };
use pnet::ipnetwork::ip_mask_to_prefix; use pnet::ipnetwork::ip_mask_to_prefix;
use super::{route::Route, Error, IfConfiguerTrait}; use super::{Error, IfConfiguerTrait, route::Route};
pub(crate) fn dummy_socket() -> Result<std::net::UdpSocket, Error> { pub(crate) fn dummy_socket() -> Result<std::net::UdpSocket, Error> {
Ok(std::net::UdpSocket::bind("0:0")?) Ok(std::net::UdpSocket::bind("0:0")?)
+1 -5
View File
@@ -740,10 +740,6 @@ impl InterfaceLuid {
// SAFETY: TODO // SAFETY: TODO
let ret = unsafe { SetIpInterfaceEntry(&mut row) }; let ret = unsafe { SetIpInterfaceEntry(&mut row) };
if NO_ERROR == ret { if NO_ERROR == ret { Ok(()) } else { Err(ret) }
Ok(())
} else {
Err(ret)
}
} }
} }
+10 -11
View File
@@ -10,14 +10,14 @@ use std::{
}; };
use windows_sys::Win32::{ use windows_sys::Win32::{
Foundation::NO_ERROR, Foundation::NO_ERROR,
NetworkManagement::IpHelper::{GetIfEntry, SetIfEntry, MIB_IFROW}, NetworkManagement::IpHelper::{GetIfEntry, MIB_IFROW, SetIfEntry},
System::Diagnostics::Debug::{ System::Diagnostics::Debug::{
FormatMessageW, FORMAT_MESSAGE_FROM_SYSTEM, FORMAT_MESSAGE_IGNORE_INSERTS, FORMAT_MESSAGE_FROM_SYSTEM, FORMAT_MESSAGE_IGNORE_INSERTS, FormatMessageW,
}, },
}; };
use winreg::{ use winreg::{
enums::{HKEY_LOCAL_MACHINE, KEY_READ, KEY_WRITE},
RegKey, RegKey,
enums::{HKEY_LOCAL_MACHINE, KEY_READ, KEY_WRITE},
}; };
use super::{Error, IfConfiguerTrait}; use super::{Error, IfConfiguerTrait};
@@ -331,7 +331,7 @@ impl RegistryManager {
r"SYSTEM\CurrentControlSet\Services\NetBT\Parameters\Interfaces\Tcpip_"; r"SYSTEM\CurrentControlSet\Services\NetBT\Parameters\Interfaces\Tcpip_";
pub fn reg_delete_obsoleted_items(dev_name: &str) -> io::Result<()> { pub fn reg_delete_obsoleted_items(dev_name: &str) -> io::Result<()> {
use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey}; use winreg::{RegKey, enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS};
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE); let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
let profiles_key = hklm.open_subkey_with_flags( let profiles_key = hklm.open_subkey_with_flags(
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles", "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles",
@@ -405,7 +405,7 @@ impl RegistryManager {
} }
pub fn reg_change_catrgory_in_profile(dev_name: &str) -> io::Result<()> { pub fn reg_change_catrgory_in_profile(dev_name: &str) -> io::Result<()> {
use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey}; use winreg::{RegKey, enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS};
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE); let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
let profiles_key = hklm.open_subkey_with_flags( let profiles_key = hklm.open_subkey_with_flags(
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles", "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles",
@@ -448,12 +448,11 @@ impl RegistryManager {
for guid in network_key.enum_keys().map_while(Result::ok) { for guid in network_key.enum_keys().map_while(Result::ok) {
if let Ok(guid_key) = network_key.open_subkey_with_flags(&guid, KEY_READ) { if let Ok(guid_key) = network_key.open_subkey_with_flags(&guid, KEY_READ) {
// 检查 Connection/Name 是否匹配目标接口名 // 检查 Connection/Name 是否匹配目标接口名
if let Ok(conn_key) = guid_key.open_subkey_with_flags("Connection", KEY_READ) { if let Ok(conn_key) = guid_key.open_subkey_with_flags("Connection", KEY_READ)
if let Ok(name) = conn_key.get_value::<String, _>("Name") { && let Ok(name) = conn_key.get_value::<String, _>("Name")
if name == interface_name { && name == interface_name
return Ok(guid); {
} return Ok(guid);
}
} }
} }
} }
+207 -129
View File
@@ -1,19 +1,18 @@
use std::io::IsTerminal as _; use crate::common::config::{FileLoggerConfig, LoggingConfigLoader};
use crate::common::config::LoggingConfigLoader;
use crate::common::get_logger_timer_rfc3339; use crate::common::get_logger_timer_rfc3339;
use crate::common::tracing_rolling_appender::{FileAppenderWrapper, RollingFileAppenderBase}; use crate::common::tracing_rolling_appender::{FileAppenderWrapper, RollingFileAppenderBase};
use crate::rpc_service::logger::{CURRENT_LOG_LEVEL, LOGGER_LEVEL_SENDER}; use crate::rpc_service::logger::{CURRENT_LOG_LEVEL, LOGGER_LEVEL_SENDER};
use anyhow::Context; use anyhow::Context;
use paste::paste; use paste::paste;
use regex::Regex; use std::io::IsTerminal;
use tracing::level_filters::LevelFilter; use tracing::level_filters::LevelFilter;
use tracing::{Level, Metadata}; use tracing::{Level, Metadata};
use tracing_subscriber::filter::{filter_fn, FilterExt}; use tracing_subscriber::Registry;
use tracing_subscriber::filter::{FilterExt, filter_fn};
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::fmt::layer; use tracing_subscriber::fmt::layer;
use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::Registry;
use tracing_subscriber::{EnvFilter, Layer}; use tracing_subscriber::{EnvFilter, Layer};
macro_rules! __log__ { macro_rules! __log__ {
@@ -47,18 +46,16 @@ macro_rules! __log__ {
__log__!(const LOG_TARGET = "CORE"); __log__!(const LOG_TARGET = "CORE");
fn parse_env_filter(default_level: LevelFilter) -> Result<EnvFilter, anyhow::Error> { fn parse_env_filter(default_level: Option<LevelFilter>) -> Result<EnvFilter, anyhow::Error> {
let mut filter = EnvFilter::builder() let directive = match default_level {
.with_default_directive(default_level.into()) Some(level) => level.into(),
None => format!("{LOG_TARGET}=info").parse()?,
};
EnvFilter::builder()
.with_default_directive(directive)
.from_env() .from_env()
.with_context(|| "failed to create env filter")?; .with_context(|| "failed to create env filter")
let pattern = Regex::new(&format!(r"(^|,){}\s*=", regex::escape(LOG_TARGET)))?;
if !pattern.is_match(&filter.to_string()) {
filter = filter.add_directive(format!("{LOG_TARGET}=info").parse()?);
}
Ok(filter)
} }
fn is_log(meta: &Metadata) -> bool { fn is_log(meta: &Metadata) -> bool {
@@ -78,7 +75,6 @@ macro_rules! log_layer {
$layer $layer
.with_file(false) .with_file(false)
.with_line_number(false) .with_line_number(false)
.with_ansi(true)
.with_filter(filter_fn(is_log)) .with_filter(filter_fn(is_log))
.boxed() .boxed()
}; };
@@ -86,127 +82,68 @@ macro_rules! log_layer {
pub fn init( pub fn init(
config: impl LoggingConfigLoader, config: impl LoggingConfigLoader,
need_reload: bool, reload: bool,
) -> Result<Option<NewFilterSender>, anyhow::Error> { ) -> Result<Option<NewFilterSender>, anyhow::Error> {
let mut layers = Vec::new(); let mut layers = Vec::new();
let file_config = config.get_file_logger_config(); let console_layers = console_layers(
let file_level = file_config config
.level .get_console_logger_config()
.map(|s| s.parse().unwrap()) .level
.unwrap_or(LevelFilter::OFF); .map(|s| s.parse().unwrap()),
)?;
layers.extend(console_layers);
let mut ret_sender: Option<NewFilterSender> = None; let sender = if cfg!(not(test)) {
let (file_layers, sender) = file_layers(config.get_file_logger_config(), reload)?;
layers.extend(file_layers);
sender
} else {
None
};
// logger to a rolling file Registry::default()
if file_level != LevelFilter::OFF || need_reload { .with(layers)
let dir = file_config.dir.as_deref().unwrap_or("."); .try_init()
let file = file_config.file.as_deref().unwrap_or("easytier.log"); .map(|_| sender)
let path = std::path::Path::new(dir).join(file); .map_err(Into::into)
let path_str = path.to_string_lossy().into_owned(); }
let builder = RollingFileAppenderBase::builder(); type BoxLayer = Box<dyn Layer<Registry> + Send + Sync>;
let file_appender = builder
.filename(path_str)
.condition_daily()
.max_filecount(file_config.count.unwrap_or(10))
.condition_max_file_size(file_config.size_mb.unwrap_or(100) * 1024 * 1024)
.build()
.unwrap();
// Create a simple wrapper that implements MakeWriter fn console_layers(default_level: Option<LevelFilter>) -> anyhow::Result<Vec<BoxLayer>> {
let wrapper = FileAppenderWrapper::new(file_appender); let mut layers = Vec::new();
if matches!(default_level, Some(LevelFilter::OFF)) {
let (file_filter, file_filter_reloader) = return Ok(layers);
tracing_subscriber::reload::Layer::<_, Registry>::new(parse_env_filter(file_level)?);
let layer = |wrapper| {
layer()
.with_ansi(false)
.with_writer(wrapper)
.with_timer(get_logger_timer_rfc3339())
};
layers.push(
vec![
tracing_layer!(layer(wrapper.clone())),
log_layer!(layer(wrapper.clone())),
]
.with_filter(file_filter)
.boxed(),
);
if need_reload {
let (sender, recver) = std::sync::mpsc::channel();
ret_sender = Some(sender.clone());
// 初始化全局状态
let _ = LOGGER_LEVEL_SENDER.set(std::sync::Mutex::new(sender));
let _ = CURRENT_LOG_LEVEL.set(std::sync::Mutex::new(file_level.to_string()));
std::thread::spawn(move || {
while let Ok(lf) = recver.recv() {
let parsed_level = match lf.parse::<LevelFilter>() {
Ok(level) => level,
Err(e) => {
error!("Failed to parse new log level {:?}: {}", lf, e);
continue;
}
};
let mut new_filter = match EnvFilter::builder()
.with_default_directive(parsed_level.into())
.from_env()
.with_context(|| "failed to create file filter")
{
Ok(filter) => Some(filter),
Err(e) => {
error!("Failed to build new log filter for {:?}: {:?}", lf, e);
continue;
}
};
match file_filter_reloader.modify(|f| {
*f = new_filter
.take()
.expect("log filter reloader only applies one filter per reload");
}) {
Ok(()) => {
info!("Reload log filter succeed, new filter level: {:?}", lf);
}
Err(e) => {
error!("Failed to reload log filter: {:?}", e);
}
}
}
info!("Stop log filter reloader");
});
}
} }
// logger to console
let console_config = config.get_console_logger_config();
let console_level = console_config
.level
.map(|s| s.parse().unwrap())
.unwrap_or(LevelFilter::OFF);
let (console_filter, _) = let (console_filter, _) =
tracing_subscriber::reload::Layer::new(parse_env_filter(console_level)?); tracing_subscriber::reload::Layer::new(parse_env_filter(default_level)?);
let (stdout, stderr) = cfg_select! {
test => {{
let w = tracing_subscriber::fmt::TestWriter::new;
(w, w)
}}
_ => (std::io::stdout, std::io::stderr),
};
let ansi = std::io::stderr().is_terminal() || cfg!(test);
let layer = || { let layer = || {
layer() layer()
.compact() .compact()
.with_ansi(std::io::stderr().is_terminal())
.with_timer(get_logger_timer_rfc3339()) .with_timer(get_logger_timer_rfc3339())
.with_writer(std::io::stderr) .with_ansi(ansi)
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_writer(stderr)
}; };
layers.push( layers.push(
vec![ vec![
tracing_layer!(layer()), tracing_layer!(layer()),
log_layer!(layer()).with_filter(LevelFilter::WARN).boxed(), log_layer!(layer()).with_filter(LevelFilter::WARN).boxed(),
log_layer!(layer().with_writer(std::io::stdout)) log_layer!(layer().with_writer(stdout))
.with_filter(filter_fn(|metadata| *metadata.level() > Level::WARN)) .with_filter(filter_fn(|metadata| *metadata.level() > Level::WARN))
.boxed(), .boxed(),
] ]
@@ -219,23 +156,164 @@ pub fn init(
layers.push(console_subscriber::ConsoleLayer::builder().spawn().boxed()); layers.push(console_subscriber::ConsoleLayer::builder().spawn().boxed());
} }
Registry::default().with(layers).init(); Ok(layers)
}
Ok(ret_sender) fn file_layers(
config: FileLoggerConfig,
reload: bool,
) -> anyhow::Result<(Vec<BoxLayer>, Option<NewFilterSender>)> {
let mut layers = Vec::new();
let level = config.level.map(|s| s.parse().unwrap());
if matches!(level, Some(LevelFilter::OFF)) && !reload {
return Ok((layers, None));
}
let (file_filter, file_filter_reloader) =
tracing_subscriber::reload::Layer::<_, Registry>::new(parse_env_filter(level)?);
let layer = |wrapper| {
layer()
.with_ansi(false)
.with_writer(wrapper)
.with_timer(get_logger_timer_rfc3339())
};
let wrapper = {
let path = {
let dir = config.dir.as_deref().unwrap_or(".");
let file = config.file.as_deref().unwrap_or("easytier.log");
let path = std::path::Path::new(dir).join(file);
path.to_string_lossy().into_owned()
};
let builder = RollingFileAppenderBase::builder();
let file_appender = builder
.filename(path)
.condition_daily()
.max_filecount(config.count.unwrap_or(10))
.condition_max_file_size(config.size_mb.unwrap_or(100) * 1024 * 1024)
.build()
.with_context(|| "failed to initialize rolling file appender")?;
FileAppenderWrapper::new(file_appender)
};
layers.push(
vec![
tracing_layer!(layer(wrapper.clone())),
log_layer!(layer(wrapper.clone())),
]
.with_filter(file_filter)
.boxed(),
);
if !reload {
return Ok((layers, None));
}
let (tx, rx) = std::sync::mpsc::channel();
// 初始化全局状态
let _ = LOGGER_LEVEL_SENDER.set(std::sync::Mutex::new(tx.clone()));
if let Some(level) = level {
let _ = CURRENT_LOG_LEVEL.set(std::sync::Mutex::new(level.to_string()));
}
std::thread::spawn(move || {
while let Ok(lf) = rx.recv() {
let parsed_level = match lf.parse::<LevelFilter>() {
Ok(level) => level,
Err(e) => {
error!("Failed to parse new log level {:?}: {}", lf, e);
continue;
}
};
let mut new_filter = match EnvFilter::builder()
.with_default_directive(parsed_level.into())
.from_env()
.with_context(|| "failed to create file filter")
{
Ok(filter) => Some(filter),
Err(e) => {
error!("Failed to build new log filter for {:?}: {:?}", lf, e);
continue;
}
};
match file_filter_reloader.modify(|f| {
*f = new_filter
.take()
.expect("log filter reloader only applies one filter per reload");
}) {
Ok(()) => {
info!("Reload log filter succeed, new filter level: {:?}", lf);
}
Err(e) => {
error!("Failed to reload log filter: {:?}", e);
}
}
}
info!("Stop log filter reloader");
});
Ok((layers, Some(tx)))
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::common::config::{self}; use crate::common::config::FileLoggerConfig;
async fn test_logger_reload() { #[ctor::ctor]
println!("current working dir: {:?}", std::env::current_dir()); fn init() {
let config = config::LoggingConfigBuilder::default().build().unwrap(); let _ = Registry::default()
let s = init(&config, true).unwrap(); .with(console_layers(Some(LevelFilter::WARN)).unwrap())
tracing::debug!("test not display debug"); .try_init();
s.unwrap().send(LevelFilter::DEBUG.to_string()).unwrap(); }
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
tracing::debug!("test display debug"); #[test]
fn test_logger_reload() {
let temp_dir = tempfile::tempdir().unwrap();
let log_file_name = "reload-test.log".to_string();
let log_path = temp_dir.path().join(&log_file_name);
let cfg = FileLoggerConfig {
level: Some(LevelFilter::INFO.to_string()),
file: Some(log_file_name),
dir: Some(temp_dir.path().to_string_lossy().to_string()),
size_mb: Some(10),
count: Some(1),
};
let (layers, sender) = file_layers(cfg, true).unwrap();
let sender = sender.expect("reload=true should return a sender");
let before_marker = "reload-before-debug-marker";
let after_marker = "reload-after-debug-marker";
let subscriber = Registry::default().with(layers);
tracing::subscriber::with_default(subscriber, || {
tracing::debug!("{}", before_marker);
sender.send(LevelFilter::DEBUG.to_string()).unwrap();
std::thread::sleep(std::time::Duration::from_millis(300));
tracing::debug!("{}", after_marker);
std::thread::sleep(std::time::Duration::from_millis(300));
});
let content = std::fs::read_to_string(&log_path).unwrap_or_default();
assert!(
!content.contains(before_marker),
"debug log should be filtered before reload"
);
assert!(
content.contains(after_marker),
"debug log should be visible after reload"
);
} }
} }
+6 -6
View File
@@ -41,8 +41,8 @@ pub fn get_logger_timer<F: time::formatting::Formattable>(
tracing_subscriber::fmt::time::OffsetTime::new(local_offset, format) tracing_subscriber::fmt::time::OffsetTime::new(local_offset, format)
} }
pub fn get_logger_timer_rfc3339( pub fn get_logger_timer_rfc3339()
) -> tracing_subscriber::fmt::time::OffsetTime<time::format_description::well_known::Rfc3339> { -> tracing_subscriber::fmt::time::OffsetTime<time::format_description::well_known::Rfc3339> {
get_logger_timer(time::format_description::well_known::Rfc3339) get_logger_timer(time::format_description::well_known::Rfc3339)
} }
@@ -117,10 +117,10 @@ pub fn get_machine_id() -> uuid::Uuid {
.unwrap_or_else(|_| std::path::PathBuf::from("et_machine_id")); .unwrap_or_else(|_| std::path::PathBuf::from("et_machine_id"));
// try load from local file // try load from local file
if let Ok(mid) = std::fs::read_to_string(&machine_id_file) { if let Ok(mid) = std::fs::read_to_string(&machine_id_file)
if let Ok(mid) = uuid::Uuid::parse_str(mid.trim()) { && let Ok(mid) = uuid::Uuid::parse_str(mid.trim())
return mid; {
} return mid;
} }
#[cfg(any( #[cfg(any(
+1 -1
View File
@@ -1,7 +1,7 @@
use futures::Future; use futures::Future;
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
use nix::sched::{setns, CloneFlags}; use nix::sched::{CloneFlags, setns};
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
use std::os::fd::AsFd; use std::os::fd::AsFd;
+89
View File
@@ -1,6 +1,12 @@
use std::{net::IpAddr, ops::Deref, sync::Arc}; use std::{net::IpAddr, ops::Deref, sync::Arc};
#[cfg(target_os = "windows")]
use network_interface::{
Addr as SystemAddr, NetworkInterface as SystemNetworkInterface, NetworkInterfaceConfig,
};
use pnet::datalink::NetworkInterface; use pnet::datalink::NetworkInterface;
#[cfg(target_os = "windows")]
use pnet::{ipnetwork::IpNetwork, util::MacAddr};
use tokio::{ use tokio::{
sync::{Mutex, RwLock}, sync::{Mutex, RwLock},
task::JoinSet, task::JoinSet,
@@ -264,6 +270,9 @@ impl IPCollector {
pub async fn collect_interfaces(net_ns: NetNS, filter: bool) -> Vec<NetworkInterface> { pub async fn collect_interfaces(net_ns: NetNS, filter: bool) -> Vec<NetworkInterface> {
let _g = net_ns.guard(); let _g = net_ns.guard();
#[cfg(target_os = "windows")]
let ifaces = Self::collect_interfaces_windows();
#[cfg(not(target_os = "windows"))]
let ifaces = pnet::datalink::interfaces(); let ifaces = pnet::datalink::interfaces();
let mut ret = vec![]; let mut ret = vec![];
for iface in ifaces { for iface in ifaces {
@@ -281,6 +290,86 @@ impl IPCollector {
ret ret
} }
#[cfg(target_os = "windows")]
fn collect_interfaces_windows() -> Vec<NetworkInterface> {
match SystemNetworkInterface::show() {
Ok(ifaces) => ifaces
.into_iter()
.map(Self::convert_windows_interface)
.collect(),
Err(e) => {
tracing::warn!(
?e,
"failed to enumerate interfaces via network-interface, falling back to pnet"
);
match std::panic::catch_unwind(pnet::datalink::interfaces) {
Ok(ifaces) => ifaces,
Err(_) => {
tracing::error!(
"failed to enumerate interfaces via both network-interface and pnet"
);
Vec::new()
}
}
}
}
}
#[cfg(target_os = "windows")]
fn convert_windows_interface(iface: SystemNetworkInterface) -> NetworkInterface {
let mac = iface.mac_addr.as_deref().and_then(|mac| {
mac.parse::<MacAddr>()
.map_err(|e| {
tracing::debug!(iface = %iface.name, mac, ?e, "failed to parse interface mac")
})
.ok()
});
let ips = iface
.addr
.into_iter()
.filter_map(Self::convert_windows_interface_addr)
.collect();
NetworkInterface {
name: iface.name,
description: String::new(),
index: iface.index,
mac,
ips,
// pnet does not populate Windows flags either, so keep the existing semantics.
flags: 0,
}
}
#[cfg(target_os = "windows")]
fn convert_windows_interface_addr(addr: SystemAddr) -> Option<IpNetwork> {
match addr {
SystemAddr::V4(addr) => {
let netmask = addr
.netmask
.map(IpAddr::V4)
.unwrap_or(IpAddr::V4(std::net::Ipv4Addr::new(255, 255, 255, 255)));
IpNetwork::with_netmask(IpAddr::V4(addr.ip), netmask)
.map_err(|e| {
tracing::debug!(ip = %addr.ip, ?addr.netmask, ?e, "failed to convert ipv4")
})
.ok()
}
SystemAddr::V6(addr) => {
let netmask = addr
.netmask
.map(IpAddr::V6)
.unwrap_or(IpAddr::V6(std::net::Ipv6Addr::from(u128::MAX)));
IpNetwork::with_netmask(IpAddr::V6(addr.ip), netmask)
.map_err(|e| {
tracing::debug!(ip = %addr.ip, ?addr.netmask, ?e, "failed to convert ipv6")
})
.ok()
}
}
}
#[tracing::instrument(skip(net_ns))] #[tracing::instrument(skip(net_ns))]
async fn do_collect_local_ip_addrs(net_ns: NetNS) -> GetIpListResponse { async fn do_collect_local_ip_addrs(net_ns: NetNS) -> GetIpListResponse {
let mut ret = GetIpListResponse::default(); let mut ret = GetIpListResponse::default();
+5 -20
View File
@@ -4,40 +4,25 @@
//! For example, if task A spawned task B but is doing something else, and task B is waiting for task C to join, //! For example, if task A spawned task B but is doing something else, and task B is waiting for task C to join,
//! aborting A will also abort both B and C. //! aborting A will also abort both B and C.
use derive_more::{Deref, DerefMut, From};
use std::future::Future; use std::future::Future;
use std::ops::Deref;
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
#[derive(Debug)] #[derive(Debug, From, Deref, DerefMut)]
pub struct ScopedTask<T> { pub struct ScopedTask<T>(JoinHandle<T>);
inner: JoinHandle<T>,
}
impl<T> Drop for ScopedTask<T> { impl<T> Drop for ScopedTask<T> {
fn drop(&mut self) { fn drop(&mut self) {
self.inner.abort() self.abort()
} }
} }
impl<T> Future for ScopedTask<T> { impl<T> Future for ScopedTask<T> {
type Output = <JoinHandle<T> as Future>::Output; type Output = <JoinHandle<T> as Future>::Output;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner).poll(cx) Pin::new(&mut self.0).poll(cx)
}
}
impl<T> From<JoinHandle<T>> for ScopedTask<T> {
fn from(inner: JoinHandle<T>) -> Self {
Self { inner }
}
}
impl<T> Deref for ScopedTask<T> {
type Target = JoinHandle<T>;
fn deref(&self) -> &Self::Target {
&self.inner
} }
} }

Some files were not shown because too many files have changed in this diff Show More