mirror of
https://github.com/EasyTier/EasyTier.git
synced 2026-05-13 17:35:37 +00:00
Compare commits
272 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 8428a89d2d | |||
| 513695297c | |||
| bfbfa2ef8d | |||
| 8e1d079142 | |||
| 55f15bb6f0 | |||
| 96fd39649a | |||
| 74fc8b300d | |||
| baeee40b79 | |||
| 4342c8d7a2 | |||
| 1178b312fa | |||
| 362aa7a9cd | |||
| 12a7b5a5c5 | |||
| 4eba9b07b6 | |||
| 1b48029bdc | |||
| 3542e944cb | |||
| 852d1c9e14 | |||
| 4958394469 | |||
| 41b6d65604 | |||
| aae30894dd | |||
| 81d169abfc | |||
| 9c6c210e89 | |||
| d1c6dcf754 | |||
| 97c8c4f55a | |||
| ed8df2d58f | |||
| f66010e6f9 | |||
| d5c4700d32 | |||
| 969ecfc4ca | |||
| 8f862997eb | |||
| b20075e3dc | |||
| eb3b5aae51 | |||
| af6b6ab6f1 | |||
| 5a1668c753 | |||
| 820d9095d3 | |||
| 2fb41ccbba | |||
| b4666be696 | |||
| 4688ad74ad | |||
| f7ea78d4f0 | |||
| ac112440c3 | |||
| 958b246f05 | |||
| 263f4c3bc9 | |||
| ffddc517e1 | |||
| 5cd0a3e846 | |||
| f4319c4d4f | |||
| 0091a535d5 | |||
| d7a5fb8d66 | |||
| f63054e937 | |||
| efc043abbb | |||
| 40c6de8e31 | |||
| 2db655bd6d | |||
| c49c56612b | |||
| 6ca074abae | |||
| 84430055ab | |||
| 432fcb3fc3 | |||
| fae32361f2 | |||
| bcb2e512d4 | |||
| 82ca04a8a7 | |||
| 2ef3b72224 | |||
| 6d319cba1d | |||
| 3687519ef3 | |||
| 3a4ac59467 | |||
| 1cfc135df3 | |||
| 5b35c51da9 | |||
| ec7ddd3bad | |||
| 6f3e708679 | |||
| 869e1b89f5 | |||
| 9e0a3b6936 | |||
| c6cb1a77d0 | |||
| 83010861ba | |||
| daa53e5168 | |||
| 51befdbf87 | |||
| 8311b11713 | |||
| 19c80c7b9c | |||
| a879dd1b14 | |||
| a8feb9ac2b | |||
| c5fbd29c0e | |||
| 26b1794723 | |||
| 371b4b70a3 | |||
| b2cc38ee63 | |||
| 79b562cdc9 | |||
| e3f089251c | |||
| cf6dcbc054 | |||
| 2cf2b0fcac | |||
| aa0cca3bb6 | |||
| fb59f01058 | |||
| e91a0da70a | |||
| 9cc617ae4c | |||
| e4b0f1f1bb | |||
| 443c3ca0b3 | |||
| 55a0e5952c | |||
| 1dff388717 | |||
| 61c741f887 | |||
| 01dd9a05c3 | |||
| 8c19a2293c | |||
| a1bec48dc9 | |||
| 7e289865b2 | |||
| 742c7edd57 | |||
| b71a2889ef | |||
| bcd75d6ce3 | |||
| d4c1b0e867 | |||
| b037ea9c3f | |||
| b5f475cd4c | |||
| eaa4d2c7b8 | |||
| e160d9b048 | |||
| 0aeea39fbe | |||
| e000636d83 | |||
| 8e4dc508bb | |||
| e2684a93de | |||
| 1d89ddbb16 | |||
| 2bfdd44759 | |||
| 77966916c4 | |||
| 26b7455c1e | |||
| 8922e7b991 | |||
| e6ac31fb20 | |||
| c8f3c5d6aa | |||
| 330659e449 | |||
| 80043df292 | |||
| ecd1ea6f8c | |||
| 694b8d349d | |||
| ef44027f57 | |||
| f3db348b01 | |||
| c4eacf4591 | |||
| 59d4475743 | |||
| 22b4c4be2c | |||
| 5f31583a84 | |||
| 1d25240d8c | |||
| eeb507d6ea | |||
| 9e9916efa5 | |||
| db6b9e3684 | |||
| ff24332e23 | |||
| d4ff0b1767 | |||
| 5716f7f16b | |||
| e5bd8f9e24 | |||
| b56bcfb4b0 | |||
| fb95b4827c | |||
| a8f7226195 | |||
| e6ee485352 | |||
| 73291a3a1c | |||
| f737708f45 | |||
| aa24d09aa2 | |||
| fe4e77979d | |||
| 7a26640c26 | |||
| 5a777959e3 | |||
| 3512a80597 | |||
| 011770a601 | |||
| 6475724d2e | |||
| 85e9029577 | |||
| b6e292cce3 | |||
| c58140fb47 | |||
| aebb7facfa | |||
| 1e2124cb99 | |||
| e1cbd07d1f | |||
| 7750e81168 | |||
| bf3edbd28f | |||
| cd2cf56358 | |||
| 21f4a944a7 | |||
| 9617005136 | |||
| c85d1d41b3 | |||
| 9e3c9228bb | |||
| acd7c85ff6 | |||
| 8727221513 | |||
| cdedaf3f63 | |||
| ffe5644ddc | |||
| ccc684a9ab | |||
| 977e502150 | |||
| 518d26b25f | |||
| 101f416268 | |||
| ffa08d1c43 | |||
| cf3f9169b7 | |||
| 8343cd5e76 | |||
| 005b321f62 | |||
| 53264f67bf | |||
| f8b34e3c86 | |||
| ce1bdac2bc | |||
| bd8f01fb26 | |||
| b590700540 | |||
| 48c5c23f9b | |||
| f4f591d14c | |||
| 0c16e2211b | |||
| 4bfea06a12 | |||
| 057ee9f2c5 | |||
| 7f48ca54a3 | |||
| ee5227130c | |||
| 2e0d9a2b54 | |||
| c5d732773f | |||
| 88a45d1156 | |||
| 4e651a72f7 | |||
| 7c563153ae | |||
| cb81c0df85 | |||
| 9c316ea01c | |||
| 541fc664e3 | |||
| 18478b7c4b | |||
| 650323faef | |||
| ed131272d4 | |||
| 39b056c87a | |||
| c19cd1bff3 | |||
| 37531507db | |||
| ca9b4c58b1 | |||
| 4341bcba5d | |||
| 0be4ac1fa5 | |||
| 28cd6da502 | |||
| 0712ef762d | |||
| eee7d7a1ed | |||
| 4c58def0db | |||
| c6a32e4467 | |||
| 30f0ff16ca | |||
| 38d117ee44 | |||
| 7aba65ea32 | |||
| fe4dff5df0 | |||
| 2bc51daa98 | |||
| 838b6101b9 | |||
| 056c9da781 | |||
| 2a656d6a0c | |||
| 43a650f9ab | |||
| 88a55859ac | |||
| d686c8721f | |||
| 0a718163fd | |||
| 53f279f5ff | |||
| ae6d929f4a | |||
| bb82b3a5b0 | |||
| 70b122fb91 | |||
| 67cba2c326 | |||
| b86692d009 | |||
| 28e645a277 | |||
| 1f2517c731 | |||
| b44053f496 | |||
| 5b9ac65477 | |||
| d726d46a00 | |||
| 1273426009 | |||
| b50744690e | |||
| 55b93454dc | |||
| 89cc75f674 | |||
| 6bb2fd9a15 | |||
| 8ab98bba8f | |||
| 26d002bc2b | |||
| 71679e889a | |||
| 7485f5f64e | |||
| bbe8f9f810 | |||
| eba9504fc2 | |||
| 67ac9b00ff | |||
| 3ffa6214ca | |||
| 6f278ab167 | |||
| f10b45a67c | |||
| cc8f35787e | |||
| 8f1786fa23 | |||
| 70dddeace3 | |||
| 8cc9da9d6d | |||
| 5292b87275 | |||
| 87b7b7ed7c | |||
| 999a486928 | |||
| 627e989faa | |||
| af95312949 | |||
| a452c34390 | |||
| 4d5330fa0a | |||
| 5e48626cb9 | |||
| ad7dc3a129 | |||
| 92fab5aafa | |||
| 841d525913 | |||
| d2efbbef04 | |||
| 971ef82679 | |||
| 020bf04ec4 | |||
| 4d91582fd8 | |||
| e9b4dbce6e | |||
| 00fd02c739 | |||
| c0d2045e52 | |||
| 835cd407bf | |||
| f5ba5bb146 | |||
| 7a694257d9 | |||
| 67abf4446d | |||
| 7035a3fef4 | |||
| 4445916ba7 | |||
| a102a8bfc7 | |||
| c9e8c35e77 |
+35
-54
@@ -1,29 +1,40 @@
|
||||
[target.x86_64-unknown-linux-musl]
|
||||
linker = "rust-lld"
|
||||
rustflags = ["-C", "linker-flavor=ld.lld"]
|
||||
# region Native
|
||||
|
||||
[target.x86_64-unknown-linux-gnu]
|
||||
rustflags = ["-C", "link-arg=-fuse-ld=mold"]
|
||||
|
||||
[target.aarch64-unknown-linux-gnu]
|
||||
linker = "aarch64-linux-gnu-gcc"
|
||||
rustflags = ["-C", "link-arg=-fuse-ld=mold"]
|
||||
|
||||
[target.aarch64-unknown-linux-ohos]
|
||||
ar = "/usr/local/ohos-sdk/linux/native/llvm/bin/llvm-ar"
|
||||
linker = "/home/runner/sdk/native/llvm/aarch64-unknown-linux-ohos-clang.sh"
|
||||
[target.'cfg(all(windows, target_env = "msvc"))']
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.aarch64-unknown-linux-ohos.env]
|
||||
PKG_CONFIG_PATH = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib/pkgconfig:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib/pkgconfig"
|
||||
PKG_CONFIG_LIBDIR = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib"
|
||||
PKG_CONFIG_SYSROOT_DIR = "/usr/local/ohos-sdk/linux/native/sysroot"
|
||||
SYSROOT = "/usr/local/ohos-sdk/linux/native/sysroot"
|
||||
# region
|
||||
|
||||
# region CI
|
||||
|
||||
[target.x86_64-unknown-linux-musl]
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.aarch64-unknown-linux-musl]
|
||||
linker = "aarch64-unknown-linux-musl-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.riscv64gc-unknown-linux-musl]
|
||||
linker = "riscv64-unknown-linux-musl-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.'cfg(all(windows, target_env = "msvc"))']
|
||||
[target.armv7-unknown-linux-musleabihf]
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.armv7-unknown-linux-musleabi]
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.arm-unknown-linux-musleabihf]
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.arm-unknown-linux-musleabi]
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.loongarch64-unknown-linux-musl]
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.mipsel-unknown-linux-musl]
|
||||
@@ -64,44 +75,14 @@ rustflags = [
|
||||
"gcc",
|
||||
]
|
||||
|
||||
[target.armv7-unknown-linux-musleabihf]
|
||||
linker = "armv7-unknown-linux-musleabihf-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
[target.aarch64-unknown-linux-ohos]
|
||||
ar = "/usr/local/ohos-sdk/linux/native/llvm/bin/llvm-ar"
|
||||
linker = "/home/runner/sdk/native/llvm/aarch64-unknown-linux-ohos-clang.sh"
|
||||
|
||||
[target.armv7-unknown-linux-musleabi]
|
||||
linker = "armv7-unknown-linux-musleabi-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
[target.aarch64-unknown-linux-ohos.env]
|
||||
PKG_CONFIG_PATH = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib/pkgconfig:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib/pkgconfig"
|
||||
PKG_CONFIG_LIBDIR = "/usr/local/ohos-sdk/linux/native/sysroot/usr/lib:/usr/local/ohos-sdk/linux/native/sysroot/usr/local/lib"
|
||||
PKG_CONFIG_SYSROOT_DIR = "/usr/local/ohos-sdk/linux/native/sysroot"
|
||||
SYSROOT = "/usr/local/ohos-sdk/linux/native/sysroot"
|
||||
|
||||
[target.loongarch64-unknown-linux-musl]
|
||||
linker = "loongarch64-unknown-linux-musl-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.arm-unknown-linux-musleabihf]
|
||||
linker = "arm-unknown-linux-musleabihf-gcc"
|
||||
rustflags = [
|
||||
"-C",
|
||||
"target-feature=+crt-static",
|
||||
"-L",
|
||||
"./musl_gcc/arm-unknown-linux-musleabihf/arm-unknown-linux-musleabihf/lib",
|
||||
"-L",
|
||||
"./musl_gcc/arm-unknown-linux-musleabihf/lib/gcc/arm-unknown-linux-musleabihf/15.1.0",
|
||||
"-l",
|
||||
"atomic",
|
||||
"-l",
|
||||
"gcc",
|
||||
]
|
||||
|
||||
[target.arm-unknown-linux-musleabi]
|
||||
linker = "arm-unknown-linux-musleabi-gcc"
|
||||
rustflags = [
|
||||
"-C",
|
||||
"target-feature=+crt-static",
|
||||
"-L",
|
||||
"./musl_gcc/arm-unknown-linux-musleabi/arm-unknown-linux-musleabi/lib",
|
||||
"-L",
|
||||
"./musl_gcc/arm-unknown-linux-musleabi/lib/gcc/arm-unknown-linux-musleabi/15.1.0",
|
||||
"-l",
|
||||
"atomic",
|
||||
"-l",
|
||||
"gcc",
|
||||
]
|
||||
# endregion
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
name: prepare-build
|
||||
author: Luna
|
||||
description: Prepare build environment
|
||||
inputs:
|
||||
target:
|
||||
description: 'The target to build for'
|
||||
required: false
|
||||
pnpm:
|
||||
description: 'Whether to run pnpm build'
|
||||
required: true
|
||||
default: 'true'
|
||||
pnpm-build-filter:
|
||||
description: 'The filter argument for pnpm build (e.g. ./easytier-web/*)'
|
||||
required: false
|
||||
default: './easytier-web/*'
|
||||
gui:
|
||||
description: 'Whether to prepare the GUI build environment'
|
||||
required: true
|
||||
default: 'true'
|
||||
token:
|
||||
description: 'GitHub token, used by setup-protoc action'
|
||||
required: false
|
||||
runs:
|
||||
using: 'composite'
|
||||
steps:
|
||||
- run: mkdir -p easytier-gui/dist
|
||||
shell: bash
|
||||
|
||||
- name: Install dependencies
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -qqy build-essential mold musl-tools
|
||||
shell: bash
|
||||
|
||||
- name: Setup Frontend Environment
|
||||
if: ${{ inputs.pnpm == 'true' }}
|
||||
uses: ./.github/actions/prepare-pnpm
|
||||
with:
|
||||
build-filter: ${{ inputs.pnpm-build-filter }}
|
||||
|
||||
- name: Install GUI dependencies (Linux)
|
||||
if: ${{ inputs.gui == 'true' && runner.os == 'Linux' }}
|
||||
run: |
|
||||
sudo apt-get install -qq xdg-utils \
|
||||
libappindicator3-dev \
|
||||
libgtk-3-dev \
|
||||
librsvg2-dev \
|
||||
libwebkit2gtk-4.1-dev \
|
||||
libxdo-dev
|
||||
shell: bash
|
||||
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
toolchain: 1.95
|
||||
target: ${{ !contains(inputs.target, 'mips') && inputs.target || '' }}
|
||||
components: ${{ contains(inputs.target, 'mips') && 'rust-src' || '' }}
|
||||
cache: false
|
||||
rustflags: ''
|
||||
|
||||
- name: Install Rust (MIPS)
|
||||
if: ${{ contains(inputs.target, 'mips') }}
|
||||
run: |
|
||||
MUSL_TARGET=${{ inputs.target }}sf
|
||||
mkdir -p ./musl_gcc
|
||||
wget --inet4-only -c https://github.com/cross-tools/musl-cross/releases/download/20250520/${MUSL_TARGET}.tar.xz -P ./musl_gcc/
|
||||
tar xf ./musl_gcc/${MUSL_TARGET}.tar.xz -C ./musl_gcc/
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/bin/*gcc /usr/bin/
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/include/ /usr/include/musl-cross
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/${MUSL_TARGET}/sysroot/ ./musl_gcc/sysroot
|
||||
sudo chmod -R a+rwx ./musl_gcc
|
||||
|
||||
if [[ -d "./musl_gcc/sysroot" ]]; then
|
||||
echo "BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$(readlink -f ./musl_gcc/sysroot)" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
cd "$PWD/musl_gcc/${MUSL_TARGET}/lib/gcc/${MUSL_TARGET}/15.1.0" || exit 255
|
||||
# for panic-abort
|
||||
cp libgcc_eh.a libunwind.a
|
||||
|
||||
# for mimalloc
|
||||
ar x libgcc.a _ctzsi2.o _clz.o _bswapsi2.o
|
||||
ar rcs libctz.a _ctzsi2.o _clz.o _bswapsi2.o
|
||||
shell: bash
|
||||
|
||||
- name: Setup protoc
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
# GitHub repo token to use to avoid rate limiter
|
||||
repo-token: ${{ inputs.token }}
|
||||
@@ -0,0 +1,48 @@
|
||||
name: 'Setup pnpm'
|
||||
author: Luna
|
||||
description: 'Setup Node.js, pnpm, and install dependencies'
|
||||
|
||||
inputs:
|
||||
build-filter:
|
||||
description: 'The filter argument for pnpm build (e.g. ./easytier-web/*)'
|
||||
required: false
|
||||
default: ''
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v5
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v5
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ${{ env.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install and build
|
||||
shell: bash
|
||||
run: |
|
||||
pnpm -r install
|
||||
if [ -n "${{ inputs.build-filter }}" ]; then
|
||||
echo "Building with filter: ${{ inputs.build-filter }}"
|
||||
pnpm -r --filter "${{ inputs.build-filter }}" build
|
||||
else
|
||||
echo "No build filter provided, building all packages"
|
||||
pnpm -r build
|
||||
fi
|
||||
+144
-178
@@ -2,9 +2,14 @@ name: EasyTier Core
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["develop", "main", "releases/**"]
|
||||
branches: [ "develop", "main", "releases/**" ]
|
||||
pull_request:
|
||||
branches: ["develop", "main"]
|
||||
branches: [ "develop", "main" ]
|
||||
types: [ opened, synchronize, reopened, ready_for_review ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -18,6 +23,7 @@ jobs:
|
||||
pre_job:
|
||||
# continue-on-error: true # Uncomment once integration is finished
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
|
||||
# Map a step output to a job output
|
||||
outputs:
|
||||
# do not skip push on branch starts with releases/
|
||||
@@ -30,85 +36,69 @@ jobs:
|
||||
concurrent_skipping: 'same_content_newer'
|
||||
skip_after_successful_duplicate: 'true'
|
||||
cancel_others: 'true'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml", ".github/workflows/install_rust.sh"]'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml", ".github/actions/**", "easytier-web/**"]'
|
||||
build_web:
|
||||
runs-on: ubuntu-latest
|
||||
needs: pre_job
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
- name: Setup Frontend Environment
|
||||
uses: ./.github/actions/prepare-pnpm
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ env.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: |
|
||||
pnpm -r install
|
||||
pnpm -r --filter "./easytier-web/*" build
|
||||
build-filter: './easytier-web/*'
|
||||
|
||||
- name: Archive artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: easytier-web-dashboard
|
||||
path: |
|
||||
easytier-web/frontend/dist/*
|
||||
build:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
fail-fast: true
|
||||
matrix:
|
||||
include:
|
||||
- TARGET: aarch64-unknown-linux-musl
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-aarch64
|
||||
- TARGET: x86_64-unknown-linux-musl
|
||||
OS: ubuntu-22.04
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-x86_64
|
||||
- TARGET: riscv64gc-unknown-linux-musl
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-riscv64
|
||||
- TARGET: mips-unknown-linux-musl
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-mips
|
||||
- TARGET: mipsel-unknown-linux-musl
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-mipsel
|
||||
- TARGET: armv7-unknown-linux-musleabihf # raspberry pi 2-3-4, not tested
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-armv7hf
|
||||
- TARGET: armv7-unknown-linux-musleabi # raspberry pi 2-3-4, not tested
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-armv7
|
||||
- TARGET: arm-unknown-linux-musleabihf # raspberry pi 0-1, not tested
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-armhf
|
||||
- TARGET: arm-unknown-linux-musleabi # raspberry pi 0-1, not tested
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: linux-arm
|
||||
- TARGET: aarch64-unknown-linux-musl
|
||||
OS: ubuntu-24.04-arm
|
||||
ARTIFACT_NAME: linux-aarch64
|
||||
|
||||
- TARGET: riscv64gc-unknown-linux-musl
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-riscv64
|
||||
- TARGET: loongarch64-unknown-linux-musl
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-loongarch64
|
||||
|
||||
- TARGET: armv7-unknown-linux-musleabihf # raspberry pi 2-3-4, not tested
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-armv7hf
|
||||
- TARGET: armv7-unknown-linux-musleabi # raspberry pi 2-3-4, not tested
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-armv7
|
||||
- TARGET: arm-unknown-linux-musleabihf # raspberry pi 0-1, not tested
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-armhf
|
||||
- TARGET: arm-unknown-linux-musleabi # raspberry pi 0-1, not tested
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-arm
|
||||
|
||||
- TARGET: mips-unknown-linux-musl
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-mips
|
||||
- TARGET: mipsel-unknown-linux-musl
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: linux-mipsel
|
||||
|
||||
- TARGET: x86_64-unknown-freebsd
|
||||
OS: ubuntu-24.04
|
||||
ARTIFACT_NAME: freebsd-13.2-x86_64
|
||||
BSD_VERSION: 13.2
|
||||
|
||||
- TARGET: x86_64-apple-darwin
|
||||
OS: macos-latest
|
||||
ARTIFACT_NAME: macos-x86_64
|
||||
@@ -119,17 +109,12 @@ jobs:
|
||||
- TARGET: x86_64-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
ARTIFACT_NAME: windows-x86_64
|
||||
- TARGET: aarch64-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
ARTIFACT_NAME: windows-arm64
|
||||
- TARGET: i686-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
ARTIFACT_NAME: windows-i686
|
||||
|
||||
- TARGET: x86_64-unknown-freebsd
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: freebsd-13.2-x86_64
|
||||
BSD_VERSION: 13.2
|
||||
- TARGET: aarch64-pc-windows-msvc
|
||||
OS: windows-11-arm
|
||||
ARTIFACT_NAME: windows-arm64
|
||||
|
||||
runs-on: ${{ matrix.OS }}
|
||||
env:
|
||||
@@ -142,7 +127,7 @@ jobs:
|
||||
- build_web
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Set current ref as env variable
|
||||
run: |
|
||||
@@ -154,158 +139,131 @@ jobs:
|
||||
name: easytier-web-dashboard
|
||||
path: easytier-web/frontend/dist/
|
||||
|
||||
- name: Prepare build environment
|
||||
uses: ./.github/actions/prepare-build
|
||||
with:
|
||||
target: ${{ matrix.TARGET }}
|
||||
gui: true
|
||||
pnpm: true
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
|
||||
with:
|
||||
# The prefix cache key, this can be changed to start a new cache manually.
|
||||
# default: "v0-rust"
|
||||
prefix-key: ""
|
||||
shared-key: "core-registry"
|
||||
cache-targets: "false"
|
||||
|
||||
|
||||
- name: Setup protoc
|
||||
uses: arduino/setup-protoc@v3
|
||||
- uses: mlugg/setup-zig@v2
|
||||
if: ${{ contains(matrix.OS, 'ubuntu') }}
|
||||
with:
|
||||
# GitHub repo token to use to avoid rate limiter
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
version: 0.16.0
|
||||
use-cache: true
|
||||
|
||||
- name: Build Core & Cli
|
||||
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
|
||||
- uses: taiki-e/install-action@v2
|
||||
if: ${{ contains(matrix.OS, 'ubuntu') }}
|
||||
with:
|
||||
tool: cargo-zigbuild
|
||||
|
||||
- name: Build
|
||||
if: ${{ !contains(matrix.TARGET, 'mips') }}
|
||||
run: |
|
||||
bash ./.github/workflows/install_rust.sh
|
||||
|
||||
# loongarch need llvm-18
|
||||
if [[ $TARGET =~ ^loongarch.*$ ]]; then
|
||||
sudo apt-get install -qq llvm-18 clang-18
|
||||
export LLVM_CONFIG_PATH=/usr/lib/llvm-18/bin/llvm-config
|
||||
fi
|
||||
# we set the sysroot when sysroot is a dir
|
||||
# this dir is a soft link generated by install_rust.sh
|
||||
# kcp-sys need this to gen ffi bindings. without this clang may fail to find some libc headers such as bits/libc-header-start.h
|
||||
if [[ -d "./musl_gcc/sysroot" ]]; then
|
||||
export BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$(readlink -f ./musl_gcc/sysroot)
|
||||
fi
|
||||
|
||||
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
|
||||
cargo +nightly-2025-09-01 build -r --target $TARGET -Z build-std=std,panic_abort --package=easytier --features=jemalloc
|
||||
if [[ "$TARGET" == *windows* ]]; then
|
||||
SUFFIX=.exe
|
||||
else
|
||||
if [[ $OS =~ ^windows.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
CORE_FEATURES="--features=mimalloc"
|
||||
elif [[ $TARGET =~ ^riscv64.*$ || $TARGET =~ ^loongarch64.*$ ]]; then
|
||||
CORE_FEATURES="--features=mimalloc"
|
||||
else
|
||||
CORE_FEATURES="--features=jemalloc"
|
||||
fi
|
||||
cargo build --release --target $TARGET --package=easytier-web --features=embed
|
||||
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
|
||||
cargo build --release --target $TARGET $CORE_FEATURES
|
||||
SUFFIX=""
|
||||
fi
|
||||
|
||||
# Copied and slightly modified from @lmq8267 (https://github.com/lmq8267)
|
||||
- name: Build Core & Cli (X86_64 FreeBSD)
|
||||
uses: vmactions/freebsd-vm@v1
|
||||
if: ${{ endsWith(matrix.TARGET, 'freebsd') }}
|
||||
if [[ "$TARGET" =~ (x86_64-unknown-linux-musl|aarch64-unknown-linux-musl|windows|darwin) ]]; then
|
||||
BUILD=build
|
||||
else
|
||||
BUILD=zigbuild
|
||||
fi
|
||||
|
||||
if [[ "$TARGET" =~ ^(riscv64|loongarch64|aarch64).*$ || "$TARGET" =~ (freebsd|windows) ]]; then
|
||||
FEATURES="mimalloc"
|
||||
else
|
||||
FEATURES="jemalloc"
|
||||
fi
|
||||
|
||||
cargo $BUILD --release --target $TARGET --package=easytier-web --features=embed
|
||||
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
|
||||
|
||||
cargo $BUILD --release --target $TARGET --features=$FEATURES
|
||||
|
||||
- name: Build (MIPS)
|
||||
if: ${{ contains(matrix.TARGET, 'mips') }}
|
||||
env:
|
||||
TARGET: ${{ matrix.TARGET }}
|
||||
with:
|
||||
envs: TARGET
|
||||
release: ${{ matrix.BSD_VERSION }}
|
||||
arch: x86_64
|
||||
usesh: true
|
||||
mem: 6144
|
||||
cpu: 4
|
||||
run: |
|
||||
uname -a
|
||||
echo $SHELL
|
||||
pwd
|
||||
ls -lah
|
||||
whoami
|
||||
env | sort
|
||||
|
||||
pkg install -y git protobuf llvm-devel sudo curl
|
||||
curl --proto 'https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
. $HOME/.cargo/env
|
||||
|
||||
rustup set auto-self-update disable
|
||||
|
||||
rustup install 1.89
|
||||
rustup default 1.89
|
||||
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
export CARGO_TERM_COLOR=always
|
||||
|
||||
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed
|
||||
mv ./target/$TARGET/release/easytier-web ./target/$TARGET/release/easytier-web-embed
|
||||
cargo build --release --verbose --target $TARGET --features=mimalloc
|
||||
RUSTC_BOOTSTRAP: 1
|
||||
run: |
|
||||
cargo build -r --target $TARGET -Z build-std=std,panic_abort --package=easytier --features=jemalloc
|
||||
|
||||
- name: Compress
|
||||
run: |
|
||||
mkdir -p ./artifacts/objects/
|
||||
|
||||
# windows is the only OS using a different convention for executable file name
|
||||
if [[ $OS =~ ^windows.*$ && $TARGET =~ ^x86_64.*$ ]]; then
|
||||
if [[ $OS =~ ^windows.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
cp easytier/third_party/*.dll ./artifacts/objects/
|
||||
elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^i686.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
cp easytier/third_party/i686/*.dll ./artifacts/objects/
|
||||
elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^aarch64.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
cp easytier/third_party/arm64/*.dll ./artifacts/objects/
|
||||
case $TARGET in
|
||||
x86_64*) ARCH_DIR=x86_64 ;;
|
||||
i686*) ARCH_DIR=i686 ;;
|
||||
aarch64*) ARCH_DIR=arm64 ;;
|
||||
esac
|
||||
if [[ -n "$ARCH_DIR" ]]; then
|
||||
find "easytier/third_party/${ARCH_DIR}" -maxdepth 1 -type f \( -name "*.dll" -o -name "*.sys" \) -exec cp {} ./artifacts/objects/ \;
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
|
||||
TAG=$GITHUB_REF_NAME
|
||||
else
|
||||
TAG=$GITHUB_SHA
|
||||
fi
|
||||
|
||||
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ && ! $TARGET =~ ^loongarch.*$ && ! $TARGET =~ ^riscv64.*$ ]]; then
|
||||
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ (loongarch|freebsd) ]]; then
|
||||
HOST_ARCH=$(uname -m)
|
||||
case $HOST_ARCH in
|
||||
x86_64) UPX_ARCH="amd64" ;;
|
||||
aarch64) UPX_ARCH="arm64" ;;
|
||||
*) UPX_ARCH="amd64" ;;
|
||||
esac
|
||||
|
||||
UPX_VERSION=4.2.4
|
||||
curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf -
|
||||
cp upx-${UPX_VERSION}-amd64_linux/upx .
|
||||
./upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX"
|
||||
./upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX"
|
||||
UPX_PKG="upx-${UPX_VERSION}-${UPX_ARCH}_linux"
|
||||
curl -L "https://github.com/upx/upx/releases/download/v${UPX_VERSION}/${UPX_PKG}.tar.xz" -s | tar xJvf -
|
||||
cp "${UPX_PKG}/upx" .
|
||||
UPX_BIN=./upx
|
||||
fi
|
||||
|
||||
mv ./target/$TARGET/release/easytier-core"$SUFFIX" ./artifacts/objects/
|
||||
mv ./target/$TARGET/release/easytier-cli"$SUFFIX" ./artifacts/objects/
|
||||
if [[ ! $TARGET =~ ^mips.*$ ]]; then
|
||||
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./artifacts/objects/
|
||||
mv ./target/$TARGET/release/easytier-web-embed"$SUFFIX" ./artifacts/objects/
|
||||
fi
|
||||
for BIN in ./target/$TARGET/release/easytier-{core,cli,web,web-embed}"$SUFFIX"; do
|
||||
if [[ -f "$BIN" ]]; then
|
||||
if [[ -n "$UPX_BIN" ]]; then
|
||||
$UPX_BIN --lzma --best "$BIN" || true
|
||||
fi
|
||||
|
||||
mv "$BIN" ./artifacts/objects/
|
||||
fi
|
||||
done
|
||||
|
||||
mv ./artifacts/objects/* ./artifacts/
|
||||
rm -rf ./artifacts/objects/
|
||||
|
||||
- name: Archive artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: easytier-${{ matrix.ARTIFACT_NAME }}
|
||||
path: |
|
||||
./artifacts/*
|
||||
|
||||
core-result:
|
||||
if: needs.pre_job.outputs.should_skip != 'true' && always()
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- pre_job
|
||||
- build_web
|
||||
- build
|
||||
steps:
|
||||
- name: Mark result as failed
|
||||
if: needs.build.result != 'success'
|
||||
run: exit 1
|
||||
|
||||
magisk_build:
|
||||
needs:
|
||||
- pre_job
|
||||
- build_web
|
||||
- build
|
||||
if: needs.pre_job.outputs.should_skip != 'true' && always()
|
||||
build_magisk:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ pre_job, build_web, build ]
|
||||
if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v4 # 必须先检出代码才能获取模块配置
|
||||
uses: actions/checkout@v5 # 必须先检出代码才能获取模块配置
|
||||
|
||||
# 下载二进制文件到独立目录
|
||||
- name: Download Linux aarch64 binaries
|
||||
@@ -322,10 +280,9 @@ jobs:
|
||||
cp ./downloaded-binaries/easytier-cli ./easytier-contrib/easytier-magisk/
|
||||
cp ./downloaded-binaries/easytier-web ./easytier-contrib/easytier-magisk/
|
||||
|
||||
|
||||
# 上传生成的模块
|
||||
- name: Upload Magisk Module
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: Easytier-Magisk
|
||||
path: |
|
||||
@@ -333,3 +290,12 @@ jobs:
|
||||
!./easytier-contrib/easytier-magisk/build.sh
|
||||
!./easytier-contrib/easytier-magisk/magisk_update.json
|
||||
if-no-files-found: error
|
||||
|
||||
core-result:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ pre_job, build_web, build, build_magisk ]
|
||||
if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
|
||||
steps:
|
||||
- name: Mark result as failed
|
||||
if: contains(needs.*.result, 'failure')
|
||||
run: exit 1
|
||||
|
||||
@@ -11,7 +11,7 @@ on:
|
||||
image_tag:
|
||||
description: 'Tag for this image build'
|
||||
type: string
|
||||
default: 'v2.4.4'
|
||||
default: 'v2.6.4'
|
||||
required: true
|
||||
mark_latest:
|
||||
description: 'Mark this image as latest'
|
||||
@@ -31,7 +31,7 @@ jobs:
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
-
|
||||
name: Validate inputs
|
||||
run: |
|
||||
|
||||
+47
-111
@@ -5,6 +5,11 @@ on:
|
||||
branches: ["develop", "main", "releases/**"]
|
||||
pull_request:
|
||||
branches: ["develop", "main"]
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -18,6 +23,7 @@ jobs:
|
||||
pre_job:
|
||||
# continue-on-error: true # Uncomment once integration is finished
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
|
||||
# Map a step output to a job output
|
||||
outputs:
|
||||
should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && !startsWith(github.ref_name, 'releases/') }}
|
||||
@@ -29,20 +35,20 @@ jobs:
|
||||
concurrent_skipping: 'same_content_newer'
|
||||
skip_after_successful_duplicate: 'true'
|
||||
cancel_others: 'true'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/workflows/install_rust.sh", ".github/workflows/install_gui_dep.sh"]'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/actions/**", "easytier-web/frontend-lib/**"]'
|
||||
build-gui:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
fail-fast: true
|
||||
matrix:
|
||||
include:
|
||||
- TARGET: aarch64-unknown-linux-musl
|
||||
OS: ubuntu-22.04
|
||||
GUI_TARGET: aarch64-unknown-linux-gnu
|
||||
ARTIFACT_NAME: linux-aarch64
|
||||
- TARGET: x86_64-unknown-linux-musl
|
||||
OS: ubuntu-22.04
|
||||
OS: ubuntu-24.04
|
||||
GUI_TARGET: x86_64-unknown-linux-gnu
|
||||
ARTIFACT_NAME: linux-x86_64
|
||||
- TARGET: aarch64-unknown-linux-musl
|
||||
OS: ubuntu-24.04-arm
|
||||
GUI_TARGET: aarch64-unknown-linux-gnu
|
||||
ARTIFACT_NAME: linux-aarch64
|
||||
|
||||
- TARGET: x86_64-apple-darwin
|
||||
OS: macos-latest
|
||||
@@ -57,16 +63,14 @@ jobs:
|
||||
OS: windows-latest
|
||||
GUI_TARGET: x86_64-pc-windows-msvc
|
||||
ARTIFACT_NAME: windows-x86_64
|
||||
|
||||
- TARGET: aarch64-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
GUI_TARGET: aarch64-pc-windows-msvc
|
||||
ARTIFACT_NAME: windows-arm64
|
||||
|
||||
- TARGET: i686-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
GUI_TARGET: i686-pc-windows-msvc
|
||||
ARTIFACT_NAME: windows-i686
|
||||
- TARGET: aarch64-pc-windows-msvc
|
||||
OS: windows-11-arm
|
||||
GUI_TARGET: aarch64-pc-windows-msvc
|
||||
ARTIFACT_NAME: windows-arm64
|
||||
|
||||
runs-on: ${{ matrix.OS }}
|
||||
env:
|
||||
@@ -78,103 +82,39 @@ jobs:
|
||||
needs: pre_job
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install GUI dependencies (x86 only)
|
||||
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
|
||||
run: bash ./.github/workflows/install_gui_dep.sh
|
||||
|
||||
- name: Install GUI cross compile (aarch64 only)
|
||||
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
|
||||
run: |
|
||||
# see https://tauri.app/v1/guides/building/linux/
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
|
||||
sudo dpkg --add-architecture arm64
|
||||
sudo apt update
|
||||
sudo apt install aptitude
|
||||
sudo aptitude install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64 \
|
||||
libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64 \
|
||||
libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu
|
||||
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
|
||||
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Set current ref as env variable
|
||||
run: |
|
||||
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
- name: Prepare build environment
|
||||
uses: ./.github/actions/prepare-build
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ env.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: |
|
||||
pnpm -r install
|
||||
pnpm -r build
|
||||
target: ${{ matrix.TARGET }}
|
||||
gui: true
|
||||
pnpm: true
|
||||
pnpm-build-filter: ''
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# The prefix cache key, this can be changed to start a new cache manually.
|
||||
# default: "v0-rust"
|
||||
prefix-key: ""
|
||||
|
||||
- name: Install rust target
|
||||
run: bash ./.github/workflows/install_rust.sh
|
||||
|
||||
- name: Setup protoc
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
# GitHub repo token to use to avoid rate limiter
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
shared-key: "gui-registry"
|
||||
cache-targets: "false"
|
||||
|
||||
- name: copy correct DLLs
|
||||
if: ${{ matrix.OS == 'windows-latest' }}
|
||||
if: ${{ contains(matrix.GUI_TARGET, 'windows') }}
|
||||
run: |
|
||||
if [[ $GUI_TARGET =~ ^aarch64.*$ ]]; then
|
||||
cp ./easytier/third_party/arm64/*.dll ./easytier-gui/src-tauri/
|
||||
elif [[ $GUI_TARGET =~ ^i686.*$ ]]; then
|
||||
cp ./easytier/third_party/i686/*.dll ./easytier-gui/src-tauri/
|
||||
else
|
||||
cp ./easytier/third_party/*.dll ./easytier-gui/src-tauri/
|
||||
case $TARGET in
|
||||
x86_64*) ARCH_DIR=x86_64 ;;
|
||||
i686*) ARCH_DIR=i686 ;;
|
||||
aarch64*) ARCH_DIR=arm64 ;;
|
||||
esac
|
||||
if [[ -n "$ARCH_DIR" ]]; then
|
||||
find "./easytier/third_party/${ARCH_DIR}" -maxdepth 1 -type f \( -name "*.dll" -o -name "*.sys" \) -exec cp {} ./easytier-gui/src-tauri/ \;
|
||||
fi
|
||||
|
||||
- name: Build GUI
|
||||
@@ -182,10 +122,9 @@ jobs:
|
||||
uses: tauri-apps/tauri-action@v0
|
||||
with:
|
||||
projectPath: ./easytier-gui
|
||||
# https://tauri.app/v1/guides/building/linux/#cross-compiling-tauri-applications-for-arm-based-devices
|
||||
args: --verbose --target ${{ matrix.GUI_TARGET }} ${{ matrix.OS == 'ubuntu-22.04' && contains(matrix.TARGET, 'aarch64') && '--bundles deb' || '' }}
|
||||
args: --verbose --target ${{ matrix.GUI_TARGET }}
|
||||
|
||||
- name: Compress
|
||||
- name: Collect artifact
|
||||
run: |
|
||||
mkdir -p ./artifacts/objects/
|
||||
|
||||
@@ -194,36 +133,33 @@ jobs:
|
||||
else
|
||||
TAG=$GITHUB_SHA
|
||||
fi
|
||||
|
||||
# copy gui bundle, gui is built without specific target
|
||||
if [[ $OS =~ ^windows.*$ ]]; then
|
||||
if [[ $GUI_TARGET =~ windows ]]; then
|
||||
mv ./target/$GUI_TARGET/release/bundle/nsis/*.exe ./artifacts/objects/
|
||||
elif [[ $OS =~ ^macos.*$ ]]; then
|
||||
elif [[ $GUI_TARGET =~ darwin ]]; then
|
||||
mv ./target/$GUI_TARGET/release/bundle/dmg/*.dmg ./artifacts/objects/
|
||||
elif [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^mips.*$ ]]; then
|
||||
elif [[ $GUI_TARGET =~ linux ]]; then
|
||||
mv ./target/$GUI_TARGET/release/bundle/deb/*.deb ./artifacts/objects/
|
||||
if [[ $GUI_TARGET =~ ^x86_64.*$ ]]; then
|
||||
# currently only x86 appimage is supported
|
||||
mv ./target/$GUI_TARGET/release/bundle/appimage/*.AppImage ./artifacts/objects/
|
||||
fi
|
||||
mv ./target/$GUI_TARGET/release/bundle/rpm/*.rpm ./artifacts/objects/
|
||||
mv ./target/$GUI_TARGET/release/bundle/appimage/*.AppImage ./artifacts/objects/
|
||||
fi
|
||||
|
||||
mv ./artifacts/objects/* ./artifacts/
|
||||
rm -rf ./artifacts/objects/
|
||||
|
||||
- name: Archive artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: easytier-gui-${{ matrix.ARTIFACT_NAME }}
|
||||
path: |
|
||||
./artifacts/*
|
||||
|
||||
gui-result:
|
||||
if: needs.pre_job.outputs.should_skip != 'true' && always()
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- pre_job
|
||||
- build-gui
|
||||
needs: [ pre_job, build-gui ]
|
||||
if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
|
||||
steps:
|
||||
- name: Mark result as failed
|
||||
if: needs.build-gui.result != 'success'
|
||||
if: contains(needs.*.result, 'failure')
|
||||
run: exit 1
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
sudo apt update
|
||||
sudo apt install -qq libwebkit2gtk-4.1-dev \
|
||||
build-essential \
|
||||
curl \
|
||||
wget \
|
||||
file \
|
||||
libgtk-3-dev \
|
||||
librsvg2-dev \
|
||||
libxdo-dev \
|
||||
libssl-dev \
|
||||
patchelf
|
||||
@@ -1,61 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# env needed:
|
||||
# - TARGET
|
||||
# - GUI_TARGET
|
||||
# - OS
|
||||
|
||||
# dependencies are only needed on ubuntu as that's the only place where
|
||||
# we make cross-compilation
|
||||
if [[ $OS =~ ^ubuntu.*$ ]]; then
|
||||
sudo apt-get update && sudo apt-get install -qq musl-tools libappindicator3-dev llvm clang
|
||||
# https://github.com/cross-tools/musl-cross/releases
|
||||
# if "musl" is a substring of TARGET, we assume that we are using musl
|
||||
MUSL_TARGET=$TARGET
|
||||
# if target is mips or mipsel, we should use soft-float version of musl
|
||||
if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then
|
||||
MUSL_TARGET=${TARGET}sf
|
||||
elif [[ $TARGET =~ ^riscv64gc-.*$ ]]; then
|
||||
MUSL_TARGET=${TARGET/#riscv64gc-/riscv64-}
|
||||
fi
|
||||
if [[ $MUSL_TARGET =~ musl ]]; then
|
||||
mkdir -p ./musl_gcc
|
||||
wget --inet4-only -c https://github.com/cross-tools/musl-cross/releases/download/20250520/${MUSL_TARGET}.tar.xz -P ./musl_gcc/
|
||||
tar xf ./musl_gcc/${MUSL_TARGET}.tar.xz -C ./musl_gcc/
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/bin/*gcc /usr/bin/
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/include/ /usr/include/musl-cross
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/${MUSL_TARGET}/sysroot/ ./musl_gcc/sysroot
|
||||
sudo chmod -R a+rwx ./musl_gcc
|
||||
fi
|
||||
fi
|
||||
|
||||
# see https://github.com/rust-lang/rustup/issues/3709
|
||||
rustup set auto-self-update disable
|
||||
rustup install 1.89
|
||||
rustup default 1.89
|
||||
|
||||
# mips/mipsel cannot add target from rustup, need compile by ourselves
|
||||
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
|
||||
cd "$PWD/musl_gcc/${MUSL_TARGET}/lib/gcc/${MUSL_TARGET}/15.1.0" || exit 255
|
||||
# for panic-abort
|
||||
cp libgcc_eh.a libunwind.a
|
||||
|
||||
# for mimalloc
|
||||
ar x libgcc.a _ctzsi2.o _clz.o _bswapsi2.o
|
||||
ar rcs libctz.a _ctzsi2.o _clz.o _bswapsi2.o
|
||||
|
||||
rustup toolchain install nightly-2025-09-01-x86_64-unknown-linux-gnu
|
||||
rustup component add rust-src --toolchain nightly-2025-09-01-x86_64-unknown-linux-gnu
|
||||
|
||||
# https://github.com/rust-lang/rust/issues/128808
|
||||
# remove it after Cargo or rustc fix this.
|
||||
RUST_LIB_SRC=$HOME/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/
|
||||
if [[ -f $RUST_LIB_SRC/library/Cargo.lock && ! -f $RUST_LIB_SRC/Cargo.lock ]]; then
|
||||
cp -f $RUST_LIB_SRC/library/Cargo.lock $RUST_LIB_SRC/Cargo.lock
|
||||
fi
|
||||
else
|
||||
rustup target add $TARGET
|
||||
if [[ $GUI_TARGET != '' ]]; then
|
||||
rustup target add $GUI_TARGET
|
||||
fi
|
||||
fi
|
||||
@@ -5,6 +5,11 @@ on:
|
||||
branches: ["develop", "main", "releases/**"]
|
||||
pull_request:
|
||||
branches: ["develop", "main"]
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -18,6 +23,7 @@ jobs:
|
||||
pre_job:
|
||||
# continue-on-error: true # Uncomment once integration is finished
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
|
||||
# Map a step output to a job output
|
||||
outputs:
|
||||
should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && !startsWith(github.ref_name, 'releases/') }}
|
||||
@@ -29,25 +35,30 @@ jobs:
|
||||
concurrent_skipping: 'same_content_newer'
|
||||
skip_after_successful_duplicate: 'true'
|
||||
cancel_others: 'true'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", "tauri-plugin-vpnservice/**", ".github/workflows/mobile.yml", ".github/workflows/install_rust.sh"]'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", "tauri-plugin-vpnservice/**", ".github/workflows/mobile.yml", ".github/actions/**"]'
|
||||
build-mobile:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
fail-fast: true
|
||||
matrix:
|
||||
include:
|
||||
- TARGET: android
|
||||
OS: ubuntu-22.04
|
||||
ARTIFACT_NAME: android
|
||||
runs-on: ${{ matrix.OS }}
|
||||
- TARGET: aarch64-linux-android
|
||||
ARCH: aarch64
|
||||
- TARGET: armv7-linux-androideabi
|
||||
ARCH: armv7
|
||||
- TARGET: i686-linux-android
|
||||
ARCH: i686
|
||||
- TARGET: x86_64-linux-android
|
||||
ARCH: x86_64
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
NAME: easytier
|
||||
TARGET: ${{ matrix.TARGET }}
|
||||
OS: ${{ matrix.OS }}
|
||||
ARCH: ${{ matrix.ARCH }}
|
||||
OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }}
|
||||
needs: pre_job
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Set current ref as env variable
|
||||
run: |
|
||||
@@ -61,72 +72,41 @@ jobs:
|
||||
- name: Setup Android SDK
|
||||
uses: android-actions/setup-android@v3
|
||||
with:
|
||||
cmdline-tools-version: 11076708
|
||||
packages: 'build-tools;34.0.0 ndk;26.0.10792818 tools platform-tools platforms;android-34 '
|
||||
cmdline-tools-version: 12.0
|
||||
packages: 'build-tools;34.0.0 ndk;26.0.10792818 platform-tools platforms;android-34 '
|
||||
|
||||
- name: Setup Android Environment
|
||||
run: |
|
||||
echo "$ANDROID_HOME/platform-tools" >> $GITHUB_PATH
|
||||
echo "$ANDROID_HOME/ndk/26.0.10792818/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH
|
||||
echo "NDK_HOME=$ANDROID_HOME/ndk/26.0.10792818/" > $GITHUB_ENV
|
||||
echo "NDK_HOME=$ANDROID_HOME/ndk/26.0.10792818/" >> $GITHUB_ENV
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
- name: Prepare build environment
|
||||
uses: ./.github/actions/prepare-build
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ env.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: |
|
||||
pnpm -r install
|
||||
pnpm -r build
|
||||
target: ${{ matrix.TARGET }}
|
||||
gui: false
|
||||
pnpm: true
|
||||
pnpm-build-filter: ''
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
# The prefix cache key, this can be changed to start a new cache manually.
|
||||
# default: "v0-rust"
|
||||
prefix-key: ""
|
||||
shared-key: "gui-registry"
|
||||
cache-targets: "false"
|
||||
|
||||
- name: Install rust target
|
||||
run: |
|
||||
bash ./.github/workflows/install_rust.sh
|
||||
rustup target add aarch64-linux-android
|
||||
rustup target add armv7-linux-androideabi
|
||||
rustup target add i686-linux-android
|
||||
rustup target add x86_64-linux-android
|
||||
|
||||
- name: Setup protoc
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
# GitHub repo token to use to avoid rate limiter
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build Android
|
||||
- name: Build
|
||||
run: |
|
||||
cd easytier-gui
|
||||
pnpm tauri android build
|
||||
pnpm tauri android build --apk --target "$ARCH" --split-per-abi
|
||||
|
||||
- name: Compress
|
||||
- name: Collect artifact
|
||||
run: |
|
||||
mkdir -p ./artifacts/objects/
|
||||
mv easytier-gui/src-tauri/gen/android/app/build/outputs/apk/universal/release/app-universal-release.apk ./artifacts/objects/
|
||||
mv easytier-gui/src-tauri/gen/android/app/build/outputs/apk/*/release/*.apk ./artifacts/objects/
|
||||
|
||||
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
|
||||
TAG=$GITHUB_REF_NAME
|
||||
@@ -134,23 +114,21 @@ jobs:
|
||||
TAG=$GITHUB_SHA
|
||||
fi
|
||||
|
||||
mv ./artifacts/objects/* ./artifacts
|
||||
mv ./artifacts/objects/* ./artifacts/
|
||||
rm -rf ./artifacts/objects/
|
||||
|
||||
- name: Archive artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: easytier-gui-${{ matrix.ARTIFACT_NAME }}
|
||||
name: easytier-mobile-android-${{ matrix.ARCH }}
|
||||
path: |
|
||||
./artifacts/*
|
||||
|
||||
mobile-result:
|
||||
if: needs.pre_job.outputs.should_skip != 'true' && always()
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- pre_job
|
||||
- build-mobile
|
||||
needs: [ pre_job, build-mobile ]
|
||||
if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
|
||||
steps:
|
||||
- name: Mark result as failed
|
||||
if: needs.build-mobile.result != 'success'
|
||||
if: contains(needs.*.result, 'failure')
|
||||
run: exit 1
|
||||
|
||||
@@ -0,0 +1,44 @@
|
||||
name: Nix Check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["main", "develop"]
|
||||
paths:
|
||||
- "**/*.nix"
|
||||
- "flake.lock"
|
||||
- "rust-toolchain.toml"
|
||||
pull_request:
|
||||
branches: ["main", "develop"]
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
paths:
|
||||
- "**/*.nix"
|
||||
- "flake.lock"
|
||||
- "rust-toolchain.toml"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check-full-shell:
|
||||
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Install Nix
|
||||
uses: cachix/install-nix-action@v27
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
|
||||
- name: Magic Nix Cache
|
||||
uses: DeterminateSystems/magic-nix-cache-action@v6
|
||||
|
||||
- name: Warm up full devShell
|
||||
run: nix develop .#full --command true
|
||||
|
||||
- name: Cargo check in flake environment
|
||||
run: nix develop .#full --command cargo check
|
||||
|
||||
- name: Cargo build in flake environment
|
||||
run: nix develop .#full --command cargo build
|
||||
+167
-35
@@ -3,8 +3,17 @@ name: EasyTier OHOS
|
||||
on:
|
||||
push:
|
||||
branches: ["develop", "main", "releases/**"]
|
||||
tags:
|
||||
- 'v*'
|
||||
- '!*-pre'
|
||||
pull_request:
|
||||
branches: ["develop", "main"]
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -15,9 +24,30 @@ defaults:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
cargo_fmt_check:
|
||||
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Prepare build environment
|
||||
uses: ./.github/actions/prepare-build
|
||||
with:
|
||||
gui: false
|
||||
pnpm: false
|
||||
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: rustfmt
|
||||
|
||||
- name: Check formatting
|
||||
working-directory: ./easytier-contrib/easytier-ohrs
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
pre_job:
|
||||
# continue-on-error: true # Uncomment once integration is finished
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name != 'pull_request' || !github.event.pull_request.draft
|
||||
# Map a step output to a job output
|
||||
outputs:
|
||||
# do not skip push on branch starts with releases/
|
||||
@@ -27,58 +57,108 @@ jobs:
|
||||
uses: fkirc/skip-duplicate-actions@v5
|
||||
with:
|
||||
# All of these options are optional, so you can remove them if you are happy with the defaults
|
||||
concurrent_skipping: 'same_content_newer'
|
||||
skip_after_successful_duplicate: 'true'
|
||||
cancel_others: 'true'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-contrib/easytier-ohrs/**", ".github/workflows/ohos.yml", ".github/workflows/install_rust.sh"]'
|
||||
concurrent_skipping: "same_content_newer"
|
||||
skip_after_successful_duplicate: "true"
|
||||
cancel_others: "true"
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-contrib/easytier-ohrs/**", ".github/workflows/ohos.yml", ".github/actions/**"]'
|
||||
|
||||
build-ohos:
|
||||
runs-on: ubuntu-latest
|
||||
needs: pre_job
|
||||
env:
|
||||
OHPM_PUBLISH_CODE: ${{ secrets.OHPM_PUBLISH_CODE }}
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y \
|
||||
sudo apt-get install -qq \
|
||||
build-essential \
|
||||
wget \
|
||||
unzip \
|
||||
git \
|
||||
pkg-config
|
||||
sudo apt-get clean
|
||||
pkg-config curl libgl1-mesa-dev expect
|
||||
|
||||
- name: Download and extract native SDK
|
||||
working-directory: ../../../
|
||||
- name: Resolve easytier version
|
||||
run: |
|
||||
echo $PWD
|
||||
wget -q \
|
||||
https://github.com/openharmony-rs/ohos-sdk/releases/download/v5.1.0/ohos-sdk-windows_linux-public.tar.gz.aa
|
||||
wget -q \
|
||||
https://github.com/openharmony-rs/ohos-sdk/releases/download/v5.1.0/ohos-sdk-windows_linux-public.tar.gz.ab
|
||||
cat ohos-sdk-windows_linux-public.tar.gz.aa ohos-sdk-windows_linux-public.tar.gz.ab > sdk.tar.gz
|
||||
echo "Extracting native..."
|
||||
mkdir sdk
|
||||
tar -xzf sdk.tar.gz ohos-sdk/linux/native-linux-x64-5.1.0.107-Release.zip
|
||||
tar -xzf sdk.tar.gz ohos-sdk/linux/toolchains-linux-x64-5.1.0.107-Release.zip
|
||||
unzip -qq ohos-sdk/linux/native-linux-x64-5.1.0.107-Release.zip -d sdk
|
||||
unzip -qq ohos-sdk/linux/toolchains-linux-x64-5.1.0.107-Release.zip -d sdk
|
||||
ls -la sdk/native/llvm/bin/
|
||||
rm -rf ohos-sdk-windows_linux-public.tar.gz.aa ohos-sdk-windows_linux-public.tar.gz.ab ohos-sdk/
|
||||
set -e
|
||||
|
||||
UPSTREAM_REPO="https://github.com/EasyTier/EasyTier.git"
|
||||
|
||||
git remote add upstream "$UPSTREAM_REPO" 2>/dev/null || true
|
||||
git fetch --unshallow upstream main || git fetch upstream main
|
||||
git fetch --tags upstream --force
|
||||
|
||||
# 读取 cargo 版本
|
||||
CARGO_VERSION=$(cargo metadata --format-version 1 --no-deps --manifest-path easytier/Cargo.toml \
|
||||
| jq -r '.packages[0].version')
|
||||
|
||||
# 获取 upstream/main 最新 tag
|
||||
LAST_TAG=$(git describe --tags --abbrev=0 upstream/main 2>/dev/null || echo "")
|
||||
LAST_TAG_VERSION="${LAST_TAG#v}"
|
||||
|
||||
# 语义版本比较
|
||||
version_gt() {
|
||||
[ "$(printf '%s\n' "$1" "$2" | sort -V | tail -n1)" = "$1" ] && [ "$1" != "$2" ]
|
||||
}
|
||||
|
||||
if [ -z "$LAST_TAG_VERSION" ]; then
|
||||
BASE_VERSION="$CARGO_VERSION"
|
||||
DIFF_COUNT=$(git rev-list --count upstream/main)
|
||||
elif version_gt "$CARGO_VERSION" "$LAST_TAG_VERSION"; then
|
||||
BASE_VERSION="$CARGO_VERSION"
|
||||
DIFF_COUNT=0
|
||||
else
|
||||
BASE_VERSION="$LAST_TAG_VERSION"
|
||||
DIFF_COUNT=$(git rev-list --count "${LAST_TAG}..upstream/main")
|
||||
fi
|
||||
|
||||
COMMIT_HASH=$(git rev-parse --short upstream/main)
|
||||
EASYTIER_VERSION="${BASE_VERSION}-${DIFF_COUNT}-${COMMIT_HASH}"
|
||||
|
||||
echo "EASYTIER_VERSION=$EASYTIER_VERSION"
|
||||
echo "EASYTIER_VERSION=$EASYTIER_VERSION" >> $GITHUB_ENV
|
||||
|
||||
cd ./easytier-contrib/easytier-ohrs/package
|
||||
jq --arg v "$EASYTIER_VERSION" '.version = $v' oh-package.json5 > oh-package.tmp.json5
|
||||
mv oh-package.tmp.json5 oh-package.json5
|
||||
|
||||
|
||||
- name: Generate CHANGELOG.md for current commit
|
||||
working-directory: ./easytier-contrib/easytier-ohrs/package
|
||||
run: |
|
||||
{
|
||||
echo "## easytier-ohrs ${EASYTIER_VERSION}"
|
||||
echo
|
||||
git log -1 --pretty=format:"- %s"
|
||||
echo
|
||||
} > CHANGELOG.md
|
||||
|
||||
- name: Setup HarmonyOS CLI tools
|
||||
uses: ErBWs/setup-ohos@v1
|
||||
|
||||
- name: Download and Extract Custom SDK
|
||||
run: |
|
||||
wget https://github.com/FrankHan052176/Easytier-OHOS-sdk/releases/download/v1/ohos-sdk.zip -O /tmp/ohos-sdk.zip
|
||||
sudo unzip -o /tmp/ohos-sdk.zip -d /tmp/custom-sdk
|
||||
sudo cp -rf /tmp/custom-sdk/linux/native/* $HOME/sdk/native
|
||||
echo "Custom SDK files deployed to $HOME/sdk/native"
|
||||
ls -a $HOME/sdk/native
|
||||
sudo cp -rf /tmp/custom-sdk/linux/native/* $OHOS_NDK_HOME/native
|
||||
echo "Custom SDK files deployed to $OHOS_NDK_HOME/native"
|
||||
ls -a $OHOS_NDK_HOME/native
|
||||
|
||||
- name: Setup build environment
|
||||
run: |
|
||||
echo "OHOS_NDK_HOME=$HOME/sdk" >> $GITHUB_ENV
|
||||
echo "TARGET_ARCH=aarch64-linux-ohos" >> $GITHUB_ENV
|
||||
|
||||
rustup install stable
|
||||
rustup default stable
|
||||
|
||||
rustup target add aarch64-unknown-linux-ohos
|
||||
|
||||
- uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: ohrs
|
||||
|
||||
- name: Create clang wrapper script
|
||||
run: |
|
||||
sudo mkdir -p $OHOS_NDK_HOME/native/llvm
|
||||
@@ -92,23 +172,75 @@ jobs:
|
||||
EOF
|
||||
sudo chmod +x $OHOS_NDK_HOME/native/llvm/aarch64-unknown-linux-ohos-clang.sh
|
||||
|
||||
- name: Build
|
||||
- name: Build latest Har
|
||||
working-directory: ./easytier-contrib/easytier-ohrs
|
||||
run: |
|
||||
sudo apt-get install -y llvm clang lldb lld
|
||||
sudo apt-get install -y protobuf-compiler
|
||||
bash ../../.github/workflows/install_rust.sh
|
||||
source env.sh
|
||||
cargo install ohrs
|
||||
rustup target add aarch64-unknown-linux-ohos
|
||||
cargo update easytier
|
||||
ohrs doctor
|
||||
ohrs build --release --arch aarch
|
||||
ohrs artifact
|
||||
mv package.har easytier-ohrs.har
|
||||
|
||||
- name: Build Release Package
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
working-directory: ./easytier-contrib/easytier-ohrs
|
||||
run: |
|
||||
echo "🎉 Official Release detected. Building easytier-release..."
|
||||
TAG_NAME="${{ github.ref_name }}"
|
||||
TAG_VERSION="${TAG_NAME#v}"
|
||||
echo "Release Version: $TAG_VERSION"
|
||||
cd package
|
||||
jq --arg v "$TAG_VERSION" '.name = "easytier-release" | .version = $v' oh-package.json5 > oh-package.tmp.json5 && mv oh-package.tmp.json5 oh-package.json5
|
||||
cd ..
|
||||
ohrs build --release --arch aarch
|
||||
cd dist/arm64-v8a
|
||||
mv libeasytier_ohrs.so libeasytier_release.so
|
||||
cd ../..
|
||||
ohrs artifact
|
||||
mv package.har easytier-release.har
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: easytier-ohos
|
||||
path: ./easytier-contrib/easytier-ohrs/dist/arm64-v8a/libeasytier_ohrs.so
|
||||
path: |
|
||||
./easytier-contrib/easytier-ohrs/easytier-ohrs.har
|
||||
retention-days: 5
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Publish To Center Ohpm
|
||||
working-directory: ./easytier-contrib/easytier-ohrs
|
||||
env:
|
||||
OHPM_PRIVATE_KEY: ${{ secrets.OHPM_PRIVATE_KEY }}
|
||||
OHPM_KEY_PASSPHRASE: ${{ secrets.OHPM_KEY_PASSPHRASE }}
|
||||
if: ${{ env.OHPM_PUBLISH_CODE != '' && github.event_name == 'push' }}
|
||||
run: |
|
||||
ohpm config set publish_id "$OHPM_PUBLISH_CODE"
|
||||
ohpm config set publish_registry https://ohpm.openharmony.cn/ohpm
|
||||
TMP_DIR=$(mktemp -d)
|
||||
PRIVATE_KEY_FILE="$TMP_DIR/private_key"
|
||||
printf '%s' "$OHPM_PRIVATE_KEY" > "$PRIVATE_KEY_FILE"
|
||||
chmod 600 "$PRIVATE_KEY_FILE"
|
||||
ohpm config set key_path $PRIVATE_KEY_FILE
|
||||
unzip ohpm_crypto.zip -d /home/runner/work/
|
||||
ohpm config set crypto_path /home/runner/work/ohpm_crypto
|
||||
chmod 755 /home/runner/work/ohpm_crypto/*
|
||||
PASSPHRASE="$(printf '%s' "$OHPM_KEY_PASSPHRASE" | tr -d '\r\n')"
|
||||
ohpm config set key_passphrase "$PASSPHRASE"
|
||||
ohpm publish easytier-ohrs.har
|
||||
|
||||
- name: Publish To Private Ohpm
|
||||
working-directory: ./easytier-contrib/easytier-ohrs
|
||||
if: ${{ env.OHPM_PUBLISH_CODE != '' && github.event_name == 'push' }}
|
||||
run: |
|
||||
printf '%s' "${{ secrets.CODEARTS_PRIVATE_OHPM }}" > ~/.ohpm/.ohpmrc
|
||||
ohpm config set strict_ssl false
|
||||
ohpm publish easytier-ohrs.har
|
||||
if [ -f "easytier-release.har" ]; then
|
||||
echo "🚀 Publishing Release package..."
|
||||
ohpm publish easytier-release.har
|
||||
fi
|
||||
curl --header "Content-Type: application/json" --request POST --data "{}" ${{ secrets.CODEARTS_WEBHOOKS }}
|
||||
|
||||
|
||||
@@ -6,22 +6,19 @@ on:
|
||||
core_run_id:
|
||||
description: 'The run id of EasyTier-Core Action in EasyTier repo'
|
||||
type: number
|
||||
default: 10322498549
|
||||
required: true
|
||||
gui_run_id:
|
||||
description: 'The run id of EasyTier-GUI Action in EasyTier repo'
|
||||
type: number
|
||||
default: 10322498557
|
||||
required: true
|
||||
mobile_run_id:
|
||||
description: 'The run id of EasyTier-Mobile Action in EasyTier repo'
|
||||
type: number
|
||||
default: 10322498555
|
||||
required: true
|
||||
version:
|
||||
description: 'Version for this release'
|
||||
type: string
|
||||
default: 'v2.4.4'
|
||||
default: 'v2.6.4'
|
||||
required: true
|
||||
make_latest:
|
||||
description: 'Mark this release as latest'
|
||||
@@ -34,19 +31,18 @@ permissions:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
if: contains('["KKRainbow"]', github.actor)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Download Core Artifact
|
||||
uses: dawidd6/action-download-artifact@v11
|
||||
with:
|
||||
github_token: ${{secrets.GITHUB_TOKEN}}
|
||||
run_id: ${{ inputs.core_run_id }}
|
||||
repo: EasyTier/EasyTier
|
||||
repo: ${{ github.repository }}
|
||||
path: release_assets
|
||||
|
||||
- name: Download GUI Artifact
|
||||
@@ -54,7 +50,7 @@ jobs:
|
||||
with:
|
||||
github_token: ${{secrets.GITHUB_TOKEN}}
|
||||
run_id: ${{ inputs.gui_run_id }}
|
||||
repo: EasyTier/EasyTier
|
||||
repo: ${{ github.repository }}
|
||||
path: release_assets_nozip
|
||||
|
||||
- name: Download Mobile Artifact
|
||||
@@ -62,7 +58,7 @@ jobs:
|
||||
with:
|
||||
github_token: ${{secrets.GITHUB_TOKEN}}
|
||||
run_id: ${{ inputs.mobile_run_id }}
|
||||
repo: EasyTier/EasyTier
|
||||
repo: ${{ github.repository }}
|
||||
path: release_assets_nozip
|
||||
|
||||
- name: Zip release assets
|
||||
|
||||
+113
-65
@@ -2,12 +2,18 @@ name: EasyTier Test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["develop", "main"]
|
||||
branches: [ "develop", "main" ]
|
||||
pull_request:
|
||||
branches: ["develop", "main"]
|
||||
branches: [ "develop", "main" ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
# RUSTC_WRAPPER: "sccache"
|
||||
# SCCACHE_GHA_ENABLED: "true"
|
||||
|
||||
defaults:
|
||||
run:
|
||||
@@ -28,22 +34,104 @@ jobs:
|
||||
# All of these options are optional, so you can remove them if you are happy with the defaults
|
||||
concurrent_skipping: 'never'
|
||||
skip_after_successful_duplicate: 'true'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml", ".github/workflows/install_gui_dep.sh", ".github/workflows/install_rust.sh"]'
|
||||
test:
|
||||
runs-on: ubuntu-22.04
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml", ".github/actions/**"]'
|
||||
|
||||
check:
|
||||
name: Run linters & check
|
||||
runs-on: ubuntu-latest
|
||||
needs: pre_job
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Setup protoc
|
||||
uses: arduino/setup-protoc@v3
|
||||
- name: Prepare build environment
|
||||
uses: ./.github/actions/prepare-build
|
||||
with:
|
||||
# GitHub repo token to use to avoid rate limiter
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
gui: true
|
||||
pnpm: true
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: rustfmt,clippy
|
||||
rustflags: ''
|
||||
|
||||
- uses: taiki-e/install-action@cargo-hack
|
||||
|
||||
- name: Check formatting
|
||||
if: ${{ !cancelled() }}
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
- name: Check Clippy
|
||||
if: ${{ !cancelled() }}
|
||||
run: cargo clippy --all-targets --features full --all -- -D warnings
|
||||
|
||||
- name: Check features
|
||||
if: ${{ !cancelled() }}
|
||||
run: cargo hack check --package easytier --each-feature --exclude-features macos-ne --verbose
|
||||
|
||||
- name: Check Cargo.lock is up to date
|
||||
if: ${{ !cancelled() }}
|
||||
run: |
|
||||
if ! cargo metadata --format-version 1 --locked > /dev/null; then
|
||||
echo "::error::Cargo.lock is out of date. Run cargo generate-lockfile or cargo build locally, then commit Cargo.lock."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pre-test:
|
||||
name: Build test
|
||||
runs-on: ubuntu-latest
|
||||
needs: pre_job
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Prepare build environment
|
||||
uses: ./.github/actions/prepare-build
|
||||
with:
|
||||
gui: true
|
||||
pnpm: true
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- uses: taiki-e/install-action@nextest
|
||||
|
||||
- name: Archive test
|
||||
run: cargo nextest archive --archive-file tests.tar.zst --package easytier --features full
|
||||
|
||||
- uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: tests
|
||||
path: tests.tar.zst
|
||||
retention-days: 1
|
||||
|
||||
test_matrix:
|
||||
name: Test (${{ matrix.name }})
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ pre_job, pre-test ]
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- name: "easytier"
|
||||
opts: "-E 'not test(tests::three_node)' --test-threads 1 --no-fail-fast"
|
||||
|
||||
- name: "three_node"
|
||||
opts: "-E 'test(tests::three_node) and not test(subnet_proxy_three_node_test)' --test-threads 1 --no-fail-fast"
|
||||
|
||||
- name: "three_node::subnet_proxy_three_node_test"
|
||||
opts: "-E 'test(subnet_proxy_three_node_test)' --test-threads 1 --no-fail-fast"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- name: Setup tools for test
|
||||
run: sudo apt install bridge-utils
|
||||
- name: Setup upnpd for test
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y miniupnpd miniupnpd-iptables iptables
|
||||
|
||||
- name: Setup system for test
|
||||
run: |
|
||||
@@ -53,63 +141,23 @@ jobs:
|
||||
sudo sysctl net.ipv6.conf.lo.disable_ipv6=0
|
||||
sudo ip addr add 2001:db8::2/64 dev lo
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
- uses: taiki-e/install-action@nextest
|
||||
|
||||
- name: Download tests
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ env.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: |
|
||||
pnpm -r install
|
||||
pnpm -r --filter "./easytier-web/*" build
|
||||
|
||||
- name: Cargo cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo
|
||||
./target
|
||||
key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Install GUI dependencies (Used by clippy)
|
||||
run: |
|
||||
bash ./.github/workflows/install_gui_dep.sh
|
||||
bash ./.github/workflows/install_rust.sh
|
||||
rustup component add rustfmt
|
||||
rustup component add clippy
|
||||
|
||||
- name: Check formatting
|
||||
if: ${{ !cancelled() }}
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
- name: Check Clippy
|
||||
if: ${{ !cancelled() }}
|
||||
# NOTE: tauri need `dist` dir in build.rs
|
||||
run: |
|
||||
mkdir -p easytier-gui/dist
|
||||
cargo clippy --all-targets --all-features --all -- -D warnings
|
||||
name: tests
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
sudo prlimit --pid $$ --nofile=1048576:1048576
|
||||
sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose -- --test-threads=1
|
||||
sudo chown -R $USER:$USER ./target
|
||||
sudo chown -R $USER:$USER ~/.cargo
|
||||
sudo -E env "PATH=$PATH" cargo nextest run --archive-file tests.tar.zst ${{ matrix.opts }}
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ pre_job, check, test_matrix ]
|
||||
if: needs.pre_job.result == 'success' && needs.pre_job.outputs.should_skip != 'true' && !cancelled()
|
||||
steps:
|
||||
- name: Mark result as failed
|
||||
if: contains(needs.*.result, 'failure')
|
||||
run: exit 1
|
||||
|
||||
@@ -38,6 +38,7 @@ node_modules
|
||||
.vite
|
||||
|
||||
easytier-gui/src-tauri/*.dll
|
||||
easytier-gui/src-tauri/*.sys
|
||||
/easytier-contrib/easytier-ohrs/dist/
|
||||
|
||||
.direnv
|
||||
|
||||
+3
-3
@@ -26,7 +26,7 @@ Thank you for your interest in contributing to EasyTier! This document provides
|
||||
#### Required Tools
|
||||
- Node.js v21 or higher
|
||||
- pnpm v9 or higher
|
||||
- Rust toolchain (version 1.89)
|
||||
- Rust toolchain (version 1.95)
|
||||
- LLVM and Clang
|
||||
- Protoc (Protocol Buffers compiler)
|
||||
|
||||
@@ -79,8 +79,8 @@ sudo apt install -y bridge-utils
|
||||
2. Install dependencies:
|
||||
```bash
|
||||
# Install Rust toolchain
|
||||
rustup install 1.89
|
||||
rustup default 1.89
|
||||
rustup install 1.95
|
||||
rustup default 1.95
|
||||
|
||||
# Install project dependencies
|
||||
pnpm -r install
|
||||
|
||||
+3
-3
@@ -34,7 +34,7 @@
|
||||
#### 必需工具
|
||||
- Node.js v21 或更高版本
|
||||
- pnpm v9 或更高版本
|
||||
- Rust 工具链(版本 1.89)
|
||||
- Rust 工具链(版本 1.95)
|
||||
- LLVM 和 Clang
|
||||
- Protoc(Protocol Buffers 编译器)
|
||||
|
||||
@@ -87,8 +87,8 @@ sudo apt install -y bridge-utils
|
||||
2. 安装依赖:
|
||||
```bash
|
||||
# 安装 Rust 工具链
|
||||
rustup install 1.89
|
||||
rustup default 1.89
|
||||
rustup install 1.95
|
||||
rustup default 1.95
|
||||
|
||||
# 安装项目依赖
|
||||
pnpm -r install
|
||||
|
||||
Generated
+2371
-1300
File diff suppressed because it is too large
Load Diff
@@ -14,6 +14,10 @@ exclude = [
|
||||
"easytier-contrib/easytier-ohrs", # it needs ohrs sdk
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
edition = "2024"
|
||||
rust-version = "1.95"
|
||||
|
||||
[profile.dev]
|
||||
panic = "unwind"
|
||||
debug = 2
|
||||
|
||||
@@ -48,40 +48,43 @@
|
||||
|
||||
Choose the installation method that best suits your needs:
|
||||
|
||||
Linux (Recommended):
|
||||
```bash
|
||||
# 1. Download pre-built binary (Recommended, All platforms supported)
|
||||
# Visit https://github.com/EasyTier/EasyTier/releases
|
||||
curl -fsSL "https://github.com/EasyTier/EasyTier/blob/main/script/install.sh?raw=true" | sudo bash -s install
|
||||
```
|
||||
|
||||
# 2. Install via cargo (Latest development version)
|
||||
cargo install --git https://github.com/EasyTier/EasyTier.git easytier
|
||||
|
||||
# 3. Install via Docker
|
||||
# See https://easytier.cn/en/guide/installation.html#installation-methods
|
||||
|
||||
# 4. Linux Quick Install
|
||||
wget -O- https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh | sudo bash -s install
|
||||
|
||||
# 5. MacOS via Homebrew
|
||||
Homebrew (MacOS/Linux):
|
||||
```bash
|
||||
brew tap brewforge/chinese
|
||||
brew install --cask easytier-gui
|
||||
|
||||
# 6. OpenWrt Luci Web UI
|
||||
# Visit https://github.com/EasyTier/luci-app-easytier
|
||||
|
||||
# 7. (Optional) Install shell completions:
|
||||
easytier-core --gen-autocomplete fish > ~/.config/fish/completions/easytier-core.fish
|
||||
easytier-cli gen-autocomplete fish > ~/.config/fish/completions/easytier-cli.fish
|
||||
|
||||
```
|
||||
|
||||
Windows (Recommended, run with administrator privileges):
|
||||
```powershell
|
||||
irm "https://github.com/EasyTier/EasyTier/blob/main/script/install.ps1?raw=true" | iex
|
||||
```
|
||||
|
||||
Install via cargo (Latest development version):
|
||||
```bash
|
||||
cargo install --git https://github.com/EasyTier/EasyTier.git easytier
|
||||
```
|
||||
|
||||
[Install pre-built binary](https://github.com/EasyTier/EasyTier/releases) (Recommended, All platforms supported)
|
||||
|
||||
[Install via Docker](https://easytier.cn/en/guide/installation.html#installation-methods)
|
||||
|
||||
[Install OpenWrt ipk package](https://github.com/EasyTier/luci-app-easytier)
|
||||
|
||||
Additional steps:
|
||||
|
||||
[One-Click Register Service](https://easytier.cn/en/guide/network/oneclick-install-as-service.html) (Automatically start when the system boots and run in the background)
|
||||
|
||||
### 🚀 Basic Usage
|
||||
|
||||
#### Quick Networking with Shared Nodes
|
||||
|
||||
EasyTier supports quick networking using shared public nodes. When you don't have a public IP, you can use the free shared nodes provided by the EasyTier community. Nodes will automatically attempt NAT traversal and establish P2P connections. When P2P fails, data will be relayed through shared nodes.
|
||||
|
||||
The currently deployed shared public node is `tcp://public.easytier.cn:11010`.
|
||||
|
||||
When using shared nodes, each node entering the network needs to provide the same `--network-name` and `--network-secret` parameters as the unique identifier of the network.
|
||||
|
||||
Taking two nodes as an example (Please use more complex network name to avoid conflicts):
|
||||
@@ -90,14 +93,14 @@ Taking two nodes as an example (Please use more complex network name to avoid co
|
||||
|
||||
```bash
|
||||
# Run with administrator privileges
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<SharedNodeIP>:11010
|
||||
```
|
||||
|
||||
2. Run on Node B:
|
||||
|
||||
```bash
|
||||
# Run with administrator privileges
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<SharedNodeIP>:11010
|
||||
```
|
||||
|
||||
After successful execution, you can check the network status using `easytier-cli`:
|
||||
@@ -105,9 +108,9 @@ After successful execution, you can check the network status using `easytier-cli
|
||||
```text
|
||||
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
|
||||
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
|
||||
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.4-70e69a38~ |
|
||||
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.4-70e69a38~ |
|
||||
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.4-70e69a38~ |
|
||||
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.6.2-70e69a38~ |
|
||||
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.6.2-70e69a38~ |
|
||||
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.6.2-70e69a38~ |
|
||||
```
|
||||
|
||||
You can test connectivity between nodes:
|
||||
@@ -124,7 +127,7 @@ To improve availability, you can connect to multiple shared nodes simultaneously
|
||||
|
||||
```bash
|
||||
# Connect to multiple shared nodes
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010 -p udp://public.easytier.cn:11010
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<SharedNodeIP1>:11010 -p udp://<SharedNodeIP2>:11010
|
||||
```
|
||||
|
||||
Once your network is set up successfully, you can easily configure it to start automatically on system boot. Refer to the [One-Click Register Service guide](https://easytier.cn/en/guide/network/oneclick-install-as-service.html) for step-by-step instructions on registering EasyTier as a system service.
|
||||
@@ -280,8 +283,6 @@ sudo easytier-core --network-name mysharednode --network-secret mysharednode
|
||||
|
||||
- [ZeroTier](https://www.zerotier.com/): A global virtual network for connecting devices.
|
||||
- [TailScale](https://tailscale.com/): A VPN solution aimed at simplifying network configuration.
|
||||
- [vpncloud](https://github.com/dswd/vpncloud): A P2P Mesh VPN
|
||||
- [Candy](https://github.com/lanthora/candy): A reliable, low-latency, and anti-censorship virtual private network
|
||||
|
||||
### Contact Us
|
||||
|
||||
|
||||
+32
-32
@@ -48,40 +48,42 @@
|
||||
|
||||
选择最适合您需求的安装方式:
|
||||
|
||||
Linux(推荐):
|
||||
```bash
|
||||
# 1. 下载预编译二进制文件(推荐,支持所有平台)
|
||||
# 访问 https://github.com/EasyTier/EasyTier/releases
|
||||
curl -fsSL "https://github.com/EasyTier/EasyTier/blob/main/script/install.sh?raw=true" | sudo bash -s install
|
||||
```
|
||||
|
||||
# 2. 通过 cargo 安装(最新开发版本)
|
||||
cargo install --git https://github.com/EasyTier/EasyTier.git easytier
|
||||
|
||||
# 3. 通过 Docker 安装
|
||||
# 参见 https://easytier.cn/guide/installation.html#%E5%AE%89%E8%A3%85%E6%96%B9%E5%BC%8F
|
||||
|
||||
# 4. Linux 快速安装
|
||||
wget -O- https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh | sudo bash -s install
|
||||
|
||||
# 5. MacOS 通过 Homebrew 安装
|
||||
Homebrew(MacOS/Linux):
|
||||
```bash
|
||||
brew tap brewforge/chinese
|
||||
brew install --cask easytier-gui
|
||||
|
||||
# 6. OpenWrt Luci Web 界面
|
||||
# 访问 https://github.com/EasyTier/luci-app-easytier
|
||||
|
||||
# 7.(可选)安装 Shell 补全功能:
|
||||
# Fish 补全
|
||||
easytier-core --gen-autocomplete fish > ~/.config/fish/completions/easytier-core.fish
|
||||
easytier-cli gen-autocomplete fish > ~/.config/fish/completions/easytier-cli.fish
|
||||
|
||||
```
|
||||
|
||||
Windows(推荐,请以管理员权限运行):
|
||||
```powershell
|
||||
irm "https://github.com/EasyTier/EasyTier/blob/main/script/install.ps1?raw=true" | iex
|
||||
```
|
||||
|
||||
通过 cargo 安装(最新开发版本):
|
||||
```bash
|
||||
cargo install --git https://github.com/EasyTier/EasyTier.git easytier
|
||||
```
|
||||
|
||||
[下载预编译文件](https://github.com/EasyTier/EasyTier/releases)(推荐,支持所有平台)
|
||||
|
||||
[通过 Docker 安装](https://easytier.cn/guide/installation.html#%E5%AE%89%E8%A3%85%E6%96%B9%E5%BC%8F)
|
||||
|
||||
[安装 OpenWrt ipk 软件包](https://github.com/EasyTier/luci-app-easytier)
|
||||
|
||||
附加步骤:
|
||||
|
||||
[一键注册系统服务](https://easytier.cn/guide/network/oneclick-install-as-service.html)(系统启动时自动后台运行)
|
||||
|
||||
### 🚀 基本用法
|
||||
|
||||
#### 使用共享节点快速组网
|
||||
|
||||
EasyTier 支持使用共享公共节点快速组网。当您没有公网 IP 时,可以使用 EasyTier 社区提供的免费共享节点。节点会自动尝试 NAT 穿透并建立 P2P 连接。当 P2P 失败时,数据将通过共享节点中继。
|
||||
|
||||
当前部署的共享公共节点是 `tcp://public.easytier.cn:11010`。
|
||||
EasyTier 支持使用共享节点快速组网。当您没有公网 IP 时,可以使用公共共享节点。节点会自动尝试 NAT 穿透并建立 P2P 连接。当 P2P 失败时,数据将通过共享节点中继。
|
||||
|
||||
使用共享节点时,每个进入网络的节点需要提供相同的 `--network-name` 和 `--network-secret` 参数作为网络的唯一标识符。
|
||||
|
||||
@@ -91,14 +93,14 @@ EasyTier 支持使用共享公共节点快速组网。当您没有公网 IP 时
|
||||
|
||||
```bash
|
||||
# 以管理员权限运行
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<共享节点IP>:11010
|
||||
```
|
||||
|
||||
2. 在节点 B 上运行:
|
||||
|
||||
```bash
|
||||
# 以管理员权限运行
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<共享节点IP>:11010
|
||||
```
|
||||
|
||||
执行成功后,可以使用 `easytier-cli` 检查网络状态:
|
||||
@@ -106,9 +108,9 @@ sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.ea
|
||||
```text
|
||||
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
|
||||
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
|
||||
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.4-70e69a38~ |
|
||||
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.4-70e69a38~ |
|
||||
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.4-70e69a38~ |
|
||||
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.6.2-70e69a38~ |
|
||||
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.6.2-70e69a38~ |
|
||||
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.6.2-70e69a38~ |
|
||||
```
|
||||
|
||||
您可以测试节点之间的连通性:
|
||||
@@ -125,7 +127,7 @@ ping 10.126.126.2
|
||||
|
||||
```bash
|
||||
# 连接多个共享节点
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.easytier.cn:11010 -p udp://public.easytier.cn:11010
|
||||
sudo easytier-core -d --network-name abc --network-secret abc -p tcp://<公共节点IP>:11010 -p udp://<公共节点IP>:11010
|
||||
```
|
||||
|
||||
#### 去中心化组网
|
||||
@@ -281,8 +283,6 @@ sudo easytier-core --network-name mysharednode --network-secret mysharednode
|
||||
|
||||
- [ZeroTier](https://www.zerotier.com/):用于连接设备的全球虚拟网络。
|
||||
- [TailScale](https://tailscale.com/):旨在简化网络配置的 VPN 解决方案。
|
||||
- [vpncloud](https://github.com/dswd/vpncloud):一个 P2P 网状 VPN
|
||||
- [Candy](https://github.com/lanthora/candy):一个可靠、低延迟、反审查的虚拟专用网络
|
||||
|
||||
### 联系我们
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "easytier-android-jni"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
edition.workspace = true
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib"]
|
||||
@@ -11,6 +11,6 @@ jni = "0.21"
|
||||
once_cell = "1.18.0"
|
||||
log = "0.4"
|
||||
android_logger = "0.13"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde = { version = "1.0.220", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
easytier = { path = "../../easytier" }
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
# EasyTier Android JNI 构建脚本
|
||||
# 用于编译适用于 Android 平台的 JNI 库
|
||||
# 使用 cargo-ndk 工具简化 Android 编译过程
|
||||
|
||||
set -e
|
||||
|
||||
@@ -13,8 +14,8 @@ NC='\033[0m' # No Color
|
||||
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||
|
||||
echo -e "${GREEN}EasyTier Android JNI 构建脚本${NC}"
|
||||
echo "=============================="
|
||||
echo -e "${GREEN}EasyTier Android JNI 构建脚本 (使用 cargo-ndk)${NC}"
|
||||
echo "=============================================="
|
||||
|
||||
# 检查 Rust 是否安装
|
||||
if ! command -v rustc &> /dev/null; then
|
||||
@@ -28,18 +29,38 @@ if ! command -v cargo &> /dev/null; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Android 目标架构
|
||||
# TARGETS=("aarch64-linux-android" "armv7-linux-androideabi" "i686-linux-android" "x86_64-linux-android")
|
||||
TARGETS=("aarch64-linux-android")
|
||||
# 检查 cargo-ndk 是否安装
|
||||
if ! cargo ndk --version &> /dev/null; then
|
||||
echo -e "${YELLOW}cargo-ndk 未安装,正在安装...${NC}"
|
||||
cargo install cargo-ndk
|
||||
if ! cargo ndk --version &> /dev/null; then
|
||||
echo -e "${RED}错误: cargo-ndk 安装失败${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# 检查是否安装了 Android 目标
|
||||
echo -e "${YELLOW}检查 Android 目标架构...${NC}"
|
||||
for target in "${TARGETS[@]}"; do
|
||||
if ! rustup target list --installed | grep -q "$target"; then
|
||||
echo -e "${YELLOW}安装目标架构: $target${NC}"
|
||||
rustup target add "$target"
|
||||
echo -e "${GREEN}cargo-ndk 版本: $(cargo ndk --version)${NC}"
|
||||
|
||||
# Android 目标架构映射 (cargo-ndk 使用的架构名称)
|
||||
# ANDROID_TARGETS=("arm64-v8a" "armeabi-v7a" "x86" "x86_64")
|
||||
ANDROID_TARGETS=("arm64-v8a")
|
||||
|
||||
# Android 架构到 Rust target 的映射
|
||||
declare -A TARGET_MAP
|
||||
TARGET_MAP["arm64-v8a"]="aarch64-linux-android"
|
||||
TARGET_MAP["armeabi-v7a"]="armv7-linux-androideabi"
|
||||
TARGET_MAP["x86"]="i686-linux-android"
|
||||
TARGET_MAP["x86_64"]="x86_64-linux-android"
|
||||
|
||||
# 检查并安装所需的 Rust target
|
||||
echo -e "${YELLOW}检查并安装 Android 目标架构...${NC}"
|
||||
for android_target in "${ANDROID_TARGETS[@]}"; do
|
||||
rust_target="${TARGET_MAP[$android_target]}"
|
||||
if ! rustup target list --installed | grep -q "$rust_target"; then
|
||||
echo -e "${YELLOW}安装目标架构: $rust_target (for $android_target)${NC}"
|
||||
rustup target add "$rust_target"
|
||||
else
|
||||
echo -e "${GREEN}目标架构已安装: $target${NC}"
|
||||
echo -e "${GREEN}目标架构已安装: $rust_target (for $android_target)${NC}"
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -49,66 +70,46 @@ mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
# 构建函数
|
||||
build_for_target() {
|
||||
local target=$1
|
||||
echo -e "${YELLOW}构建目标: $target${NC}"
|
||||
|
||||
# 设置环境变量
|
||||
export CC_aarch64_linux_android="$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang"
|
||||
export CC_armv7_linux_androideabi="$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi21-clang"
|
||||
export CC_i686_linux_android="$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android21-clang"
|
||||
export CC_x86_64_linux_android="$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang"
|
||||
local android_target=$1
|
||||
echo -e "${YELLOW}构建目标: $android_target${NC}"
|
||||
|
||||
# 首先构建 easytier-ffi
|
||||
echo -e "${YELLOW}构建 easytier-ffi for $target${NC}"
|
||||
(cd $REPO_ROOT/easytier-contrib/easytier-ffi && cargo build --target="$target" --release)
|
||||
|
||||
# 设置链接器环境变量
|
||||
export RUSTFLAGS="-L $(readlink -f $REPO_ROOT/target/$target/release) -l easytier_ffi"
|
||||
echo $RUSTFLAGS
|
||||
echo -e "${YELLOW}构建 easytier-ffi for $android_target${NC}"
|
||||
(cd $REPO_ROOT/easytier-contrib/easytier-ffi && cargo ndk -t $android_target build --release)
|
||||
|
||||
# 构建 JNI 库
|
||||
cargo build --target="$target" --release
|
||||
cargo ndk -t $android_target build --release
|
||||
|
||||
# 复制库文件到输出目录
|
||||
local arch_dir
|
||||
case $target in
|
||||
"aarch64-linux-android")
|
||||
arch_dir="arm64-v8a"
|
||||
;;
|
||||
"armv7-linux-androideabi")
|
||||
arch_dir="armeabi-v7a"
|
||||
;;
|
||||
"i686-linux-android")
|
||||
arch_dir="x86"
|
||||
;;
|
||||
"x86_64-linux-android")
|
||||
arch_dir="x86_64"
|
||||
;;
|
||||
esac
|
||||
|
||||
mkdir -p "$OUTPUT_DIR/$arch_dir"
|
||||
cp "$REPO_ROOT/target/$target/release/libeasytier_android_jni.so" "$OUTPUT_DIR/$arch_dir/"
|
||||
echo -e "${GREEN}库文件已复制到: $OUTPUT_DIR/$arch_dir/${NC}"
|
||||
# cargo-ndk 使用 Rust target 名称作为目录名,而不是 Android 架构名称
|
||||
rust_target="${TARGET_MAP[$android_target]}"
|
||||
mkdir -p "$OUTPUT_DIR/$android_target"
|
||||
cp "$REPO_ROOT/target/$rust_target/release/libeasytier_android_jni.so" "$OUTPUT_DIR/$android_target/"
|
||||
cp "$REPO_ROOT/target/$rust_target/release/libeasytier_ffi.so" "$OUTPUT_DIR/$android_target/"
|
||||
echo -e "${GREEN}库文件已复制到: $OUTPUT_DIR/$android_target/${NC}"
|
||||
}
|
||||
|
||||
# 检查 Android NDK
|
||||
if [ -z "$ANDROID_NDK_ROOT" ]; then
|
||||
echo -e "${RED}错误: 未设置 ANDROID_NDK_ROOT 环境变量${NC}"
|
||||
echo "请设置 ANDROID_NDK_ROOT 指向您的 Android NDK 安装目录"
|
||||
echo "例如: export ANDROID_NDK_ROOT=/path/to/android-ndk"
|
||||
exit 1
|
||||
# 检查 Android NDK (cargo-ndk 会自动处理 NDK 路径)
|
||||
if [ -z "$ANDROID_NDK_ROOT" ] && [ -z "$ANDROID_NDK_HOME" ] && [ -z "$NDK_HOME" ]; then
|
||||
echo -e "${YELLOW}警告: 未设置 Android NDK 环境变量${NC}"
|
||||
echo "cargo-ndk 将尝试自动检测 NDK 路径"
|
||||
echo "如果构建失败,请设置以下环境变量之一:"
|
||||
echo " - ANDROID_NDK_ROOT"
|
||||
echo " - ANDROID_NDK_HOME"
|
||||
echo " - NDK_HOME"
|
||||
else
|
||||
if [ -n "$ANDROID_NDK_ROOT" ]; then
|
||||
echo -e "${GREEN}使用 Android NDK: $ANDROID_NDK_ROOT${NC}"
|
||||
elif [ -n "$ANDROID_NDK_HOME" ]; then
|
||||
echo -e "${GREEN}使用 Android NDK: $ANDROID_NDK_HOME${NC}"
|
||||
elif [ -n "$NDK_HOME" ]; then
|
||||
echo -e "${GREEN}使用 Android NDK: $NDK_HOME${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -d "$ANDROID_NDK_ROOT" ]; then
|
||||
echo -e "${RED}错误: Android NDK 目录不存在: $ANDROID_NDK_ROOT${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}使用 Android NDK: $ANDROID_NDK_ROOT${NC}"
|
||||
|
||||
# 构建所有目标
|
||||
echo -e "${YELLOW}开始构建所有目标架构...${NC}"
|
||||
for target in "${TARGETS[@]}"; do
|
||||
for target in "${ANDROID_TARGETS[@]}"; do
|
||||
build_for_target "$target"
|
||||
done
|
||||
|
||||
@@ -123,3 +124,6 @@ echo -e "${YELLOW}使用说明:${NC}"
|
||||
echo "1. 将生成的 .so 文件复制到您的 Android 项目的 src/main/jniLibs/ 目录下"
|
||||
echo "2. 将 java/com/easytier/jni/EasyTierJNI.java 复制到您的 Android 项目中"
|
||||
echo "3. 在您的 Android 代码中调用 EasyTierJNI 类的方法"
|
||||
echo ""
|
||||
echo -e "${GREEN}注意: 此脚本使用 cargo-ndk 工具,无需手动设置复杂的环境变量${NC}"
|
||||
echo -e "${GREEN}cargo-ndk 会自动处理交叉编译所需的工具链配置${NC}"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use easytier::proto::web::{NetworkInstanceRunningInfo, NetworkInstanceRunningInfoMap};
|
||||
use easytier::proto::api::manage::{NetworkInstanceRunningInfo, NetworkInstanceRunningInfoMap};
|
||||
use jni::JNIEnv;
|
||||
use jni::objects::{JClass, JObjectArray, JString};
|
||||
use jni::sys::{jint, jstring};
|
||||
use jni::JNIEnv;
|
||||
use once_cell::sync::Lazy;
|
||||
use std::ffi::{CStr, CString};
|
||||
use std::ptr;
|
||||
@@ -15,7 +15,7 @@ pub struct KeyValuePair {
|
||||
}
|
||||
|
||||
// 声明外部 C 函数
|
||||
extern "C" {
|
||||
unsafe extern "C" {
|
||||
fn set_tun_fd(inst_name: *const std::ffi::c_char, fd: std::ffi::c_int) -> std::ffi::c_int;
|
||||
fn get_error_msg(out: *mut *const std::ffi::c_char);
|
||||
fn free_string(s: *const std::ffi::c_char);
|
||||
@@ -68,7 +68,7 @@ fn throw_exception(env: &mut JNIEnv, message: &str) {
|
||||
}
|
||||
|
||||
/// 设置 TUN 文件描述符
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_setTunFd(
|
||||
mut env: JNIEnv,
|
||||
_class: JClass,
|
||||
@@ -87,17 +87,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_setTunFd(
|
||||
|
||||
unsafe {
|
||||
let result = set_tun_fd(inst_name_cstr.as_ptr(), fd);
|
||||
if result != 0 {
|
||||
if let Some(error) = get_last_error() {
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
if result != 0
|
||||
&& let Some(error) = get_last_error()
|
||||
{
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
/// 解析配置
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_parseConfig(
|
||||
mut env: JNIEnv,
|
||||
_class: JClass,
|
||||
@@ -115,17 +115,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_parseConfig(
|
||||
|
||||
unsafe {
|
||||
let result = parse_config(config_cstr.as_ptr());
|
||||
if result != 0 {
|
||||
if let Some(error) = get_last_error() {
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
if result != 0
|
||||
&& let Some(error) = get_last_error()
|
||||
{
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
/// 运行网络实例
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_runNetworkInstance(
|
||||
mut env: JNIEnv,
|
||||
_class: JClass,
|
||||
@@ -143,17 +143,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_runNetworkInstance(
|
||||
|
||||
unsafe {
|
||||
let result = run_network_instance(config_cstr.as_ptr());
|
||||
if result != 0 {
|
||||
if let Some(error) = get_last_error() {
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
if result != 0
|
||||
&& let Some(error) = get_last_error()
|
||||
{
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
/// 保持网络实例
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
|
||||
mut env: JNIEnv,
|
||||
_class: JClass,
|
||||
@@ -165,10 +165,10 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
|
||||
if instance_names.is_null() {
|
||||
unsafe {
|
||||
let result = retain_network_instance(ptr::null(), 0);
|
||||
if result != 0 {
|
||||
if let Some(error) = get_last_error() {
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
if result != 0
|
||||
&& let Some(error) = get_last_error()
|
||||
{
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@@ -187,10 +187,10 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
|
||||
if array_length == 0 {
|
||||
unsafe {
|
||||
let result = retain_network_instance(ptr::null(), 0);
|
||||
if result != 0 {
|
||||
if let Some(error) = get_last_error() {
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
if result != 0
|
||||
&& let Some(error) = get_last_error()
|
||||
{
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@@ -234,17 +234,17 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_retainNetworkInstance(
|
||||
|
||||
unsafe {
|
||||
let result = retain_network_instance(c_string_ptrs.as_ptr(), c_string_ptrs.len());
|
||||
if result != 0 {
|
||||
if let Some(error) = get_last_error() {
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
if result != 0
|
||||
&& let Some(error) = get_last_error()
|
||||
{
|
||||
throw_exception(&mut env, &error);
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
/// 收集网络信息
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_collectNetworkInfos(
|
||||
mut env: JNIEnv,
|
||||
_class: JClass,
|
||||
@@ -304,7 +304,7 @@ pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_collectNetworkInfos(
|
||||
}
|
||||
|
||||
/// 获取最后的错误信息
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "system" fn Java_com_easytier_jni_EasyTierJNI_getLastError(
|
||||
env: JNIEnv,
|
||||
_class: JClass,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "easytier-ffi"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
edition.workspace = true
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
@@ -2,9 +2,8 @@ use std::sync::Mutex;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use easytier::{
|
||||
common::config::{ConfigLoader as _, TomlConfigLoader},
|
||||
common::config::{ConfigFileControl, ConfigLoader as _, TomlConfigLoader},
|
||||
instance_manager::NetworkInstanceManager,
|
||||
launcher::ConfigSource,
|
||||
};
|
||||
|
||||
static INSTANCE_NAME_ID_MAP: once_cell::sync::Lazy<DashMap<String, uuid::Uuid>> =
|
||||
@@ -31,7 +30,7 @@ fn set_error_msg(msg: &str) {
|
||||
|
||||
/// # Safety
|
||||
/// Set the tun fd
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub unsafe extern "C" fn set_tun_fd(
|
||||
inst_name: *const std::ffi::c_char,
|
||||
fd: std::ffi::c_int,
|
||||
@@ -60,7 +59,7 @@ pub unsafe extern "C" fn set_tun_fd(
|
||||
|
||||
/// # Safety
|
||||
/// Get the last error message
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub unsafe extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
|
||||
let msg_buf = ERROR_MSG.lock().unwrap();
|
||||
if msg_buf.is_empty() {
|
||||
@@ -75,7 +74,7 @@ pub unsafe extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub extern "C" fn free_string(s: *const std::ffi::c_char) {
|
||||
if s.is_null() {
|
||||
return;
|
||||
@@ -87,7 +86,7 @@ pub extern "C" fn free_string(s: *const std::ffi::c_char) {
|
||||
|
||||
/// # Safety
|
||||
/// Parse the config
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
|
||||
let cfg_str = unsafe {
|
||||
assert!(!cfg_str.is_null());
|
||||
@@ -106,7 +105,7 @@ pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::
|
||||
|
||||
/// # Safety
|
||||
/// Run the network instance
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
|
||||
let cfg_str = unsafe {
|
||||
assert!(!cfg_str.is_null());
|
||||
@@ -129,13 +128,14 @@ pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char)
|
||||
return -1;
|
||||
}
|
||||
|
||||
let instance_id = match INSTANCE_MANAGER.run_network_instance(cfg, ConfigSource::FFI) {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
set_error_msg(&format!("failed to start instance: {}", e));
|
||||
return -1;
|
||||
}
|
||||
};
|
||||
let instance_id =
|
||||
match INSTANCE_MANAGER.run_network_instance(cfg, false, ConfigFileControl::STATIC_CONFIG) {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
set_error_msg(&format!("failed to start instance: {}", e));
|
||||
return -1;
|
||||
}
|
||||
};
|
||||
|
||||
INSTANCE_NAME_ID_MAP.insert(inst_name, instance_id);
|
||||
|
||||
@@ -144,7 +144,7 @@ pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char)
|
||||
|
||||
/// # Safety
|
||||
/// Retain the network instance
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub unsafe extern "C" fn retain_network_instance(
|
||||
inst_names: *const *const std::ffi::c_char,
|
||||
length: usize,
|
||||
@@ -188,7 +188,7 @@ pub unsafe extern "C" fn retain_network_instance(
|
||||
|
||||
/// # Safety
|
||||
/// Collect the network infos
|
||||
#[no_mangle]
|
||||
#[unsafe(no_mangle)]
|
||||
pub unsafe extern "C" fn collect_network_infos(
|
||||
infos: *mut KeyValuePair,
|
||||
max_length: usize,
|
||||
@@ -202,7 +202,7 @@ pub unsafe extern "C" fn collect_network_infos(
|
||||
std::slice::from_raw_parts_mut(infos, max_length)
|
||||
};
|
||||
|
||||
let collected_infos = match INSTANCE_MANAGER.collect_network_infos() {
|
||||
let collected_infos = match INSTANCE_MANAGER.collect_network_infos_sync() {
|
||||
Ok(infos) => infos,
|
||||
Err(e) => {
|
||||
set_error_msg(&format!("failed to collect network infos: {}", e));
|
||||
@@ -215,7 +215,7 @@ pub unsafe extern "C" fn collect_network_infos(
|
||||
if index >= max_length {
|
||||
break;
|
||||
}
|
||||
let Some(key) = INSTANCE_MANAGER.get_network_instance_name(instance_id) else {
|
||||
let Some(key) = INSTANCE_MANAGER.get_instance_name(instance_id) else {
|
||||
continue;
|
||||
};
|
||||
// convert value to json string
|
||||
@@ -228,7 +228,7 @@ pub unsafe extern "C" fn collect_network_infos(
|
||||
};
|
||||
|
||||
infos[index] = KeyValuePair {
|
||||
key: std::ffi::CString::new(key.clone()).unwrap().into_raw(),
|
||||
key: std::ffi::CString::new(key).unwrap().into_raw(),
|
||||
value: std::ffi::CString::new(value).unwrap().into_raw(),
|
||||
};
|
||||
index += 1;
|
||||
|
||||
@@ -1,43 +1,74 @@
|
||||
#!/data/adb/magisk/busybox sh
|
||||
MODDIR=${0%/*}
|
||||
MODULE_PROP="${MODDIR}/module.prop"
|
||||
IP_RULE_SCRIPT="${MODDIR}/hotspot_iprule.sh"
|
||||
|
||||
ET_STATUS=""
|
||||
REDIR_STATUS=""
|
||||
# 更新module.prop文件中的description
|
||||
IS_RUNNING=false
|
||||
|
||||
# 确保辅助脚本有执行权限
|
||||
chmod +x "${IP_RULE_SCRIPT}" 2>/dev/null
|
||||
|
||||
# 更新 module.prop 文件中的 description
|
||||
update_module_description() {
|
||||
local status_message=$1
|
||||
sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP}
|
||||
# 检查 module.prop 文件存在且 description 发生变化了再写入
|
||||
if [ -f "${MODULE_PROP}" ]; then
|
||||
local current_desc=$(grep "^description=" "${MODULE_PROP}")
|
||||
local new_desc="description=[状态] ${status_message}"
|
||||
if [ "${current_desc}" != "${new_desc}" ]; then
|
||||
sed -i "s#^description=.*#${new_desc}#" "${MODULE_PROP}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# 判断程序启动状态
|
||||
if [ -f "${MODDIR}/disable" ]; then
|
||||
ET_STATUS="已关闭"
|
||||
elif pgrep -f 'easytier-core' >/dev/null; then
|
||||
if [ -f "${MODDIR}/config/command_args"]; then
|
||||
ET_STATUS="主程序已开启(启动参数模式)"
|
||||
IS_RUNNING=false
|
||||
ET_STATUS="主程序已关闭"
|
||||
|
||||
elif pgrep -f "${MODDIR}/easytier-core" >/dev/null; then
|
||||
IS_RUNNING=true
|
||||
if [ -f "${MODDIR}/config/command_args" ]; then
|
||||
ET_STATUS="主程序正在运行(启动参数模式)"
|
||||
else
|
||||
ET_STATUS="主程序已开启(配置文件模式)"
|
||||
ET_STATUS="主程序正在运行(配置文件模式)"
|
||||
fi
|
||||
|
||||
elif [ -z "$ET_STATUS" ]; then
|
||||
# 既没 disable 也没运行,说明是异常停止或未启动
|
||||
ET_STATUS="主程序启动失败或未运行"
|
||||
fi
|
||||
|
||||
#ET_STATUS不存在说明开启模块未正常运行,不修改状态
|
||||
if [ -n "$ET_STATUS" ]; then
|
||||
if [ -f "${MODDIR}/enable_IP_rule" ]; then
|
||||
rm -f "${MODDIR}/enable_IP_rule"
|
||||
${MODDIR}/hotspot_iprule.sh del
|
||||
REDIR_STATUS="转发已禁用"
|
||||
echo "热点子网转发已禁用"
|
||||
echo "[ET-NAT] IP rule disabled." >> "${MODDIR}/log.log"
|
||||
else
|
||||
touch "${MODDIR}/enable_IP_rule"
|
||||
${MODDIR}/hotspot_iprule.sh del
|
||||
${MODDIR}/hotspot_iprule.sh add_once
|
||||
REDIR_STATUS="转发已激活"
|
||||
echo "热点子网转发已激活,热点开启后将自动将热点加入转发网络(要求已配置本地网络cidr=参数)。转发规则将随着热点开关而自动开关。该状态将保持到转发被禁用为止。"
|
||||
echo "[ET-NAT] IP rule enabled." >> "${MODDIR}/log.log"
|
||||
fi
|
||||
update_module_description "${ET_STATUS} | ${REDIR_STATUS}"
|
||||
# 无论主程序是否运行,都允许切换“开关文件”的状态,以便下次生效
|
||||
if [ -f "${MODDIR}/enable_IP_rule" ]; then
|
||||
rm -f "${MODDIR}/enable_IP_rule"
|
||||
|
||||
"${IP_RULE_SCRIPT}" del >/dev/null 2>&1
|
||||
|
||||
REDIR_STATUS="转发已禁用"
|
||||
echo "热点子网转发已禁用"
|
||||
echo "[ET-NAT] Action: IP rule disabled." >> "${MODDIR}/log.log"
|
||||
else
|
||||
echo "主程序未正常启动,请先检查配置文件"
|
||||
touch "${MODDIR}/enable_IP_rule"
|
||||
|
||||
if [ "$IS_RUNNING" = true ]; then
|
||||
"${IP_RULE_SCRIPT}" del >/dev/null 2>&1
|
||||
"${IP_RULE_SCRIPT}" add_once
|
||||
echo "转发规则将立即生效,无需重启"
|
||||
else
|
||||
echo "主程序未运行,转发规则将在下次启动时生效"
|
||||
fi
|
||||
|
||||
REDIR_STATUS="转发已激活"
|
||||
echo "----------------------------------"
|
||||
echo "热点子网转发已激活"
|
||||
echo "热点开启后将自动将热点加入转发网络"
|
||||
echo "需要在配置中提前配置好 cidr 参数"
|
||||
echo "----------------------------------"
|
||||
echo "[ET-NAT] Action: IP rule enabled." >> "${MODDIR}/log.log"
|
||||
fi
|
||||
|
||||
sync
|
||||
update_module_description "${ET_STATUS}| ${REDIR_STATUS}"
|
||||
@@ -33,5 +33,6 @@ foreign_network_whitelist = "*"
|
||||
disable_p2p = false
|
||||
relay_all_peer_rpc = false
|
||||
disable_udp_hole_punching = false
|
||||
disable_tcp_hole_punching = false
|
||||
|
||||
|
||||
|
||||
@@ -1,9 +1,19 @@
|
||||
ui_print '安装完成'
|
||||
ui_print '当前架构为' + $ARCH
|
||||
ui_print '当前系统版本为' + $API
|
||||
ui_print '安装目录为: /data/adb/modules/easytier_magisk'
|
||||
ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.toml'
|
||||
ui_print '如果需要自定义启动参数,可将 /data/adb/modules/easytier_magisk/config/command_args_sample 重命名为 command_args,并修改其中内容,使用自定义启动参数时会忽略配置文件'
|
||||
ui_print '修改配置文件后在magisk app禁用应用再启动即可生效'
|
||||
ui_print '点击操作按钮可启动/关闭热点子网转发,配合easytier的子网代理功能实现手机热点访问easytier网络'
|
||||
ui_print '记得重启'
|
||||
SKIPMOUNT=false
|
||||
PROPFILE=true
|
||||
POSTFSDATA=true
|
||||
LATESTARTSERVICE=true
|
||||
|
||||
set_perm_recursive $MODPATH 0 0 0777 0777
|
||||
|
||||
ui_print "系统架构为:$ARCH"
|
||||
ui_print "系统 SDK 版本:$API"
|
||||
ui_print "EasyTier 安装位置:/data/adb/modules/easytier_magisk"
|
||||
ui_print "配置文件位置:/data/adb/modules/easytier_magisk/config/config.toml"
|
||||
ui_print "如需使用启动参数模式,请将 /data/adb/modules/easytier_magisk/config/command_args_sample 重命名为 command_args,并修改其中的内容"
|
||||
ui_print "config 目录中存在 command_args 文件时,模块会自动忽略 config.toml 文件"
|
||||
ui_print "----------------------------------"
|
||||
ui_print "注意!启动参数文件中不能存在 \" 和 ',配置文件则没有这个限制"
|
||||
ui_print "----------------------------------"
|
||||
ui_print "修改配置后无需重启设备,在 Magisk 中禁用 EasyTier 模块,等待 10 秒后重新启用即可让新配置生效"
|
||||
ui_print "点击 Magisk 中模块左下角的“操作”按钮可以禁用或激活热点子网转发,使用该功能前需要在配置中提前配置好 cidr 参数"
|
||||
ui_print "模块安装完成,重启设备生效"
|
||||
@@ -2,23 +2,31 @@
|
||||
|
||||
MODDIR=${0%/*}
|
||||
CONFIG_FILE="${MODDIR}/config/config.toml"
|
||||
COMMAND_ARGS="${MODDIR}/config/command_args"
|
||||
LOG_FILE="${MODDIR}/log.log"
|
||||
MODULE_PROP="${MODDIR}/module.prop"
|
||||
EASYTIER="${MODDIR}/easytier-core"
|
||||
|
||||
# 处理获取到的设备型号中可能出现的空格
|
||||
BRAND=$(getprop ro.product.brand | tr ' ' '-')
|
||||
MODEL=$(getprop ro.product.model | tr ' ' '-')
|
||||
DEVICE_HOSTNAME="${BRAND}-${MODEL}"
|
||||
REDIR_STATUS=""
|
||||
|
||||
# 更新module.prop文件中的description
|
||||
# 更新 module.prop 文件中的 description
|
||||
update_module_description() {
|
||||
local status_message=$1
|
||||
sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP}
|
||||
# 检查 module.prop 文件存在且 description 发生变化了再写入
|
||||
if [ -f "${MODULE_PROP}" ]; then
|
||||
local current_desc=$(grep "^description=" "${MODULE_PROP}")
|
||||
local new_desc="description=[状态] ${status_message}"
|
||||
if [ "${current_desc}" != "${new_desc}" ]; then
|
||||
sed -i "s#^description=.*#${new_desc}#" "${MODULE_PROP}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
if [ -f "${MODDIR}/enable_IP_rule" ]; then
|
||||
REDIR_STATUS="转发已激活"
|
||||
else
|
||||
REDIR_STATUS="转发已禁用"
|
||||
fi
|
||||
|
||||
# 检查并初始化 TUN 设备
|
||||
if [ ! -e /dev/net/tun ]; then
|
||||
if [ ! -d /dev/net ]; then
|
||||
mkdir -p /dev/net
|
||||
@@ -28,38 +36,77 @@ if [ ! -e /dev/net/tun ]; then
|
||||
fi
|
||||
|
||||
while true; do
|
||||
if ls $MODDIR | grep -q "disable"; then
|
||||
update_module_description "关闭中 | ${REDIR_STATUS}"
|
||||
if pgrep -f 'easytier-core' >/dev/null; then
|
||||
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭 ..."
|
||||
pkill easytier-core # 关闭进程
|
||||
fi
|
||||
# 获取子网转发激活状态
|
||||
if [ -f "${MODDIR}/enable_IP_rule" ]; then
|
||||
REDIR_STATUS="转发已激活"
|
||||
else
|
||||
if ! pgrep -f 'easytier-core' >/dev/null; then
|
||||
if [ ! -f "$CONFIG_FILE" ]; then
|
||||
update_module_description "config.toml不存在"
|
||||
sleep 3s
|
||||
continue
|
||||
fi
|
||||
|
||||
# 如果 config 目录下存在 command_args 文件,则读取其中的内容作为启动参数
|
||||
if [ -f "${MODDIR}/config/command_args" ]; then
|
||||
TZ=Asia/Shanghai ${EASYTIER} $(cat ${MODDIR}/config/command_args) > ${LOG_FILE} &
|
||||
sleep 5s # 等待easytier-core启动完成
|
||||
update_module_description "主程序已开启(启动参数模式) | ${REDIR_STATUS}"
|
||||
else
|
||||
TZ=Asia/Shanghai ${EASYTIER} -c ${CONFIG_FILE} > ${LOG_FILE} &
|
||||
sleep 5s # 等待easytier-core启动完成
|
||||
update_module_description "主程序已开启(配置文件模式) | ${REDIR_STATUS}"
|
||||
fi
|
||||
ip rule add from all lookup main
|
||||
if ! pgrep -f 'easytier-core' >/dev/null; then
|
||||
update_module_descriptio "主程序启动失败,请检查配置文件"
|
||||
fi
|
||||
else
|
||||
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在"
|
||||
fi
|
||||
REDIR_STATUS="转发已禁用"
|
||||
fi
|
||||
|
||||
sleep 3s # 暂停3秒后再次执行循环
|
||||
# 检查模块是否被禁用
|
||||
if [ -f "${MODDIR}/disable" ]; then
|
||||
update_module_description "主程序已关闭 | ${REDIR_STATUS}"
|
||||
if pgrep -f "${EASYTIER}" >/dev/null; then
|
||||
echo "开关控制 $(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭"
|
||||
pkill -f "${EASYTIER}"
|
||||
fi
|
||||
sleep 10s
|
||||
continue
|
||||
fi
|
||||
|
||||
# 检查进程是否已经在运行
|
||||
if pgrep -f "${EASYTIER}" >/dev/null; then
|
||||
sleep 10s
|
||||
continue
|
||||
fi
|
||||
|
||||
# 检查配置文件是否存在
|
||||
if [ ! -f "${CONFIG_FILE}" ] && [ ! -f "${COMMAND_ARGS}" ]; then
|
||||
update_module_description "缺少配置文件或启动参数文件"
|
||||
sleep 10s
|
||||
continue
|
||||
fi
|
||||
|
||||
# 如果 config 目录下存在 command_args 文件,则读取其中的内容作为启动参数
|
||||
if [ -f "${COMMAND_ARGS}" ]; then
|
||||
# 启动参数模式
|
||||
CMD_CONTENT=$(tr '\r\n' ' ' < "${COMMAND_ARGS}")
|
||||
|
||||
if echo "${CMD_CONTENT}" | grep -q "\-\-hostname"; then
|
||||
FINAL_ARGS="${CMD_CONTENT}"
|
||||
else
|
||||
FINAL_ARGS="${CMD_CONTENT} --hostname ${DEVICE_HOSTNAME}"
|
||||
fi
|
||||
|
||||
TZ=Asia/Shanghai "${EASYTIER}" ${FINAL_ARGS} > "${LOG_FILE}" 2>&1 &
|
||||
STR_MODE="启动参数模式"
|
||||
|
||||
# 否则读取 config.toml 的内容作为启动参数
|
||||
else
|
||||
# 配置文件模式
|
||||
if grep -q "^[[:space:]]*hostname[[:space:]]*=" "${CONFIG_FILE}"; then
|
||||
TZ=Asia/Shanghai "${EASYTIER}" -c "${CONFIG_FILE}" > "${LOG_FILE}" 2>&1 &
|
||||
else
|
||||
TZ=Asia/Shanghai "${EASYTIER}" -c "${CONFIG_FILE}" --hostname "${DEVICE_HOSTNAME}" > "${LOG_FILE}" 2>&1 &
|
||||
fi
|
||||
|
||||
STR_MODE="配置文件模式"
|
||||
fi
|
||||
|
||||
# 等待进程启动
|
||||
sleep 5s
|
||||
|
||||
# 启动后的扫尾工作
|
||||
if pgrep -f "${EASYTIER}" >/dev/null; then
|
||||
|
||||
if ! ip rule show | grep -q "lookup main"; then
|
||||
ip rule add from all lookup main
|
||||
fi
|
||||
|
||||
update_module_description "主程序正在运行(${STR_MODE})| ${REDIR_STATUS}"
|
||||
else
|
||||
update_module_description "主程序启动失败,请检查配置文件或启动参数"
|
||||
fi
|
||||
|
||||
sleep 10s
|
||||
done
|
||||
@@ -22,7 +22,10 @@ get_tun_iface() {
|
||||
ip link | awk -F': ' '/ tun[[:alnum:]]+/ {print $2; exit}'
|
||||
}
|
||||
get_hot_iface() {
|
||||
ip link | awk -F': ' '/(^| )(swlan[[:alnum:]_]*|softap[[:alnum:]_]*|ap[[:alnum:]_]*)\:/ {print $2; exit}' | cut -d'@' -f1 | head -n1
|
||||
ip link | awk -F': ' '/(^| )(swlan[[:alnum:]_]*|softap[[:alnum:]_]*|p2p-wlan[[:alnum:]_]*|ap[[:alnum:]_]*)\:/ {print $2; exit}' | cut -d'@' -f1 | head -n1
|
||||
}
|
||||
get_usb_iface() {
|
||||
ip link | awk -F': ' '/(^| )(usb[[:alnum:]_]*|rndis[[:alnum:]_]*|eth[[:alnum:]_]*)\:/ {print $2; exit}' | cut -d'@' -f1 | head -n1
|
||||
}
|
||||
get_hot_cidr() {
|
||||
ip -4 addr show dev "$1" | awk '/inet /{print $2; exit}'
|
||||
@@ -33,10 +36,12 @@ set_nat_rules() {
|
||||
ET_IFACE=$(get_et_iface)
|
||||
[ -z "$ET_IFACE" ] && ET_IFACE="$(get_tun_iface)"
|
||||
HOT_IFACE=$(get_hot_iface)
|
||||
USB_IFACE=$(get_usb_iface)
|
||||
HOT_CIDR=$(get_hot_cidr "$HOT_IFACE")
|
||||
USB_CIDR=$(get_hot_cidr "$USB_IFACE")
|
||||
|
||||
# 如果热点关闭就删除自定义链
|
||||
[ -n "$ET_IFACE" ] && [ -n "$HOT_CIDR" ] || return 1
|
||||
[ -n "$ET_IFACE" ] && { [ -n "$HOT_CIDR" ] || [ -n "$USB_CIDR" ]; } || return 1
|
||||
|
||||
# 创建自定义链(如不存在)
|
||||
iptables -t nat -N ET_NAT 2>/dev/null
|
||||
@@ -49,13 +54,22 @@ set_nat_rules() {
|
||||
iptables -I FORWARD 1 -j ET_FWD
|
||||
|
||||
# 添加规则
|
||||
iptables -t nat -A ET_NAT -s "$HOT_CIDR" -o "$ET_IFACE" -j MASQUERADE
|
||||
iptables -A ET_FWD -i "$HOT_IFACE" -o "$ET_IFACE" \
|
||||
-m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
|
||||
iptables -A ET_FWD -i "$ET_IFACE" -o "$HOT_IFACE" \
|
||||
-m state --state ESTABLISHED,RELATED -j ACCEPT
|
||||
|
||||
echo "[ET-NAT] Rules applied: $HOT_IFACE $HOT_CIDR ↔ $ET_IFACE" >> "$LOG_FILE"
|
||||
if [ -n "$HOT_CIDR" ]; then
|
||||
iptables -t nat -A ET_NAT -s "$HOT_CIDR" -o "$ET_IFACE" -j MASQUERADE
|
||||
iptables -A ET_FWD -i "$HOT_IFACE" -o "$ET_IFACE" \
|
||||
-m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
|
||||
iptables -A ET_FWD -i "$ET_IFACE" -o "$HOT_IFACE" \
|
||||
-m state --state ESTABLISHED,RELATED -j ACCEPT
|
||||
echo "[ET-NAT] Rules applied: $HOT_IFACE $HOT_CIDR ↔ $ET_IFACE" >> "$LOG_FILE"
|
||||
fi
|
||||
if [ -n "$USB_CIDR" ]; then
|
||||
iptables -t nat -A ET_NAT -s "$USB_CIDR" -o "$ET_IFACE" -j MASQUERADE
|
||||
iptables -A ET_FWD -i "$USB_IFACE" -o "$ET_IFACE" \
|
||||
-m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
|
||||
iptables -A ET_FWD -i "$ET_IFACE" -o "$USB_IFACE" \
|
||||
-m state --state ESTABLISHED,RELATED -j ACCEPT
|
||||
echo "[ET-NAT] Rules applied: $USB_IFACE $USB_CIDR ↔ $ET_IFACE" >> "$LOG_FILE"
|
||||
fi
|
||||
}
|
||||
|
||||
flush_rules() {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
id=easytier_magisk
|
||||
name=EasyTier_Magisk
|
||||
version=v2.4.4
|
||||
version=v2.6.4
|
||||
versionCode=1
|
||||
author=EasyTier
|
||||
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
MODDIR=${0%/*}
|
||||
pkill easytier-core # 结束 easytier-core 进程
|
||||
rm -rf $MODDIR/*
|
||||
pkill -f "${MODDIR}/easytier-core"
|
||||
|
||||
# 使用 ${MODDIR:?} 确保变量非空,避免执行 rm -rf /*
|
||||
rm -rf "${MODDIR:?}/"*
|
||||
@@ -0,0 +1,9 @@
|
||||
dist/
|
||||
target/
|
||||
.DS_Store
|
||||
.idea/
|
||||
package/libs
|
||||
|
||||
*.har
|
||||
|
||||
Cargo.lock
|
||||
+1487
-692
File diff suppressed because it is too large
Load Diff
@@ -7,10 +7,14 @@ edition = "2024"
|
||||
crate-type=["cdylib"]
|
||||
|
||||
[dependencies]
|
||||
async-trait = "0.1"
|
||||
base64 = "0.22"
|
||||
flate2 = "1.1"
|
||||
gethostname = "1.1"
|
||||
ohos-hilog-binding = {version = "*", features = ["redirect"]}
|
||||
easytier = { git = "https://github.com/EasyTier/EasyTier.git" }
|
||||
napi-derive-ohos = "1.0.4"
|
||||
napi-ohos = { version = "1.0.4", default-features = false, features = [
|
||||
easytier = { path = "../../easytier" }
|
||||
napi-derive-ohos = "1.1"
|
||||
napi-ohos = { version = "1.1", default-features = false, features = [
|
||||
"serde-json",
|
||||
"latin1",
|
||||
"chrono_date",
|
||||
@@ -26,14 +30,25 @@ napi-ohos = { version = "1.0.4", default-features = false, features = [
|
||||
"web_stream",
|
||||
] }
|
||||
once_cell = "1.21.3"
|
||||
ipnet = "2.10"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0.125"
|
||||
prost-reflect = { version = "0.14.5", default-features = false, features = ["derive"] }
|
||||
rusqlite = { version = "0.32", features = ["bundled"] }
|
||||
tracing-subscriber = "0.3.19"
|
||||
tracing-core = "0.1.33"
|
||||
tracing = "0.1.41"
|
||||
uuid = { version = "1.17.0", features = ["v4"] }
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "sync", "time"] }
|
||||
url = "2.5"
|
||||
uuid = { version = "1.5.0", features = [
|
||||
"v4",
|
||||
"fast-rng",
|
||||
"macro-diagnostics",
|
||||
"serde",
|
||||
] }
|
||||
|
||||
[build-dependencies]
|
||||
napi-build-ohos = "1.0.4"
|
||||
napi-build-ohos = "1.1"
|
||||
[profile.dev]
|
||||
panic = "unwind"
|
||||
debug = true
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
fn main () {
|
||||
fn main() {
|
||||
napi_build_ohos::setup();
|
||||
}
|
||||
Binary file not shown.
+2
@@ -0,0 +1,2 @@
|
||||
# 0.0.1
|
||||
- init package
|
||||
+165
@@ -0,0 +1,165 @@
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
||||
+162
@@ -0,0 +1,162 @@
|
||||
# `easytier-ohrs`
|
||||
|
||||
## Install
|
||||
|
||||
use `ohpm` to install package.
|
||||
|
||||
```shell
|
||||
ohpm install easytier-ohrs
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
### collectNetworkInfos
|
||||
|
||||
```ts
|
||||
collectNetworkInfos(): Array<KeyValuePair>
|
||||
````
|
||||
|
||||
获取正在运行的网络实例的信息。
|
||||
|
||||
---
|
||||
|
||||
### collectRunningNetwork
|
||||
|
||||
```ts
|
||||
collectRunningNetwork(): Array<string>
|
||||
```
|
||||
|
||||
获取当前正在运行的网络实例名称列表。
|
||||
|
||||
---
|
||||
|
||||
### convertTomlToNetworkConfig
|
||||
|
||||
```ts
|
||||
convertTomlToNetworkConfig(cfgStr: string): string
|
||||
```
|
||||
|
||||
将 TOML 配置转换为 NetworkConfig。
|
||||
|
||||
* `cfgStr`:TOML 配置内容
|
||||
|
||||
---
|
||||
|
||||
### defaultNetworkConfig
|
||||
|
||||
```ts
|
||||
defaultNetworkConfig(): string
|
||||
```
|
||||
|
||||
获取默认的网络配置(JSON 字符串),用于转换为object进行赋值。
|
||||
|
||||
---
|
||||
|
||||
### easytierVersion
|
||||
|
||||
```ts
|
||||
easytierVersion(): string
|
||||
```
|
||||
|
||||
获取 EasyTier 当前版本号。
|
||||
|
||||
---
|
||||
|
||||
### hilogGlobalOptions
|
||||
|
||||
```ts
|
||||
hilogGlobalOptions(domain: number, tag: string): void
|
||||
```
|
||||
|
||||
设置全局日志选项。
|
||||
|
||||
* `domain`:日志域 ID
|
||||
* `tag`:日志标签
|
||||
|
||||
---
|
||||
|
||||
### initPanicHook
|
||||
|
||||
```ts
|
||||
initPanicHook(): void
|
||||
```
|
||||
|
||||
初始化 panic 钩子,用于将Rust侧的panic输出到hilog中,请先通过 hilogGlobalOptions 设置hilog的参数。
|
||||
|
||||
---
|
||||
|
||||
### initTracingSubscriber
|
||||
|
||||
```ts
|
||||
initTracingSubscriber(): void
|
||||
```
|
||||
|
||||
初始化 tracing 日志订阅器,用于将Rust侧日志同步输出到hilog中,请先通过 hilogGlobalOptions 设置hilog的参数。
|
||||
|
||||
---
|
||||
|
||||
### isRunningNetwork
|
||||
|
||||
```ts
|
||||
isRunningNetwork(instId: string): boolean
|
||||
```
|
||||
|
||||
判断指定网络实例是否正在运行。
|
||||
|
||||
* `instId`:网络实例 ID
|
||||
|
||||
---
|
||||
|
||||
### parseNetworkConfig
|
||||
|
||||
```ts
|
||||
parseNetworkConfig(cfgJson: string): boolean
|
||||
```
|
||||
|
||||
校验网络配置(JSON 格式)是否合法。
|
||||
|
||||
* `cfgJson`:网络配置内容
|
||||
|
||||
---
|
||||
|
||||
### runNetworkInstance
|
||||
|
||||
```ts
|
||||
runNetworkInstance(cfgJson: string): boolean
|
||||
```
|
||||
|
||||
启动网络实例。
|
||||
|
||||
* `cfgJson`:网络配置(JSON)
|
||||
|
||||
---
|
||||
|
||||
### setTunFd
|
||||
|
||||
```ts
|
||||
setTunFd(instId: string, fd: number): boolean
|
||||
```
|
||||
|
||||
为指定网络实例设置 TUN 设备文件描述符。
|
||||
|
||||
* `instId`:网络实例 ID
|
||||
* `fd`:TUN 设备文件描述符
|
||||
|
||||
---
|
||||
|
||||
### stopNetworkInstance
|
||||
|
||||
```ts
|
||||
stopNetworkInstance(instNames: Array<string>): void
|
||||
```
|
||||
|
||||
停止指定的网络实例。
|
||||
|
||||
* `instNames`:网络实例名称列表
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
```ts
|
||||
// todo
|
||||
```
|
||||
+4
@@ -0,0 +1,4 @@
|
||||
import * as api from "libeasytier_ohrs.so";
|
||||
|
||||
export * from 'libeasytier_ohrs.so';
|
||||
export default api;
|
||||
+20
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"license": "LGPL-3.0",
|
||||
"author": "easytier",
|
||||
"name": "easytier-ohrs",
|
||||
"description": "EasyTier for OpenHarmonyOS",
|
||||
"main": "index.ets",
|
||||
"version": "0.0.1",
|
||||
"types": "libs/index.d.ts",
|
||||
"dependencies": {},
|
||||
"compatibleSdkVersion": "17",
|
||||
"compatibleSdkType": "OpenHarmony",
|
||||
"obfuscated": false,
|
||||
"nativeComponents": [
|
||||
{
|
||||
"name": "libeasytier_ohrs.so",
|
||||
"compatibleSdkVersion": "17",
|
||||
"compatibleSdkType": "OpenHarmony"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"module": {
|
||||
"name": "easytier-ohrs",
|
||||
"type": "har",
|
||||
    "deviceTypes": ["default", "tablet", "2in1"]
  }
}
|
||||
@@ -0,0 +1,4 @@
|
||||
pub(crate) mod repository;
|
||||
pub(crate) mod services;
|
||||
pub(crate) mod storage;
|
||||
pub(crate) mod types;
|
||||
@@ -0,0 +1,13 @@
|
||||
// NOTE(review): these #[path] declarations mount source files from the shared
// config_repo directory into this crate. The four submodules must be declared
// here as siblings of `repo` — presumably because config_repo.rs declares them
// as `mod field_store;` etc. relative to itself; confirm against the
// repository layout.
#[path = "../../config_repo/field_store.rs"]
mod field_store;
#[path = "../../config_repo/import_export.rs"]
mod import_export;
#[path = "../../config_repo/legacy_migration.rs"]
mod legacy_migration;
#[path = "../../config_repo/validation.rs"]
mod validation;

#[path = "../../config_repo.rs"]
mod repo;

// Flatten the shared implementation so callers use this module's path directly.
pub use repo::*;
|
||||
@@ -0,0 +1,2 @@
|
||||
pub(crate) mod schema_service;
|
||||
pub(crate) mod share_link_service;
|
||||
@@ -0,0 +1,414 @@
|
||||
use easytier::proto::ALL_DESCRIPTOR_BYTES;
|
||||
use napi_derive_ohos::napi;
|
||||
use once_cell::sync::Lazy;
|
||||
use prost_reflect::{Cardinality, DescriptorPool, FieldDescriptor, Kind, MessageDescriptor};
|
||||
use serde::Serialize;
|
||||
|
||||
/// One selectable option of an enum-typed field (label/value pair for UI pickers).
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct FieldOption {
    pub label: String,
    pub value: String,
}

/// A declarative validation rule attached to a schema field.
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct ValidationRule {
    /// Rule identifier, e.g. "required".
    pub rule_type: String,
    /// Optional rule argument (empty when the rule takes none).
    pub arg: String,
    /// Human-readable message shown when the rule fails.
    pub message: String,
}

/// One node of the NetworkConfig schema tree exposed to the ArkTS side.
/// `node_kind` distinguishes "schema" (root), "object", "enum" and "field" nodes.
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct NetworkConfigSchema {
    pub node_kind: String,
    pub name: String,
    /// Protobuf field number; 0 for non-field nodes.
    pub field_number: i32,
    /// Fully-qualified protobuf type name for message/enum nodes.
    pub type_name: Option<String>,
    /// UI hint such as "cidr_ip" or "peer[]"; see field_semantic_type().
    pub semantic_type: Option<String>,
    /// JS-facing value category: "boolean" | "string" | "number" | "enum" | "object".
    pub value_kind: String,
    pub is_list: bool,
    pub required: bool,
    /// Textual default such as "[]", "0", "\"\"", or an enum's first value number.
    pub default_value_text: Option<String>,
    pub enum_options: Vec<FieldOption>,
    pub validations: Vec<ValidationRule>,
    /// Child field nodes (for message-typed nodes / map entries).
    pub children: Vec<NetworkConfigSchema>,
    /// Referenced message/enum definitions; populated only on the root node.
    pub definitions: Vec<NetworkConfigSchema>,
}

/// Maps a top-level NetworkConfig field name to its protobuf field number.
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct ConfigFieldMapping {
    pub field_name: String,
    pub field_number: i32,
}
|
||||
|
||||
// Descriptor pool decoded once from the protobuf descriptors embedded in
// the easytier crate.
static DESCRIPTOR_POOL: Lazy<DescriptorPool> = Lazy::new(|| {
    DescriptorPool::decode(ALL_DESCRIPTOR_BYTES)
        .expect("easytier descriptor pool should decode from embedded protobuf descriptors")
});

/// Fully-qualified name of the message the schema tree is built from.
const NETWORK_CONFIG_MESSAGE_NAME: &str = "api.manage.NetworkConfig";

/// Shared accessor for the lazily decoded descriptor pool.
fn descriptor_pool() -> &'static DescriptorPool {
    &DESCRIPTOR_POOL
}

/// Looks up the NetworkConfig message descriptor; panics if the embedded
/// descriptors do not contain it (a build-time invariant).
fn network_config_descriptor() -> MessageDescriptor {
    descriptor_pool()
        .get_message_by_name(NETWORK_CONFIG_MESSAGE_NAME)
        .expect("api.manage.NetworkConfig descriptor should exist")
}
|
||||
|
||||
fn field_default_value_text(field: &FieldDescriptor) -> Option<String> {
|
||||
if field.is_list() || field.is_map() {
|
||||
return Some("[]".to_string());
|
||||
}
|
||||
|
||||
match field.kind() {
|
||||
Kind::Bool => Some("false".to_string()),
|
||||
Kind::String => Some("\"\"".to_string()),
|
||||
Kind::Bytes => Some("\"\"".to_string()),
|
||||
Kind::Int32
|
||||
| Kind::Sint32
|
||||
| Kind::Sfixed32
|
||||
| Kind::Int64
|
||||
| Kind::Sint64
|
||||
| Kind::Sfixed64
|
||||
| Kind::Uint32
|
||||
| Kind::Fixed32
|
||||
| Kind::Uint64
|
||||
| Kind::Fixed64
|
||||
| Kind::Float
|
||||
| Kind::Double => Some("0".to_string()),
|
||||
Kind::Enum(enum_desc) => enum_desc
|
||||
.get_value(0)
|
||||
.map(|value| value.number().to_string()),
|
||||
Kind::Message(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn field_type_name(field: &FieldDescriptor) -> Option<String> {
|
||||
match field.kind() {
|
||||
Kind::Enum(enum_desc) => Some(enum_desc.full_name().to_string()),
|
||||
Kind::Message(message_desc) => Some(message_desc.full_name().to_string()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn field_semantic_type(field: &FieldDescriptor) -> Option<String> {
|
||||
match field.name() {
|
||||
"virtual_ipv4" => Some("cidr_ip".to_string()),
|
||||
"network_length" => Some("cidr_mask".to_string()),
|
||||
"peer_urls" => Some("peer[]".to_string()),
|
||||
"proxy_cidrs" => Some("cidr[]".to_string()),
|
||||
"listener_urls" => Some("listener[]".to_string()),
|
||||
"routes" => Some("route[]".to_string()),
|
||||
"exit_nodes" => Some("ip[]".to_string()),
|
||||
"relay_network_whitelist" => Some("network_name[]".to_string()),
|
||||
"mapped_listeners" => Some("mapped_listener[]".to_string()),
|
||||
"port_forwards" => Some("port_forward[]".to_string()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn enum_options(kind: Kind) -> Vec<FieldOption> {
|
||||
match kind {
|
||||
Kind::Enum(enum_desc) => enum_desc
|
||||
.values()
|
||||
.map(|value| FieldOption {
|
||||
label: value.name().to_string(),
|
||||
value: value.number().to_string(),
|
||||
})
|
||||
.collect(),
|
||||
_ => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn should_expose_field(field: &FieldDescriptor) -> bool {
|
||||
match field.containing_oneof() {
|
||||
Some(_) => field
|
||||
.field_descriptor_proto()
|
||||
.proto3_optional
|
||||
.unwrap_or(false),
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn build_validations(field: &FieldDescriptor) -> Vec<ValidationRule> {
|
||||
if field.cardinality() == Cardinality::Required {
|
||||
return vec![ValidationRule {
|
||||
rule_type: "required".to_string(),
|
||||
arg: String::new(),
|
||||
message: format!("{} is required", field.name()),
|
||||
}];
|
||||
}
|
||||
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
fn kind_to_value_kind(field: &FieldDescriptor) -> String {
|
||||
if field.is_map() {
|
||||
return "object".to_string();
|
||||
}
|
||||
|
||||
match field.kind() {
|
||||
Kind::Bool => "boolean".to_string(),
|
||||
Kind::String | Kind::Bytes => "string".to_string(),
|
||||
Kind::Int32
|
||||
| Kind::Sint32
|
||||
| Kind::Sfixed32
|
||||
| Kind::Int64
|
||||
| Kind::Sint64
|
||||
| Kind::Sfixed64
|
||||
| Kind::Uint32
|
||||
| Kind::Fixed32
|
||||
| Kind::Uint64
|
||||
| Kind::Fixed64
|
||||
| Kind::Float
|
||||
| Kind::Double => "number".to_string(),
|
||||
Kind::Enum(_) => "enum".to_string(),
|
||||
Kind::Message(_) => "object".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Positional convenience constructor for NetworkConfigSchema; no logic
/// beyond field-by-field assembly (only `node_kind` is converted to an
/// owned String).
fn build_node(
    node_kind: &str,
    name: String,
    field_number: i32,
    type_name: Option<String>,
    semantic_type: Option<String>,
    value_kind: String,
    is_list: bool,
    required: bool,
    default_value_text: Option<String>,
    enum_options: Vec<FieldOption>,
    validations: Vec<ValidationRule>,
    children: Vec<NetworkConfigSchema>,
    definitions: Vec<NetworkConfigSchema>,
) -> NetworkConfigSchema {
    NetworkConfigSchema {
        node_kind: node_kind.to_string(),
        name,
        field_number,
        type_name,
        semantic_type,
        value_kind,
        is_list,
        required,
        default_value_text,
        enum_options,
        validations,
        children,
        definitions,
    }
}
|
||||
|
||||
fn build_map_entry_node(message_desc: &MessageDescriptor) -> NetworkConfigSchema {
|
||||
let key_field = message_desc.map_entry_key_field();
|
||||
let value_field = message_desc.map_entry_value_field();
|
||||
|
||||
build_node(
|
||||
"object",
|
||||
message_desc.name().to_string(),
|
||||
0,
|
||||
Some(message_desc.full_name().to_string()),
|
||||
None,
|
||||
"object".to_string(),
|
||||
false,
|
||||
true,
|
||||
None,
|
||||
Vec::new(),
|
||||
Vec::new(),
|
||||
vec![
|
||||
build_schema_field_node(&key_field),
|
||||
build_schema_field_node(&value_field),
|
||||
],
|
||||
Vec::new(),
|
||||
)
|
||||
}
|
||||
|
||||
fn field_children(field: &FieldDescriptor) -> Vec<NetworkConfigSchema> {
|
||||
if field.is_map() {
|
||||
if let Kind::Message(message_desc) = field.kind() {
|
||||
return vec![build_map_entry_node(&message_desc)];
|
||||
}
|
||||
}
|
||||
|
||||
match field.kind() {
|
||||
Kind::Message(message_desc) => build_message_children(&message_desc),
|
||||
_ => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_message_children(message_desc: &MessageDescriptor) -> Vec<NetworkConfigSchema> {
|
||||
message_desc
|
||||
.fields()
|
||||
.filter(should_expose_field)
|
||||
.map(|field| build_schema_field_node(&field))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Converts one protobuf field descriptor into a "field" schema node,
/// gathering type, semantic hint, defaults, enum options, validations and
/// child nodes from the helpers above.
fn build_schema_field_node(field: &FieldDescriptor) -> NetworkConfigSchema {
    build_node(
        "field",
        field.name().to_string(),
        field.number() as i32,
        field_type_name(field),
        field_semantic_type(field),
        kind_to_value_kind(field),
        // Maps are surfaced as lists of key/value entry objects.
        field.is_list() || field.is_map(),
        field.cardinality() == Cardinality::Required,
        field_default_value_text(field),
        enum_options(field.kind()),
        build_validations(field),
        field_children(field),
        Vec::new(),
    )
}
|
||||
|
||||
/// Flat list of every message and enum in the descriptor pool, excluding the
/// root NetworkConfig message and synthetic map-entry messages; sorted by
/// fully-qualified name for stable output.
fn collect_definitions() -> Vec<NetworkConfigSchema> {
    let mut definitions = Vec::new();

    for message_desc in descriptor_pool().all_messages() {
        let full_name = message_desc.full_name();
        // The root message is the tree itself; map entries are represented
        // inline by build_map_entry_node().
        if full_name == NETWORK_CONFIG_MESSAGE_NAME || message_desc.is_map_entry() {
            continue;
        }

        definitions.push(build_node(
            "object",
            full_name.to_string(),
            0,
            Some(full_name.to_string()),
            None,
            "object".to_string(),
            false,
            true,
            None,
            Vec::new(),
            Vec::new(),
            build_message_children(&message_desc),
            Vec::new(),
        ));
    }

    for enum_desc in descriptor_pool().all_enums() {
        definitions.push(build_node(
            "enum",
            enum_desc.full_name().to_string(),
            0,
            Some(enum_desc.full_name().to_string()),
            None,
            "enum".to_string(),
            false,
            false,
            None,
            enum_options(Kind::Enum(enum_desc.clone())),
            Vec::new(),
            Vec::new(),
            Vec::new(),
        ));
    }

    definitions.sort_by(|a, b| a.name.cmp(&b.name));
    definitions
}
|
||||
|
||||
/// Builds the root "schema" node: its children are NetworkConfig's own
/// exposed fields, and `definitions` carries every other message/enum type
/// in the descriptor pool.
fn build_network_config_schema() -> NetworkConfigSchema {
    let network_config = network_config_descriptor();
    build_node(
        "schema",
        network_config.name().to_string(),
        0,
        Some(network_config.full_name().to_string()),
        None,
        "object".to_string(),
        false,
        true,
        None,
        Vec::new(),
        Vec::new(),
        build_message_children(&network_config),
        collect_definitions(),
    )
}
|
||||
|
||||
fn build_network_config_field_mappings() -> Vec<ConfigFieldMapping> {
|
||||
network_config_descriptor()
|
||||
.fields()
|
||||
.filter(should_expose_field)
|
||||
.map(|field| ConfigFieldMapping {
|
||||
field_name: field.name().to_string(),
|
||||
field_number: field.number() as i32,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Public entry point: rebuilds the full NetworkConfig schema tree on each call.
pub fn get_network_config_schema() -> NetworkConfigSchema {
    build_network_config_schema()
}

/// Public entry point: top-level field name/number mappings (consumed by the
/// share-link key-compaction code).
pub fn get_network_config_field_mappings() -> Vec<ConfigFieldMapping> {
    build_network_config_field_mappings()
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: root metadata, semantic-type tagging, nested children and
    // the definitions list are all reachable from one
    // get_network_config_schema() call.
    #[test]
    fn schema_is_exposed_as_single_tree_type() {
        let schema = get_network_config_schema();
        assert_eq!(schema.node_kind, "schema");
        assert_eq!(schema.name, "NetworkConfig");
        assert_eq!(
            schema.type_name.as_deref(),
            Some("api.manage.NetworkConfig")
        );

        // Top-level field with a semantic UI hint.
        let virtual_ipv4 = schema
            .children
            .iter()
            .find(|field| field.name == "virtual_ipv4")
            .expect("virtual_ipv4 field");
        assert_eq!(virtual_ipv4.semantic_type.as_deref(), Some("cidr_ip"));

        // Message-typed field exposes its sub-fields as children.
        let secure_mode = schema
            .children
            .iter()
            .find(|field| field.name == "secure_mode")
            .expect("secure_mode field");
        assert!(
            secure_mode
                .children
                .iter()
                .any(|field| field.name == "enabled")
        );

        // Referenced message types appear in the definitions list.
        let secure_mode_definition = schema
            .definitions
            .iter()
            .find(|definition| definition.name == "common.SecureModeConfig")
            .expect("secure mode definition");
        assert!(
            secure_mode_definition
                .children
                .iter()
                .any(|field| field.name == "local_private_key")
        );

        // Enums appear in definitions with their options populated.
        let networking_method_definition = schema
            .definitions
            .iter()
            .find(|definition| definition.name == "api.manage.NetworkingMethod")
            .expect("networking method enum definition");
        assert!(
            networking_method_definition
                .enum_options
                .iter()
                .any(|option| option.label == "PublicServer")
        );
    }
}
|
||||
@@ -0,0 +1,197 @@
|
||||
use crate::config::repository::{get_config_record, save_config_record};
|
||||
use crate::config::services::schema_service::get_network_config_field_mappings;
|
||||
use crate::config::types::stored_config::SharedConfigLinkPayload;
|
||||
use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD};
|
||||
use easytier::proto::api::manage::NetworkConfig;
|
||||
use flate2::{Compression, read::ZlibDecoder, write::ZlibEncoder};
|
||||
use gethostname::gethostname;
|
||||
use std::collections::HashMap;
|
||||
use std::io::{Read, Write};
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
const SHARE_LINK_HOST: &str = "easytier.cn";
|
||||
const SHARE_LINK_PATH: &str = "/comp_cfg";
|
||||
|
||||
fn field_name_to_id_map() -> HashMap<String, String> {
|
||||
get_network_config_field_mappings()
|
||||
.into_iter()
|
||||
.map(|mapping| (mapping.field_name, mapping.field_number.to_string()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn field_id_to_name_map() -> HashMap<String, String> {
|
||||
get_network_config_field_mappings()
|
||||
.into_iter()
|
||||
.map(|mapping| (mapping.field_number.to_string(), mapping.field_name))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Drops null values and empty arrays so the compressed share link stays
/// compact; every other value is passed through by clone.
fn prune_empty(value: &serde_json::Value) -> Option<serde_json::Value> {
    let droppable = match value {
        serde_json::Value::Null => true,
        serde_json::Value::Array(items) => items.is_empty(),
        _ => false,
    };
    if droppable {
        None
    } else {
        Some(value.clone())
    }
}
|
||||
|
||||
/// Serializes a NetworkConfig to JSON with top-level field names replaced by
/// their protobuf field numbers (shorter keys → shorter share links); null
/// values and empty arrays are dropped. Nested objects keep their original
/// keys — only top-level keys have a number mapping.
fn map_config_json(config: &NetworkConfig) -> Result<String, String> {
    let field_name_to_id = field_name_to_id_map();
    let raw = serde_json::to_value(config).map_err(|err| err.to_string())?;
    let mut mapped = serde_json::Map::new();

    for (key, value) in raw.as_object().cloned().unwrap_or_default() {
        let Some(value) = prune_empty(&value) else {
            continue;
        };
        // Keys without a mapping are kept verbatim.
        let mapped_key = field_name_to_id.get(&key).cloned().unwrap_or(key);
        mapped.insert(mapped_key, value);
    }

    serde_json::to_string(&mapped).map_err(|err| err.to_string())
}
|
||||
|
||||
fn unmap_config_json(raw: &str) -> Result<NetworkConfig, String> {
|
||||
let field_id_to_name = field_id_to_name_map();
|
||||
let value = serde_json::from_str::<serde_json::Value>(raw).map_err(|err| err.to_string())?;
|
||||
let mut mapped = serde_json::Map::new();
|
||||
for (key, value) in value.as_object().cloned().unwrap_or_default() {
|
||||
let field_name = field_id_to_name.get(&key).cloned().unwrap_or(key);
|
||||
mapped.insert(field_name, value);
|
||||
}
|
||||
serde_json::from_value(serde_json::Value::Object(mapped)).map_err(|err| err.to_string())
|
||||
}
|
||||
|
||||
fn compress_to_base64url(raw: &str) -> Result<String, String> {
|
||||
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::best());
|
||||
encoder
|
||||
.write_all(raw.as_bytes())
|
||||
.map_err(|err| err.to_string())?;
|
||||
let compressed = encoder.finish().map_err(|err| err.to_string())?;
|
||||
Ok(URL_SAFE_NO_PAD.encode(compressed))
|
||||
}
|
||||
|
||||
fn decompress_from_base64url(raw: &str) -> Result<String, String> {
|
||||
let compressed = URL_SAFE_NO_PAD.decode(raw).map_err(|err| err.to_string())?;
|
||||
let mut decoder = ZlibDecoder::new(compressed.as_slice());
|
||||
let mut out = String::new();
|
||||
decoder
|
||||
.read_to_string(&mut out)
|
||||
.map_err(|err| err.to_string())?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Builds an https://easytier.cn/comp_cfg share link for a stored config.
/// The config JSON is key-compacted, zlib-compressed and base64url-encoded
/// into the `cfg` query parameter; `name` and `only_start` are appended as
/// optional parameters. Returns None on any load/encode failure.
pub fn build_config_share_link(
    config_id: &str,
    display_name: Option<String>,
    only_start: bool,
) -> Option<String> {
    let record = get_config_record(config_id)?;
    let config = serde_json::from_str::<NetworkConfig>(&record.config_json).ok()?;
    let mapped_json = map_config_json(&config).ok()?;
    let compressed = compress_to_base64url(&mapped_json).ok()?;
    // Caller-provided name wins over the stored display name; an empty
    // result suppresses the `name` parameter entirely.
    let final_name = display_name
        .or(Some(record.meta.display_name))
        .filter(|name| !name.is_empty());

    let mut url = Url::parse(&format!("https://{SHARE_LINK_HOST}{SHARE_LINK_PATH}")).ok()?;
    url.query_pairs_mut().append_pair("cfg", &compressed);
    if let Some(name) = final_name {
        url.query_pairs_mut().append_pair("name", &name);
    }
    if only_start {
        url.query_pairs_mut().append_pair("only_start", "true");
    }
    Some(url.to_string())
}
|
||||
|
||||
/// Parses a share link produced by build_config_share_link. Links on other
/// hosts or paths are rejected. The decoded config receives a fresh instance
/// id and — when non-empty — the local hostname, so an import never reuses
/// the sharer's identity. Returns None on any parse/decode failure.
pub fn parse_config_share_link(share_link: &str) -> Option<SharedConfigLinkPayload> {
    let url = Url::parse(share_link).ok()?;
    if url.host_str()? != SHARE_LINK_HOST || url.path() != SHARE_LINK_PATH {
        return None;
    }

    // The compressed config payload is mandatory.
    let cfg = url
        .query_pairs()
        .find(|(key, _)| key == "cfg")?
        .1
        .to_string();
    let mapped_json = decompress_from_base64url(&cfg).ok()?;
    let mut config = unmap_config_json(&mapped_json).ok()?;
    config.instance_id = Some(Uuid::new_v4().to_string());
    let hostname = gethostname().to_string_lossy().to_string();
    if !hostname.is_empty() {
        config.hostname = Some(hostname);
    }

    let config_json = serde_json::to_string(&config).ok()?;
    // Optional display name; an empty value is treated as absent.
    let display_name = url
        .query_pairs()
        .find(|(key, _)| key == "name")
        .map(|(_, value)| value.to_string())
        .filter(|name| !name.is_empty());
    // only_start defaults to false unless the parameter is exactly "true".
    let only_start = url
        .query_pairs()
        .find(|(key, _)| key == "only_start")
        .map(|(_, value)| value == "true")
        .unwrap_or(false);

    Some(SharedConfigLinkPayload {
        config_json,
        display_name,
        only_start,
    })
}
|
||||
|
||||
/// Parses a share link and persists the contained config via
/// save_config_record. Display-name precedence: non-empty override >
/// name embedded in the link > the new config id itself. Returns the stored
/// config id (the freshly generated instance id) on success.
pub fn import_config_share_link(
    share_link: &str,
    display_name_override: Option<String>,
) -> Option<String> {
    let payload = parse_config_share_link(share_link)?;
    let config = serde_json::from_str::<NetworkConfig>(&payload.config_json).ok()?;
    // parse_config_share_link always sets instance_id, which becomes the id
    // under which the config is stored.
    let config_id = config.instance_id.clone()?;
    let display_name = display_name_override
        .filter(|name| !name.is_empty())
        .or(payload.display_name)
        .unwrap_or_else(|| config_id.clone());

    save_config_record(config_id.clone(), display_name, payload.config_json)?;
    Some(config_id)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config_repo::{create_config_record, init_config_store};
    use std::time::{SystemTime, UNIX_EPOCH};

    // Unique temp directory per run so repeated/parallel test runs do not
    // share database state.
    fn test_root() -> String {
        let unique = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        std::env::temp_dir()
            .join(format!("easytier_ohrs_share_test_{unique}"))
            .to_string_lossy()
            .into_owned()
    }

    // End-to-end: build a link from a stored config, parse it back, and
    // re-import it. The imported config must carry a fresh instance id.
    #[test]
    fn share_link_roundtrip_works() {
        assert!(init_config_store(test_root()));
        create_config_record("cfg-share".to_string(), "share-demo".to_string())
            .expect("create config");

        let link = build_config_share_link("cfg-share", None, true).expect("share link");
        let payload = parse_config_share_link(&link).expect("parse link");
        let config =
            serde_json::from_str::<NetworkConfig>(&payload.config_json).expect("config json");

        assert!(payload.only_start);
        assert_eq!(payload.display_name.as_deref(), Some("share-demo"));
        // parse_config_share_link replaces the instance id on decode.
        assert_ne!(config.instance_id.as_deref(), Some("cfg-share"));

        let imported_id = import_config_share_link(&link, None).expect("import link");
        assert_ne!(imported_id, "cfg-share");
    }
}
|
||||
@@ -0,0 +1,333 @@
|
||||
use crate::config::types::stored_config::{StoredConfigList, StoredConfigMeta};
|
||||
use ohos_hilog_binding::{hilog_debug, hilog_error};
|
||||
use rusqlite::{Connection, OptionalExtension, params};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Mutex;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
// Absolute path of the SQLite database file; set once by
// init_config_meta_store() and read by every db accessor.
static CONFIG_DB_PATH: Mutex<Option<PathBuf>> = Mutex::new(None);
const CONFIG_DB_FILE_NAME: &str = "easytier-config-store.db";

/// Raw row image of the stored_configs table, converted to the NAPI-facing
/// StoredConfigMeta by to_meta().
#[derive(Debug, Clone)]
struct StoredConfigMetaRecord {
    config_id: String,
    display_name: String,
    // Unix seconds stored as TEXT (see now_ts_string()).
    created_at: String,
    updated_at: String,
    favorite: bool,
    temporary: bool,
}
|
||||
|
||||
/// Current unix time in whole seconds, rendered as a string; "0" if the
/// system clock reports a time before the epoch.
pub(crate) fn now_ts_string() -> String {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs().to_string(),
        Err(_) => "0".to_string(),
    }
}
|
||||
|
||||
fn db_file_path() -> Option<PathBuf> {
|
||||
CONFIG_DB_PATH
|
||||
.lock()
|
||||
.ok()
|
||||
.and_then(|guard| guard.as_ref().cloned())
|
||||
}
|
||||
|
||||
/// Creates the tables and index if missing and enables foreign keys.
/// stored_config_fields rows cascade-delete with their parent config row.
fn init_schema(conn: &Connection) -> rusqlite::Result<()> {
    conn.execute_batch(
        "PRAGMA foreign_keys = ON;
        CREATE TABLE IF NOT EXISTS stored_configs (
            config_id TEXT PRIMARY KEY,
            display_name TEXT NOT NULL,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL,
            favorite INTEGER NOT NULL DEFAULT 0,
            temporary INTEGER NOT NULL DEFAULT 0
        );
        CREATE TABLE IF NOT EXISTS stored_config_fields (
            config_id TEXT NOT NULL,
            field_name TEXT NOT NULL,
            field_json TEXT NOT NULL,
            updated_at TEXT NOT NULL,
            PRIMARY KEY (config_id, field_name),
            FOREIGN KEY (config_id) REFERENCES stored_configs(config_id) ON DELETE CASCADE
        );
        CREATE INDEX IF NOT EXISTS idx_stored_config_fields_config_id
            ON stored_config_fields(config_id);",
    )
}
|
||||
|
||||
pub(crate) fn open_db() -> Option<Connection> {
|
||||
let path = db_file_path()?;
|
||||
let conn = match Connection::open(&path) {
|
||||
Ok(conn) => conn,
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] failed to open config db {}: {}", path.display(), e);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = init_schema(&conn) {
|
||||
hilog_error!(
|
||||
"[Rust] failed to initialize config db {}: {}",
|
||||
path.display(),
|
||||
e
|
||||
);
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(conn)
|
||||
}
|
||||
|
||||
/// Maps a stored_configs row to a record. Column order: config_id,
/// display_name, created_at, updated_at, favorite, temporary. SQLite stores
/// the two booleans as INTEGER 0/1.
fn row_to_meta(row: &rusqlite::Row<'_>) -> rusqlite::Result<StoredConfigMetaRecord> {
    Ok(StoredConfigMetaRecord {
        config_id: row.get(0)?,
        display_name: row.get(1)?,
        created_at: row.get(2)?,
        updated_at: row.get(3)?,
        favorite: row.get::<_, i64>(4)? != 0,
        temporary: row.get::<_, i64>(5)? != 0,
    })
}
|
||||
|
||||
/// Fetches one config's metadata row. None covers both "row not found" and
/// query errors (errors are coalesced away by .ok()).
fn load_meta_record(conn: &Connection, config_id: &str) -> Option<StoredConfigMetaRecord> {
    conn.query_row(
        "SELECT config_id, display_name, created_at, updated_at, favorite, temporary
         FROM stored_configs WHERE config_id = ?1",
        params![config_id],
        row_to_meta,
    )
    .optional()
    .ok()
    .flatten()
}
|
||||
|
||||
fn to_meta(record: StoredConfigMetaRecord) -> StoredConfigMeta {
|
||||
StoredConfigMeta {
|
||||
config_id: record.config_id,
|
||||
display_name: record.display_name,
|
||||
created_at: record.created_at,
|
||||
updated_at: record.updated_at,
|
||||
favorite: record.favorite,
|
||||
temporary: record.temporary,
|
||||
}
|
||||
}
|
||||
|
||||
/// Initializes the metadata store: creates the database directory, records
/// the database path in CONFIG_DB_PATH, and opens the database once so
/// schema-creation errors surface here rather than on first use. Returns
/// false (after logging) on any failure; must be called before the other
/// functions in this module can succeed.
pub fn init_config_meta_store(root_dir: String) -> bool {
    let root = PathBuf::from(root_dir);
    if let Err(e) = std::fs::create_dir_all(&root) {
        hilog_error!(
            "[Rust] failed to create config db dir {}: {}",
            root.display(),
            e
        );
        return false;
    }

    let db_path = root.join(CONFIG_DB_FILE_NAME);
    match CONFIG_DB_PATH.lock() {
        Ok(mut guard) => {
            *guard = Some(db_path.clone());
        }
        Err(e) => {
            hilog_error!("[Rust] failed to lock config db path: {}", e);
            return false;
        }
    }

    // Eager open: creates the schema and validates the path.
    if open_db().is_none() {
        return false;
    }

    hilog_debug!("[Rust] initialized config db at {}", db_path.display());
    true
}
|
||||
|
||||
/// Lists all stored config metadata, most recently updated first with
/// display name as tie-breaker. Returns an empty list (after logging) on any
/// database failure; individual rows that fail to map are skipped.
pub fn list_config_meta_entries() -> StoredConfigList {
    let Some(conn) = open_db() else {
        return StoredConfigList { configs: vec![] };
    };

    let mut stmt = match conn.prepare(
        "SELECT config_id, display_name, created_at, updated_at, favorite, temporary
         FROM stored_configs
         ORDER BY updated_at DESC, display_name ASC",
    ) {
        Ok(stmt) => stmt,
        Err(e) => {
            hilog_error!("[Rust] failed to prepare list meta query: {}", e);
            return StoredConfigList { configs: vec![] };
        }
    };

    let rows = match stmt.query_map([], row_to_meta) {
        Ok(rows) => rows,
        Err(e) => {
            hilog_error!("[Rust] failed to list config meta rows: {}", e);
            return StoredConfigList { configs: vec![] };
        }
    };

    // Per-row mapping errors are dropped rather than failing the whole list.
    let configs = rows.filter_map(Result::ok).map(to_meta).collect();
    StoredConfigList { configs }
}
|
||||
|
||||
/// Display name of a stored config; None when the store is unavailable or
/// the config does not exist.
pub fn get_config_display_name(config_id: &str) -> Option<String> {
    let conn = open_db()?;
    load_meta_record(&conn, config_id).map(|record| record.display_name)
}
|
||||
|
||||
/// Full metadata of a stored config; None when the store is unavailable or
/// the config does not exist.
pub fn get_config_meta(config_id: &str) -> Option<StoredConfigMeta> {
    let conn = open_db()?;
    load_meta_record(&conn, config_id).map(to_meta)
}
|
||||
|
||||
/// Inserts or updates a config's metadata row. `created_at` is preserved for
/// existing rows; `updated_at` is always refreshed to now. This function is
/// infallible from the caller's perspective: when the database is
/// unavailable or the re-read fails, it returns a synthesized in-memory meta
/// value instead.
pub fn upsert_config_meta(
    config_id: String,
    display_name: String,
    favorite: bool,
    temporary: bool,
) -> StoredConfigMeta {
    let now = now_ts_string();
    let Some(conn) = open_db() else {
        // No database: synthesize a result so the caller still gets a value.
        return StoredConfigMeta {
            config_id,
            display_name,
            created_at: now.clone(),
            updated_at: now,
            favorite,
            temporary,
        };
    };

    // Keep the original creation timestamp when the row already exists.
    let created_at = load_meta_record(&conn, &config_id)
        .map(|record| record.created_at)
        .unwrap_or_else(|| now.clone());

    if let Err(e) = conn.execute(
        "INSERT INTO stored_configs (
            config_id, display_name, created_at, updated_at, favorite, temporary
        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6)
        ON CONFLICT(config_id) DO UPDATE SET
            display_name = excluded.display_name,
            updated_at = excluded.updated_at,
            favorite = excluded.favorite,
            temporary = excluded.temporary",
        params![
            config_id,
            display_name,
            created_at,
            now,
            if favorite { 1 } else { 0 },
            if temporary { 1 } else { 0 }
        ],
    ) {
        hilog_error!("[Rust] failed to upsert config meta: {}", e);
    }

    // Re-read so the returned value reflects what was actually stored.
    get_config_meta(&config_id).unwrap_or(StoredConfigMeta {
        config_id,
        display_name,
        created_at,
        updated_at: now,
        favorite,
        temporary,
    })
}
|
||||
|
||||
/// Transaction-scoped variant of upsert_config_meta, for callers that need
/// the meta upsert to be atomic with other statements in the same
/// transaction. Preserves `created_at` for existing rows, refreshes
/// `updated_at`, and returns the re-read row (or a synthesized fallback when
/// the re-read fails). Returns None only when the INSERT itself fails.
pub(crate) fn upsert_config_meta_in_tx(
    tx: &rusqlite::Transaction<'_>,
    config_id: String,
    display_name: String,
    favorite: bool,
    temporary: bool,
) -> Option<StoredConfigMeta> {
    let now = now_ts_string();
    // Keep the original creation timestamp when the row already exists.
    let created_at = tx
        .query_row(
            "SELECT config_id, display_name, created_at, updated_at, favorite, temporary
             FROM stored_configs WHERE config_id = ?1",
            params![config_id],
            row_to_meta,
        )
        .optional()
        .ok()
        .flatten()
        .map(|record| record.created_at)
        .unwrap_or_else(|| now.clone());

    tx.execute(
        "INSERT INTO stored_configs (
            config_id, display_name, created_at, updated_at, favorite, temporary
        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6)
        ON CONFLICT(config_id) DO UPDATE SET
            display_name = excluded.display_name,
            updated_at = excluded.updated_at,
            favorite = excluded.favorite,
            temporary = excluded.temporary",
        params![
            config_id,
            display_name,
            created_at,
            now,
            if favorite { 1 } else { 0 },
            if temporary { 1 } else { 0 }
        ],
    )
    .ok()?;

    // Re-read so the caller sees exactly what was stored.
    tx.query_row(
        "SELECT config_id, display_name, created_at, updated_at, favorite, temporary
         FROM stored_configs WHERE config_id = ?1",
        params![config_id],
        row_to_meta,
    )
    .optional()
    .ok()
    .flatten()
    .map(to_meta)
    .or(Some(StoredConfigMeta {
        config_id,
        display_name,
        created_at,
        updated_at: now,
        favorite,
        temporary,
    }))
}
|
||||
|
||||
/// Renames a stored config, refreshing `updated_at`. Returns None when the
/// database is unavailable, the config does not exist, or the UPDATE fails.
pub fn set_config_display_name(
    config_id: String,
    display_name: String,
) -> Option<StoredConfigMeta> {
    let conn = open_db()?;
    let mut record = load_meta_record(&conn, &config_id)?;
    record.display_name = display_name;
    record.updated_at = now_ts_string();

    conn.execute(
        "UPDATE stored_configs
         SET display_name = ?2, updated_at = ?3
         WHERE config_id = ?1",
        params![config_id, record.display_name, record.updated_at],
    )
    .ok()?;

    Some(to_meta(record))
}
|
||||
|
||||
pub fn delete_config_meta(config_id: &str) -> bool {
|
||||
let Some(conn) = open_db() else {
|
||||
return false;
|
||||
};
|
||||
|
||||
match conn.execute(
|
||||
"DELETE FROM stored_configs WHERE config_id = ?1",
|
||||
params![config_id],
|
||||
) {
|
||||
Ok(rows) => rows > 0,
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] failed to delete config meta {}: {}", config_id, e);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
pub(crate) mod config_meta;
|
||||
@@ -0,0 +1 @@
|
||||
pub(crate) mod stored_config;
|
||||
@@ -0,0 +1,68 @@
|
||||
use napi_derive_ohos::napi;
|
||||
use serde::Serialize;
|
||||
|
||||
/// Metadata for one stored network configuration, exposed to ArkTS via napi
/// and serialized to camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigMeta {
    pub config_id: String,
    pub display_name: String,
    // Timestamps are kept as strings; the storage layer produces them.
    pub created_at: String,
    pub updated_at: String,
    pub favorite: bool,
    pub temporary: bool,
}

/// A stored configuration: its metadata plus the full config JSON body.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigRecord {
    pub meta: StoredConfigMeta,
    pub config_json: String,
}

/// Wrapper so a list of metas can cross the napi boundary as one object.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigList {
    pub configs: Vec<StoredConfigMeta>,
}

/// Result of exporting a stored config as TOML text.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct ExportTomlResult {
    pub toml_text: String,
}

/// Minimal (id, display name) projection of a stored config.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct StoredConfigSummary {
    pub config_id: String,
    pub display_name: String,
}

/// Payload carried by a shared-config link.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct SharedConfigLinkPayload {
    pub config_json: String,
    pub display_name: Option<String>,
    // NOTE(review): presumably "start without persisting" — confirm against
    // the link-handling caller.
    pub only_start: bool,
}

/// One framed message exchanged over the local unix socket.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct LocalSocketSyncMessage {
    pub message_type: String,
    pub payload_json: String,
}

/// Generic string key/value pair. No serde rename attribute here, so the
/// field names serialize as-is (`key`, `value`).
#[derive(Debug, Clone, Serialize)]
#[napi(object)]
pub struct KeyValuePair {
    pub key: String,
    pub value: String,
}
|
||||
@@ -0,0 +1,349 @@
|
||||
use super::{field_store, import_export, legacy_migration, validation};
|
||||
use crate::config::storage::config_meta::{
|
||||
delete_config_meta, get_config_meta, init_config_meta_store, list_config_meta_entries, open_db,
|
||||
upsert_config_meta_in_tx,
|
||||
};
|
||||
use crate::config::types::stored_config::{ExportTomlResult, StoredConfigRecord};
|
||||
use easytier::common::config::ConfigLoader;
|
||||
use easytier::proto::api::manage::NetworkConfig;
|
||||
use ohos_hilog_binding::{hilog_debug, hilog_error};
|
||||
use rusqlite::params;
|
||||
use serde_json::Value;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Mutex;
|
||||
|
||||
static CONFIG_ROOT_DIR: Mutex<Option<PathBuf>> = Mutex::new(None);
|
||||
pub(crate) const CONFIG_DIR_NAME: &str = "easytier-configs";
|
||||
pub(crate) const KERNEL_SOCKET_FILE_NAME: &str = "easytier-kernel.sock";
|
||||
|
||||
pub(crate) fn config_root_dir() -> Option<PathBuf> {
|
||||
CONFIG_ROOT_DIR
|
||||
.lock()
|
||||
.ok()
|
||||
.and_then(|guard| guard.as_ref().cloned())
|
||||
}
|
||||
|
||||
pub(crate) fn kernel_socket_path() -> Option<PathBuf> {
|
||||
config_root_dir().map(|root| root.join(KERNEL_SOCKET_FILE_NAME))
|
||||
}
|
||||
|
||||
pub(crate) fn legacy_config_file_path(config_id: &str) -> Option<PathBuf> {
|
||||
legacy_migration::legacy_config_file_path(&config_root_dir(), CONFIG_DIR_NAME, config_id)
|
||||
}
|
||||
|
||||
pub fn init_config_store(root_dir: String) -> bool {
|
||||
let root = PathBuf::from(root_dir);
|
||||
let configs_dir = root.join(CONFIG_DIR_NAME);
|
||||
if let Err(e) = std::fs::create_dir_all(&configs_dir) {
|
||||
hilog_error!(
|
||||
"[Rust] failed to create config dir {}: {}",
|
||||
configs_dir.display(),
|
||||
e
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
match CONFIG_ROOT_DIR.lock() {
|
||||
Ok(mut guard) => {
|
||||
*guard = Some(root.clone());
|
||||
}
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] failed to lock config root dir: {}", e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if !init_config_meta_store(root.to_string_lossy().into_owned()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
hilog_debug!(
|
||||
"[Rust] initialized config repo at {}",
|
||||
configs_dir.display()
|
||||
);
|
||||
true
|
||||
}
|
||||
|
||||
fn migrate_legacy_file_if_needed(config_id: &str) -> Option<()> {
|
||||
legacy_migration::migrate_legacy_file_if_needed(
|
||||
&config_root_dir(),
|
||||
CONFIG_DIR_NAME,
|
||||
config_id,
|
||||
save_config_record,
|
||||
)
|
||||
}
|
||||
|
||||
/// Validate, normalize, and persist a config under `config_id`.
///
/// The json must parse as a `NetworkConfig` and round-trip through easytier's
/// TOML generator. Meta upsert and field replacement happen in one sqlite
/// transaction; on success any leftover legacy json file is removed.
/// Returns the saved record (with the normalized json), or None on any error.
pub fn save_config_record(
    config_id: String,
    display_name: String,
    config_json: String,
) -> Option<StoredConfigRecord> {
    // Validation also forces config.instance_id = config_id.
    let config = match validation::validate_config_json(&config_json, config_id.clone()) {
        Ok(config) => config,
        Err(e) => {
            hilog_error!("[Rust] save_config_record failed {}", e);
            return None;
        }
    };

    // Re-serialize so callers get a canonical form, not their input bytes.
    let normalized_json = match serde_json::to_string(&config) {
        Ok(raw) => raw,
        Err(e) => {
            hilog_error!(
                "[Rust] failed to serialize normalized config {}: {}",
                config_id,
                e
            );
            return None;
        }
    };

    let fields = match validation::config_to_top_level_map(&config) {
        Some(fields) => fields,
        None => return None,
    };

    let conn = open_db()?;
    let tx = conn.unchecked_transaction().ok()?;
    // Preserve user flags (favorite/temporary) across re-saves.
    let existing_meta = get_config_meta(&config_id);
    let favorite = existing_meta
        .as_ref()
        .map(|meta| meta.favorite)
        .unwrap_or(false);
    let temporary = existing_meta
        .as_ref()
        .map(|meta| meta.temporary)
        .unwrap_or(false);
    let meta = upsert_config_meta_in_tx(&tx, config_id.clone(), display_name, favorite, temporary)?;

    field_store::replace_config_fields(&tx, &config_id, fields)?;

    tx.commit().ok()?;

    // Sqlite is now authoritative; drop the legacy file (best effort).
    if let Some(legacy_path) = legacy_config_file_path(&config_id) {
        if legacy_path.exists() {
            let _ = std::fs::remove_file(legacy_path);
        }
    }

    Some(StoredConfigRecord {
        meta,
        config_json: normalized_json,
    })
}
|
||||
|
||||
pub fn load_config_json(config_id: &str) -> Option<String> {
|
||||
migrate_legacy_file_if_needed(config_id)?;
|
||||
let object = field_store::load_config_map_from_db(config_id)?;
|
||||
serde_json::to_string(&Value::Object(object)).ok()
|
||||
}
|
||||
|
||||
pub fn get_config_record(config_id: &str) -> Option<StoredConfigRecord> {
|
||||
let config_json = load_config_json(config_id)?;
|
||||
let meta = get_config_meta(config_id)?;
|
||||
Some(StoredConfigRecord { meta, config_json })
|
||||
}
|
||||
|
||||
pub fn get_config_field_value(config_id: &str, field: &str) -> Option<String> {
|
||||
migrate_legacy_file_if_needed(config_id)?;
|
||||
let conn = open_db()?;
|
||||
conn.query_row(
|
||||
"SELECT field_json FROM stored_config_fields
|
||||
WHERE config_id = ?1 AND field_name = ?2",
|
||||
params![config_id, field],
|
||||
|row| row.get::<_, String>(0),
|
||||
)
|
||||
.ok()
|
||||
}
|
||||
|
||||
pub fn set_config_field_value(config_id: &str, field: &str, json_value: &str) -> bool {
|
||||
if field.contains('.') {
|
||||
return false;
|
||||
}
|
||||
|
||||
let raw = match load_config_json(config_id) {
|
||||
Some(raw) => raw,
|
||||
None => return false,
|
||||
};
|
||||
let mut value = match serde_json::from_str::<Value>(&raw) {
|
||||
Ok(value) => value,
|
||||
Err(_) => return false,
|
||||
};
|
||||
let new_field_value = match serde_json::from_str::<Value>(json_value) {
|
||||
Ok(value) => value,
|
||||
Err(_) => return false,
|
||||
};
|
||||
let object = match value.as_object_mut() {
|
||||
Some(object) => object,
|
||||
None => return false,
|
||||
};
|
||||
object.insert(field.to_string(), new_field_value);
|
||||
|
||||
let normalized = match serde_json::to_string(&value) {
|
||||
Ok(raw) => raw,
|
||||
Err(_) => return false,
|
||||
};
|
||||
|
||||
let display_name = get_config_meta(config_id)
|
||||
.map(|meta| meta.display_name)
|
||||
.unwrap_or_else(|| config_id.to_string());
|
||||
|
||||
save_config_record(config_id.to_string(), display_name, normalized).is_some()
|
||||
}
|
||||
|
||||
pub fn get_display_name(config_id: &str) -> Option<String> {
|
||||
get_config_meta(config_id).map(|meta| meta.display_name)
|
||||
}
|
||||
|
||||
/// JSON for a freshly built default network config (None when building fails).
pub fn get_default_config_json() -> Option<String> {
    crate::build_default_network_config_json().ok()
}
|
||||
|
||||
pub fn create_config_record(config_id: String, display_name: String) -> Option<StoredConfigRecord> {
|
||||
let raw = get_default_config_json()?;
|
||||
let mut config = serde_json::from_str::<NetworkConfig>(&raw).ok()?;
|
||||
config.instance_id = Some(config_id.clone());
|
||||
let normalized_json = serde_json::to_string(&config).ok()?;
|
||||
save_config_record(config_id, display_name, normalized_json)
|
||||
}
|
||||
|
||||
pub fn start_kernel_with_config_id(config_id: &str) -> bool {
|
||||
let raw = match load_config_json(config_id) {
|
||||
Some(raw) => raw,
|
||||
None => return false,
|
||||
};
|
||||
crate::run_network_instance_from_json(&raw)
|
||||
}
|
||||
|
||||
pub fn list_config_meta_json() -> String {
|
||||
serde_json::to_string(&list_config_meta_entries().configs).unwrap_or_else(|_| "[]".to_string())
|
||||
}
|
||||
|
||||
pub fn delete_config_record(config_id: &str) -> bool {
|
||||
if let Some(path) = legacy_config_file_path(config_id) {
|
||||
if path.exists() {
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
}
|
||||
|
||||
let conn = match open_db() {
|
||||
Some(conn) => conn,
|
||||
None => return false,
|
||||
};
|
||||
if let Err(e) = conn.execute(
|
||||
"DELETE FROM stored_config_fields WHERE config_id = ?1",
|
||||
params![config_id],
|
||||
) {
|
||||
hilog_error!("[Rust] failed to delete config fields {}: {}", config_id, e);
|
||||
return false;
|
||||
}
|
||||
|
||||
delete_config_meta(config_id)
|
||||
}
|
||||
|
||||
pub fn export_config_toml(config_id: &str) -> Option<ExportTomlResult> {
|
||||
let record = get_config_record(config_id)?;
|
||||
import_export::export_config_toml_from_record(&record)
|
||||
}
|
||||
|
||||
/// Import a config from TOML text; `display_name` overrides any name found in
/// the TOML. Persistence is delegated to `save_config_record`.
pub fn import_toml_config(
    toml_text: String,
    display_name: Option<String>,
) -> Option<StoredConfigRecord> {
    import_export::import_toml_to_record(toml_text, display_name, save_config_record)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use rusqlite::params;
    use std::path::PathBuf;
    use std::time::{SystemTime, UNIX_EPOCH};

    /// Unique temp directory per test run so parallel tests do not share
    /// sqlite state.
    fn test_root() -> String {
        let unique = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let dir = std::env::temp_dir().join(format!("easytier_ohrs_test_{}", unique));
        dir.to_string_lossy().into_owned()
    }

    /// End-to-end: save a default config, read it back, confirm sqlite-only
    /// persistence, export TOML, then delete.
    #[test]
    fn save_get_export_delete_roundtrip() {
        let root = test_root();
        assert!(init_config_store(root.clone()));

        let config_json = crate::build_default_network_config_json().expect("default config");
        let saved = save_config_record("cfg-1".to_string(), "test-config".to_string(), config_json)
            .expect("save config");

        assert_eq!(saved.meta.config_id, "cfg-1");
        assert_eq!(saved.meta.display_name, "test-config");

        let loaded = get_config_record("cfg-1").expect("load config");
        assert_eq!(loaded.meta.display_name, "test-config");
        assert!(loaded.config_json.contains("cfg-1"));

        // Sqlite replaced the legacy per-config json files.
        let legacy_json_path = PathBuf::from(&root)
            .join(CONFIG_DIR_NAME)
            .join("cfg-1.json");
        assert!(
            !legacy_json_path.exists(),
            "config should no longer be persisted as a per-config json file"
        );

        let conn = open_db().expect("db should be open");
        let field_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM stored_config_fields WHERE config_id = ?1",
                params!["cfg-1"],
                |row| row.get(0),
            )
            .expect("count config fields");
        assert!(field_count > 0, "config fields should be stored in sqlite");

        let exported = export_config_toml("cfg-1").expect("export toml");
        assert!(exported.toml_text.contains("instance_id"));

        assert!(delete_config_record("cfg-1"));
        assert!(get_config_record("cfg-1").is_none());
    }

    /// Field-level update must be surgical: only the targeted top-level field
    /// changes, siblings keep their values.
    #[test]
    fn set_config_field_updates_only_requested_top_level_field() {
        let root = test_root();
        assert!(init_config_store(root));

        let config_json = crate::build_default_network_config_json().expect("default config");
        save_config_record(
            "cfg-field".to_string(),
            "field-config".to_string(),
            config_json,
        )
        .expect("save config");

        let before_network_name = get_config_field_value("cfg-field", "network_name");
        let before_instance_id = get_config_field_value("cfg-field", "instance_id")
            .expect("instance id field should exist");

        assert!(set_config_field_value(
            "cfg-field",
            "network_name",
            "\"changed-network\""
        ));

        assert_eq!(
            get_config_field_value("cfg-field", "network_name"),
            Some("\"changed-network\"".to_string())
        );
        // Untouched sibling field keeps its value.
        assert_eq!(
            get_config_field_value("cfg-field", "instance_id"),
            Some(before_instance_id)
        );
        assert_ne!(
            get_config_field_value("cfg-field", "network_name"),
            before_network_name
        );
    }
}
|
||||
@@ -0,0 +1,67 @@
|
||||
use crate::config::storage::config_meta::{now_ts_string, open_db};
|
||||
use ohos_hilog_binding::hilog_error;
|
||||
use rusqlite::{Connection, params};
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
pub(super) fn load_config_map_from_db(config_id: &str) -> Option<Map<String, Value>> {
|
||||
let conn = open_db()?;
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT field_name, field_json
|
||||
FROM stored_config_fields
|
||||
WHERE config_id = ?1",
|
||||
)
|
||||
.ok()?;
|
||||
let rows = stmt
|
||||
.query_map(params![config_id], |row| {
|
||||
let field_name: String = row.get(0)?;
|
||||
let field_json: String = row.get(1)?;
|
||||
Ok((field_name, field_json))
|
||||
})
|
||||
.ok()?;
|
||||
|
||||
let mut object = Map::new();
|
||||
for row in rows {
|
||||
let (field_name, field_json) = row.ok()?;
|
||||
let value = serde_json::from_str::<Value>(&field_json).ok()?;
|
||||
object.insert(field_name, value);
|
||||
}
|
||||
|
||||
if object.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(object)
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn replace_config_fields(
|
||||
tx: &Connection,
|
||||
config_id: &str,
|
||||
fields: Map<String, Value>,
|
||||
) -> Option<()> {
|
||||
if let Err(e) = tx.execute(
|
||||
"DELETE FROM stored_config_fields WHERE config_id = ?1",
|
||||
params![config_id],
|
||||
) {
|
||||
hilog_error!(
|
||||
"[Rust] failed to clear existing config fields {}: {}",
|
||||
config_id,
|
||||
e
|
||||
);
|
||||
return None;
|
||||
}
|
||||
|
||||
for (field_name, value) in fields {
|
||||
let field_json = serde_json::to_string(&value).ok()?;
|
||||
if let Err(e) = tx.execute(
|
||||
"INSERT INTO stored_config_fields (config_id, field_name, field_json, updated_at)
|
||||
VALUES (?1, ?2, ?3, ?4)",
|
||||
params![config_id, field_name, field_json, now_ts_string()],
|
||||
) {
|
||||
hilog_error!("[Rust] failed to persist config field {}: {}", config_id, e);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
Some(())
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
use crate::config::types::stored_config::{ExportTomlResult, StoredConfigRecord};
|
||||
use easytier::common::config::{ConfigLoader, TomlConfigLoader};
|
||||
use easytier::proto::api::manage::NetworkConfig;
|
||||
|
||||
pub(super) fn export_config_toml_from_record(
|
||||
record: &StoredConfigRecord,
|
||||
) -> Option<ExportTomlResult> {
|
||||
let config = serde_json::from_str::<NetworkConfig>(&record.config_json).ok()?;
|
||||
let toml = config.gen_config().ok()?;
|
||||
Some(ExportTomlResult {
|
||||
toml_text: toml.dump(),
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn import_toml_to_record(
|
||||
toml_text: String,
|
||||
display_name: Option<String>,
|
||||
save_config_record: impl Fn(String, String, String) -> Option<StoredConfigRecord>,
|
||||
) -> Option<StoredConfigRecord> {
|
||||
let config =
|
||||
NetworkConfig::new_from_config(TomlConfigLoader::new_from_str(&toml_text).ok()?).ok()?;
|
||||
|
||||
let config_id = config.instance_id.clone()?;
|
||||
let name_from_toml = toml_text
|
||||
.lines()
|
||||
.find_map(|line| {
|
||||
let trimmed = line.trim();
|
||||
if !trimmed.starts_with("instance_name") {
|
||||
return None;
|
||||
}
|
||||
trimmed.split_once('=').map(|(_, value)| {
|
||||
value
|
||||
.trim()
|
||||
.trim_matches('"')
|
||||
.trim_matches('\'')
|
||||
.to_string()
|
||||
})
|
||||
})
|
||||
.filter(|name| !name.is_empty());
|
||||
|
||||
let final_name = display_name
|
||||
.filter(|name| !name.is_empty())
|
||||
.or(name_from_toml)
|
||||
.unwrap_or_else(|| config_id.clone());
|
||||
|
||||
let config_json = serde_json::to_string(&config).ok()?;
|
||||
save_config_record(config_id, final_name, config_json)
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
use crate::config::storage::config_meta::get_config_meta;
|
||||
use ohos_hilog_binding::hilog_error;
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub(super) fn legacy_config_file_path(
|
||||
root_dir: &Option<PathBuf>,
|
||||
config_dir_name: &str,
|
||||
config_id: &str,
|
||||
) -> Option<PathBuf> {
|
||||
root_dir.as_ref().map(|root| {
|
||||
root.join(config_dir_name)
|
||||
.join(format!("{}.json", config_id))
|
||||
})
|
||||
}
|
||||
|
||||
/// If a pre-sqlite json file exists for `config_id`, import its contents via
/// `save_config_record` and delete the file.
///
/// Returns Some(()) when nothing needed migrating or migration succeeded;
/// None when the store is uninitialized or the import failed. A failed unlink
/// after a successful import is only logged.
pub(super) fn migrate_legacy_file_if_needed(
    root_dir: &Option<PathBuf>,
    config_dir_name: &str,
    config_id: &str,
    save_config_record: impl Fn(
        String,
        String,
        String,
    ) -> Option<crate::config::types::stored_config::StoredConfigRecord>,
) -> Option<()> {
    let legacy_path = legacy_config_file_path(root_dir, config_dir_name, config_id)?;
    if !legacy_path.exists() {
        // Nothing on disk — already migrated or never stored as a file.
        return Some(());
    }

    let raw = std::fs::read_to_string(&legacy_path).ok()?;
    // Keep an existing display name if the meta row already exists.
    let display_name = get_config_meta(config_id)
        .map(|meta| meta.display_name)
        .unwrap_or_else(|| config_id.to_string());
    save_config_record(config_id.to_string(), display_name, raw)?;

    // Data is now in sqlite; a failed unlink only costs disk space.
    if let Err(e) = std::fs::remove_file(&legacy_path) {
        hilog_error!(
            "[Rust] failed to remove legacy config file {}: {}",
            legacy_path.display(),
            e
        );
    }
    Some(())
}
|
||||
@@ -0,0 +1,30 @@
|
||||
use easytier::proto::api::manage::NetworkConfig;
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
/// Force `config.instance_id` to the caller-supplied id; an empty id is an
/// error because it doubles as the storage key.
pub(super) fn normalize_config_id(
    mut config: NetworkConfig,
    requested_id: String,
) -> Result<NetworkConfig, String> {
    if requested_id.is_empty() {
        Err("config_id is required".to_string())
    } else {
        config.instance_id = Some(requested_id);
        Ok(config)
    }
}
|
||||
|
||||
pub(super) fn validate_config_json(
|
||||
config_json: &str,
|
||||
config_id: String,
|
||||
) -> Result<NetworkConfig, String> {
|
||||
let config = serde_json::from_str::<NetworkConfig>(config_json)
|
||||
.map_err(|e| format!("parse config json failed: {}", e))?;
|
||||
let config = normalize_config_id(config, config_id)?;
|
||||
config
|
||||
.gen_config()
|
||||
.map_err(|e| format!("generate toml failed: {}", e))?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub(super) fn config_to_top_level_map(config: &NetworkConfig) -> Option<Map<String, Value>> {
|
||||
serde_json::to_value(config).ok()?.as_object().cloned()
|
||||
}
|
||||
@@ -0,0 +1,2 @@
|
||||
pub(crate) mod config_api;
|
||||
pub(crate) mod runtime_api;
|
||||
@@ -0,0 +1,46 @@
|
||||
use crate::config;
|
||||
|
||||
/// Napi-facing wrapper: initialize the config repository under `root_dir`.
pub(crate) fn init_config_store(root_dir: String) -> bool {
    config::repository::init_config_store(root_dir)
}

/// All stored config metas as a JSON array string.
pub(crate) fn list_configs() -> String {
    config::repository::list_config_meta_json()
}

/// Persist a config; true on success.
pub(crate) fn save_config(config_id: String, display_name: String, config_json: String) -> bool {
    config::repository::save_config_record(config_id, display_name, config_json).is_some()
}

/// Create a new config from defaults; true on success.
pub(crate) fn create_config(config_id: String, display_name: String) -> bool {
    config::repository::create_config_record(config_id, display_name).is_some()
}

/// Delete a stored config (fields, meta, and any legacy file).
pub(crate) fn delete_stored_config_meta(config_id: String) -> bool {
    config::repository::delete_config_record(&config_id)
}

/// Full config json for `config_id`, if stored.
pub(crate) fn get_config(config_id: String) -> Option<String> {
    config::repository::load_config_json(&config_id)
}

/// Default config json, if it can be built.
pub(crate) fn get_default_config() -> Option<String> {
    config::repository::get_default_config_json()
}

/// Raw json of one top-level field of a stored config.
pub(crate) fn get_config_field(config_id: String, field: String) -> Option<String> {
    config::repository::get_config_field_value(&config_id, &field)
}

/// Overwrite one top-level field; true on success.
pub(crate) fn set_config_field(config_id: String, field: String, json_value: String) -> bool {
    config::repository::set_config_field_value(&config_id, &field, &json_value)
}

/// Import TOML text; returns the new config id on success.
pub(crate) fn import_toml(toml_text: String, display_name: Option<String>) -> Option<String> {
    config::repository::import_toml_config(toml_text, display_name)
        .map(|record| record.meta.config_id)
}

/// Export a stored config as TOML text.
pub(crate) fn export_toml(config_id: String) -> Option<String> {
    config::repository::export_config_toml(&config_id).map(|ret| ret.toml_text)
}
|
||||
@@ -0,0 +1,184 @@
|
||||
use crate::config::repository::load_config_json;
|
||||
use crate::config::storage::config_meta::get_config_display_name;
|
||||
use crate::config::types::stored_config::KeyValuePair;
|
||||
use crate::kernel_bridge::{
|
||||
aggregate_requested_tun_routes, start_local_socket_server as start_local_socket_server_inner,
|
||||
stop_local_socket_server as stop_local_socket_server_inner,
|
||||
};
|
||||
use crate::runtime::state::runtime_state::{
|
||||
RuntimeAggregateState, TunAggregateState, clear_tun_attached, mark_tun_attached,
|
||||
runtime_instance_from_running_info,
|
||||
};
|
||||
use crate::{ASYNC_RUNTIME, EASYTIER_VERSION, INSTANCE_MANAGER, WEB_CLIENTS};
|
||||
use easytier::proto::api::manage::NetworkConfig;
|
||||
use ohos_hilog_binding::{hilog_error, hilog_info};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Thin indirection so the napi layer can inject the start implementation.
pub(crate) fn start_kernel(
    config_id: String,
    start_kernel_with_config_id: impl Fn(&str) -> bool,
) -> bool {
    start_kernel_with_config_id(config_id.as_str())
}
|
||||
|
||||
/// Stop one kernel instance identified by `config_id`.
///
/// The tun attachment flag is cleared up front. The web-client path is tried
/// first; otherwise the id is resolved to an instance uuid and the instance
/// is deleted from the manager. `maybe_stop_local_socket_server` is invoked
/// after a manager-path stop attempt regardless of its outcome — whether the
/// server actually stops is up to that closure.
pub(crate) fn stop_kernel(
    config_id: String,
    stop_web_client: impl Fn(&str) -> bool,
    parse_instance_uuid: impl Fn(&str) -> Option<uuid::Uuid>,
    maybe_stop_local_socket_server: impl Fn(),
) -> bool {
    // Clear before stopping so a concurrent snapshot no longer reports the
    // tun as attached to this instance.
    clear_tun_attached(&config_id);
    if stop_web_client(&config_id) {
        return true;
    }

    let Some(instance_id) = parse_instance_uuid(&config_id) else {
        return false;
    };

    let ret = INSTANCE_MANAGER
        .delete_network_instance(vec![instance_id])
        .map(|_| true)
        .unwrap_or_else(|err| {
            hilog_error!("[Rust] stop_kernel failed {}: {}", config_id, err);
            false
        });
    maybe_stop_local_socket_server();
    ret
}
|
||||
|
||||
/// Stop every listed instance; true only when all stops succeeded.
/// Every id is attempted even after a failure (no short-circuit).
pub(crate) fn stop_network_instance(
    config_ids: Vec<String>,
    stop_kernel: impl Fn(String) -> bool,
) -> bool {
    config_ids
        .into_iter()
        // stop_kernel is evaluated first, so `&&` cannot skip it.
        .fold(true, |all_ok, config_id| stop_kernel(config_id) && all_ok)
}
|
||||
|
||||
pub(crate) fn collect_network_infos() -> Vec<KeyValuePair> {
|
||||
let infos = match INSTANCE_MANAGER.collect_network_infos_sync() {
|
||||
Ok(infos) => infos,
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] collect network infos failed {}", err);
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
|
||||
infos
|
||||
.into_iter()
|
||||
.filter_map(|(key, value)| {
|
||||
serde_json::to_string(&value)
|
||||
.ok()
|
||||
.map(|value_json| KeyValuePair {
|
||||
key: key.to_string(),
|
||||
value: value_json,
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn set_tun_fd(
|
||||
config_id: String,
|
||||
fd: i32,
|
||||
parse_instance_uuid: impl Fn(&str) -> Option<uuid::Uuid>,
|
||||
) -> bool {
|
||||
let Some(instance_id) = parse_instance_uuid(&config_id) else {
|
||||
hilog_error!("[Rust] set_tun_fd invalid instance id: {}", config_id);
|
||||
return false;
|
||||
};
|
||||
|
||||
INSTANCE_MANAGER
|
||||
.set_tun_fd(&instance_id, fd)
|
||||
.map(|_| {
|
||||
mark_tun_attached(&config_id);
|
||||
hilog_info!(
|
||||
"[Rust] set_tun_fd success instance={} fd={} marked_attached=true",
|
||||
config_id,
|
||||
fd
|
||||
);
|
||||
true
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
hilog_error!("[Rust] set_tun_fd failed {}: {}", config_id, err);
|
||||
false
|
||||
})
|
||||
}
|
||||
|
||||
/// Aggregate runtime snapshot; thin delegate to `get_runtime_snapshot_inner`.
pub(crate) fn get_runtime_snapshot() -> RuntimeAggregateState {
    get_runtime_snapshot_inner()
}
|
||||
|
||||
/// Build the aggregate runtime view: per-instance states plus a derived tun
/// summary (attached instances, aggregated routes, running count).
pub(crate) fn get_runtime_snapshot_inner() -> RuntimeAggregateState {
    let infos = match INSTANCE_MANAGER.collect_network_infos_sync() {
        Ok(infos) => infos,
        Err(err) => {
            hilog_error!("[Rust] collect network infos failed {}", err);
            // Degrade to an empty snapshot rather than failing the caller.
            return RuntimeAggregateState {
                instances: vec![],
                tun: TunAggregateState {
                    active: false,
                    attached_instance_ids: vec![],
                    aggregated_routes: vec![],
                    dns_servers: vec![],
                    need_rebuild: false,
                },
                running_instance_count: 0,
            };
        }
    };

    let mut instances = Vec::with_capacity(infos.len());
    for (instance_uuid, info) in infos {
        let config_id = instance_uuid.to_string();
        // Fall back to the id when no display name is stored.
        let display_name = get_config_display_name(&config_id).unwrap_or_else(|| config_id.clone());
        // The stored config is optional: flags default to false without it.
        let config_json = load_config_json(&config_id);
        let stored_config = config_json
            .as_deref()
            .and_then(|raw| serde_json::from_str::<NetworkConfig>(raw).ok());
        let magic_dns_enabled = stored_config
            .as_ref()
            .and_then(|cfg| cfg.enable_magic_dns)
            .unwrap_or(false);
        let need_exit_node = stored_config
            .as_ref()
            .map(|cfg| !cfg.exit_nodes.is_empty())
            .unwrap_or(false);
        instances.push(runtime_instance_from_running_info(
            config_id,
            display_name,
            magic_dns_enabled,
            need_exit_node,
            info,
        ));
    }

    // Deterministic ordering for consumers: name first, id as tie-breaker.
    instances.sort_by(|a, b| {
        a.display_name
            .cmp(&b.display_name)
            .then_with(|| a.instance_id.cmp(&b.instance_id))
    });
    let attached_instance_ids = instances
        .iter()
        .filter(|instance| instance.tun_required)
        .map(|instance| instance.instance_id.clone())
        .collect::<Vec<_>>();
    let aggregated_routes = aggregate_requested_tun_routes(&instances);
    let running_instance_count =
        instances.iter().filter(|instance| instance.running).count() as i32;
    // The tun is reported active whenever any instance requires it.
    let tun_active = !attached_instance_ids.is_empty();

    RuntimeAggregateState {
        instances,
        tun: TunAggregateState {
            active: tun_active,
            attached_instance_ids,
            aggregated_routes,
            // dns_servers / need_rebuild are not derived in this snapshot.
            dns_servers: vec![],
            need_rebuild: false,
        },
        running_instance_count,
    }
}
|
||||
@@ -0,0 +1,6 @@
|
||||
mod protocol;
|
||||
mod routing;
|
||||
mod socket_server;
|
||||
|
||||
pub(crate) use routing::aggregate_requested_tun_routes;
|
||||
pub use socket_server::{start_local_socket_server, stop_local_socket_server};
|
||||
@@ -0,0 +1,50 @@
|
||||
use crate::config::types::stored_config::LocalSocketSyncMessage;
|
||||
use serde::Serialize;
|
||||
use std::io::{Error, ErrorKind, Write};
|
||||
use std::os::unix::net::UnixStream;
|
||||
|
||||
/// Payload sent to local-socket clients when an instance requests the tun
/// device; serialized as camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TunRequestPayload {
    pub config_id: String,
    pub instance_id: String,
    pub display_name: String,
    // Virtual address info may be absent (these are Options) — presumably
    // before the instance obtains an IP; confirm against the producer.
    pub virtual_ipv4: Option<String>,
    pub virtual_ipv4_cidr: Option<String>,
    pub aggregated_routes: Vec<String>,
    pub magic_dns_enabled: bool,
    pub need_exit_node: bool,
}
|
||||
|
||||
pub(crate) fn send_local_socket_message(
|
||||
stream: &mut UnixStream,
|
||||
message_type: &str,
|
||||
payload_json: String,
|
||||
) -> std::io::Result<()> {
|
||||
let message = LocalSocketSyncMessage {
|
||||
message_type: message_type.to_string(),
|
||||
payload_json,
|
||||
};
|
||||
let mut raw = serde_json::to_vec(&message)
|
||||
.map_err(|err| Error::new(ErrorKind::InvalidData, err.to_string()))?;
|
||||
raw.push(b'\n');
|
||||
stream.write_all(&raw)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn broadcast_local_socket_message(
|
||||
clients: &mut Vec<UnixStream>,
|
||||
message_type: &str,
|
||||
payload_json: &str,
|
||||
) -> bool {
|
||||
let mut active_clients = Vec::with_capacity(clients.len());
|
||||
let mut delivered = false;
|
||||
for mut client in clients.drain(..) {
|
||||
if send_local_socket_message(&mut client, message_type, payload_json.to_string()).is_ok() {
|
||||
delivered = true;
|
||||
active_clients.push(client);
|
||||
}
|
||||
}
|
||||
*clients = active_clients;
|
||||
delivered
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
use crate::config::repository::load_config_json;
|
||||
use crate::runtime::state::runtime_state::RuntimeInstanceState;
|
||||
use easytier::proto::api::manage::NetworkConfig;
|
||||
use ipnet::IpNet;
|
||||
use ohos_hilog_binding::hilog_debug;
|
||||
use std::collections::HashSet;
|
||||
use std::net::IpAddr;
|
||||
|
||||
pub(crate) fn load_manual_routes(config_id: &str) -> Vec<String> {
|
||||
load_config_json(config_id)
|
||||
.and_then(|raw| serde_json::from_str::<NetworkConfig>(&raw).ok())
|
||||
.map(|config| config.routes)
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn normalize_route_cidr(route: &str) -> Option<String> {
|
||||
route
|
||||
.parse::<IpNet>()
|
||||
.ok()
|
||||
.map(|network| match network {
|
||||
IpNet::V4(net) => net.trunc().to_string(),
|
||||
IpNet::V6(net) => net.trunc().to_string(),
|
||||
})
|
||||
.or_else(|| {
|
||||
route.parse::<IpAddr>().ok().map(|addr| match addr {
|
||||
IpAddr::V4(ip) => format!("{}/32", ip),
|
||||
IpAddr::V6(ip) => format!("{}/128", ip),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/// Deduplicate and minimize a route list: normalize each entry to
/// network/prefix form, then drop any route already covered by a broader one.
/// Unparsable entries are silently discarded.
fn simplify_routes(routes: Vec<String>) -> Vec<String> {
    let mut parsed = routes
        .into_iter()
        .filter_map(|route| normalize_route_cidr(&route))
        .filter_map(|route| route.parse::<IpNet>().ok())
        .collect::<Vec<_>>();
    // Broadest prefixes first, so covering routes are kept before the routes
    // they subsume; network string as a deterministic tie-breaker.
    parsed.sort_by(|left, right| {
        left.prefix_len()
            .cmp(&right.prefix_len())
            .then_with(|| left.network().to_string().cmp(&right.network().to_string()))
    });

    let mut simplified = Vec::<IpNet>::new();
    'outer: for route in parsed {
        // Skip a route whose network already lies inside a kept, broader one.
        for existing in &simplified {
            if existing.contains(&route.network()) && existing.prefix_len() <= route.prefix_len() {
                continue 'outer;
            }
        }
        // Evict previously kept routes that this one covers. NOTE(review):
        // with the ascending prefix sort above this retain should rarely
        // remove anything beyond equal-prefix duplicates — confirm intent.
        simplified.retain(|existing| {
            !(route.contains(&existing.network()) && route.prefix_len() <= existing.prefix_len())
        });
        simplified.push(route);
    }

    // Final stringification with order-preserving dedup.
    let mut seen = HashSet::new();
    simplified
        .into_iter()
        .map(|route| route.to_string())
        .filter(|route| seen.insert(route.clone()))
        .collect()
}
|
||||
|
||||
pub(crate) fn aggregate_tun_routes(instance: &RuntimeInstanceState) -> Vec<String> {
|
||||
let virtual_ipv4_cidr = instance
|
||||
.my_node_info
|
||||
.as_ref()
|
||||
.and_then(|info| info.virtual_ipv4_cidr.clone());
|
||||
let manual_routes = load_manual_routes(&instance.config_id);
|
||||
let proxy_cidrs = instance
|
||||
.routes
|
||||
.iter()
|
||||
.flat_map(|route| route.proxy_cidrs.iter().cloned())
|
||||
.collect::<Vec<_>>();
|
||||
let mut raw_routes = Vec::new();
|
||||
|
||||
if let Some(cidr) = virtual_ipv4_cidr.clone() {
|
||||
raw_routes.push(cidr);
|
||||
}
|
||||
|
||||
raw_routes.extend(manual_routes.iter().cloned());
|
||||
raw_routes.extend(proxy_cidrs.iter().cloned());
|
||||
let aggregated_routes = simplify_routes(raw_routes);
|
||||
hilog_debug!(
|
||||
"[Rust] aggregate_tun_routes instance={} proxy_cidrs={:?} aggregated_routes={:?}",
|
||||
instance.instance_id,
|
||||
proxy_cidrs,
|
||||
aggregated_routes
|
||||
);
|
||||
aggregated_routes
|
||||
}
|
||||
|
||||
pub(crate) fn aggregate_requested_tun_routes(instances: &[RuntimeInstanceState]) -> Vec<String> {
|
||||
let mut aggregated_routes = Vec::new();
|
||||
let mut seen_routes = HashSet::new();
|
||||
for instance in instances.iter().filter(|instance| instance.tun_required) {
|
||||
for route in aggregate_tun_routes(instance) {
|
||||
if seen_routes.insert(route.clone()) {
|
||||
aggregated_routes.push(route);
|
||||
}
|
||||
}
|
||||
}
|
||||
aggregated_routes
|
||||
}
|
||||
@@ -0,0 +1,196 @@
|
||||
use super::protocol::{TunRequestPayload, broadcast_local_socket_message};
|
||||
use crate::config::repository::kernel_socket_path;
|
||||
use crate::get_runtime_snapshot_inner;
|
||||
use crate::kernel_bridge::routing::aggregate_tun_routes;
|
||||
use ohos_hilog_binding::{hilog_error, hilog_info};
|
||||
use once_cell::sync::Lazy;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::io::ErrorKind;
|
||||
use std::os::unix::net::{UnixListener, UnixStream};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Mutex;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::thread::{self, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
/// Book-keeping for the background local-socket server: the flag used to ask
/// the worker thread to exit, the filesystem path of the bound socket
/// (removed again on shutdown), and the worker thread's join handle.
struct LocalSocketState {
    stop_flag: std::sync::Arc<AtomicBool>,
    socket_path: PathBuf,
    worker: JoinHandle<()>,
}

/// Process-wide state for the at-most-one running local-socket server.
static LOCAL_SOCKET_STATE: Lazy<Mutex<Option<LocalSocketState>>> = Lazy::new(|| Mutex::new(None));
|
||||
|
||||
pub fn start_local_socket_server() -> bool {
|
||||
let socket_path = match kernel_socket_path() {
|
||||
Some(path) => path,
|
||||
None => {
|
||||
hilog_error!("[Rust] kernel socket path unavailable");
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
match LOCAL_SOCKET_STATE.lock() {
|
||||
Ok(guard) if guard.is_some() => return true,
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] lock localsocket state failed: {}", err);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if socket_path.exists() {
|
||||
let _ = std::fs::remove_file(&socket_path);
|
||||
}
|
||||
|
||||
let listener = match UnixListener::bind(&socket_path) {
|
||||
Ok(listener) => listener,
|
||||
Err(err) => {
|
||||
hilog_error!(
|
||||
"[Rust] bind localsocket failed {}: {}",
|
||||
socket_path.display(),
|
||||
err
|
||||
);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
if let Err(err) = listener.set_nonblocking(true) {
|
||||
hilog_error!("[Rust] set localsocket nonblocking failed: {}", err);
|
||||
let _ = std::fs::remove_file(&socket_path);
|
||||
return false;
|
||||
}
|
||||
|
||||
let stop_flag = std::sync::Arc::new(AtomicBool::new(false));
|
||||
let worker_stop_flag = stop_flag.clone();
|
||||
let worker = thread::spawn(move || {
|
||||
let mut last_snapshot_json = String::new();
|
||||
let mut delivered_tun_requests = HashSet::new();
|
||||
let mut last_tun_route_signatures = HashMap::<String, String>::new();
|
||||
let mut clients = Vec::<UnixStream>::new();
|
||||
|
||||
while !worker_stop_flag.load(Ordering::Relaxed) {
|
||||
let mut accepted_client = false;
|
||||
loop {
|
||||
match listener.accept() {
|
||||
Ok((stream, _addr)) => {
|
||||
accepted_client = true;
|
||||
clients.push(stream);
|
||||
}
|
||||
Err(err) if err.kind() == ErrorKind::WouldBlock => break,
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] accept localsocket failed: {}", err);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let snapshot = get_runtime_snapshot_inner();
|
||||
let snapshot_json = match serde_json::to_string(&snapshot) {
|
||||
Ok(json) => json,
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] serialize runtime snapshot failed: {}", err);
|
||||
thread::sleep(Duration::from_millis(250));
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
if accepted_client || snapshot_json != last_snapshot_json {
|
||||
let _ = broadcast_local_socket_message(
|
||||
&mut clients,
|
||||
"runtime_snapshot",
|
||||
&snapshot_json,
|
||||
);
|
||||
last_snapshot_json = snapshot_json;
|
||||
}
|
||||
|
||||
for instance in snapshot.instances.iter() {
|
||||
if instance.running && instance.tun_required {
|
||||
let virtual_ipv4 = instance
|
||||
.my_node_info
|
||||
.as_ref()
|
||||
.and_then(|info| info.virtual_ipv4.clone());
|
||||
let virtual_ipv4_cidr = instance
|
||||
.my_node_info
|
||||
.as_ref()
|
||||
.and_then(|info| info.virtual_ipv4_cidr.clone());
|
||||
if clients.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if virtual_ipv4.is_none() || virtual_ipv4_cidr.is_none() {
|
||||
continue;
|
||||
}
|
||||
let aggregated_routes = aggregate_tun_routes(instance);
|
||||
let route_signature = serde_json::to_string(&aggregated_routes)
|
||||
.unwrap_or_else(|_| "[]".to_string());
|
||||
let should_send = !delivered_tun_requests.contains(&instance.instance_id)
|
||||
|| last_tun_route_signatures
|
||||
.get(&instance.instance_id)
|
||||
.map(|value| value != &route_signature)
|
||||
.unwrap_or(true);
|
||||
if !should_send {
|
||||
continue;
|
||||
}
|
||||
let payload = TunRequestPayload {
|
||||
config_id: instance.config_id.clone(),
|
||||
instance_id: instance.instance_id.clone(),
|
||||
display_name: instance.display_name.clone(),
|
||||
virtual_ipv4,
|
||||
virtual_ipv4_cidr,
|
||||
aggregated_routes,
|
||||
magic_dns_enabled: instance.magic_dns_enabled,
|
||||
need_exit_node: instance.need_exit_node,
|
||||
};
|
||||
let payload_json = match serde_json::to_string(&payload) {
|
||||
Ok(json) => json,
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] serialize tun request failed: {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if broadcast_local_socket_message(&mut clients, "tun_request", &payload_json) {
|
||||
delivered_tun_requests.insert(instance.instance_id.clone());
|
||||
last_tun_route_signatures
|
||||
.insert(instance.instance_id.clone(), route_signature);
|
||||
}
|
||||
} else {
|
||||
delivered_tun_requests.remove(&instance.instance_id);
|
||||
last_tun_route_signatures.remove(&instance.instance_id);
|
||||
}
|
||||
}
|
||||
|
||||
thread::sleep(Duration::from_millis(250));
|
||||
}
|
||||
});
|
||||
|
||||
match LOCAL_SOCKET_STATE.lock() {
|
||||
Ok(mut guard) => {
|
||||
*guard = Some(LocalSocketState {
|
||||
stop_flag,
|
||||
socket_path,
|
||||
worker,
|
||||
});
|
||||
true
|
||||
}
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] lock localsocket state failed: {}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stop_local_socket_server() -> bool {
|
||||
let state = match LOCAL_SOCKET_STATE.lock() {
|
||||
Ok(mut guard) => guard.take(),
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] lock localsocket state failed: {}", err);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(state) = state {
|
||||
state.stop_flag.store(true, Ordering::Relaxed);
|
||||
let _ = state.worker.join();
|
||||
let _ = std::fs::remove_file(state.socket_path);
|
||||
}
|
||||
true
|
||||
}
|
||||
@@ -1,63 +1,243 @@
|
||||
mod native_log;
|
||||
mod config;
|
||||
mod exports;
|
||||
mod kernel_bridge;
|
||||
mod platform;
|
||||
mod runtime;
|
||||
|
||||
use easytier::common::config::{ConfigLoader, TomlConfigLoader};
|
||||
use config::repository::{
|
||||
create_config_record, delete_config_record, export_config_toml, get_config_field_value,
|
||||
get_default_config_json, import_toml_config, init_config_store as init_repo_store,
|
||||
list_config_meta_json, save_config_record, set_config_field_value, start_kernel_with_config_id,
|
||||
};
|
||||
use config::services::schema_service::{
|
||||
ConfigFieldMapping, NetworkConfigSchema,
|
||||
get_network_config_field_mappings as build_network_config_field_mappings,
|
||||
get_network_config_schema as build_network_config_schema,
|
||||
};
|
||||
use config::services::share_link_service::{
|
||||
build_config_share_link as build_config_share_link_inner,
|
||||
import_config_share_link as import_config_share_link_inner,
|
||||
parse_config_share_link as parse_config_share_link_inner,
|
||||
};
|
||||
use config::storage::config_meta::get_config_display_name;
|
||||
use config::types::stored_config::{KeyValuePair, SharedConfigLinkPayload};
|
||||
use easytier::common::constants::EASYTIER_VERSION;
|
||||
use easytier::common::{
|
||||
MachineIdOptions,
|
||||
config::{ConfigFileControl, ConfigLoader, TomlConfigLoader},
|
||||
};
|
||||
use easytier::instance_manager::NetworkInstanceManager;
|
||||
use easytier::launcher::ConfigSource;
|
||||
use easytier::proto::api::manage::NetworkConfig;
|
||||
use easytier::proto::api::manage::NetworkingMethod;
|
||||
use easytier::web_client::{WebClient, WebClientHooks, run_web_client};
|
||||
use kernel_bridge::{
|
||||
aggregate_requested_tun_routes, start_local_socket_server as start_local_socket_server_inner,
|
||||
stop_local_socket_server as stop_local_socket_server_inner,
|
||||
};
|
||||
use napi_derive_ohos::napi;
|
||||
use ohos_hilog_binding::{hilog_debug, hilog_error};
|
||||
use ohos_hilog_binding::{hilog_error, hilog_info};
|
||||
use runtime::state::runtime_state::{
|
||||
RuntimeAggregateState, TunAggregateState, clear_tun_attached, mark_tun_attached,
|
||||
runtime_instance_from_running_info,
|
||||
};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::format;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use tokio::runtime::{Builder, Runtime};
|
||||
use uuid::Uuid;
|
||||
|
||||
static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> =
|
||||
once_cell::sync::Lazy::new(NetworkInstanceManager::new);
|
||||
pub(crate) static INSTANCE_MANAGER: once_cell::sync::Lazy<Arc<NetworkInstanceManager>> =
|
||||
once_cell::sync::Lazy::new(|| Arc::new(NetworkInstanceManager::new()));
|
||||
static ASYNC_RUNTIME: once_cell::sync::Lazy<Runtime> = once_cell::sync::Lazy::new(|| {
|
||||
Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("tokio runtime for easytier-ohrs")
|
||||
});
|
||||
static WEB_CLIENTS: once_cell::sync::Lazy<Mutex<HashMap<String, ManagedWebClient>>> =
|
||||
once_cell::sync::Lazy::new(|| Mutex::new(HashMap::new()));
|
||||
|
||||
#[napi(object)]
|
||||
pub struct KeyValuePair {
|
||||
pub key: String,
|
||||
pub value: String,
|
||||
#[derive(Default)]
|
||||
struct TrackedWebClientHooks {
|
||||
instance_ids: Mutex<HashSet<Uuid>>,
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn set_tun_fd(
|
||||
inst_id: String,
|
||||
fd: i32,
|
||||
) -> bool {
|
||||
match Uuid::try_parse(&inst_id) {
|
||||
Ok(uuid) => {
|
||||
match INSTANCE_MANAGER.set_tun_fd(&uuid, fd) {
|
||||
Ok(_) => {
|
||||
hilog_debug!("[Rust] set tun fd {} to {}.", fd, inst_id);
|
||||
true
|
||||
}
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] cant set tun fd {} to {}. {}", fd, inst_id, e);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] cant covert {} to uuid. {}", inst_id, e);
|
||||
false
|
||||
struct ManagedWebClient {
|
||||
_client: WebClient,
|
||||
hooks: Arc<TrackedWebClientHooks>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WebClientHooks for TrackedWebClientHooks {
|
||||
async fn post_run_network_instance(&self, id: &Uuid) -> Result<(), String> {
|
||||
self.instance_ids
|
||||
.lock()
|
||||
.map_err(|err| err.to_string())?
|
||||
.insert(*id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn post_remove_network_instances(&self, ids: &[Uuid]) -> Result<(), String> {
|
||||
let mut guard = self.instance_ids.lock().map_err(|err| err.to_string())?;
|
||||
for id in ids {
|
||||
guard.remove(id);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn parse_config(cfg_str: String) -> bool {
|
||||
match TomlConfigLoader::new_from_str(&cfg_str) {
|
||||
Ok(_) => {
|
||||
fn is_config_server_config(config: &NetworkConfig) -> bool {
|
||||
matches!(
|
||||
NetworkingMethod::try_from(config.networking_method.unwrap_or_default())
|
||||
.unwrap_or_default(),
|
||||
NetworkingMethod::PublicServer
|
||||
) && config
|
||||
.public_server_url
|
||||
.as_ref()
|
||||
.is_some_and(|url| !url.trim().is_empty())
|
||||
}
|
||||
|
||||
fn stop_web_client(config_id: &str) -> bool {
|
||||
let managed = match WEB_CLIENTS.lock() {
|
||||
Ok(mut guard) => guard.remove(config_id),
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] stop_web_client lock failed {}", err);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
let Some(managed) = managed else {
|
||||
return false;
|
||||
};
|
||||
|
||||
let tracked_ids = managed
|
||||
.hooks
|
||||
.instance_ids
|
||||
.lock()
|
||||
.map(|guard| guard.iter().copied().collect::<Vec<_>>())
|
||||
.unwrap_or_default();
|
||||
drop(managed);
|
||||
|
||||
if tracked_ids.is_empty() {
|
||||
maybe_stop_local_socket_server();
|
||||
return true;
|
||||
}
|
||||
|
||||
let ret = INSTANCE_MANAGER
|
||||
.delete_network_instance(tracked_ids)
|
||||
.map(|_| true)
|
||||
.unwrap_or_else(|err| {
|
||||
hilog_error!(
|
||||
"[Rust] stop config server instances failed {}: {}",
|
||||
config_id,
|
||||
err
|
||||
);
|
||||
false
|
||||
});
|
||||
maybe_stop_local_socket_server();
|
||||
ret
|
||||
}
|
||||
|
||||
/// Start (or confirm) the local-socket server before launching an instance
/// that will need it; thin wrapper over the kernel_bridge implementation.
fn ensure_local_socket_server_started() -> bool {
    start_local_socket_server_inner()
}
|
||||
|
||||
fn maybe_stop_local_socket_server() {
|
||||
let no_local_instances = INSTANCE_MANAGER.list_network_instance_ids().is_empty();
|
||||
let no_web_clients = WEB_CLIENTS
|
||||
.lock()
|
||||
.map(|guard| guard.is_empty())
|
||||
.unwrap_or(false);
|
||||
if no_local_instances && no_web_clients {
|
||||
let _ = stop_local_socket_server_inner();
|
||||
}
|
||||
}
|
||||
|
||||
fn run_config_server_instance(config_id: &str, config: &NetworkConfig) -> bool {
|
||||
if INSTANCE_MANAGER
|
||||
.list_network_instance_ids()
|
||||
.iter()
|
||||
.next()
|
||||
.is_some()
|
||||
{
|
||||
hilog_error!("[Rust] there is a running instance!");
|
||||
return false;
|
||||
}
|
||||
|
||||
let Some(config_server_url) = config.public_server_url.clone() else {
|
||||
hilog_error!("[Rust] public_server_url missing for config server mode");
|
||||
return false;
|
||||
};
|
||||
let hooks = Arc::new(TrackedWebClientHooks::default());
|
||||
let secure_mode = config
|
||||
.secure_mode
|
||||
.as_ref()
|
||||
.map(|mode| mode.enabled)
|
||||
.unwrap_or(false);
|
||||
let hostname = config.hostname.clone();
|
||||
|
||||
if !ensure_local_socket_server_started() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let client = ASYNC_RUNTIME.block_on(run_web_client(
|
||||
&config_server_url,
|
||||
MachineIdOptions::default(),
|
||||
hostname,
|
||||
secure_mode,
|
||||
INSTANCE_MANAGER.clone(),
|
||||
Some(hooks.clone()),
|
||||
));
|
||||
|
||||
let client = match client {
|
||||
Ok(client) => client,
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] start config server failed {}", err);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
match WEB_CLIENTS.lock() {
|
||||
Ok(mut guard) => {
|
||||
guard.insert(
|
||||
config_id.to_string(),
|
||||
ManagedWebClient {
|
||||
_client: client,
|
||||
hooks,
|
||||
},
|
||||
);
|
||||
true
|
||||
}
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] parse config failed {}", e);
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] store config server client failed {}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn run_network_instance(cfg_str: String) -> bool {
|
||||
let cfg = match TomlConfigLoader::new_from_str(&cfg_str) {
|
||||
pub(crate) fn build_default_network_config_json() -> Result<String, String> {
|
||||
let config = NetworkConfig::new_from_config(TomlConfigLoader::default())
|
||||
.map_err(|e| format!("default_network_config failed {}", e))?;
|
||||
serde_json::to_string(&config).map_err(|e| format!("default_network_config failed {}", e))
|
||||
}
|
||||
|
||||
fn convert_toml_to_network_config_inner(toml_text: &str) -> Result<String, String> {
|
||||
let config = NetworkConfig::new_from_config(
|
||||
TomlConfigLoader::new_from_str(toml_text).map_err(|e| e.to_string())?,
|
||||
)
|
||||
.map_err(|e| e.to_string())?;
|
||||
serde_json::to_string(&config).map_err(|e| e.to_string())
|
||||
}
|
||||
|
||||
fn parse_network_config_inner(cfg_json: &str) -> bool {
|
||||
serde_json::from_str::<NetworkConfig>(cfg_json)
|
||||
.ok()
|
||||
.and_then(|cfg| cfg.gen_config().ok())
|
||||
.is_some()
|
||||
}
|
||||
|
||||
pub(crate) fn run_network_instance_from_json(cfg_json: &str) -> bool {
|
||||
let config = match serde_json::from_str::<NetworkConfig>(cfg_json) {
|
||||
Ok(cfg) => cfg,
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] parse config failed {}", e);
|
||||
@@ -65,84 +245,241 @@ pub fn run_network_instance(cfg_str: String) -> bool {
|
||||
}
|
||||
};
|
||||
|
||||
if INSTANCE_MANAGER.list_network_instance_ids().len() > 0 {
|
||||
if is_config_server_config(&config) {
|
||||
let Some(config_id) = config.instance_id.as_deref() else {
|
||||
hilog_error!("[Rust] config server config missing instance id");
|
||||
return false;
|
||||
};
|
||||
return run_config_server_instance(config_id, &config);
|
||||
}
|
||||
|
||||
let cfg = match config.gen_config() {
|
||||
Ok(toml) => toml,
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] parse config failed {}", e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
if !INSTANCE_MANAGER.list_network_instance_ids().is_empty() {
|
||||
hilog_error!("[Rust] there is a running instance!");
|
||||
return false;
|
||||
}
|
||||
|
||||
if !ensure_local_socket_server_started() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let inst_id = cfg.get_id();
|
||||
if INSTANCE_MANAGER
|
||||
.list_network_instance_ids()
|
||||
.contains(&inst_id)
|
||||
{
|
||||
hilog_error!("[Rust] instance {} already exists", inst_id);
|
||||
return false;
|
||||
}
|
||||
INSTANCE_MANAGER
|
||||
.run_network_instance(cfg, ConfigSource::FFI)
|
||||
.unwrap();
|
||||
true
|
||||
|
||||
match INSTANCE_MANAGER.run_network_instance(cfg, false, ConfigFileControl::STATIC_CONFIG) {
|
||||
Ok(_) => true,
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] start_kernel failed for {}: {}", inst_id, err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_instance_uuid(config_id: &str) -> Option<Uuid> {
|
||||
match Uuid::parse_str(config_id) {
|
||||
Ok(uuid) => Some(uuid),
|
||||
Err(err) => {
|
||||
hilog_error!("[Rust] invalid config_id {}: {}", config_id, err);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn stop_network_instance(inst_names: Vec<String>) {
|
||||
INSTANCE_MANAGER
|
||||
.delete_network_instance(
|
||||
inst_names
|
||||
.into_iter()
|
||||
.filter_map(|s| Uuid::parse_str(&s).ok())
|
||||
.collect(),
|
||||
)
|
||||
.unwrap();
|
||||
hilog_debug!("[Rust] stop_network_instance");
|
||||
pub fn init_config_store(root_dir: String) -> bool {
|
||||
exports::config_api::init_config_store(root_dir)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn list_configs() -> String {
|
||||
exports::config_api::list_configs()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn get_config_display_name_by_id(config_id: String) -> Option<String> {
|
||||
get_config_display_name(&config_id)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn save_config(config_id: String, display_name: String, config_json: String) -> bool {
|
||||
exports::config_api::save_config(config_id, display_name, config_json)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn create_config(config_id: String, display_name: String) -> bool {
|
||||
exports::config_api::create_config(config_id, display_name)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn rename_stored_config(config_id: String, display_name: String) -> bool {
|
||||
config::storage::config_meta::set_config_display_name(config_id, display_name).is_some()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn delete_stored_config_meta(config_id: String) -> bool {
|
||||
exports::config_api::delete_stored_config_meta(config_id)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn get_config(config_id: String) -> Option<String> {
|
||||
exports::config_api::get_config(config_id)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn get_default_config() -> Option<String> {
|
||||
exports::config_api::get_default_config()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn get_config_field(config_id: String, field: String) -> Option<String> {
|
||||
exports::config_api::get_config_field(config_id, field)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn set_config_field(config_id: String, field: String, json_value: String) -> bool {
|
||||
exports::config_api::set_config_field(config_id, field, json_value)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn import_toml(toml_text: String, display_name: Option<String>) -> Option<String> {
|
||||
exports::config_api::import_toml(toml_text, display_name)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn export_toml(config_id: String) -> Option<String> {
|
||||
exports::config_api::export_toml(config_id)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn start_kernel(config_id: String) -> bool {
|
||||
exports::runtime_api::start_kernel(config_id, start_kernel_with_config_id)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn stop_kernel(config_id: String) -> bool {
|
||||
exports::runtime_api::stop_kernel(
|
||||
config_id,
|
||||
stop_web_client,
|
||||
parse_instance_uuid,
|
||||
maybe_stop_local_socket_server,
|
||||
)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn stop_network_instance(config_ids: Vec<String>) -> bool {
|
||||
exports::runtime_api::stop_network_instance(config_ids, stop_kernel)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn easytier_version() -> String {
|
||||
EASYTIER_VERSION.to_string()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn default_network_config() -> String {
|
||||
get_default_config().unwrap_or_else(|| "{}".to_string())
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn convert_toml_to_network_config(toml_text: String) -> String {
|
||||
convert_toml_to_network_config_inner(&toml_text).unwrap_or_else(|err| format!("ERROR: {err}"))
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn parse_network_config(cfg_json: String) -> bool {
|
||||
parse_network_config_inner(&cfg_json)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn run_network_instance(cfg_json: String) -> bool {
|
||||
run_network_instance_from_json(&cfg_json)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn collect_network_infos() -> Vec<KeyValuePair> {
|
||||
let mut result = Vec::new();
|
||||
match INSTANCE_MANAGER.collect_network_infos() {
|
||||
Ok(map) => {
|
||||
for (uuid, info) in map.iter() {
|
||||
// convert value to json string
|
||||
let value = match serde_json::to_string(&info) {
|
||||
Ok(value) => value,
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] failed to serialize instance {} info: {}", uuid, e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
result.push(KeyValuePair {
|
||||
key: uuid.clone().to_string(),
|
||||
value: value.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
Err(_) => {}
|
||||
}
|
||||
result
|
||||
exports::runtime_api::collect_network_infos()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn collect_running_network() -> Vec<String> {
|
||||
INSTANCE_MANAGER
|
||||
.list_network_instance_ids()
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|id| id.to_string())
|
||||
.collect()
|
||||
pub fn set_tun_fd(config_id: String, fd: i32) -> bool {
|
||||
exports::runtime_api::set_tun_fd(config_id, fd, parse_instance_uuid)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn is_running_network(inst_id: String) -> bool {
|
||||
match Uuid::try_parse(&inst_id) {
|
||||
Ok(uuid) => {
|
||||
INSTANCE_MANAGER
|
||||
.list_network_instance_ids()
|
||||
.contains(&uuid)
|
||||
}
|
||||
Err(e) => {
|
||||
hilog_error!("[Rust] cant covert {} to uuid. {}", inst_id, e);
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_network_config_schema() -> NetworkConfigSchema {
|
||||
build_network_config_schema()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn get_network_config_field_mappings() -> Vec<ConfigFieldMapping> {
|
||||
build_network_config_field_mappings()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn exported_plain_object_schema_contains_core_networkconfig_metadata() {
|
||||
let schema = get_network_config_schema();
|
||||
assert_eq!(schema.name, "NetworkConfig");
|
||||
assert_eq!(schema.node_kind, "schema");
|
||||
assert!(
|
||||
schema
|
||||
.children
|
||||
.iter()
|
||||
.any(|field| field.name == "network_name")
|
||||
);
|
||||
let secure_mode = schema
|
||||
.children
|
||||
.iter()
|
||||
.find(|field| field.name == "secure_mode")
|
||||
.expect("secure_mode field");
|
||||
assert!(
|
||||
secure_mode
|
||||
.children
|
||||
.iter()
|
||||
.any(|field| field.name == "enabled")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn get_runtime_snapshot() -> RuntimeAggregateState {
|
||||
exports::runtime_api::get_runtime_snapshot()
|
||||
}
|
||||
|
||||
pub(crate) fn get_runtime_snapshot_inner() -> RuntimeAggregateState {
|
||||
exports::runtime_api::get_runtime_snapshot_inner()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn build_config_share_link(config_id: String, only_start: Option<bool>) -> Option<String> {
|
||||
build_config_share_link_inner(&config_id, None, only_start.unwrap_or(false))
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn parse_config_share_link(share_link: String) -> Option<SharedConfigLinkPayload> {
|
||||
parse_config_share_link_inner(&share_link)
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn import_config_share_link(
|
||||
share_link: String,
|
||||
display_name_override: Option<String>,
|
||||
) -> Option<String> {
|
||||
import_config_share_link_inner(&share_link, display_name_override)
|
||||
}
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
pub(crate) mod logging;
|
||||
@@ -0,0 +1 @@
|
||||
pub(crate) mod native_log;
|
||||
+11
-13
@@ -1,7 +1,9 @@
|
||||
use napi_derive_ohos::napi;
|
||||
use ohos_hilog_binding::{
|
||||
LogOptions, hilog_debug, hilog_error, hilog_info, hilog_warn, set_global_options,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::panic;
|
||||
use napi_derive_ohos::napi;
|
||||
use ohos_hilog_binding::{hilog_debug, hilog_error, hilog_info, hilog_warn, set_global_options, LogOptions};
|
||||
use tracing::{Event, Subscriber};
|
||||
use tracing_core::Level;
|
||||
use tracing_subscriber::layer::{Context, Layer};
|
||||
@@ -20,12 +22,9 @@ pub fn init_panic_hook() {
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn hilog_global_options(
|
||||
domain: u32,
|
||||
tag: String,
|
||||
) {
|
||||
pub fn hilog_global_options(domain: u32, tag: String) {
|
||||
ohos_hilog_binding::forward_stdio_to_hilog();
|
||||
set_global_options(LogOptions{
|
||||
set_global_options(LogOptions {
|
||||
domain,
|
||||
tag: Box::leak(tag.clone().into_boxed_str()),
|
||||
})
|
||||
@@ -34,11 +33,9 @@ pub fn hilog_global_options(
|
||||
#[napi]
|
||||
pub fn init_tracing_subscriber() {
|
||||
tracing_subscriber::registry()
|
||||
.with(
|
||||
CallbackLayer {
|
||||
callback: Box::new(tracing_callback),
|
||||
}
|
||||
)
|
||||
.with(CallbackLayer {
|
||||
callback: Box::new(tracing_callback),
|
||||
})
|
||||
.init();
|
||||
}
|
||||
|
||||
@@ -93,6 +90,7 @@ impl<'a> tracing::field::Visit for FieldCollector<'a> {
|
||||
}
|
||||
|
||||
fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
|
||||
self.0.insert(field.name().to_string(), format!("{:?}", value));
|
||||
self.0
|
||||
.insert(field.name().to_string(), format!("{:?}", value));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
pub(crate) mod state;
|
||||
@@ -0,0 +1 @@
|
||||
pub(crate) mod runtime_state;
|
||||
@@ -0,0 +1,293 @@
|
||||
use easytier::proto::{api, common};
|
||||
use napi_derive_ohos::napi;
|
||||
use serde::Serialize;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Mutex;
|
||||
|
||||
static ATTACHED_TUN_INSTANCE_IDS: once_cell::sync::Lazy<Mutex<HashSet<String>>> =
|
||||
once_cell::sync::Lazy::new(|| Mutex::new(HashSet::new()));
|
||||
|
||||
pub fn mark_tun_attached(instance_id: &str) {
|
||||
if let Ok(mut guard) = ATTACHED_TUN_INSTANCE_IDS.lock() {
|
||||
guard.insert(instance_id.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clear_tun_attached(instance_id: &str) {
|
||||
if let Ok(mut guard) = ATTACHED_TUN_INSTANCE_IDS.lock() {
|
||||
guard.remove(instance_id);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_tun_attached(instance_id: &str) -> bool {
|
||||
ATTACHED_TUN_INSTANCE_IDS
|
||||
.lock()
|
||||
.map(|guard| guard.contains(instance_id))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Per-connection traffic counters; `latency_us` presumably in microseconds
/// (from the field name — TODO confirm against the proto source).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct PeerConnStats {
    pub rx_bytes: i64,
    pub tx_bytes: i64,
    pub rx_packets: i64,
    pub tx_packets: i64,
    pub latency_us: i64,
}

/// One tunnel connection between this node and a peer, flattened from the
/// proto representation into NAPI-friendly scalar fields.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct PeerConnInfo {
    pub conn_id: String,
    pub my_peer_id: i64,
    pub peer_id: i64,
    pub features: Vec<String>,
    pub tunnel_type: Option<String>,
    pub local_addr: Option<String>,
    pub remote_addr: Option<String>,
    pub resolved_remote_addr: Option<String>,
    pub stats: Option<PeerConnStats>,
    pub loss_rate: Option<f64>,
    pub is_client: bool,
    pub network_name: Option<String>,
    pub is_closed: bool,
    pub secure_auth_level: Option<i32>,
    pub peer_identity_type: Option<i32>,
}

/// A known peer together with its connections.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct PeerInfo {
    pub peer_id: i64,
    pub default_conn_id: Option<String>,
    // Conn ids with a direct (non-relayed) link; subset of `conns`.
    pub directly_connected_conns: Vec<String>,
    pub conns: Vec<PeerConnInfo>,
}

/// One routing-table entry as exposed to the UI (see `route_to_view`).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct RouteView {
    pub peer_id: i64,
    pub hostname: Option<String>,
    // Bare address vs. the full CIDR form of the same assignment.
    pub ipv4: Option<String>,
    pub ipv4_cidr: Option<String>,
    pub ipv6_cidr: Option<String>,
    pub proxy_cidrs: Vec<String>,
    pub next_hop_peer_id: Option<i64>,
    pub cost: Option<i32>,
    pub path_latency: Option<i64>,
    pub udp_nat_type: Option<i32>,
    pub tcp_nat_type: Option<i32>,
    pub inst_id: Option<String>,
    pub version: Option<String>,
    pub is_public_server: Option<bool>,
}

/// This node's own identity and connectivity summary.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct MyNodeInfo {
    pub virtual_ipv4: Option<String>,
    pub virtual_ipv4_cidr: Option<String>,
    pub hostname: Option<String>,
    pub version: Option<String>,
    pub peer_id: Option<i64>,
    pub listeners: Vec<String>,
    pub vpn_portal_cfg: Option<String>,
    pub udp_nat_type: Option<i32>,
    pub tcp_nat_type: Option<i32>,
}

/// Full runtime view of one network instance, consumed by the UI and by the
/// TUN-route aggregation in kernel_bridge.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct RuntimeInstanceState {
    pub config_id: String,
    pub instance_id: String,
    pub display_name: String,
    pub running: bool,
    // Whether the instance wants a TUN device vs. whether one is attached.
    pub tun_required: bool,
    pub tun_attached: bool,
    pub magic_dns_enabled: bool,
    pub need_exit_node: bool,
    pub error_message: Option<String>,
    pub my_node_info: Option<MyNodeInfo>,
    pub events: Vec<String>,
    pub routes: Vec<RouteView>,
    pub peers: Vec<PeerInfo>,
}

/// Aggregated TUN-device state across all instances.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct TunAggregateState {
    pub active: bool,
    pub attached_instance_ids: Vec<String>,
    pub aggregated_routes: Vec<String>,
    pub dns_servers: Vec<String>,
    pub need_rebuild: bool,
}

/// Top-level snapshot broadcast to local-socket clients.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[napi(object)]
pub struct RuntimeAggregateState {
    pub instances: Vec<RuntimeInstanceState>,
    pub tun: TunAggregateState,
    pub running_instance_count: i32,
}
|
||||
|
||||
fn stringify_ipv4_inet(value: Option<common::Ipv4Inet>) -> Option<String> {
|
||||
value.map(|v| v.to_string())
|
||||
}
|
||||
|
||||
fn stringify_ipv6_inet(value: Option<common::Ipv6Inet>) -> Option<String> {
|
||||
value.map(|v| v.to_string())
|
||||
}
|
||||
|
||||
fn stringify_url(value: Option<common::Url>) -> Option<String> {
|
||||
value.map(|v| v.to_string())
|
||||
}
|
||||
|
||||
fn stringify_uuid(value: Option<common::Uuid>) -> Option<String> {
|
||||
value.map(|v| v.to_string())
|
||||
}
|
||||
|
||||
fn optional_u32_to_i64(value: Option<u32>) -> Option<i64> {
|
||||
value.map(|v| v as i64)
|
||||
}
|
||||
|
||||
fn optional_i32_to_i64(value: Option<i32>) -> Option<i64> {
|
||||
value.map(|v| v as i64)
|
||||
}
|
||||
|
||||
fn route_to_view(route: api::instance::Route) -> RouteView {
|
||||
let stun = route.stun_info;
|
||||
let feature_flag = route.feature_flag;
|
||||
RouteView {
|
||||
peer_id: route.peer_id as i64,
|
||||
hostname: (!route.hostname.is_empty()).then_some(route.hostname),
|
||||
ipv4: route
|
||||
.ipv4_addr
|
||||
.as_ref()
|
||||
.and_then(|inet| inet.address.as_ref())
|
||||
.map(|addr| addr.to_string()),
|
||||
ipv4_cidr: stringify_ipv4_inet(route.ipv4_addr),
|
||||
ipv6_cidr: stringify_ipv6_inet(route.ipv6_addr),
|
||||
proxy_cidrs: route.proxy_cidrs,
|
||||
next_hop_peer_id: optional_u32_to_i64(route.next_hop_peer_id_latency_first)
|
||||
.or_else(|| Some(route.next_hop_peer_id as i64)),
|
||||
cost: Some(route.cost),
|
||||
path_latency: optional_i32_to_i64(route.path_latency_latency_first)
|
||||
.or_else(|| Some(route.path_latency as i64)),
|
||||
udp_nat_type: stun.as_ref().map(|info| info.udp_nat_type),
|
||||
tcp_nat_type: stun.as_ref().map(|info| info.tcp_nat_type),
|
||||
inst_id: (!route.inst_id.is_empty()).then_some(route.inst_id),
|
||||
version: (!route.version.is_empty()).then_some(route.version),
|
||||
is_public_server: feature_flag.map(|flag| flag.is_public_server),
|
||||
}
|
||||
}
|
||||
|
||||
fn peer_conn_to_view(conn: api::instance::PeerConnInfo) -> PeerConnInfo {
|
||||
let stats = conn.stats.map(|stats| PeerConnStats {
|
||||
rx_bytes: stats.rx_bytes as i64,
|
||||
tx_bytes: stats.tx_bytes as i64,
|
||||
rx_packets: stats.rx_packets as i64,
|
||||
tx_packets: stats.tx_packets as i64,
|
||||
latency_us: stats.latency_us as i64,
|
||||
});
|
||||
|
||||
PeerConnInfo {
|
||||
conn_id: conn.conn_id,
|
||||
my_peer_id: conn.my_peer_id as i64,
|
||||
peer_id: conn.peer_id as i64,
|
||||
features: conn.features,
|
||||
tunnel_type: conn.tunnel.as_ref().map(|t| t.tunnel_type.clone()),
|
||||
local_addr: conn
|
||||
.tunnel
|
||||
.as_ref()
|
||||
.and_then(|t| stringify_url(t.local_addr.clone())),
|
||||
remote_addr: conn
|
||||
.tunnel
|
||||
.as_ref()
|
||||
.and_then(|t| stringify_url(t.remote_addr.clone())),
|
||||
resolved_remote_addr: conn
|
||||
.tunnel
|
||||
.as_ref()
|
||||
.and_then(|t| stringify_url(t.resolved_remote_addr.clone())),
|
||||
stats,
|
||||
loss_rate: Some(conn.loss_rate as f64),
|
||||
is_client: conn.is_client,
|
||||
network_name: (!conn.network_name.is_empty()).then_some(conn.network_name),
|
||||
is_closed: conn.is_closed,
|
||||
secure_auth_level: Some(conn.secure_auth_level),
|
||||
peer_identity_type: Some(conn.peer_identity_type),
|
||||
}
|
||||
}
|
||||
|
||||
fn peer_to_view(peer: api::instance::PeerInfo) -> PeerInfo {
|
||||
PeerInfo {
|
||||
peer_id: peer.peer_id as i64,
|
||||
default_conn_id: stringify_uuid(peer.default_conn_id),
|
||||
directly_connected_conns: peer
|
||||
.directly_connected_conns
|
||||
.into_iter()
|
||||
.map(|id| id.to_string())
|
||||
.collect(),
|
||||
conns: peer.conns.into_iter().map(peer_conn_to_view).collect(),
|
||||
}
|
||||
}
|
||||
|
||||
fn my_node_info_to_view(info: api::manage::MyNodeInfo) -> MyNodeInfo {
|
||||
MyNodeInfo {
|
||||
virtual_ipv4: info
|
||||
.virtual_ipv4
|
||||
.as_ref()
|
||||
.and_then(|inet| inet.address.as_ref())
|
||||
.map(|addr| addr.to_string()),
|
||||
virtual_ipv4_cidr: stringify_ipv4_inet(info.virtual_ipv4),
|
||||
hostname: (!info.hostname.is_empty()).then_some(info.hostname),
|
||||
version: (!info.version.is_empty()).then_some(info.version),
|
||||
peer_id: Some(info.peer_id as i64),
|
||||
listeners: info
|
||||
.listeners
|
||||
.into_iter()
|
||||
.map(|url| url.to_string())
|
||||
.collect(),
|
||||
vpn_portal_cfg: info.vpn_portal_cfg,
|
||||
udp_nat_type: info.stun_info.as_ref().map(|stun| stun.udp_nat_type),
|
||||
tcp_nat_type: info.stun_info.as_ref().map(|stun| stun.tcp_nat_type),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn runtime_instance_from_running_info(
|
||||
config_id: String,
|
||||
display_name: String,
|
||||
magic_dns_enabled: bool,
|
||||
need_exit_node: bool,
|
||||
info: api::manage::NetworkInstanceRunningInfo,
|
||||
) -> RuntimeInstanceState {
|
||||
let tun_attached = info.running && is_tun_attached(&config_id);
|
||||
let tun_required = info.running && (info.dev_name != "no_tun" || tun_attached);
|
||||
|
||||
RuntimeInstanceState {
|
||||
config_id: config_id.clone(),
|
||||
instance_id: config_id,
|
||||
display_name,
|
||||
running: info.running,
|
||||
tun_required,
|
||||
tun_attached,
|
||||
magic_dns_enabled,
|
||||
need_exit_node,
|
||||
error_message: info.error_msg,
|
||||
my_node_info: info.my_node_info.map(my_node_info_to_view),
|
||||
events: info.events,
|
||||
routes: info.routes.into_iter().map(route_to_view).collect(),
|
||||
peers: info.peers.into_iter().map(peer_to_view).collect(),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,17 @@
|
||||
# Development Environment Configuration
|
||||
SERVER_HOST=127.0.0.1
|
||||
SERVER_PORT=8080
|
||||
DATABASE_PATH=uptime.db
|
||||
DATABASE_MAX_CONNECTIONS=5
|
||||
HEALTH_CHECK_INTERVAL=60
|
||||
HEALTH_CHECK_TIMEOUT=15
|
||||
HEALTH_CHECK_RETRIES=2
|
||||
RUST_LOG=debug
|
||||
LOG_LEVEL=debug
|
||||
CORS_ALLOWED_ORIGINS=http://localhost:3000,http://localhost:8080
|
||||
CORS_ALLOWED_METHODS=GET,POST,PUT,DELETE,OPTIONS
|
||||
CORS_ALLOWED_HEADERS=content-type,authorization
|
||||
NODE_ENV=development
|
||||
API_BASE_URL=/api
|
||||
ENABLE_COMPRESSION=true
|
||||
ENABLE_CORS=true
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "easytier-uptime"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
edition.workspace = true
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1.0", features = ["full"] }
|
||||
@@ -12,9 +12,11 @@ serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
uuid = { version = "1.0", features = ["v4", "serde"] }
|
||||
guarden = "0.1"
|
||||
|
||||
# Axum web framework
|
||||
axum = { version = "0.8.4", features = ["macros"] }
|
||||
axum-extra = { version = "0.10", features = ["query"] }
|
||||
tower-http = { version = "0.6", features = ["cors", "compression-full"] }
|
||||
tower = "0.5"
|
||||
|
||||
@@ -56,6 +58,8 @@ once_cell = "1.19"
|
||||
# EasyTier core
|
||||
easytier = { path = "../../easytier" }
|
||||
|
||||
mimalloc = { version = "*" }
|
||||
|
||||
# Testing
|
||||
[dev-dependencies]
|
||||
mockall = "0.12"
|
||||
|
||||
+9
-9
@@ -9,7 +9,7 @@
|
||||
"version": "0.0.0",
|
||||
"dependencies": {
|
||||
"@element-plus/icons-vue": "^2.3.1",
|
||||
"axios": "^1.7.9",
|
||||
"axios": "^1.13.5",
|
||||
"dayjs": "^1.11.13",
|
||||
"element-plus": "^2.8.8",
|
||||
"vue": "^3.5.18",
|
||||
@@ -1220,13 +1220,13 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/axios": {
|
||||
"version": "1.11.0",
|
||||
"resolved": "https://registry.npmjs.org/axios/-/axios-1.11.0.tgz",
|
||||
"integrity": "sha512-1Lx3WLFQWm3ooKDYZD1eXmoGO9fxYQjrycfHFC8P0sCfQVXyROp0p9PFWBehewBOdCwHc+f/b8I0fMto5eSfwA==",
|
||||
"version": "1.13.6",
|
||||
"resolved": "https://registry.npmjs.org/axios/-/axios-1.13.6.tgz",
|
||||
"integrity": "sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"follow-redirects": "^1.15.6",
|
||||
"form-data": "^4.0.4",
|
||||
"follow-redirects": "^1.15.11",
|
||||
"form-data": "^4.0.5",
|
||||
"proxy-from-env": "^1.1.0"
|
||||
}
|
||||
},
|
||||
@@ -1616,9 +1616,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/form-data": {
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
|
||||
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
|
||||
"version": "4.0.5",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
|
||||
"integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"asynckit": "^0.4.0",
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@element-plus/icons-vue": "^2.3.1",
|
||||
"axios": "^1.7.9",
|
||||
"axios": "^1.13.5",
|
||||
"dayjs": "^1.11.13",
|
||||
"easytier-uptime-frontend": "link:",
|
||||
"element-plus": "^2.8.8",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
<script setup>
|
||||
import { ref, onMounted } from 'vue'
|
||||
import { ref, onMounted, computed } from 'vue'
|
||||
import { useRouter, useRoute } from 'vue-router'
|
||||
import { healthApi } from './api'
|
||||
import {
|
||||
@@ -70,6 +70,20 @@ const menuItems = [
|
||||
}
|
||||
]
|
||||
|
||||
// 根据当前路由计算默认激活的菜单项
|
||||
const activeMenuIndex = computed(() => {
|
||||
const p = route.path
|
||||
if (p.startsWith('/submit')) return 'submit'
|
||||
return 'dashboard'
|
||||
})
|
||||
|
||||
// 处理菜单选择,避免返回 Promise 导致异步补丁问题
|
||||
const handleMenuSelect = (key) => {
|
||||
const item = menuItems.find((i) => i.name === key)
|
||||
if (item && item.path) {
|
||||
router.push(item.path)
|
||||
}
|
||||
}
|
||||
onMounted(() => {
|
||||
checkHealth()
|
||||
// 定期检查健康状态
|
||||
@@ -89,8 +103,8 @@ onMounted(() => {
|
||||
<h1 class="app-title">EasyTier Uptime</h1>
|
||||
</div>
|
||||
|
||||
<el-menu :default-active="route.name" mode="horizontal" class="nav-menu"
|
||||
@select="(key) => router.push(menuItems.find(item => item.name === key)?.path || '/')">
|
||||
<el-menu :default-active="activeMenuIndex" mode="horizontal" class="nav-menu"
|
||||
@select="handleMenuSelect">
|
||||
<el-menu-item v-for="item in menuItems" :key="item.name" :index="item.name">
|
||||
<el-icon>
|
||||
<component :is="item.icon" />
|
||||
|
||||
@@ -6,6 +6,18 @@ const api = axios.create({
|
||||
timeout: 10000,
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
// 保证数组参数使用 repeated keys 风格序列化:tags=a&tags=b
|
||||
paramsSerializer: params => {
|
||||
const usp = new URLSearchParams()
|
||||
Object.entries(params || {}).forEach(([key, value]) => {
|
||||
if (Array.isArray(value)) {
|
||||
value.forEach(v => usp.append(key, v))
|
||||
} else if (value !== undefined && value !== null && value !== '') {
|
||||
usp.append(key, value)
|
||||
}
|
||||
})
|
||||
return usp.toString()
|
||||
}
|
||||
})
|
||||
|
||||
@@ -50,9 +62,15 @@ api.interceptors.response.use(
|
||||
|
||||
// 节点相关API
|
||||
export const nodeApi = {
|
||||
// 获取节点列表
|
||||
async getNodes(params = {}) {
|
||||
const response = await api.get('/api/nodes', { params })
|
||||
// 获取节点列表(支持传入 AbortController.signal 用于取消)
|
||||
async getNodes(params = {}, options = {}) {
|
||||
const response = await api.get('/api/nodes', { params, signal: options.signal })
|
||||
return response.data
|
||||
},
|
||||
|
||||
// 获取所有标签
|
||||
async getAllTags() {
|
||||
const response = await api.get('/api/tags')
|
||||
return response.data
|
||||
},
|
||||
|
||||
@@ -149,6 +167,28 @@ export const adminApi = {
|
||||
async updateNode(id, data) {
|
||||
const response = await api.put(`/api/admin/nodes/${id}`, data)
|
||||
return response.data
|
||||
},
|
||||
|
||||
// 兼容方法:获取所有节点(参数转换)
|
||||
async getAllNodes(params = {}) {
|
||||
const mapped = {
|
||||
page: params.page,
|
||||
per_page: params.page_size ?? params.per_page,
|
||||
is_approved: params.approved ?? params.is_approved,
|
||||
is_active: params.online ?? params.is_active,
|
||||
protocol: params.protocol,
|
||||
search: params.search,
|
||||
tag: params.tag
|
||||
}
|
||||
// 移除未定义的字段
|
||||
Object.keys(mapped).forEach(k => {
|
||||
if (mapped[k] === undefined || mapped[k] === null || mapped[k] === '') {
|
||||
delete mapped[k]
|
||||
}
|
||||
})
|
||||
// 直接复用现有接口
|
||||
const response = await api.get('/api/admin/nodes', { params: mapped })
|
||||
return response.data
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -85,6 +85,15 @@
|
||||
<div class="form-tip">详细描述有助于用户选择合适的节点</div>
|
||||
</el-form-item>
|
||||
|
||||
<!-- 新增:标签管理(仅在管理员编辑时显示) -->
|
||||
<el-form-item v-if="props.showTags" label="标签" prop="tags">
|
||||
<el-select v-model="form.tags" multiple filterable allow-create default-first-option :multiple-limit="10"
|
||||
placeholder="输入后按回车添加,如:北京、联通、IPv6、高带宽">
|
||||
<el-option v-for="opt in (form.tags || [])" :key="opt" :label="opt" :value="opt" />
|
||||
</el-select>
|
||||
<div class="form-tip">用于分类与检索,建议 1-6 个标签,每个不超过 32 字符</div>
|
||||
</el-form-item>
|
||||
|
||||
<!-- 联系方式 -->
|
||||
<el-form-item label="联系方式" prop="contact_info">
|
||||
<div class="contact-section">
|
||||
@@ -238,6 +247,7 @@ const props = defineProps({
|
||||
wechat: '',
|
||||
qq_number: '',
|
||||
mail: '',
|
||||
tags: [],
|
||||
agreed: false
|
||||
})
|
||||
},
|
||||
@@ -264,6 +274,11 @@ const props = defineProps({
|
||||
showCancel: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
},
|
||||
// 新增:是否显示标签管理
|
||||
showTags: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
}
|
||||
})
|
||||
|
||||
@@ -353,6 +368,38 @@ const rules = {
|
||||
},
|
||||
trigger: 'change'
|
||||
}
|
||||
],
|
||||
// 新增:标签规则(仅在显示标签管理时生效)
|
||||
tags: [
|
||||
{
|
||||
validator: (rule, value, callback) => {
|
||||
if (!props.showTags) {
|
||||
callback()
|
||||
return
|
||||
}
|
||||
if (!Array.isArray(form.tags)) {
|
||||
callback(new Error('标签格式错误'))
|
||||
return
|
||||
}
|
||||
if (form.tags.length > 10) {
|
||||
callback(new Error('最多添加 10 个标签'))
|
||||
return
|
||||
}
|
||||
for (const t of form.tags) {
|
||||
const s = (t || '').trim()
|
||||
if (s.length === 0) {
|
||||
callback(new Error('标签不能为空'))
|
||||
return
|
||||
}
|
||||
if (s.length > 32) {
|
||||
callback(new Error('每个标签不超过 32 字符'))
|
||||
return
|
||||
}
|
||||
}
|
||||
callback()
|
||||
},
|
||||
trigger: 'change'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -362,7 +409,7 @@ const canTest = computed(() => {
|
||||
})
|
||||
|
||||
const buildDataFromForm = () => {
|
||||
return {
|
||||
const data = {
|
||||
name: form.name || 'Test Node',
|
||||
host: form.host,
|
||||
port: form.port,
|
||||
@@ -376,6 +423,11 @@ const buildDataFromForm = () => {
|
||||
qq_number: form.qq_number || null,
|
||||
mail: form.mail || null
|
||||
}
|
||||
// 仅在管理员编辑时附带标签
|
||||
if (props.showTags) {
|
||||
data.tags = Array.isArray(form.tags) ? form.tags : []
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// 测试连接
|
||||
@@ -441,6 +493,10 @@ const resetFields = () => {
|
||||
if (formRef.value) {
|
||||
formRef.value.resetFields()
|
||||
}
|
||||
// 重置标签
|
||||
if (props.showTags) {
|
||||
form.tags = []
|
||||
}
|
||||
testResult.value = null
|
||||
emit('reset')
|
||||
}
|
||||
|
||||
@@ -0,0 +1,62 @@
|
||||
// Deterministic tag color generator (pure frontend)
|
||||
// Same tag => same color; different tags => different colors
|
||||
|
||||
function stringHash(str) {
|
||||
const s = String(str)
|
||||
let hash = 5381
|
||||
for (let i = 0; i < s.length; i++) {
|
||||
hash = (hash * 33) ^ s.charCodeAt(i)
|
||||
}
|
||||
return hash >>> 0 // ensure positive
|
||||
}
|
||||
|
||||
function hslToRgb(h, s, l) {
|
||||
// h,s,l in [0,1]
|
||||
let r, g, b
|
||||
|
||||
if (s === 0) {
|
||||
r = g = b = l // achromatic
|
||||
} else {
|
||||
const hue2rgb = (p, q, t) => {
|
||||
if (t < 0) t += 1
|
||||
if (t > 1) t -= 1
|
||||
if (t < 1 / 6) return p + (q - p) * 6 * t
|
||||
if (t < 1 / 2) return q
|
||||
if (t < 2 / 3) return p + (q - p) * (2 / 3 - t) * 6
|
||||
return p
|
||||
}
|
||||
|
||||
const q = l < 0.5 ? l * (1 + s) : l + s - l * s
|
||||
const p = 2 * l - q
|
||||
r = hue2rgb(p, q, h + 1 / 3)
|
||||
g = hue2rgb(p, q, h)
|
||||
b = hue2rgb(p, q, h - 1 / 3)
|
||||
}
|
||||
|
||||
return [Math.round(r * 255), Math.round(g * 255), Math.round(b * 255)]
|
||||
}
|
||||
|
||||
function rgbToHex(r, g, b) {
|
||||
const toHex = (v) => v.toString(16).padStart(2, '0')
|
||||
return `#${toHex(r)}${toHex(g)}${toHex(b)}`
|
||||
}
|
||||
|
||||
export function getTagStyle(tag) {
|
||||
const hash = stringHash(tag)
|
||||
const hue = hash % 360 // 0-359
|
||||
const saturation = 65 // percentage
|
||||
const lightness = 47 // percentage
|
||||
|
||||
const rgb = hslToRgb(hue / 360, saturation / 100, lightness / 100)
|
||||
const hex = rgbToHex(rgb[0], rgb[1], rgb[2])
|
||||
|
||||
// Perceived brightness for text color selection
|
||||
const brightness = rgb[0] * 0.299 + rgb[1] * 0.587 + rgb[2] * 0.114
|
||||
const textColor = brightness > 160 ? '#1f1f1f' : '#ffffff'
|
||||
|
||||
return {
|
||||
backgroundColor: hex,
|
||||
borderColor: hex,
|
||||
color: textColor
|
||||
}
|
||||
}
|
||||
@@ -196,6 +196,17 @@
|
||||
|
||||
<el-table-column prop="description" label="描述" min-width="150" show-overflow-tooltip />
|
||||
|
||||
<el-table-column prop="tags" label="标签" min-width="160">
|
||||
<template #default="{ row }">
|
||||
<div class="tags-list">
|
||||
<el-tag v-for="(tag, idx) in row.tags" :key="tag + idx" size="small" class="tag-chip" :style="getTagStyle(tag)">
|
||||
{{ tag }}
|
||||
</el-tag>
|
||||
<span v-if="!row.tags || row.tags.length === 0" class="text-muted">无</span>
|
||||
</div>
|
||||
</template>
|
||||
</el-table-column>
|
||||
|
||||
<el-table-column prop="created_at" label="创建时间" width="160">
|
||||
<template #default="{ row }">
|
||||
{{ formatDate(row.created_at) }}
|
||||
@@ -228,8 +239,8 @@
|
||||
<!-- 编辑节点对话框 -->
|
||||
<el-dialog v-model="editDialogVisible" title="编辑节点" width="800px" destroy-on-close>
|
||||
<NodeForm v-if="editDialogVisible" v-model="editForm" :submitting="updating" submit-text="更新节点" submit-icon="Edit"
|
||||
:show-connection-test="false" :show-agreement="false" :show-cancel="true" @submit="handleUpdateNode"
|
||||
@cancel="editDialogVisible = false" @reset="resetEditForm" />
|
||||
:show-connection-test="false" :show-agreement="false" :show-cancel="true" :show-tags="true"
|
||||
@submit="handleUpdateNode" @cancel="editDialogVisible = false" @reset="resetEditForm" />
|
||||
</el-dialog>
|
||||
</div>
|
||||
</template>
|
||||
@@ -240,6 +251,7 @@ import dayjs from 'dayjs'
|
||||
import { ElMessage, ElMessageBox } from 'element-plus'
|
||||
import { Check, Clock, DataAnalysis, CircleCheck, Loading } from '@element-plus/icons-vue'
|
||||
import NodeForm from '../components/NodeForm.vue'
|
||||
import { getTagStyle } from '../utils/tagColor'
|
||||
|
||||
export default {
|
||||
name: 'AdminDashboard',
|
||||
@@ -270,7 +282,8 @@ export default {
|
||||
protocol: 'tcp',
|
||||
version: '',
|
||||
max_connections: 100,
|
||||
description: ''
|
||||
description: '',
|
||||
tags: []
|
||||
},
|
||||
editingNodeId: null,
|
||||
updating: false
|
||||
@@ -302,6 +315,7 @@ export default {
|
||||
}
|
||||
},
|
||||
methods: {
|
||||
getTagStyle,
|
||||
async loadNodes() {
|
||||
try {
|
||||
this.loading = true
|
||||
@@ -379,13 +393,47 @@ export default {
|
||||
},
|
||||
editNode(node) {
|
||||
this.editingNodeId = node.id
|
||||
this.editForm = node
|
||||
// 只取需要的字段,并复制 tags 数组以避免引用问题
|
||||
this.editForm = {
|
||||
id: node.id,
|
||||
name: node.name,
|
||||
host: node.host,
|
||||
port: node.port,
|
||||
protocol: node.protocol,
|
||||
version: node.version,
|
||||
max_connections: node.max_connections,
|
||||
description: node.description || '',
|
||||
allow_relay: node.allow_relay,
|
||||
network_name: node.network_name,
|
||||
network_secret: node.network_secret,
|
||||
wechat: node.wechat,
|
||||
qq_number: node.qq_number,
|
||||
mail: node.mail,
|
||||
tags: Array.isArray(node.tags) ? [...node.tags] : []
|
||||
}
|
||||
this.editDialogVisible = true
|
||||
},
|
||||
async handleUpdateNode(formData) {
|
||||
try {
|
||||
this.updating = true
|
||||
await adminApi.updateNode(this.editingNodeId, formData)
|
||||
// 确保提交包含 tags 字段(为空数组也传)
|
||||
const payload = {
|
||||
name: formData.name,
|
||||
host: formData.host,
|
||||
port: formData.port,
|
||||
protocol: formData.protocol,
|
||||
version: formData.version,
|
||||
max_connections: formData.max_connections,
|
||||
description: formData.description,
|
||||
allow_relay: formData.allow_relay,
|
||||
network_name: formData.network_name,
|
||||
network_secret: formData.network_secret,
|
||||
wechat: formData.wechat,
|
||||
qq_number: formData.qq_number,
|
||||
mail: formData.mail,
|
||||
tags: Array.isArray(formData.tags) ? formData.tags : []
|
||||
}
|
||||
await adminApi.updateNode(this.editingNodeId, payload)
|
||||
ElMessage.success('节点更新成功')
|
||||
this.editDialogVisible = false
|
||||
await this.loadNodes()
|
||||
@@ -576,4 +624,8 @@ export default {
|
||||
.text-secondary {
|
||||
color: #909399;
|
||||
}
|
||||
|
||||
.tag-chip {
|
||||
margin-right: 4px;
|
||||
}
|
||||
</style>
|
||||
@@ -56,7 +56,7 @@
|
||||
|
||||
<!-- 搜索和筛选 -->
|
||||
<el-card class="filter-card">
|
||||
<el-row :gutter="20">
|
||||
<el-row :gutter="26">
|
||||
<el-col :span="8">
|
||||
<el-input v-model="searchText" placeholder="搜索节点名称、主机地址或描述" prefix-icon="Search" clearable
|
||||
@input="handleSearch" />
|
||||
@@ -77,14 +77,16 @@
|
||||
<el-option label="WSS" value="wss" />
|
||||
</el-select>
|
||||
</el-col>
|
||||
<!-- 新增:标签多选筛选 -->
|
||||
<el-col :span="4">
|
||||
<el-button type="primary" @click="refreshData" :loading="loading">
|
||||
<el-icon>
|
||||
<Refresh />
|
||||
</el-icon>
|
||||
刷新
|
||||
</el-button>
|
||||
<el-select v-model="selectedTags" multiple collapse-tags collapse-tags-tooltip filterable clearable
|
||||
placeholder="按标签筛选(可多选)" @change="handleFilter">
|
||||
<el-option v-for="tag in allTags" :key="tag" :label="tag" :value="tag">
|
||||
<span class="tag-option" :style="getTagStyle(tag)">{{ tag }}</span>
|
||||
</el-option>
|
||||
</el-select>
|
||||
</el-col>
|
||||
|
||||
<el-col :span="4">
|
||||
<el-button type="success" @click="$router.push('/submit')">
|
||||
<el-icon>
|
||||
@@ -97,17 +99,24 @@
|
||||
</el-card>
|
||||
|
||||
<!-- 节点列表 -->
|
||||
<el-card class="nodes-card">
|
||||
<el-card ref="nodesCardRef" class="nodes-card">
|
||||
<template #header>
|
||||
<div class="card-header">
|
||||
<span>节点列表</span>
|
||||
<span>
|
||||
节点列表
|
||||
<el-button type="text" :loading="loading" @click="refreshData" style="margin-left: 8px;">
|
||||
<el-icon>
|
||||
<Refresh />
|
||||
</el-icon>
|
||||
</el-button>
|
||||
</span>
|
||||
<el-tag :type="loading ? 'info' : 'success'">
|
||||
{{ loading ? '加载中...' : `共 ${pagination.total} 个节点` }}
|
||||
</el-tag>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<el-table :data="nodes" v-loading="loading" stripe style="width: 100%" row-key="id">
|
||||
<el-table ref="tableRef" :data="nodes" v-loading="loading" stripe style="width: 100%" row-key="id">
|
||||
<!-- 展开列 -->
|
||||
<el-table-column type="expand" width="50">
|
||||
<template #default="{ row }">
|
||||
@@ -151,7 +160,7 @@
|
||||
<template #default="{ row }">
|
||||
<div style="display: flex; flex-direction: column; gap: 1px; align-items: flex-start;">
|
||||
<el-tag v-if="row.version" size="small" style="font-size: 11px; padding: 1px 4px;">{{ row.version
|
||||
}}</el-tag>
|
||||
}}</el-tag>
|
||||
<span v-else class="text-muted" style="font-size: 11px;">未知</span>
|
||||
<el-tag :type="row.allow_relay ? 'success' : 'info'" size="small"
|
||||
style="font-size: 9px; padding: 1px 3px;">
|
||||
@@ -176,6 +185,18 @@
|
||||
<span class="description">{{ row.description || '暂无描述' }}</span>
|
||||
</template>
|
||||
</el-table-column>
|
||||
<!-- 新增:标签展示 -->
|
||||
<el-table-column label="标签" min-width="160">
|
||||
<template #default="{ row }">
|
||||
<div class="tags-list">
|
||||
<el-tag v-for="(tag, idx) in row.tags" :key="tag + idx" size="small" class="tag-chip"
|
||||
:style="getTagStyle(tag)" style="margin: 2px 6px 2px 0;">
|
||||
{{ tag }}
|
||||
</el-tag>
|
||||
<span v-if="!row.tags || row.tags.length === 0" class="text-muted">无</span>
|
||||
</div>
|
||||
</template>
|
||||
</el-table-column>
|
||||
|
||||
<el-table-column prop="created_at" label="创建时间" width="180">
|
||||
<template #default="{ row }">
|
||||
@@ -223,6 +244,16 @@
|
||||
<el-descriptions-item label="创建时间">{{ formatDate(selectedNode.created_at) }}</el-descriptions-item>
|
||||
<el-descriptions-item label="更新时间">{{ formatDate(selectedNode.updated_at) }}</el-descriptions-item>
|
||||
<el-descriptions-item label="描述" :span="2">{{ selectedNode.description || '暂无描述' }}</el-descriptions-item>
|
||||
<!-- 新增:标签 -->
|
||||
<el-descriptions-item label="标签" :span="2">
|
||||
<div class="tags-list">
|
||||
<el-tag v-for="(tag, idx) in selectedNode.tags" :key="tag + idx" size="small" class="tag-chip"
|
||||
style="margin: 2px 6px 2px 0;">
|
||||
{{ tag }}
|
||||
</el-tag>
|
||||
<span v-if="!selectedNode.tags || selectedNode.tags.length === 0" class="text-muted">无</span>
|
||||
</div>
|
||||
</el-descriptions-item>
|
||||
</el-descriptions>
|
||||
|
||||
<!-- 健康状态统计 -->
|
||||
@@ -261,7 +292,7 @@
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, reactive, onMounted, computed } from 'vue'
|
||||
import { ref, reactive, onMounted, computed, watch, nextTick, onBeforeUnmount } from 'vue'
|
||||
import { ElMessage } from 'element-plus'
|
||||
import { nodeApi } from '../api'
|
||||
import dayjs from 'dayjs'
|
||||
@@ -276,6 +307,7 @@ import {
|
||||
Refresh,
|
||||
Plus
|
||||
} from '@element-plus/icons-vue'
|
||||
import { getTagStyle } from '../utils/tagColor'
|
||||
|
||||
// 响应式数据
|
||||
const loading = ref(false)
|
||||
@@ -283,11 +315,18 @@ const nodes = ref([])
|
||||
const searchText = ref('')
|
||||
const statusFilter = ref('')
|
||||
const protocolFilter = ref('')
|
||||
const selectedTags = ref([])
|
||||
const allTags = ref([])
|
||||
const detailDialogVisible = ref(false)
|
||||
const selectedNode = ref(null)
|
||||
const healthStats = ref(null)
|
||||
const expandedRows = ref([])
|
||||
const apiUrl = ref(window.location.href)
|
||||
const tableRef = ref(null)
|
||||
const nodesCardRef = ref(null)
|
||||
|
||||
// 请求取消控制(避免重复请求覆盖)
|
||||
let fetchController = null
|
||||
|
||||
// 分页数据
|
||||
const pagination = reactive({
|
||||
@@ -309,6 +348,17 @@ const averageUptime = computed(() => {
|
||||
})
|
||||
|
||||
// 方法
|
||||
const fetchTags = async () => {
|
||||
try {
|
||||
const resp = await nodeApi.getAllTags()
|
||||
if (resp.success && Array.isArray(resp.data)) {
|
||||
allTags.value = resp.data
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('获取标签列表失败:', error)
|
||||
}
|
||||
}
|
||||
|
||||
const fetchNodes = async (with_loading = true) => {
|
||||
try {
|
||||
if (with_loading) {
|
||||
@@ -328,13 +378,26 @@ const fetchNodes = async (with_loading = true) => {
|
||||
if (protocolFilter.value) {
|
||||
params.protocol = protocolFilter.value
|
||||
}
|
||||
if (selectedTags.value && selectedTags.value.length > 0) {
|
||||
params.tags = selectedTags.value
|
||||
}
|
||||
|
||||
const response = await nodeApi.getNodes(params)
|
||||
// 取消上一请求,创建新的请求控制器
|
||||
if (fetchController) {
|
||||
try { fetchController.abort() } catch (_) { }
|
||||
}
|
||||
fetchController = new AbortController()
|
||||
|
||||
const response = await nodeApi.getNodes(params, { signal: fetchController.signal })
|
||||
if (response.success && response.data) {
|
||||
nodes.value = response.data.items
|
||||
pagination.total = response.data.total
|
||||
}
|
||||
} catch (error) {
|
||||
if (error.name === 'CanceledError' || error.name === 'AbortError') {
|
||||
// 被取消的旧请求,忽略
|
||||
return
|
||||
}
|
||||
console.error('获取节点列表失败:', error)
|
||||
ElMessage.error('获取节点列表失败')
|
||||
} finally {
|
||||
@@ -345,6 +408,7 @@ const fetchNodes = async (with_loading = true) => {
|
||||
}
|
||||
|
||||
const refreshData = () => {
|
||||
pagination.page = 1
|
||||
fetchNodes()
|
||||
}
|
||||
|
||||
@@ -408,12 +472,69 @@ const copyAddress = (address) => {
|
||||
|
||||
// 生命周期
|
||||
onMounted(() => {
|
||||
fetchTags()
|
||||
fetchNodes()
|
||||
|
||||
// 设置定时刷新
|
||||
setInterval(() => {
|
||||
fetchNodes(false)
|
||||
}, 3000) // 每30秒刷新一次
|
||||
}, 30000) // 每30秒刷新一次
|
||||
})
|
||||
|
||||
// 智能滚动处理:纵向滚动时页面整体滚动,横向滚动时表格内部滚动
|
||||
let wheelHandler = null
|
||||
let wheelTargets = []
|
||||
|
||||
const detachWheelHandlers = () => {
|
||||
if (wheelTargets && wheelTargets.length) {
|
||||
wheelTargets.forEach((el) => {
|
||||
try { el.removeEventListener('wheel', wheelHandler, { capture: true }) } catch (_) { }
|
||||
})
|
||||
}
|
||||
wheelTargets = []
|
||||
}
|
||||
|
||||
const attachWheelHandler = () => {
|
||||
const tableEl = tableRef.value?.$el
|
||||
const body = tableEl ? tableEl.querySelector('.el-table__body-wrapper') : null
|
||||
if (!body) return
|
||||
|
||||
detachWheelHandlers()
|
||||
const wrap = body.querySelector('.el-scrollbar__wrap') || body
|
||||
|
||||
wheelHandler = (e) => {
|
||||
const deltaX = e.deltaX
|
||||
const deltaY = e.deltaY
|
||||
|
||||
// 如果是横向滚动(Shift + 滚轮 或 触摸板横向滑动)
|
||||
if (Math.abs(deltaX) > Math.abs(deltaY) || e.shiftKey) {
|
||||
// 允许表格内部横向滚动,不阻止默认行为
|
||||
return
|
||||
}
|
||||
|
||||
// 如果是纵向滚动,阻止表格内部滚动,让页面整体滚动
|
||||
if (deltaY) {
|
||||
e.preventDefault()
|
||||
e.stopPropagation()
|
||||
const scroller = document.scrollingElement || document.documentElement
|
||||
scroller.scrollTop += deltaY
|
||||
}
|
||||
}
|
||||
|
||||
body.addEventListener('wheel', wheelHandler, { passive: false, capture: true })
|
||||
wheelTargets.push(body)
|
||||
}
|
||||
|
||||
onMounted(() => {
|
||||
nextTick(attachWheelHandler)
|
||||
})
|
||||
|
||||
watch(nodes, () => {
|
||||
nextTick(attachWheelHandler)
|
||||
})
|
||||
|
||||
onBeforeUnmount(() => {
|
||||
detachWheelHandlers()
|
||||
})
|
||||
</script>
|
||||
|
||||
@@ -570,4 +691,28 @@ onMounted(() => {
|
||||
background-color: #fafafa;
|
||||
border-top: 1px solid #ebeef5;
|
||||
}
|
||||
|
||||
.tag-option {
|
||||
display: inline-block;
|
||||
padding: 2px 6px;
|
||||
border-radius: 4px;
|
||||
font-size: 12px;
|
||||
}
|
||||
|
||||
:deep(.el-table__body-wrapper) {
|
||||
overflow-x: auto !important;
|
||||
overflow-y: hidden !important;
|
||||
height: auto !important;
|
||||
}
|
||||
|
||||
:deep(.el-card__body) {
|
||||
overflow: visible !important;
|
||||
}
|
||||
|
||||
:deep(.el-table__body-wrapper .el-scrollbar__wrap) {
|
||||
overflow-x: auto !important;
|
||||
overflow-y: hidden !important;
|
||||
height: auto !important;
|
||||
max-height: none !important;
|
||||
}
|
||||
</style>
|
||||
|
||||
@@ -18,11 +18,11 @@ export default defineConfig({
|
||||
server: {
|
||||
proxy: {
|
||||
'/api': {
|
||||
target: 'http://localhost:8080',
|
||||
target: 'http://localhost:11030',
|
||||
changeOrigin: true,
|
||||
},
|
||||
'/health': {
|
||||
target: 'http://localhost:8080',
|
||||
target: 'http://localhost:11030',
|
||||
changeOrigin: true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::ops::{Div, Mul};
|
||||
|
||||
use axum::extract::{Path, Query, State};
|
||||
use axum::Json;
|
||||
use axum::extract::{Path, State};
|
||||
use sea_orm::{
|
||||
ColumnTrait, Condition, EntityTrait, IntoActiveModel, ModelTrait, Order, PaginatorTrait,
|
||||
QueryFilter, QueryOrder, QuerySelect, Set, TryIntoModel,
|
||||
@@ -14,8 +14,9 @@ use crate::api::{
|
||||
models::*,
|
||||
};
|
||||
use crate::db::entity::{self, health_records, shared_nodes};
|
||||
use crate::db::{operations::*, Db};
|
||||
use crate::db::{Db, operations::*};
|
||||
use crate::health_checker_manager::HealthCheckerManager;
|
||||
use axum_extra::extract::Query;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -60,6 +61,35 @@ pub async fn get_nodes(
|
||||
);
|
||||
}
|
||||
|
||||
// 标签过滤(支持单标签与多标签 OR)
|
||||
let mut filtered_ids: Option<Vec<i32>> = None;
|
||||
if !filters.tags.is_empty() {
|
||||
let ids_any =
|
||||
NodeOperations::filter_node_ids_by_tags_any(&app_state.db, &filters.tags).await?;
|
||||
filtered_ids = match filtered_ids {
|
||||
Some(mut existing) => {
|
||||
// 合并去重
|
||||
existing.extend(ids_any);
|
||||
existing.sort();
|
||||
existing.dedup();
|
||||
Some(existing)
|
||||
}
|
||||
None => Some(ids_any),
|
||||
};
|
||||
}
|
||||
if let Some(ids) = filtered_ids {
|
||||
if ids.is_empty() {
|
||||
return Ok(Json(ApiResponse::success(PaginatedResponse {
|
||||
items: vec![],
|
||||
total: 0,
|
||||
page,
|
||||
per_page,
|
||||
total_pages: 0,
|
||||
})));
|
||||
}
|
||||
query = query.filter(entity::shared_nodes::Column::Id.is_in(ids));
|
||||
}
|
||||
|
||||
let total = query.clone().count(app_state.db.orm_db()).await?;
|
||||
let nodes = query
|
||||
.order_by_asc(entity::shared_nodes::Column::Id)
|
||||
@@ -71,6 +101,13 @@ pub async fn get_nodes(
|
||||
let mut node_responses: Vec<NodeResponse> = nodes.into_iter().map(NodeResponse::from).collect();
|
||||
let total_pages = total.div_ceil(per_page as u64);
|
||||
|
||||
// 补充标签
|
||||
let ids: Vec<i32> = node_responses.iter().map(|n| n.id).collect();
|
||||
let tags_map = NodeOperations::get_nodes_tags_map(&app_state.db, &ids).await?;
|
||||
for n in &mut node_responses {
|
||||
n.tags = tags_map.get(&n.id).cloned().unwrap_or_default();
|
||||
}
|
||||
|
||||
// 为每个节点添加健康状态信息
|
||||
for node_response in &mut node_responses {
|
||||
if let Some(mut health_record) = app_state
|
||||
@@ -99,7 +136,6 @@ pub async fn get_nodes(
|
||||
|
||||
// remove sensitive information
|
||||
node_responses.iter_mut().for_each(|node| {
|
||||
tracing::info!("node: {:?}", node);
|
||||
node.network_name = None;
|
||||
node.network_secret = None;
|
||||
|
||||
@@ -161,7 +197,10 @@ pub async fn get_node(
|
||||
.await?
|
||||
.ok_or_else(|| ApiError::NotFound(format!("Node with id {} not found", id)))?;
|
||||
|
||||
Ok(Json(ApiResponse::success(NodeResponse::from(node))))
|
||||
let mut resp = NodeResponse::from(node);
|
||||
resp.tags = NodeOperations::get_node_tags(&app_state.db, resp.id).await?;
|
||||
|
||||
Ok(Json(ApiResponse::success(resp)))
|
||||
}
|
||||
|
||||
pub async fn get_node_health(
|
||||
@@ -234,7 +273,7 @@ pub struct InstanceFilterParams {
|
||||
use crate::config::AppConfig;
|
||||
use axum::http::{HeaderMap, StatusCode};
|
||||
use chrono::{Duration, Utc};
|
||||
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
|
||||
use jsonwebtoken::{DecodingKey, EncodingKey, Header, Validation, decode, encode};
|
||||
use serde::Serialize;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@@ -325,6 +364,39 @@ pub async fn admin_get_nodes(
|
||||
);
|
||||
}
|
||||
|
||||
// 标签过滤(支持单标签与多标签 OR)
|
||||
let mut filtered_ids: Option<Vec<i32>> = None;
|
||||
if let Some(tag) = filters.tag {
|
||||
let ids = NodeOperations::filter_node_ids_by_tag(&app_state.db, &tag).await?;
|
||||
filtered_ids = Some(ids);
|
||||
}
|
||||
if let Some(tags) = filters.tags
|
||||
&& !tags.is_empty()
|
||||
{
|
||||
let ids_any = NodeOperations::filter_node_ids_by_tags_any(&app_state.db, &tags).await?;
|
||||
filtered_ids = match filtered_ids {
|
||||
Some(mut existing) => {
|
||||
existing.extend(ids_any);
|
||||
existing.sort();
|
||||
existing.dedup();
|
||||
Some(existing)
|
||||
}
|
||||
None => Some(ids_any),
|
||||
};
|
||||
}
|
||||
if let Some(ids) = filtered_ids {
|
||||
if ids.is_empty() {
|
||||
return Ok(Json(ApiResponse::success(PaginatedResponse {
|
||||
items: vec![],
|
||||
total: 0,
|
||||
page,
|
||||
per_page,
|
||||
total_pages: 0,
|
||||
})));
|
||||
}
|
||||
query = query.filter(entity::shared_nodes::Column::Id.is_in(ids));
|
||||
}
|
||||
|
||||
let total = query.clone().count(app_state.db.orm_db()).await?;
|
||||
|
||||
let nodes = query
|
||||
@@ -334,7 +406,14 @@ pub async fn admin_get_nodes(
|
||||
.all(app_state.db.orm_db())
|
||||
.await?;
|
||||
|
||||
let node_responses: Vec<NodeResponse> = nodes.into_iter().map(NodeResponse::from).collect();
|
||||
let mut node_responses: Vec<NodeResponse> = nodes.into_iter().map(NodeResponse::from).collect();
|
||||
|
||||
// 补充标签
|
||||
let ids: Vec<i32> = node_responses.iter().map(|n| n.id).collect();
|
||||
let tags_map = NodeOperations::get_nodes_tags_map(&app_state.db, &ids).await?;
|
||||
for n in &mut node_responses {
|
||||
n.tags = tags_map.get(&n.id).cloned().unwrap_or_default();
|
||||
}
|
||||
|
||||
let total_pages = (total as f64 / per_page as f64).ceil() as u32;
|
||||
|
||||
@@ -366,7 +445,10 @@ pub async fn admin_approve_node(
|
||||
.exec(app_state.db.orm_db())
|
||||
.await?;
|
||||
|
||||
Ok(Json(ApiResponse::success(NodeResponse::from(updated_node))))
|
||||
let mut resp = NodeResponse::from(updated_node);
|
||||
resp.tags = NodeOperations::get_node_tags(&app_state.db, resp.id).await?;
|
||||
|
||||
Ok(Json(ApiResponse::success(resp)))
|
||||
}
|
||||
|
||||
pub async fn admin_update_node(
|
||||
@@ -432,7 +514,15 @@ pub async fn admin_update_node(
|
||||
.exec(app_state.db.orm_db())
|
||||
.await?;
|
||||
|
||||
Ok(Json(ApiResponse::success(NodeResponse::from(updated_node))))
|
||||
// 更新标签
|
||||
if let Some(tags) = request.tags {
|
||||
NodeOperations::set_node_tags(&app_state.db, updated_node.id, tags).await?;
|
||||
}
|
||||
|
||||
let mut resp = NodeResponse::from(updated_node);
|
||||
resp.tags = NodeOperations::get_node_tags(&app_state.db, resp.id).await?;
|
||||
|
||||
Ok(Json(ApiResponse::success(resp)))
|
||||
}
|
||||
|
||||
pub async fn admin_revoke_approval(
|
||||
@@ -454,7 +544,10 @@ pub async fn admin_revoke_approval(
|
||||
.exec(app_state.db.orm_db())
|
||||
.await?;
|
||||
|
||||
Ok(Json(ApiResponse::success(NodeResponse::from(updated_node))))
|
||||
let mut resp = NodeResponse::from(updated_node);
|
||||
resp.tags = NodeOperations::get_node_tags(&app_state.db, resp.id).await?;
|
||||
|
||||
Ok(Json(ApiResponse::success(resp)))
|
||||
}
|
||||
|
||||
pub async fn admin_delete_node(
|
||||
@@ -505,3 +598,10 @@ fn verify_admin_token(headers: &HeaderMap) -> ApiResult<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_all_tags(
|
||||
State(app_state): State<AppState>,
|
||||
) -> ApiResult<Json<ApiResponse<Vec<String>>>> {
|
||||
let tags = NodeOperations::get_all_tags(&app_state.db).await?;
|
||||
Ok(Json(ApiResponse::success(tags)))
|
||||
}
|
||||
|
||||
@@ -162,6 +162,9 @@ pub struct UpdateNodeRequest {
|
||||
|
||||
#[validate(email)]
|
||||
pub mail: Option<String>,
|
||||
|
||||
// 标签字段(仅管理员可用)
|
||||
pub tags: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@@ -198,6 +201,7 @@ pub struct NodeResponse {
|
||||
pub qq_number: Option<String>,
|
||||
pub wechat: Option<String>,
|
||||
pub mail: Option<String>,
|
||||
pub tags: Vec<String>,
|
||||
}
|
||||
|
||||
impl From<entity::shared_nodes::Model> for NodeResponse {
|
||||
@@ -247,6 +251,7 @@ impl From<entity::shared_nodes::Model> for NodeResponse {
|
||||
} else {
|
||||
Some(node.mail)
|
||||
},
|
||||
tags: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -281,6 +286,8 @@ pub struct NodeFilterParams {
|
||||
pub is_active: Option<bool>,
|
||||
pub protocol: Option<String>,
|
||||
pub search: Option<String>,
|
||||
#[serde(default)]
|
||||
pub tags: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@@ -313,4 +320,6 @@ pub struct AdminNodeFilterParams {
|
||||
pub is_approved: Option<bool>,
|
||||
pub protocol: Option<String>,
|
||||
pub search: Option<String>,
|
||||
pub tag: Option<String>,
|
||||
pub tags: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use axum::routing::{delete, get, post, put};
|
||||
use axum::Router;
|
||||
use axum::routing::{delete, get, post, put};
|
||||
use tower_http::compression::CompressionLayer;
|
||||
use tower_http::cors::CorsLayer;
|
||||
|
||||
use super::handlers::AppState;
|
||||
use super::handlers::{
|
||||
admin_approve_node, admin_delete_node, admin_get_nodes, admin_login, admin_revoke_approval,
|
||||
admin_update_node, admin_verify_token, create_node, get_node, get_node_health,
|
||||
admin_update_node, admin_verify_token, create_node, get_all_tags, get_node, get_node_health,
|
||||
get_node_health_stats, get_nodes, health_check,
|
||||
};
|
||||
use crate::api::{get_node_connect_url, test_connection};
|
||||
@@ -38,6 +38,7 @@ pub fn create_routes() -> Router<AppState> {
|
||||
.route("/node/{id}", get(get_node_connect_url))
|
||||
.route("/health", get(health_check))
|
||||
.route("/api/nodes", get(get_nodes).post(create_node))
|
||||
.route("/api/tags", get(get_all_tags))
|
||||
.route("/api/test_connection", post(test_connection))
|
||||
.route("/api/nodes/{id}/health", get(get_node_health))
|
||||
.route("/api/nodes/{id}/health/stats", get(get_node_health_stats))
|
||||
|
||||
@@ -2,6 +2,8 @@ use std::env;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use easytier::common::config::{ConsoleLoggerConfig, FileLoggerConfig, LoggingConfig};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AppConfig {
|
||||
pub server: ServerConfig,
|
||||
@@ -32,12 +34,6 @@ pub struct HealthCheckConfig {
|
||||
pub max_retries: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LoggingConfig {
|
||||
pub level: String,
|
||||
pub rust_log: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CorsConfig {
|
||||
pub allowed_origins: Vec<String>,
|
||||
@@ -100,8 +96,14 @@ impl AppConfig {
|
||||
};
|
||||
|
||||
let logging_config = LoggingConfig {
|
||||
level: env::var("LOG_LEVEL").unwrap_or_else(|_| "info".to_string()),
|
||||
rust_log: env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()),
|
||||
file_logger: Some(FileLoggerConfig {
|
||||
level: Some(env::var("LOG_LEVEL").unwrap_or_else(|_| "info".to_string())),
|
||||
file: Some("easytier-uptime.log".to_string()),
|
||||
..Default::default()
|
||||
}),
|
||||
console_logger: Some(ConsoleLoggerConfig {
|
||||
level: Some(env::var("LOG_LEVEL").unwrap_or_else(|_| "info".to_string())),
|
||||
}),
|
||||
};
|
||||
|
||||
let cors_config = CorsConfig {
|
||||
@@ -161,8 +163,14 @@ impl AppConfig {
|
||||
max_retries: 3,
|
||||
},
|
||||
logging: LoggingConfig {
|
||||
level: "info".to_string(),
|
||||
rust_log: "info".to_string(),
|
||||
file_logger: Some(FileLoggerConfig {
|
||||
level: Some("info".to_string()),
|
||||
file: Some("easytier-uptime.log".to_string()),
|
||||
..Default::default()
|
||||
}),
|
||||
console_logger: Some(ConsoleLoggerConfig {
|
||||
level: Some("info".to_string()),
|
||||
}),
|
||||
},
|
||||
cors: CorsConfig {
|
||||
allowed_origins: vec![
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use crate::db::entity::*;
|
||||
use crate::db::Db;
|
||||
use crate::db::entity::*;
|
||||
use sea_orm::*;
|
||||
use tokio::time::{sleep, Duration};
|
||||
use tokio::time::{Duration, sleep};
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
/// 数据清理策略配置
|
||||
|
||||
@@ -3,4 +3,5 @@
|
||||
pub mod prelude;
|
||||
|
||||
pub mod health_records;
|
||||
pub mod node_tags;
|
||||
pub mod shared_nodes;
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
//! `SeaORM` Entity for node tags
|
||||
|
||||
use sea_orm::entity::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
|
||||
#[sea_orm(table_name = "node_tags")]
|
||||
pub struct Model {
|
||||
#[sea_orm(primary_key)]
|
||||
pub id: i32,
|
||||
pub node_id: i32,
|
||||
pub tag: String,
|
||||
pub created_at: DateTimeWithTimeZone,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
|
||||
pub enum Relation {
|
||||
#[sea_orm(
|
||||
belongs_to = "super::shared_nodes::Entity",
|
||||
from = "Column::NodeId",
|
||||
to = "super::shared_nodes::Column::Id"
|
||||
)]
|
||||
SharedNodes,
|
||||
}
|
||||
|
||||
impl Related<super::shared_nodes::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::SharedNodes.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
||||
@@ -1,4 +1,5 @@
|
||||
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
|
||||
|
||||
pub use super::health_records::Entity as HealthRecords;
|
||||
pub use super::node_tags::Entity as NodeTags;
|
||||
pub use super::shared_nodes::Entity as SharedNodes;
|
||||
|
||||
@@ -33,6 +33,9 @@ pub struct Model {
|
||||
pub enum Relation {
|
||||
#[sea_orm(has_many = "super::health_records::Entity")]
|
||||
HealthRecords,
|
||||
// add relation to node_tags
|
||||
#[sea_orm(has_many = "super::node_tags::Entity")]
|
||||
NodeTags,
|
||||
}
|
||||
|
||||
impl Related<super::health_records::Entity> for Entity {
|
||||
@@ -41,4 +44,10 @@ impl Related<super::health_records::Entity> for Entity {
|
||||
}
|
||||
}
|
||||
|
||||
impl Related<super::node_tags::Entity> for Entity {
|
||||
fn to() -> RelationDef {
|
||||
Relation::NodeTags.def()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActiveModelBehavior for ActiveModel {}
|
||||
|
||||
@@ -5,12 +5,12 @@ pub mod operations;
|
||||
use std::fmt;
|
||||
|
||||
use sea_orm::{
|
||||
prelude::*, sea_query::OnConflict, ColumnTrait as _, DatabaseConnection, DbErr, EntityTrait,
|
||||
QueryFilter as _, Set, SqlxSqliteConnector, Statement, TransactionTrait as _,
|
||||
ColumnTrait as _, DatabaseConnection, DbErr, EntityTrait, QueryFilter as _, Set,
|
||||
SqlxSqliteConnector, Statement, TransactionTrait as _, prelude::*, sea_query::OnConflict,
|
||||
};
|
||||
use sea_orm_migration::MigratorTrait as _;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::{migrate::MigrateDatabase as _, Sqlite, SqlitePool};
|
||||
use sqlx::{Sqlite, SqlitePool, migrate::MigrateDatabase as _};
|
||||
|
||||
use crate::migrator;
|
||||
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
use crate::api::CreateNodeRequest;
|
||||
use crate::db::entity::*;
|
||||
use crate::db::Db;
|
||||
use crate::db::HealthStats;
|
||||
use crate::db::HealthStatus;
|
||||
use crate::db::entity::*;
|
||||
use sea_orm::*;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
/// 节点管理操作
|
||||
pub struct NodeOperations;
|
||||
@@ -229,6 +230,128 @@ impl HealthOperations {
|
||||
Ok(result.rows_affected)
|
||||
}
|
||||
}
|
||||
impl NodeOperations {
|
||||
/// 获取节点的全部标签
|
||||
pub async fn get_node_tags(db: &Db, node_id: i32) -> Result<Vec<String>, DbErr> {
|
||||
let tags = node_tags::Entity::find()
|
||||
.filter(node_tags::Column::NodeId.eq(node_id))
|
||||
.all(db.orm_db())
|
||||
.await?;
|
||||
Ok(tags.into_iter().map(|m| m.tag).collect())
|
||||
}
|
||||
|
||||
/// 批量获取节点的标签映射
|
||||
pub async fn get_nodes_tags_map(
|
||||
db: &Db,
|
||||
node_ids: &[i32],
|
||||
) -> Result<HashMap<i32, Vec<String>>, DbErr> {
|
||||
if node_ids.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let tags = node_tags::Entity::find()
|
||||
.filter(node_tags::Column::NodeId.is_in(node_ids.to_vec()))
|
||||
.order_by_asc(node_tags::Column::NodeId)
|
||||
.all(db.orm_db())
|
||||
.await?;
|
||||
let mut map: HashMap<i32, Vec<String>> = HashMap::new();
|
||||
for t in tags {
|
||||
map.entry(t.node_id).or_default().push(t.tag);
|
||||
}
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
/// 使用标签过滤节点(返回节点ID)
|
||||
pub async fn filter_node_ids_by_tag(db: &Db, tag: &str) -> Result<Vec<i32>, DbErr> {
|
||||
let tagged = node_tags::Entity::find()
|
||||
.filter(node_tags::Column::Tag.eq(tag))
|
||||
.all(db.orm_db())
|
||||
.await?;
|
||||
Ok(tagged.into_iter().map(|m| m.node_id).collect())
|
||||
}
|
||||
|
||||
/// 设置节点标签(替换为给定集合)
|
||||
pub async fn set_node_tags(db: &Db, node_id: i32, tags: Vec<String>) -> Result<(), DbErr> {
|
||||
// 去重与清理空白
|
||||
let mut set: HashSet<String> = HashSet::new();
|
||||
for tag in tags.into_iter() {
|
||||
let trimmed = tag.trim();
|
||||
if !trimmed.is_empty() {
|
||||
set.insert(trimmed.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// 取出当前标签
|
||||
let existing = node_tags::Entity::find()
|
||||
.filter(node_tags::Column::NodeId.eq(node_id))
|
||||
.all(db.orm_db())
|
||||
.await?;
|
||||
|
||||
let existing_set: HashSet<String> = existing.iter().map(|m| m.tag.clone()).collect();
|
||||
|
||||
// 需要删除的
|
||||
let to_delete: Vec<i32> = existing
|
||||
.iter()
|
||||
.filter(|m| !set.contains(&m.tag))
|
||||
.map(|m| m.id)
|
||||
.collect();
|
||||
|
||||
// 需要新增的
|
||||
let to_insert: Vec<String> = set
|
||||
.into_iter()
|
||||
.filter(|t| !existing_set.contains(t))
|
||||
.collect();
|
||||
|
||||
// 执行删除
|
||||
if !to_delete.is_empty() {
|
||||
node_tags::Entity::delete_many()
|
||||
.filter(node_tags::Column::Id.is_in(to_delete))
|
||||
.exec(db.orm_db())
|
||||
.await?;
|
||||
}
|
||||
|
||||
// 执行新增
|
||||
for t in to_insert {
|
||||
let now = chrono::Utc::now().fixed_offset();
|
||||
let am = node_tags::ActiveModel {
|
||||
id: NotSet,
|
||||
node_id: Set(node_id),
|
||||
tag: Set(t),
|
||||
created_at: Set(now),
|
||||
};
|
||||
node_tags::Entity::insert(am).exec(db.orm_db()).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// 新增:获取所有唯一标签(按字母排序)
|
||||
pub async fn get_all_tags(db: &Db) -> Result<Vec<String>, DbErr> {
|
||||
let rows = node_tags::Entity::find().all(db.orm_db()).await?;
|
||||
let mut set: HashSet<String> = HashSet::new();
|
||||
for r in rows {
|
||||
set.insert(r.tag);
|
||||
}
|
||||
let mut list: Vec<String> = set.into_iter().collect();
|
||||
list.sort();
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
// 新增:使用多标签(OR 语义)过滤节点,返回匹配的节点ID
|
||||
pub async fn filter_node_ids_by_tags_any(db: &Db, tags: &[String]) -> Result<Vec<i32>, DbErr> {
|
||||
if tags.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
let tagged = node_tags::Entity::find()
|
||||
.filter(node_tags::Column::Tag.is_in(tags.to_vec()))
|
||||
.all(db.orm_db())
|
||||
.await?;
|
||||
let mut set: HashSet<i32> = HashSet::new();
|
||||
for m in tagged {
|
||||
set.insert(m.node_id);
|
||||
}
|
||||
Ok(set.into_iter().collect())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -7,22 +7,21 @@ use std::{
|
||||
use anyhow::Context as _;
|
||||
use dashmap::DashMap;
|
||||
use easytier::{
|
||||
common::{
|
||||
config::{ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader},
|
||||
scoped_task::ScopedTask,
|
||||
common::config::{
|
||||
ConfigFileControl, ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader,
|
||||
},
|
||||
defer,
|
||||
instance_manager::NetworkInstanceManager,
|
||||
launcher::ConfigSource,
|
||||
};
|
||||
use guarden::defer;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::any;
|
||||
use tokio_util::task::AbortOnDropHandle;
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
|
||||
use crate::db::{
|
||||
Db, HealthStatus,
|
||||
entity::shared_nodes,
|
||||
operations::{HealthOperations, NodeOperations},
|
||||
Db, HealthStatus,
|
||||
};
|
||||
|
||||
pub struct HealthCheckOneNode {
|
||||
@@ -241,7 +240,7 @@ pub struct HealthChecker {
|
||||
db: Db,
|
||||
instance_mgr: Arc<NetworkInstanceManager>,
|
||||
inst_id_map: DashMap<i32, uuid::Uuid>,
|
||||
node_tasks: DashMap<i32, ScopedTask<()>>,
|
||||
node_tasks: DashMap<i32, AbortOnDropHandle<()>>,
|
||||
node_records: Arc<DashMap<i32, HealthyMemRecord>>,
|
||||
node_cfg: Arc<DashMap<i32, TomlConfigLoader>>,
|
||||
}
|
||||
@@ -360,6 +359,7 @@ impl HealthChecker {
|
||||
)
|
||||
.parse()
|
||||
.with_context(|| "failed to parse peer uri")?,
|
||||
peer_public_key: None,
|
||||
}]);
|
||||
|
||||
let inst_id = inst_id.unwrap_or(uuid::Uuid::new_v4());
|
||||
@@ -375,6 +375,7 @@ impl HealthChecker {
|
||||
flags.no_tun = true;
|
||||
flags.disable_p2p = true;
|
||||
flags.disable_udp_hole_punching = true;
|
||||
flags.disable_tcp_hole_punching = true;
|
||||
cfg.set_flags(flags);
|
||||
|
||||
Ok(cfg)
|
||||
@@ -392,7 +393,7 @@ impl HealthChecker {
|
||||
.delete_network_instance(vec![cfg.get_id()]);
|
||||
});
|
||||
self.instance_mgr
|
||||
.run_network_instance(cfg.clone(), ConfigSource::FFI)
|
||||
.run_network_instance(cfg.clone(), false, ConfigFileControl::STATIC_CONFIG)
|
||||
.with_context(|| "failed to run network instance")?;
|
||||
|
||||
let now = Instant::now();
|
||||
@@ -436,7 +437,7 @@ impl HealthChecker {
|
||||
);
|
||||
|
||||
self.instance_mgr
|
||||
.run_network_instance(cfg.clone(), ConfigSource::Web)
|
||||
.run_network_instance(cfg.clone(), true, ConfigFileControl::STATIC_CONFIG)
|
||||
.with_context(|| "failed to run network instance")?;
|
||||
self.inst_id_map.insert(node_id, cfg.get_id());
|
||||
|
||||
@@ -464,7 +465,7 @@ impl HealthChecker {
|
||||
}
|
||||
|
||||
// 启动健康检查任务
|
||||
let task = ScopedTask::from(tokio::spawn(Self::node_health_check_task(
|
||||
let task = AbortOnDropHandle::new(tokio::spawn(Self::node_health_check_task(
|
||||
node_id,
|
||||
cfg.get_id(),
|
||||
Arc::clone(&self.instance_mgr),
|
||||
@@ -497,7 +498,7 @@ impl HealthChecker {
|
||||
instance_mgr: Arc<NetworkInstanceManager>,
|
||||
// return version, response time on healthy, conn_count
|
||||
) -> anyhow::Result<(String, u64, u32)> {
|
||||
let Some(instance) = instance_mgr.get_network_info(&inst_id) else {
|
||||
let Some(instance) = instance_mgr.get_network_info(&inst_id).await else {
|
||||
anyhow::bail!("healthy check node is not started");
|
||||
};
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use std::{collections::HashSet, sync::Arc, time::Duration};
|
||||
|
||||
use anyhow::Context as _;
|
||||
use tokio::time::{interval, Interval};
|
||||
use tokio::time::{Interval, interval};
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::{
|
||||
db::{entity::shared_nodes, operations::NodeOperations, Db},
|
||||
db::{Db, entity::shared_nodes, operations::NodeOperations},
|
||||
health_checker::HealthChecker,
|
||||
};
|
||||
|
||||
|
||||
@@ -10,7 +10,8 @@ mod migrator;
|
||||
use api::routes::create_routes;
|
||||
use clap::Parser;
|
||||
use config::AppConfig;
|
||||
use db::{operations::NodeOperations, Db};
|
||||
use db::{Db, operations::NodeOperations};
|
||||
use easytier::common::log;
|
||||
use health_checker::HealthChecker;
|
||||
use health_checker_manager::HealthCheckerManager;
|
||||
use std::env;
|
||||
@@ -22,6 +23,11 @@ use tracing_subscriber::EnvFilter;
|
||||
|
||||
use crate::db::cleanup::{CleanupConfig, CleanupManager};
|
||||
|
||||
use mimalloc::MiMalloc;
|
||||
|
||||
#[global_allocator]
|
||||
static GLOBAL_MIMALLOC: MiMalloc = MiMalloc;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Args {
|
||||
@@ -30,31 +36,22 @@ struct Args {
|
||||
admin_password: Option<String>,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
#[tokio::main(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
// 加载配置
|
||||
let config = AppConfig::default();
|
||||
|
||||
// 初始化日志
|
||||
tracing_subscriber::fmt()
|
||||
.with_max_level(match config.logging.level.as_str() {
|
||||
"debug" => tracing::Level::DEBUG,
|
||||
"info" => tracing::Level::INFO,
|
||||
"warn" => tracing::Level::WARN,
|
||||
"error" => tracing::Level::ERROR,
|
||||
_ => tracing::Level::INFO,
|
||||
})
|
||||
.with_target(false)
|
||||
.with_thread_ids(true)
|
||||
.with_env_filter(EnvFilter::new("easytier_uptime"))
|
||||
.init();
|
||||
let _ = log::init(&config.logging, false);
|
||||
|
||||
// 解析命令行参数
|
||||
let args = Args::parse();
|
||||
|
||||
// 如果提供了管理员密码,设置环境变量
|
||||
if let Some(password) = args.admin_password {
|
||||
env::set_var("ADMIN_PASSWORD", password);
|
||||
unsafe {
|
||||
env::set_var("ADMIN_PASSWORD", password);
|
||||
}
|
||||
}
|
||||
|
||||
tracing::info!(
|
||||
|
||||
@@ -0,0 +1,119 @@
|
||||
use sea_orm_migration::{prelude::*, schema::*};
|
||||
|
||||
#[derive(DeriveMigrationName)]
|
||||
pub struct Migration;
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum NodeTags {
|
||||
Table,
|
||||
Id,
|
||||
NodeId,
|
||||
Tag,
|
||||
CreatedAt,
|
||||
}
|
||||
|
||||
#[derive(DeriveIden)]
|
||||
enum SharedNodes {
|
||||
Table,
|
||||
Id,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigrationTrait for Migration {
|
||||
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// 创建 node_tags 表
|
||||
manager
|
||||
.create_table(
|
||||
Table::create()
|
||||
.table(NodeTags::Table)
|
||||
.if_not_exists()
|
||||
.col(pk_auto(NodeTags::Id).not_null())
|
||||
.col(integer(NodeTags::NodeId).not_null())
|
||||
.col(string(NodeTags::Tag).not_null())
|
||||
.col(
|
||||
timestamp_with_time_zone(NodeTags::CreatedAt)
|
||||
.not_null()
|
||||
.default(Expr::current_timestamp()),
|
||||
)
|
||||
.foreign_key(
|
||||
ForeignKey::create()
|
||||
.name("fk_node_tags_node")
|
||||
.from(NodeTags::Table, NodeTags::NodeId)
|
||||
.to(SharedNodes::Table, SharedNodes::Id)
|
||||
.on_delete(ForeignKeyAction::Cascade)
|
||||
.on_update(ForeignKeyAction::Cascade),
|
||||
)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 索引:NodeId
|
||||
manager
|
||||
.create_index(
|
||||
Index::create()
|
||||
.name("idx_node_tags_node")
|
||||
.table(NodeTags::Table)
|
||||
.col(NodeTags::NodeId)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 索引:Tag
|
||||
manager
|
||||
.create_index(
|
||||
Index::create()
|
||||
.name("idx_node_tags_tag")
|
||||
.table(NodeTags::Table)
|
||||
.col(NodeTags::Tag)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 唯一索引:每个节点的标签唯一
|
||||
manager
|
||||
.create_index(
|
||||
Index::create()
|
||||
.name("uniq_node_tag_per_node")
|
||||
.table(NodeTags::Table)
|
||||
.col(NodeTags::NodeId)
|
||||
.col(NodeTags::Tag)
|
||||
.unique()
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
|
||||
// 先删除索引
|
||||
manager
|
||||
.drop_index(
|
||||
Index::drop()
|
||||
.name("idx_node_tags_node")
|
||||
.table(NodeTags::Table)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
manager
|
||||
.drop_index(
|
||||
Index::drop()
|
||||
.name("idx_node_tags_tag")
|
||||
.table(NodeTags::Table)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
manager
|
||||
.drop_index(
|
||||
Index::drop()
|
||||
.name("uniq_node_tag_per_node")
|
||||
.table(NodeTags::Table)
|
||||
.to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
manager
|
||||
.drop_table(Table::drop().table(NodeTags::Table).to_owned())
|
||||
.await
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,16 @@
|
||||
use sea_orm_migration::prelude::*;
|
||||
|
||||
mod m20250101_000001_create_tables;
|
||||
mod m20250101_000002_create_node_tags;
|
||||
|
||||
pub struct Migrator;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MigratorTrait for Migrator {
|
||||
fn migrations() -> Vec<Box<dyn MigrationTrait>> {
|
||||
vec![Box::new(m20250101_000001_create_tables::Migration)]
|
||||
vec![
|
||||
Box::new(m20250101_000001_create_tables::Migration),
|
||||
Box::new(m20250101_000002_create_node_tags::Migration),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "easytier-gui",
|
||||
"type": "module",
|
||||
"version": "2.4.4",
|
||||
"version": "2.6.4",
|
||||
"private": true,
|
||||
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
|
||||
"scripts": {
|
||||
@@ -13,18 +13,17 @@
|
||||
"lint:fix": "eslint . --ignore-pattern src-tauri --fix"
|
||||
},
|
||||
"dependencies": {
|
||||
"@primevue/themes": "4.3.3",
|
||||
"@primeuix/themes": "^1.2.3",
|
||||
"@tauri-apps/plugin-autostart": "2.0.0",
|
||||
"@tauri-apps/plugin-clipboard-manager": "2.3.0",
|
||||
"@tauri-apps/plugin-os": "2.3.0",
|
||||
"@tauri-apps/plugin-process": "2.3.0",
|
||||
"@tauri-apps/plugin-shell": "2.3.0",
|
||||
"@vueuse/core": "^11.2.0",
|
||||
"aura": "link:@primevue\\themes\\aura",
|
||||
"easytier-frontend-lib": "workspace:*",
|
||||
"ip-num": "1.5.1",
|
||||
"pinia": "^2.2.4",
|
||||
"primevue": "4.3.3",
|
||||
"primevue": "^4.3.9",
|
||||
"tauri-plugin-vpnservice-api": "workspace:*",
|
||||
"vue": "^3.5.12",
|
||||
"vue-router": "^4.4.5"
|
||||
@@ -32,7 +31,7 @@
|
||||
"devDependencies": {
|
||||
"@antfu/eslint-config": "^3.7.3",
|
||||
"@intlify/unplugin-vue-i18n": "^5.2.0",
|
||||
"@primevue/auto-import-resolver": "4.3.3",
|
||||
"@primevue/auto-import-resolver": "4.3.9",
|
||||
"@tauri-apps/api": "2.7.0",
|
||||
"@tauri-apps/cli": "2.7.1",
|
||||
"@types/default-gateway": "^7.2.2",
|
||||
@@ -54,7 +53,7 @@
|
||||
"unplugin-vue-markdown": "^0.26.2",
|
||||
"unplugin-vue-router": "^0.10.8",
|
||||
"uuid": "^10.0.0",
|
||||
"vite": "^5.4.8",
|
||||
"vite": "^5.4.21",
|
||||
"vite-plugin-vue-devtools": "^7.4.6",
|
||||
"vite-plugin-vue-layouts": "^0.11.0",
|
||||
"vue-i18n": "^10.0.0",
|
||||
|
||||
Generated
-7220
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user