refactor(rpc): Centralize RPC service and unify API (#1427)
This change introduces a major refactoring of the RPC service layer to improve modularity, unify the API, and simplify the overall architecture.

Key changes:

- Replaced per-network-instance RPC services with a single global RPC server, reducing resource usage and simplifying management.
- All clients (CLI, Web UI, etc.) now interact with the EasyTier core through a unified RPC entrypoint, enabling consistent authentication and control.
- Moved RPC implementation logic to `easytier/src/rpc_service/`, organized by functionality (e.g., `instance_manage.rs`, `peer_manage.rs`, `config.rs`) for better maintainability.
- Standardized Protobuf API definitions under `easytier/src/proto/` with an `api_` prefix (e.g., `cli.proto` → `api_instance.proto`) to provide a consistent interface.
- CLI commands now require an explicit `--instance-id` or `--instance-name` when multiple network instances are running; the parameter is optional when only one instance exists.

BREAKING CHANGE: RPC portal configuration (`rpc_portal` and `rpc_portal_whitelist`) has been removed from per-instance configs and the Web UI. The RPC listen address must now be specified globally via the `--rpc-portal` command-line flag or the `ET_RPC_PORTAL` environment variable, as there is only one RPC service for the entire application.
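The instance-selection rule for CLI commands is not visible in the diff below. As a hypothetical sketch (the function name and types are illustrative, not EasyTier's actual API), the disambiguation described above amounts to:

```rust
// Hypothetical sketch of the --instance-id / --instance-name rule: a target
// instance must be named only when more than one instance is running.
fn select_instance<'a>(
    running: &'a [String],   // names of running network instances
    requested: Option<&str>, // value of --instance-name, if given
) -> Result<&'a String, String> {
    match (requested, running) {
        // An explicit name must match a running instance.
        (Some(name), _) => running
            .iter()
            .find(|i| i.as_str() == name)
            .ok_or_else(|| format!("no instance named {}", name)),
        // With exactly one instance, the parameter may be omitted.
        (None, [single]) => Ok(single),
        // With several instances, the caller must disambiguate.
        (None, _) => Err("multiple instances running; pass --instance-id or --instance-name".into()),
    }
}

fn main() {
    let running = vec!["homelab".to_string()];
    // Single instance: no --instance-name needed.
    assert!(select_instance(&running, None).is_ok());
}
```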
@@ -30,6 +30,7 @@ use easytier::{
     instance_manager::NetworkInstanceManager,
     launcher::{add_proxy_network_to_config, ConfigSource},
     proto::common::{CompressionAlgoPb, NatType},
+    rpc_service::ApiRpcServer,
     tunnel::{IpVersion, PROTO_PORT_OFFSET},
     utils::{init_logger, setup_panic_handler},
     web_client,
@@ -130,6 +131,9 @@ struct Cli {
     #[command(flatten)]
     logging_options: LoggingOptions,
 
+    #[command(flatten)]
+    rpc_portal_options: RpcPortalOptions,
+
     #[clap(long, help = t!("core_clap.generate_completions").to_string())]
     gen_autocomplete: Option<Shell>,
 
@@ -205,22 +209,6 @@ struct NetworkOptions {
     )]
     proxy_networks: Vec<String>,
 
-    #[arg(
-        short,
-        long,
-        env = "ET_RPC_PORTAL",
-        help = t!("core_clap.rpc_portal").to_string(),
-    )]
-    rpc_portal: Option<String>,
-
-    #[arg(
-        long,
-        env = "ET_RPC_PORTAL_WHITELIST",
-        value_delimiter = ',',
-        help = t!("core_clap.rpc_portal_whitelist").to_string(),
-    )]
-    rpc_portal_whitelist: Option<Vec<IpCidr>>,
-
     #[arg(
         short,
         long,
@@ -624,6 +612,25 @@ struct LoggingOptions {
     file_log_count: Option<usize>,
 }
 
+#[derive(Parser, Debug)]
+struct RpcPortalOptions {
+    #[arg(
+        short,
+        long,
+        env = "ET_RPC_PORTAL",
+        help = t!("core_clap.rpc_portal").to_string(),
+    )]
+    rpc_portal: Option<String>,
+
+    #[arg(
+        long,
+        env = "ET_RPC_PORTAL_WHITELIST",
+        value_delimiter = ',',
+        help = t!("core_clap.rpc_portal_whitelist").to_string(),
+    )]
+    rpc_portal_whitelist: Option<Vec<IpCidr>>,
+}
+
 rust_i18n::i18n!("locales", fallback = "en");
 
 impl Cli {
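The `RpcPortalOptions` group added above relies on clap's flatten and env support. A minimal self-contained sketch of that pattern (field types simplified to `String`; assumes clap with the `derive` and `env` features enabled):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct RpcPortalOptions {
    // A CLI flag wins over the environment variable when both are set.
    #[arg(long, env = "ET_RPC_PORTAL")]
    rpc_portal: Option<String>,

    // Comma-separated list, e.g. ET_RPC_PORTAL_WHITELIST=127.0.0.0/8,::1/128
    #[arg(long, env = "ET_RPC_PORTAL_WHITELIST", value_delimiter = ',')]
    rpc_portal_whitelist: Option<Vec<String>>,
}

#[derive(Parser, Debug)]
struct Cli {
    // Flattening merges the group's flags into the top-level CLI.
    #[command(flatten)]
    rpc_portal_options: RpcPortalOptions,
}

fn main() {
    let cli = Cli::parse();
    println!("{:?}", cli.rpc_portal_options);
}
```

With clap's env support, `--rpc-portal` on the command line overrides `ET_RPC_PORTAL` when both are present.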
@@ -671,14 +678,6 @@ impl Cli {
 
         Ok(listeners)
     }
-
-    fn parse_rpc_portal(rpc_portal: String) -> anyhow::Result<SocketAddr> {
-        if let Ok(port) = rpc_portal.parse::<u16>() {
-            return Ok(format!("0.0.0.0:{}", port).parse().unwrap());
-        }
-
-        Ok(rpc_portal.parse()?)
-    }
 }
 
 impl NetworkOptions {
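For reference, the behavior of the helper removed here (presumably absorbed into `easytier/src/rpc_service/`), reconstructed as a standalone function with the signature adapted to use only the standard library:

```rust
// A bare port such as "15888" expands to the all-interfaces address
// "0.0.0.0:15888", while anything else must parse as a complete SocketAddr.
use std::net::SocketAddr;

fn parse_rpc_portal(rpc_portal: &str) -> Result<SocketAddr, std::net::AddrParseError> {
    if let Ok(port) = rpc_portal.parse::<u16>() {
        return format!("0.0.0.0:{}", port).parse();
    }
    rpc_portal.parse()
}

fn main() {
    assert_eq!(
        parse_rpc_portal("15888").unwrap(),
        "0.0.0.0:15888".parse::<SocketAddr>().unwrap()
    );
    assert_eq!(
        parse_rpc_portal("127.0.0.1:15888").unwrap(),
        "127.0.0.1:15888".parse::<SocketAddr>().unwrap()
    );
    assert!(parse_rpc_portal("not-an-address").is_err());
}
```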
@@ -786,24 +785,6 @@ impl NetworkOptions {
             add_proxy_network_to_config(n, cfg)?;
         }
 
-        let rpc_portal = if let Some(r) = &self.rpc_portal {
-            Cli::parse_rpc_portal(r.clone())
-                .with_context(|| format!("failed to parse rpc portal: {}", r))?
-        } else if let Some(r) = cfg.get_rpc_portal() {
-            r
-        } else {
-            Cli::parse_rpc_portal("0".into())?
-        };
-        cfg.set_rpc_portal(rpc_portal);
-
-        if let Some(rpc_portal_whitelist) = &self.rpc_portal_whitelist {
-            let mut whitelist = cfg.get_rpc_portal_whitelist().unwrap_or_default();
-            for cidr in rpc_portal_whitelist {
-                whitelist.push(*cidr);
-            }
-            cfg.set_rpc_portal_whitelist(Some(whitelist));
-        }
-
         if let Some(external_nodes) = self.external_node.as_ref() {
             let mut old_peers = cfg.get_peers();
             old_peers.push(PeerConfig {
@@ -1127,6 +1108,16 @@ fn win_service_main(arg: Vec<std::ffi::OsString>) {
 async fn run_main(cli: Cli) -> anyhow::Result<()> {
     init_logger(&cli.logging_options, true)?;
 
+    let manager = Arc::new(NetworkInstanceManager::new());
+
+    let _rpc_server = ApiRpcServer::new(
+        cli.rpc_portal_options.rpc_portal,
+        cli.rpc_portal_options.rpc_portal_whitelist,
+        manager.clone(),
+    )?
+    .serve()
+    .await?;
+
     if cli.config_server.is_some() {
         set_default_machine_id(cli.machine_id);
         let config_server_url_s = cli.config_server.clone().unwrap();
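The wiring above replaces per-instance RPC services with one shared manager. A stubbed sketch of the ownership pattern (types are placeholders, not the real EasyTier API):

```rust
// One manager behind an Arc: the global RPC server holds one handle while
// the rest of run_main keeps another, so both see the same set of networks.
use std::sync::Arc;

struct NetworkInstanceManager; // placeholder for the real manager

struct ApiRpcServer {
    _manager: Arc<NetworkInstanceManager>,
}

fn main() {
    let manager = Arc::new(NetworkInstanceManager);
    // The RPC server clones the Arc (a cheap reference-count bump)...
    let _rpc_server = ApiRpcServer { _manager: manager.clone() };
    // ...and the original handle stays available for creating, listing,
    // and stopping instances through the same shared state.
    let _instance_handle = manager;
}
```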
@@ -1175,11 +1166,11 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
             create_connector_by_url(c_url.as_str(), &global_ctx, IpVersion::Both).await?,
             token.to_string(),
             hostname,
+            manager,
         );
         tokio::signal::ctrl_c().await.unwrap();
         return Ok(());
     }
-    let manager = NetworkInstanceManager::new();
     let mut crate_cli_network =
         cli.config_file.is_none() || cli.network_options.network_name.is_some();
     if let Some(config_files) = cli.config_file {