introduce uptime monitor for easytier public nodes (#1250)

This commit is contained in:
Sijie.Sun
2025-08-20 22:59:44 +08:00
committed by GitHub
parent 8f37d4ef7c
commit e6ec7f405c
61 changed files with 12122 additions and 17 deletions
@@ -0,0 +1,80 @@
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use serde_json::json;
use thiserror::Error;
/// Unified error type for all API handlers.
///
/// Each variant maps to a fixed HTTP status code via the `IntoResponse`
/// impl in this module (Database/Internal -> 500, Validation/BadRequest
/// -> 400, NotFound -> 404, Unauthorized -> 401, Forbidden -> 403).
#[derive(Debug, Error)]
pub enum ApiError {
    /// Any sea-orm database failure; converted automatically via `#[from]`.
    #[error("Database error: {0}")]
    Database(#[from] sea_orm::DbErr),
    /// Request payload failed validation.
    #[error("Validation error: {0}")]
    Validation(String),
    /// Requested resource does not exist.
    #[error("Not found: {0}")]
    NotFound(String),
    /// Malformed or semantically invalid request.
    #[error("Bad request: {0}")]
    BadRequest(String),
    /// Unexpected server-side failure.
    #[error("Internal server error: {0}")]
    Internal(String),
    /// Missing or invalid credentials.
    #[error("Unauthorized: {0}")]
    Unauthorized(String),
    /// Authenticated but not allowed.
    #[error("Forbidden: {0}")]
    Forbidden(String),
}
impl IntoResponse for ApiError {
fn into_response(self) -> Response {
let (status, error_message) = match self {
ApiError::Database(err) => (
StatusCode::INTERNAL_SERVER_ERROR,
format!("Database error: {}", err),
),
ApiError::Validation(msg) => (StatusCode::BAD_REQUEST, msg),
ApiError::NotFound(msg) => (StatusCode::NOT_FOUND, msg),
ApiError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg),
ApiError::Internal(msg) => (StatusCode::INTERNAL_SERVER_ERROR, msg),
ApiError::Unauthorized(msg) => (StatusCode::UNAUTHORIZED, msg),
ApiError::Forbidden(msg) => (StatusCode::FORBIDDEN, msg),
};
let body = json!({
"error": {
"code": status.as_u16(),
"message": error_message
}
});
(status, axum::Json(body)).into_response()
}
}
/// Shorthand for handler results that fail with [`ApiError`].
pub type ApiResult<T> = Result<T, ApiError>;
impl From<validator::ValidationErrors> for ApiError {
    /// Flattens validator's per-field error map into one `Validation` error.
    /// Messages within a field are joined with ", "; fields with "; ".
    fn from(err: validator::ValidationErrors) -> Self {
        let mut per_field = Vec::new();
        for (field, field_errors) in err.field_errors().iter() {
            let mut messages = Vec::with_capacity(field_errors.len());
            for e in field_errors.iter() {
                // Fall back to a generic message when the rule has none.
                let text = match &e.message {
                    Some(m) => m.to_string(),
                    None => format!("Validation failed for field: {}", field),
                };
                messages.push(text);
            }
            per_field.push(messages.join(", "));
        }
        ApiError::Validation(per_field.join("; "))
    }
}
@@ -0,0 +1,507 @@
use std::ops::{Div, Mul};
use axum::extract::{Path, Query, State};
use axum::Json;
use sea_orm::{
ColumnTrait, Condition, EntityTrait, IntoActiveModel, ModelTrait, Order, PaginatorTrait,
QueryFilter, QueryOrder, QuerySelect, Set, TryIntoModel,
};
use serde::Deserialize;
use validator::Validate;
use crate::api::{
error::{ApiError, ApiResult},
models::*,
};
use crate::db::entity::{self, health_records, shared_nodes};
use crate::db::{operations::*, Db};
use crate::health_checker_manager::HealthCheckerManager;
use std::sync::Arc;
/// Shared application state, injected into handlers via axum's `State`.
#[derive(Clone)]
pub struct AppState {
    /// Database handle used by all persistence operations.
    pub db: Db,
    /// In-memory health checker shared across handlers.
    pub health_checker_manager: Arc<HealthCheckerManager>,
}
/// GET /health — liveness probe; always reports the service as healthy.
pub async fn health_check() -> Json<ApiResponse<String>> {
    let status_text = String::from("Service is healthy");
    Json(ApiResponse::message(status_text))
}
/// GET /api/nodes — public, paginated node listing.
///
/// Only approved nodes are returned; optional filters on activity,
/// protocol, and free-text search over name/host/description. Sensitive
/// fields (network credentials, contact info) are redacted and connection
/// counts are normalized to a percentage before the response leaves.
pub async fn get_nodes(
    State(app_state): State<AppState>,
    Query(pagination): Query<PaginationParams>,
    Query(filters): Query<NodeFilterParams>,
) -> ApiResult<Json<ApiResponse<PaginatedResponse<NodeResponse>>>> {
    // Clamp to sane minimums: page=0 would underflow `(page - 1)` and
    // per_page=0 would panic in `div_ceil` below.
    let page = pagination.page.unwrap_or(1).max(1);
    let per_page = pagination.per_page.unwrap_or(20).max(1);
    let offset = (page - 1) * per_page;
    let mut query = entity::shared_nodes::Entity::find();
    // Regular users may only see approved nodes.
    query = query.filter(entity::shared_nodes::Column::IsApproved.eq(true));
    if let Some(is_active) = filters.is_active {
        query = query.filter(entity::shared_nodes::Column::IsActive.eq(is_active));
    }
    if let Some(protocol) = filters.protocol {
        query = query.filter(entity::shared_nodes::Column::Protocol.eq(protocol));
    }
    if let Some(search) = filters.search {
        query = query.filter(
            sea_orm::Condition::any()
                .add(entity::shared_nodes::Column::Name.contains(&search))
                .add(entity::shared_nodes::Column::Host.contains(&search))
                .add(entity::shared_nodes::Column::Description.contains(&search)),
        );
    }
    let total = query.clone().count(app_state.db.orm_db()).await?;
    let nodes = query
        .order_by_asc(entity::shared_nodes::Column::Id)
        .limit(Some(per_page as u64))
        .offset(Some(offset as u64))
        .all(app_state.db.orm_db())
        .await?;
    let mut node_responses: Vec<NodeResponse> =
        nodes.into_iter().map(NodeResponse::from).collect();
    let total_pages = total.div_ceil(per_page as u64);
    // Attach in-memory health status to each node.
    for node_response in &mut node_responses {
        if let Some(mut health_record) = app_state
            .health_checker_manager
            .get_node_memory_record(node_response.id)
        {
            node_response.current_health_status =
                Some(health_record.get_current_health_status().to_string());
            node_response.last_check_time = Some(health_record.get_last_check_time());
            node_response.last_response_time = health_record.get_last_response_time();
            // 24-hour health statistics.
            if let Some(stats) = app_state
                .health_checker_manager
                .get_node_health_stats(node_response.id, 24)
            {
                node_response.health_percentage_24h = Some(stats.health_percentage);
            }
            let (total_ring, healthy_ring) = health_record.get_counter_ring();
            node_response.health_record_total_counter_ring = total_ring;
            node_response.health_record_healthy_counter_ring = healthy_ring;
            node_response.ring_granularity = health_record.get_ring_granularity();
        }
    }
    // Redact sensitive information. Deliberately no `{:?}` logging here:
    // the unredacted response carries network secrets and contact details,
    // which must not be written to the logs.
    node_responses.iter_mut().for_each(|node| {
        node.network_name = None;
        node.network_secret = None;
        // Express connection load as a percentage instead of raw counts.
        if node.max_connections != 0 {
            node.current_connections = node.current_connections * 100 / node.max_connections;
            node.max_connections = 100;
        } else {
            node.current_connections = 0;
            node.max_connections = 0;
        }
        node.wechat = None;
        node.qq_number = None;
        node.mail = None;
    });
    Ok(Json(ApiResponse::success(PaginatedResponse {
        items: node_responses,
        total,
        page,
        per_page,
        total_pages: total_pages as u32,
    })))
}
/// POST /api/nodes — validate the payload and persist a new shared node.
pub async fn create_node(
    State(app_state): State<AppState>,
    Json(request): Json<CreateNodeRequest>,
) -> ApiResult<Json<ApiResponse<NodeResponse>>> {
    request.validate()?;
    let created = NodeOperations::create_node(&app_state.db, request).await?;
    let response = NodeResponse::from(created);
    Ok(Json(ApiResponse::success(response)))
}
/// POST /api/test_connection — dry-run connectivity check for a node
/// definition without persisting it. Returns the echoed node on success.
pub async fn test_connection(
    State(app_state): State<AppState>,
    Json(request): Json<CreateNodeRequest>,
) -> ApiResult<Json<ApiResponse<NodeResponse>>> {
    // Validate first, consistent with `create_node`, so malformed payloads
    // are rejected before we spend 5 seconds dialing an arbitrary target.
    request.validate()?;
    let mut node = NodeOperations::create_node_model(request);
    // Placeholder id — this model is never written to the database.
    node.id = Set(0);
    let node = node.try_into_model()?;
    app_state
        .health_checker_manager
        .test_connection(&node, std::time::Duration::from_secs(5))
        .await
        .map_err(|e| ApiError::Internal(e.to_string()))?;
    Ok(Json(ApiResponse::success(NodeResponse::from(node))))
}
/// Fetch a single node by id.
///
/// Like `get_nodes`, credentials and contact details are stripped before
/// returning: this handler performs no admin authentication, so the raw
/// database row (which includes `network_secret`) must not be exposed.
pub async fn get_node(
    State(app_state): State<AppState>,
    Path(id): Path<i32>,
) -> ApiResult<Json<ApiResponse<NodeResponse>>> {
    let node = NodeOperations::get_node_by_id(&app_state.db, id)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Node with id {} not found", id)))?;
    let mut response = NodeResponse::from(node);
    // Redact sensitive fields, mirroring the redaction in `get_nodes`.
    response.network_name = None;
    response.network_secret = None;
    response.wechat = None;
    response.qq_number = None;
    response.mail = None;
    Ok(Json(ApiResponse::success(response)))
}
/// GET /api/nodes/{id}/health — paginated health-check history for one
/// node, newest first, optionally filtered by status and a since-timestamp.
pub async fn get_node_health(
    State(app_state): State<AppState>,
    Path(node_id): Path<i32>,
    Query(pagination): Query<PaginationParams>,
    Query(filters): Query<HealthFilterParams>,
) -> ApiResult<Json<ApiResponse<PaginatedResponse<HealthRecordResponse>>>> {
    // Clamp to sane minimums: page=0 would underflow `(page - 1)` and
    // per_page=0 would panic in `div_ceil` below.
    let page = pagination.page.unwrap_or(1).max(1);
    let per_page = pagination.per_page.unwrap_or(20).max(1);
    let offset = (page - 1) * per_page;
    let mut query = entity::health_records::Entity::find()
        .filter(entity::health_records::Column::NodeId.eq(node_id));
    if let Some(status) = filters.status {
        query = query.filter(entity::health_records::Column::Status.eq(status));
    }
    if let Some(since) = filters.since {
        query = query.filter(entity::health_records::Column::CheckedAt.gte(since.naive_utc()));
    }
    let total = query.clone().count(app_state.db.orm_db()).await?;
    let records = query
        .order_by_desc(entity::health_records::Column::CheckedAt)
        .limit(Some(per_page as u64))
        .offset(Some(offset as u64))
        .all(app_state.db.orm_db())
        .await?;
    let record_responses: Vec<HealthRecordResponse> = records
        .into_iter()
        .map(HealthRecordResponse::from)
        .collect();
    let total_pages = total.div_ceil(per_page as u64);
    Ok(Json(ApiResponse::success(PaginatedResponse {
        items: record_responses,
        total,
        page,
        per_page,
        total_pages: total_pages as u32,
    })))
}
/// GET /api/nodes/{id}/health/stats — aggregate health statistics for a
/// node over the last `hours` hours (default 24).
pub async fn get_node_health_stats(
    State(app_state): State<AppState>,
    Path(node_id): Path<i32>,
    Query(params): Query<HealthStatsParams>,
) -> ApiResult<Json<ApiResponse<HealthStatsResponse>>> {
    let window_hours = params.hours.unwrap_or(24);
    let stats =
        HealthOperations::get_health_stats(&app_state.db, node_id, window_hours).await?;
    let response = HealthStatsResponse::from(stats);
    Ok(Json(ApiResponse::success(response)))
}
/// Query parameters for the health-stats endpoint.
#[derive(Debug, Deserialize)]
pub struct HealthStatsParams {
    /// Look-back window in hours; the handler defaults to 24.
    pub hours: Option<i64>,
}
/// Filter parameters for instance listings.
/// NOTE(review): not referenced by any handler visible in this file —
/// confirm whether it is used elsewhere or dead code.
#[derive(Debug, Deserialize)]
pub struct InstanceFilterParams {
    pub node_id: Option<i32>,
    pub status: Option<String>,
}
// Admin-related handlers.
use crate::config::AppConfig;
use axum::http::{HeaderMap, StatusCode};
use chrono::{Duration, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use serde::Serialize;
/// JWT claims carried by the admin token.
#[derive(Debug, Serialize, Deserialize)]
struct AdminClaims {
    /// Subject — always "admin" (set in `admin_login`).
    sub: String,
    /// Expiry, unix seconds (24h after issue).
    exp: usize,
    /// Issued-at, unix seconds.
    iat: usize,
}
/// GET /node/{id} — render a node's connect URL, e.g. "tcp://host:port".
pub async fn get_node_connect_url(
    State(app_state): State<AppState>,
    Path(id): Path<i32>,
) -> ApiResult<String> {
    match NodeOperations::get_node_by_id(&app_state.db, id).await? {
        Some(node) => Ok(format!("{}://{}:{}", node.protocol, node.host, node.port)),
        None => Err(ApiError::NotFound(format!("Node with id {} not found", id))),
    }
}
/// Compares two byte strings in time independent of where they first
/// differ, to avoid leaking password prefixes through response timing.
/// (Length is still observable, which is standard for this scheme.)
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    a.iter().zip(b.iter()).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
}

/// POST /api/admin/login — exchange the admin password for a 24-hour JWT.
pub async fn admin_login(
    Json(request): Json<AdminLoginRequest>,
) -> ApiResult<Json<ApiResponse<AdminLoginResponse>>> {
    request
        .validate()
        .map_err(|e| ApiError::Validation(e.to_string()))?;
    let config = AppConfig::default();
    // Constant-time comparison: a plain `!=` short-circuits at the first
    // differing byte and can leak the password via a timing side channel.
    if !constant_time_eq(
        request.password.as_bytes(),
        config.security.admin_password.as_bytes(),
    ) {
        return Err(ApiError::Unauthorized("Invalid password".to_string()));
    }
    let now = Utc::now();
    let expires_at = now + Duration::hours(24);
    let claims = AdminClaims {
        sub: "admin".to_string(),
        exp: expires_at.timestamp() as usize,
        iat: now.timestamp() as usize,
    };
    let token = encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(config.security.jwt_secret.as_ref()),
    )
    .map_err(|e| ApiError::Internal(format!("Token generation failed: {}", e)))?;
    Ok(Json(ApiResponse::success(AdminLoginResponse {
        token,
        expires_at,
    })))
}
pub async fn admin_get_nodes(
State(app_state): State<AppState>,
Query(pagination): Query<PaginationParams>,
Query(filters): Query<AdminNodeFilterParams>,
headers: HeaderMap,
) -> ApiResult<Json<ApiResponse<PaginatedResponse<NodeResponse>>>> {
verify_admin_token(&headers)?;
let page = pagination.page.unwrap_or(1);
let per_page = pagination.per_page.unwrap_or(20);
let offset = (page - 1) * per_page;
let mut query = entity::shared_nodes::Entity::find();
if let Some(is_active) = filters.is_active {
query = query.filter(entity::shared_nodes::Column::IsActive.eq(is_active));
}
if let Some(is_approved) = filters.is_approved {
query = query.filter(entity::shared_nodes::Column::IsApproved.eq(is_approved));
}
if let Some(protocol) = filters.protocol {
query = query.filter(entity::shared_nodes::Column::Protocol.eq(protocol));
}
if let Some(search) = filters.search {
query = query.filter(
sea_orm::Condition::any()
.add(entity::shared_nodes::Column::Name.contains(&search))
.add(entity::shared_nodes::Column::Host.contains(&search))
.add(entity::shared_nodes::Column::Description.contains(&search)),
);
}
let total = query.clone().count(app_state.db.orm_db()).await?;
let nodes = query
.order_by(entity::shared_nodes::Column::CreatedAt, Order::Desc)
.offset(offset as u64)
.limit(per_page as u64)
.all(app_state.db.orm_db())
.await?;
let node_responses: Vec<NodeResponse> = nodes.into_iter().map(NodeResponse::from).collect();
let total_pages = (total as f64 / per_page as f64).ceil() as u32;
Ok(Json(ApiResponse::success(PaginatedResponse {
items: node_responses,
total,
page,
per_page,
total_pages,
})))
}
/// PUT /api/admin/nodes/{id}/approve — mark a node approved (admin only).
pub async fn admin_approve_node(
    State(app_state): State<AppState>,
    Path(id): Path<i32>,
    headers: HeaderMap,
) -> ApiResult<Json<ApiResponse<NodeResponse>>> {
    verify_admin_token(&headers)?;
    let found = entity::shared_nodes::Entity::find_by_id(id)
        .one(app_state.db.orm_db())
        .await?;
    let Some(node) = found else {
        return Err(ApiError::NotFound("Node not found".to_string()));
    };
    let mut model = node.into_active_model();
    model.is_approved = sea_orm::Set(true);
    let saved = entity::shared_nodes::Entity::update(model)
        .exec(app_state.db.orm_db())
        .await?;
    Ok(Json(ApiResponse::success(NodeResponse::from(saved))))
}
pub async fn admin_update_node(
State(app_state): State<AppState>,
Path(id): Path<i32>,
headers: HeaderMap,
Json(request): Json<UpdateNodeRequest>,
) -> ApiResult<Json<ApiResponse<NodeResponse>>> {
verify_admin_token(&headers)?;
request.validate()?;
let mut node = NodeOperations::get_node_by_id(&app_state.db, id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Node with id {} not found", id)))?;
let mut node = node.into_active_model();
if let Some(name) = request.name {
node.name = Set(name);
}
if let Some(host) = request.host {
node.host = Set(host);
}
if let Some(port) = request.port {
node.port = Set(port);
}
if let Some(protocol) = request.protocol {
node.protocol = Set(protocol);
}
if let Some(description) = request.description {
node.description = Set(description);
}
if let Some(max_connections) = request.max_connections {
node.max_connections = Set(max_connections);
}
if let Some(is_active) = request.is_active {
node.is_active = Set(is_active);
}
if let Some(allow_relay) = request.allow_relay {
node.allow_relay = Set(allow_relay);
}
if let Some(network_name) = request.network_name {
node.network_name = Set(network_name);
}
if let Some(network_secret) = request.network_secret {
node.network_secret = Set(network_secret);
}
if let Some(wechat) = request.wechat {
node.wechat = Set(wechat);
}
if let Some(mail) = request.mail {
node.mail = Set(mail);
}
if let Some(qq_number) = request.qq_number {
node.qq_number = Set(qq_number);
}
node.updated_at = Set(chrono::Utc::now().fixed_offset());
tracing::info!("updated node: {:?}", node);
let updated_node = entity::shared_nodes::Entity::update(node)
.exec(app_state.db.orm_db())
.await?;
Ok(Json(ApiResponse::success(NodeResponse::from(updated_node))))
}
/// PUT /api/admin/nodes/{id}/revoke — clear a node's approval (admin only).
pub async fn admin_revoke_approval(
    State(app_state): State<AppState>,
    Path(id): Path<i32>,
    headers: HeaderMap,
) -> ApiResult<Json<ApiResponse<NodeResponse>>> {
    verify_admin_token(&headers)?;
    let found = entity::shared_nodes::Entity::find_by_id(id)
        .one(app_state.db.orm_db())
        .await?;
    let Some(node) = found else {
        return Err(ApiError::NotFound("Node not found".to_string()));
    };
    let mut model = node.into_active_model();
    model.is_approved = sea_orm::Set(false);
    let saved = entity::shared_nodes::Entity::update(model)
        .exec(app_state.db.orm_db())
        .await?;
    Ok(Json(ApiResponse::success(NodeResponse::from(saved))))
}
/// DELETE /api/admin/nodes/{id} — permanently remove a node (admin only).
pub async fn admin_delete_node(
    State(app_state): State<AppState>,
    Path(id): Path<i32>,
    headers: HeaderMap,
) -> ApiResult<Json<ApiResponse<String>>> {
    verify_admin_token(&headers)?;
    let target = entity::shared_nodes::Entity::find_by_id(id)
        .one(app_state.db.orm_db())
        .await?
        .ok_or_else(|| ApiError::NotFound("Node not found".to_string()))?;
    target.delete(app_state.db.orm_db()).await?;
    let confirmation = "Node deleted successfully".to_string();
    Ok(Json(ApiResponse::message(confirmation)))
}
/// GET /api/admin/verify — succeeds iff the Bearer token is valid.
pub async fn admin_verify_token(headers: HeaderMap) -> ApiResult<Json<ApiResponse<String>>> {
    verify_admin_token(&headers)?;
    let reply = "Token is valid".to_string();
    Ok(Json(ApiResponse::message(reply)))
}
/// Validates the `Authorization: Bearer <jwt>` header against the
/// configured JWT secret. Any missing/malformed/invalid input yields an
/// `Unauthorized` error with a specific message.
fn verify_admin_token(headers: &HeaderMap) -> ApiResult<()> {
    let config = AppConfig::default();
    let unauthorized = |msg: &str| ApiError::Unauthorized(msg.to_string());
    let Some(raw) = headers.get("authorization") else {
        return Err(unauthorized("Missing authorization header"));
    };
    let value = raw
        .to_str()
        .map_err(|_| unauthorized("Invalid authorization header"))?;
    let token = match value.strip_prefix("Bearer ") {
        Some(t) => t,
        None => return Err(unauthorized("Invalid authorization format")),
    };
    // Signature + standard claim (exp) validation; claims themselves unused.
    decode::<AdminClaims>(
        token,
        &DecodingKey::from_secret(config.security.jwt_secret.as_ref()),
        &Validation::default(),
    )
    .map_err(|_| unauthorized("Invalid token"))?;
    Ok(())
}
@@ -0,0 +1,8 @@
pub mod error;
pub mod handlers;
pub mod models;
pub mod routes;
pub use error::{ApiError, ApiResult};
pub use handlers::*;
pub use models::*;
@@ -0,0 +1,316 @@
use crate::db::entity;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use validator::Validate;
/// Generic JSON envelope returned by every endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct ApiResponse<T> {
    /// True for `success`/`message` responses, false for `error` ones.
    pub success: bool,
    /// Payload, present on `success` responses.
    pub data: Option<T>,
    /// Error description, present on `error` responses.
    pub error: Option<String>,
    /// Human-readable note, present on `message` responses.
    pub message: Option<String>,
}
impl<T> ApiResponse<T> {
pub fn success(data: T) -> Self {
Self {
success: true,
data: Some(data),
error: None,
message: None,
}
}
pub fn error(error: String) -> Self {
Self {
success: false,
data: None,
error: Some(error),
message: None,
}
}
pub fn message(message: String) -> Self {
Self {
success: true,
data: None,
error: None,
message: Some(message),
}
}
}
/// One page of results plus paging metadata.
#[derive(Debug, Serialize, Deserialize)]
pub struct PaginatedResponse<T> {
    /// Items on this page.
    pub items: Vec<T>,
    /// Total matching items across all pages.
    pub total: u64,
    /// 1-based page number that was requested.
    pub page: u32,
    /// Requested page size.
    pub per_page: u32,
    /// Total page count for `total` at `per_page`.
    pub total_pages: u32,
}
/// Optional paging query parameters; handlers fall back to page 1 with
/// 20 items per page when absent.
#[derive(Debug, Serialize, Deserialize)]
pub struct PaginationParams {
    pub page: Option<u32>,
    pub per_page: Option<u32>,
}
impl Default for PaginationParams {
fn default() -> Self {
Self {
page: Some(1),
per_page: Some(20),
}
}
}
/// Payload for creating a shared node. Field limits are enforced by the
/// `validator` attributes; the schema-level `validate_contact_info` rule
/// additionally requires at least one contact method.
#[derive(Debug, Serialize, Deserialize, Validate)]
#[validate(schema(function = "validate_contact_info", skip_on_field_errors = false))]
pub struct CreateNodeRequest {
    #[validate(length(min = 1, max = 100))]
    pub name: String,
    #[validate(length(min = 1, max = 255))]
    pub host: String,
    #[validate(range(min = 1, max = 65535))]
    pub port: i32,
    #[validate(length(min = 1, max = 20))]
    pub protocol: String,
    #[validate(length(max = 500))]
    pub description: Option<String>,
    #[validate(range(min = 1, max = 10000))]
    pub max_connections: i32,
    pub allow_relay: bool,
    #[validate(length(min = 1, max = 100))]
    pub network_name: String,
    #[validate(length(max = 100))]
    pub network_secret: Option<String>,
    // Contact fields — at least one must be non-empty (schema validator).
    #[validate(length(max = 20))]
    pub qq_number: Option<String>,
    #[validate(length(max = 50))]
    pub wechat: Option<String>,
    #[validate(email)]
    pub mail: Option<String>,
}
/// Schema-level validator: at least one contact method (QQ, WeChat, or
/// mail) must be non-empty after trimming.
fn validate_contact_info(request: &CreateNodeRequest) -> Result<(), validator::ValidationError> {
    let provided =
        |field: &Option<String>| field.as_deref().map(str::trim).is_some_and(|s| !s.is_empty());
    if provided(&request.qq_number) || provided(&request.wechat) || provided(&request.mail) {
        Ok(())
    } else {
        Err(validator::ValidationError::new("contact_required"))
    }
}
/// Payload for the admin partial-update endpoint. Every field is optional;
/// only present fields are applied to the node.
#[derive(Debug, Serialize, Deserialize, Validate)]
pub struct UpdateNodeRequest {
    #[validate(length(min = 1, max = 100))]
    pub name: Option<String>,
    #[validate(length(min = 1, max = 255))]
    pub host: Option<String>,
    #[validate(range(min = 1, max = 65535))]
    pub port: Option<i32>,
    #[validate(length(min = 1, max = 20))]
    pub protocol: Option<String>,
    #[validate(length(max = 500))]
    pub description: Option<String>,
    #[validate(range(min = 1, max = 10000))]
    pub max_connections: Option<i32>,
    pub is_active: Option<bool>,
    pub allow_relay: Option<bool>,
    #[validate(length(min = 1, max = 100))]
    pub network_name: Option<String>,
    #[validate(length(max = 100))]
    pub network_secret: Option<String>,
    // Contact fields.
    #[validate(length(max = 20))]
    pub qq_number: Option<String>,
    #[validate(length(max = 50))]
    pub wechat: Option<String>,
    #[validate(email)]
    pub mail: Option<String>,
}
/// API representation of a shared node.
///
/// Built from the DB row via `From<shared_nodes::Model>`; health fields
/// start empty and are filled in by the handlers from the in-memory
/// health checker. Public endpoints redact the credential and contact
/// fields before returning this.
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeResponse {
    pub id: i32,
    pub name: String,
    pub host: String,
    pub port: i32,
    pub protocol: String,
    pub version: Option<String>,
    pub description: Option<String>,
    pub max_connections: i32,
    pub current_connections: i32,
    pub is_active: bool,
    pub is_approved: bool,
    pub allow_relay: bool,
    pub network_name: Option<String>,
    pub network_secret: Option<String>,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub updated_at: chrono::DateTime<chrono::Utc>,
    /// Connect URL, "<protocol>://<host>:<port>".
    pub address: String,
    /// current_connections / max_connections, as a percentage.
    pub usage_percentage: f64,
    // Health status fields (populated by handlers, not by `From`).
    pub current_health_status: Option<String>,
    pub last_check_time: Option<chrono::DateTime<chrono::Utc>>,
    pub last_response_time: Option<i32>,
    pub health_percentage_24h: Option<f64>,
    pub health_record_total_counter_ring: Vec<u64>,
    pub health_record_healthy_counter_ring: Vec<u64>,
    pub ring_granularity: u32,
    // Contact fields.
    pub qq_number: Option<String>,
    pub wechat: Option<String>,
    pub mail: Option<String>,
}
impl From<entity::shared_nodes::Model> for NodeResponse {
    /// Builds an API response from a database row.
    ///
    /// Health fields start empty and are filled in by the handlers; empty
    /// contact strings are normalized to `None`.
    fn from(node: entity::shared_nodes::Model) -> Self {
        // Compute derived values while the row can still be borrowed, so
        // the string fields below can be moved instead of cloned.
        let address = format!("{}://{}:{}", node.protocol, node.host, node.port);
        // Guard against max_connections == 0: the unguarded division would
        // produce NaN/inf, which JSON cannot represent.
        let usage_percentage = if node.max_connections > 0 {
            node.current_connections as f64 / node.max_connections as f64 * 100.0
        } else {
            0.0
        };
        // Empty contact strings serialize as absent rather than "".
        fn non_empty(s: String) -> Option<String> {
            if s.is_empty() {
                None
            } else {
                Some(s)
            }
        }
        Self {
            id: node.id,
            name: node.name,
            host: node.host,
            port: node.port,
            protocol: node.protocol,
            version: Some(node.version),
            description: Some(node.description),
            max_connections: node.max_connections,
            current_connections: node.current_connections,
            is_active: node.is_active,
            is_approved: node.is_approved,
            allow_relay: node.allow_relay,
            created_at: node.created_at.into(),
            updated_at: node.updated_at.into(),
            network_name: Some(node.network_name),
            network_secret: Some(node.network_secret),
            address,
            usage_percentage,
            // Health fields are populated later by the handlers.
            current_health_status: None,
            last_check_time: None,
            last_response_time: None,
            health_percentage_24h: None,
            health_record_healthy_counter_ring: Vec::new(),
            health_record_total_counter_ring: Vec::new(),
            ring_granularity: 0,
            qq_number: non_empty(node.qq_number),
            wechat: non_empty(node.wechat),
            mail: non_empty(node.mail),
        }
    }
}
/// API representation of one stored health-check result.
#[derive(Debug, Serialize, Deserialize)]
pub struct HealthRecordResponse {
    pub id: i32,
    /// Owning node's id.
    pub node_id: i32,
    /// Health status rendered as a string.
    pub status: String,
    /// Response time of the check (always `Some` when built from a row).
    pub response_time: Option<i32>,
    /// Error text of the check (always `Some` when built from a row).
    pub error_message: Option<String>,
    pub checked_at: chrono::DateTime<chrono::Utc>,
}
impl From<entity::health_records::Model> for HealthRecordResponse {
fn from(record: entity::health_records::Model) -> Self {
Self {
id: record.id,
node_id: record.node_id,
status: record.status.to_string(),
response_time: Some(record.response_time),
error_message: Some(record.error_message),
checked_at: record.checked_at.into(),
}
}
}
/// The DB-layer stats type is reused directly as the API response shape.
pub type HealthStatsResponse = crate::db::HealthStats;
/// Filters accepted by the public node listing.
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeFilterParams {
    pub is_active: Option<bool>,
    pub protocol: Option<String>,
    /// Free-text match against name, host, or description.
    pub search: Option<String>,
}
/// Filters accepted by the per-node health-history listing.
#[derive(Debug, Serialize, Deserialize)]
pub struct HealthFilterParams {
    pub status: Option<String>,
    /// Only records checked at or after this instant.
    pub since: Option<DateTime<Utc>>,
}
// Admin-related models.
/// Credentials for the admin login endpoint.
#[derive(Debug, Serialize, Deserialize, Validate)]
pub struct AdminLoginRequest {
    #[validate(length(min = 1))]
    pub password: String,
}
/// Result of a successful admin login.
#[derive(Debug, Serialize, Deserialize)]
pub struct AdminLoginResponse {
    /// Signed JWT to present as `Authorization: Bearer <token>`.
    pub token: String,
    /// Instant at which the token stops being accepted.
    pub expires_at: DateTime<Utc>,
}
/// Approval toggle payload.
/// NOTE(review): not referenced by any handler visible in this file —
/// approve/revoke endpoints take the node id from the path instead.
#[derive(Debug, Serialize, Deserialize)]
pub struct ApproveNodeRequest {
    pub approved: bool,
}
/// Filters accepted by the admin node listing (adds `is_approved` on top
/// of the public filters).
#[derive(Debug, Serialize, Deserialize)]
pub struct AdminNodeFilterParams {
    pub is_active: Option<bool>,
    pub is_approved: Option<bool>,
    pub protocol: Option<String>,
    /// Free-text match against name, host, or description.
    pub search: Option<String>,
}
@@ -0,0 +1,64 @@
use axum::routing::{delete, get, post, put};
use axum::Router;
use tower_http::compression::CompressionLayer;
use tower_http::cors::CorsLayer;
use super::handlers::AppState;
use super::handlers::{
admin_approve_node, admin_delete_node, admin_get_nodes, admin_login, admin_revoke_approval,
admin_update_node, admin_verify_token, create_node, get_node, get_node_health,
get_node_health_stats, get_nodes, health_check,
};
use crate::api::{get_node_connect_url, test_connection};
use crate::config::AppConfig;
use crate::db::Db;
/// Assembles the HTTP router: public node/health endpoints plus the
/// token-protected admin endpoints.
///
/// Compression and CORS layers are attached according to `AppConfig`.
/// NOTE(review): when CORS is enabled this installs
/// `CorsLayer::very_permissive()` and ignores `config.cors.allowed_origins`
/// — confirm that is intentional.
pub fn create_routes() -> Router<AppState> {
    let config = AppConfig::default();
    let mut router = Router::new()
        .route("/node/{id}", get(get_node_connect_url))
        .route("/health", get(health_check))
        .route("/api/nodes", get(get_nodes).post(create_node))
        .route("/api/test_connection", post(test_connection))
        .route("/api/nodes/{id}/health", get(get_node_health))
        .route("/api/nodes/{id}/health/stats", get(get_node_health_stats))
        // Admin routes.
        .route("/api/admin/login", post(admin_login))
        .route("/api/admin/verify", get(admin_verify_token))
        .route("/api/admin/nodes", get(admin_get_nodes))
        .route("/api/admin/nodes/{id}/approve", put(admin_approve_node))
        .route("/api/admin/nodes/{id}/revoke", put(admin_revoke_approval))
        .route(
            "/api/admin/nodes/{id}",
            put(admin_update_node).delete(admin_delete_node),
        );
    if config.security.enable_compression {
        router = router.layer(
            CompressionLayer::new()
                .br(true)
                .deflate(true)
                .gzip(true)
                .zstd(true),
        );
    }
    if config.cors.enabled {
        router = router.layer(CorsLayer::very_permissive());
    }
    router
}
@@ -0,0 +1,198 @@
use std::env;
use std::net::{IpAddr, SocketAddr};
use std::path::PathBuf;
/// Top-level service configuration; see `from_env` for environment
/// variables and `default_config` for compiled-in fallbacks.
#[derive(Debug, Clone)]
pub struct AppConfig {
    pub server: ServerConfig,
    pub database: DatabaseConfig,
    pub health_check: HealthCheckConfig,
    pub logging: LoggingConfig,
    pub cors: CorsConfig,
    pub security: SecurityConfig,
}
/// HTTP listener settings.
#[derive(Debug, Clone)]
pub struct ServerConfig {
    pub host: String,
    pub port: u16,
    /// Pre-built socket address combining `host` and `port`.
    pub addr: SocketAddr,
}
/// Database location and pool sizing.
#[derive(Debug, Clone)]
pub struct DatabaseConfig {
    pub path: PathBuf,
    pub max_connections: u32,
}
/// Health-check scheduling parameters.
#[derive(Debug, Clone)]
pub struct HealthCheckConfig {
    pub interval_seconds: u64,
    pub timeout_seconds: u64,
    pub max_retries: u32,
}
/// Logging configuration (service level and RUST_LOG passthrough).
#[derive(Debug, Clone)]
pub struct LoggingConfig {
    pub level: String,
    pub rust_log: String,
}
/// CORS settings.
/// NOTE(review): `create_routes` only reads `enabled`; the allow-lists
/// below are currently unused (it installs a very permissive layer).
#[derive(Debug, Clone)]
pub struct CorsConfig {
    pub allowed_origins: Vec<String>,
    pub allowed_methods: Vec<String>,
    pub allowed_headers: Vec<String>,
    pub enabled: bool,
}
/// Security-related settings: compression toggle, secrets, admin password.
#[derive(Debug, Clone)]
pub struct SecurityConfig {
    pub enable_compression: bool,
    pub secret_key: String,
    /// HMAC secret used to sign/verify admin JWTs.
    pub jwt_secret: String,
    /// Plain-text admin password compared at login.
    pub admin_password: String,
}
impl Default for AppConfig {
    /// Environment-derived configuration, falling back to the compiled-in
    /// defaults if `from_env` reports an error.
    fn default() -> Self {
        match Self::from_env() {
            Ok(cfg) => cfg,
            Err(_) => Self::default_config(),
        }
    }
}
impl AppConfig {
    /// Builds the configuration from environment variables.
    ///
    /// Every variable has a fallback, so with the parse fallbacks below
    /// this currently never returns `Err`; the `Result` signature is kept
    /// for interface stability.
    pub fn from_env() -> Result<Self, env::VarError> {
        // Parse `key`, falling back to `default` when unset or unparsable.
        fn parsed<T: std::str::FromStr>(key: &str, default: T) -> T {
            env::var(key).ok().and_then(|v| v.parse().ok()).unwrap_or(default)
        }
        // Read a comma-separated list, trimming each entry.
        fn csv(key: &str, default: &str) -> Vec<String> {
            env::var(key)
                .unwrap_or_else(|_| default.to_string())
                .split(',')
                .map(|s| s.trim().to_string())
                .collect()
        }
        fn string(key: &str, default: &str) -> String {
            env::var(key).unwrap_or_else(|_| default.to_string())
        }

        // Read host/port once and reuse them for the socket address.
        let host = string("SERVER_HOST", "127.0.0.1");
        let port: u16 = parsed("SERVER_PORT", 8080);
        // Fall back to loopback instead of panicking when SERVER_HOST is
        // not a literal IP address (e.g. "localhost"): the previous
        // `.parse().unwrap()` aborted the process at startup.
        let ip: IpAddr = host.parse().unwrap_or(IpAddr::from([127, 0, 0, 1]));

        Ok(AppConfig {
            server: ServerConfig {
                addr: SocketAddr::from((ip, port)),
                host,
                port,
            },
            database: DatabaseConfig {
                path: PathBuf::from(string("DATABASE_PATH", "uptime.db")),
                max_connections: parsed("DATABASE_MAX_CONNECTIONS", 10),
            },
            health_check: HealthCheckConfig {
                interval_seconds: parsed("HEALTH_CHECK_INTERVAL", 30),
                timeout_seconds: parsed("HEALTH_CHECK_TIMEOUT", 10),
                max_retries: parsed("HEALTH_CHECK_RETRIES", 3),
            },
            logging: LoggingConfig {
                level: string("LOG_LEVEL", "info"),
                rust_log: string("RUST_LOG", "info"),
            },
            cors: CorsConfig {
                allowed_origins: csv(
                    "CORS_ALLOWED_ORIGINS",
                    "http://localhost:3000,http://localhost:8080",
                ),
                allowed_methods: csv("CORS_ALLOWED_METHODS", "GET,POST,PUT,DELETE,OPTIONS"),
                allowed_headers: csv("CORS_ALLOWED_HEADERS", "content-type,authorization"),
                enabled: parsed("ENABLE_CORS", true),
            },
            security: SecurityConfig {
                enable_compression: parsed("ENABLE_COMPRESSION", true),
                secret_key: string("SECRET_KEY", "default-secret-key"),
                jwt_secret: string("JWT_SECRET", "default-jwt-secret"),
                admin_password: string("ADMIN_PASSWORD", "admin123"),
            },
        })
    }

    /// Compiled-in defaults, used when `from_env` fails.
    pub fn default_config() -> Self {
        Self {
            server: ServerConfig {
                host: "127.0.0.1".to_string(),
                port: 8080,
                addr: SocketAddr::from(([127, 0, 0, 1], 8080)),
            },
            database: DatabaseConfig {
                path: PathBuf::from("uptime.db"),
                max_connections: 10,
            },
            health_check: HealthCheckConfig {
                interval_seconds: 30,
                timeout_seconds: 10,
                max_retries: 3,
            },
            logging: LoggingConfig {
                level: "info".to_string(),
                rust_log: "info".to_string(),
            },
            cors: CorsConfig {
                allowed_origins: vec![
                    "http://localhost:3000".to_string(),
                    "http://localhost:8080".to_string(),
                ],
                allowed_methods: vec![
                    "GET".to_string(),
                    "POST".to_string(),
                    "PUT".to_string(),
                    "DELETE".to_string(),
                    "OPTIONS".to_string(),
                ],
                allowed_headers: vec!["content-type".to_string(), "authorization".to_string()],
                enabled: true,
            },
            security: SecurityConfig {
                enable_compression: true,
                secret_key: "default-secret-key".to_string(),
                jwt_secret: "default-jwt-secret".to_string(),
                admin_password: "admin123".to_string(),
            },
        }
    }

    /// True when NODE_ENV is unset or "development".
    pub fn is_development(&self) -> bool {
        env::var("NODE_ENV").unwrap_or_else(|_| "development".to_string()) == "development"
    }

    /// True when NODE_ENV is "production".
    pub fn is_production(&self) -> bool {
        env::var("NODE_ENV").unwrap_or_else(|_| "development".to_string()) == "production"
    }
}
@@ -0,0 +1,360 @@
use crate::db::entity::*;
use crate::db::Db;
use sea_orm::*;
use tokio::time::{sleep, Duration};
use tracing::{error, info, warn};
/// Data-cleanup policy configuration.
#[derive(Debug, Clone)]
pub struct CleanupConfig {
    /// Days to keep health records before age-based deletion.
    pub health_record_retention_days: i64,
    /// Maximum number of health records retained per node.
    pub max_health_records_per_node: u64,
    /// Interval between cleanup runs, in seconds.
    pub cleanup_interval_seconds: u64,
    /// Whether the background auto-cleanup task is enabled.
    pub auto_cleanup_enabled: bool,
}
impl Default for CleanupConfig {
fn default() -> Self {
Self {
health_record_retention_days: 30,
max_health_records_per_node: 70000,
cleanup_interval_seconds: 1200, // 20分钟
auto_cleanup_enabled: true,
}
}
}
/// Orchestrates periodic pruning of health-record data.
pub struct CleanupManager {
    /// Database handle used by all cleanup queries.
    db: Db,
    /// Retention/cap/interval policy.
    config: CleanupConfig,
    /// Set while the spawned auto-cleanup loop should keep running.
    running: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
impl CleanupManager {
/// Creates a cleanup manager with the given database handle and policy.
pub fn new(db: Db, config: CleanupConfig) -> Self {
    Self {
        db,
        config,
        // Not running until `start_auto_cleanup` sets this flag.
        running: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
    }
}
/// Creates a cleanup manager using `CleanupConfig::default()`.
pub fn with_default_config(db: Db) -> Self {
    Self::new(db, CleanupConfig::default())
}
/// Spawns the background cleanup loop when auto cleanup is enabled;
/// otherwise a no-op. The loop runs one cleanup pass, sleeps for the
/// configured interval, and repeats until `stop_auto_cleanup` clears
/// the running flag. Failures are logged and do not stop the loop.
pub async fn start_auto_cleanup(&self) -> anyhow::Result<()> {
    if !self.config.auto_cleanup_enabled {
        return Ok(());
    }
    let running = self.running.clone();
    let db = self.db.clone();
    let config = self.config.clone();
    running.store(true, std::sync::atomic::Ordering::SeqCst);
    tokio::spawn(async move {
        info!("Auto cleanup task started");
        while running.load(std::sync::atomic::Ordering::SeqCst) {
            if let Err(e) = Self::perform_cleanup(&db, &config).await {
                error!("Auto cleanup failed: {}", e);
            }
            sleep(Duration::from_secs(config.cleanup_interval_seconds)).await;
        }
        info!("Auto cleanup task stopped");
    });
    Ok(())
}
/// Signals the background cleanup loop to exit after its current iteration.
pub fn stop_auto_cleanup(&self) {
    self.running
        .store(false, std::sync::atomic::Ordering::SeqCst);
}
/// Runs one full cleanup pass: age-based pruning, per-node cap pruning,
/// then database maintenance. Returns counters describing what was done.
pub async fn perform_cleanup(db: &Db, config: &CleanupConfig) -> anyhow::Result<CleanupResult> {
    let mut result = CleanupResult::default();
    // Remove records older than the retention window.
    let health_cleanup_result =
        Self::cleanup_old_health_records(db, config.health_record_retention_days).await?;
    result.old_health_records_cleaned = health_cleanup_result.records_removed;
    // Trim each node down to the per-node record cap.
    let excess_cleanup_result =
        Self::cleanup_excess_health_records(db, config.max_health_records_per_node).await?;
    result.excess_health_records_cleaned = excess_cleanup_result.records_removed;
    // Database maintenance (vacuum/analyze flags come from the helper).
    let maintenance_result = Self::perform_database_maintenance(db).await?;
    result.vacuum_performed = maintenance_result.vacuum_performed;
    result.analyze_performed = maintenance_result.analyze_performed;
    info!("Cleanup completed: {:?}", result);
    Ok(result)
}
/// 清理旧的健康记录
async fn cleanup_old_health_records(
db: &Db,
days: i64,
) -> anyhow::Result<CleanupHealthRecordsResult> {
let cutoff = chrono::Local::now().fixed_offset() - chrono::Duration::days(days);
let result = health_records::Entity::delete_many()
.filter(health_records::Column::CheckedAt.lt(cutoff))
.exec(db.orm_db())
.await?;
let records_removed = result.rows_affected;
if records_removed > 0 {
info!(
"Cleaned {} old health records (older than {} days)",
records_removed, days
);
}
Ok(CleanupHealthRecordsResult { records_removed })
}
/// 清理过量的健康记录
async fn cleanup_excess_health_records(
db: &Db,
max_records: u64,
) -> anyhow::Result<CleanupExcessRecordsResult> {
// 获取所有节点
let nodes = shared_nodes::Entity::find().all(db.orm_db()).await?;
let mut total_removed = 0;
for node in nodes {
// 计算需要删除的记录数量
let total_count = health_records::Entity::find()
.filter(health_records::Column::NodeId.eq(node.id))
.count(db.orm_db())
.await?;
if total_count > max_records {
let to_remove = total_count - max_records;
// 获取需要保留的最小ID
let keep_id = health_records::Entity::find()
.filter(health_records::Column::NodeId.eq(node.id))
.order_by_desc(health_records::Column::CheckedAt)
.offset(max_records)
.limit(1)
.into_model::<health_records::Model>()
.one(db.orm_db())
.await?;
info!(
"Node {}: total count: {}, to remove: {}, last keep record: {:?}",
node.id, total_count, to_remove, keep_id
);
if let Some(keep_record) = keep_id {
// 删除比保留记录更早的记录
let result = health_records::Entity::delete_many()
.filter(health_records::Column::NodeId.eq(node.id))
.filter(health_records::Column::Id.lt(keep_record.id))
.exec(db.orm_db())
.await?;
total_removed += result.rows_affected;
}
}
}
if total_removed > 0 {
info!(
"Cleaned {} excess health records (max {} per node)",
total_removed, max_records
);
}
Ok(CleanupExcessRecordsResult {
records_removed: total_removed,
})
}
/// 执行数据库维护操作
async fn perform_database_maintenance(db: &Db) -> anyhow::Result<DatabaseMaintenanceResult> {
let mut vacuum_performed = false;
let mut analyze_performed = false;
// 执行 ANALYZE
match db
.orm_db()
.execute(Statement::from_string(
DatabaseBackend::Sqlite,
"ANALYZE".to_string(),
))
.await
{
Ok(_) => {
analyze_performed = true;
info!("Database ANALYZE completed");
}
Err(e) => {
warn!("Database ANALYZE failed: {}", e);
}
}
// 执行 VACUUM(仅在需要时)
if vacuum_performed || analyze_performed {
match db
.orm_db()
.execute(Statement::from_string(
DatabaseBackend::Sqlite,
"VACUUM".to_string(),
))
.await
{
Ok(_) => {
vacuum_performed = true;
info!("Database VACUUM completed");
}
Err(e) => {
warn!("Database VACUUM failed: {}", e);
}
}
}
Ok(DatabaseMaintenanceResult {
vacuum_performed,
analyze_performed,
})
}
/// 获取数据库统计信息
pub async fn get_database_stats(db: &Db) -> anyhow::Result<DatabaseStats> {
let total_nodes = shared_nodes::Entity::find().count(db.orm_db()).await?;
let total_health_records = health_records::Entity::find().count(db.orm_db()).await?;
let active_nodes = shared_nodes::Entity::find()
.filter(shared_nodes::Column::IsActive.eq(true))
.count(db.orm_db())
.await?;
Ok(DatabaseStats {
total_nodes,
active_nodes,
total_health_records,
})
}
/// 获取清理配置
pub fn get_config(&self) -> &CleanupConfig {
&self.config
}
/// 更新清理配置
pub fn update_config(&mut self, config: CleanupConfig) {
self.config = config;
}
}
/// Outcome of one full cleanup pass.
#[derive(Default, Debug, Clone, serde::Serialize)]
pub struct CleanupResult {
    /// Records removed by the age-based purge.
    pub old_health_records_cleaned: u64,
    // NOTE(review): never written by the visible cleanup code — confirm
    // whether instance cleanup is planned or was removed.
    pub old_instances_cleaned: u64,
    /// Records removed by the per-node cap.
    pub excess_health_records_cleaned: u64,
    /// Whether VACUUM completed successfully.
    pub vacuum_performed: bool,
    /// Whether ANALYZE completed successfully.
    pub analyze_performed: bool,
}
/// Result of the age-based health-record purge.
#[derive(Debug, Clone, serde::Serialize)]
pub struct CleanupHealthRecordsResult {
    pub records_removed: u64,
}
/// Result of cleaning up stopped instances.
// NOTE(review): unused in the visible code.
#[derive(Debug, Clone, serde::Serialize)]
pub struct CleanupStoppedInstancesResult {
    pub instances_removed: u64,
}
/// Result of the per-node record-cap trim.
#[derive(Debug, Clone, serde::Serialize)]
pub struct CleanupExcessRecordsResult {
    pub records_removed: u64,
}
/// Result of the ANALYZE/VACUUM maintenance step.
#[derive(Debug, Clone, serde::Serialize)]
pub struct DatabaseMaintenanceResult {
    pub vacuum_performed: bool,
    pub analyze_performed: bool,
}
/// Basic database counters.
#[derive(Debug, Clone, serde::Serialize)]
pub struct DatabaseStats {
    pub total_nodes: u64,
    pub active_nodes: u64,
    pub total_health_records: u64,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Db;
    #[tokio::test]
    async fn test_cleanup_manager() {
        let db = Db::memory_db().await;
        let cleanup_manager = CleanupManager::with_default_config(db.clone());
        // The default policy should expose a 30-day retention window.
        let config = cleanup_manager.get_config();
        assert_eq!(config.health_record_retention_days, 30);
        // A cleanup pass on an empty database must succeed (and do nothing).
        let result = CleanupManager::perform_cleanup(&db, config).await.unwrap();
        println!("Cleanup result: {:?}", result);
        // Stats collection on a fresh in-memory database must also succeed.
        let stats = CleanupManager::get_database_stats(&db).await.unwrap();
        println!("Database stats: {:?}", stats);
    }
    #[tokio::test]
    async fn test_cleanup_config() {
        let config = CleanupConfig {
            health_record_retention_days: 7,
            max_health_records_per_node: 500,
            cleanup_interval_seconds: 1800,
            auto_cleanup_enabled: false,
        };
        let db = Db::memory_db().await;
        let mut cleanup_manager = CleanupManager::new(db, config.clone());
        assert_eq!(cleanup_manager.get_config().health_record_retention_days, 7);
        // Updating the config replaces the previous policy entirely.
        let new_config = CleanupConfig::default();
        cleanup_manager.update_config(new_config);
        assert_eq!(
            cleanup_manager.get_config().health_record_retention_days,
            30
        );
    }
}
@@ -0,0 +1,39 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// Row of `connection_instances`: one network-instance run bound to a
/// shared node. Codegen output — prefer regenerating over hand edits.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "connection_instances")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    // Foreign key to shared_nodes.id (cascading update/delete below).
    pub node_id: i32,
    #[sea_orm(unique)]
    pub instance_id: String,
    pub status: String,
    #[sea_orm(column_type = "Text")]
    pub config: String,
    pub started_at: DateTimeWithTimeZone,
    // NOTE(review): not Optional — a still-running instance must store some
    // placeholder stop time; confirm against the migration schema.
    pub stopped_at: DateTimeWithTimeZone,
}
/// Links each connection instance to its owning shared node.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::shared_nodes::Entity",
        from = "Column::NodeId",
        to = "super::shared_nodes::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    SharedNodes,
}
impl Related<super::shared_nodes::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::SharedNodes.def()
    }
}
// No custom save/delete hooks.
impl ActiveModelBehavior for ActiveModel {}
@@ -0,0 +1,37 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// Row of `health_records`: one health-check result for a shared node.
/// Codegen output — prefer regenerating over hand edits.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "health_records")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    // Foreign key to shared_nodes.id (cascading update/delete below).
    pub node_id: i32,
    // Stored HealthStatus string ("healthy", "timeout", ...).
    pub status: String,
    // Response time in milliseconds; 0 is used as the "not recorded" sentinel
    // (see Model::new_active_model in the db module).
    pub response_time: i32,
    // Non-optional: empty string means "no error".
    #[sea_orm(column_type = "Text")]
    pub error_message: String,
    pub checked_at: DateTimeWithTimeZone,
}
/// Links each record to its owning shared node.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::shared_nodes::Entity",
        from = "Column::NodeId",
        to = "super::shared_nodes::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    SharedNodes,
}
impl Related<super::shared_nodes::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::SharedNodes.def()
    }
}
// No custom save/delete hooks.
impl ActiveModelBehavior for ActiveModel {}
@@ -0,0 +1,6 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
pub mod prelude;
// NOTE(review): a connection_instances entity file exists alongside these
// modules but is not declared here — confirm whether
// `pub mod connection_instances;` is missing or the file is intentionally
// excluded from the build.
pub mod health_records;
pub mod shared_nodes;
@@ -0,0 +1,4 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
// Convenience re-exports of the entity types under short names.
pub use super::health_records::Entity as HealthRecords;
pub use super::shared_nodes::Entity as SharedNodes;
@@ -0,0 +1,44 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// Row of `shared_nodes`: a community-shared EasyTier public node.
/// Codegen output — prefer regenerating over hand edits.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "shared_nodes")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    pub name: String,
    pub host: String,
    pub port: i32,
    pub protocol: String,
    // Version string reported by the node; set empty at creation and filled
    // in later via NodeOperations::update_node_version.
    pub version: String,
    pub allow_relay: bool,
    pub network_name: String,
    pub network_secret: String,
    #[sea_orm(column_type = "Text")]
    pub description: String,
    pub max_connections: i32,
    pub current_connections: i32,
    pub is_active: bool,
    pub is_approved: bool,
    // Operator contact details (empty string when not provided).
    pub qq_number: String,
    pub wechat: String,
    pub mail: String,
    pub created_at: DateTimeWithTimeZone,
    pub updated_at: DateTimeWithTimeZone,
}
/// A node owns many health records.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::health_records::Entity")]
    HealthRecords,
}
impl Related<super::health_records::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::HealthRecords.def()
    }
}
// No custom save/delete hooks.
impl ActiveModelBehavior for ActiveModel {}
@@ -0,0 +1,351 @@
pub mod cleanup;
pub mod entity;
pub mod operations;
use std::fmt;
use sea_orm::{
prelude::*, sea_query::OnConflict, ColumnTrait as _, DatabaseConnection, DbErr, EntityTrait,
QueryFilter as _, Set, SqlxSqliteConnector, Statement, TransactionTrait as _,
};
use sea_orm_migration::MigratorTrait as _;
use serde::{Deserialize, Serialize};
use sqlx::{migrate::MigrateDatabase as _, Sqlite, SqlitePool};
use crate::migrator;
/// Shared handle to the application's SQLite database (clone-friendly).
#[derive(Debug, Clone)]
pub struct Db {
    // Original connection string/path; only stored, not re-read in the
    // visible code.
    db_path: String,
    // Raw sqlx pool backing the ORM connection.
    db: SqlitePool,
    // SeaORM connection built over the same pool.
    orm_db: DatabaseConnection,
}
impl Db {
    /// Open (creating if needed) the SQLite database at `db_path`, run
    /// migrations, then apply performance PRAGMAs.
    pub async fn new<T: ToString>(db_path: T) -> anyhow::Result<Self> {
        let db = Self::prepare_db(db_path.to_string().as_str()).await?;
        let orm_db = SqlxSqliteConnector::from_sqlx_sqlite_pool(db.clone());
        // Run database migrations before anything touches the schema.
        migrator::Migrator::up(&orm_db, None).await?;
        // Tune SQLite for this workload.
        Self::optimize_sqlite(&orm_db).await?;
        Ok(Self {
            db_path: db_path.to_string(),
            db,
            orm_db,
        })
    }
    /// In-memory database for tests; panics if setup fails.
    pub async fn memory_db() -> Self {
        Self::new(":memory:").await.unwrap()
    }
    /// Create the database file if missing and build a connection pool with
    /// no connection lifetime/idle limits.
    #[tracing::instrument(ret)]
    async fn prepare_db(db_path: &str) -> anyhow::Result<SqlitePool> {
        if !Sqlite::database_exists(db_path).await.unwrap_or(false) {
            tracing::info!("Database not found, creating a new one");
            Sqlite::create_database(db_path).await?;
        }
        let db = sqlx::pool::PoolOptions::new()
            .max_lifetime(None)
            .idle_timeout(None)
            .connect(db_path)
            .await?;
        Ok(db)
    }
    /// Apply SQLite performance PRAGMAs once at startup.
    async fn optimize_sqlite(db: &DatabaseConnection) -> Result<(), DbErr> {
        // PRAGMAs chosen for concurrency and throughput.
        let pragmas = vec![
            "PRAGMA journal_mode = WAL", // WAL mode for better concurrency
            "PRAGMA synchronous = NORMAL", // balance durability vs speed
            "PRAGMA cache_size = 10000", // larger page cache
            "PRAGMA temp_store = memory", // keep temp storage in memory
            "PRAGMA mmap_size = 268435456", // 256 MB memory-mapped I/O
            "PRAGMA foreign_keys = ON", // enforce FK constraints
        ];
        for pragma in pragmas {
            db.execute(sea_orm::Statement::from_string(
                sea_orm::DatabaseBackend::Sqlite,
                pragma.to_string(),
            ))
            .await?;
        }
        Ok(())
    }
    /// Clone of the underlying sqlx pool.
    pub fn inner(&self) -> SqlitePool {
        self.db.clone()
    }
    /// Borrow the SeaORM connection.
    pub fn orm_db(&self) -> &DatabaseConnection {
        &self.orm_db
    }
    /// Delete health records older than 30 days; returns rows removed.
    pub async fn cleanup_old_health_records(&self) -> Result<u64, DbErr> {
        use chrono::Duration;
        use entity::health_records;
        // NOTE(review): duplicates CleanupManager::cleanup_old_health_records
        // but computes the cutoff as naive UTC rather than local fixed-offset
        // — confirm which basis is intended for CheckedAt comparisons.
        let cutoff_date = chrono::Utc::now().naive_utc() - Duration::days(30);
        let result = health_records::Entity::delete_many()
            .filter(health_records::Column::CheckedAt.lt(cutoff_date))
            .exec(self.orm_db())
            .await?;
        Ok(result.rows_affected)
    }
    /// Count nodes, active nodes, and health records.
    pub async fn get_database_stats(&self) -> anyhow::Result<DatabaseStats> {
        use entity::{health_records, shared_nodes};
        let node_count = shared_nodes::Entity::find().count(self.orm_db()).await?;
        let health_record_count = health_records::Entity::find().count(self.orm_db()).await?;
        let active_nodes_count = shared_nodes::Entity::find()
            .filter(shared_nodes::Column::IsActive.eq(true))
            .count(self.orm_db())
            .await?;
        Ok(DatabaseStats {
            total_nodes: node_count,
            active_nodes: active_nodes_count,
            total_health_records: health_record_count,
        })
    }
}
/// Basic database counters.
// NOTE(review): structurally identical to the DatabaseStats type in the
// cleanup module — candidates for consolidation.
#[derive(Debug, Clone, serde::Serialize)]
pub struct DatabaseStats {
    pub total_nodes: u64,
    pub active_nodes: u64,
    pub total_health_records: u64,
}
/// Health state of a node, stored as a lowercase string in the database.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum HealthStatus {
    /// Check succeeded.
    Healthy,
    /// Check completed but the node is unhealthy.
    Unhealthy,
    /// Check timed out.
    Timeout,
    /// Connection could not be established.
    ConnectionError,
    /// Unrecognized stored value.
    Unknown,
}
impl fmt::Display for HealthStatus {
    /// Render the status as its lowercase database/wire string.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            HealthStatus::Healthy => "healthy",
            HealthStatus::Unhealthy => "unhealthy",
            HealthStatus::Timeout => "timeout",
            HealthStatus::ConnectionError => "connection_error",
            HealthStatus::Unknown => "unknown",
        };
        f.write_str(label)
    }
}
impl From<String> for HealthStatus {
fn from(s: String) -> Self {
match s.to_lowercase().as_str() {
"healthy" => HealthStatus::Healthy,
"unhealthy" => HealthStatus::Unhealthy,
"timeout" => HealthStatus::Timeout,
"connection_error" => HealthStatus::ConnectionError,
_ => HealthStatus::Unknown,
}
}
}
impl From<&str> for HealthStatus {
fn from(s: &str) -> Self {
HealthStatus::from(s.to_string())
}
}
/// Aggregated health statistics for a node over some window.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthStats {
    /// Total number of checks in the window.
    pub total_checks: u64,
    /// Number of healthy checks.
    pub healthy_count: u64,
    /// Number of unhealthy checks.
    pub unhealthy_count: u64,
    /// Healthy checks as a percentage of the total.
    pub health_percentage: f64,
    /// Mean response time in milliseconds (healthy checks only).
    pub average_response_time: Option<f64>,
    /// Uptime percentage (currently defined as `health_percentage`).
    pub uptime_percentage: f64,
    /// Timestamp of the most recent check.
    pub last_check_time: Option<chrono::DateTime<chrono::Utc>>,
    /// Status of the most recent check.
    pub last_status: Option<HealthStatus>,
}
impl Default for HealthStats {
fn default() -> Self {
Self {
total_checks: 0,
healthy_count: 0,
unhealthy_count: 0,
health_percentage: 0.0,
average_response_time: None,
uptime_percentage: 0.0,
last_check_time: None,
last_status: None,
}
}
}
impl HealthStats {
    /// Aggregate statistics from a list of health records.
    ///
    /// `records` are assumed to be sorted newest-first (callers pass the
    /// result of a `CheckedAt DESC` query), so `records.first()` is treated
    /// as the latest sample.
    pub fn from_records(records: &[self::entity::health_records::Model]) -> Self {
        if records.is_empty() {
            return Self::default();
        }
        let total_checks = records.len() as u64;
        let healthy_count = records.iter().filter(|r| r.is_healthy()).count() as u64;
        let unhealthy_count = total_checks - healthy_count;
        let health_percentage = if total_checks > 0 {
            (healthy_count as f64 / total_checks as f64) * 100.0
        } else {
            0.0
        };
        // Average response time over healthy samples only; a zero response
        // time means "not recorded" and is skipped.
        let healthy_records: Vec<_> = records
            .iter()
            .filter(|r| r.is_healthy() && r.response_time > 0)
            .collect();
        let average_response_time = if !healthy_records.is_empty() {
            let total_time: i32 = healthy_records.iter().map(|r| r.response_time).sum();
            Some(total_time as f64 / healthy_records.len() as f64)
        } else {
            None
        };
        // Uptime is currently defined as the healthy percentage.
        let uptime_percentage = health_percentage;
        // Latest sample — relies on the newest-first ordering noted above.
        let last_record = records.first();
        let last_check_time = last_record.map(|r| r.checked_at.into());
        let last_status = last_record.map(|r| HealthStatus::from(r.status.clone()));
        Self {
            total_checks,
            healthy_count,
            unhealthy_count,
            health_percentage,
            average_response_time,
            uptime_percentage,
            last_check_time,
            last_status,
        }
    }
}
/// Extension methods for health record models.
impl entity::health_records::Model {
    /// True when the stored status string parses to `HealthStatus::Healthy`.
    pub fn is_healthy(&self) -> bool {
        let status = HealthStatus::from(self.status.clone());
        matches!(status, HealthStatus::Healthy)
    }
    /// Build an `ActiveModel` for a new check result, stamped with the
    /// current UTC time. `None` response time and error map to the `0` /
    /// empty-string sentinels used by the schema.
    pub fn new_active_model(
        node_id: i32,
        status: HealthStatus,
        response_time: Option<i32>,
        error_message: Option<String>,
    ) -> entity::health_records::ActiveModel {
        entity::health_records::ActiveModel {
            node_id: Set(node_id),
            status: Set(status.to_string()),
            response_time: Set(response_time.unwrap_or(0)),
            error_message: Set(error_message.unwrap_or_default()),
            checked_at: Set(chrono::Utc::now().fixed_offset()),
            ..Default::default()
        }
    }
    /// Parse the stored status string.
    pub fn get_status(&self) -> HealthStatus {
        HealthStatus::from(self.status.clone())
    }
}
/// Extension methods for shared node models.
impl entity::shared_nodes::Model {
    /// Build an `ActiveModel` for a new shared node.
    ///
    /// Nodes created here start active but unapproved, with zero current
    /// connections.
    // NOTE(review): qq_number/wechat/mail are left NotSet (unlike
    // NodeOperations::create_node_model) — inserting this model relies on
    // database defaults for those columns; confirm the schema provides them.
    #[allow(clippy::too_many_arguments)]
    pub fn new_active_model(
        name: String,
        host: String,
        port: i32,
        protocol: String,
        version: Option<String>,
        description: Option<String>,
        max_connections: i32,
        allow_relay: bool,
        network_name: String,
        network_secret: Option<String>,
    ) -> entity::shared_nodes::ActiveModel {
        let now = chrono::Utc::now().fixed_offset();
        entity::shared_nodes::ActiveModel {
            name: Set(name),
            host: Set(host),
            port: Set(port),
            protocol: Set(protocol),
            version: Set(version.unwrap_or_default()),
            description: Set(description.unwrap_or_default()),
            max_connections: Set(max_connections),
            current_connections: Set(0),
            is_active: Set(true),
            is_approved: Set(false),
            allow_relay: Set(allow_relay),
            network_name: Set(network_name),
            network_secret: Set(network_secret.unwrap_or_default()),
            created_at: Set(now),
            updated_at: Set(now),
            ..Default::default()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use sea_orm::{ColumnTrait, EntityTrait, QueryFilter as _};
    #[tokio::test]
    async fn test_database_creation() {
        let db = Db::memory_db().await;
        let stats = db.get_database_stats().await.unwrap();
        // A freshly migrated in-memory database must contain no rows at all.
        assert_eq!(stats.total_nodes, 0);
        assert_eq!(stats.active_nodes, 0);
        assert_eq!(stats.total_health_records, 0);
    }
}
@@ -0,0 +1,343 @@
use crate::api::CreateNodeRequest;
use crate::db::entity::*;
use crate::db::Db;
use crate::db::HealthStats;
use crate::db::HealthStatus;
use sea_orm::*;
/// Namespace struct for node CRUD operations.
pub struct NodeOperations;
impl NodeOperations {
pub fn create_node_model(req: CreateNodeRequest) -> shared_nodes::ActiveModel {
shared_nodes::ActiveModel {
id: NotSet,
name: Set(req.name),
host: Set(req.host),
port: Set(req.port),
protocol: Set(req.protocol),
version: Set("".to_string()),
description: Set(req.description.unwrap_or_default()),
max_connections: Set(req.max_connections),
current_connections: Set(0),
is_active: Set(false),
is_approved: Set(false),
allow_relay: Set(req.allow_relay),
network_name: Set(req.network_name),
network_secret: Set(req.network_secret.unwrap_or_default()),
qq_number: Set(req.qq_number.unwrap_or_default()),
wechat: Set(req.wechat.unwrap_or_default()),
mail: Set(req.mail.unwrap_or_default()),
created_at: Set(chrono::Utc::now().fixed_offset()),
updated_at: Set(chrono::Utc::now().fixed_offset()),
}
}
/// 创建新节点
pub async fn create_node(
db: &Db,
req: CreateNodeRequest,
) -> Result<shared_nodes::Model, DbErr> {
let node = Self::create_node_model(req);
let insert_result = shared_nodes::Entity::insert(node).exec(db.orm_db()).await?;
shared_nodes::Entity::find_by_id(insert_result.last_insert_id)
.one(db.orm_db())
.await?
.ok_or(DbErr::RecordNotFound(
"Failed to retrieve created node".to_string(),
))
}
/// 获取所有节点
pub async fn get_all_nodes(db: &Db) -> Result<Vec<shared_nodes::Model>, DbErr> {
shared_nodes::Entity::find()
.order_by_asc(shared_nodes::Column::Id)
.all(db.orm_db())
.await
}
/// 根据ID获取节点
pub async fn get_node_by_id(db: &Db, id: i32) -> Result<Option<shared_nodes::Model>, DbErr> {
shared_nodes::Entity::find_by_id(id).one(db.orm_db()).await
}
/// 更新节点状态
pub async fn update_node_status(
db: &Db,
id: i32,
is_active: bool,
current_connections: Option<i32>,
) -> Result<shared_nodes::Model, DbErr> {
let mut node = shared_nodes::Entity::find_by_id(id)
.one(db.orm_db())
.await?
.ok_or(DbErr::RecordNotFound("Node not found".to_string()))?;
let mut node = node.into_active_model();
node.is_active = Set(is_active);
if let Some(connections) = current_connections {
node.current_connections = Set(connections);
}
node.updated_at = Set(chrono::Utc::now().fixed_offset());
let updated_node = shared_nodes::Entity::update(node).exec(db.orm_db()).await?;
Ok(updated_node)
}
/// 删除节点
pub async fn delete_node(db: &Db, id: i32) -> Result<u64, DbErr> {
let result = shared_nodes::Entity::delete_by_id(id)
.exec(db.orm_db())
.await?;
Ok(result.rows_affected)
}
/// 获取活跃节点
pub async fn get_active_nodes(db: &Db) -> Result<Vec<shared_nodes::Model>, DbErr> {
shared_nodes::Entity::find()
.filter(shared_nodes::Column::IsActive.eq(true))
.order_by_asc(shared_nodes::Column::Id)
.all(db.orm_db())
.await
}
/// 检查节点是否存在(根据host、port、protocol
pub async fn node_exists(
db: &Db,
host: &str,
port: i32,
protocol: &str,
) -> Result<bool, DbErr> {
let count = shared_nodes::Entity::find()
.filter(shared_nodes::Column::Host.eq(host))
.filter(shared_nodes::Column::Port.eq(port))
.filter(shared_nodes::Column::Protocol.eq(protocol))
.count(db.orm_db())
.await?;
Ok(count > 0)
}
pub async fn update_node_version(
db: &Db,
node_id: i32,
version: String,
) -> Result<shared_nodes::Model, DbErr> {
let mut node = shared_nodes::Entity::find_by_id(node_id)
.one(db.orm_db())
.await?
.ok_or(DbErr::RecordNotFound("Node not found".to_string()))?;
let mut node = node.into_active_model();
node.version = Set(version);
node.updated_at = Set(chrono::Utc::now().fixed_offset());
let updated_node = shared_nodes::Entity::update(node).exec(db.orm_db()).await?;
Ok(updated_node)
}
}
/// Namespace struct for health-record operations.
pub struct HealthOperations;
impl HealthOperations {
    /// Insert a health-check record for a node and return the stored row.
    pub async fn create_health_record(
        db: &Db,
        node_id: i32,
        status: HealthStatus,
        response_time: Option<i32>,
        error_message: Option<String>,
    ) -> Result<health_records::Model, DbErr> {
        let record =
            health_records::Model::new_active_model(node_id, status, response_time, error_message);
        let insert_result = health_records::Entity::insert(record)
            .exec(db.orm_db())
            .await?;
        health_records::Entity::find_by_id(insert_result.last_insert_id)
            .one(db.orm_db())
            .await?
            .ok_or(DbErr::RecordNotFound(
                "Failed to retrieve created health record".to_string(),
            ))
    }
    /// Fetch a node's health records, newest first, optionally bounded by a
    /// start date and a row limit.
    // NOTE(review): `from_date` is a naive datetime compared against the
    // timezone-aware CheckedAt column — confirm SQLite's comparison
    // semantics match the intent.
    pub async fn get_node_health_records(
        db: &Db,
        node_id: i32,
        from_date: Option<chrono::NaiveDateTime>,
        limit: Option<u64>,
    ) -> Result<Vec<health_records::Model>, DbErr> {
        let mut query = health_records::Entity::find()
            .filter(health_records::Column::NodeId.eq(node_id))
            .order_by_desc(health_records::Column::CheckedAt);
        if let Some(from_date) = from_date {
            query = query.filter(health_records::Column::CheckedAt.gte(from_date));
        }
        if let Some(limit) = limit {
            query = query.limit(Some(limit));
        }
        query.all(db.orm_db()).await
    }
    /// Most recent health record for a node, if any.
    pub async fn get_latest_health_status(
        db: &Db,
        node_id: i32,
    ) -> Result<Option<health_records::Model>, DbErr> {
        health_records::Entity::find()
            .filter(health_records::Column::NodeId.eq(node_id))
            .order_by_desc(health_records::Column::CheckedAt)
            .one(db.orm_db())
            .await
    }
    /// Aggregate health statistics for the last `hours` hours.
    pub async fn get_health_stats(db: &Db, node_id: i32, hours: i64) -> Result<HealthStats, DbErr> {
        let since = chrono::Utc::now().naive_utc() - chrono::Duration::hours(hours);
        let records = health_records::Entity::find()
            .filter(health_records::Column::NodeId.eq(node_id))
            .filter(health_records::Column::CheckedAt.gte(since))
            .order_by_desc(health_records::Column::CheckedAt)
            .all(db.orm_db())
            .await?;
        Ok(HealthStats::from_records(&records))
    }
    /// Delete health records older than `days` days; returns rows removed.
    pub async fn cleanup_old_records(db: &Db, days: i64) -> Result<u64, DbErr> {
        let cutoff = chrono::Utc::now().naive_utc() - chrono::Duration::days(days);
        let result = health_records::Entity::delete_many()
            .filter(health_records::Column::CheckedAt.lt(cutoff))
            .exec(db.orm_db())
            .await?;
        Ok(result.rows_affected)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Db;

    /// Request payload shared by the tests below (was duplicated inline).
    fn test_request() -> CreateNodeRequest {
        CreateNodeRequest {
            name: "Test Node".to_string(),
            host: "test.example.com".to_string(),
            port: 11010,
            protocol: "tcp".to_string(),
            description: Some("Test node".to_string()),
            max_connections: 100,
            allow_relay: false,
            network_name: "test-network".to_string(),
            network_secret: Some("test-secret".to_string()),
            qq_number: Some("123456789".to_string()),
            wechat: Some("test_wechat".to_string()),
            mail: Some("test@example.com".to_string()),
        }
    }

    #[tokio::test]
    async fn test_node_operations() {
        let db = Db::memory_db().await;
        // Create a node and verify the persisted fields.
        let node = NodeOperations::create_node(&db, test_request())
            .await
            .unwrap();
        assert_eq!(node.name, "Test Node");
        assert_eq!(node.host, "test.example.com");
        assert_eq!(node.port, 11010);
        // create_node_model stores new nodes as inactive until approved, so
        // the previous `assert!(node.is_active)` was inverted.
        assert!(!node.is_active);
        // Fetch by id.
        let found_node = NodeOperations::get_node_by_id(&db, node.id).await.unwrap();
        assert!(found_node.is_some());
        assert_eq!(found_node.unwrap().id, node.id);
        // List all nodes.
        let all_nodes = NodeOperations::get_all_nodes(&db).await.unwrap();
        assert_eq!(all_nodes.len(), 1);
        // Existence checks keyed on (host, port, protocol).
        let exists = NodeOperations::node_exists(&db, "test.example.com", 11010, "tcp")
            .await
            .unwrap();
        assert!(exists);
        let not_exists = NodeOperations::node_exists(&db, "nonexistent.com", 8080, "tcp")
            .await
            .unwrap();
        assert!(!not_exists);
    }

    #[tokio::test]
    async fn test_health_operations() {
        let db = Db::memory_db().await;
        // Health records need an owning node first.
        let node = NodeOperations::create_node(&db, test_request())
            .await
            .unwrap();
        // Create a healthy record with a 100 ms response time.
        let record = HealthOperations::create_health_record(
            &db,
            node.id,
            HealthStatus::Healthy,
            Some(100),
            None,
        )
        .await
        .unwrap();
        assert_eq!(record.node_id, node.id);
        assert!(record.is_healthy());
        assert_eq!(record.response_time, 100);
        // Fetch records for the node.
        let records = HealthOperations::get_node_health_records(&db, node.id, None, None)
            .await
            .unwrap();
        assert_eq!(records.len(), 1);
        // Latest status is the record just written.
        let latest = HealthOperations::get_latest_health_status(&db, node.id)
            .await
            .unwrap();
        assert!(latest.is_some());
        assert_eq!(latest.unwrap().id, record.id);
        // Stats over the last 24 hours cover exactly that one record.
        let stats = HealthOperations::get_health_stats(&db, node.id, 24)
            .await
            .unwrap();
        assert_eq!(stats.total_checks, 1);
        assert_eq!(stats.healthy_count, 1);
        assert_eq!(stats.health_percentage, 100.0);
    }
}
@@ -0,0 +1,660 @@
use std::{
ops::{DerefMut, Div},
sync::Arc,
time::{Duration, Instant},
};
use anyhow::Context as _;
use dashmap::DashMap;
use easytier::{
common::{
config::{ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader},
scoped_task::ScopedTask,
},
defer,
instance_manager::NetworkInstanceManager,
launcher::ConfigSource,
};
use serde::{Deserialize, Serialize};
use sqlx::any;
use tracing::{debug, error, info, instrument, warn};
use crate::db::{
entity::shared_nodes,
operations::{HealthOperations, NodeOperations},
Db, HealthStatus,
};
/// Request to health-check a single node.
// NOTE(review): unused in the visible code, and node_id is a String while
// database node ids are i32 — confirm intended usage before relying on it.
pub struct HealthCheckOneNode {
    node_id: String,
}
/// Bucket granularity of the health-check ring, in seconds (15 minutes).
const HEALTH_CHECK_RING_GRANULARITY_SEC: usize = 60 * 15;
/// Total time span covered by the ring, in seconds (one day at most).
const HEALTH_CHECK_RING_MAX_DURATION_SEC: usize = 60 * 60 * 24;
// Debug-sized alternatives, kept for quick local testing:
// const HEALTH_CHECK_RING_GRANULARITY_SEC: usize = 10;
// const HEALTH_CHECK_RING_MAX_DURATION_SEC: usize = 60;
/// Number of buckets in the ring.
const HEALTH_CHECK_RING_SIZE: usize =
    HEALTH_CHECK_RING_MAX_DURATION_SEC / HEALTH_CHECK_RING_GRANULARITY_SEC;

/// One ring bucket: a counter tagged with the "round" (full ring rotation)
/// it was last written in, so data left over from an earlier rotation is
/// discarded instead of being read as current.
#[derive(Debug, Default, Clone)]
struct RingItem {
    counter: u64,
    round: u64,
}

impl RingItem {
    /// Reset the counter when `timestamp` falls into a newer rotation than
    /// the one this bucket was last written in.
    fn try_update_round(&mut self, timestamp: u64) {
        let rotation_len = (HEALTH_CHECK_RING_GRANULARITY_SEC * HEALTH_CHECK_RING_SIZE) as u64;
        let cur_round = timestamp / rotation_len;
        if cur_round != self.round {
            self.round = cur_round;
            self.counter = 0;
        }
    }

    /// Bump the counter for the rotation containing `timestamp`.
    fn inc(&mut self, timestamp: u64) {
        self.try_update_round(timestamp);
        self.counter += 1;
    }

    /// Read the counter, first discarding data from an older rotation.
    fn get(&mut self, timestamp: u64) -> u64 {
        self.try_update_round(timestamp);
        self.counter
    }
}
/// In-memory health state for one node, including ring buffers of check
/// counts covering up to the last day.
#[derive(Debug, Clone)]
pub struct HealthyMemRecord {
    node_id: i32,
    current_health_status: HealthStatus,
    last_error_info: Option<String>,
    last_check_time: chrono::DateTime<chrono::Utc>,
    // Response time of the most recent check, in milliseconds (None when
    // not recorded).
    last_response_time: Option<i32>,
    // the current time is corresponding to the index by modulo with UNIX-timestamp.
    total_check_counter_ring: Vec<RingItem>,
    healthy_counter_ring: Vec<RingItem>,
}
impl HealthyMemRecord {
    /// Create an empty record for `node_id` with all ring buckets zeroed.
    pub fn new(node_id: i32) -> Self {
        Self {
            node_id,
            current_health_status: HealthStatus::Unknown,
            last_error_info: None,
            last_check_time: chrono::Utc::now(),
            last_response_time: None,
            total_check_counter_ring: vec![Default::default(); HEALTH_CHECK_RING_SIZE],
            healthy_counter_ring: vec![Default::default(); HEALTH_CHECK_RING_SIZE],
        }
    }

    /// Initialize an in-memory record from persisted health records.
    ///
    /// `records` are expected newest-first (as returned by
    /// `HealthOperations::get_node_health_records`); `records.first()` seeds
    /// the "last check" fields.
    pub fn from_db_records(
        node_id: i32,
        records: &[crate::db::entity::health_records::Model],
    ) -> Self {
        let mut mem_record = Self::new(node_id);
        if let Some(latest) = records.first() {
            mem_record.current_health_status = latest.get_status();
            mem_record.last_check_time = latest.checked_at.to_utc();
            // Zero is the stored sentinel for "no response time recorded".
            mem_record.last_response_time = if latest.response_time == 0 {
                None
            } else {
                Some(latest.response_time)
            };
            mem_record.last_error_info = if latest.error_message.is_empty() {
                None
            } else {
                Some(latest.error_message.clone())
            };
        }
        mem_record.populate_ring_from_records(records);
        mem_record
    }

    /// Fold historical records into the ring buffers; records older than the
    /// ring's time span are ignored.
    fn populate_ring_from_records(&mut self, records: &[crate::db::entity::health_records::Model]) {
        let now = chrono::Utc::now().timestamp() as usize;
        for record in records {
            let record_time = record.checked_at.to_utc().timestamp() as usize;
            let time_diff = now.saturating_sub(record_time);
            if time_diff < HEALTH_CHECK_RING_MAX_DURATION_SEC {
                let ring_index =
                    (record_time / HEALTH_CHECK_RING_GRANULARITY_SEC) % HEALTH_CHECK_RING_SIZE;
                self.total_check_counter_ring[ring_index].inc(record_time as u64);
                if record.get_status() == HealthStatus::Healthy {
                    self.healthy_counter_ring[ring_index].inc(record_time as u64);
                }
            }
        }
    }

    /// Record the outcome of a health check and update the ring buffers.
    pub fn update_health_status(
        &mut self,
        status: HealthStatus,
        response_time: Option<i32>,
        error_message: Option<String>,
    ) {
        self.current_health_status = status.clone();
        self.last_check_time = chrono::Utc::now();
        self.last_response_time = response_time;
        self.last_error_info = error_message;
        let now = chrono::Utc::now().timestamp() as usize;
        let ring_index = (now / HEALTH_CHECK_RING_GRANULARITY_SEC) % HEALTH_CHECK_RING_SIZE;
        self.total_check_counter_ring[ring_index].inc(now as u64);
        // Refresh the healthy bucket's round even on unhealthy checks so a
        // count left over from an older rotation is discarded.
        self.healthy_counter_ring[ring_index].try_update_round(now as u64);
        if status == HealthStatus::Healthy {
            self.healthy_counter_ring[ring_index].inc(now as u64);
        }
    }

    /// Aggregate health statistics over roughly the last `hours` hours,
    /// clamped to the ring's one-day span.
    ///
    /// Previously `hours` was ignored and stale-rotation buckets were summed
    /// as if current; both are corrected here by checking each bucket's
    /// round against the rotation its time slot belongs to (the read-only
    /// equivalent of `RingItem::get`).
    pub fn get_health_stats(&self, hours: u64) -> crate::db::HealthStats {
        let now = chrono::Utc::now().timestamp() as usize;
        let rotation_len = HEALTH_CHECK_RING_GRANULARITY_SEC * HEALTH_CHECK_RING_SIZE;
        // Clamp the requested window to what the ring actually covers.
        let window_buckets = ((hours as usize * 3600) / HEALTH_CHECK_RING_GRANULARITY_SEC)
            .clamp(1, HEALTH_CHECK_RING_SIZE);
        let mut total_checks = 0;
        let mut healthy_count = 0;
        for i in 0..window_buckets {
            let bucket_time = now.saturating_sub(i * HEALTH_CHECK_RING_GRANULARITY_SEC);
            let ring_index =
                (bucket_time / HEALTH_CHECK_RING_GRANULARITY_SEC) % HEALTH_CHECK_RING_SIZE;
            // Only buckets written during this slot's rotation are valid.
            let expected_round = (bucket_time / rotation_len) as u64;
            let total_item = &self.total_check_counter_ring[ring_index];
            if total_item.round == expected_round {
                total_checks += total_item.counter;
            }
            let healthy_item = &self.healthy_counter_ring[ring_index];
            if healthy_item.round == expected_round {
                healthy_count += healthy_item.counter;
            }
        }
        let health_percentage = if total_checks > 0 {
            (healthy_count as f64 / total_checks as f64) * 100.0
        } else {
            0.0
        };
        crate::db::HealthStats {
            total_checks,
            healthy_count,
            unhealthy_count: total_checks.saturating_sub(healthy_count),
            health_percentage,
            // NOTE(review): this is the most recent sample, not an average
            // over the window — kept for compatibility with existing callers.
            average_response_time: self.last_response_time.map(|rt| rt as f64),
            uptime_percentage: health_percentage,
            last_check_time: Some(self.last_check_time),
            last_status: Some(self.current_health_status.clone()),
        }
    }

    /// Most recent health status observed for this node.
    pub fn get_current_health_status(&self) -> &HealthStatus {
        &self.current_health_status
    }

    /// Timestamp of the most recent health check.
    pub fn get_last_check_time(&self) -> chrono::DateTime<chrono::Utc> {
        self.last_check_time
    }

    /// Response time of the most recent check, if recorded.
    pub fn get_last_response_time(&self) -> Option<i32> {
        self.last_response_time
    }

    /// Error message of the most recent failed check, if any.
    pub fn get_last_error_info(&self) -> &Option<String> {
        &self.last_error_info
    }

    /// Snapshot both rings ordered from the most recent bucket backwards.
    ///
    /// Returns `(total_checks_per_bucket, healthy_checks_per_bucket)`.
    pub fn get_counter_ring(&mut self) -> (Vec<u64>, Vec<u64>) {
        let now = self.last_check_time.timestamp() as usize;
        let mut total_ring = vec![0; HEALTH_CHECK_RING_SIZE];
        let mut healthy_ring = vec![0; HEALTH_CHECK_RING_SIZE];
        for i in 0..HEALTH_CHECK_RING_SIZE {
            // saturating_sub guards against underflow for pathological clocks.
            let ring_time = now.saturating_sub(i * HEALTH_CHECK_RING_GRANULARITY_SEC);
            let ring_index =
                ring_time.div_euclid(HEALTH_CHECK_RING_GRANULARITY_SEC) % HEALTH_CHECK_RING_SIZE;
            total_ring[i] = self.total_check_counter_ring[ring_index].get(ring_time as u64);
            // Use `get` here too (the original read `.counter` directly), so a
            // healthy bucket from an older rotation reads as zero instead of
            // leaking stale data.
            healthy_ring[i] = self.healthy_counter_ring[ring_index].get(ring_time as u64);
        }
        (total_ring, healthy_ring)
    }

    /// Bucket width of the ring, in seconds.
    pub fn get_ring_granularity(&self) -> u32 {
        HEALTH_CHECK_RING_GRANULARITY_SEC as u32
    }
}
/// Health checker for shared nodes, keeping per-node state in concurrent maps.
pub struct HealthChecker {
    // Application database handle.
    db: Db,
    // Manages the EasyTier network instances used for probing.
    instance_mgr: Arc<NetworkInstanceManager>,
    // node id -> instance uuid (presumably the running probe instance —
    // usage not visible here; confirm).
    inst_id_map: DashMap<i32, uuid::Uuid>,
    // node id -> per-node checker task (ScopedTask cancels on drop).
    node_tasks: DashMap<i32, ScopedTask<()>>,
    // node id -> in-memory health record.
    node_records: Arc<DashMap<i32, HealthyMemRecord>>,
    // node id -> TOML config associated with the node.
    node_cfg: Arc<DashMap<i32, TomlConfigLoader>>,
}
impl HealthChecker {
/// Build a health checker backed by `db`, with a fresh network
/// instance manager and empty bookkeeping maps.
pub fn new(db: Db) -> Self {
    Self {
        instance_mgr: Arc::new(NetworkInstanceManager::new()),
        inst_id_map: DashMap::new(),
        node_tasks: DashMap::new(),
        node_records: Arc::new(DashMap::new()),
        node_cfg: Arc::new(DashMap::new()),
        db,
    }
}
/// At startup, load every node's recent health records from the
/// database into the in-memory ring buffers.
pub async fn load_health_records_from_db(&self) -> anyhow::Result<()> {
    info!("Loading health records from database...");
    // Fetch all nodes known to the database.
    let nodes = NodeOperations::get_all_nodes(&self.db)
        .await
        .with_context(|| "Failed to get all nodes from database")?;
    // Only records inside the ring's time window can seed the buffers.
    let from_date = chrono::Utc::now().naive_utc()
        - chrono::Duration::seconds(HEALTH_CHECK_RING_MAX_DURATION_SEC as i64);
    for node in nodes {
        // Recent records for this node, used to initialize its ring buffer.
        let records =
            HealthOperations::get_node_health_records(&self.db, node.id, Some(from_date), None)
                .await
                .with_context(|| {
                    format!("Failed to get health records for node {}", node.id)
                })?;
        // Build the in-memory record from the historical rows.
        let mem_record = HealthyMemRecord::from_db_records(node.id, &records);
        self.node_records.insert(node.id, mem_record);
        debug!(
            "Loaded {} health records for node {} ({})",
            records.len(),
            node.id,
            node.name
        );
    }
    info!(
        "Loaded health records for {} nodes",
        self.node_records.len()
    );
    Ok(())
}
/// Return a snapshot (clone) of the in-memory health record for
/// `node_id`, or `None` if the node has never been tracked.
pub fn get_node_memory_record(&self, node_id: i32) -> Option<HealthyMemRecord> {
    let entry = self.node_records.get(&node_id)?;
    Some(entry.value().clone())
}
/// Compute health statistics for `node_id` over the last `hours` hours
/// from the in-memory record; `None` if the node is not tracked.
pub fn get_node_health_stats(
    &self,
    node_id: i32,
    hours: u64,
) -> Option<crate::db::HealthStats> {
    let record = self.node_records.get(&node_id)?;
    Some(record.get_health_stats(hours))
}
/// Current `(node_id, status, last_error)` triple for every tracked
/// node, read from the in-memory records.
pub fn get_all_nodes_health_status(&self) -> Vec<(i32, HealthStatus, Option<String>)> {
    let mut statuses = Vec::with_capacity(self.node_records.len());
    for entry in self.node_records.iter() {
        let record = entry.value();
        statuses.push((
            record.node_id,
            record.current_health_status.clone(),
            record.last_error_info.clone(),
        ));
    }
    statuses
}
/// Re-read the node's config from the database and, if it differs from
/// the one currently running, restart the node's probe instance.
pub async fn try_update_node(&self, node_id: i32) -> anyhow::Result<()> {
    let old_cfg = self
        .node_cfg
        .get(&node_id)
        .ok_or_else(|| anyhow::anyhow!("old node cfg not found, node_id: {}", node_id))?
        .clone();
    // Rebuild with the existing instance id so the dump comparison
    // below isn't tripped up by a freshly generated random id.
    let new_cfg = self.get_node_cfg(node_id, Some(old_cfg.get_id())).await?;
    if new_cfg.dump() != old_cfg.dump() {
        self.remove_node(node_id).await?;
        self.add_node(node_id).await?;
        info!("node {} cfg updated", node_id);
    }
    Ok(())
}
/// Build the probe-instance config for the given shared node.
///
/// The probe joins the node's network as peer
/// `<protocol>://<host>:<port>`, runs without a TUN device, and
/// disables P2P and UDP hole punching so the only connectivity is the
/// direct link to the node under test.
async fn get_node_cfg_with_model(
    &self,
    node_info: &shared_nodes::Model,
    inst_id: Option<uuid::Uuid>,
) -> anyhow::Result<TomlConfigLoader> {
    let cfg = TomlConfigLoader::default();
    cfg.set_peers(vec![PeerConfig {
        uri: format!(
            "{}://{}:{}",
            node_info.protocol, node_info.host, node_info.port
        )
        .parse()
        .with_context(|| "failed to parse peer uri")?,
    }]);
    // Reuse the caller-provided instance id (config-update path) or
    // mint a fresh one (new probe).
    let inst_id = inst_id.unwrap_or(uuid::Uuid::new_v4());
    cfg.set_id(inst_id);
    cfg.set_network_identity(NetworkIdentity::new(
        node_info.network_name.clone(),
        node_info.network_secret.clone(),
    ));
    // Fixed hostname lets the health probe recognize (and skip) itself
    // when scanning the peer list.
    cfg.set_hostname(Some("HealthCheckNode".to_string()));
    let mut flags = cfg.get_flags();
    flags.no_tun = true;
    flags.disable_p2p = true;
    flags.disable_udp_hole_punching = true;
    cfg.set_flags(flags);
    Ok(cfg)
}
/// One-shot connectivity test: spin up a temporary probe instance for
/// `node_info` and poll until it reports healthy or `max_time` elapses.
///
/// The throwaway instance is torn down on every exit path (see `defer!`).
pub async fn test_connection(
    &self,
    node_info: &shared_nodes::Model,
    max_time: Duration,
) -> anyhow::Result<()> {
    let cfg = self.get_node_cfg_with_model(node_info, None).await?;
    // Ensure the temporary instance is deleted no matter how we return.
    defer!({
        let _ = self
            .instance_mgr
            .delete_network_instance(vec![cfg.get_id()]);
    });
    self.instance_mgr
        .run_network_instance(cfg.clone(), ConfigSource::FFI)
        .with_context(|| "failed to run network instance")?;
    let now = Instant::now();
    let mut err = None;
    // Poll every 100ms until healthy or the deadline passes, keeping
    // the last error for the failure report.
    while now.elapsed() < max_time {
        match Self::test_node_healthy(cfg.get_id(), self.instance_mgr.clone()).await {
            Ok(_) => {
                return Ok(());
            }
            Err(e) => {
                warn!(
                    "test node healthy failed, node_info: {:?}, err: {}",
                    node_info, e
                );
                err = Some(e);
            }
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
    Err(anyhow::anyhow!("test node healthy failed, err: {:?}", err))
}
async fn get_node_cfg(
&self,
node_id: i32,
inst_id: Option<uuid::Uuid>,
) -> anyhow::Result<TomlConfigLoader> {
let node_info = NodeOperations::get_node_by_id(&self.db, node_id)
.await
.with_context(|| format!("failed to get node by id: {}", node_id))?
.ok_or_else(|| anyhow::anyhow!("node not found"))?;
self.get_node_cfg_with_model(&node_info, inst_id).await
}
/// Start health monitoring for `node_id`: launch its probe instance,
/// seed an in-memory record (from DB history when available), and spawn
/// the periodic health-check task.
pub async fn add_node(&self, node_id: i32) -> anyhow::Result<()> {
    let cfg = self.get_node_cfg(node_id, None).await?;
    info!(
        "Add node {} to health checker, cfg: {}",
        node_id,
        cfg.dump()
    );
    self.instance_mgr
        .run_network_instance(cfg.clone(), ConfigSource::FFI)
        .with_context(|| "failed to run network instance")?;
    self.inst_id_map.insert(node_id, cfg.get_id());
    // Initialize the in-memory record only if this node has never been
    // seen (records are intentionally retained across remove_node).
    if !self.node_records.contains_key(&node_id) {
        // Seed the ring buffers from recent database history.
        let from_date = chrono::Utc::now().naive_utc()
            - chrono::Duration::seconds(HEALTH_CHECK_RING_MAX_DURATION_SEC as i64);
        if let Ok(records) =
            HealthOperations::get_node_health_records(&self.db, node_id, Some(from_date), None)
                .await
        {
            let mem_record = HealthyMemRecord::from_db_records(node_id, &records);
            self.node_records.insert(node_id, mem_record);
            info!(
                "Initialized memory record for node {} with {} historical records",
                node_id,
                records.len()
            );
        } else {
            // DB lookup failed; fall back to an empty record.
            self.node_records
                .insert(node_id, HealthyMemRecord::new(node_id));
            info!("Initialized new memory record for node {}", node_id);
        }
    }
    // Spawn the periodic health-check task; the ScopedTask handle is
    // retained so that removing the node stops the task.
    let task = ScopedTask::from(tokio::spawn(Self::node_health_check_task(
        node_id,
        cfg.get_id(),
        Arc::clone(&self.instance_mgr),
        self.db.clone(),
        Arc::clone(&self.node_records),
    )));
    self.node_tasks.insert(node_id, task);
    self.node_cfg.insert(node_id, cfg.clone());
    Ok(())
}
/// Stop monitoring `node_id`: drop its check task, delete its probe
/// instance and cached config. The in-memory health record is kept so
/// historical stats remain queryable.
pub async fn remove_node(&self, node_id: i32) -> anyhow::Result<()> {
    // Dropping the ScopedTask stops the periodic check.
    self.node_tasks.remove(&node_id);
    if let Some((_, inst_id)) = self.inst_id_map.remove(&node_id) {
        let _ = self.instance_mgr.delete_network_instance(vec![inst_id]);
    }
    self.node_cfg.remove(&node_id);
    // Deliberately NOT removing from node_records.
    info!(
        "Removed health check task for node {}, memory record retained",
        node_id
    );
    Ok(())
}
#[instrument(err, ret, skip(instance_mgr))]
async fn test_node_healthy(
inst_id: uuid::Uuid,
instance_mgr: Arc<NetworkInstanceManager>,
// return version, response time on healthy, conn_count
) -> anyhow::Result<(String, u64, u32)> {
let Some(instance) = instance_mgr.get_network_info(&inst_id) else {
anyhow::bail!("healthy check node is not started");
};
let running = instance.running;
// health check node is not running, update db
if !running {
anyhow::bail!("healthy check node is not running");
}
if let Some(err) = instance.error_msg {
anyhow::bail!("healthy check node has error: {}", err);
}
let p = instance.peer_route_pairs;
// dst node is not online
let Some(dst_node) = p.iter().find(|x| {
// we disable p2p, so we only check direct connected peer
x.route.as_ref().is_some_and(|route| {
!route.feature_flag.unwrap().is_public_server && route.hostname != "HealthCheckNode"
}) && x.peer.as_ref().is_some_and(|p| !p.conns.is_empty())
}) else {
anyhow::bail!("dst node is not online");
};
let Some(route_info) = &dst_node.route else {
anyhow::bail!("dst node route is not found");
};
let Some(peer_info) = &dst_node.peer else {
anyhow::bail!("dst node peer is not found");
};
let version = route_info
.version
.clone()
.split("-")
.next()
.unwrap_or("")
.to_string();
// 计算响应时间(这里可以根据实际需要实现)
let response_time = peer_info
.conns
.iter()
.filter_map(|x| x.stats)
.map(|x| x.latency_us)
.min()
.unwrap_or(0);
let peer_id = peer_info.peer_id;
let conn_count = if let Some(summary) = instance.foreign_network_summary {
summary
.info_map
.get(&peer_id)
.map(|x| x.network_count)
.unwrap_or(0)
} else {
0
};
Ok((version, response_time, conn_count))
}
/// Background task: probe the node every 5 seconds and record the
/// outcome to both the database and the shared in-memory record.
/// Runs until the owning task handle is dropped.
async fn node_health_check_task(
    node_id: i32,
    inst_id: uuid::Uuid,
    instance_mgr: Arc<NetworkInstanceManager>,
    db: Db,
    node_records: Arc<DashMap<i32, HealthyMemRecord>>,
) {
    /// Record one health-check outcome to the database and the
    /// in-memory record (creating the latter if it is missing).
    async fn record_health_status(
        db: &Db,
        node_records: &Arc<DashMap<i32, HealthyMemRecord>>,
        node_id: i32,
        status: HealthStatus,
        response_time: Option<i32>,
        error_message: Option<String>,
    ) {
        // Persist to the database; failures are logged but never stop
        // the check loop.
        if let Err(e) = HealthOperations::create_health_record(
            db,
            node_id,
            status.clone(),
            response_time,
            error_message.clone(),
        )
        .await
        {
            error!("Failed to create health record for node {}: {}", node_id, e);
        }
        // Update (or lazily create) the in-memory record.
        if let Some(mut record) = node_records.get_mut(&node_id) {
            record.update_health_status(status, response_time, error_message);
        } else {
            let mut new_record = HealthyMemRecord::new(node_id);
            new_record.update_health_status(status, response_time, error_message);
            node_records.insert(node_id, new_record);
        }
    }
    let mut tick = tokio::time::interval(Duration::from_secs(5));
    let mut counter: u64 = 0;
    loop {
        // Skip the wait on the very first pass so a newly added node is
        // probed immediately.
        if counter != 0 {
            tick.tick().await;
        }
        counter += 1;
        match Self::test_node_healthy(inst_id, instance_mgr.clone()).await {
            Ok((version, response_time, conn_count)) => {
                // Mark the node online and record its relay usage.
                if let Err(e) = NodeOperations::update_node_status(
                    &db,
                    node_id,
                    true,
                    Some(conn_count as i32),
                )
                .await
                {
                    error!("Failed to update node status for node {}: {}", node_id, e);
                }
                record_health_status(
                    &db,
                    &node_records,
                    node_id,
                    HealthStatus::Healthy,
                    Some(response_time as i32),
                    None,
                )
                .await;
                // update node version
                if let Err(e) = NodeOperations::update_node_version(&db, node_id, version).await
                {
                    error!("Failed to update node version for node {}: {}", node_id, e);
                }
            }
            Err(e) => {
                // Mark the node offline and record the failure reason.
                if let Err(e) =
                    NodeOperations::update_node_status(&db, node_id, false, None).await
                {
                    error!("Failed to update node status for node {}: {}", node_id, e);
                }
                record_health_status(
                    &db,
                    &node_records,
                    node_id,
                    HealthStatus::Unhealthy,
                    None,
                    Some(e.to_string()),
                )
                .await;
            }
        }
    }
}
}
@@ -0,0 +1,160 @@
use std::{collections::HashSet, sync::Arc, time::Duration};
use anyhow::Context as _;
use tokio::time::{interval, Interval};
use tracing::{error, info};
use crate::{
db::{entity::shared_nodes, operations::NodeOperations, Db},
health_checker::HealthChecker,
};
/// Wrapper around `HealthChecker` that watches the database for node
/// additions, removals, and config changes and keeps the checker's
/// monitored set in sync.
pub struct HealthCheckerManager {
    // The underlying health checker that runs the probes.
    health_checker: Arc<HealthChecker>,
    // Database handle used to enumerate nodes.
    db: Db,
    // Ids of the nodes currently under monitoring.
    current_nodes: Arc<tokio::sync::RwLock<HashSet<i32>>>,
    // How often the database is polled for node changes.
    monitor_interval: Duration,
}
impl HealthCheckerManager {
/// Build a manager over the given checker and database, with an empty
/// monitored set and a 1-second default poll interval.
pub fn new(health_checker: Arc<HealthChecker>, db: Db) -> Self {
    let current_nodes = Arc::new(tokio::sync::RwLock::new(HashSet::new()));
    Self {
        health_checker,
        db,
        current_nodes,
        // Default: poll the node table once per second.
        monitor_interval: Duration::from_secs(1),
    }
}
/// 设置监控间隔
pub fn with_monitor_interval(mut self, interval: Duration) -> Self {
self.monitor_interval = interval;
self
}
/// Spawn the background task that periodically diffs the database's
/// node set against the monitored set and applies the changes.
///
/// NOTE(review): the spawned task runs for the life of the process —
/// there is currently no handle or shutdown mechanism for it.
pub async fn start_monitoring(&self) -> anyhow::Result<()> {
    // Clone everything the detached task needs to own.
    let health_checker = Arc::clone(&self.health_checker);
    let db = self.db.clone();
    let current_nodes = Arc::clone(&self.current_nodes);
    let monitor_interval = self.monitor_interval;
    tokio::spawn(async move {
        let mut ticker = interval(monitor_interval);
        loop {
            // Check first, then wait, so the initial sync happens
            // immediately at startup.
            if let Err(e) = Self::check_node_changes(&health_checker, &db, &current_nodes).await
            {
                tracing::error!("Error checking node changes: {}", e);
            }
            ticker.tick().await;
        }
    });
    Ok(())
}
/// 检查节点变化并更新监控
async fn check_node_changes(
health_checker: &Arc<HealthChecker>,
db: &Db,
current_nodes: &Arc<tokio::sync::RwLock<HashSet<i32>>>,
) -> anyhow::Result<()> {
// 获取数据库中当前的所有节点
let db_nodes = NodeOperations::get_all_nodes(db)
.await
.with_context(|| "Failed to get all nodes from database")?;
let db_node_ids: HashSet<i32> = db_nodes.iter().map(|node| node.id).collect();
let mut current_nodes_guard = current_nodes.write().await;
// 检查新增的节点
for &node_id in &db_node_ids {
if !current_nodes_guard.contains(&node_id) {
// 新节点,添加到监控
if let Err(e) = health_checker.add_node(node_id).await {
error!("Failed to add node {} to health checker: {}", node_id, e);
continue;
}
current_nodes_guard.insert(node_id);
info!("Added new node {} to health monitoring", node_id);
} else if let Err(e) = health_checker.try_update_node(node_id).await {
error!("Failed to add node {} to health checker: {}", node_id, e);
}
}
// 检查删除的节点
let nodes_to_remove: Vec<i32> = current_nodes_guard
.iter()
.filter(|&&node_id| !db_node_ids.contains(&node_id))
.copied()
.collect();
for node_id in nodes_to_remove {
// 节点已删除,从监控中移除
if let Err(e) = health_checker.remove_node(node_id).await {
error!(
"Failed to remove node {} from health checker: {}",
node_id, e
);
continue;
}
current_nodes_guard.remove(&node_id);
info!("Removed node {} from health monitoring", node_id);
}
Ok(())
}
/// Manually trigger one node-change reconciliation pass.
pub async fn refresh_nodes(&self) -> anyhow::Result<()> {
    Self::check_node_changes(&self.health_checker, &self.db, &self.current_nodes).await
}
/// Number of nodes currently being monitored.
pub async fn get_monitored_node_count(&self) -> usize {
    self.current_nodes.read().await.len()
}
/// Ids of the nodes currently being monitored (unordered snapshot).
pub async fn get_monitored_nodes(&self) -> Vec<i32> {
    let guard = self.current_nodes.read().await;
    guard.iter().copied().collect()
}
/// Snapshot of the in-memory health record for `node_id`, if tracked.
/// Delegates to the underlying `HealthChecker`.
pub fn get_node_memory_record(
    &self,
    node_id: i32,
) -> Option<crate::health_checker::HealthyMemRecord> {
    self.health_checker.get_node_memory_record(node_id)
}
/// Health statistics for `node_id` over the last `hours` hours.
/// Delegates to the underlying `HealthChecker`.
pub fn get_node_health_stats(
    &self,
    node_id: i32,
    hours: u64,
) -> Option<crate::db::HealthStats> {
    self.health_checker.get_node_health_stats(node_id, hours)
}
/// Current `(node_id, status, last_error)` for every tracked node.
/// Delegates to the underlying `HealthChecker`.
pub fn get_all_nodes_health_status(
    &self,
) -> Vec<(i32, crate::db::HealthStatus, Option<String>)> {
    self.health_checker.get_all_nodes_health_status()
}
/// One-shot connectivity test against an arbitrary node model, bounded
/// by `max_time`. Delegates to the underlying `HealthChecker`.
pub async fn test_connection(
    &self,
    node_info: &shared_nodes::Model,
    max_time: Duration,
) -> anyhow::Result<()> {
    self.health_checker
        .test_connection(node_info, max_time)
        .await
}
}
@@ -0,0 +1,153 @@
#![allow(unused)]
mod api;
mod config;
mod db;
mod health_checker;
mod health_checker_manager;
mod migrator;
use api::routes::create_routes;
use clap::Parser;
use config::AppConfig;
use db::{operations::NodeOperations, Db};
use health_checker::HealthChecker;
use health_checker_manager::HealthCheckerManager;
use std::env;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tracing_subscriber::EnvFilter;
use crate::db::cleanup::{CleanupConfig, CleanupManager};
/// Command-line arguments for the uptime monitor binary.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Admin password for management access
    #[arg(long, env = "ADMIN_PASSWORD")]
    admin_password: Option<String>,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Load configuration.
    // NOTE(review): the config is constructed BEFORE the CLI args below
    // export ADMIN_PASSWORD into the environment; if AppConfig::default()
    // reads that variable, --admin-password has no effect on `config` —
    // confirm the intended ordering.
    let config = AppConfig::default();
    // Initialize logging.
    tracing_subscriber::fmt()
        .with_max_level(match config.logging.level.as_str() {
            "debug" => tracing::Level::DEBUG,
            "info" => tracing::Level::INFO,
            "warn" => tracing::Level::WARN,
            "error" => tracing::Level::ERROR,
            _ => tracing::Level::INFO,
        })
        .with_target(false)
        .with_thread_ids(true)
        .with_env_filter(EnvFilter::new("easytier_uptime"))
        .init();
    // Parse command-line arguments.
    let args = Args::parse();
    // If an admin password was provided on the CLI, export it via the
    // environment for downstream consumers.
    if let Some(password) = args.admin_password {
        env::set_var("ADMIN_PASSWORD", password);
    }
    tracing::info!(
        "Admin password configured: {}",
        !config.security.admin_password.is_empty()
    );
    // Open the database.
    let db = Db::new(&config.database.path.to_string_lossy()).await?;
    // Log database statistics.
    let stats = db.get_database_stats().await?;
    tracing::info!("Database initialized successfully!");
    tracing::info!("Database stats: {:?}", stats);
    // Ensure the config directory exists.
    let config_dir = PathBuf::from("./configs");
    tokio::fs::create_dir_all(&config_dir).await?;
    // Create the health checker and its manager.
    let health_checker = Arc::new(HealthChecker::new(db.clone()));
    let health_checker_manager = HealthCheckerManager::new(health_checker, db.clone())
        .with_monitor_interval(Duration::from_secs(1)); // poll for node changes every second
    let cleanup_manager = CleanupManager::new(db.clone(), CleanupConfig::default());
    cleanup_manager.start_auto_cleanup().await?;
    // Start node monitoring.
    health_checker_manager.start_monitoring().await?;
    tracing::info!("Health checker manager started successfully!");
    let monitored_count = health_checker_manager.get_monitored_node_count().await;
    tracing::info!("Currently monitoring {} nodes", monitored_count);
    // Build the application state shared with the API handlers.
    let app_state = crate::api::handlers::AppState {
        db: db.clone(),
        health_checker_manager: Arc::new(health_checker_manager),
    };
    // Build the API router.
    let app = create_routes().with_state(app_state);
    // Configure the server address.
    let addr = config.server.addr;
    tracing::info!("Starting server on http://{}", addr);
    tracing::info!("Available endpoints:");
    tracing::info!("  GET    /health - Health check");
    tracing::info!("  GET    /api/nodes - Get nodes (paginated, approved only)");
    tracing::info!("  POST   /api/nodes - Create node (pending approval)");
    tracing::info!("  GET    /api/nodes/:id - Get node by ID");
    tracing::info!("  PUT    /api/nodes/:id - Update node");
    tracing::info!("  DELETE /api/nodes/:id - Delete node");
    tracing::info!("  GET    /api/nodes/:id/health - Get node health history");
    tracing::info!("  GET    /api/nodes/:id/health/stats - Get node health stats");
    tracing::info!("Admin endpoints:");
    tracing::info!("  POST   /api/admin/login - Admin login");
    tracing::info!("  GET    /api/admin/nodes - Get all nodes (including pending)");
    tracing::info!("  PUT    /api/admin/nodes/:id/approve - Approve/reject node");
    tracing::info!("  DELETE /api/admin/nodes/:id - Delete node (admin only)");
    // Bind the listener.
    let listener = tokio::net::TcpListener::bind(addr).await?;
    // Graceful-shutdown plumbing.
    let shutdown_signal = Arc::new(tokio::sync::Notify::new());
    let server_shutdown_signal = shutdown_signal.clone();
    // Serve until notified to shut down.
    let server_handle = tokio::spawn(async move {
        axum::serve(listener, app)
            .with_graceful_shutdown(async move {
                server_shutdown_signal.notified().await;
            })
            .await
            .unwrap();
    });
    // Wait for Ctrl+C (or for the server task to finish on its own).
    tokio::select! {
        _ = tokio::signal::ctrl_c() => {
            tracing::info!("Received shutdown signal");
        }
        _ = server_handle => {
            tracing::info!("Server task completed");
        }
    }
    // Graceful shutdown.
    tracing::info!("Shutting down gracefully...");
    shutdown_signal.notify_waiters();
    tracing::info!("Shutdown complete");
    Ok(())
}
@@ -0,0 +1,181 @@
use sea_orm_migration::{prelude::*, schema::*};
/// Initial schema migration: `shared_nodes` and `health_records`
/// tables plus their indexes.
pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        "m20250101_000001_create_tables"
    }
}
/// Column identifiers for the `shared_nodes` table (community-shared
/// EasyTier nodes plus contact info for their operators).
#[derive(DeriveIden)]
pub enum SharedNodes {
    Table,
    Id,
    Name,
    Host,
    Port,
    Protocol,
    Version,
    AllowRelay,
    NetworkName,
    NetworkSecret,
    Description,
    MaxConnections,
    CurrentConnections,
    IsActive,
    IsApproved,
    QQNumber,
    Wechat,
    Mail,
    CreatedAt,
    UpdatedAt,
}
/// Column identifiers for the `health_records` table (one row per
/// health-check result, FK to `shared_nodes`).
#[derive(DeriveIden)]
pub enum HealthRecords {
    Table,
    Id,
    NodeId,
    Status,
    ResponseTime,
    ErrorMessage,
    CheckedAt,
}
#[async_trait::async_trait]
impl MigrationTrait for Migration {
/// Create the initial schema: `shared_nodes`, `health_records`, and
/// the indexes used by the API's query patterns.
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
    // Create the shared-nodes table.
    manager
        .create_table(
            Table::create()
                .if_not_exists()
                .table(SharedNodes::Table)
                .col(pk_auto(SharedNodes::Id).not_null())
                .col(string(SharedNodes::Name).not_null())
                .col(string(SharedNodes::Host).not_null())
                .col(integer(SharedNodes::Port).not_null())
                .col(string(SharedNodes::Protocol).not_null().default("tcp"))
                .col(string(SharedNodes::Version))
                .col(boolean(SharedNodes::AllowRelay).default(false))
                .col(string(SharedNodes::NetworkName))
                .col(string(SharedNodes::NetworkSecret))
                .col(text(SharedNodes::Description))
                .col(integer(SharedNodes::MaxConnections).default(100))
                .col(integer(SharedNodes::CurrentConnections).default(0))
                .col(boolean(SharedNodes::IsActive).default(true))
                // New nodes require admin approval before being listed.
                .col(boolean(SharedNodes::IsApproved).default(false))
                .col(string(SharedNodes::QQNumber))
                .col(string(SharedNodes::Wechat))
                .col(string(SharedNodes::Mail))
                .col(
                    timestamp_with_time_zone(SharedNodes::CreatedAt)
                        .default(Expr::current_timestamp()),
                )
                .col(
                    timestamp_with_time_zone(SharedNodes::UpdatedAt)
                        .default(Expr::current_timestamp()),
                )
                .to_owned(),
        )
        .await?;
    // Uniqueness: at most one entry per (host, port, protocol).
    manager
        .create_index(
            Index::create()
                .name("idx_shared_nodes_host_port_protocol")
                .table(SharedNodes::Table)
                .col(SharedNodes::Host)
                .col(SharedNodes::Port)
                .col(SharedNodes::Protocol)
                .unique()
                .to_owned(),
        )
        .await?;
    // Create the health-records table; rows cascade on node delete.
    manager
        .create_table(
            Table::create()
                .if_not_exists()
                .table(HealthRecords::Table)
                .col(pk_auto(HealthRecords::Id).not_null())
                .col(integer(HealthRecords::NodeId).not_null())
                .col(string(HealthRecords::Status).not_null())
                .col(integer(HealthRecords::ResponseTime))
                .col(text(HealthRecords::ErrorMessage).null())
                .col(
                    timestamp_with_time_zone(HealthRecords::CheckedAt)
                        .default(Expr::current_timestamp()),
                )
                .foreign_key(
                    ForeignKey::create()
                        .name("fk_health_records_node_id_to_shared_nodes_id")
                        .from(HealthRecords::Table, HealthRecords::NodeId)
                        .to(SharedNodes::Table, SharedNodes::Id)
                        .on_delete(ForeignKeyAction::Cascade)
                        .on_update(ForeignKeyAction::Cascade),
                )
                .to_owned(),
        )
        .await?;
    // Indexes covering the history/stats query patterns.
    manager
        .create_index(
            Index::create()
                .name("idx_health_records_node_id")
                .table(HealthRecords::Table)
                .col(HealthRecords::NodeId)
                .to_owned(),
        )
        .await?;
    manager
        .create_index(
            Index::create()
                .name("idx_health_records_checked_at")
                .table(HealthRecords::Table)
                .col(HealthRecords::CheckedAt)
                .to_owned(),
        )
        .await?;
    manager
        .create_index(
            Index::create()
                .name("idx_health_records_node_time")
                .table(HealthRecords::Table)
                .col(HealthRecords::NodeId)
                .col(HealthRecords::CheckedAt)
                .to_owned(),
        )
        .await?;
    manager
        .create_index(
            Index::create()
                .name("idx_health_records_status")
                .table(HealthRecords::Table)
                .col(HealthRecords::Status)
                .to_owned(),
        )
        .await?;
    Ok(())
}
/// Roll back the initial schema. `health_records` is dropped first
/// because it holds a foreign key referencing `shared_nodes`.
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
    manager
        .drop_table(Table::drop().table(HealthRecords::Table).to_owned())
        .await?;
    manager
        .drop_table(Table::drop().table(SharedNodes::Table).to_owned())
        .await?;
    Ok(())
}
}
@@ -0,0 +1,12 @@
use sea_orm_migration::prelude::*;
mod m20250101_000001_create_tables;
/// Aggregates all schema migrations for the uptime database, in the
/// order they must be applied.
pub struct Migrator;

#[async_trait::async_trait]
impl MigratorTrait for Migrator {
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![Box::new(m20250101_000001_create_tables::Migration)]
    }
}