fix: scope peer center server data to instance (#2198)

Stop sharing PeerCenterServer state through a process-global map so local and foreign-network services cannot mix peer-center data when peer ids overlap.
This commit is contained in:
KKRainbow
2026-05-02 01:43:01 +08:00
committed by GitHub
parent 4eba9b07b6
commit 12a7b5a5c5
2 changed files with 77 additions and 72 deletions
+1 -24
View File
@@ -65,7 +65,7 @@ impl PeerCenterBase {
             return Err(Error::Shutdown);
         };
         rpc_mgr.rpc_server().registry().register(
-            PeerCenterRpcServer::new(PeerCenterServer::new(self.peer_mgr.my_peer_id())),
+            PeerCenterRpcServer::new(PeerCenterServer::new()),
             &self.peer_mgr.get_global_ctx().get_network_name(),
         );
         Ok(())
@@ -486,7 +486,6 @@ impl PeerCenterPeerManagerTrait for PeerMapWithPeerRpcManager {
 #[cfg(test)]
 mod tests {
     use crate::{
-        peer_center::server::get_global_data,
         peers::tests::{connect_peer_manager, create_mock_peer_manager, wait_route_appear},
         tunnel::common::tests::wait_for_condition,
     };
@@ -515,25 +514,6 @@ mod tests {
             .await
             .unwrap();
-        let center_peer = PeerCenterBase::select_center_peer(&peer_mgr_a)
-            .await
-            .unwrap();
-        let center_data = get_global_data(center_peer);
-        // wait center_data has 3 records for 10 seconds
-        wait_for_condition(
-            || async {
-                if center_data.global_peer_map.len() == 4 {
-                    println!("center data {:#?}", center_data.global_peer_map);
-                    true
-                } else {
-                    false
-                }
-            },
-            Duration::from_secs(20),
-        )
-        .await;
         let mut digest = None;
         for pc in peer_centers.iter() {
             let rpc_service = pc.get_rpc_service();
@@ -578,8 +558,5 @@ mod tests {
             route_cost.end_update();
             assert!(!route_cost.need_update());
         }
-        let global_digest = get_global_data(center_peer).digest.load();
-        assert_eq!(digest.as_ref().unwrap(), &global_digest);
     }
 }
+76 -48
View File
@@ -6,7 +6,6 @@ use std::{
 use crossbeam::atomic::AtomicCell;
 use dashmap::DashMap;
-use once_cell::sync::Lazy;
 use tokio::task::JoinSet;
 use crate::{
@@ -36,35 +35,21 @@ pub(crate) struct PeerCenterInfoEntry {
 }
 #[derive(Debug, Default)]
-pub(crate) struct PeerCenterServerGlobalData {
-    pub(crate) global_peer_map: DashMap<SrcDstPeerPair, PeerCenterInfoEntry>,
-    pub(crate) peer_report_time: DashMap<PeerId, std::time::Instant>,
-    pub(crate) digest: AtomicCell<Digest>,
-}
-// a global unique instance for PeerCenterServer
-pub(crate) static GLOBAL_DATA: Lazy<DashMap<PeerId, Arc<PeerCenterServerGlobalData>>> =
-    Lazy::new(DashMap::new);
-pub(crate) fn get_global_data(node_id: PeerId) -> Arc<PeerCenterServerGlobalData> {
-    GLOBAL_DATA
-        .entry(node_id)
-        .or_insert_with(|| Arc::new(PeerCenterServerGlobalData::default()))
-        .value()
-        .clone()
-}
+struct PeerCenterServerData {
+    global_peer_map: DashMap<SrcDstPeerPair, PeerCenterInfoEntry>,
+    peer_report_time: DashMap<PeerId, std::time::Instant>,
+    digest: AtomicCell<Digest>,
+}
 #[derive(Clone, Debug)]
 pub struct PeerCenterServer {
-    // every peer has its own server, so use per-struct dash map is ok.
-    my_node_id: PeerId,
-    data: Arc<PeerCenterServerGlobalData>,
+    data: Arc<PeerCenterServerData>,
     tasks: Arc<JoinSet<()>>,
 }
 impl PeerCenterServer {
-    pub fn new(my_node_id: PeerId) -> Self {
-        let data = get_global_data(my_node_id);
+    pub fn new() -> Self {
+        let data = Arc::new(PeerCenterServerData::default());
         let weak_data = Arc::downgrade(&data);
         let mut tasks = JoinSet::new();
         tasks.spawn(async move {
@@ -78,13 +63,12 @@ impl PeerCenterServer {
         });
         PeerCenterServer {
-            my_node_id,
             data,
             tasks: Arc::new(tasks),
         }
     }
-    async fn clean_outdated_peer_data(data: &PeerCenterServerGlobalData) {
+    async fn clean_outdated_peer_data(data: &PeerCenterServerData) {
         data.peer_report_time.retain(|_, v| {
             std::time::Instant::now().duration_since(*v) < std::time::Duration::from_secs(180)
         });
@@ -94,7 +78,7 @@ impl PeerCenterServer {
         });
     }
-    fn calc_global_digest_data(data: &PeerCenterServerGlobalData) -> Digest {
+    fn calc_global_digest_data(data: &PeerCenterServerData) -> Digest {
         let mut hasher = std::collections::hash_map::DefaultHasher::new();
         data.global_peer_map
             .iter()
@@ -107,18 +91,6 @@ impl PeerCenterServer {
     }
 }
-impl Drop for PeerCenterServer {
-    fn drop(&mut self) {
-        if Arc::strong_count(&self.tasks) != 1 {
-            return;
-        }
-        GLOBAL_DATA.remove_if(&self.my_node_id, |_, data| {
-            Arc::ptr_eq(data, &self.data) && Arc::strong_count(data) <= 2
-        });
-    }
-}
 #[async_trait::async_trait]
 impl PeerCenterRpc for PeerCenterServer {
     type Controller = BaseController;
@@ -194,18 +166,74 @@ mod tests {
     use super::*;
     #[tokio::test]
-    async fn global_data_removed_when_last_server_drops() {
-        let peer_id = u32::MAX - 17;
-        GLOBAL_DATA.remove(&peer_id);
-        let server = PeerCenterServer::new(peer_id);
-        assert!(GLOBAL_DATA.contains_key(&peer_id));
+    async fn server_clones_share_instance_data() {
+        let server = PeerCenterServer::new();
         let server_clone = server.clone();
-        drop(server);
-        assert!(GLOBAL_DATA.contains_key(&peer_id));
-        drop(server_clone);
-        assert!(!GLOBAL_DATA.contains_key(&peer_id));
+        let mut peers = PeerInfoForGlobalMap::default();
+        peers
+            .direct_peers
+            .insert(100, DirectConnectedPeerInfo { latency_ms: 3 });
+        server
+            .report_peers(
+                BaseController::default(),
+                ReportPeersRequest {
+                    my_peer_id: 99,
+                    peer_infos: Some(peers),
+                },
+            )
+            .await
+            .unwrap();
+        let resp = server_clone
+            .get_global_peer_map(
+                BaseController::default(),
+                GetGlobalPeerMapRequest { digest: 0 },
+            )
+            .await
+            .unwrap();
+        assert_eq!(1, resp.global_peer_map.len());
+        assert!(resp.global_peer_map[&99].direct_peers.contains_key(&100));
+    }
+    #[tokio::test]
+    async fn independent_server_instances_do_not_share_data() {
+        let server_a = PeerCenterServer::new();
+        let server_b = PeerCenterServer::new();
+        let mut peers = PeerInfoForGlobalMap::default();
+        peers
+            .direct_peers
+            .insert(101, DirectConnectedPeerInfo { latency_ms: 5 });
+        server_a
+            .report_peers(
+                BaseController::default(),
+                ReportPeersRequest {
+                    my_peer_id: 100,
+                    peer_infos: Some(peers),
+                },
+            )
+            .await
+            .unwrap();
+        let resp_a = server_a
+            .get_global_peer_map(
+                BaseController::default(),
+                GetGlobalPeerMapRequest { digest: 0 },
+            )
+            .await
+            .unwrap();
+        assert_eq!(1, resp_a.global_peer_map.len());
+        let resp_b = server_b
+            .get_global_peer_map(
+                BaseController::default(),
+                GetGlobalPeerMapRequest { digest: 0 },
+            )
+            .await
+            .unwrap();
+        assert!(resp_b.global_peer_map.is_empty());
     }
 }