use peer center instance to gather peers info (#21)

* use peer center instance to gather peers info
This commit is contained in:
Sijie.Sun 2024-02-29 00:04:48 +08:00 committed by GitHub
parent 31af413b03
commit 24178bcf6e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 291 additions and 65 deletions

View File

@ -5,7 +5,8 @@ use easytier_core::{
common::stun::{StunInfoCollector, UdpNatTypeDetector}, common::stun::{StunInfoCollector, UdpNatTypeDetector},
rpc::{ rpc::{
connector_manage_rpc_client::ConnectorManageRpcClient, connector_manage_rpc_client::ConnectorManageRpcClient,
peer_manage_rpc_client::PeerManageRpcClient, *, peer_center_rpc_client::PeerCenterRpcClient, peer_manage_rpc_client::PeerManageRpcClient,
*,
}, },
}; };
use humansize::format_size; use humansize::format_size;
@ -30,6 +31,7 @@ enum SubCommand {
Connector(ConnectorArgs), Connector(ConnectorArgs),
Stun, Stun,
Route, Route,
PeerCenter,
} }
#[derive(Args, Debug)] #[derive(Args, Debug)]
@ -202,6 +204,12 @@ impl CommandHandler {
Ok(ConnectorManageRpcClient::connect(self.addr.clone()).await?) Ok(ConnectorManageRpcClient::connect(self.addr.clone()).await?)
} }
async fn get_peer_center_client(
&self,
) -> Result<PeerCenterRpcClient<tonic::transport::Channel>, Error> {
Ok(PeerCenterRpcClient::connect(self.addr.clone()).await?)
}
async fn list_peers(&self) -> Result<ListPeerResponse, Error> { async fn list_peers(&self) -> Result<ListPeerResponse, Error> {
let mut client = self.get_peer_manager_client().await?; let mut client = self.get_peer_manager_client().await?;
let request = tonic::Request::new(ListPeerRequest::default()); let request = tonic::Request::new(ListPeerRequest::default());
@ -424,6 +432,46 @@ async fn main() -> Result<(), Error> {
let stun = UdpNatTypeDetector::new(StunInfoCollector::get_default_servers()); let stun = UdpNatTypeDetector::new(StunInfoCollector::get_default_servers());
println!("udp type: {:?}", stun.get_udp_nat_type(0).await); println!("udp type: {:?}", stun.get_udp_nat_type(0).await);
} }
SubCommand::PeerCenter => {
let mut peer_center_client = handler.get_peer_center_client().await?;
let resp = peer_center_client
.get_global_peer_map(GetGlobalPeerMapRequest::default())
.await?
.into_inner();
#[derive(tabled::Tabled)]
struct PeerCenterTableItem {
node_id: String,
direct_peers: String,
}
let mut table_rows = vec![];
for (k, v) in resp.global_peer_map.iter() {
let node_id = k;
let direct_peers = v
.direct_peers
.iter()
.map(|(k, v)| {
format!(
"{}:{:?}",
k,
LatencyLevel::try_from(v.latency_level).unwrap()
)
})
.collect::<Vec<_>>();
table_rows.push(PeerCenterTableItem {
node_id: node_id.clone(),
direct_peers: direct_peers.join("\n"),
});
}
println!(
"{}",
tabled::Table::new(table_rows)
.with(Style::modern())
.to_string()
);
}
} }
Ok(()) Ok(())

View File

@ -92,6 +92,9 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::configure() tonic_build::configure()
.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]") .type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]")
.type_attribute("cli.DirectConnectedPeerInfo", "#[derive(Hash)]")
.type_attribute("cli.PeerInfoForGlobalMap", "#[derive(Hash)]")
.btree_map(&["."])
.compile(&["proto/cli.proto"], &["proto/"]) .compile(&["proto/cli.proto"], &["proto/"])
.unwrap(); .unwrap();
// tonic_build::compile_protos("proto/cli.proto")?; // tonic_build::compile_protos("proto/cli.proto")?;

View File

@ -115,3 +115,29 @@ service ConnectorManageRpc {
rpc ListConnector (ListConnectorRequest) returns (ListConnectorResponse); rpc ListConnector (ListConnectorRequest) returns (ListConnectorResponse);
rpc ManageConnector (ManageConnectorRequest) returns (ManageConnectorResponse); rpc ManageConnector (ManageConnectorRequest) returns (ManageConnectorResponse);
} }
enum LatencyLevel {
VeryLow = 0;
Low = 1;
Normal = 2;
High = 3;
VeryHigh = 4;
}
message DirectConnectedPeerInfo {
LatencyLevel latency_level = 2;
}
message PeerInfoForGlobalMap {
map<string, DirectConnectedPeerInfo> direct_peers = 1;
}
message GetGlobalPeerMapRequest {}
message GetGlobalPeerMapResponse {
map<string, PeerInfoForGlobalMap> global_peer_map = 1;
}
service PeerCenterRpc {
rpc GetGlobalPeerMap (GetGlobalPeerMapRequest) returns (GetGlobalPeerMapResponse);
}

View File

@ -20,6 +20,7 @@ use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManage
use crate::connector::udp_hole_punch::UdpHolePunchConnector; use crate::connector::udp_hole_punch::UdpHolePunchConnector;
use crate::gateway::icmp_proxy::IcmpProxy; use crate::gateway::icmp_proxy::IcmpProxy;
use crate::gateway::tcp_proxy::TcpProxy; use crate::gateway::tcp_proxy::TcpProxy;
use crate::peer_center::instance::PeerCenterInstance;
use crate::peers::peer_manager::PeerManager; use crate::peers::peer_manager::PeerManager;
use crate::peers::rpc_service::PeerManagerRpcService; use crate::peers::rpc_service::PeerManagerRpcService;
use crate::tunnels::SinkItem; use crate::tunnels::SinkItem;
@ -85,6 +86,8 @@ pub struct Instance {
tcp_proxy: Arc<TcpProxy>, tcp_proxy: Arc<TcpProxy>,
icmp_proxy: Arc<IcmpProxy>, icmp_proxy: Arc<IcmpProxy>,
peer_center: Arc<PeerCenterInstance>,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
} }
@ -141,6 +144,8 @@ impl Instance {
let arc_tcp_proxy = TcpProxy::new(global_ctx.clone(), peer_manager.clone()); let arc_tcp_proxy = TcpProxy::new(global_ctx.clone(), peer_manager.clone());
let arc_icmp_proxy = IcmpProxy::new(global_ctx.clone(), peer_manager.clone()).unwrap(); let arc_icmp_proxy = IcmpProxy::new(global_ctx.clone(), peer_manager.clone()).unwrap();
let peer_center = Arc::new(PeerCenterInstance::new(peer_manager.clone()));
Instance { Instance {
inst_name: inst_name.to_string(), inst_name: inst_name.to_string(),
id, id,
@ -158,6 +163,8 @@ impl Instance {
tcp_proxy: arc_tcp_proxy, tcp_proxy: arc_tcp_proxy,
icmp_proxy: arc_icmp_proxy, icmp_proxy: arc_icmp_proxy,
peer_center,
global_ctx, global_ctx,
} }
} }
@ -281,6 +288,8 @@ impl Instance {
self.udp_hole_puncher.lock().await.run().await?; self.udp_hole_puncher.lock().await.run().await?;
self.peer_center.init().await;
Ok(()) Ok(())
} }
@ -312,6 +321,7 @@ impl Instance {
let peer_mgr = self.peer_manager.clone(); let peer_mgr = self.peer_manager.clone();
let conn_manager = self.conn_manager.clone(); let conn_manager = self.conn_manager.clone();
let net_ns = self.global_ctx.net_ns.clone(); let net_ns = self.global_ctx.net_ns.clone();
let peer_center = self.peer_center.clone();
self.tasks.spawn(async move { self.tasks.spawn(async move {
let _g = net_ns.guard(); let _g = net_ns.guard();
@ -327,6 +337,11 @@ impl Instance {
ConnectorManagerRpcService(conn_manager.clone()), ConnectorManagerRpcService(conn_manager.clone()),
), ),
) )
.add_service(
crate::rpc::peer_center_rpc_server::PeerCenterRpcServer::new(
peer_center.get_rpc_service(),
),
)
.serve(addr) .serve(addr)
.await .await
.unwrap(); .unwrap();

View File

@ -9,20 +9,27 @@ use std::{
}; };
use futures::Future; use futures::Future;
use tokio::{sync::Mutex, task::JoinSet}; use tokio::{
sync::{Mutex, RwLock},
task::JoinSet,
};
use tracing::Instrument; use tracing::Instrument;
use crate::peers::{peer_manager::PeerManager, rpc_service::PeerManagerRpcService, PeerId}; use crate::{
peers::{peer_manager::PeerManager, rpc_service::PeerManagerRpcService, PeerId},
rpc::{GetGlobalPeerMapRequest, GetGlobalPeerMapResponse},
};
use super::{ use super::{
server::PeerCenterServer, server::PeerCenterServer,
service::{PeerCenterService, PeerCenterServiceClient, PeerInfoForGlobalMap}, service::{GlobalPeerMap, PeerCenterService, PeerCenterServiceClient, PeerInfoForGlobalMap},
Digest, Error, Digest, Error,
}; };
pub struct PeerCenterClient { struct PeerCenterBase {
peer_mgr: Arc<PeerManager>, peer_mgr: Arc<PeerManager>,
tasks: Arc<Mutex<JoinSet<()>>>, tasks: Arc<Mutex<JoinSet<()>>>,
lock: Arc<Mutex<()>>,
} }
static SERVICE_ID: u32 = 5; static SERVICE_ID: u32 = 5;
@ -32,7 +39,7 @@ struct PeridicJobCtx<T> {
job_ctx: T, job_ctx: T,
} }
impl PeerCenterClient { impl PeerCenterBase {
pub async fn init(&self) -> Result<(), Error> { pub async fn init(&self) -> Result<(), Error> {
self.peer_mgr.get_peer_rpc_mgr().run_service( self.peer_mgr.get_peer_rpc_mgr().run_service(
SERVICE_ID, SERVICE_ID,
@ -67,21 +74,22 @@ impl PeerCenterClient {
) -> () { ) -> () {
let my_node_id = self.peer_mgr.my_node_id(); let my_node_id = self.peer_mgr.my_node_id();
let peer_mgr = self.peer_mgr.clone(); let peer_mgr = self.peer_mgr.clone();
let lock = self.lock.clone();
self.tasks.lock().await.spawn( self.tasks.lock().await.spawn(
async move { async move {
let ctx = Arc::new(PeridicJobCtx { let ctx = Arc::new(PeridicJobCtx {
peer_mgr: peer_mgr.clone(), peer_mgr: peer_mgr.clone(),
job_ctx, job_ctx,
}); });
tracing::warn!(?my_node_id, "before periodic job loop");
loop { loop {
let Some(center_peer) = Self::select_center_peer(&peer_mgr).await else { let Some(center_peer) = Self::select_center_peer(&peer_mgr).await else {
tracing::warn!("no center peer found, sleep 1 second"); tracing::warn!("no center peer found, sleep 1 second");
tokio::time::sleep(Duration::from_secs(1)).await; tokio::time::sleep(Duration::from_secs(1)).await;
continue; continue;
}; };
tracing::warn!(?center_peer, "run periodic job"); tracing::info!(?center_peer, "run periodic job");
let rpc_mgr = peer_mgr.get_peer_rpc_mgr(); let rpc_mgr = peer_mgr.get_peer_rpc_mgr();
let _g = lock.lock().await;
let ret = rpc_mgr let ret = rpc_mgr
.do_client_rpc_scoped(SERVICE_ID, center_peer, |c| async { .do_client_rpc_scoped(SERVICE_ID, center_peer, |c| async {
let client = let client =
@ -90,6 +98,7 @@ impl PeerCenterClient {
job_fn(client, ctx.clone()).await job_fn(client, ctx.clone()).await
}) })
.await; .await;
drop(_g);
let Ok(sleep_time_ms) = ret else { let Ok(sleep_time_ms) = ret else {
tracing::error!("periodic job to center server rpc failed: {:?}", ret); tracing::error!("periodic job to center server rpc failed: {:?}", ret);
@ -106,35 +115,85 @@ impl PeerCenterClient {
); );
} }
pub async fn new(peer_mgr: Arc<PeerManager>) -> Self { pub fn new(peer_mgr: Arc<PeerManager>) -> Self {
PeerCenterClient { PeerCenterBase {
peer_mgr, peer_mgr,
tasks: Arc::new(Mutex::new(JoinSet::new())), tasks: Arc::new(Mutex::new(JoinSet::new())),
lock: Arc::new(Mutex::new(())),
} }
} }
} }
struct PeerCenterInstance { pub struct PeerCenterInstanceService {
global_peer_map: Arc<RwLock<GlobalPeerMap>>,
global_peer_map_digest: Arc<RwLock<Digest>>,
}
#[tonic::async_trait]
impl crate::rpc::cli::peer_center_rpc_server::PeerCenterRpc for PeerCenterInstanceService {
async fn get_global_peer_map(
&self,
_request: tonic::Request<GetGlobalPeerMapRequest>,
) -> Result<tonic::Response<GetGlobalPeerMapResponse>, tonic::Status> {
let global_peer_map = self.global_peer_map.read().await.clone();
Ok(tonic::Response::new(GetGlobalPeerMapResponse {
global_peer_map: global_peer_map
.map
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
}))
}
}
pub struct PeerCenterInstance {
peer_mgr: Arc<PeerManager>, peer_mgr: Arc<PeerManager>,
client: Arc<PeerCenterClient>,
client: Arc<PeerCenterBase>,
global_peer_map: Arc<RwLock<GlobalPeerMap>>,
global_peer_map_digest: Arc<RwLock<Digest>>,
} }
impl PeerCenterInstance { impl PeerCenterInstance {
pub async fn new(peer_mgr: Arc<PeerManager>) -> Self { pub fn new(peer_mgr: Arc<PeerManager>) -> Self {
let client = Arc::new(PeerCenterClient::new(peer_mgr.clone()).await); PeerCenterInstance {
client.init().await.unwrap(); peer_mgr: peer_mgr.clone(),
client: Arc::new(PeerCenterBase::new(peer_mgr.clone())),
global_peer_map: Arc::new(RwLock::new(GlobalPeerMap::new())),
global_peer_map_digest: Arc::new(RwLock::new(Digest::default())),
}
}
PeerCenterInstance { peer_mgr, client } pub async fn init(&self) {
self.client.init().await.unwrap();
self.init_get_global_info_job().await;
self.init_report_peers_job().await;
} }
async fn init_get_global_info_job(&self) { async fn init_get_global_info_job(&self) {
struct Ctx {
global_peer_map: Arc<RwLock<GlobalPeerMap>>,
global_peer_map_digest: Arc<RwLock<Digest>>,
}
let ctx = Arc::new(Ctx {
global_peer_map: self.global_peer_map.clone(),
global_peer_map_digest: self.global_peer_map_digest.clone(),
});
self.client self.client
.init_periodic_job({}, |client, _ctx| async move { .init_periodic_job(ctx, |client, ctx| async move {
let mut rpc_ctx = tarpc::context::current();
rpc_ctx.deadline = SystemTime::now() + Duration::from_secs(3);
let ret = client let ret = client
.get_global_peer_map(tarpc::context::current(), 0) .get_global_peer_map(
rpc_ctx,
ctx.job_ctx.global_peer_map_digest.read().await.clone(),
)
.await?; .await?;
let Ok(global_peer_map) = ret else { let Ok(resp) = ret else {
tracing::error!( tracing::error!(
"get global info from center server got error result: {:?}", "get global info from center server got error result: {:?}",
ret ret
@ -142,7 +201,18 @@ impl PeerCenterInstance {
return Ok(1000); return Ok(1000);
}; };
tracing::warn!("get global info from center server: {:?}", global_peer_map); let Some(resp) = resp else {
return Ok(1000);
};
tracing::info!(
"get global info from center server: {:?}, digest: {:?}",
resp.global_peer_map,
resp.digest
);
*ctx.job_ctx.global_peer_map.write().await = resp.global_peer_map;
*ctx.job_ctx.global_peer_map_digest.write().await = resp.digest;
Ok(5000) Ok(5000)
}) })
@ -196,10 +266,19 @@ impl PeerCenterInstance {
}) })
.await; .await;
} }
pub fn get_rpc_service(&self) -> PeerCenterInstanceService {
PeerCenterInstanceService {
global_peer_map: self.global_peer_map.clone(),
global_peer_map_digest: self.global_peer_map_digest.clone(),
}
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::ops::Deref;
use crate::{ use crate::{
peer_center::server::get_global_data, peer_center::server::get_global_data,
peers::tests::{connect_peer_manager, create_mock_peer_manager, wait_route_appear}, peers::tests::{connect_peer_manager, create_mock_peer_manager, wait_route_appear},
@ -213,13 +292,14 @@ mod tests {
let peer_mgr_b = create_mock_peer_manager().await; let peer_mgr_b = create_mock_peer_manager().await;
let peer_mgr_c = create_mock_peer_manager().await; let peer_mgr_c = create_mock_peer_manager().await;
let peer_center_a = PeerCenterInstance::new(peer_mgr_a.clone()).await; let peer_center_a = PeerCenterInstance::new(peer_mgr_a.clone());
let peer_center_b = PeerCenterInstance::new(peer_mgr_b.clone()).await; let peer_center_b = PeerCenterInstance::new(peer_mgr_b.clone());
let peer_center_c = PeerCenterInstance::new(peer_mgr_c.clone()).await; let peer_center_c = PeerCenterInstance::new(peer_mgr_c.clone());
peer_center_a.init_report_peers_job().await; let peer_centers = vec![&peer_center_a, &peer_center_b, &peer_center_c];
peer_center_b.init_report_peers_job().await; for pc in peer_centers.iter() {
peer_center_c.init_report_peers_job().await; pc.init().await;
}
connect_peer_manager(peer_mgr_a.clone(), peer_mgr_b.clone()).await; connect_peer_manager(peer_mgr_a.clone(), peer_mgr_b.clone()).await;
connect_peer_manager(peer_mgr_b.clone(), peer_mgr_c.clone()).await; connect_peer_manager(peer_mgr_b.clone(), peer_mgr_c.clone()).await;
@ -228,7 +308,7 @@ mod tests {
.await .await
.unwrap(); .unwrap();
let center_peer = PeerCenterClient::select_center_peer(&peer_mgr_a) let center_peer = PeerCenterBase::select_center_peer(&peer_mgr_a)
.await .await
.unwrap(); .unwrap();
let center_data = get_global_data(center_peer); let center_data = get_global_data(center_peer);
@ -248,5 +328,29 @@ mod tests {
} }
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_millis(100)).await;
} }
let mut digest = None;
for pc in peer_centers.iter() {
let rpc_service = pc.get_rpc_service();
let now = std::time::Instant::now();
while now.elapsed().as_secs() < 10 {
if rpc_service.global_peer_map.read().await.map.len() == 3 {
break;
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
assert_eq!(rpc_service.global_peer_map.read().await.map.len(), 3);
println!("rpc service ready, {:#?}", rpc_service.global_peer_map);
if digest.is_none() {
digest = Some(rpc_service.global_peer_map_digest.read().await.clone());
} else {
let v = rpc_service.global_peer_map_digest.read().await;
assert_eq!(digest.as_ref().unwrap(), v.deref());
}
}
let global_digest = get_global_data(center_peer).read().await.digest.clone();
assert_eq!(digest.as_ref().unwrap(), &global_digest);
} }
} }

View File

@ -5,7 +5,7 @@
// peer center is not guaranteed to be stable and can be changed when peer enter or leave. // peer center is not guaranteed to be stable and can be changed when peer enter or leave.
// it's used to reduce the cost to exchange infos between peers. // it's used to reduce the cost to exchange infos between peers.
mod instance; pub mod instance;
mod server; mod server;
mod service; mod service;

View File

@ -5,12 +5,12 @@ use std::{
use dashmap::DashMap; use dashmap::DashMap;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use tokio::sync::RwLock; use tokio::{sync::RwLock, task::JoinSet};
use crate::peers::PeerId; use crate::peers::PeerId;
use super::{ use super::{
service::{GlobalPeerMap, PeerCenterService, PeerInfoForGlobalMap}, service::{GetGlobalPeerMapResponse, GlobalPeerMap, PeerCenterService, PeerInfoForGlobalMap},
Digest, Error, Digest, Error,
}; };
@ -18,6 +18,7 @@ pub(crate) struct PeerCenterServerGlobalData {
pub global_peer_map: GlobalPeerMap, pub global_peer_map: GlobalPeerMap,
pub digest: Digest, pub digest: Digest,
pub update_time: std::time::Instant, pub update_time: std::time::Instant,
pub peer_update_time: DashMap<PeerId, std::time::Instant>,
} }
impl PeerCenterServerGlobalData { impl PeerCenterServerGlobalData {
@ -26,6 +27,7 @@ impl PeerCenterServerGlobalData {
global_peer_map: GlobalPeerMap::new(), global_peer_map: GlobalPeerMap::new(),
digest: Digest::default(), digest: Digest::default(),
update_time: std::time::Instant::now(), update_time: std::time::Instant::now(),
peer_update_time: DashMap::new(),
} }
} }
} }
@ -47,13 +49,41 @@ pub struct PeerCenterServer {
// every peer has its own server, so use per-struct dash map is ok. // every peer has its own server, so use per-struct dash map is ok.
my_node_id: PeerId, my_node_id: PeerId,
digest_map: DashMap<PeerId, Digest>, digest_map: DashMap<PeerId, Digest>,
tasks: Arc<JoinSet<()>>,
} }
impl PeerCenterServer { impl PeerCenterServer {
pub fn new(my_node_id: PeerId) -> Self { pub fn new(my_node_id: PeerId) -> Self {
let mut tasks = JoinSet::new();
tasks.spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
PeerCenterServer::clean_outdated_peer(my_node_id).await;
}
});
PeerCenterServer { PeerCenterServer {
my_node_id, my_node_id,
digest_map: DashMap::new(), digest_map: DashMap::new(),
tasks: Arc::new(tasks),
}
}
async fn clean_outdated_peer(my_node_id: PeerId) {
let data = get_global_data(my_node_id);
let mut locked_data = data.write().await;
let now = std::time::Instant::now();
let mut to_remove = Vec::new();
for kv in locked_data.peer_update_time.iter() {
if now.duration_since(*kv.value()).as_secs() > 10 {
to_remove.push(*kv.key());
}
}
for peer_id in to_remove {
locked_data.global_peer_map.map.remove(&peer_id);
locked_data.peer_update_time.remove(&peer_id);
} }
} }
} }
@ -70,6 +100,12 @@ impl PeerCenterService for PeerCenterServer {
) -> Result<(), Error> { ) -> Result<(), Error> {
tracing::warn!("receive report_peers"); tracing::warn!("receive report_peers");
let data = get_global_data(self.my_node_id);
let mut locked_data = data.write().await;
locked_data
.peer_update_time
.insert(my_peer_id, std::time::Instant::now());
let old_digest = self.digest_map.get(&my_peer_id); let old_digest = self.digest_map.get(&my_peer_id);
// if digest match, no need to update // if digest match, no need to update
if let Some(old_digest) = old_digest { if let Some(old_digest) = old_digest {
@ -83,8 +119,6 @@ impl PeerCenterService for PeerCenterServer {
} }
self.digest_map.insert(my_peer_id, digest); self.digest_map.insert(my_peer_id, digest);
let data = get_global_data(self.my_node_id);
let mut locked_data = data.write().await;
locked_data locked_data
.global_peer_map .global_peer_map
.map .map
@ -93,6 +127,7 @@ impl PeerCenterService for PeerCenterServer {
let mut hasher = std::collections::hash_map::DefaultHasher::new(); let mut hasher = std::collections::hash_map::DefaultHasher::new();
locked_data.global_peer_map.map.hash(&mut hasher); locked_data.global_peer_map.map.hash(&mut hasher);
locked_data.digest = hasher.finish() as Digest; locked_data.digest = hasher.finish() as Digest;
locked_data.update_time = std::time::Instant::now();
Ok(()) Ok(())
} }
@ -101,7 +136,7 @@ impl PeerCenterService for PeerCenterServer {
self, self,
_: tarpc::context::Context, _: tarpc::context::Context,
digest: Digest, digest: Digest,
) -> Result<Option<GlobalPeerMap>, Error> { ) -> Result<Option<GetGlobalPeerMapResponse>, Error> {
let data = get_global_data(self.my_node_id); let data = get_global_data(self.my_node_id);
if digest == data.read().await.digest { if digest == data.read().await.digest {
return Ok(None); return Ok(None);
@ -109,6 +144,9 @@ impl PeerCenterService for PeerCenterServer {
let data = get_global_data(self.my_node_id); let data = get_global_data(self.my_node_id);
let locked_data = data.read().await; let locked_data = data.read().await;
Ok(Some(locked_data.global_peer_map.clone())) Ok(Some(GetGlobalPeerMapResponse {
global_peer_map: locked_data.global_peer_map.clone(),
digest: locked_data.digest,
}))
} }
} }

View File

@ -1,18 +1,11 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use crate::peers::PeerId; use crate::{peers::PeerId, rpc::DirectConnectedPeerInfo};
use super::{Digest, Error}; use super::{Digest, Error};
use crate::rpc::PeerInfo; use crate::rpc::PeerInfo;
#[derive(Debug, Clone, Hash, serde::Deserialize, serde::Serialize)] pub type LatencyLevel = crate::rpc::cli::LatencyLevel;
pub enum LatencyLevel {
VeryLow,
Low,
Normal,
High,
VeryHigh,
}
impl LatencyLevel { impl LatencyLevel {
pub const fn from_latency_ms(lat_ms: u32) -> Self { pub const fn from_latency_ms(lat_ms: u32) -> Self {
@ -30,33 +23,25 @@ impl LatencyLevel {
} }
} }
#[derive(Debug, Clone, Hash, serde::Deserialize, serde::Serialize)] pub type PeerInfoForGlobalMap = crate::rpc::cli::PeerInfoForGlobalMap;
pub struct PeerConnInfoForGlobalMap {
to_peer_id: PeerId,
latency_level: LatencyLevel,
}
#[derive(Debug, Clone, Hash, serde::Deserialize, serde::Serialize)]
pub struct PeerInfoForGlobalMap {
pub direct_peers: BTreeMap<PeerId, Vec<PeerConnInfoForGlobalMap>>,
}
impl From<Vec<PeerInfo>> for PeerInfoForGlobalMap { impl From<Vec<PeerInfo>> for PeerInfoForGlobalMap {
fn from(peers: Vec<PeerInfo>) -> Self { fn from(peers: Vec<PeerInfo>) -> Self {
let mut peer_map = BTreeMap::new(); let mut peer_map = BTreeMap::new();
for peer in peers { for peer in peers {
let mut conn_info = Vec::new(); let min_lat = peer
for conn in peer.conns { .conns
conn_info.push(PeerConnInfoForGlobalMap { .iter()
to_peer_id: conn.peer_id.parse().unwrap(), .map(|conn| conn.stats.as_ref().unwrap().latency_us)
latency_level: LatencyLevel::from_latency_ms( .min()
conn.stats.unwrap().latency_us as u32 / 1000, .unwrap_or(0);
),
}); let dp_info = DirectConnectedPeerInfo {
} latency_level: LatencyLevel::from_latency_ms(min_lat as u32 / 1000) as i32,
};
// sort conn info so hash result is stable // sort conn info so hash result is stable
conn_info.sort_by(|a, b| a.to_peer_id.cmp(&b.to_peer_id)); peer_map.insert(peer.peer_id, dp_info);
peer_map.insert(peer.peer_id.parse().unwrap(), conn_info);
} }
PeerInfoForGlobalMap { PeerInfoForGlobalMap {
direct_peers: peer_map, direct_peers: peer_map,
@ -78,6 +63,12 @@ impl GlobalPeerMap {
} }
} }
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
pub struct GetGlobalPeerMapResponse {
pub global_peer_map: GlobalPeerMap,
pub digest: Digest,
}
#[tarpc::service] #[tarpc::service]
pub trait PeerCenterService { pub trait PeerCenterService {
// report center server which peer is directly connected to me // report center server which peer is directly connected to me
@ -88,5 +79,6 @@ pub trait PeerCenterService {
digest: Digest, digest: Digest,
) -> Result<(), Error>; ) -> Result<(), Error>;
async fn get_global_peer_map(digest: Digest) -> Result<Option<GlobalPeerMap>, Error>; async fn get_global_peer_map(digest: Digest)
-> Result<Option<GetGlobalPeerMapResponse>, Error>;
} }