support compress for rpc and tun data (#473)

* support compress for rpc and tun data
* add compression layer to easytier-web
This commit is contained in:
Sijie.Sun
2024-11-16 11:23:18 +08:00
committed by GitHub
parent 9d455e22fa
commit 6cdea38284
22 changed files with 623 additions and 82 deletions
+19 -1
View File
@@ -20,6 +20,7 @@ message FlagsInConfig {
bool disable_udp_hole_punching = 13;
string ipv6_listener = 14;
bool multi_thread = 15;
CompressionAlgoPb data_compress_algo = 16;
}
message RpcDescriptor {
@@ -32,7 +33,7 @@ message RpcDescriptor {
}
message RpcRequest {
RpcDescriptor descriptor = 1;
RpcDescriptor descriptor = 1 [ deprecated = true ];
bytes request = 2;
int32 timeout_ms = 3;
@@ -45,6 +46,21 @@ message RpcResponse {
uint64 runtime_us = 3;
}
// Compression algorithms that peers can negotiate for RPC/tunnel payloads.
enum CompressionAlgoPb {
// Unset/unknown value; rejected when converting to a concrete algorithm.
Invalid = 0;
// Payload is not compressed.
None = 1;
// Payload is compressed with zstd (default level on the Rust side).
Zstd = 2;
}
// Compression negotiation info carried alongside an RpcPacket.
message RpcCompressionInfo {
// Algorithm actually used to compress this packet's content; the
// receiver must use it to decompress.
CompressionAlgoPb algo = 1;
// Tells the peer which compression algorithm this side accepts for the
// next response/request it sends back.
CompressionAlgoPb accepted_algo = 2;
}
message RpcPacket {
uint32 from_peer = 1;
uint32 to_peer = 2;
@@ -58,6 +74,8 @@ message RpcPacket {
uint32 piece_idx = 8;
int32 trace_id = 9;
RpcCompressionInfo compression_info = 10;
}
message Void {}
+25
View File
@@ -2,6 +2,8 @@ use std::{fmt::Display, str::FromStr};
use anyhow::Context;
use crate::tunnel::packet_def::CompressorAlgo;
include!(concat!(env!("OUT_DIR"), "/common.rs"));
impl From<uuid::Uuid> for Uuid {
@@ -180,3 +182,26 @@ impl From<SocketAddr> for std::net::SocketAddr {
}
}
}
/// Maps the wire-level compression enum onto the concrete compressor choice.
///
/// `Invalid` (and any future unknown value) is rejected with an error so the
/// caller can decide how to degrade.
impl TryFrom<CompressionAlgoPb> for CompressorAlgo {
    type Error = anyhow::Error;

    fn try_from(value: CompressionAlgoPb) -> Result<Self, Self::Error> {
        match value {
            CompressionAlgoPb::None => Ok(CompressorAlgo::None),
            CompressionAlgoPb::Zstd => Ok(CompressorAlgo::ZstdDefault),
            _ => Err(anyhow::anyhow!("Invalid CompressionAlgoPb")),
        }
    }
}
/// Converts a concrete compressor choice back to its protobuf wire value.
///
/// The match is exhaustive and every variant has a wire value, so this
/// conversion never actually fails; it is `TryFrom` only to mirror the
/// reverse mapping's interface.
impl TryFrom<CompressorAlgo> for CompressionAlgoPb {
    type Error = anyhow::Error;

    fn try_from(value: CompressorAlgo) -> Result<Self, Self::Error> {
        let algo = match value {
            CompressorAlgo::None => CompressionAlgoPb::None,
            CompressorAlgo::ZstdDefault => CompressionAlgoPb::Zstd,
        };
        Ok(algo)
    }
}
+70 -5
View File
@@ -12,8 +12,10 @@ use tokio_stream::StreamExt;
use crate::common::PeerId;
use crate::defer;
use crate::proto::common::{RpcDescriptor, RpcPacket, RpcRequest, RpcResponse};
use crate::proto::rpc_impl::packet::build_rpc_packet;
use crate::proto::common::{
CompressionAlgoPb, RpcCompressionInfo, RpcDescriptor, RpcPacket, RpcRequest, RpcResponse,
};
use crate::proto::rpc_impl::packet::{build_rpc_packet, compress_packet, decompress_packet};
use crate::proto::rpc_types::controller::Controller;
use crate::proto::rpc_types::descriptor::MethodDescriptor;
use crate::proto::rpc_types::{
@@ -48,12 +50,21 @@ struct InflightRequest {
start_time: std::time::Instant,
}
// Per-peer state tracked by the RPC client: which compression the peer
// accepts and when we last heard from it (used to expire stale entries).
#[derive(Debug, Clone, Default)]
pub struct PeerInfo {
// Peer this entry describes.
pub peer_id: PeerId,
// Compression info learned from the peer's last response; its
// `accepted_algo` is used to compress the next request to this peer.
pub compression_info: RpcCompressionInfo,
// Time of the last response from this peer; entries idle longer than
// 120s are purged by the background task spawned in `Client::run`.
pub last_active: Option<std::time::Instant>,
}
type InflightRequestTable = Arc<DashMap<InflightRequestKey, InflightRequest>>;
pub type PeerInfoTable = Arc<DashMap<PeerId, PeerInfo>>;
pub struct Client {
mpsc: Mutex<MpscTunnel<Box<dyn Tunnel>>>,
transport: Mutex<Transport>,
inflight_requests: InflightRequestTable,
peer_info: PeerInfoTable,
tasks: Arc<Mutex<JoinSet<()>>>,
}
@@ -64,6 +75,7 @@ impl Client {
mpsc: Mutex::new(MpscTunnel::new(ring_a, None)),
transport: Mutex::new(MpscTunnel::new(ring_b, None)),
inflight_requests: Arc::new(DashMap::new()),
peer_info: Arc::new(DashMap::new()),
tasks: Arc::new(Mutex::new(JoinSet::new())),
}
}
@@ -79,6 +91,21 @@ impl Client {
pub fn run(&self) {
let mut tasks = self.tasks.lock().unwrap();
let peer_infos = self.peer_info.clone();
tasks.spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_secs(30)).await;
let now = std::time::Instant::now();
peer_infos.retain(|_, v| {
if let Some(last_active) = v.last_active {
return now.duration_since(last_active)
< std::time::Duration::from_secs(120);
}
true
});
}
});
let mut rx = self.mpsc.lock().unwrap().get_stream();
let inflight_requests = self.inflight_requests.clone();
tasks.spawn(async move {
@@ -111,6 +138,8 @@ impl Client {
continue;
};
tracing::trace!(?packet, "Received response packet");
let ret = inflight_request.merger.feed(packet);
match ret {
Ok(Some(rpc_packet)) => {
@@ -138,6 +167,7 @@ impl Client {
to_peer_id: PeerId,
zc_packet_sender: MpscTunnelSender,
inflight_requests: InflightRequestTable,
peer_info: PeerInfoTable,
_phan: PhantomData<F>,
}
@@ -194,23 +224,53 @@ impl Client {
};
let rpc_req = RpcRequest {
descriptor: Some(rpc_desc.clone()),
request: input.into(),
timeout_ms: ctrl.timeout_ms(),
..Default::default()
};
let peer_info = self
.peer_info
.get(&self.to_peer_id)
.map(|v| v.clone())
.unwrap_or_default();
let (buf, c_algo) = compress_packet(
peer_info.compression_info.accepted_algo(),
&rpc_req.encode_to_vec(),
)
.await
.unwrap();
let packets = build_rpc_packet(
self.from_peer_id,
self.to_peer_id,
rpc_desc,
transaction_id,
true,
&rpc_req.encode_to_vec(),
&buf,
ctrl.trace_id(),
RpcCompressionInfo {
algo: c_algo.into(),
accepted_algo: CompressionAlgoPb::Zstd.into(),
},
);
let timeout_dur = std::time::Duration::from_millis(ctrl.timeout_ms() as u64);
let rpc_packet = timeout(timeout_dur, self.do_rpc(packets, &mut rx)).await??;
let mut rpc_packet = timeout(timeout_dur, self.do_rpc(packets, &mut rx)).await??;
if let Some(compression_info) = rpc_packet.compression_info {
self.peer_info.insert(
self.to_peer_id,
PeerInfo {
peer_id: self.to_peer_id,
compression_info: compression_info.clone(),
last_active: Some(std::time::Instant::now()),
},
);
rpc_packet.body =
decompress_packet(compression_info.algo(), &rpc_packet.body).await?;
}
assert_eq!(rpc_packet.transaction_id, transaction_id);
@@ -230,6 +290,7 @@ impl Client {
to_peer_id,
zc_packet_sender: self.mpsc.lock().unwrap().get_sink(),
inflight_requests: self.inflight_requests.clone(),
peer_info: self.peer_info.clone(),
_phan: PhantomData,
})
}
@@ -237,4 +298,8 @@ impl Client {
pub fn inflight_count(&self) -> usize {
self.inflight_requests.len()
}
pub fn peer_info_table(&self) -> PeerInfoTable {
self.peer_info.clone()
}
}
+49 -11
View File
@@ -1,18 +1,44 @@
use prost::Message as _;
use crate::{
common::PeerId,
common::{compressor::DefaultCompressor, PeerId},
proto::{
common::{RpcDescriptor, RpcPacket},
common::{CompressionAlgoPb, RpcCompressionInfo, RpcDescriptor, RpcPacket},
rpc_types::error::Error,
},
tunnel::packet_def::{PacketType, ZCPacket},
tunnel::packet_def::{CompressorAlgo, PacketType, ZCPacket},
};
use super::RpcTransactId;
const RPC_PACKET_CONTENT_MTU: usize = 1300;
/// Compresses `content` with the algorithm the peer said it accepts.
///
/// An unknown/`Invalid` accepted algorithm degrades to no compression
/// rather than failing. If compressing does not actually shrink the
/// payload, the original bytes are returned instead and tagged as
/// uncompressed. Returns the bytes to send together with the algorithm
/// that was really applied, so the receiver knows how to decode them.
pub async fn compress_packet(
    accepted_compression_algo: CompressionAlgoPb,
    content: &[u8],
) -> Result<(Vec<u8>, CompressionAlgoPb), Error> {
    let compressor = DefaultCompressor::new();
    // Unsupported/invalid algo => send the data uncompressed.
    let algo = accepted_compression_algo
        .try_into()
        .unwrap_or(CompressorAlgo::None);
    // `content` is already a slice; no extra borrow needed.
    let compressed = compressor.compress_raw(content, algo).await?;
    if compressed.len() >= content.len() {
        // Compression did not help; mark as uncompressed so the peer
        // skips decompression.
        Ok((content.to_vec(), CompressionAlgoPb::None))
    } else {
        // CompressorAlgo -> CompressionAlgoPb is total (exhaustive match
        // in the TryFrom impl), so this conversion cannot fail.
        let algo_pb = algo
            .try_into()
            .expect("every CompressorAlgo maps to a CompressionAlgoPb");
        Ok((compressed, algo_pb))
    }
}
/// Decompresses an RPC payload using the algorithm advertised by the sender.
///
/// Fails if `compression_algo` does not name a supported algorithm
/// (e.g. `Invalid`) or if `content` is not valid data for that algorithm.
pub async fn decompress_packet(
    compression_algo: CompressionAlgoPb,
    content: &[u8],
) -> Result<Vec<u8>, Error> {
    let compressor = DefaultCompressor::new();
    // Reject Invalid/unknown algos instead of guessing.
    let algo = compression_algo.try_into()?;
    // `content` is already a slice; no extra borrow needed.
    let decompressed = compressor.decompress_raw(content, algo).await?;
    Ok(decompressed)
}
pub struct PacketMerger {
first_piece: Option<RpcPacket>,
pieces: Vec<RpcPacket>,
@@ -46,7 +72,8 @@ impl PacketMerger {
body.extend_from_slice(&p.body);
}
let mut tmpl_packet = self.first_piece.as_ref().unwrap().clone();
// only the first packet contains the complete info
let mut tmpl_packet = self.pieces[0].clone();
tmpl_packet.total_pieces = 1;
tmpl_packet.piece_idx = 0;
tmpl_packet.body = body;
@@ -58,17 +85,17 @@ impl PacketMerger {
let total_pieces = rpc_packet.total_pieces;
let piece_idx = rpc_packet.piece_idx;
if rpc_packet.descriptor.is_none() {
return Err(Error::MalformatRpcPacket(
"descriptor is missing".to_owned(),
));
}
// for compatibility with old version
if total_pieces == 0 && piece_idx == 0 {
return Ok(Some(rpc_packet));
}
if rpc_packet.piece_idx == 0 && rpc_packet.descriptor.is_none() {
return Err(Error::MalformatRpcPacket(
"descriptor is missing".to_owned(),
));
}
// about 32MB max size
if total_pieces > 32 * 1024 || total_pieces == 0 {
return Err(Error::MalformatRpcPacket(format!(
@@ -89,6 +116,7 @@ impl PacketMerger {
{
self.first_piece = Some(rpc_packet.clone());
self.pieces.clear();
tracing::trace!(?rpc_packet, "got first piece");
}
self.pieces
@@ -113,6 +141,7 @@ pub fn build_rpc_packet(
is_req: bool,
content: &Vec<u8>,
trace_id: i32,
compression_info: RpcCompressionInfo,
) -> Vec<ZCPacket> {
let mut ret = Vec::new();
let content_mtu = RPC_PACKET_CONTENT_MTU;
@@ -130,13 +159,22 @@ pub fn build_rpc_packet(
let cur_packet = RpcPacket {
from_peer,
to_peer,
descriptor: Some(rpc_desc.clone()),
descriptor: if cur_offset == 0 {
Some(rpc_desc.clone())
} else {
None
},
is_request: is_req,
total_pieces: total_pieces as u32,
piece_idx: (cur_offset / content_mtu) as u32,
transaction_id,
body: cur_content,
trace_id,
compression_info: if cur_offset == 0 {
Some(compression_info.clone())
} else {
None
},
};
cur_offset += cur_len;
+27 -6
View File
@@ -12,7 +12,7 @@ use tokio_stream::StreamExt;
use crate::{
common::{join_joinset_background, PeerId},
proto::{
common::{self, RpcDescriptor, RpcPacket, RpcRequest, RpcResponse},
common::{self, CompressionAlgoPb, RpcCompressionInfo, RpcPacket, RpcRequest, RpcResponse},
rpc_types::error::Result,
},
tunnel::{
@@ -23,7 +23,7 @@ use crate::{
};
use super::{
packet::{build_rpc_packet, PacketMerger},
packet::{build_rpc_packet, compress_packet, decompress_packet, PacketMerger},
service_registry::ServiceRegistry,
RpcController, Transport,
};
@@ -31,7 +31,6 @@ use super::{
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct PacketMergerKey {
from_peer_id: PeerId,
rpc_desc: RpcDescriptor,
transaction_id: i64,
}
@@ -108,10 +107,11 @@ impl Server {
let key = PacketMergerKey {
from_peer_id: packet.from_peer,
rpc_desc: packet.descriptor.clone().unwrap_or_default(),
transaction_id: packet.transaction_id,
};
tracing::trace!(?key, ?packet, "Received request packet");
let ret = packet_merges
.entry(key.clone())
.or_insert_with(PacketMerger::new)
@@ -144,7 +144,16 @@ impl Server {
}
async fn handle_rpc_request(packet: RpcPacket, reg: Arc<ServiceRegistry>) -> Result<Bytes> {
let rpc_request = RpcRequest::decode(Bytes::from(packet.body))?;
let body = if let Some(compression_info) = packet.compression_info {
decompress_packet(
compression_info.algo.try_into().unwrap_or_default(),
&packet.body,
)
.await?
} else {
packet.body
};
let rpc_request = RpcRequest::decode(Bytes::from(body))?;
let timeout_duration = std::time::Duration::from_millis(rpc_request.timeout_ms as u64);
let ctrl = RpcController::default();
Ok(timeout(
@@ -168,6 +177,7 @@ impl Server {
let mut resp_msg = RpcResponse::default();
let now = std::time::Instant::now();
let compression_info = packet.compression_info.clone();
let resp_bytes = Self::handle_rpc_request(packet, reg).await;
match &resp_bytes {
@@ -180,14 +190,25 @@ impl Server {
};
resp_msg.runtime_us = now.elapsed().as_micros() as u64;
let (compressed_resp, algo) = compress_packet(
compression_info.unwrap_or_default().accepted_algo(),
&resp_msg.encode_to_vec(),
)
.await
.unwrap();
let packets = build_rpc_packet(
to_peer,
from_peer,
desc,
transaction_id,
false,
&resp_msg.encode_to_vec(),
&compressed_resp,
trace_id,
RpcCompressionInfo {
algo: algo.into(),
accepted_algo: CompressionAlgoPb::Zstd.into(),
},
);
for packet in packets {
+21 -1
View File
@@ -41,6 +41,7 @@ impl Greeting for GreetingService {
}
}
use crate::proto::common::{CompressionAlgoPb, RpcCompressionInfo};
use crate::proto::rpc_impl::client::Client;
use crate::proto::rpc_impl::server::Server;
@@ -107,6 +108,7 @@ fn random_string(len: usize) -> String {
#[tokio::test]
async fn rpc_basic_test() {
// enable_log();
let ctx = TestContext::new();
let server = GreetingServer::new(GreetingService {
@@ -119,7 +121,7 @@ async fn rpc_basic_test() {
.client
.scoped_client::<GreetingClientFactory<RpcController>>(1, 1, "".to_string());
// small size req and resp
// // small size req and resp
let ctrl = RpcController::default();
let input = SayHelloRequest {
@@ -128,6 +130,15 @@ async fn rpc_basic_test() {
let ret = out.say_hello(ctrl, input).await;
assert_eq!(ret.unwrap().greeting, "Hello world!");
assert_eq!(1, ctx.client.peer_info_table().len());
let first_peer_info = ctx.client.peer_info_table().iter().next().unwrap().clone();
assert_eq!(
first_peer_info.compression_info.accepted_algo(),
CompressionAlgoPb::Zstd,
);
println!("{:?}", ctx.client.peer_info_table());
let ctrl = RpcController::default();
let input = SayGoodbyeRequest {
name: "world".to_string(),
@@ -144,6 +155,15 @@ async fn rpc_basic_test() {
assert_eq!(0, ctx.client.inflight_count());
assert_eq!(0, ctx.server.inflight_count());
let first_peer_info = ctx.client.peer_info_table().iter().next().unwrap().clone();
assert_eq!(
first_peer_info.compression_info,
RpcCompressionInfo {
algo: CompressionAlgoPb::Zstd.into(),
accepted_algo: CompressionAlgoPb::Zstd.into(),
}
);
}
#[tokio::test]