*very* primitive segment-index ack's
commit dd6c4ff088 (parent f0481e350d)
@@ -12,6 +12,12 @@ version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 
+[[package]]
+name = "byteorder"
+version = "1.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+
 [[package]]
 name = "bytes"
 version = "0.5.4"
@@ -55,6 +61,7 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
 name = "hptp"
 version = "0.1.0"
 dependencies = [
+ "byteorder",
  "chrono",
  "thiserror",
  "tokio",
@@ -7,6 +7,7 @@ extern crate thiserror;
 use hptp::logger::Logger;
 use hptp::msg::{DownMsg, UpMsg};
 use hptp::peer::{self, DownPeer, Peer};
+use std::collections::HashMap;
 use tokio::io::AsyncWrite;
 
 #[derive(Error, Debug)]
@@ -55,22 +56,34 @@ async fn download<OUT>(log: &mut Logger, peer: &mut DownPeer, out: &mut OUT) ->
 where
     OUT: AsyncWrite + Unpin,
 {
-    use tokio::io::AsyncWriteExt;
-    let mut pos = 0;
+    let mut segs = HashMap::new();
+    let mut flush_seg_idx = 0;
     loop {
         match peer.recv().await {
-            Ok(UpMsg::Data { payload, .. }) => {
+            Ok(UpMsg::Data { payload, seg_idx }) => {
                 let len = payload.len();
-                out.write_all(&payload).await?;
-                out.flush().await?;
-                log.recv_data_accepted(pos, len, hptp::logger::InOrder)
+                segs.entry(seg_idx).or_insert(Some(payload));
+                let ack = DownMsg::Ack {
+                    idxs: segs.keys().cloned().collect(),
+                };
+                log.debug_msg(format!("sent ack: {:?}",
+                    segs.keys().collect::<Vec<_>>())).await;
+                send(peer, ack).await?;
+                log.recv_data_accepted(seg_idx as usize, len, hptp::logger::OutOfOrder)
                     .await;
-                send(peer, DownMsg::Ack {}).await?;
-                pos += len;
             }
 
             Err(peer::RecvError::InvalidMessage { .. }) => log.recv_corrupt().await,
             Err(peer::RecvError::Io { source }) => return Err(source.into()),
         }
 
+        while let Some(v) = segs.get_mut(&flush_seg_idx) {
+            if let Some(payload) = v.take() {
+                use tokio::io::AsyncWriteExt;
+                out.write_all(&payload).await?;
+                out.flush().await?;
+            }
+            flush_seg_idx += 1;
+        }
     }
 }
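The receive loop now parks every arriving segment in a `HashMap` keyed by its segment index, acks the whole set of indices seen so far, and a trailing loop writes segments to the output only once they are contiguous from `flush_seg_idx`. A minimal synchronous sketch of that buffer-and-flush pattern (illustrative names, a plain `Vec` standing in for the async writer):

```rust
use std::collections::HashMap;

fn main() {
    let mut segs: HashMap<u32, Option<Vec<u8>>> = HashMap::new();
    let mut flush_idx: u32 = 0;
    let mut out: Vec<u8> = Vec::new();

    // Segment 1 arrives before segment 0: it is parked, not written yet.
    segs.entry(1).or_insert(Some(b"world".to_vec()));
    // Segment 0 arrives; the sequence is now contiguous from flush_idx.
    segs.entry(0).or_insert(Some(b"hello ".to_vec()));

    // Flush everything contiguous; taking the payload leaves None behind,
    // so a duplicate of an already-flushed segment is not written twice.
    while let Some(slot) = segs.get_mut(&flush_idx) {
        if let Some(payload) = slot.take() {
            out.extend_from_slice(&payload); // stands in for out.write_all(..).await?
        }
        flush_idx += 1;
    }

    assert_eq!(out, b"hello world".to_vec());
}
```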
@@ -5,7 +5,7 @@ extern crate tokio;
 extern crate thiserror;
 
 use hptp::logger::Logger;
-use hptp::msg::{DownMsg, UpMsg};
+use hptp::msg::UpMsg;
 use hptp::peer::{self, Peer, UpPeer};
 use std::net::SocketAddr;
 use tokio::io::AsyncRead;
@@ -91,30 +91,29 @@ async fn upload<IN>(log: &mut Logger, peer: &mut UpPeer, inp: &mut IN) -> Result
 where
     IN: AsyncRead + Unpin,
 {
-    let mut pos = 0;
-    let mut next_ack_len: Option<usize> = None;
+    let mut seg_idx = 0;
+    // TODO: async recieve acks
     loop {
-        if let Some(ack_len) = next_ack_len {
+        /* if let Some(ack_len) = next_ack_len {
             match peer.recv().await {
-                Ok(DownMsg::Ack { .. }) => {
-                    log.recv_ack(pos).await;
-                    pos += ack_len;
+                Ok(DownMsg::Ack { idxs }) => {
+                    // log.recv_ack(pos).await;
                     next_ack_len = None;
                 }
 
                 Err(peer::RecvError::InvalidMessage { .. }) => log.recv_corrupt().await,
                 Err(peer::RecvError::Io { source }) => return Err(source.into()),
             }
-        } else {
+        } else {*/
             match read_segment(inp).await? {
-                Some(data) => {
-                    next_ack_len = Some(data.len());
-                    log.send_data(pos, data.len()).await;
-                    send(peer, UpMsg::Data { payload: data }).await?;
+                Some(payload) => {
+                    log.send_data(seg_idx as usize, payload.len()).await;
+                    let data = UpMsg::Data { payload, seg_idx };
+                    send(peer, data).await?;
+                    seg_idx += 1;
                 }
                 None => break,
             }
-        }
+            // }
     }
     log.completed().await;
     Ok(())
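On the upload side the old stop-and-wait ack flow is commented out (hence the `TODO: async recieve acks`), and the sender simply streams segments with an incrementing `seg_idx`. A hypothetical follow-up, not part of this commit: keep unacked payloads keyed by index and drop whatever a received `DownMsg::Ack { idxs }` reports, leaving retransmission candidates behind.

```rust
use std::collections::{HashMap, HashSet};

type SegIdx = u32; // assumed alias, mirroring hptp::seg::SegIdx

// Drop everything the receiver has acknowledged; what remains would be
// a candidate for retransmission.
fn apply_ack(unacked: &mut HashMap<SegIdx, Vec<u8>>, acked: &HashSet<SegIdx>) {
    unacked.retain(|idx, _| !acked.contains(idx));
}

fn main() {
    let mut unacked: HashMap<SegIdx, Vec<u8>> = HashMap::new();
    unacked.insert(0, b"seg 0".to_vec());
    unacked.insert(1, b"seg 1".to_vec());
    unacked.insert(2, b"seg 2".to_vec());

    // Pretend the receiver's DownMsg::Ack { idxs } reported 0 and 2.
    let acked: HashSet<SegIdx> = [0, 2].iter().copied().collect();
    apply_ack(&mut unacked, &acked);

    assert_eq!(unacked.len(), 1);
    assert!(unacked.contains_key(&1)); // segment 1 still needs (re)sending
}
```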
@@ -11,3 +11,4 @@ edition = "2018"
 tokio = {version = "0.2.*", features = ["io-std", "io-util", "udp"]}
 thiserror = "*"
 chrono = "0.4.*"
+byteorder = "1.3.*"
@@ -1,8 +1,9 @@
 #[macro_use]
 extern crate thiserror;
+extern crate byteorder;
 extern crate chrono;
 
 pub mod logger;
-pub mod seg;
 pub mod msg;
 pub mod peer;
+pub mod seg;
@@ -1,18 +1,20 @@
-pub use super::seg::{MAX_TOTAL_PACKET_SIZE, UP_HEADER_SIZE, DOWN_HEADER_SIZE};
+use super::seg::SegIdx;
+pub use super::seg::{DOWN_HEADER_SIZE, MAX_TOTAL_PACKET_SIZE, UP_HEADER_SIZE};
+use byteorder::ByteOrder;
+use std::collections::HashSet;
 
 #[derive(Clone, Debug)]
 pub enum UpMsg {
     Data {
         payload: Vec<u8>,
-        // seg_idx: SegIdx,
-        // encoding: SegmentEncoding,
+        seg_idx: SegIdx,
         // is_final_packet: bool,
     },
 }
 
 #[derive(Clone, Debug)]
 pub enum DownMsg {
-    Ack {}, // ackd: SegmentSet
+    Ack { idxs: HashSet<SegIdx> },
 }
 
 #[derive(Error, Debug)]
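`UpMsg::Data` now carries a `seg_idx` and `DownMsg::Ack` carries the set of indices received so far. A standalone sketch of the new shapes, assuming `SegIdx` is a `u32` alias (which is what the 4-byte header in the `SerDes` impl below implies):

```rust
use std::collections::HashSet;

type SegIdx = u32; // assumed; the real alias lives in hptp::seg

#[derive(Clone, Debug)]
enum UpMsg {
    Data { payload: Vec<u8>, seg_idx: SegIdx },
}

#[derive(Clone, Debug)]
enum DownMsg {
    Ack { idxs: HashSet<SegIdx> },
}

fn main() {
    // The uploader tags each payload with its index...
    let data = UpMsg::Data { payload: b"chunk three".to_vec(), seg_idx: 3 };
    // ...and the downloader acks every index it has seen so far.
    let ack = DownMsg::Ack { idxs: [0, 1, 3].iter().copied().collect() };
    println!("{:?}\n{:?}", data, ack);
}
```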
@@ -26,30 +28,47 @@ pub trait SerDes: Sized {
     fn ser_to(&self, buf: &mut [u8]) -> usize;
 }
 
+type BO = byteorder::LE;
+
 impl SerDes for UpMsg {
     fn des(buf: &[u8]) -> Result<Self, DesError> {
-        Ok(UpMsg::Data {
-            payload: buf.into(),
-        })
+        if buf.len() < UP_HEADER_SIZE {
+            Err(DesError)
+        } else {
+            Ok(UpMsg::Data {
+                seg_idx: BO::read_u32(&buf[0..4]),
+                payload: buf[4..].into(),
+            })
+        }
     }
 
     fn ser_to(&self, buf: &mut [u8]) -> usize {
         match self {
-            UpMsg::Data { payload, .. } => {
+            UpMsg::Data { payload, seg_idx } => {
                 let len = payload.len();
-                buf[..len].copy_from_slice(&payload[..]);
-                len
+                BO::write_u32(&mut buf[0..4], *seg_idx);
+                buf[4..4 + len].copy_from_slice(&payload[..]);
+                4 + len
             }
         }
     }
 }
 
 impl SerDes for DownMsg {
-    fn des(_buf: &[u8]) -> Result<Self, DesError> {
-        Ok(DownMsg::Ack {})
+    fn des(buf: &[u8]) -> Result<Self, DesError> {
+        let mut idxs = HashSet::new();
+        for (i, b) in buf.iter().cloned().enumerate() {
+            for j in 0..8 {
+                if b & (1 << j) != 0 {
+                    idxs.insert((i * 8 + j) as SegIdx);
+                }
+            }
+        }
+        Ok(DownMsg::Ack { idxs })
     }
 
     fn ser_to(&self, _buf: &mut [u8]) -> usize {
-        1
+        // TODO: implement this!
+        0
     }
 }
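`DownMsg::des` reads the ack set out of a bitmap: bit `j` of byte `i` marks segment `i * 8 + j`. The matching encoder is still a TODO (`ser_to` just returns 0 for now); its inverse could plausibly look like the following sketch (the helper name `encode_ack_bitmap` is hypothetical, not something this commit adds):

```rust
use std::collections::HashSet;

type SegIdx = u32;

// Inverse of the decoding loop in DownMsg::des: set bit (idx % 8) of
// byte (idx / 8), and report how many bytes of the buffer were used.
fn encode_ack_bitmap(idxs: &HashSet<SegIdx>, buf: &mut [u8]) -> usize {
    for b in buf.iter_mut() {
        *b = 0;
    }
    let mut used = 0;
    for &idx in idxs {
        let (byte, bit) = ((idx / 8) as usize, idx % 8);
        if byte < buf.len() {
            buf[byte] |= 1 << bit;
            used = used.max(byte + 1);
        }
    }
    used
}

fn main() {
    let idxs: HashSet<SegIdx> = [0, 3, 9].iter().copied().collect();
    let mut buf = [0u8; 4];
    let used = encode_ack_bitmap(&idxs, &mut buf);
    assert_eq!(used, 2);
    assert_eq!(buf[0], 0b0000_1001); // segments 0 and 3
    assert_eq!(buf[1], 0b0000_0010); // segment 9 -> byte 1, bit 1
}
```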
@@ -2,7 +2,7 @@
 pub const MAX_TOTAL_PACKET_SIZE: usize = 1472;
 
 // TODO: change these based off the decoders
-pub const UP_HEADER_SIZE: usize = 0;
+pub const UP_HEADER_SIZE: usize = 4;
 pub const DOWN_HEADER_SIZE: usize = 1;
 
 /// This is the maximum amount of segment data we can fit into a packet.
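With a 4-byte upstream header (the little-endian `seg_idx`), the payload ceiling these constants imply is 1472 - 4 = 1468 bytes per packet. A tiny check of that arithmetic; `MAX_SEG_SIZE` is a guessed name, since the constant under the truncated doc comment is not shown here:

```rust
// Constants copied from the hunk above; MAX_SEG_SIZE is a hypothetical
// name for whatever the truncated doc comment introduces.
const MAX_TOTAL_PACKET_SIZE: usize = 1472;
const UP_HEADER_SIZE: usize = 4; // seg_idx as a little-endian u32

const MAX_SEG_SIZE: usize = MAX_TOTAL_PACKET_SIZE - UP_HEADER_SIZE;

fn main() {
    assert_eq!(MAX_SEG_SIZE, 1468); // 1472 - 4
}
```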