put constants in hptp::seg module

Milo Turner 2020-03-09 17:34:18 -04:00
parent bb6f534626
commit 5dcda7aec3
5 changed files with 55 additions and 53 deletions

View File

@@ -5,7 +5,7 @@ extern crate tokio;
 extern crate thiserror;
 use hptp::logger::Logger;
-use hptp::msg::{self, DownMsg, UpMsg};
+use hptp::msg::{DownMsg, UpMsg};
 use hptp::peer::{self, Peer, UpPeer};
 use std::net::SocketAddr;
 use tokio::io::AsyncRead;
@@ -65,12 +65,12 @@ fn parse_args(mut args: impl Iterator<Item = String>) -> Result<SocketAddr, Error>
         .map_err(|_| Error::InvalidArgs)
 }
-async fn read_data<IN>(inp: &mut IN) -> Result<Option<Vec<u8>>, Error>
+async fn read_segment<IN>(inp: &mut IN) -> Result<Option<Vec<u8>>, Error>
 where
     IN: AsyncRead + Unpin,
 {
     use tokio::io::AsyncReadExt;
-    let mut buf = [0u8; msg::MAX_SEG_SIZE];
+    let mut buf = [0u8; hptp::seg::MAX_SEG_SIZE];
     let len = inp.read(&mut buf).await?;
     Ok(if len > 0 {
         Some(buf[..len].into())
@@ -106,7 +106,7 @@ where
             Err(peer::RecvError::Io { source }) => return Err(source.into()),
         }
     } else {
-        match read_data(inp).await? {
+        match read_segment(inp).await? {
            Some(data) => {
                 next_ack_len = Some(data.len());
                 log.send_data(pos, data.len()).await;
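For context, a minimal sketch of how the renamed read_segment is meant to be driven; the pump_segments helper below is hypothetical (not part of this commit) and assumes the Error type and read_segment defined in this file:

use tokio::io::AsyncRead;

// Hypothetical driver (not in the commit): drain `inp` one segment-sized
// chunk at a time until `read_segment` signals EOF by returning `None`.
async fn pump_segments<IN>(inp: &mut IN) -> Result<(), Error>
where
    IN: AsyncRead + Unpin,
{
    while let Some(data) = read_segment(inp).await? {
        // Each chunk is at most MAX_SEG_SIZE bytes, so it fits in one packet.
        debug_assert!(data.len() <= hptp::seg::MAX_SEG_SIZE);
        // ...hand `data` to the peer as an UpMsg here...
    }
    Ok(())
}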

View File

@@ -3,5 +3,6 @@ extern crate thiserror;
 extern crate chrono;
 pub mod logger;
+pub mod seg;
 pub mod msg;
 pub mod peer;

View File

@@ -1,34 +1,4 @@
-/// Per the assignment spec, `1472` is the maximum size packet we're allowed to send.
-pub const MAX_SERIALIZED_SIZE: usize = 1472;
-// TODO: change these based off the decoders
-pub const UP_HEADER_SIZE: usize = 0;
-pub const DOWN_HEADER_SIZE: usize = 0;
-/// This is the maximum amount of segment data we can fit into a packet.
-pub const MAX_SEG_SIZE: usize = MAX_SERIALIZED_SIZE - UP_HEADER_SIZE;
-// Note: we can only keep so much file data in RAM, so let's see what would be the
-// maximum amount of file data we keep in flux.
-//
-// 1456 B (MAX_SEG_SIZE) * 2.03x (PSHex expansion factor) = 2.96 KB/seg
-// 2.96 KB/seg * 11,648 seg (max possible SEG_PER_CHUNK) = 34.5 MB
-//
-// 34 MB is actually much larger than the maximum test case size.
-/// This is calculated based on the max size we would need for a bit-field specifying
-/// which segments are present in a chunk.
-pub const SEG_PER_CHUNK: usize = (MAX_SERIALIZED_SIZE - DOWN_HEADER_SIZE) * 8;
-/// Is `u32` big enough to handle all file sizes?
-///
-/// 1.46 KB/seg * 2^32 seg = 6.3 TB
-///
-/// If we used `u16` instead we would be just barely OK:
-///
-/// 1.46 KB/seg * 2^16 seg = 95.4 MB
-///
-pub type SegIdx = u32;
+pub use super::seg::{MAX_TOTAL_PACKET_SIZE, UP_HEADER_SIZE, DOWN_HEADER_SIZE};
 #[derive(Clone, Debug)]
 pub enum UpMsg {
@@ -45,22 +15,6 @@ pub enum DownMsg {
     Ack {}, // ackd: SegmentSet
 }
-/*
-#[derive(Copy, Clone, Eq, PartialEq, Debug)]
-pub enum SegmentEncoding {
-    /// Un-encoded byte sequence.
-    Raw,
-    /// "PostScript style" Hex w/ linebreaks: ([0-9a-f]{60}\n)*.
-    PSHex,
-}
-#[derive(Clone, Debug)]
-pub struct SegmentSet {
-    pub latest_seg_idx: SegIdx,
-    pub other_segs: std::collections::HashSet<SegIdx>,
-}
-*/
 #[derive(Error, Debug)]
 #[error("deserialization failed; malformed packet")]
 pub struct DesError;
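The re-export above is why peer.rs (next file) can keep writing msg::MAX_TOTAL_PACKET_SIZE: the three re-exported constants keep their old msg:: paths, while names not re-exported (like MAX_SEG_SIZE, as the first file shows) must now be reached via seg::. A quick sanity check, hypothetical and not part of the commit (const assert! needs Rust 1.57+):

// Both paths name the very same item, so callers of the old
// msg::-qualified constants keep compiling without changes.
const _: () = assert!(hptp::msg::MAX_TOTAL_PACKET_SIZE == hptp::seg::MAX_TOTAL_PACKET_SIZE);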

View File

@@ -55,7 +55,7 @@ impl<F, T> Peer<F, T> {
     where
         F: SerDes,
     {
-        let mut buf = [0u8; msg::MAX_SERIALIZED_SIZE];
+        let mut buf = [0u8; msg::MAX_TOTAL_PACKET_SIZE];
         let len = msg.ser_to(&mut buf);
         let who = self.targ.ok_or(SendError::NoTarget)?;
         let _n_sent = self.sock.send_to(&buf[..len], who).await?;
@@ -66,7 +66,7 @@ impl<F, T> Peer<F, T> {
     where
         T: SerDes,
     {
-        let mut buf = [0u8; msg::MAX_SERIALIZED_SIZE];
+        let mut buf = [0u8; msg::MAX_TOTAL_PACKET_SIZE];
         let (len, who) = self.sock.recv_from(&mut buf).await?;
         self.set_known_target(who);
         Ok(T::des(&buf[..len])?)
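The same constant now bounds both directions of the socket. A standalone sketch of that buffer-sizing pattern, using tokio 1.x's UdpSocket directly (the echo_once helper is hypothetical, not the Peer API):

use tokio::net::UdpSocket;

// Hypothetical illustration (not in the commit): one stack buffer of
// MAX_TOTAL_PACKET_SIZE bytes covers any datagram this protocol sends or
// receives, since the assignment spec caps packets at 1472 bytes.
async fn echo_once(sock: &UdpSocket) -> std::io::Result<()> {
    let mut buf = [0u8; hptp::seg::MAX_TOTAL_PACKET_SIZE];
    let (len, who) = sock.recv_from(&mut buf).await?;
    sock.send_to(&buf[..len], who).await?;
    Ok(())
}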

hptp/src/seg.rs Normal file (47 lines)
View File

@@ -0,0 +1,47 @@
+/// Per the assignment spec, `1472` is the maximum size packet we're allowed to send.
+pub const MAX_TOTAL_PACKET_SIZE: usize = 1472;
+// TODO: change these based off the decoders
+pub const UP_HEADER_SIZE: usize = 0;
+pub const DOWN_HEADER_SIZE: usize = 0;
+/// This is the maximum amount of segment data we can fit into a packet.
+pub const MAX_SEG_SIZE: usize = MAX_TOTAL_PACKET_SIZE - UP_HEADER_SIZE;
+// Note: we can only keep so much file data in RAM, so let's see what would be the
+// maximum amount of file data we keep in flux.
+//
+// 1456 B (MAX_SEG_SIZE) * 2.03x (PSHex expansion factor) = 2.96 KB/seg
+// 2.96 KB/seg * 11,648 seg (max possible SEG_PER_CHUNK) = 34.5 MB
+//
+// 34 MB is actually much larger than the maximum test case size.
+/// This is calculated based on the max size we would need for a bit-field specifying
+/// which segments are present in a chunk.
+pub const SEG_PER_CHUNK: usize = (MAX_TOTAL_PACKET_SIZE - DOWN_HEADER_SIZE) * 8;
+/// Is `u32` big enough to handle all file sizes?
+///
+/// 1.46 KB/seg * 2^32 seg = 6.3 TB
+///
+/// If we used `u16` instead we would be just barely OK:
+///
+/// 1.46 KB/seg * 2^16 seg = 95.4 MB
+///
+pub type SegIdx = u32;
+/*
+#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+pub enum SegmentEncoding {
+    /// Un-encoded byte sequence.
+    Raw,
+    /// "PostScript style" Hex w/ linebreaks: ([0-9a-f]{60}\n)*.
+    PSHex,
+}
+#[derive(Clone, Debug)]
+pub struct SegmentSet {
+    pub latest_seg_idx: SegIdx,
+    pub other_segs: std::collections::HashSet<SegIdx>,
+}
+*/
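The doc comments above do their sizing arithmetic assuming non-zero headers (1456 B per segment), while the constants currently use zero-byte headers. A hypothetical unit test (not part of the commit) double-checking what the constants actually evaluate to:

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn constant_arithmetic() {
        // With zero-byte headers, the whole packet is segment data.
        assert_eq!(MAX_SEG_SIZE, 1472);
        // Bit-field capacity: one bit per segment across a packet-sized payload.
        assert_eq!(SEG_PER_CHUNK, 1472 * 8); // 11,776 with zero-byte headers
        // u32 indices cover 2^32 segments at ~1.46 KB each, roughly 6.3 TB.
        let addressable = (u32::MAX as u64 + 1) * MAX_SEG_SIZE as u64;
        assert!(addressable > 6_000_000_000_000);
    }
}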