Run clippy

feature/v32-tokens
Edward Shen 2021-04-14 22:52:54 -04:00
parent c75b5063af
commit 8949e41bee
Signed by: edward
GPG Key ID: 19182661E818369F
7 changed files with 88 additions and 54 deletions

22
src/cache/fs.rs vendored
View File

@ -28,7 +28,7 @@ use tokio::time::Sleep;
/// up to Client A's request, then Client B could receive a broken image, as it
/// thinks it's done reading the file.
///
/// We effectively use WRITING_STATUS as a status relay to ensure concurrent
/// We effectively use `WRITING_STATUS` as a status relay to ensure concurrent
/// reads to the file while it's being written to will wait for writing to be
/// completed.
static WRITING_STATUS: Lazy<RwLock<HashMap<PathBuf, Arc<CacheStatus>>>> =
@ -40,8 +40,7 @@ pub async fn read_file(path: &Path) -> Option<Result<FromFsStream, std::io::Erro
let status = WRITING_STATUS
.read()
.get(path)
.map(Arc::clone)
.unwrap_or_else(|| Arc::new(CacheStatus::done()));
.map_or_else(|| Arc::new(CacheStatus::done()), Arc::clone);
Some(FromFsStream::new(path, status).await)
} else {
@ -69,18 +68,17 @@ pub async fn transparent_file_stream(
let path_buf = path.to_path_buf();
tokio::spawn(async move {
let path_buf = path_buf; // moves path buf into async
let mut was_errored = false;
let mut errored = false;
while let Some(bytes) = byte_stream.next().await {
match bytes {
Ok(bytes) => file.write_all(&bytes).await?,
Err(_) => {
was_errored = true;
break;
}
if let Ok(bytes) = bytes {
file.write_all(&bytes).await?
} else {
errored = true;
break;
}
}
if was_errored {
if errored {
// It's ok if deleting the file fails, since we truncate on
// create anyway, but it should be best effort
let _ = remove_file(&path_buf).await;
@ -92,7 +90,7 @@ pub async fn transparent_file_stream(
let mut write_lock = WRITING_STATUS.write();
// This needs to be written atomically with the write lock, else
// it's possible we have an inconsistent state
if was_errored {
if errored {
write_flag.store(WritingStatus::Error);
} else {
write_flag.store(WritingStatus::Done);

View File

@ -11,17 +11,17 @@ use super::{Cache, CacheKey, CachedImage, ImageMetadata};
pub struct GenerationalCache {
in_memory: LruCache<CacheKey, (CachedImage, ImageMetadata)>,
memory_max_size: usize,
memory_cur_size: usize,
memory_max_size: u64,
memory_cur_size: u64,
on_disk: LruCache<CacheKey, ImageMetadata>,
disk_path: PathBuf,
disk_max_size: usize,
disk_cur_size: usize,
disk_max_size: u64,
disk_cur_size: u64,
}
impl GenerationalCache {
pub fn new(memory_max_size: usize, disk_max_size: usize, disk_path: PathBuf) -> Self {
pub fn new(memory_max_size: u64, disk_max_size: u64, disk_path: PathBuf) -> Self {
Self {
in_memory: LruCache::unbounded(),
memory_max_size,
@ -38,9 +38,9 @@ impl GenerationalCache {
let new_img_size = image.0.len();
let mut to_drop = vec![];
if self.disk_max_size >= new_img_size {
if self.disk_max_size >= new_img_size as u64 {
// Add images to drop from cold cache into a queue
while new_img_size + self.disk_cur_size > self.disk_max_size {
while new_img_size as u64 + self.disk_cur_size > self.disk_max_size {
match self.on_disk.pop_lru() {
Some((key, _)) => {
let mut path = self.disk_path.clone();
@ -72,7 +72,7 @@ impl GenerationalCache {
})
.unwrap_or_default();
self.disk_cur_size -= file_size as usize;
self.disk_cur_size -= file_size;
}
to_drop.push(path);
@ -102,7 +102,7 @@ impl GenerationalCache {
let new_key_path = {
let mut root = self.disk_path.clone();
root.push(PathBuf::from(&key));
root.push(PathBuf::from(key.clone()));
root
};
@ -112,7 +112,7 @@ impl GenerationalCache {
match file.write_all(&image.0).await {
Ok(_) => {
self.on_disk.put(key, metadata);
self.disk_cur_size += new_img_size;
self.disk_cur_size += new_img_size as u64;
debug!(
"Successfully written data to disk for file {:?}",
new_key_path
@ -140,18 +140,16 @@ impl Cache for GenerationalCache {
if let Some(metadata) = self.on_disk.pop(key) {
let new_key_path = {
let mut root = self.disk_path.clone();
root.push(PathBuf::from(key));
root.push(PathBuf::from(key.clone()));
root
};
// extract from disk, if it exists
let file = File::open(new_key_path.clone()).await;
let file = File::open(&new_key_path).await;
let mut buffer = if let Some(size) = metadata.content_length {
Vec::with_capacity(size)
} else {
Vec::new()
};
let mut buffer = metadata
.content_length
.map_or_else(Vec::new, Vec::with_capacity);
match file {
Ok(mut file) => {
@ -159,7 +157,7 @@ impl Cache for GenerationalCache {
Ok(_) => {
// We don't particularly care if we fail to delete from disk since
// if it's an error it means it's already been dropped.
tokio::spawn(remove_file(new_key_path.clone()));
tokio::spawn(remove_file(new_key_path));
}
Err(e) => {
warn!("Failed to read from {:?}: {}", new_key_path, e);
@ -174,7 +172,7 @@ impl Cache for GenerationalCache {
buffer.shrink_to_fit();
self.disk_cur_size -= buffer.len();
self.disk_cur_size -= buffer.len() as u64;
let image = CachedImage(Bytes::from(buffer));
// Since we just put it in the in-memory cache it should be there
@ -189,14 +187,14 @@ impl Cache for GenerationalCache {
#[inline]
async fn put(&mut self, key: CacheKey, image: CachedImage, metadata: ImageMetadata) {
let mut hot_evicted = vec![];
let new_img_size = image.0.len();
let new_img_size = image.0.len() as u64;
if self.memory_max_size >= new_img_size {
// Evict oldest entries to make space for new image.
while new_img_size + self.memory_cur_size > self.memory_max_size {
match self.in_memory.pop_lru() {
Some((key, (image, metadata))) => {
self.memory_cur_size -= image.0.len();
self.memory_cur_size -= image.0.len() as u64;
hot_evicted.push((key, image, metadata));
}
None => unreachable!(concat!(

View File

@ -31,7 +31,7 @@ impl LowMemCache {
impl Cache for LowMemCache {
async fn get_stream(&mut self, key: &CacheKey) -> Option<Result<FromFsStream, std::io::Error>> {
if self.on_disk.get(key).is_some() {
super::fs::read_file(&Path::new(&key.to_string())).await
super::fs::read_file(Path::new(&key.to_string())).await
} else {
None
}

60
src/cache/mod.rs vendored
View File

@ -1,5 +1,5 @@
use std::fmt::Display;
use std::path::PathBuf;
use std::{fmt::Display, str::FromStr};
use actix_web::http::HeaderValue;
use async_trait::async_trait;
@ -36,19 +36,54 @@ impl From<CacheKey> for PathBuf {
}
}
impl From<&CacheKey> for PathBuf {
pub struct CachedImage(pub Bytes);
#[derive(Copy, Clone)]
pub struct ImageMetadata {
pub content_type: Option<ImageContentType>,
pub content_length: Option<usize>,
pub last_modified: Option<DateTime<FixedOffset>>,
}
// Note to self: If these are wrong blame Triscuit 9
// Note: public types should derive `Debug`; `PartialEq`/`Eq`/`Hash` let the
// type be compared and used as a map key at zero cost for a fieldless enum.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
/// The set of image MIME types this cache recognizes.
pub enum ImageContentType {
    Png,
    Jpeg,
    Gif,
    Bmp,
    Tif,
}
pub struct InvalidContentType;
impl FromStr for ImageContentType {
type Err = InvalidContentType;
#[inline]
fn from(key: &CacheKey) -> Self {
key.to_string().into()
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"image/png" => Ok(Self::Png),
"image/jpeg" => Ok(Self::Jpeg),
"image/gif" => Ok(Self::Gif),
"image/bmp" => Ok(Self::Bmp),
"image/tif" => Ok(Self::Tif),
_ => Err(InvalidContentType),
}
}
}
pub struct CachedImage(pub Bytes);
pub struct ImageMetadata {
pub content_type: Option<String>,
pub content_length: Option<usize>,
pub last_modified: Option<DateTime<FixedOffset>>,
impl AsRef<str> for ImageContentType {
    /// Returns the canonical MIME type string for this image format
    /// (the inverse of the `FromStr` parsing).
    #[inline]
    fn as_ref(&self) -> &str {
        // Arms listed alphabetically; the enum is `Copy`, so match by value.
        match *self {
            Self::Bmp => "image/bmp",
            Self::Gif => "image/gif",
            Self::Jpeg => "image/jpeg",
            Self::Png => "image/png",
            Self::Tif => "image/tif",
        }
    }
}
#[derive(Debug)]
@ -66,7 +101,10 @@ impl ImageMetadata {
) -> Result<Self, ImageRequestError> {
Ok(Self {
content_type: content_type
.map(|v| v.to_str().map(|v| v.to_string()))
.map(|v| match v.to_str() {
Ok(v) => ImageContentType::from_str(v),
Err(_) => Err(InvalidContentType),
})
.transpose()
.map_err(|_| ImageRequestError::InvalidContentType)?,
content_length: content_length

View File

@ -1,4 +1,4 @@
use std::num::{NonZeroU16, NonZeroUsize};
use std::num::{NonZeroU16, NonZeroU64};
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
@ -18,17 +18,17 @@ pub struct CliArgs {
/// How large, in bytes, the in-memory cache should be. Note that this does
/// not include runtime memory usage.
#[clap(long, env = "MEM_CACHE_QUOTA_BYTES")]
pub memory_quota: NonZeroUsize,
pub memory_quota: NonZeroU64,
/// How large, in bytes, the on-disk cache should be. Note that actual
/// values may be larger for metadata information.
#[clap(long, env = "DISK_CACHE_QUOTA_BYTES")]
pub disk_quota: usize,
pub disk_quota: u64,
/// Sets the location of the disk cache.
#[clap(long, default_value = "./cache", env = "DISK_CACHE_PATH")]
pub cache_path: PathBuf,
/// The network speed to advertise to Mangadex@Home control server.
#[clap(long, env = "MAX_NETWORK_SPEED")]
pub network_speed: NonZeroUsize,
pub network_speed: NonZeroU64,
/// Whether or not to provide the Server HTTP header to clients. This is
/// useful for debugging, but is generally not recommended for security
/// reasons.

View File

@ -1,6 +1,6 @@
use std::{io::BufReader, sync::Arc};
use std::{
num::{NonZeroU16, NonZeroUsize},
num::{NonZeroU16, NonZeroU64},
sync::atomic::Ordering,
};
@ -27,9 +27,9 @@ pub const CONTROL_CENTER_PING_URL: &str = "https://api.mangadex.network/ping";
pub struct Request<'a> {
secret: &'a str,
port: NonZeroU16,
disk_space: usize,
network_speed: NonZeroUsize,
build_version: usize,
disk_space: u64,
network_speed: NonZeroU64,
build_version: u64,
tls_created_at: Option<String>,
}

View File

@ -273,7 +273,7 @@ fn construct_response(cached: &CachedImage, metadata: &ImageMetadata) -> ServerR
.collect();
let mut resp = HttpResponse::Ok();
if let Some(content_type) = &metadata.content_type {
resp.append_header((CONTENT_TYPE, &**content_type));
resp.append_header((CONTENT_TYPE, content_type.as_ref()));
}
if let Some(content_length) = &metadata.content_length {
resp.append_header((CONTENT_LENGTH, content_length.to_string()));