diff --git a/Cargo.toml b/Cargo.toml
index edb9d4b..8c6818c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -37,4 +37,4 @@ url = { version = "2", features = [ "serde" ] }
 
 [profile.release]
 lto = true
-codegen-units = 1
\ No newline at end of file
+codegen-units = 1
diff --git a/src/cache/low_mem.rs b/src/cache/low_mem.rs
index a7f7218..8f92fbb 100644
--- a/src/cache/low_mem.rs
+++ b/src/cache/low_mem.rs
@@ -18,6 +18,10 @@ pub struct LowMemCache {
 }
 
 impl LowMemCache {
+    /// Constructs a new low memory cache at the provided path and capacity.
+    /// This internally spawns a task that will wait for filesystem
+    /// notifications when a file has been written.
+    #[allow(clippy::new_ret_no_self)]
     pub fn new(disk_max_size: u64, disk_path: PathBuf) -> Arc<RwLock<Box<dyn Cache>>> {
         let (tx, mut rx) = unbounded_channel();
         let new_self: Arc<RwLock<Box<dyn Cache>>> = Arc::new(RwLock::new(Box::new(Self {
@@ -28,19 +32,17 @@ impl LowMemCache {
             master_sender: tx,
         })));
 
+        // Spawns a new task that continuously listens for events received by
+        // the channel, which informs the low memory cache the total size of the
+        // item that was put into the cache.
         let new_self_0 = Arc::clone(&new_self);
         tokio::spawn(async move {
-            loop {
-                let new_size = match rx.recv().await {
-                    Some(v) => v,
-                    None => break,
-                };
-
+            while let Some(new_size) = rx.recv().await {
                 new_self_0.write().await.increase_usage(new_size).await;
             }
         });
 
-        new_self.clone()
+        new_self
     }
 
     async fn prune(&mut self) {
@@ -75,6 +77,8 @@ impl Cache for LowMemCache {
             .map_err(Into::into)
     }
 
+    /// Increments the internal size counter, pruning if the value exceeds the
+    /// user-defined capacity.
     async fn increase_usage(&mut self, amt: u64) {
         self.disk_cur_size += amt;
         if self.disk_cur_size > self.disk_max_size {
diff --git a/src/cache/mod.rs b/src/cache/mod.rs
index aa0a2a2..6cd5313 100644
--- a/src/cache/mod.rs
+++ b/src/cache/mod.rs
@@ -6,7 +6,7 @@ use std::task::{Context, Poll};
 use actix_web::http::HeaderValue;
 use async_trait::async_trait;
-use bytes::Bytes;
+use bytes::{Bytes, BytesMut};
 use chrono::{DateTime, FixedOffset};
 use fs::ConcurrentFsStream;
 use futures::{Stream, StreamExt};
@@ -88,6 +88,7 @@ impl AsRef<str> for ImageContentType {
     }
 }
 
+#[allow(clippy::pub_enum_variant_names)]
 #[derive(Debug)]
 pub enum ImageRequestError {
     InvalidContentType,
@@ -184,7 +185,7 @@ impl Stream for CacheStream {
             Self::Memory(stream) => stream.poll_next_unpin(cx),
             Self::Completed(stream) => stream
                 .poll_next_unpin(cx)
-                .map_ok(|v| v.freeze())
+                .map_ok(BytesMut::freeze)
                 .map_err(|_| UpstreamError),
         }
     }
diff --git a/src/routes.rs b/src/routes.rs
index d2013c4..8af5fd2 100644
--- a/src/routes.rs
+++ b/src/routes.rs
@@ -48,6 +48,7 @@ impl Responder for ServerResponse {
     }
 }
 
+#[allow(clippy::future_not_send)]
 #[get("/{token}/data/{chapter_hash}/{file_name}")]
 async fn token_data(
     state: Data<RwLockServerState>,
@@ -64,6 +65,7 @@ async fn token_data(
     fetch_image(state, cache, chapter_hash, file_name, false).await
 }
 
+#[allow(clippy::future_not_send)]
 #[get("/{token}/data-saver/{chapter_hash}/{file_name}")]
 async fn token_data_saver(
     state: Data<RwLockServerState>,
@@ -80,6 +82,7 @@ async fn token_data_saver(
     fetch_image(state, cache, chapter_hash, file_name, true).await
 }
 
+#[allow(clippy::future_not_send)]
 pub async fn default(state: Data<RwLockServerState>, req: HttpRequest) -> impl Responder {
     let path = &format!(
         "{}{}",
@@ -181,6 +184,7 @@ fn push_headers(builder: &mut HttpResponseBuilder) -> &mut HttpResponseBuilder {
     builder
 }
 
+#[allow(clippy::future_not_send)]
 async fn fetch_image(
     state: Data<RwLockServerState>,
     cache: Data<RwLock<Box<dyn Cache>>>,