diff --git a/src/cache/low_mem.rs b/src/cache/low_mem.rs
index 7b42d21..1f9c447 100644
--- a/src/cache/low_mem.rs
+++ b/src/cache/low_mem.rs
@@ -8,11 +8,10 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use futures::StreamExt;
 use log::{warn, LevelFilter};
-use sqlx::{sqlite::SqliteConnectOptions, ConnectOptions, SqlitePool};
-use tokio::{
-    fs::remove_file,
-    sync::mpsc::{channel, Sender},
-};
+use sqlx::sqlite::SqliteConnectOptions;
+use sqlx::{ConnectOptions, SqlitePool};
+use tokio::fs::remove_file;
+use tokio::sync::mpsc::{channel, Sender};
 use tokio_stream::wrappers::ReceiverStream;
 
 use super::{BoxedImageStream, Cache, CacheError, CacheKey, CacheStream, ImageMetadata};
@@ -63,7 +62,8 @@ impl LowMemCache {
         // item that was put into the cache.
         let new_self_0 = Arc::clone(&new_self);
 
-        // Spawn a new task that will listen for updates to the db.
+        // Spawn a new task that will listen for updates to the db, pruning if
+        // the size becomes too large
         tokio::spawn(async move {
             let db_pool = db_pool;
             let max_on_disk_size = disk_max_size / 20 * 19;
@@ -134,15 +134,11 @@ impl LowMemCache {
 impl Cache for LowMemCache {
     async fn get(
         &self,
-        key: Arc<CacheKey>,
+        key: &CacheKey,
     ) -> Option<Result<(CacheStream, ImageMetadata), CacheError>> {
         let channel = self.db_update_channel_sender.clone();
 
-        let path = Arc::new(
-            self.disk_path
-                .clone()
-                .join(PathBuf::from(Arc::clone(&key).as_ref())),
-        );
+        let path = Arc::new(self.disk_path.clone().join(PathBuf::from(key)));
         let path_0 = Arc::clone(&path);
 
         tokio::spawn(async move { channel.send(DbMessage::Get(path_0)).await });
@@ -154,13 +150,13 @@ impl Cache for LowMemCache {
 
     async fn put(
         &self,
-        key: Arc<CacheKey>,
+        key: &CacheKey,
         image: BoxedImageStream,
         metadata: ImageMetadata,
     ) -> Result<CacheStream, CacheError> {
         let channel = self.db_update_channel_sender.clone();
 
-        let path = Arc::new(self.disk_path.clone().join(PathBuf::from(key.as_ref())));
+        let path = Arc::new(self.disk_path.clone().join(PathBuf::from(key)));
         let path_0 = Arc::clone(&path);
 
         let db_callback = |size: u32| async move {
diff --git a/src/cache/mod.rs b/src/cache/mod.rs
index a467d54..e4d33df 100644
--- a/src/cache/mod.rs
+++ b/src/cache/mod.rs
@@ -1,8 +1,8 @@
+use std::fmt::Display;
 use std::path::PathBuf;
 use std::pin::Pin;
 use std::str::FromStr;
 use std::task::{Context, Poll};
-use std::{fmt::Display, sync::Arc};
 
 use actix_web::http::HeaderValue;
 use async_trait::async_trait;
@@ -152,14 +152,12 @@ pub enum CacheError {
 
 #[async_trait]
 pub trait Cache: Send + Sync {
-    async fn get(
-        &self,
-        key: Arc<CacheKey>,
-    ) -> Option<Result<(CacheStream, ImageMetadata), CacheError>>;
+    async fn get(&self, key: &CacheKey)
+        -> Option<Result<(CacheStream, ImageMetadata), CacheError>>;
 
     async fn put(
         &self,
-        key: Arc<CacheKey>,
+        key: &CacheKey,
         image: BoxedImageStream,
         metadata: ImageMetadata,
     ) -> Result<CacheStream, CacheError>;
diff --git a/src/routes.rs b/src/routes.rs
index a2ebcfe..be88540 100644
--- a/src/routes.rs
+++ b/src/routes.rs
@@ -1,4 +1,4 @@
-use std::sync::{atomic::Ordering, Arc};
+use std::sync::atomic::Ordering;
 
 use actix_web::http::header::{
     ACCESS_CONTROL_ALLOW_ORIGIN, ACCESS_CONTROL_EXPOSE_HEADERS, CACHE_CONTROL, CONTENT_LENGTH,
@@ -191,9 +191,9 @@ async fn fetch_image(
     file_name: String,
     is_data_saver: bool,
 ) -> ServerResponse {
-    let key = Arc::new(CacheKey(chapter_hash, file_name, is_data_saver));
+    let key = CacheKey(chapter_hash, file_name, is_data_saver);
 
-    match cache.get(Arc::clone(&key)).await {
+    match cache.get(&key).await {
         Some(Ok((image, metadata))) => {
             return construct_response(image, &metadata);
         }
@@ -263,7 +263,7 @@ async fn fetch_image(
     let metadata = ImageMetadata::new(content_type, length, last_mod).unwrap();
 
     let stream = {
-        match cache.put(key, Box::new(body), metadata).await {
+        match cache.put(&key, Box::new(body), metadata).await {
             Ok(stream) => stream,
             Err(e) => {
                 warn!("Failed to insert into cache: {}", e);
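
Note on the `PathBuf::from(key)` simplification above: deriving the on-disk path only needs a borrow of the key, which is what lets every `Arc<CacheKey>` become a plain `&CacheKey`. Below is a minimal sketch of the `From<&CacheKey> for PathBuf` conversion this relies on, assuming (as the retained `std::fmt::Display` import in mod.rs hints) that the path is rendered from the key's `Display` impl. The `saver/`-vs-`data/` layout is purely illustrative and not taken from this diff:

    use std::fmt;
    use std::path::PathBuf;

    // Tuple of (chapter hash, file name, is data saver), matching the
    // construction in fetch_image.
    pub struct CacheKey(pub String, pub String, pub bool);

    // Hypothetical naming scheme: the real on-disk layout is not shown in
    // this diff; "saver/..." vs "data/..." is an illustrative guess.
    impl fmt::Display for CacheKey {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            if self.2 {
                write!(f, "saver/{}/{}", self.0, self.1)
            } else {
                write!(f, "data/{}/{}", self.0, self.1)
            }
        }
    }

    // The conversion PathBuf::from(key) relies on: a borrow is enough, so
    // the call sites no longer need to own (or Arc-wrap) the key.
    impl From<&CacheKey> for PathBuf {
        fn from(key: &CacheKey) -> Self {
            key.to_string().into()
        }
    }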