diff --git a/src/cache/disk.rs b/src/cache/disk.rs
index 8b3f0fd..09f1a78 100644
--- a/src/cache/disk.rs
+++ b/src/cache/disk.rs
@@ -29,11 +29,10 @@ enum DbMessage {
 }
 
 impl DiskCache {
-    /// Constructs a new low memory cache at the provided path and capaci ty.
+    /// Constructs a new low memory cache at the provided path and capacity.
     /// This internally spawns a task that will wait for filesystem
     /// notifications when a file has been written.
-    #[allow(clippy::new_ret_no_self)]
-    pub async fn new(disk_max_size: u64, disk_path: PathBuf) -> Arc<Box<dyn Cache>> {
+    pub async fn new(disk_max_size: u64, disk_path: PathBuf) -> Arc<Self> {
         let (db_tx, db_rx) = channel(128);
         let db_pool = {
             let db_url = format!("sqlite:{}/metadata.sqlite", disk_path.to_string_lossy());
@@ -52,11 +51,11 @@ impl DiskCache {
             db
         };
 
-        let new_self: Arc<Box<dyn Cache>> = Arc::new(Box::new(Self {
+        let new_self = Arc::new(Self {
             disk_path,
             disk_cur_size: AtomicU64::new(0),
             db_update_channel_sender: db_tx,
-        }));
+        });
 
         tokio::spawn(db_listener(
             Arc::clone(&new_self),
@@ -72,7 +71,7 @@ impl DiskCache {
 /// Spawn a new task that will listen for updates to the db, pruning if the size
 /// becomes too large.
 async fn db_listener(
-    cache: Arc<Box<dyn Cache>>,
+    cache: Arc<DiskCache>,
     db_rx: Receiver<DbMessage>,
     db_pool: SqlitePool,
     max_on_disk_size: u64,
@@ -202,7 +201,7 @@ impl Cache for DiskCache {
         let path_0 = Arc::clone(&path);
 
         let db_callback = |size: u32| async move {
-            let _ = channel.send(DbMessage::Put(path_0, size)).await;
+            std::mem::drop(channel.send(DbMessage::Put(path_0, size)).await);
         };
 
         super::fs::write_file(&path, key, image, metadata, db_callback, None)
diff --git a/src/cache/fs.rs b/src/cache/fs.rs
index bec91e8..e67359c 100644
--- a/src/cache/fs.rs
+++ b/src/cache/fs.rs
@@ -14,6 +14,15 @@
 //! upstream no longer needs to process duplicate requests and sequential cache
 //! misses are treated as closer as a cache hit.
 
+use std::collections::HashMap;
+use std::error::Error;
+use std::fmt::Display;
+use std::io::SeekFrom;
+use std::num::NonZeroU32;
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
 use actix_web::error::PayloadError;
 use bytes::{Buf, Bytes, BytesMut};
 use futures::{Future, Stream, StreamExt};
@@ -23,14 +32,6 @@ use serde::{Deserialize, Serialize};
 use sodiumoxide::crypto::secretstream::{
     Header, Pull, Push, Stream as SecretStream, Tag, HEADERBYTES,
 };
-use std::collections::HashMap;
-use std::error::Error;
-use std::fmt::Display;
-use std::io::SeekFrom;
-use std::num::NonZeroU32;
-use std::path::{Path, PathBuf};
-use std::pin::Pin;
-use std::task::{Context, Poll};
 use tokio::fs::{create_dir_all, remove_file, File};
 use tokio::io::{
     AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, BufReader,
@@ -184,7 +185,7 @@ impl AsyncRead for EncryptedDiskReader {
             for (old, new) in buffer[cursor_start..].iter_mut().zip(&new_self.buf) {
                 *old = *new;
             }
-            buf.set_filled(cursor_start + &new_self.buf.len());
+            buf.set_filled(cursor_start + new_self.buf.len());
 
             res
         }
@@ -344,11 +345,11 @@ impl AsyncWrite for EncryptedDiskWriter {
         let new_self = Pin::into_inner(self);
         {
             let encryption_buffer = &mut new_self.encryption_buffer;
-            new_self.stream.as_mut().map(|stream| {
+            if let Some(stream) = new_self.stream.as_mut() {
                 stream
                     .push_to_vec(buf, None, Tag::Message, encryption_buffer)
                     .expect("Failed to write encrypted data to buffer");
-            });
+            }
         }
 
         new_self.write_buffer.extend(&new_self.encryption_buffer);
diff --git a/src/cache/mem.rs b/src/cache/mem.rs
index 0c1f640..bc0c2d0 100644
--- a/src/cache/mem.rs
+++ b/src/cache/mem.rs
@@ -1,9 +1,6 @@
-use std::path::PathBuf;
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
 
-use crate::cache::DiskCache;
-
 use super::{
     BoxedImageStream, Cache, CacheError, CacheKey, CacheStream, ImageMetadata, InnerStream,
     MemStream,
@@ -31,37 +28,81 @@ pub trait InternalMemoryCache: Sync + Send {
     fn pop(&mut self) -> Option<(CacheKey, CacheValue)>;
 }
 
+impl InternalMemoryCache for Lfu {
+    #[inline]
+    fn unbounded() -> Self {
+        Self::unbounded()
+    }
+
+    #[inline]
+    fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
+        self.get(key)
+    }
+
+    #[inline]
+    fn push(&mut self, key: CacheKey, data: CacheValue) {
+        self.insert(key, data);
+    }
+
+    #[inline]
+    fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
+        self.pop_lfu_key_value()
+    }
+}
+
+impl InternalMemoryCache for Lru {
+    #[inline]
+    fn unbounded() -> Self {
+        Self::unbounded()
+    }
+
+    #[inline]
+    fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
+        self.get(key)
+    }
+
+    #[inline]
+    fn push(&mut self, key: CacheKey, data: CacheValue) {
+        self.put(key, data);
+    }
+
+    #[inline]
+    fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
+        self.pop_lru()
+    }
+}
+
 /// Memory accelerated disk cache. Uses the internal cache implementation in
 /// memory to speed up reads.
-pub struct MemoryCache<InternalCacheImpl> {
-    inner: Arc<Box<dyn Cache>>,
+pub struct MemoryCache<InternalCacheImpl, InnerCache> {
+    inner: InnerCache,
     cur_mem_size: AtomicU64,
     mem_cache: Mutex<InternalCacheImpl>,
     master_sender: Sender<(CacheKey, Bytes, ImageMetadata, usize)>,
 }
 
-impl<InternalCacheImpl: 'static + InternalMemoryCache> MemoryCache<InternalCacheImpl> {
-    #[allow(clippy::new_ret_no_self)]
-    pub async fn new(
-        disk_max_size: u64,
-        disk_path: PathBuf,
-        max_mem_size: u64,
-    ) -> Arc<Box<dyn Cache>> {
+impl<InternalCacheImpl: 'static + InternalMemoryCache, InnerCache: 'static + Cache>
+    MemoryCache<InternalCacheImpl, InnerCache>
+{
+    pub async fn new(inner: InnerCache, max_mem_size: u64) -> Arc<Self> {
         let (tx, mut rx) = channel(100);
-        let new_self = Arc::new(Box::new(Self {
-            inner: DiskCache::new(disk_max_size, disk_path).await,
+        let new_self = Arc::new(Self {
+            inner,
             cur_mem_size: AtomicU64::new(0),
             mem_cache: Mutex::new(InternalCacheImpl::unbounded()),
             master_sender: tx,
-        }) as Box<dyn Cache>);
+        });
 
         let new_self_0 = Arc::clone(&new_self);
         tokio::spawn(async move {
             let new_self = new_self_0;
             let max_mem_size = max_mem_size / 20 * 19;
             while let Some((key, bytes, metadata, size)) = rx.recv().await {
-                new_self.increase_usage(size as u32);
-                new_self.put_internal(key, bytes, metadata, size).await;
+                new_self.inner.increase_usage(size as u32);
+                new_self
+                    .inner
+                    .put_internal(key, bytes, metadata, size)
+                    .await;
                 while new_self.mem_size() >= max_mem_size {
                     if let Some((_, _, _, size)) = new_self.pop_memory().await {
                         new_self.decrease_usage(size as u64);
@@ -77,7 +118,9 @@ impl<InternalCacheImpl: 'static + InternalMemoryCache> MemoryCache<InternalCacheImpl>
 }
 
 #[async_trait]
-impl<InternalCacheImpl: InternalMemoryCache> Cache for MemoryCache<InternalCacheImpl> {
+impl<InternalCacheImpl: InternalMemoryCache, InnerCache: Cache> Cache
+    for MemoryCache<InternalCacheImpl, InnerCache>
+{
     #[inline]
     async fn get(
         &self,
@@ -167,47 +210,3 @@ impl<InternalCacheImpl: InternalMemoryCache> Cache for MemoryCache<InternalCacheImpl>
     }
 }
-
-impl InternalMemoryCache for Lfu {
-    #[inline]
-    fn unbounded() -> Self {
-        Self::unbounded()
-    }
-
-    #[inline]
-    fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
-        self.get(key)
-    }
-
-    #[inline]
-    fn push(&mut self, key: CacheKey, data: CacheValue) {
-        self.insert(key, data);
-    }
-
-    #[inline]
-    fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
-        self.pop_lfu_key_value()
-    }
-}
-
-impl InternalMemoryCache for Lru {
-    #[inline]
-    fn unbounded() -> Self {
-        Self::unbounded()
-    }
-
-    #[inline]
-    fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
-        self.get(key)
-    }
-
-    #[inline]
-    fn push(&mut self, key: CacheKey, data: CacheValue) {
-        self.put(key, data);
-    }
-
-    #[inline]
-    fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
-        self.pop_lru()
-    }
-}
diff --git a/src/cache/mod.rs b/src/cache/mod.rs
index 069b0e8..c3457d2 100644
--- a/src/cache/mod.rs
+++ b/src/cache/mod.rs
@@ -205,6 +205,76 @@ pub trait Cache: Send + Sync {
     async fn pop_memory(&self) -> Option<(CacheKey, Bytes, ImageMetadata, usize)>;
 }
 
+#[async_trait]
+impl<T: Cache> Cache for std::sync::Arc<T> {
+    #[inline]
+    async fn get(
+        &self,
+        key: &CacheKey,
+    ) -> Option<Result<(CacheStream, ImageMetadata), CacheError>> {
+        self.as_ref().get(key).await
+    }
+
+    #[inline]
+    async fn put(
+        &self,
+        key: CacheKey,
+        image: BoxedImageStream,
+        metadata: ImageMetadata,
+    ) -> Result<CacheStream, CacheError> {
+        self.as_ref().put(key, image, metadata).await
+    }
+
+    #[inline]
+    fn increase_usage(&self, amt: u32) {
+        self.as_ref().increase_usage(amt)
+    }
+
+    #[inline]
+    fn decrease_usage(&self, amt: u64) {
+        self.as_ref().decrease_usage(amt)
+    }
+
+    #[inline]
+    fn on_disk_size(&self) -> u64 {
+        self.as_ref().on_disk_size()
+    }
+
+    #[inline]
+    fn mem_size(&self) -> u64 {
+        self.as_ref().mem_size()
+    }
+
+    #[inline]
+    async fn put_with_on_completed_callback(
+        &self,
+        key: CacheKey,
+        image: BoxedImageStream,
+        metadata: ImageMetadata,
+        on_complete: Sender<(CacheKey, Bytes, ImageMetadata, usize)>,
+    ) -> Result<CacheStream, CacheError> {
+        self.as_ref()
+            .put_with_on_completed_callback(key, image, metadata, on_complete)
+            .await
+    }
+
+    #[inline]
+    async fn put_internal(
+        &self,
+        key: CacheKey,
+        image: Bytes,
+        metadata: ImageMetadata,
+        size: usize,
+    ) {
+        self.as_ref().put_internal(key, image, metadata, size).await
+    }
+
+    #[inline]
+    async fn pop_memory(&self) -> Option<(CacheKey, Bytes, ImageMetadata, usize)> {
+        self.as_ref().pop_memory().await
+    }
+}
+
 pub struct CacheStream {
     inner: InnerStream,
     decrypt: Option<SecretStream<Pull>>,
 }
@@ -215,7 +285,7 @@ impl CacheStream {
         Ok(Self {
             inner,
             decrypt: header
-                .map(|header| SecretStream::init_pull(&header, &ENCRYPTION_KEY.get().unwrap()))
+                .map(|header| SecretStream::init_pull(&header, ENCRYPTION_KEY.get().unwrap()))
                 .transpose()?,
         })
     }
diff --git a/src/main.rs b/src/main.rs
index c3b9312..9f9feb3 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -27,7 +27,8 @@ use state::{RwLockServerState, ServerState};
 use stop::send_stop;
 use thiserror::Error;
 
-use crate::cache::{mem, MemoryCache, ENCRYPTION_KEY};
+use crate::cache::mem::{Lfu, Lru};
+use crate::cache::{MemoryCache, ENCRYPTION_KEY};
 use crate::config::UnstableOptions;
 use crate::state::DynamicServerCert;
 
@@ -145,12 +146,13 @@ async fn main() -> Result<(), Box<dyn Error>> {
         }
     });
 
-    let cache: Arc<Box<dyn Cache>> = if low_mem_mode {
-        DiskCache::new(disk_quota, cache_path.clone()).await
+    let cache = DiskCache::new(disk_quota, cache_path.clone()).await;
+    let cache: Arc<dyn Cache> = if low_mem_mode {
+        cache
     } else if use_lfu {
-        MemoryCache::<Lfu>::new(disk_quota, cache_path.clone(), memory_max_size).await
+        MemoryCache::<Lfu, _>::new(cache, memory_max_size).await
     } else {
-        MemoryCache::<Lru>::new(disk_quota, cache_path.clone(), memory_max_size).await
+        MemoryCache::<Lru, _>::new(cache, memory_max_size).await
    };
 
     let cache_0 = Arc::clone(&cache);
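
Usage sketch (not part of the diff): a minimal illustration of how the refactored pieces are expected to compose, based only on the new wiring shown in main.rs. It assumes `Cache` and `DiskCache` are importable from `crate::cache` the way main.rs already uses them; `build_cache` and its parameters are hypothetical names standing in for the configuration values main() has in scope.

// Illustrative sketch only. DiskCache::new now returns Arc<DiskCache>, the
// blanket `impl<T: Cache> Cache for Arc<T>` in cache/mod.rs lets that Arc act
// as the cold tier, and MemoryCache is generic over that inner cache.
use std::path::PathBuf;
use std::sync::Arc;

use crate::cache::mem::{Lfu, Lru};
use crate::cache::{Cache, DiskCache, MemoryCache};

async fn build_cache(
    low_mem_mode: bool,
    use_lfu: bool,
    disk_quota: u64,
    cache_path: PathBuf,
    memory_max_size: u64,
) -> Arc<dyn Cache> {
    // Cold tier: always constructed, owns the on-disk state and its db task.
    let disk = DiskCache::new(disk_quota, cache_path).await;
    if low_mem_mode {
        // Arc<DiskCache> coerces directly to Arc<dyn Cache>.
        disk
    } else if use_lfu {
        // Hot tier evicting by access frequency, backed by the disk cache.
        MemoryCache::<Lfu, _>::new(disk, memory_max_size).await
    } else {
        // Hot tier evicting by recency.
        MemoryCache::<Lru, _>::new(disk, memory_max_size).await
    }
}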