Remove layer of indirection

Edward Shen 2021-05-19 22:42:44 -04:00
parent b66cccc832
commit 4f55380d23
Signed by: edward
GPG key ID: 19182661E818369F
5 changed files with 157 additions and 86 deletions
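
The heart of the change: cache constructors now return Arc<Self> instead of the doubly indirect Arc<Box<dyn Cache>>, with a blanket impl<T: Cache> Cache for Arc<T> (added in src/cache/mod.rs below) keeping those handles usable wherever a Cache is expected. A compiling sketch of the idea, with the trait cut down to a single synchronous method for brevity:

use std::sync::Arc;

trait Cache: Send + Sync {
    fn on_disk_size(&self) -> u64;
}

struct DiskCache {
    size: u64,
}

impl Cache for DiskCache {
    fn on_disk_size(&self) -> u64 {
        self.size
    }
}

// Forwarding impl mirroring the one this commit adds: any Arc<T> where
// T: Cache is itself a Cache, so Box<dyn Cache> is no longer required.
impl<T: Cache> Cache for Arc<T> {
    fn on_disk_size(&self) -> u64 {
        self.as_ref().on_disk_size()
    }
}

fn main() {
    // Before: two pointer hops (Arc -> Box -> DiskCache) plus a vtable call.
    let old: Arc<Box<dyn Cache>> = Arc::new(Box::new(DiskCache { size: 0 }));
    // After: a single hop, statically dispatched until erasure is needed.
    let new: Arc<DiskCache> = Arc::new(DiskCache { size: 0 });
    assert_eq!(old.on_disk_size(), new.on_disk_size());
}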

src/cache/disk.rs (13 changes)

@@ -29,11 +29,10 @@ enum DbMessage {
}
impl DiskCache {
/// Constructs a new low memory cache at the provided path and capaci ty.
/// Constructs a new low memory cache at the provided path and capacity.
/// This internally spawns a task that will wait for filesystem
/// notifications when a file has been written.
#[allow(clippy::new_ret_no_self)]
pub async fn new(disk_max_size: u64, disk_path: PathBuf) -> Arc<Box<dyn Cache>> {
pub async fn new(disk_max_size: u64, disk_path: PathBuf) -> Arc<Self> {
let (db_tx, db_rx) = channel(128);
let db_pool = {
let db_url = format!("sqlite:{}/metadata.sqlite", disk_path.to_string_lossy());
@@ -52,11 +51,11 @@ impl DiskCache {
db
};
let new_self: Arc<Box<dyn Cache>> = Arc::new(Box::new(Self {
let new_self = Arc::new(Self {
disk_path,
disk_cur_size: AtomicU64::new(0),
db_update_channel_sender: db_tx,
}));
});
tokio::spawn(db_listener(
Arc::clone(&new_self),
@@ -72,7 +71,7 @@ impl DiskCache {
/// Spawn a new task that will listen for updates to the db, pruning if the size
/// becomes too large.
async fn db_listener(
cache: Arc<Box<dyn Cache>>,
cache: Arc<DiskCache>,
db_rx: Receiver<DbMessage>,
db_pool: SqlitePool,
max_on_disk_size: u64,
@@ -202,7 +201,7 @@ impl Cache for DiskCache {
let path_0 = Arc::clone(&path);
let db_callback = |size: u32| async move {
let _ = channel.send(DbMessage::Put(path_0, size)).await;
std::mem::drop(channel.send(DbMessage::Put(path_0, size)).await);
};
super::fs::write_file(&path, key, image, metadata, db_callback, None)
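
Besides db_listener taking the concrete Arc<DiskCache>, the callback above swaps `let _ =` for an explicit std::mem::drop of the ignored send result, a pattern clippy lints such as let_underscore_drop tend to push toward; both forms discard the Result from send. A standalone sketch of the pattern, assuming a tokio runtime is available:

use tokio::sync::mpsc::channel;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = channel::<u32>(8);
    // send() returns a Result this code path deliberately ignores;
    // dropping it explicitly documents that better than `let _ = ...`.
    std::mem::drop(tx.send(42).await);
    assert_eq!(rx.recv().await, Some(42));
}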

src/cache/fs.rs (23 changes)

@@ -14,6 +14,15 @@
//! upstream no longer needs to process duplicate requests and sequential cache
//! misses are treated as closer to a cache hit.
use std::collections::HashMap;
use std::error::Error;
use std::fmt::Display;
use std::io::SeekFrom;
use std::num::NonZeroU32;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_web::error::PayloadError;
use bytes::{Buf, Bytes, BytesMut};
use futures::{Future, Stream, StreamExt};
@@ -23,14 +32,6 @@ use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::secretstream::{
Header, Pull, Push, Stream as SecretStream, Tag, HEADERBYTES,
};
use std::collections::HashMap;
use std::error::Error;
use std::fmt::Display;
use std::io::SeekFrom;
use std::num::NonZeroU32;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::fs::{create_dir_all, remove_file, File};
use tokio::io::{
AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, BufReader,
@@ -184,7 +185,7 @@ impl AsyncRead for EncryptedDiskReader {
for (old, new) in buffer[cursor_start..].iter_mut().zip(&new_self.buf) {
*old = *new;
}
buf.set_filled(cursor_start + &new_self.buf.len());
buf.set_filled(cursor_start + new_self.buf.len());
res
}
@@ -344,11 +345,11 @@ impl AsyncWrite for EncryptedDiskWriter {
let new_self = Pin::into_inner(self);
{
let encryption_buffer = &mut new_self.encryption_buffer;
new_self.stream.as_mut().map(|stream| {
if let Some(stream) = new_self.stream.as_mut() {
stream
.push_to_vec(buf, None, Tag::Message, encryption_buffer)
.expect("Failed to write encrypted data to buffer");
});
}
}
new_self.write_buffer.extend(&new_self.encryption_buffer);
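
The .map(...) to if-let rewrite above is the standard fix for clippy's option_map_unit_fn: map should transform a value, not run a side-effecting closure whose Option<()> result gets thrown away. (The other tweak in this file drops the needless borrow in cursor_start + &new_self.buf.len(); usize + &usize compiles, but the reference adds nothing.) A minimal before/after:

fn main() {
    let mut stream: Option<Vec<u8>> = Some(Vec::new());

    // Flagged style: the closure returns (), and the resulting
    // Option<()> is silently discarded.
    // stream.as_mut().map(|s| s.push(1));

    // Preferred style: the side effect is explicit.
    if let Some(s) = stream.as_mut() {
        s.push(1);
    }

    assert_eq!(stream, Some(vec![1]));
}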

src/cache/mem.rs (123 changes)

@@ -1,9 +1,6 @@
use std::path::PathBuf;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use crate::cache::DiskCache;
use super::{
BoxedImageStream, Cache, CacheError, CacheKey, CacheStream, ImageMetadata, InnerStream,
MemStream,
@@ -31,37 +28,81 @@ pub trait InternalMemoryCache: Sync + Send {
fn pop(&mut self) -> Option<(CacheKey, CacheValue)>;
}
impl InternalMemoryCache for Lfu {
#[inline]
fn unbounded() -> Self {
Self::unbounded()
}
#[inline]
fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
self.get(key)
}
#[inline]
fn push(&mut self, key: CacheKey, data: CacheValue) {
self.insert(key, data);
}
#[inline]
fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
self.pop_lfu_key_value()
}
}
impl InternalMemoryCache for Lru {
#[inline]
fn unbounded() -> Self {
Self::unbounded()
}
#[inline]
fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
self.get(key)
}
#[inline]
fn push(&mut self, key: CacheKey, data: CacheValue) {
self.put(key, data);
}
#[inline]
fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
self.pop_lru()
}
}
/// Memory accelerated disk cache. Uses the internal cache implementation in
/// memory to speed up reads.
pub struct MemoryCache<InternalCacheImpl> {
inner: Arc<Box<dyn Cache>>,
pub struct MemoryCache<InternalCacheImpl, InnerCache> {
inner: InnerCache,
cur_mem_size: AtomicU64,
mem_cache: Mutex<InternalCacheImpl>,
master_sender: Sender<(CacheKey, Bytes, ImageMetadata, usize)>,
}
impl<InternalCacheImpl: 'static + InternalMemoryCache> MemoryCache<InternalCacheImpl> {
#[allow(clippy::new_ret_no_self)]
pub async fn new(
disk_max_size: u64,
disk_path: PathBuf,
max_mem_size: u64,
) -> Arc<Box<dyn Cache>> {
impl<InternalCacheImpl: 'static + InternalMemoryCache, InnerCache: 'static + Cache>
MemoryCache<InternalCacheImpl, InnerCache>
{
pub async fn new(inner: InnerCache, max_mem_size: u64) -> Arc<Self> {
let (tx, mut rx) = channel(100);
let new_self = Arc::new(Box::new(Self {
inner: DiskCache::new(disk_max_size, disk_path).await,
let new_self = Arc::new(Self {
inner,
cur_mem_size: AtomicU64::new(0),
mem_cache: Mutex::new(InternalCacheImpl::unbounded()),
master_sender: tx,
}) as Box<dyn Cache>);
});
let new_self_0 = Arc::clone(&new_self);
tokio::spawn(async move {
let new_self = new_self_0;
let max_mem_size = max_mem_size / 20 * 19;
while let Some((key, bytes, metadata, size)) = rx.recv().await {
new_self.increase_usage(size as u32);
new_self.put_internal(key, bytes, metadata, size).await;
new_self.inner.increase_usage(size as u32);
new_self
.inner
.put_internal(key, bytes, metadata, size)
.await;
while new_self.mem_size() >= max_mem_size {
if let Some((_, _, _, size)) = new_self.pop_memory().await {
new_self.decrease_usage(size as u64);
@@ -77,7 +118,9 @@ impl<InternalCacheImpl: 'static + InternalMemoryCache> MemoryCache<InternalCache
}
#[async_trait]
impl<InternalCacheImpl: InternalMemoryCache> Cache for MemoryCache<InternalCacheImpl> {
impl<InternalCacheImpl: InternalMemoryCache, InnerCache: Cache> Cache
for MemoryCache<InternalCacheImpl, InnerCache>
{
#[inline]
async fn get(
&self,
@@ -167,47 +210,3 @@ impl<InternalCacheImpl: InternalMemoryCache> Cache for MemoryCache<InternalCache
.map(|(key, (bytes, metadata, size))| (key, bytes, metadata, size))
}
}
impl InternalMemoryCache for Lfu {
#[inline]
fn unbounded() -> Self {
Self::unbounded()
}
#[inline]
fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
self.get(key)
}
#[inline]
fn push(&mut self, key: CacheKey, data: CacheValue) {
self.insert(key, data);
}
#[inline]
fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
self.pop_lfu_key_value()
}
}
impl InternalMemoryCache for Lru {
#[inline]
fn unbounded() -> Self {
Self::unbounded()
}
#[inline]
fn get(&mut self, key: &CacheKey) -> Option<&CacheValue> {
self.get(key)
}
#[inline]
fn push(&mut self, key: CacheKey, data: CacheValue) {
self.put(key, data);
}
#[inline]
fn pop(&mut self) -> Option<(CacheKey, CacheValue)> {
self.pop_lru()
}
}
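
The bigger rework in this file: MemoryCache gains a second type parameter and owns its inner cache directly, and new() takes that cache as an argument instead of constructing a DiskCache itself. That removes the Box<dyn Cache> hop and decouples the memory layer from any particular backing store. A compiling skeleton of the new shape, with the trait and all internals stubbed out:

use std::sync::Arc;

trait Cache: Send + Sync {}

struct DiskCache;
impl Cache for DiskCache {}
impl<T: Cache> Cache for Arc<T> {}

// Simplified MemoryCache: the inner cache is a type parameter, so an
// Arc<DiskCache> slots in directly, with no trait object in between.
#[allow(dead_code)] // sketch only; fields are never read here
struct MemoryCache<InternalCacheImpl, InnerCache> {
    inner: InnerCache,
    mem_cache: InternalCacheImpl,
}

impl<I: Default, C: Cache> MemoryCache<I, C> {
    // The inner cache is injected rather than built in here.
    fn new(inner: C) -> Arc<Self> {
        Arc::new(Self {
            inner,
            mem_cache: I::default(),
        })
    }
}

fn main() {
    let disk = Arc::new(DiskCache);
    let _cache: Arc<MemoryCache<Vec<u8>, _>> = MemoryCache::new(disk);
}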

src/cache/mod.rs (72 changes)

@@ -205,6 +205,76 @@ pub trait Cache: Send + Sync {
async fn pop_memory(&self) -> Option<(CacheKey, Bytes, ImageMetadata, usize)>;
}
#[async_trait]
impl<T: Cache> Cache for std::sync::Arc<T> {
#[inline]
async fn get(
&self,
key: &CacheKey,
) -> Option<Result<(CacheStream, ImageMetadata), CacheError>> {
self.as_ref().get(key).await
}
#[inline]
async fn put(
&self,
key: CacheKey,
image: BoxedImageStream,
metadata: ImageMetadata,
) -> Result<CacheStream, CacheError> {
self.as_ref().put(key, image, metadata).await
}
#[inline]
fn increase_usage(&self, amt: u32) {
self.as_ref().increase_usage(amt)
}
#[inline]
fn decrease_usage(&self, amt: u64) {
self.as_ref().decrease_usage(amt)
}
#[inline]
fn on_disk_size(&self) -> u64 {
self.as_ref().on_disk_size()
}
#[inline]
fn mem_size(&self) -> u64 {
self.as_ref().mem_size()
}
#[inline]
async fn put_with_on_completed_callback(
&self,
key: CacheKey,
image: BoxedImageStream,
metadata: ImageMetadata,
on_complete: Sender<(CacheKey, Bytes, ImageMetadata, usize)>,
) -> Result<CacheStream, CacheError> {
self.as_ref()
.put_with_on_completed_callback(key, image, metadata, on_complete)
.await
}
#[inline]
async fn put_internal(
&self,
key: CacheKey,
image: Bytes,
metadata: ImageMetadata,
size: usize,
) {
self.as_ref().put_internal(key, image, metadata, size).await
}
#[inline]
async fn pop_memory(&self) -> Option<(CacheKey, Bytes, ImageMetadata, usize)> {
self.as_ref().pop_memory().await
}
}
pub struct CacheStream {
inner: InnerStream,
decrypt: Option<SecretStream<Pull>>,
@@ -215,7 +285,7 @@ impl CacheStream {
Ok(Self {
inner,
decrypt: header
.map(|header| SecretStream::init_pull(&header, &ENCRYPTION_KEY.get().unwrap()))
.map(|header| SecretStream::init_pull(&header, ENCRYPTION_KEY.get().unwrap()))
.transpose()?,
})
}
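
The last hunk here is a needless-borrow fix: assuming ENCRYPTION_KEY is a once-initialized static (the .get() suggests something like once_cell's OnceCell), get() already returns a reference, so the removed & was producing a &&Key that only worked via auto-deref. The same shape in miniature, with a hypothetical byte-array key standing in for the real key type:

use once_cell::sync::OnceCell;

static ENCRYPTION_KEY: OnceCell<[u8; 32]> = OnceCell::new();

// Stand-in for SecretStream::init_pull, which takes the key by reference.
fn init_pull(key: &[u8; 32]) -> u8 {
    key[0]
}

fn main() {
    ENCRYPTION_KEY.set([7u8; 32]).unwrap();
    // Before: init_pull(&ENCRYPTION_KEY.get().unwrap()) passed &&[u8; 32].
    // After: the extra borrow is gone; get() already yields a reference.
    let first = init_pull(ENCRYPTION_KEY.get().unwrap());
    assert_eq!(first, 7);
}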

src/main.rs (12 changes)

@@ -27,7 +27,8 @@ use state::{RwLockServerState, ServerState};
use stop::send_stop;
use thiserror::Error;
use crate::cache::{mem, MemoryCache, ENCRYPTION_KEY};
use crate::cache::mem::{Lfu, Lru};
use crate::cache::{MemoryCache, ENCRYPTION_KEY};
use crate::config::UnstableOptions;
use crate::state::DynamicServerCert;
@@ -145,12 +146,13 @@ async fn main() -> Result<(), Box<dyn Error>> {
}
});
let cache: Arc<Box<dyn Cache>> = if low_mem_mode {
DiskCache::new(disk_quota, cache_path.clone()).await
let cache = DiskCache::new(disk_quota, cache_path.clone()).await;
let cache: Arc<dyn Cache> = if low_mem_mode {
cache
} else if use_lfu {
MemoryCache::<mem::Lfu>::new(disk_quota, cache_path.clone(), memory_max_size).await
MemoryCache::<Lfu, _>::new(cache, memory_max_size).await
} else {
MemoryCache::<mem::Lru>::new(disk_quota, cache_path.clone(), memory_max_size).await
MemoryCache::<Lru, _>::new(cache, memory_max_size).await
};
let cache_0 = Arc::clone(&cache);
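
Net effect in main: the DiskCache is built unconditionally, optionally wrapped in a MemoryCache, and the type is erased exactly once at the let binding via the unsized coercion to Arc<dyn Cache>. A minimal sketch of that coercion:

use std::sync::Arc;

trait Cache: Send + Sync {}

struct DiskCache;
impl Cache for DiskCache {}

fn main() {
    let disk = Arc::new(DiskCache);
    // Arc<DiskCache> coerces to Arc<dyn Cache> here, confining dynamic
    // dispatch to this one binding instead of threading Box<dyn Cache>
    // through every layer.
    let cache: Arc<dyn Cache> = disk;
    let _cache_0 = Arc::clone(&cache);
}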