Don't use Arc for cache keys

feature/v32-tokens
Edward Shen 2021-04-22 21:46:34 -04:00
parent 8d95fe3f07
commit a86cd3edf5
Signed by: edward
GPG Key ID: 19182661E818369F
3 changed files with 18 additions and 24 deletions

24
src/cache/low_mem.rs vendored
View File

@ -8,11 +8,10 @@ use std::sync::Arc;
use async_trait::async_trait;
use futures::StreamExt;
use log::{warn, LevelFilter};
use sqlx::{sqlite::SqliteConnectOptions, ConnectOptions, SqlitePool};
use tokio::{
fs::remove_file,
sync::mpsc::{channel, Sender},
};
use sqlx::sqlite::SqliteConnectOptions;
use sqlx::{ConnectOptions, SqlitePool};
use tokio::fs::remove_file;
use tokio::sync::mpsc::{channel, Sender};
use tokio_stream::wrappers::ReceiverStream;
use super::{BoxedImageStream, Cache, CacheError, CacheKey, CacheStream, ImageMetadata};
@ -63,7 +62,8 @@ impl LowMemCache {
// item that was put into the cache.
let new_self_0 = Arc::clone(&new_self);
// Spawn a new task that will listen for updates to the db.
// Spawn a new task that will listen for updates to the db, pruning if
// the size becomes too large
tokio::spawn(async move {
let db_pool = db_pool;
let max_on_disk_size = disk_max_size / 20 * 19;
@ -134,15 +134,11 @@ impl LowMemCache {
impl Cache for LowMemCache {
async fn get(
&self,
key: Arc<CacheKey>,
key: &CacheKey,
) -> Option<Result<(CacheStream, ImageMetadata), CacheError>> {
let channel = self.db_update_channel_sender.clone();
let path = Arc::new(
self.disk_path
.clone()
.join(PathBuf::from(Arc::clone(&key).as_ref())),
);
let path = Arc::new(self.disk_path.clone().join(PathBuf::from(key)));
let path_0 = Arc::clone(&path);
tokio::spawn(async move { channel.send(DbMessage::Get(path_0)).await });
@ -154,13 +150,13 @@ impl Cache for LowMemCache {
async fn put(
&self,
key: Arc<CacheKey>,
key: &CacheKey,
image: BoxedImageStream,
metadata: ImageMetadata,
) -> Result<CacheStream, CacheError> {
let channel = self.db_update_channel_sender.clone();
let path = Arc::new(self.disk_path.clone().join(PathBuf::from(key.as_ref())));
let path = Arc::new(self.disk_path.clone().join(PathBuf::from(key)));
let path_0 = Arc::clone(&path);
let db_callback = |size: u32| async move {

10
src/cache/mod.rs vendored
View File

@ -1,8 +1,8 @@
use std::fmt::Display;
use std::path::PathBuf;
use std::pin::Pin;
use std::str::FromStr;
use std::task::{Context, Poll};
use std::{fmt::Display, sync::Arc};
use actix_web::http::HeaderValue;
use async_trait::async_trait;
@ -152,14 +152,12 @@ pub enum CacheError {
#[async_trait]
pub trait Cache: Send + Sync {
async fn get(
&self,
key: Arc<CacheKey>,
) -> Option<Result<(CacheStream, ImageMetadata), CacheError>>;
async fn get(&self, key: &CacheKey)
-> Option<Result<(CacheStream, ImageMetadata), CacheError>>;
async fn put(
&self,
key: Arc<CacheKey>,
key: &CacheKey,
image: BoxedImageStream,
metadata: ImageMetadata,
) -> Result<CacheStream, CacheError>;

View File

@ -1,4 +1,4 @@
use std::sync::{atomic::Ordering, Arc};
use std::sync::atomic::Ordering;
use actix_web::http::header::{
ACCESS_CONTROL_ALLOW_ORIGIN, ACCESS_CONTROL_EXPOSE_HEADERS, CACHE_CONTROL, CONTENT_LENGTH,
@ -191,9 +191,9 @@ async fn fetch_image(
file_name: String,
is_data_saver: bool,
) -> ServerResponse {
let key = Arc::new(CacheKey(chapter_hash, file_name, is_data_saver));
let key = CacheKey(chapter_hash, file_name, is_data_saver);
match cache.get(Arc::clone(&key)).await {
match cache.get(&key).await {
Some(Ok((image, metadata))) => {
return construct_response(image, &metadata);
}
@ -263,7 +263,7 @@ async fn fetch_image(
let metadata = ImageMetadata::new(content_type, length, last_mod).unwrap();
let stream = {
match cache.put(key, Box::new(body), metadata).await {
match cache.put(&key, Box::new(body), metadata).await {
Ok(stream) => stream,
Err(e) => {
warn!("Failed to insert into cache: {}", e);