Fix deleting legacy names

parent 3e4260f6e1
commit 87271c85a7

3 changed files with 111 additions and 28 deletions
src/cache/disk.rs (vendored): 94 changes

@@ -13,6 +13,7 @@ use futures::StreamExt;
 use log::LevelFilter;
 use md5::digest::generic_array::GenericArray;
 use md5::{Digest, Md5};
+use sodiumoxide::hex;
 use sqlx::sqlite::SqliteConnectOptions;
 use sqlx::{ConnectOptions, Sqlite, SqlitePool, Transaction};
 use tokio::fs::{remove_file, rename, File};
@@ -185,12 +186,22 @@ async fn db_listener(
     for item in items {
         debug!("deleting file due to exceeding cache size");
         size_freed += item.size as u64;
-        tokio::spawn(async move {
-            let key = item.id;
-            if let Err(e) = remove_file(key.clone()).await {
+        tokio::spawn(remove_file_handler(item.id));
+    }
+
+    cache.disk_cur_size.fetch_sub(size_freed, Ordering::Release);
+}
+
+/// Returns if a file was successfully deleted.
+async fn remove_file_handler(key: String) -> bool {
+    if let Err(e) = remove_file(&key).await {
         match e.kind() {
             std::io::ErrorKind::NotFound => {
-                let hash = Md5Hash(*GenericArray::from_slice(key.as_bytes()));
+                if let Ok(bytes) = hex::decode(&key) {
+                    if bytes.len() == 16 {
+                        let hash = Md5Hash(*GenericArray::from_slice(&bytes));
                         let path: PathBuf = hash.into();
                         if let Err(e) = remove_file(&path).await {
                             warn!(
@@ -198,18 +209,26 @@ async fn db_listener(
                                 path.to_string_lossy(),
                                 e
                             );
+                            false
+                        } else {
+                            true
+                        }
+                    } else {
+                        warn!("Failed to delete file `{}`; invalid hash size.", &key);
+                        false
+                    }
+                } else {
+                    warn!("Failed to delete file `{}`; not a md5hash.", &key);
+                    false
                 }
             }
             _ => {
                 warn!("Failed to delete file `{}` from cache: {}", &key, e);
+                false
             }
         }
-                }
-            });
-        }
-
-        cache.disk_cur_size.fetch_sub(size_freed, Ordering::Release);
-    }
+    } else {
+        true
+    }
 }
 }
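Note: the panic this hunk guards against comes from `GenericArray::from_slice`, which panics when the slice length does not equal the array length. The old code passed `key.as_bytes()` (a 32-byte hex string, or an arbitrary file path) where a 16-byte Md5 digest was required. A minimal standalone sketch of the before/after behavior, using the RustCrypto `md-5` crate and the standalone `hex` crate rather than `sodiumoxide::hex` (otherwise the names are illustrative):

    use md5::digest::consts::U16;
    use md5::digest::generic_array::GenericArray;

    fn main() {
        // A legacy key is the 32-character hex form of a 16-byte Md5 digest.
        let key = "68b329da9893e34099c7d8ad5cb9c940";

        // Old approach: `GenericArray::<u8, U16>::from_slice(key.as_bytes())`
        // would panic here, because the hex string is 32 bytes long, not 16.

        // New approach: decode the hex first and verify the digest length.
        if let Ok(bytes) = hex::decode(key) {
            if bytes.len() == 16 {
                let digest: &GenericArray<u8, U16> = GenericArray::from_slice(&bytes);
                println!("valid md5 digest: {:02x?}", digest);
            }
        }
    }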
@@ -384,6 +403,61 @@ impl CallbackCache for DiskCache {
 }
 }
 
+#[cfg(test)]
+mod remove_file_handler {
+
+    use std::error::Error;
+
+    use tempfile::tempdir;
+    use tokio::fs::{create_dir_all, remove_dir_all};
+
+    use super::*;
+
+    #[tokio::test]
+    async fn should_not_panic_on_invalid_path() {
+        assert!(!remove_file_handler("/this/is/a/non-existent/path/".to_string()).await);
+    }
+
+    #[tokio::test]
+    async fn should_not_panic_on_invalid_hash() {
+        assert!(!remove_file_handler("68b329da9893e34099c7d8ad5cb9c940".to_string()).await);
+    }
+
+    #[tokio::test]
+    async fn should_not_panic_on_malicious_hashes() {
+        assert!(!remove_file_handler("68b329da9893e34".to_string()).await);
+        assert!(
+            !remove_file_handler("68b329da9893e34099c7d8ad5cb9c940aaaaaaaaaaaaaaaaaa".to_string())
+                .await
+        );
+    }
+
+    #[tokio::test]
+    async fn should_delete_existing_file() -> Result<(), Box<dyn Error>> {
+        let temp_dir = tempdir()?;
+        let mut dir_path = temp_dir.path().to_path_buf();
+        dir_path.push("abc123.png");
+
+        // create a file, it can be empty
+        File::create(&dir_path).await?;
+
+        assert!(remove_file_handler(dir_path.to_string_lossy().into_owned()).await);
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn should_delete_existing_hash() -> Result<(), Box<dyn Error>> {
+        create_dir_all("b/8/6").await?;
+        File::create("b/8/6/68b329da9893e34099c7d8ad5cb9c900").await?;
+
+        assert!(remove_file_handler("68b329da9893e34099c7d8ad5cb9c900".to_string()).await);
+
+        remove_dir_all("b").await?;
+        Ok(())
+    }
+}
+
 #[cfg(test)]
 mod disk_cache {
     use std::error::Error;
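Note: `should_delete_existing_hash` pins down the legacy on-disk layout this commit restores support for: the digest `68b329da9893e34099c7d8ad5cb9c900` is stored at `b/8/6/68b329da9893e34099c7d8ad5cb9c900`, i.e. nested under its third, second, and first hex characters. The helper below is a hypothetical reconstruction read off that test fixture alone; the real `From<Md5Hash> for PathBuf` impl lives elsewhere in this file and is not part of the diff.

    use std::path::PathBuf;

    // Hypothetical helper mirroring what the test implies; not the real impl.
    fn legacy_path(hex_digest: &str) -> PathBuf {
        let chars: Vec<char> = hex_digest.chars().collect();
        let mut path = PathBuf::new();
        path.push(chars[2].to_string()); // 'b'
        path.push(chars[1].to_string()); // '8'
        path.push(chars[0].to_string()); // '6'
        path.push(hex_digest);
        path
    }

    fn main() {
        assert_eq!(
            legacy_path("68b329da9893e34099c7d8ad5cb9c900"),
            PathBuf::from("b/8/6/68b329da9893e34099c7d8ad5cb9c900")
        );
    }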
src/cache/fs.rs (vendored): 4 changes

@@ -48,8 +48,8 @@ use super::{CacheKey, CacheStream, ImageMetadata, ENCRYPTION_KEY};
 pub(super) async fn read_file(
     file: File,
 ) -> Option<Result<(CacheStream, Option<XNonce>, ImageMetadata), std::io::Error>> {
-    let mut file_0 = file.try_clone().await.unwrap();
-    let file_1 = file.try_clone().await.unwrap();
+    let mut file_0 = file.try_clone().await.ok()?;
+    let file_1 = file.try_clone().await.ok()?;
 
     // Try reading decrypted header first...
     let mut deserializer = serde_json::Deserializer::from_reader(file.into_std().await);
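Note: in a function returning `Option`, `.ok()?` converts an `Err` into an early `None` where `.unwrap()` would have panicked. A self-contained illustration of the pattern (the function and path here are made up):

    fn first_line_len(path: &str) -> Option<usize> {
        // Err(_) becomes None, and `?` returns it immediately instead of panicking.
        let contents = std::fs::read_to_string(path).ok()?;
        Some(contents.lines().next()?.len())
    }

    fn main() {
        assert_eq!(first_line_len("/definitely/not/a/real/file"), None);
    }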
src/state.rs: 19 changes

@@ -146,9 +146,10 @@ impl ServerState {
     pub fn init_offline() -> Self {
         assert!(OFFLINE_MODE.load(Ordering::Acquire));
         Self {
-            precomputed_key: PrecomputedKey::from_slice(&[41; PRECOMPUTEDKEYBYTES]).unwrap(),
-            image_server: Url::from_file_path("/dev/null").unwrap(),
-            url: Url::from_str("http://localhost").unwrap(),
+            precomputed_key: PrecomputedKey::from_slice(&[41; PRECOMPUTEDKEYBYTES])
+                .expect("expect offline config to work"),
+            image_server: Url::from_file_path("/dev/null").expect("expect offline config to work"),
+            url: Url::from_str("http://localhost").expect("expect offline config to work"),
             url_overridden: false,
         }
     }
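Note: `.expect(...)` and `.unwrap()` both panic on failure; the difference is that `expect` attaches the supplied context to the panic message, which is what this hunk buys. A tiny sketch (values are illustrative):

    fn main() {
        let present: Option<u32> = Some(42);
        assert_eq!(present.expect("offline config to work"), 42); // no panic on Some

        // On None, `.unwrap()` panics with only a generic message, while
        // `.expect("...")` includes the supplied context in the panic output:
        //
        //     let missing: Option<u32> = None;
        //     missing.expect("offline config to work"); // panic mentions the message
    }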
@@ -163,8 +164,16 @@ impl ResolvesServerCert for DynamicServerCert {
         // TODO: wait for actix-web to use a new version of rustls so we can
         // remove cloning the certs all the time
         Some(CertifiedKey {
-            cert: TLS_CERTS.get().unwrap().load().as_ref().clone(),
-            key: TLS_SIGNING_KEY.get().unwrap().load_full(),
+            cert: TLS_CERTS
+                .get()
+                .expect("tls cert to exist")
+                .load()
+                .as_ref()
+                .clone(),
+            key: TLS_SIGNING_KEY
+                .get()
+                .expect("tls signing key to exist")
+                .load_full(),
             ocsp: None,
             sct_list: None,
         })
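Note: the `load()` versus `load_full()` split the TODO alludes to, sketched with the `arc_swap` crate. The `TLS_CERTS`/`TLS_SIGNING_KEY` statics are defined outside this hunk, so their exact types here are an assumption; the sketch only shows the access patterns.

    use arc_swap::ArcSwap;

    fn main() {
        let current = ArcSwap::from_pointee(vec![1u8, 2, 3]);

        // `load` + `as_ref` + `clone` deep-clones the inner value. This is the
        // "cloning the certs all the time" the TODO complains about.
        let owned_copy: Vec<u8> = current.load().as_ref().clone();

        // `load_full` just bumps the Arc's refcount; no deep clone.
        let shared: std::sync::Arc<Vec<u8>> = current.load_full();

        assert_eq!(owned_copy, *shared);
    }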