Compare commits

...

2 commits

Author  SHA1        Message                    Date
        041760f9e9  clippy                     2021-07-15 19:13:31 -04:00
        87271c85a7  Fix deleting legacy names  2021-07-15 19:03:39 -04:00
5 changed files with 123 additions and 43 deletions

107
src/cache/disk.rs vendored
View file

@@ -13,6 +13,7 @@ use futures::StreamExt;
 use log::LevelFilter;
 use md5::digest::generic_array::GenericArray;
 use md5::{Digest, Md5};
+use sodiumoxide::hex;
 use sqlx::sqlite::SqliteConnectOptions;
 use sqlx::{ConnectOptions, Sqlite, SqlitePool, Transaction};
 use tokio::fs::{remove_file, rename, File};
@@ -185,12 +186,34 @@ async fn db_listener(
             for item in items {
                 debug!("deleting file due to exceeding cache size");
                 size_freed += item.size as u64;
-                tokio::spawn(async move {
-                    let key = item.id;
-                    if let Err(e) = remove_file(key.clone()).await {
-                        match e.kind() {
-                            std::io::ErrorKind::NotFound => {
-                                let hash = Md5Hash(*GenericArray::from_slice(key.as_bytes()));
+                tokio::spawn(remove_file_handler(item.id));
+            }
+
+            cache.disk_cur_size.fetch_sub(size_freed, Ordering::Release);
+        }
+    }
+}
+
+/// Returns if a file was successfully deleted.
+async fn remove_file_handler(key: String) -> bool {
+    let error = if let Err(e) = remove_file(&key).await {
+        e
+    } else {
+        return true;
+    };
+
+    if error.kind() != std::io::ErrorKind::NotFound {
+        warn!("Failed to delete file `{}` from cache: {}", &key, error);
+        return false;
+    }
+
+    if let Ok(bytes) = hex::decode(&key) {
+        if bytes.len() != 16 {
+            warn!("Failed to delete file `{}`; invalid hash size.", &key);
+            return false;
+        }
+
+        let hash = Md5Hash(*GenericArray::from_slice(&bytes));
         let path: PathBuf = hash.into();
         if let Err(e) = remove_file(&path).await {
             warn!(
@@ -198,18 +221,13 @@ async fn db_listener(
                 path.to_string_lossy(),
                 e
             );
-                                }
-                            }
-                            _ => {
-                                warn!("Failed to delete file `{}` from cache: {}", &key, e);
-                            }
-                        }
-                    }
-                });
-            }
-
-            cache.disk_cur_size.fetch_sub(size_freed, Ordering::Release);
-        }
-    }
-}
+            false
+        } else {
+            true
+        }
+    } else {
+        warn!("Failed to delete file `{}`; not a md5hash.", &key);
+        false
+    }
+}
@@ -384,6 +402,61 @@ impl CallbackCache for DiskCache {
     }
 }
 
+#[cfg(test)]
+mod remove_file_handler {
+    use std::error::Error;
+
+    use tempfile::tempdir;
+    use tokio::fs::{create_dir_all, remove_dir_all};
+
+    use super::*;
+
+    #[tokio::test]
+    async fn should_not_panic_on_invalid_path() {
+        assert!(!remove_file_handler("/this/is/a/non-existent/path/".to_string()).await);
+    }
+
+    #[tokio::test]
+    async fn should_not_panic_on_invalid_hash() {
+        assert!(!remove_file_handler("68b329da9893e34099c7d8ad5cb9c940".to_string()).await);
+    }
+
+    #[tokio::test]
+    async fn should_not_panic_on_malicious_hashes() {
+        assert!(!remove_file_handler("68b329da9893e34".to_string()).await);
+        assert!(
+            !remove_file_handler("68b329da9893e34099c7d8ad5cb9c940aaaaaaaaaaaaaaaaaa".to_string())
+                .await
+        );
+    }
+
+    #[tokio::test]
+    async fn should_delete_existing_file() -> Result<(), Box<dyn Error>> {
+        let temp_dir = tempdir()?;
+        let mut dir_path = temp_dir.path().to_path_buf();
+        dir_path.push("abc123.png");
+
+        // create a file, it can be empty
+        File::create(&dir_path).await?;
+
+        assert!(remove_file_handler(dir_path.to_string_lossy().into_owned()).await);
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn should_delete_existing_hash() -> Result<(), Box<dyn Error>> {
+        create_dir_all("b/8/6").await?;
+        File::create("b/8/6/68b329da9893e34099c7d8ad5cb9c900").await?;
+
+        assert!(remove_file_handler("68b329da9893e34099c7d8ad5cb9c900".to_string()).await);
+
+        remove_dir_all("b").await?;
+
+        Ok(())
+    }
+}
+
 #[cfg(test)]
 mod disk_cache {
     use std::error::Error;
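Note on the should_delete_existing_hash fixture above: it suggests that the crate's Md5Hash -> PathBuf conversion nests a cached file under the first three hex characters of the digest in reverse order (68b329da... ends up at b/8/6/68b329da...). A minimal standalone sketch of that assumed mapping; the helper name legacy_hash_path is illustrative and not part of this change, and the layout is inferred only from that one test fixture:

    use std::path::PathBuf;

    /// Assumed layout, inferred from the fixture
    /// "b/8/6/68b329da9893e34099c7d8ad5cb9c900" in the test above.
    fn legacy_hash_path(hex_digest: &str) -> Option<PathBuf> {
        // Mirror the guards in remove_file_handler: 32 hex chars == 16 bytes.
        if hex_digest.len() != 32 || !hex_digest.chars().all(|c| c.is_ascii_hexdigit()) {
            return None;
        }
        let mut path = PathBuf::new();
        // First three hex characters, reversed, become nested directories.
        path.push(&hex_digest[2..3]);
        path.push(&hex_digest[1..2]);
        path.push(&hex_digest[0..1]);
        path.push(hex_digest);
        Some(path)
    }

    fn main() {
        assert_eq!(
            legacy_hash_path("68b329da9893e34099c7d8ad5cb9c900"),
            Some(PathBuf::from("b/8/6/68b329da9893e34099c7d8ad5cb9c900"))
        );
    }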

4
src/cache/fs.rs vendored
View file

@@ -48,8 +48,8 @@ use super::{CacheKey, CacheStream, ImageMetadata, ENCRYPTION_KEY};
 pub(super) async fn read_file(
     file: File,
 ) -> Option<Result<(CacheStream, Option<XNonce>, ImageMetadata), std::io::Error>> {
-    let mut file_0 = file.try_clone().await.unwrap();
-    let file_1 = file.try_clone().await.unwrap();
+    let mut file_0 = file.try_clone().await.ok()?;
+    let file_1 = file.try_clone().await.ok()?;
 
     // Try reading decrypted header first...
     let mut deserializer = serde_json::Deserializer::from_reader(file.into_std().await);
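The read_file change above swaps unwrap() for ok()?, so a failed try_clone surfaces as an early None instead of a panic. A minimal standalone illustration of that idiom; the clone_pair helper and the /dev/null path exist only for the example:

    use std::fs::File;
    use std::io;

    // `.ok()?` converts an Err into an early `None`, mirroring read_file above.
    fn clone_pair(file: &File) -> Option<(File, File)> {
        let first = file.try_clone().ok()?;
        let second = file.try_clone().ok()?;
        Some((first, second))
    }

    fn main() -> io::Result<()> {
        let file = File::open("/dev/null")?;
        assert!(clone_pair(&file).is_some());
        Ok(())
    }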

View file

@@ -73,11 +73,11 @@ pub fn load_config() -> Result<Config, ConfigError> {
         Ordering::Release,
     );
 
-    config.proxy.clone().map(|socket| {
+    if let Some(socket) = config.proxy.clone() {
         USE_PROXY
             .set(socket)
             .expect("USE_PROXY to be set only by this function");
-    });
+    }
 
     DISABLE_CERT_VALIDATION.store(
         config
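The load_config change above replaces an Option::map call used only for its side effect with if let, the shape clippy's option_map_unit_fn lint asks for. A small standalone before/after sketch; the proxy value is made up for the example:

    fn main() {
        let proxy: Option<String> = Some("socks5://localhost:1080".to_string());

        // Before: `map` runs the closure for its side effect and discards the unit result.
        // proxy.clone().map(|socket| println!("using proxy {socket}"));

        // After: `if let` states the intent directly and satisfies the lint.
        if let Some(socket) = proxy {
            println!("using proxy {socket}");
        }
    }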

View file

@@ -104,20 +104,20 @@ pub async fn load_geo_ip_data(license_key: ClientSecret) -> Result<(), DbLoadError> {
     // Check date of db
     let db_date_created = metadata(DB_PATH)
         .ok()
-        .and_then(|metadata| match metadata.created() {
-            Ok(time) => Some(time),
-            Err(_) => {
+        .and_then(|metadata| {
+            if let Ok(time) = metadata.created() {
+                Some(time)
+            } else {
                 debug!("fs didn't report birth time, fall back to last modified instead");
                 metadata.modified().ok()
             }
         })
         .unwrap_or(SystemTime::UNIX_EPOCH);
 
-    let duration = match SystemTime::now().duration_since(db_date_created) {
-        Ok(time) => Duration::from_std(time).expect("duration to fit"),
-        Err(_) => {
+    let duration = if let Ok(time) = SystemTime::now().duration_since(db_date_created) {
+        Duration::from_std(time).expect("duration to fit")
+    } else {
         warn!("Clock may have gone backwards?");
         Duration::max_value()
-        }
     };
 
     // DB expired, fetch a new one
@@ -172,14 +172,12 @@ async fn fetch_db(license_key: ClientSecret) -> Result<(), DbLoadError> {
 }
 
 pub fn record_country_visit(country: Option<Country>) {
-    let iso_code = if let Some(country) = country {
+    let iso_code = country.map_or("unknown", |country| {
         country
             .country
             .and_then(|c| c.iso_code)
             .unwrap_or("unknown")
-    } else {
-        "unknown"
-    };
+    });
 
     COUNTRY_VISIT_COUNTER
         .get_metric_with_label_values(&[iso_code])

View file

@@ -146,9 +146,10 @@ impl ServerState {
     pub fn init_offline() -> Self {
         assert!(OFFLINE_MODE.load(Ordering::Acquire));
         Self {
-            precomputed_key: PrecomputedKey::from_slice(&[41; PRECOMPUTEDKEYBYTES]).unwrap(),
-            image_server: Url::from_file_path("/dev/null").unwrap(),
-            url: Url::from_str("http://localhost").unwrap(),
+            precomputed_key: PrecomputedKey::from_slice(&[41; PRECOMPUTEDKEYBYTES])
+                .expect("expect offline config to work"),
+            image_server: Url::from_file_path("/dev/null").expect("expect offline config to work"),
+            url: Url::from_str("http://localhost").expect("expect offline config to work"),
             url_overridden: false,
         }
     }
@@ -163,8 +164,16 @@ impl ResolvesServerCert for DynamicServerCert {
         // TODO: wait for actix-web to use a new version of rustls so we can
         // remove cloning the certs all the time
         Some(CertifiedKey {
-            cert: TLS_CERTS.get().unwrap().load().as_ref().clone(),
-            key: TLS_SIGNING_KEY.get().unwrap().load_full(),
+            cert: TLS_CERTS
+                .get()
+                .expect("tls cert to exist")
+                .load()
+                .as_ref()
+                .clone(),
+            key: TLS_SIGNING_KEY
+                .get()
+                .expect("tls signing key to exist")
+                .load_full(),
             ocsp: None,
             sct_list: None,
         })