Compare commits
No commits in common. "041760f9e9fcd4345d3623f2ef6335afe2bf7ae9" and "3e4260f6e16060d0aebe87ad1a95290b536f0879" have entirely different histories.
041760f9e9 ... 3e4260f6e1
5 changed files with 43 additions and 123 deletions
src/cache/disk.rs (vendored): 115 changes
@@ -13,7 +13,6 @@ use futures::StreamExt;
 use log::LevelFilter;
 use md5::digest::generic_array::GenericArray;
 use md5::{Digest, Md5};
-use sodiumoxide::hex;
 use sqlx::sqlite::SqliteConnectOptions;
 use sqlx::{ConnectOptions, Sqlite, SqlitePool, Transaction};
 use tokio::fs::{remove_file, rename, File};
@@ -186,7 +185,27 @@ async fn db_listener(
         for item in items {
             debug!("deleting file due to exceeding cache size");
             size_freed += item.size as u64;
-            tokio::spawn(remove_file_handler(item.id));
+            tokio::spawn(async move {
+                let key = item.id;
+                if let Err(e) = remove_file(key.clone()).await {
+                    match e.kind() {
+                        std::io::ErrorKind::NotFound => {
+                            let hash = Md5Hash(*GenericArray::from_slice(key.as_bytes()));
+                            let path: PathBuf = hash.into();
+                            if let Err(e) = remove_file(&path).await {
+                                warn!(
+                                    "Failed to delete file `{}` from cache: {}",
+                                    path.to_string_lossy(),
+                                    e
+                                );
+                            }
+                        }
+                        _ => {
+                            warn!("Failed to delete file `{}` from cache: {}", &key, e);
+                        }
+                    }
+                }
+            });
         }

         cache.disk_cur_size.fetch_sub(size_freed, Ordering::Release);
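Note that the inline replacement passes `key.as_bytes()` straight into `GenericArray::from_slice`, while the handler it replaces (deleted in the next hunk) hex-decoded the key and checked for exactly 16 bytes first. A minimal sketch of the length invariant in play, assuming `Md5Hash` wraps a 16-byte `GenericArray` as the MD5 digest size suggests; `from_slice` panics when the slice length does not match:

    use md5::digest::generic_array::{typenum::U16, GenericArray};

    fn main() {
        // A raw 16-byte digest satisfies the invariant...
        let raw = [0u8; 16];
        let _ok: &GenericArray<u8, U16> = GenericArray::from_slice(&raw);

        // ...but a 32-character hex key is 32 bytes as UTF-8, so this would panic:
        // let key = "68b329da9893e34099c7d8ad5cb9c940";
        // let _panics: &GenericArray<u8, U16> = GenericArray::from_slice(key.as_bytes());
    }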
@@ -194,43 +213,6 @@ async fn db_listener(
     }
 }
-
-/// Returns if a file was successfully deleted.
-async fn remove_file_handler(key: String) -> bool {
-    let error = if let Err(e) = remove_file(&key).await {
-        e
-    } else {
-        return true;
-    };
-
-    if error.kind() != std::io::ErrorKind::NotFound {
-        warn!("Failed to delete file `{}` from cache: {}", &key, error);
-        return false;
-    }
-
-    if let Ok(bytes) = hex::decode(&key) {
-        if bytes.len() != 16 {
-            warn!("Failed to delete file `{}`; invalid hash size.", &key);
-            return false;
-        }
-
-        let hash = Md5Hash(*GenericArray::from_slice(&bytes));
-        let path: PathBuf = hash.into();
-        if let Err(e) = remove_file(&path).await {
-            warn!(
-                "Failed to delete file `{}` from cache: {}",
-                path.to_string_lossy(),
-                e
-            );
-            false
-        } else {
-            true
-        }
-    } else {
-        warn!("Failed to delete file `{}`; not a md5hash.", &key);
-        false
-    }
-}
-
 #[instrument(level = "debug", skip(transaction))]
 async fn handle_db_get(entry: &Path, transaction: &mut Transaction<'_, Sqlite>) {
     let key = entry.as_os_str().to_str();
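The guard the deleted handler ran before building an `Md5Hash` is easy to reproduce standalone. A sketch using the common `hex` crate for illustration; the deleted code used `sodiumoxide::hex`, which serves the same purpose here:

    // Decode the hex key and require exactly 16 bytes (one MD5 digest).
    fn is_md5_key(key: &str) -> bool {
        match hex::decode(key) {
            Ok(bytes) => bytes.len() == 16,
            Err(_) => false,
        }
    }

    fn main() {
        assert!(is_md5_key("68b329da9893e34099c7d8ad5cb9c940"));
        assert!(!is_md5_key("68b329da9893e34")); // odd length, too short
        assert!(!is_md5_key("not hex at all"));
    }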
@@ -402,61 +384,6 @@ impl CallbackCache for DiskCache {
     }
 }
-
-#[cfg(test)]
-mod remove_file_handler {
-
-    use std::error::Error;
-
-    use tempfile::tempdir;
-    use tokio::fs::{create_dir_all, remove_dir_all};
-
-    use super::*;
-
-    #[tokio::test]
-    async fn should_not_panic_on_invalid_path() {
-        assert!(!remove_file_handler("/this/is/a/non-existent/path/".to_string()).await);
-    }
-
-    #[tokio::test]
-    async fn should_not_panic_on_invalid_hash() {
-        assert!(!remove_file_handler("68b329da9893e34099c7d8ad5cb9c940".to_string()).await);
-    }
-
-    #[tokio::test]
-    async fn should_not_panic_on_malicious_hashes() {
-        assert!(!remove_file_handler("68b329da9893e34".to_string()).await);
-        assert!(
-            !remove_file_handler("68b329da9893e34099c7d8ad5cb9c940aaaaaaaaaaaaaaaaaa".to_string())
-                .await
-        );
-    }
-
-    #[tokio::test]
-    async fn should_delete_existing_file() -> Result<(), Box<dyn Error>> {
-        let temp_dir = tempdir()?;
-        let mut dir_path = temp_dir.path().to_path_buf();
-        dir_path.push("abc123.png");
-
-        // create a file, it can be empty
-        File::create(&dir_path).await?;
-
-        assert!(remove_file_handler(dir_path.to_string_lossy().into_owned()).await);
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn should_delete_existing_hash() -> Result<(), Box<dyn Error>> {
-        create_dir_all("b/8/6").await?;
-        File::create("b/8/6/68b329da9893e34099c7d8ad5cb9c900").await?;
-
-        assert!(remove_file_handler("68b329da9893e34099c7d8ad5cb9c900".to_string()).await);
-
-        remove_dir_all("b").await?;
-        Ok(())
-    }
-}
-
 #[cfg(test)]
 mod disk_cache {
     use std::error::Error;
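The deleted `should_delete_existing_hash` test documents the on-disk layout: hash `68b329da9893e34099c7d8ad5cb9c900` lives at `b/8/6/68b329da9893e34099c7d8ad5cb9c900`, i.e. the first three hex digits, in reverse order, become nested directories. A hypothetical standalone version of the `Md5Hash` to `PathBuf` conversion inferred from that test (the repo's real `From` impl is not shown in this diff and may differ):

    use std::path::PathBuf;

    // Fan the first three hex digits out into nested directories, reversed,
    // as the deleted test implies.
    fn hash_to_path(hex: &str) -> PathBuf {
        let b = hex.as_bytes();
        PathBuf::from(format!(
            "{}/{}/{}/{}",
            b[2] as char, b[1] as char, b[0] as char, hex
        ))
    }

    fn main() {
        assert_eq!(
            hash_to_path("68b329da9893e34099c7d8ad5cb9c900"),
            PathBuf::from("b/8/6/68b329da9893e34099c7d8ad5cb9c900")
        );
    }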
src/cache/fs.rs (vendored): 4 changes
@@ -48,8 +48,8 @@ use super::{CacheKey, CacheStream, ImageMetadata, ENCRYPTION_KEY};
 pub(super) async fn read_file(
     file: File,
 ) -> Option<Result<(CacheStream, Option<XNonce>, ImageMetadata), std::io::Error>> {
-    let mut file_0 = file.try_clone().await.ok()?;
-    let file_1 = file.try_clone().await.ok()?;
+    let mut file_0 = file.try_clone().await.unwrap();
+    let file_1 = file.try_clone().await.unwrap();

     // Try reading decrypted header first...
     let mut deserializer = serde_json::Deserializer::from_reader(file.into_std().await);
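The change from `.ok()?` to `.unwrap()` swaps silent propagation for a panic: `.ok()?` turns an `Err` from `try_clone` into an early `return None` from `read_file`, while `.unwrap()` aborts the calling task. A minimal sketch of the two shapes using std's synchronous `File`:

    use std::fs::File;

    // Err => the whole function returns None.
    fn open_quietly(path: &str) -> Option<File> {
        let file = File::open(path).ok()?;
        Some(file)
    }

    // Err => panic with the io::Error's Debug output.
    fn open_or_panic(path: &str) -> File {
        File::open(path).unwrap()
    }

    fn main() {
        assert!(open_quietly("/definitely/not/a/real/path").is_none());
        let _ = open_or_panic; // would panic on the same path
    }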
@@ -73,11 +73,11 @@ pub fn load_config() -> Result<Config, ConfigError> {
         Ordering::Release,
     );

-    if let Some(socket) = config.proxy.clone() {
+    config.proxy.clone().map(|socket| {
         USE_PROXY
             .set(socket)
             .expect("USE_PROXY to be set only by this function");
-    }
+    });

     DISABLE_CERT_VALIDATION.store(
         config
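Both forms run the same side effect when a proxy is configured; the `map` version just discards the resulting `Option<()>` (a pattern Clippy flags with its `option_map_unit_fn` lint, if memory serves). A small sketch of the equivalence, with a made-up proxy value:

    fn main() {
        let proxy: Option<String> = Some("socks5://localhost:1080".into());

        // The usual idiom for side effects:
        if let Some(socket) = proxy.clone() {
            println!("using proxy {}", socket);
        }

        // Equivalent, but produces an unused Option<()>:
        let _ = proxy.map(|socket| println!("using proxy {}", socket));
    }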
@@ -104,20 +104,20 @@ pub async fn load_geo_ip_data(license_key: ClientSecret) -> Result<(), DbLoadError> {
     // Check date of db
     let db_date_created = metadata(DB_PATH)
         .ok()
-        .and_then(|metadata| {
-            if let Ok(time) = metadata.created() {
-                Some(time)
-            } else {
+        .and_then(|metadata| match metadata.created() {
+            Ok(time) => Some(time),
+            Err(_) => {
                 debug!("fs didn't report birth time, fall back to last modified instead");
                 metadata.modified().ok()
             }
         })
         .unwrap_or(SystemTime::UNIX_EPOCH);
-    let duration = if let Ok(time) = SystemTime::now().duration_since(db_date_created) {
-        Duration::from_std(time).expect("duration to fit")
-    } else {
+    let duration = match SystemTime::now().duration_since(db_date_created) {
+        Ok(time) => Duration::from_std(time).expect("duration to fit"),
+        Err(_) => {
             warn!("Clock may have gone backwards?");
             Duration::max_value()
+        }
     };

     // DB expired, fetch a new one
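`SystemTime::duration_since` fails when its argument is later than `self`, which is why the rewritten `match` keeps an explicit `Err` arm for clocks that moved backwards. A minimal sketch with std types only:

    use std::time::{Duration, SystemTime};

    fn main() {
        let earlier = SystemTime::now();
        // Ok(elapsed) normally; Err(_) if the clock went backwards past `earlier`.
        let elapsed = SystemTime::now()
            .duration_since(earlier)
            .unwrap_or(Duration::ZERO);
        println!("{:?}", elapsed);
    }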
@@ -172,12 +172,14 @@ async fn fetch_db(license_key: ClientSecret) -> Result<(), DbLoadError> {
     }
 }

 pub fn record_country_visit(country: Option<Country>) {
-    let iso_code = country.map_or("unknown", |country| {
+    let iso_code = if let Some(country) = country {
         country
             .country
             .and_then(|c| c.iso_code)
             .unwrap_or("unknown")
-    });
+    } else {
+        "unknown"
+    };

     COUNTRY_VISIT_COUNTER
         .get_metric_with_label_values(&[iso_code])
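For this shape the two spellings are interchangeable: `map_or` takes the default first and the closure second. A small sketch of the equivalence with a plain `Option<&str>`:

    fn main() {
        let country: Option<&str> = None;

        let via_map_or = country.map_or("unknown", |c| c);
        let via_if_let = if let Some(c) = country { c } else { "unknown" };

        assert_eq!(via_map_or, via_if_let);
    }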
src/state.rs: 19 changes
@@ -146,10 +146,9 @@ impl ServerState {
     pub fn init_offline() -> Self {
         assert!(OFFLINE_MODE.load(Ordering::Acquire));
         Self {
-            precomputed_key: PrecomputedKey::from_slice(&[41; PRECOMPUTEDKEYBYTES])
-                .expect("expect offline config to work"),
-            image_server: Url::from_file_path("/dev/null").expect("expect offline config to work"),
-            url: Url::from_str("http://localhost").expect("expect offline config to work"),
+            precomputed_key: PrecomputedKey::from_slice(&[41; PRECOMPUTEDKEYBYTES]).unwrap(),
+            image_server: Url::from_file_path("/dev/null").unwrap(),
+            url: Url::from_str("http://localhost").unwrap(),
             url_overridden: false,
         }
     }
@@ -164,16 +163,8 @@ impl ResolvesServerCert for DynamicServerCert {
         // TODO: wait for actix-web to use a new version of rustls so we can
         // remove cloning the certs all the time
         Some(CertifiedKey {
-            cert: TLS_CERTS
-                .get()
-                .expect("tls cert to exist")
-                .load()
-                .as_ref()
-                .clone(),
-            key: TLS_SIGNING_KEY
-                .get()
-                .expect("tls signing key to exist")
-                .load_full(),
+            cert: TLS_CERTS.get().unwrap().load().as_ref().clone(),
+            key: TLS_SIGNING_KEY.get().unwrap().load_full(),
             ocsp: None,
             sct_list: None,
         })
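The collapsed chains keep the `load()` / `load_full()` distinction. A minimal sketch assuming the statics wrap `arc_swap::ArcSwap`, as the method names suggest (the actual types in state.rs are not shown in this diff):

    use std::sync::Arc;

    use arc_swap::ArcSwap;

    fn main() {
        let certs: ArcSwap<Vec<u8>> = ArcSwap::from_pointee(vec![1, 2, 3]);

        // load() hands back a cheap temporary guard; cloning as_ref() copies the data.
        let owned: Vec<u8> = certs.load().as_ref().clone();

        // load_full() bumps the refcount and returns an owned Arc instead.
        let shared: Arc<Vec<u8>> = certs.load_full();

        assert_eq!(owned, *shared);
    }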