Compare commits
2 commits: 141b5b257c ... 424dc09ae3

Author | SHA1 | Date
---|---|---
 | 424dc09ae3 |
 | eb8a334a05 |

7 changed files with 84 additions and 32 deletions
@@ -39,3 +39,4 @@ url = { version = "2", features = [ "serde" ] }
 [profile.release]
 lto = true
 codegen-units = 1
+debug = 1
src/cache/disk_cache.rs (vendored), 49 lines changed
@@ -8,7 +8,7 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use bytes::Bytes;
 use futures::StreamExt;
-use log::{warn, LevelFilter};
+use log::{error, warn, LevelFilter};
 use sqlx::sqlite::SqliteConnectOptions;
 use sqlx::{ConnectOptions, SqlitePool};
 use tokio::fs::remove_file;
@@ -36,7 +36,7 @@ impl DiskCache {
     pub async fn new(disk_max_size: u64, disk_path: PathBuf) -> Arc<Box<dyn Cache>> {
         let (db_tx, db_rx) = channel(128);
         let db_pool = {
-            let db_url = format!("sqlite:{}/metadata.sqlite", disk_path.to_str().unwrap());
+            let db_url = format!("sqlite:{}/metadata.sqlite", disk_path.to_string_lossy());
             let mut options = SqliteConnectOptions::from_str(&db_url)
                 .unwrap()
                 .create_if_missing(true);
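Background on the change above: Path::to_str returns None for paths that are not valid UTF-8, so the old to_str().unwrap() could panic, while to_string_lossy() always yields a string (invalid bytes become U+FFFD). A minimal standalone sketch, not taken from the crate:

```rust
use std::path::Path;

fn main() {
    let disk_path = Path::new("/tmp/cache");
    // Old approach: panics if the path is not valid UTF-8.
    // let db_url = format!("sqlite:{}/metadata.sqlite", disk_path.to_str().unwrap());
    // New approach: never panics; invalid bytes are replaced with U+FFFD.
    let db_url = format!("sqlite:{}/metadata.sqlite", disk_path.to_string_lossy());
    println!("{}", db_url);
}
```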
@@ -80,7 +80,13 @@ async fn db_listener(
     let mut recv_stream = ReceiverStream::new(db_rx).ready_chunks(128);
     while let Some(messages) = recv_stream.next().await {
         let now = chrono::Utc::now();
-        let mut transaction = db_pool.begin().await.unwrap();
+        let mut transaction = match db_pool.begin().await {
+            Ok(transaction) => transaction,
+            Err(e) => {
+                error!("Failed to start a transaction to DB, cannot update DB. Disk cache may be losing track of files! {}", e);
+                continue;
+            }
+        };
         for message in messages {
             match message {
                 DbMessage::Get(entry) => {
@@ -111,15 +117,42 @@ async fn db_listener(
                 }
             }
         }
-        transaction.commit().await.unwrap();
+        if let Err(e) = transaction.commit().await {
+            error!(
+                "Failed to commit transaction to DB. Disk cache may be losing track of files! {}",
+                e
+            );
+        }

         if cache.on_disk_size() >= max_on_disk_size {
-            let mut conn = db_pool.acquire().await.unwrap();
-            let items =
+            let mut conn = match db_pool.acquire().await {
+                Ok(conn) => conn,
+                Err(e) => {
+                    error!(
+                        "Failed to get a DB connection and cannot prune disk cache: {}",
+                        e
+                    );
+                    continue;
+                }
+            };
+
+            let items = {
+                let request =
                 sqlx::query!("select id, size from Images order by accessed asc limit 1000")
                     .fetch_all(&mut conn)
-                    .await
-                    .unwrap();
+                    .await;
+                match request {
+                    Ok(items) => items,
+                    Err(e) => {
+                        error!(
+                            "Failed to fetch oldest images and cannot prune disk cache: {}",
+                            e
+                        );
+                        continue;
+                    }
+                }
+            };
+
             let mut size_freed = 0;
             for item in items {
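The hunks above replace .unwrap() on fallible database calls with a match that logs via error! and skips the current batch. A minimal sketch of that pattern, with a hypothetical fallible function standing in for db_pool.begin()/acquire() rather than the actual sqlx API:

```rust
use log::error;

// Hypothetical stand-in for a fallible call such as db_pool.begin().
fn begin_transaction(batch: u32) -> Result<u32, &'static str> {
    if batch % 2 == 0 { Ok(batch) } else { Err("database is busy") }
}

fn main() {
    // The crate initializes SimpleLogger in main.rs; any logger works here.
    simple_logger::SimpleLogger::new().init().ok();

    for batch in 0..4 {
        // Instead of unwrapping, log the failure and keep the listener loop alive.
        let tx = match begin_transaction(batch) {
            Ok(tx) => tx,
            Err(e) => {
                error!("Failed to start a transaction, skipping batch {}: {}", batch, e);
                continue;
            }
        };
        println!("processing batch {} with transaction {}", batch, tx);
    }
}
```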
src/cache/fs.rs (vendored), 9 lines changed
@@ -84,14 +84,14 @@ pub async fn write_file<

     let mut file = {
         let mut write_lock = WRITING_STATUS.write().await;
-        let parent = path.parent().unwrap();
+        let parent = path.parent().expect("The path to have a parent");
         create_dir_all(parent).await?;
         let file = File::create(path).await?; // we need to make sure the file exists and is truncated.
         write_lock.insert(path.to_path_buf(), rx.clone());
         file
     };

-    let metadata_string = serde_json::to_string(&metadata).unwrap();
+    let metadata_string = serde_json::to_string(&metadata).expect("serialization to work");
     let metadata_size = metadata_string.len();
     // need owned variant because async lifetime
     let path_buf = path.to_path_buf();
@@ -151,9 +151,8 @@ pub async fn write_file<
     }

     tokio::spawn(db_callback(bytes_written));
-    if accumulate {
+    if let Some(sender) = on_complete {
         tokio::spawn(async move {
-            let sender = on_complete.unwrap();
             sender
                 .send((
                     cache_key,
@@ -244,7 +243,7 @@ impl Stream for ConcurrentFsStream {
         if let Poll::Ready(Some(WritingStatus::Done(n))) =
             self.receiver.as_mut().poll_next_unpin(cx)
         {
-            self.bytes_total = Some(NonZeroU32::new(n).unwrap())
+            self.bytes_total = Some(NonZeroU32::new(n).expect("Stored a 0 byte image?"))
         }

         // Okay, now we know if we've read enough bytes or not. If the
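Two patterns from this file, shown in isolation: expect() with a message that documents the assumption behind what used to be a bare unwrap(), and binding an Option parameter with if let instead of checking a separate flag and then unwrapping. The sender type below is std::sync::mpsc so the sketch stays self-contained; the crate itself uses a tokio channel:

```rust
use std::path::Path;
use std::sync::mpsc;

fn write_file(data: &str, on_complete: Option<mpsc::Sender<usize>>) {
    // expect() names the invariant instead of panicking anonymously.
    let parent = Path::new("cache/ab/cd.json")
        .parent()
        .expect("the path to have a parent");
    println!("would create {:?} and write {} bytes", parent, data.len());

    // The Option itself now says whether completion should be reported,
    // so there is no separate `accumulate` flag and no on_complete.unwrap().
    if let Some(sender) = on_complete {
        let _ = sender.send(data.len());
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    write_file("hello", Some(tx));
    println!("bytes written: {}", rx.recv().unwrap());
    write_file("no listener", None);
}
```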
src/cache/mem_cache.rs (vendored), 1 line changed
@@ -13,7 +13,6 @@ use tokio::sync::mpsc::{channel, Sender};
 use tokio::sync::Mutex;

 /// Memory accelerated disk cache. Uses an LRU in memory to speed up reads.
-///
 pub struct MemoryLruCache {
     inner: Arc<Box<dyn Cache>>,
     cur_mem_size: AtomicU64,
src/main.rs, 13 lines changed
@@ -3,11 +3,12 @@
 #![allow(clippy::module_name_repetitions)]

 use std::env::{self, VarError};
+use std::hint::unreachable_unchecked;
+use std::num::ParseIntError;
 use std::process;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use std::time::Duration;
-use std::{num::ParseIntError, sync::atomic::Ordering};

 use actix_web::rt::{spawn, time, System};
 use actix_web::web::{self, Data};
@@ -48,7 +49,7 @@ enum ServerError {
 }

 #[actix_web::main]
-async fn main() -> Result<(), std::io::Error> {
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
     // It's ok to fail early here, it would imply we have a invalid config.
     dotenv::dotenv().ok();
     let cli_args = CliArgs::parse();
@@ -66,10 +67,10 @@ async fn main() -> Result<(), std::io::Error> {
         (0, 0) => LevelFilter::Info,
         (_, 1) => LevelFilter::Debug,
         (_, n) if n > 1 => LevelFilter::Trace,
-        _ => unreachable!(),
+        _ => unsafe { unreachable_unchecked() },
     };

-    SimpleLogger::new().with_level(log_level).init().unwrap();
+    SimpleLogger::new().with_level(log_level).init()?;

     print_preamble_and_warnings();

@@ -81,7 +82,7 @@ async fn main() -> Result<(), std::io::Error> {
     };
     let client_secret_1 = client_secret.clone();

-    let server = ServerState::init(&client_secret, &cli_args).await.unwrap();
+    let server = ServerState::init(&client_secret, &cli_args).await?;
     let data_0 = Arc::new(RwLockServerState(RwLock::new(server)));
     let data_1 = Arc::clone(&data_0);

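The signature change above lets main propagate any error with ? instead of unwrapping; a minimal sketch of that shape (the parse call is just an illustrative fallible step, not the crate's code):

```rust
use std::error::Error;

fn main() -> Result<(), Box<dyn Error>> {
    // Any error type implementing std::error::Error converts into Box<dyn Error>,
    // which is what allows init()? and ServerState::init(...)? to replace unwrap().
    let port: u16 = "8080".parse()?;
    println!("listening on port {}", port);
    Ok(())
}
```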
@@ -39,7 +39,9 @@ impl<'a> Request<'a> {
             port: config.port,
             disk_space: config.disk_quota,
             network_speed: config.network_speed,
-            build_version: client_api_version!().parse().unwrap(),
+            build_version: client_api_version!()
+                .parse()
+                .expect("to parse the build version"),
             tls_created_at: Some(state.0.read().tls_config.created_at.clone()),
         }
     }
@@ -53,7 +55,9 @@ impl<'a> From<(&'a str, &CliArgs)> for Request<'a> {
             port: config.port,
             disk_space: config.disk_quota,
             network_speed: config.network_speed,
-            build_version: client_api_version!().parse().unwrap(),
+            build_version: client_api_version!()
+                .parse()
+                .expect("to parse the build version"),
             tls_created_at: None,
         }
     }
src/state.rs, 31 lines changed
@@ -8,6 +8,7 @@ use parking_lot::RwLock;
 use rustls::sign::CertifiedKey;
 use rustls::ResolvesServerCert;
 use sodiumoxide::crypto::box_::PrecomputedKey;
+use thiserror::Error;
 use url::Url;

 pub struct ServerState {
@@ -21,8 +22,22 @@ pub struct ServerState {
 pub static PREVIOUSLY_PAUSED: AtomicBool = AtomicBool::new(false);
 pub static PREVIOUSLY_COMPROMISED: AtomicBool = AtomicBool::new(false);

+#[derive(Error, Debug)]
+pub enum ServerInitError {
+    #[error(transparent)]
+    MalformedResponse(reqwest::Error),
+    #[error(transparent)]
+    Timeout(reqwest::Error),
+    #[error(transparent)]
+    SendFailure(reqwest::Error),
+    #[error("Failed to parse token key")]
+    KeyParseError(String),
+    #[error("Token key was not provided in initial request")]
+    MissingTokenKey,
+}
+
 impl ServerState {
-    pub async fn init(secret: &str, config: &CliArgs) -> Result<Self, ()> {
+    pub async fn init(secret: &str, config: &CliArgs) -> Result<Self, ServerInitError> {
         let resp = reqwest::Client::new()
             .post(CONTROL_CENTER_PING_URL)
             .json(&Request::from((secret, config)))
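The new ServerInitError relies on thiserror's derive; a condensed sketch of how the attributes map onto Display, with std::io::Error standing in for reqwest::Error so the example is self-contained:

```rust
use thiserror::Error;

#[derive(Error, Debug)]
pub enum InitError {
    // transparent forwards Display and source() to the wrapped error.
    #[error(transparent)]
    Send(#[from] std::io::Error),
    // A fixed message; fields could also be interpolated with {0}.
    #[error("Failed to parse token key")]
    KeyParse(String),
    #[error("Token key was not provided in initial request")]
    MissingTokenKey,
}

fn main() {
    println!("{}", InitError::MissingTokenKey);
    println!("{}", InitError::KeyParse("not base64".into()));
}
```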
@@ -39,18 +54,18 @@ impl ServerState {
             Ok(mut resp) => {
                 let key = resp
                     .token_key
+                    .ok_or(ServerInitError::MissingTokenKey)
                     .and_then(|key| {
                         if let Some(key) = base64::decode(&key)
                             .ok()
                             .and_then(|k| PrecomputedKey::from_slice(&k))
                         {
-                            Some(key)
+                            Ok(key)
                         } else {
                             error!("Failed to parse token key: got {}", key);
-                            None
+                            Err(ServerInitError::KeyParseError(key))
                         }
-                    })
-                    .unwrap();
+                    })?;

                 PREVIOUSLY_COMPROMISED.store(resp.paused, Ordering::Release);
                 if resp.compromised {
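The token-key handling above converts an Option chain that ended in unwrap() into a Result chain ended by ?; a self-contained sketch of the ok_or / and_then shape, with a trivial hypothetical parser in place of base64 + PrecomputedKey:

```rust
#[derive(Debug)]
enum InitError {
    MissingTokenKey,
    KeyParse(String),
}

// Hypothetical stand-in for base64::decode + PrecomputedKey::from_slice.
fn parse_key(raw: &str) -> Option<Vec<u8>> {
    if raw.len() == 4 { Some(raw.as_bytes().to_vec()) } else { None }
}

fn init(token_key: Option<String>) -> Result<Vec<u8>, InitError> {
    // ok_or turns the missing value into a typed error; and_then keeps the
    // chain in Result so ? can propagate either failure to the caller.
    let key = token_key
        .ok_or(InitError::MissingTokenKey)
        .and_then(|key| parse_key(&key).ok_or(InitError::KeyParse(key)))?;
    Ok(key)
}

fn main() {
    println!("{:?}", init(Some("abcd".into()))); // Ok([97, 98, 99, 100])
    println!("{:?}", init(None));                // Err(MissingTokenKey)
    println!("{:?}", init(Some("bad".into())));  // Err(KeyParse("bad"))
}
```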
@@ -91,17 +106,17 @@ impl ServerState {
                 }
                 Err(e) => {
                     warn!("Got malformed response: {}", e);
-                    Err(())
+                    Err(ServerInitError::MalformedResponse(e))
                 }
             },
             Err(e) => match e {
                 e if e.is_timeout() => {
                     error!("Response timed out to control server. Is MangaDex down?");
-                    Err(())
+                    Err(ServerInitError::Timeout(e))
                 }
                 e => {
                     warn!("Failed to send request: {}", e);
-                    Err(())
+                    Err(ServerInitError::SendFailure(e))
                 }
             },
         }