Even more work

This commit is contained in:
Edward Shen 2023-06-26 22:49:30 -07:00
parent 05e0726af5
commit 0d538d4b9f
Signed by: edward
GPG key ID: 19182661E818369F
4 changed files with 270 additions and 100 deletions

44
Cargo.lock generated
View file

@ -126,6 +126,7 @@ dependencies = [
"anstyle",
"bitflags",
"clap_lex",
"once_cell",
"strsim",
]
@ -601,6 +602,15 @@ version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
"regex-automata",
]
[[package]]
name = "memchr"
version = "2.5.0"
@ -853,6 +863,36 @@ dependencies = [
"thiserror",
]
[[package]]
name = "regex"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370"
dependencies = [
"regex-syntax 0.7.1",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"regex-syntax 0.6.29",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c"
[[package]]
name = "reqwest"
version = "0.11.18"
@ -1298,10 +1338,14 @@ version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
]

View file

@ -9,10 +9,10 @@ license = "GPL-3.0-or-later"
reqwest = { version = "0.11", features = ["json"] }
tokio = { version = "1", features = ["full"] }
tracing = "0.1"
tracing-subscriber = "0.3"
tracing-subscriber = {version = "0.3", features = ["env-filter"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
clap = { version = "4", features = ["derive"] }
clap = { version = "4", features = ["derive", "cargo"] }
anyhow = "1"
toml = "0.7"
tabled = { version = "0.12", features = ["derive"] }

View file

@ -45,6 +45,16 @@ pub struct DnsRecord {
pub protocol_type: RecordType,
}
impl Display for DnsRecord {
    /// Renders the record for human-readable output.
    ///
    /// The alternate flag (`{:#}`) selects the record's id; the default
    /// form selects the record's name. Formatting is delegated to the
    /// field's own `Display` impl so width/fill flags still apply.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match f.alternate() {
            true => self.id.fmt(f),
            false => self.name.fmt(f),
        }
    }
}
impl DnsRecord {
pub fn is_ipv4(&self) -> bool {
self.protocol_type == RecordType::A

View file

@ -1,27 +1,37 @@
#![warn(clippy::pedantic)]
#![warn(clippy::pedantic, clippy::cargo)]
use std::collections::HashSet;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::fmt::Debug;
use std::fs::File;
use std::io::{self, IsTerminal};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::path::{Path, PathBuf};
use crate::config::{Config, RecordType};
use anyhow::{Context, Result};
use clap::{Parser, Subcommand};
use clap::{Parser, Subcommand, ValueEnum};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tabled::settings::object::Column;
use tabled::settings::{Alignment, Modify};
use tabled::{Table, Tabled};
use tracing::{debug, error, info, warn};
use tracing::{debug, info, instrument, trace, warn, Level};
use tracing_subscriber::filter::Directive;
use tracing_subscriber::fmt::Subscriber;
use tracing_subscriber::EnvFilter;
mod config;
const X_AUTH_EMAIL: &str = "X-Auth-Email";
const X_AUTH_KEY: &str = "X-Auth-Key";
/// Scuffed Cloudflare dynamic DNS script.
///
/// If stdout is not a terminal (e.g. output is redirected), logs are
/// written to stderr instead of stdout.
#[derive(Parser, Clone, Debug)]
#[clap(author = clap::crate_authors!(), version = clap::crate_version!())]
pub struct Args {
/// Path to the configuration file.
///
@ -30,24 +40,80 @@ pub struct Args {
/// directory.
#[clap(short, long, global = true)]
config_file: Option<PathBuf>,
#[clap(short, long, global = true, value_delimiter = ',')]
verbose: Vec<Directive>,
// Force whether or not to print colors
#[clap(long)]
color: Color,
#[command(subcommand)]
cmd: Command,
}
// Top-level subcommands. The `///` docs on each variant are surfaced by
// clap as per-subcommand help text, so they are left untouched; this
// header uses `//` to stay out of the generated help.
#[derive(Subcommand, Clone, Debug)]
pub enum Command {
/// Fetch a reflected IP address and update A and AAAA entries in DNS.
Run,
/// List all A and AAAA entries in each zone in the config.
List(List),
}
// Arguments for the `list` subcommand.
#[derive(Parser, Clone, Debug)]
pub struct List {
/// Limit which zones to emit.
///
/// If not provided, print all zones in the config.
zones: Option<Vec<String>>,
/// Which format to output zone data in.
// NOTE(review): no `default_value_t` is set, so despite OutputFormat
// deriving Default this flag appears to be required on the command
// line — confirm intended behavior.
#[clap(short, long)]
output: OutputFormat,
}
// Supported renderings for the `list` subcommand's output. Plain `//`
// comments are used deliberately: `///` docs on a ValueEnum's variants
// would be injected into clap's generated help text.
#[derive(ValueEnum, Default, Debug, Clone, Copy)]
enum OutputFormat {
// Human-readable table output (the default).
#[default]
Table,
// Machine-readable JSON, keyed by the zone's human-readable name.
Json,
}
// Controls ANSI color in log output, mirroring the conventional
// `--color {auto,never,always}` CLI flag. Plain `//` comments so clap's
// generated help text is unchanged.
#[derive(ValueEnum, Default, Debug, Clone, Copy, PartialEq, Eq)]
enum Color {
// Use colors only when stdout or stderr is a terminal (the default).
#[default]
Auto,
// Never emit ANSI escape codes.
Never,
// Always emit ANSI escape codes, even when output is redirected.
Always,
}
#[tokio::main]
async fn main() -> Result<()> {
let args = Args::parse();
let env_filter = args
.verbose
.into_iter()
.fold(EnvFilter::from_default_env(), |env, directive| {
env.add_directive(directive)
});
let is_stdout_terminal = io::stdout().is_terminal();
let use_ansi = match args.color {
Color::Auto => is_stdout_terminal || io::stderr().is_terminal(),
other => other == Color::Always,
};
Subscriber::builder()
.with_env_filter(env_filter)
.with_ansi(use_ansi)
.with_writer(move || -> Box<dyn io::Write> {
// If we're redirecting stdout, use stderr for logs
// This makes json output work as expected for redirection
if is_stdout_terminal {
Box::new(io::stdout())
} else {
Box::new(io::stderr())
}
})
.init();
let config = load_config(args.config_file).context("Failed to find a suitable config file")?;
match args.cmd {
Command::Run => handle_run(config).await,
@ -56,51 +122,51 @@ async fn main() -> Result<()> {
}
async fn handle_run(conf: Config) -> Result<()> {
let ipv4 = match conf.ip_reflector.ipv4 {
Some(addr_to_req) => Some(
get_ipv4(addr_to_req)
.await
.context("Failed to query for ipv4 address, bailing.")?,
),
None => None,
let ipv4 = if let Some(addr_to_req) = conf.ip_reflector.ipv4 {
let ip = get_ipv4(addr_to_req)
.await
.context("Failed to query for IPv4 address, bailing.")?;
debug!(addr=%ip, "Found reflected IPv4");
Some(IpAddr::V4(ip))
} else {
info!("No IPv4 reflector endpoint provided. Not updating IPv6 addresses");
None
};
let ipv6 = match conf.ip_reflector.ipv6 {
Some(addr_to_req) => Some(
get_ipv6(addr_to_req)
.await
.context("Failed to query for ipv4 address, bailing.")?,
),
None => None,
let ipv6 = if let Some(addr_to_req) = conf.ip_reflector.ipv6 {
let ip = get_ipv6(addr_to_req)
.await
.context("Failed to query for IPv6 address, bailing.")?;
debug!(addr=%ip, "Found reflected IPv6");
Some(IpAddr::V6(ip))
} else {
debug!("No IPv6 reflector endpoint provided. Not updating IPv6 addresses");
None
};
let ip_cache_path = ip_cache_path().context("while getting the ip cache path")?;
let mut cache_file = load_ip_cache(&ip_cache_path).context("while loading the ip cache")?;
// Only start processing requests if our current IP doesn't match our cached IP
if !needs_update(&ip_cache_path, ipv4, ipv6).context("while checking the ip cache")? {
return Ok(());
}
for (human_readable_name, zone) in conf.zone {
let span = tracing::span!(Level::TRACE, "zone", domain = %human_readable_name);
let _enter = span.enter();
let ipv4_addr = ipv4.map(IpAddr::V4);
let ipv6_addr = ipv6.map(IpAddr::V6);
let records_to_process = zone
.record
.into_iter()
.filter(|record| !record.disabled)
.filter_map(|record| {
// Only process ipv4 entries if we have a reflected ip
if record.is_ipv4() {
return ipv4.map(|ip| (ip, record));
}
for zone in conf.zone.into_values() {
let zone_id = zone.id;
// Only process ipv6 entries if we have a reflected ip
if record.is_ipv6() {
return ipv6.map(|ip| (ip, record));
}
let records_to_process = zone.record.into_iter().filter_map(|record| {
if record.disabled {
return None;
}
if ipv4.is_some() && record.is_ipv4() {
return Some((&ipv4_addr, record));
}
if ipv6.is_some() && record.is_ipv6() {
return Some((&ipv6_addr, record));
}
None
});
None
});
for (addr, record) in records_to_process {
#[derive(Deserialize, Debug)]
@ -111,10 +177,29 @@ async fn handle_run(conf: Config) -> Result<()> {
messages: Vec<Message>,
}
let record_id = record.id;
let span = tracing::span!(Level::TRACE, "record", name = %record);
let _enter = span.enter();
// Can't put this in a filter combinator because cache_file gets
// immutably borrowed for the duration of the iterator
let cache_entry = cache_file.0.get(&record.id).copied();
let should_skip = match cache_entry {
entry @ Some(IpAddr::V4(_)) => entry == ipv4,
entry @ Some(IpAddr::V6(_)) => entry == ipv6,
None => false,
};
if should_skip {
debug!("Skipping entry since it was up to date in cache");
continue;
}
debug!(cached_ip=?cache_entry, "Need to update entry");
let resp: UpdateDnsResponse = reqwest::Client::new()
.put(format!(
"https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records/{record_id}"
"https://api.cloudflare.com/client/v4/zones/{}/dns_records/{}",
&zone.id, &record.id
))
.header(X_AUTH_EMAIL, &conf.account.email.to_string())
.header(X_AUTH_KEY, &conf.account.api_key)
@ -133,6 +218,8 @@ async fn handle_run(conf: Config) -> Result<()> {
.context("while parsing into a json")?;
if resp.success {
trace!("Update successful");
cache_file.0.insert(record.id, addr);
continue;
}
}
@ -140,59 +227,35 @@ async fn handle_run(conf: Config) -> Result<()> {
// Updating the ip cache last is better in case we get interrupted. Better
// to update too frequently than not enough.
update_ip_cache(ip_cache_path, ipv4, ipv6)?;
update_ip_cache(ip_cache_path, &cache_file).context("while updating the cache file")?;
Ok(())
}
fn update_ip_cache<P: AsRef<Path>>(
path: P,
ipv4: Option<Ipv4Addr>,
ipv6: Option<Ipv6Addr>,
) -> Result<()> {
let data = serde_json::to_string(&CacheFile { ipv4, ipv6 }).expect("serialization to work");
std::fs::write(path, data).context("while updating the ip cache file")?;
/// Persists the per-record IP cache to disk as JSON.
///
/// Serializing `CacheFile` is treated as infallible (a failure there is a
/// bug, hence the `expect`); only the filesystem write is surfaced to the
/// caller as a recoverable error.
fn update_ip_cache<P: AsRef<Path>>(path: P, data: &CacheFile) -> Result<()> {
    let serialized = serde_json::to_string(data).expect("serialization to work");
    std::fs::write(path, serialized).context("while writing the ip cache file")?;
    Ok(())
}
#[derive(Serialize, Deserialize, Default)]
struct CacheFile {
ipv4: Option<Ipv4Addr>,
ipv6: Option<Ipv6Addr>,
}
// Maps a DNS record id to the IP address last successfully pushed to
// Cloudflare for that record (entries are inserted only after a
// successful update response). Serialized to disk as JSON between runs
// so up-to-date records can be skipped.
#[derive(Serialize, Deserialize, Default, Debug)]
struct CacheFile(HashMap<String, IpAddr>);
fn needs_update<P: AsRef<Path>>(
path: P,
cur_ipv4: Option<Ipv4Addr>,
cur_ipv6: Option<Ipv6Addr>,
) -> Result<bool> {
let data = std::fs::read_to_string(path).context("while reading the ip cache file")?;
let cache: CacheFile = match serde_json::from_str(&data) {
#[instrument(level = "trace", ret)]
fn load_ip_cache<P: AsRef<Path> + Debug>(path: P) -> Result<CacheFile> {
let file = File::options()
.create(true)
.read(true)
.write(true)
.open(path)
.context("while opening the ip cache file")?;
let data = std::io::read_to_string(file).context("while reading the ip cache file")?;
Ok(match serde_json::from_str(&data) {
Ok(cache) => cache,
Err(e) => {
warn!("Failed to parse the ip cache file; assuming empty: {e}");
CacheFile::default()
}
};
if matches!((cache.ipv4, cur_ipv4), (Some(_), None)) {
warn!("Cache previously reported an IPv4 address, but we didn't find one. Not updating IPv4 entries");
}
let update_ipv4 = match (cache.ipv4, cur_ipv4) {
(Some(cached), Some(current)) => cached != current,
(cached, current) => cached.xor(current).is_some(),
};
if matches!((cache.ipv6, cur_ipv6), (Some(_), None)) {
warn!("Cache previously reported an IPv6 address, but we didn't find one. Not updating IPv6 entries");
}
let update_ipv6 = match (cache.ipv6, cur_ipv6) {
(Some(cached), Some(current)) => cached != current,
(cached, current) => cached.xor(current).is_some(),
};
Ok(update_ipv4 || update_ipv6)
})
}
fn ip_cache_path() -> Result<PathBuf> {
@ -224,7 +287,7 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
return Some(zone.id.clone());
}
eprintln!("Unknown zone {maybe_zone_id}, skipping");
warn!("Unknown zone {maybe_zone_id}, skipping");
None
})
.collect()
@ -232,6 +295,7 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
None => known_zones.into_iter().cloned().collect(),
};
let mut output = BTreeMap::new();
for zone in zones {
#[derive(Deserialize, Debug)]
#[allow(dead_code)]
@ -242,7 +306,7 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
result: Vec<DnsResponse>,
}
#[derive(Deserialize, Debug, Tabled)]
#[derive(Serialize, Deserialize, Debug, Tabled)]
#[tabled(rename_all = "PascalCase")]
struct DnsResponse {
name: String,
@ -275,18 +339,49 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
if resp.result.is_empty() {
break;
} else {
entries.extend(resp.result);
}
entries.extend(resp.result);
}
// Sort by subdomain, with higher level subdomains taking higher precedence than lower ones.
entries.sort_unstable_by(|a, b| a.name.split('.').rev().cmp(b.name.split('.').rev()));
println!(
"{}",
Table::new(entries).with(Modify::new(Column::from(0)).with(Alignment::right()))
);
output.insert(zone, entries);
}
match args.output {
OutputFormat::Table => {
for (zone, data) in output {
println!(
"{zone}\n{}",
Table::new(data).with(Modify::new(Column::from(0)).with(Alignment::right()))
);
}
}
OutputFormat::Json => {
let human_readable_mapping: HashMap<_, _> = conf
.zone
.into_iter()
.map(|(human, zone)| (zone.id, human))
.collect();
let map: serde_json::Map<String, serde_json::Value> = output
.into_iter()
.map(|(zone_id, data)| {
(
(*human_readable_mapping.get(&zone_id).unwrap()).to_owned(),
json!({
"id": zone_id,
"records": data,
}),
)
})
.collect();
println!(
"{}",
serde_json::to_string(&map).expect("serialization to work")
)
}
}
Ok(())
@ -316,24 +411,45 @@ async fn get_ipv6(url: Url) -> Result<Ipv6Addr> {
fn load_config(user_provided_path: Option<PathBuf>) -> Option<Config> {
if let Some(path) = user_provided_path {
return load_config_from_path(path);
tracing::trace!("User provided path to config");
let maybe_config = load_config_from_path(&path);
if maybe_config.is_some() {
tracing::info!(
path = %path.to_string_lossy(),
"Loaded config file"
);
}
return maybe_config;
}
let file_path = Path::new("./cloudflare-ddns.toml");
let resolved_path = file_path.canonicalize();
let resolved_path = resolved_path.as_deref().unwrap_or(file_path);
if let Some(config) = load_config_from_path(resolved_path) {
tracing::info!(
path = %resolved_path.to_string_lossy(),
"Loaded config file"
);
return Some(config);
}
if let Some(config) = dirs::config_dir()
if let Some((path, config)) = dirs::config_dir()
.map(|path| path.join(file_path))
.and_then(load_config_from_path)
.and_then(|path| load_config_from_path(&path).map(|conf| (path, conf)))
{
tracing::info!(
path = %path.to_string_lossy(),
"Loaded config file"
);
return Some(config);
}
load_config_from_path("/etc/cloudflare-ddns.toml")
if let Some(config) = load_config_from_path("/etc/cloudflare-ddns.toml") {
tracing::info!(path = "/etc/cloudflare-ddns.toml", "Loaded config file");
return Some(config);
}
None
}
fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {
@ -345,7 +461,7 @@ fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {
"Failed to parse config file at {}: {}",
path.as_ref().to_string_lossy(),
err
)
);
}
},
Err(err) => {
@ -353,7 +469,7 @@ fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {
"Unable to read the config file at {}: {}",
path.as_ref().to_string_lossy(),
err
)
);
}
}
None