From 784d7674ee00d80c2f4e1352a06fa1ce0c5e45f6 Mon Sep 17 00:00:00 2001 From: Edward Shen Date: Mon, 26 Jun 2023 22:49:30 -0700 Subject: [PATCH] Even more work --- .gitignore | 2 +- Cargo.lock | 44 ++++++ Cargo.toml | 11 +- README.md | 0 cloudflare-ddns.toml | 2 +- src/config.rs | 10 ++ src/main.rs | 340 ++++++++++++++++++++++++++++++------------- 7 files changed, 306 insertions(+), 103 deletions(-) create mode 100644 README.md diff --git a/.gitignore b/.gitignore index aafb6ba..8bb15a8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,3 @@ /target .env -config.toml \ No newline at end of file +cloudflare-ddns.toml \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index e9a72b5..94474af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -126,6 +126,7 @@ dependencies = [ "anstyle", "bitflags", "clap_lex", + "once_cell", "strsim", ] @@ -601,6 +602,15 @@ version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + [[package]] name = "memchr" version = "2.5.0" @@ -853,6 +863,36 @@ dependencies = [ "thiserror", ] +[[package]] +name = "regex" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +dependencies = [ + "regex-syntax 0.7.1", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" + [[package]] name = "reqwest" version = "0.11.18" @@ -1298,10 +1338,14 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex", "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", ] diff --git a/Cargo.toml b/Cargo.toml index 7731bbe..bbbe0c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,16 +3,21 @@ name = "cloudflare-ddns" version = "0.1.0" authors = ["Edward Shen "] edition = "2021" +description = "Small DDNS binary for Cloudflare" +repository = "https://github.com/edward-shen/cloudflare-ddns" license = "GPL-3.0-or-later" +keywords = ["cloudflare", "ddns"] +categories = ["command-line-utilities"] +include = ["src/**/*", "LICENSE", "README.md"] [dependencies] reqwest = { version = "0.11", features = ["json"] } tokio = { version = "1", features = ["full"] } tracing = "0.1" -tracing-subscriber = "0.3" +tracing-subscriber = {version = "0.3", features = ["env-filter"] } serde = { version = "1", features = ["derive"] } serde_json = "1" -clap = { version = "4", features = ["derive"] } +clap = { version = "4", features = ["derive", "cargo"] } anyhow = "1" 
 toml = "0.7"
 tabled = { version = "0.12", features = ["derive"] }
@@ -23,4 +28,4 @@ dirs = "5"
 [profile.release]
 strip = "symbols"
 lto = "thin"
-codegen-units = 1
\ No newline at end of file
+codegen-units = 1
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/cloudflare-ddns.toml b/cloudflare-ddns.toml
index b3ffbf6..12e5f34 100644
--- a/cloudflare-ddns.toml
+++ b/cloudflare-ddns.toml
@@ -15,4 +15,4 @@ id = "9ecad461d77ae54d4b8cd942bd4e7be7"
 name = "auth"
 id = "9d4b09b0d86c2f136ab3cd7600e72390"
 type = "A"
-proxy = true
\ No newline at end of file
+proxy = true
diff --git a/src/config.rs b/src/config.rs
index 603e241..3047e28 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -45,6 +45,16 @@ pub struct DnsRecord {
     pub protocol_type: RecordType,
 }
 
+impl Display for DnsRecord {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if f.alternate() {
+            self.id.fmt(f)
+        } else {
+            self.name.fmt(f)
+        }
+    }
+}
+
 impl DnsRecord {
     pub fn is_ipv4(&self) -> bool {
         self.protocol_type == RecordType::A
diff --git a/src/main.rs b/src/main.rs
index d4039d9..7827f17 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,27 +1,39 @@
-#![warn(clippy::pedantic)]
+#![warn(clippy::pedantic, clippy::cargo)]
 
-use std::collections::HashSet;
+use std::collections::{BTreeMap, HashMap, HashSet};
+use std::fmt::{Debug, Display};
+use std::fs::File;
+use std::io::{self, IsTerminal};
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 use std::path::{Path, PathBuf};
+use std::time::Duration;
 
 use crate::config::{Config, RecordType};
 use anyhow::{Context, Result};
-use clap::{Parser, Subcommand};
+use clap::{Parser, Subcommand, ValueEnum};
 use reqwest::Url;
 use serde::{Deserialize, Serialize};
 use serde_json::json;
 use tabled::settings::object::Column;
 use tabled::settings::{Alignment, Modify};
 use tabled::{Table, Tabled};
-use tracing::{debug, error, info, warn};
+use tokio::time;
+use tracing::{debug, info, instrument, trace, warn, Level};
+use tracing_subscriber::filter::Directive;
+use tracing_subscriber::fmt::Subscriber;
+use tracing_subscriber::EnvFilter;
 
 mod config;
 
 const X_AUTH_EMAIL: &str = "X-Auth-Email";
 const X_AUTH_KEY: &str = "X-Auth-Key";
 
+/// Scuffed Cloudflare dynamic DNS script.
+///
+/// If std
 #[derive(Parser, Clone, Debug)]
+#[clap(author = clap::crate_authors!(), version = clap::crate_version!())]
 pub struct Args {
     /// Path to the configuration file.
     ///
@@ -30,24 +42,99 @@ pub struct Args {
     /// directory.
     #[clap(short, long, global = true)]
     config_file: Option<PathBuf>,
+    #[clap(short, long, global = true, value_delimiter = ',')]
+    verbose: Vec<Directive>,
+    // Force whether or not to print colors
+    #[clap(long, default_value_t = Color::default())]
+    color: Color,
     #[command(subcommand)]
     cmd: Command,
 }
 
 #[derive(Subcommand, Clone, Debug)]
 pub enum Command {
+    /// Fetch a reflected IP address and update A and AAAA entries in DNS.
     Run,
+    /// List all A and AAAA entries in each zone in the config.
     List(List),
 }
 
 #[derive(Parser, Clone, Debug)]
 pub struct List {
+    /// Limit which zones to emit.
+    ///
+    /// If not provided, print all zones in the config.
    zones: Option<Vec<String>>,
+    /// Which format to output zone data in.
+    #[clap(short, long, default_value_t = OutputFormat::default())]
+    output: OutputFormat,
+}
+
+#[derive(ValueEnum, Default, Debug, Clone, Copy)]
+enum OutputFormat {
+    #[default]
+    Table,
+    Json,
+}
+
+impl Display for OutputFormat {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            OutputFormat::Table => Display::fmt("table", f),
+            OutputFormat::Json => Display::fmt("json", f),
+        }
+    }
+}
+
+#[derive(ValueEnum, Default, Debug, Clone, Copy, PartialEq, Eq)]
+enum Color {
+    #[default]
+    Auto,
+    Never,
+    Always,
+}
+
+impl Display for Color {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Color::Auto => Display::fmt("auto", f),
+            Color::Never => Display::fmt("never", f),
+            Color::Always => Display::fmt("always", f),
+        }
+    }
 }
 
 #[tokio::main]
 async fn main() -> Result<()> {
     let args = Args::parse();
+
+    let env_filter = args
+        .verbose
+        .into_iter()
+        .fold(EnvFilter::from_default_env(), |env, directive| {
+            env.add_directive(directive)
+        });
+
+    let is_stdout_terminal = io::stdout().is_terminal();
+    let use_ansi = match args.color {
+        Color::Auto => is_stdout_terminal || io::stderr().is_terminal(),
+        other => other == Color::Always,
+    };
+
+    Subscriber::builder()
+        .with_env_filter(env_filter)
+        .with_ansi(use_ansi)
+        .with_writer(move || -> Box<dyn io::Write> {
+            // If we're redirecting stdout, use stderr for logs
+            // This makes json output work as expected for redirection
+            if is_stdout_terminal {
+                Box::new(io::stdout())
+            } else {
+                Box::new(io::stderr())
+            }
+        })
+        .init();
+
     let config = load_config(args.config_file).context("Failed to find a suitable config file")?;
     match args.cmd {
         Command::Run => handle_run(config).await,
@@ -56,51 +143,52 @@ async fn main() -> Result<()> {
 }
 
 async fn handle_run(conf: Config) -> Result<()> {
-    let ipv4 = match conf.ip_reflector.ipv4 {
-        Some(addr_to_req) => Some(
-            get_ipv4(addr_to_req)
-                .await
-                .context("Failed to query for ipv4 address, bailing.")?,
-        ),
-        None => None,
+    let ipv4 = if let Some(addr_to_req) = conf.ip_reflector.ipv4 {
+        let ip = get_ipv4(addr_to_req)
+            .await
+            .context("Failed to query for IPv4 address, bailing.")?;
+        debug!(addr=%ip, "Found reflected IPv4");
+        Some(IpAddr::V4(ip))
+    } else {
+        info!("No IPv4 reflector endpoint provided. Not updating IPv4 addresses");
+        None
     };
-    let ipv6 = match conf.ip_reflector.ipv6 {
-        Some(addr_to_req) => Some(
-            get_ipv6(addr_to_req)
-                .await
-                .context("Failed to query for ipv4 address, bailing.")?,
-        ),
-        None => None,
+    let ipv6 = if let Some(addr_to_req) = conf.ip_reflector.ipv6 {
+        let ip = get_ipv6(addr_to_req)
+            .await
+            .context("Failed to query for IPv6 address, bailing.")?;
+        debug!(addr=%ip, "Found reflected IPv6");
+        Some(IpAddr::V6(ip))
+    } else {
+        debug!("No IPv6 reflector endpoint provided. Not updating IPv6 addresses");
+        None
     };
 
     let ip_cache_path = ip_cache_path().context("while getting the ip cache path")?;
+    let mut cache_file = load_ip_cache(&ip_cache_path).context("while loading the ip cache")?;
 
-    // Only start processing requests if our current IP doesn't match our cached IP
-    if !needs_update(&ip_cache_path, ipv4, ipv6).context("while checking the ip cache")? {
-        return Ok(());
-    }
+    let mut rate_limit = time::interval(Duration::from_millis(250));
+    for (human_readable_name, zone) in conf.zone {
+        let span = tracing::span!(Level::TRACE, "zone", domain = %human_readable_name);
+        let _enter = span.enter();
 
-    let ipv4_addr = ipv4.map(IpAddr::V4);
-    let ipv6_addr = ipv6.map(IpAddr::V6);
+        let records_to_process = zone
+            .record
+            .into_iter()
+            .filter(|record| !record.disabled)
+            .filter_map(|record| {
+                // Only process ipv4 entries if we have a reflected ip
+                if record.is_ipv4() {
+                    return ipv4.map(|ip| (ip, record));
+                }
 
-    for zone in conf.zone.into_values() {
-        let zone_id = zone.id;
+                // Only process ipv6 entries if we have a reflected ip
+                if record.is_ipv6() {
+                    return ipv6.map(|ip| (ip, record));
+                }
 
-        let records_to_process = zone.record.into_iter().filter_map(|record| {
-            if record.disabled {
-                return None;
-            }
-
-            if ipv4.is_some() && record.is_ipv4() {
-                return Some((&ipv4_addr, record));
-            }
-
-            if ipv6.is_some() && record.is_ipv6() {
-                return Some((&ipv6_addr, record));
-            }
-
-            None
-        });
+                None
+            });
 
         for (addr, record) in records_to_process {
             #[derive(Deserialize, Debug)]
@@ -111,10 +199,30 @@ async fn handle_run(conf: Config) -> Result<()> {
                messages: Vec,
            }
 
-            let record_id = record.id;
+            let span = tracing::span!(Level::TRACE, "record", name = %record);
+            let _enter = span.enter();
+
+            // Can't put this in a filter combinator because cache_file gets
+            // immutably borrowed for the duration of the iterator
+            let cache_entry = cache_file.0.get(&record.id).copied();
+            let should_skip = match cache_entry {
+                entry @ Some(IpAddr::V4(_)) => entry == ipv4,
+                entry @ Some(IpAddr::V6(_)) => entry == ipv6,
+                None => false,
+            };
+
+            if should_skip {
+                debug!("Skipping entry since it was up to date in cache");
+                continue;
+            }
+
+            debug!(cached_ip=?cache_entry, "Need to update entry");
+
+            rate_limit.tick().await;
             let resp: UpdateDnsResponse = reqwest::Client::new()
                 .put(format!(
-                    "https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records/{record_id}"
+                    "https://api.cloudflare.com/client/v4/zones/{}/dns_records/{}",
+                    &zone.id, &record.id
                 ))
                 .header(X_AUTH_EMAIL, &conf.account.email.to_string())
                 .header(X_AUTH_KEY, &conf.account.api_key)
@@ -133,6 +241,8 @@ async fn handle_run(conf: Config) -> Result<()> {
                 .context("while parsing into a json")?;
 
             if resp.success {
+                trace!("Update successful");
+                cache_file.0.insert(record.id, addr);
                 continue;
             }
         }
@@ -140,59 +250,35 @@ async fn handle_run(conf: Config) -> Result<()> {
     // Updating the ip cache last is better in case we get interrupted. Better
     // to update too frequently than not enough.
-    update_ip_cache(ip_cache_path, ipv4, ipv6)?;
+    update_ip_cache(ip_cache_path, &cache_file).context("while updating the cache file")?;
 
     Ok(())
 }
 
-fn update_ip_cache<P: AsRef<Path>>(
-    path: P,
-    ipv4: Option<Ipv4Addr>,
-    ipv6: Option<Ipv6Addr>,
-) -> Result<()> {
-    let data = serde_json::to_string(&CacheFile { ipv4, ipv6 }).expect("serialization to work");
-    std::fs::write(path, data).context("while updating the ip cache file")?;
+fn update_ip_cache<P: AsRef<Path>>(path: P, data: &CacheFile) -> Result<()> {
+    let data = serde_json::to_string(data).expect("serialization to work");
+    std::fs::write(path, data).context("while writing the ip cache file")?;
     Ok(())
 }
 
-#[derive(Serialize, Deserialize, Default)]
-struct CacheFile {
-    ipv4: Option<Ipv4Addr>,
-    ipv6: Option<Ipv6Addr>,
-}
+#[derive(Serialize, Deserialize, Default, Debug)]
+struct CacheFile(HashMap<String, IpAddr>);
 
-fn needs_update<P: AsRef<Path>>(
-    path: P,
-    cur_ipv4: Option<Ipv4Addr>,
-    cur_ipv6: Option<Ipv6Addr>,
-) -> Result<bool> {
-    let data = std::fs::read_to_string(path).context("while reading the ip cache file")?;
-    let cache: CacheFile = match serde_json::from_str(&data) {
+#[instrument(level = "trace", ret)]
+fn load_ip_cache<P: AsRef<Path> + Debug>(path: P) -> Result<CacheFile> {
+    let file = File::options()
+        .create(true)
+        .read(true)
+        .write(true)
+        .open(path)
+        .context("while opening the ip cache file")?;
+    let data = std::io::read_to_string(file).context("while reading the ip cache file")?;
+    Ok(match serde_json::from_str(&data) {
        Ok(cache) => cache,
        Err(e) => {
            warn!("Failed to parse the ip cache file; assuming empty: {e}");
            CacheFile::default()
        }
-    };
-
-    if matches!((cache.ipv4, cur_ipv4), (Some(_), None)) {
-        warn!("Cache previously reported an IPv4 address, but we didn't find one. Not updating IPv4 entries");
-    }
-
-    let update_ipv4 = match (cache.ipv4, cur_ipv4) {
-        (Some(cached), Some(current)) => cached != current,
-        (cached, current) => cached.xor(current).is_some(),
-    };
-
-    if matches!((cache.ipv6, cur_ipv6), (Some(_), None)) {
-        warn!("Cache previously reported an IPv6 address, but we didn't find one. Not updating IPv6 entries");
-    }
-
-    let update_ipv6 = match (cache.ipv6, cur_ipv6) {
-        (Some(cached), Some(current)) => cached != current,
-        (cached, current) => cached.xor(current).is_some(),
-    };
-
-    Ok(update_ipv4 || update_ipv6)
+    })
 }
 
 fn ip_cache_path() -> Result<PathBuf> {
@@ -220,11 +306,12 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
            if known_zones.contains(&maybe_zone_id) {
                return Some(maybe_zone_id);
            }
+
            if let Some(zone) = conf.zone.get(&maybe_zone_id) {
                return Some(zone.id.clone());
            }
 
-            eprintln!("Unknown zone {maybe_zone_id}, skipping");
+            warn!("Unknown zone {maybe_zone_id}, skipping");
            None
        })
        .collect()
@@ -232,6 +319,8 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
        None => known_zones.into_iter().cloned().collect(),
    };
 
+    let mut output = BTreeMap::new();
+    let mut rate_limit = time::interval(Duration::from_millis(250));
    for zone in zones {
        #[derive(Deserialize, Debug)]
        #[allow(dead_code)]
@@ -242,7 +331,7 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
            result: Vec<DnsResponse>,
        }
 
-        #[derive(Deserialize, Debug, Tabled)]
+        #[derive(Serialize, Deserialize, Debug, Tabled)]
        #[tabled(rename_all = "PascalCase")]
        struct DnsResponse {
            name: String,
@@ -258,6 +347,8 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
        for page_no in 1.. {
            // This technically requests one more than optimal, but tbh it
            // doesn't really matter
+
+            rate_limit.tick().await;
            let resp: ListZoneResponse = reqwest::Client::new()
                .get(format!(
                    "https://api.cloudflare.com/client/v4/zones/{zone}/dns_records?type=A,AAAA&page={page_no}"
                ))
@@ -275,18 +366,50 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
            if resp.result.is_empty() {
                break;
-            } else {
-                entries.extend(resp.result);
            }
+
+            entries.extend(resp.result);
        }
 
        // Sort by subdomain, with higher level subdomains taking higher precedence than lower ones.
        entries.sort_unstable_by(|a, b| a.name.split('.').rev().cmp(b.name.split('.').rev()));
 
-        println!(
-            "{}",
-            Table::new(entries).with(Modify::new(Column::from(0)).with(Alignment::right()))
-        );
+        output.insert(zone, entries);
+    }
+
+    let human_readable_mapping: HashMap<_, _> = conf
+        .zone
+        .into_iter()
+        .map(|(human, zone)| (zone.id, human))
+        .collect();
+    match args.output {
+        OutputFormat::Table => {
+            for (zone_id, data) in output {
+                println!(
+                    "{} ({zone_id})\n{}",
+                    human_readable_mapping.get(&zone_id).unwrap(),
+                    Table::new(data).with(Modify::new(Column::from(0)).with(Alignment::right()))
+                );
+            }
+        }
+        OutputFormat::Json => {
+            let map: serde_json::Map<String, serde_json::Value> = output
+                .into_iter()
+                .map(|(zone_id, data)| {
+                    (
+                        human_readable_mapping.get(&zone_id).unwrap().clone(),
+                        json!({
+                            "id": zone_id,
+                            "records": data,
+                        }),
+                    )
+                })
+                .collect();
+            println!(
+                "{}",
+                serde_json::to_string(&map).expect("serialization to work")
+            );
+        }
    }
 
    Ok(())
 }
@@ -316,24 +439,45 @@ async fn get_ipv6(url: Url) -> Result<Ipv6Addr> {
 fn load_config(user_provided_path: Option<PathBuf>) -> Option<Config> {
    if let Some(path) = user_provided_path {
-        return load_config_from_path(path);
+        tracing::trace!("User provided path to config");
+        let maybe_config = load_config_from_path(&path);
+        if maybe_config.is_some() {
+            tracing::info!(
+                path = %path.to_string_lossy(),
+                "Loaded config file"
+            );
+        }
+        return maybe_config;
    }
 
    let file_path = Path::new("./cloudflare-ddns.toml");
    let resolved_path = file_path.canonicalize();
    let resolved_path = resolved_path.as_deref().unwrap_or(file_path);
    if let Some(config) = load_config_from_path(resolved_path) {
+        tracing::info!(
+            path = %resolved_path.to_string_lossy(),
+            "Loaded config file"
+        );
        return Some(config);
    }
 
-    if let Some(config) = dirs::config_dir()
+    if let Some((path, config)) = dirs::config_dir()
        .map(|path| path.join(file_path))
-        .and_then(load_config_from_path)
+        .and_then(|path| load_config_from_path(&path).map(|conf| (path, conf)))
    {
+        tracing::info!(
+            path = %path.to_string_lossy(),
+            "Loaded config file"
+        );
        return Some(config);
    }
 
-    load_config_from_path("/etc/cloudflare-ddns.toml")
+    if let Some(config) = load_config_from_path("/etc/cloudflare-ddns.toml") {
+        tracing::info!(path = "/etc/cloudflare-ddns.toml", "Loaded config file");
+        return Some(config);
+    }
+
+    None
 }
 
 fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {
@@ -345,7 +489,7 @@ fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {
                    "Failed to parse config file at {}: {}",
                    path.as_ref().to_string_lossy(),
                    err
-                )
+                );
            }
        },
        Err(err) => {
@@ -353,7 +497,7 @@
                "Unable to read the config file at {}: {}",
                path.as_ref().to_string_lossy(),
                err
-            )
+            );
        }
    }
    None
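
Supplementary sketch (not from the patch): handle_run now tracks one cached address per DNS record instead of the old global IPv4/IPv6 pair, serialized through serde_json exactly like update_ip_cache and load_ip_cache do. The snippet below is a minimal standalone illustration of that round trip; it assumes DnsRecord::id is a plain String, and reuses the sample record ID from cloudflare-ddns.toml.

use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr};

use serde::{Deserialize, Serialize};

// Mirrors the patch's cache type: DNS record ID -> last IP written to Cloudflare.
#[derive(Serialize, Deserialize, Default, Debug)]
struct CacheFile(HashMap<String, IpAddr>);

fn main() {
    let mut cache = CacheFile::default();
    cache.0.insert(
        "9d4b09b0d86c2f136ab3cd7600e72390".to_owned(),
        IpAddr::V4(Ipv4Addr::new(203, 0, 113, 7)),
    );

    // Newtype structs serialize transparently, so the on-disk file is a bare JSON
    // object, e.g. {"9d4b09b0d86c2f136ab3cd7600e72390":"203.0.113.7"}
    let json = serde_json::to_string(&cache).expect("serialization to work");
    println!("{json}");

    // Round-tripping restores the same map, which is what lets handle_run compare
    // each record's cached address against the freshly reflected one (should_skip).
    let restored: CacheFile = serde_json::from_str(&json).expect("deserialization to work");
    assert_eq!(restored.0.len(), 1);
}

Only records whose cached address is missing or different from the reflected IP are PUT to Cloudflare; matching entries are skipped, as the should_skip check in the patch implements.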
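Similarly, a minimal sketch of how repeated --verbose values, parsed by clap into tracing_subscriber filter Directives, fold into a single EnvFilter (this relies on the env-filter feature the patch enables; the directive strings are hypothetical examples, not taken from the patch).

use tracing_subscriber::filter::Directive;
use tracing_subscriber::EnvFilter;

fn main() {
    // e.g. `--verbose cloudflare_ddns=debug,hyper=warn` split by clap's value_delimiter ','
    let directives: Vec<Directive> = vec![
        "cloudflare_ddns=debug".parse().expect("valid directive"),
        "hyper=warn".parse().expect("valid directive"),
    ];

    // Start from RUST_LOG (the default env filter) and layer each CLI directive on top,
    // matching the fold in the patched main().
    let filter = directives
        .into_iter()
        .fold(EnvFilter::from_default_env(), |env, directive| {
            env.add_directive(directive)
        });

    println!("{filter}");
}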