Even more work
This commit is contained in:
parent 05e0726af5
commit 784d7674ee

7 changed files with 306 additions and 103 deletions
.gitignore (vendored, 2 changes)

@@ -1,3 +1,3 @@
 /target
 .env
-config.toml
+cloudflare-ddns.toml
Cargo.lock (generated, 44 changes)

@@ -126,6 +126,7 @@ dependencies = [
  "anstyle",
  "bitflags",
  "clap_lex",
+ "once_cell",
  "strsim",
 ]

@@ -601,6 +602,15 @@ version = "0.4.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"

+[[package]]
+name = "matchers"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+dependencies = [
+ "regex-automata",
+]
+
 [[package]]
 name = "memchr"
 version = "2.5.0"

@@ -853,6 +863,36 @@ dependencies = [
  "thiserror",
 ]

+[[package]]
+name = "regex"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370"
+dependencies = [
+ "regex-syntax 0.7.1",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+dependencies = [
+ "regex-syntax 0.6.29",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+
+[[package]]
+name = "regex-syntax"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c"
+
 [[package]]
 name = "reqwest"
 version = "0.11.18"

@@ -1298,10 +1338,14 @@ version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77"
 dependencies = [
+ "matchers",
  "nu-ansi-term",
+ "once_cell",
+ "regex",
  "sharded-slab",
  "smallvec",
  "thread_local",
+ "tracing",
  "tracing-core",
  "tracing-log",
 ]
Cargo.toml

@@ -3,16 +3,21 @@ name = "cloudflare-ddns"
 version = "0.1.0"
 authors = ["Edward Shen <code@eddie.sh>"]
 edition = "2021"
+description = "Small DDNS binary for Cloudflare"
+repository = "https://github.com/edward-shen/cloudflare-ddns"
 license = "GPL-3.0-or-later"
+keywords = ["cloudflare", "ddns"]
+categories = ["command-line-utilities"]
+include = ["src/**/*", "LICENSE", "README.md"]

 [dependencies]
 reqwest = { version = "0.11", features = ["json"] }
 tokio = { version = "1", features = ["full"] }
 tracing = "0.1"
-tracing-subscriber = "0.3"
+tracing-subscriber = {version = "0.3", features = ["env-filter"] }
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
-clap = { version = "4", features = ["derive"] }
+clap = { version = "4", features = ["derive", "cargo"] }
 anyhow = "1"
 toml = "0.7"
 tabled = { version = "0.12", features = ["derive"] }
README.md (new file, empty)
@@ -45,6 +45,16 @@ pub struct DnsRecord {
     pub protocol_type: RecordType,
 }

+impl Display for DnsRecord {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if f.alternate() {
+            self.id.fmt(f)
+        } else {
+            self.name.fmt(f)
+        }
+    }
+}
+
 impl DnsRecord {
     pub fn is_ipv4(&self) -> bool {
         self.protocol_type == RecordType::A
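The new Display impl lets a DnsRecord be formatted two ways: the plain form prints the record's name, while the alternate form prints its id. A minimal sketch of the same pattern with a hypothetical stand-in struct (not the crate's actual DnsRecord), assuming both fields are plain Strings:

use std::fmt::{self, Display};

// Hypothetical stand-in for the two DnsRecord fields the new impl touches.
struct Record {
    id: String,
    name: String,
}

impl Display for Record {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Same shape as the added impl: the alternate flag selects the id,
        // the plain form prints the human-readable name.
        if f.alternate() {
            self.id.fmt(f)
        } else {
            self.name.fmt(f)
        }
    }
}

fn main() {
    let record = Record {
        id: "372e67954025e0ba6aaa6d586b9e0b59".into(), // made-up id
        name: "www.example.com".into(),
    };
    println!("{record}");   // www.example.com
    println!("{record:#}"); // 372e67954025e0ba6aaa6d586b9e0b59
}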
src/main.rs (340 changes)

@@ -1,27 +1,39 @@
-#![warn(clippy::pedantic)]
+#![warn(clippy::pedantic, clippy::cargo)]

-use std::collections::HashSet;
+use std::collections::{BTreeMap, HashMap, HashSet};
+use std::fmt::{Debug, Display};
+use std::fs::File;
+use std::io::{self, IsTerminal};
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 use std::path::{Path, PathBuf};
+use std::time::Duration;

 use crate::config::{Config, RecordType};

 use anyhow::{Context, Result};
-use clap::{Parser, Subcommand};
+use clap::{Parser, Subcommand, ValueEnum};
 use reqwest::Url;
 use serde::{Deserialize, Serialize};
 use serde_json::json;
 use tabled::settings::object::Column;
 use tabled::settings::{Alignment, Modify};
 use tabled::{Table, Tabled};
-use tracing::{debug, error, info, warn};
+use tokio::time;
+use tracing::{debug, info, instrument, trace, warn, Level};
+use tracing_subscriber::filter::Directive;
+use tracing_subscriber::fmt::Subscriber;
+use tracing_subscriber::EnvFilter;

 mod config;

 const X_AUTH_EMAIL: &str = "X-Auth-Email";
 const X_AUTH_KEY: &str = "X-Auth-Key";

+/// Scuffed Cloudflare dynamic DNS script.
+///
+/// If std
 #[derive(Parser, Clone, Debug)]
+#[clap(author = clap::crate_authors!(), version = clap::crate_version!())]
 pub struct Args {
     /// Path to the configuration file.
     ///
@ -30,24 +42,99 @@ pub struct Args {
|
||||||
/// directory.
|
/// directory.
|
||||||
#[clap(short, long, global = true)]
|
#[clap(short, long, global = true)]
|
||||||
config_file: Option<PathBuf>,
|
config_file: Option<PathBuf>,
|
||||||
|
#[clap(short, long, global = true, value_delimiter = ',')]
|
||||||
|
verbose: Vec<Directive>,
|
||||||
|
// Force whether or not to print colors
|
||||||
|
#[clap(long, default_value_t = Color::default())]
|
||||||
|
color: Color,
|
||||||
#[command(subcommand)]
|
#[command(subcommand)]
|
||||||
cmd: Command,
|
cmd: Command,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Subcommand, Clone, Debug)]
|
#[derive(Subcommand, Clone, Debug)]
|
||||||
pub enum Command {
|
pub enum Command {
|
||||||
|
/// Fetch a reflected IP address and update A and AAAA entries in DNS.
|
||||||
Run,
|
Run,
|
||||||
|
/// List all A and AAAA entries in each zone in the config.
|
||||||
List(List),
|
List(List),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser, Clone, Debug)]
|
#[derive(Parser, Clone, Debug)]
|
||||||
pub struct List {
|
pub struct List {
|
||||||
|
/// Limit which zones to emit.
|
||||||
|
///
|
||||||
|
/// If not provided, print all zones in the config.
|
||||||
zones: Option<Vec<String>>,
|
zones: Option<Vec<String>>,
|
||||||
|
/// Which format to output zone data in.
|
||||||
|
#[clap(short, long, default_value_t = OutputFormat::default())]
|
||||||
|
output: OutputFormat,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(ValueEnum, Default, Debug, Clone, Copy)]
|
||||||
|
enum OutputFormat {
|
||||||
|
#[default]
|
||||||
|
Table,
|
||||||
|
Json,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for OutputFormat {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
OutputFormat::Table => Display::fmt("table", f),
|
||||||
|
OutputFormat::Json => Display::fmt("json", f),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(ValueEnum, Default, Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
|
enum Color {
|
||||||
|
#[default]
|
||||||
|
Auto,
|
||||||
|
Never,
|
||||||
|
Always,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for Color {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
Color::Auto => Display::fmt("auto", f),
|
||||||
|
Color::Never => Display::fmt("never", f),
|
||||||
|
Color::Always => Display::fmt("always", f),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() -> Result<()> {
|
async fn main() -> Result<()> {
|
||||||
let args = Args::parse();
|
let args = Args::parse();
|
||||||
|
|
||||||
|
let env_filter = args
|
||||||
|
.verbose
|
||||||
|
.into_iter()
|
||||||
|
.fold(EnvFilter::from_default_env(), |env, directive| {
|
||||||
|
env.add_directive(directive)
|
||||||
|
});
|
||||||
|
|
||||||
|
let is_stdout_terminal = io::stdout().is_terminal();
|
||||||
|
let use_ansi = match args.color {
|
||||||
|
Color::Auto => is_stdout_terminal || io::stderr().is_terminal(),
|
||||||
|
other => other == Color::Always,
|
||||||
|
};
|
||||||
|
|
||||||
|
Subscriber::builder()
|
||||||
|
.with_env_filter(env_filter)
|
||||||
|
.with_ansi(use_ansi)
|
||||||
|
.with_writer(move || -> Box<dyn io::Write> {
|
||||||
|
// If we're redirecting stdout, use stderr for logs
|
||||||
|
// This makes json output work as expected for redirection
|
||||||
|
if is_stdout_terminal {
|
||||||
|
Box::new(io::stdout())
|
||||||
|
} else {
|
||||||
|
Box::new(io::stderr())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.init();
|
||||||
|
|
||||||
let config = load_config(args.config_file).context("Failed to find a suitable config file")?;
|
let config = load_config(args.config_file).context("Failed to find a suitable config file")?;
|
||||||
match args.cmd {
|
match args.cmd {
|
||||||
Command::Run => handle_run(config).await,
|
Command::Run => handle_run(config).await,
|
||||||
|
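The new -v flag collects comma-separated tracing directives and folds them into an EnvFilter on top of whatever RUST_LOG already sets, which is why Cargo.toml now enables tracing-subscriber's env-filter feature. A small sketch of that fold in isolation; the directive strings (a cloudflare_ddns=debug target and a hyper=warn override) are hypothetical examples, not values taken from the project:

use tracing_subscriber::filter::Directive;
use tracing_subscriber::EnvFilter;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Roughly what clap hands over for `-v cloudflare_ddns=debug,hyper=warn`
    // (hypothetical targets; the flag splits on ',' via value_delimiter).
    let verbose: Vec<Directive> = vec![
        "cloudflare_ddns=debug".parse()?,
        "hyper=warn".parse()?,
    ];

    // Same fold as in main(): start from RUST_LOG, then layer CLI directives on top.
    let filter = verbose
        .into_iter()
        .fold(EnvFilter::from_default_env(), |env, directive| {
            env.add_directive(directive)
        });

    println!("{filter}");
    Ok(())
}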
@ -56,51 +143,52 @@ async fn main() -> Result<()> {
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_run(conf: Config) -> Result<()> {
|
async fn handle_run(conf: Config) -> Result<()> {
|
||||||
let ipv4 = match conf.ip_reflector.ipv4 {
|
let ipv4 = if let Some(addr_to_req) = conf.ip_reflector.ipv4 {
|
||||||
Some(addr_to_req) => Some(
|
let ip = get_ipv4(addr_to_req)
|
||||||
get_ipv4(addr_to_req)
|
.await
|
||||||
.await
|
.context("Failed to query for IPv4 address, bailing.")?;
|
||||||
.context("Failed to query for ipv4 address, bailing.")?,
|
debug!(addr=%ip, "Found reflected IPv4");
|
||||||
),
|
Some(IpAddr::V4(ip))
|
||||||
None => None,
|
} else {
|
||||||
|
info!("No IPv4 reflector endpoint provided. Not updating IPv6 addresses");
|
||||||
|
None
|
||||||
};
|
};
|
||||||
let ipv6 = match conf.ip_reflector.ipv6 {
|
let ipv6 = if let Some(addr_to_req) = conf.ip_reflector.ipv6 {
|
||||||
Some(addr_to_req) => Some(
|
let ip = get_ipv6(addr_to_req)
|
||||||
get_ipv6(addr_to_req)
|
.await
|
||||||
.await
|
.context("Failed to query for IPv6 address, bailing.")?;
|
||||||
.context("Failed to query for ipv4 address, bailing.")?,
|
debug!(addr=%ip, "Found reflected IPv6");
|
||||||
),
|
Some(IpAddr::V6(ip))
|
||||||
None => None,
|
} else {
|
||||||
|
debug!("No IPv6 reflector endpoint provided. Not updating IPv6 addresses");
|
||||||
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let ip_cache_path = ip_cache_path().context("while getting the ip cache path")?;
|
let ip_cache_path = ip_cache_path().context("while getting the ip cache path")?;
|
||||||
|
let mut cache_file = load_ip_cache(&ip_cache_path).context("while loading the ip cache")?;
|
||||||
|
|
||||||
// Only start processing requests if our current IP doesn't match our cached IP
|
let mut rate_limit = time::interval(Duration::from_millis(250));
|
||||||
if !needs_update(&ip_cache_path, ipv4, ipv6).context("while checking the ip cache")? {
|
for (human_readable_name, zone) in conf.zone {
|
||||||
return Ok(());
|
let span = tracing::span!(Level::TRACE, "zone", domain = %human_readable_name);
|
||||||
}
|
let _enter = span.enter();
|
||||||
|
|
||||||
let ipv4_addr = ipv4.map(IpAddr::V4);
|
let records_to_process = zone
|
||||||
let ipv6_addr = ipv6.map(IpAddr::V6);
|
.record
|
||||||
|
.into_iter()
|
||||||
|
.filter(|record| !record.disabled)
|
||||||
|
.filter_map(|record| {
|
||||||
|
// Only process ipv4 entries if we have a reflected ip
|
||||||
|
if record.is_ipv4() {
|
||||||
|
return ipv4.map(|ip| (ip, record));
|
||||||
|
}
|
||||||
|
|
||||||
for zone in conf.zone.into_values() {
|
// Only process ipv6 entries if we have a reflected ip
|
||||||
let zone_id = zone.id;
|
if record.is_ipv6() {
|
||||||
|
return ipv6.map(|ip| (ip, record));
|
||||||
|
}
|
||||||
|
|
||||||
let records_to_process = zone.record.into_iter().filter_map(|record| {
|
None
|
||||||
if record.disabled {
|
});
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ipv4.is_some() && record.is_ipv4() {
|
|
||||||
return Some((&ipv4_addr, record));
|
|
||||||
}
|
|
||||||
|
|
||||||
if ipv6.is_some() && record.is_ipv6() {
|
|
||||||
return Some((&ipv6_addr, record));
|
|
||||||
}
|
|
||||||
|
|
||||||
None
|
|
||||||
});
|
|
||||||
|
|
||||||
for (addr, record) in records_to_process {
|
for (addr, record) in records_to_process {
|
||||||
#[derive(Deserialize, Debug)]
|
#[derive(Deserialize, Debug)]
|
||||||
|
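handle_run (and handle_list below) now paces Cloudflare API calls with a tokio interval that is ticked before every request. A standalone sketch of the pattern, assuming the same 250 ms spacing; note that tokio's first tick resolves immediately, so only subsequent iterations wait:

use std::time::Duration;
use tokio::time;

#[tokio::main]
async fn main() {
    // One tick roughly every 250 ms caps the loop at about four iterations per second.
    let mut rate_limit = time::interval(Duration::from_millis(250));

    for i in 0..3 {
        // The first tick completes immediately; later ticks wait out the interval.
        rate_limit.tick().await;
        println!("request {i} may be sent now");
    }
}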
@@ -111,10 +199,30 @@ async fn handle_run(conf: Config) -> Result<()> {
             messages: Vec<Message>,
         }

-        let record_id = record.id;
+        let span = tracing::span!(Level::TRACE, "record", name = %record);
+        let _enter = span.enter();
+
+        // Can't put this in a filter combinator because cache_file gets
+        // immutably borrowed for the duration of the iterator
+        let cache_entry = cache_file.0.get(&record.id).copied();
+        let should_skip = match cache_entry {
+            entry @ Some(IpAddr::V4(_)) => entry == ipv4,
+            entry @ Some(IpAddr::V6(_)) => entry == ipv6,
+            None => false,
+        };
+
+        if should_skip {
+            debug!("Skipping entry since it was up to date in cache");
+            continue;
+        }
+
+        debug!(cached_ip=?cache_entry, "Need to update entry");
+
+        rate_limit.tick().await;
         let resp: UpdateDnsResponse = reqwest::Client::new()
             .put(format!(
-                "https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records/{record_id}"
+                "https://api.cloudflare.com/client/v4/zones/{}/dns_records/{}",
+                &zone.id, &record.id
             ))
             .header(X_AUTH_EMAIL, &conf.account.email.to_string())
             .header(X_AUTH_KEY, &conf.account.api_key)
@@ -133,6 +241,8 @@ async fn handle_run(conf: Config) -> Result<()> {
             .context("while parsing into a json")?;

         if resp.success {
+            trace!("Update successful");
+            cache_file.0.insert(record.id, addr);
             continue;
         }
     }
@@ -140,59 +250,35 @@ async fn handle_run(conf: Config) -> Result<()> {

     // Updating the ip cache last is better in case we get interrupted. Better
     // to update too frequently than not enough.
-    update_ip_cache(ip_cache_path, ipv4, ipv6)?;
+    update_ip_cache(ip_cache_path, &cache_file).context("while updating the cache file")?;
     Ok(())
 }

-fn update_ip_cache<P: AsRef<Path>>(
-    path: P,
-    ipv4: Option<Ipv4Addr>,
-    ipv6: Option<Ipv6Addr>,
-) -> Result<()> {
-    let data = serde_json::to_string(&CacheFile { ipv4, ipv6 }).expect("serialization to work");
-    std::fs::write(path, data).context("while updating the ip cache file")?;
+fn update_ip_cache<P: AsRef<Path>>(path: P, data: &CacheFile) -> Result<()> {
+    let data = serde_json::to_string(data).expect("serialization to work");
+    std::fs::write(path, data).context("while writing the ip cache file")?;
     Ok(())
 }

-#[derive(Serialize, Deserialize, Default)]
-struct CacheFile {
-    ipv4: Option<Ipv4Addr>,
-    ipv6: Option<Ipv6Addr>,
-}
-
-fn needs_update<P: AsRef<Path>>(
-    path: P,
-    cur_ipv4: Option<Ipv4Addr>,
-    cur_ipv6: Option<Ipv6Addr>,
-) -> Result<bool> {
-    let data = std::fs::read_to_string(path).context("while reading the ip cache file")?;
-    let cache: CacheFile = match serde_json::from_str(&data) {
+#[derive(Serialize, Deserialize, Default, Debug)]
+struct CacheFile(HashMap<String, IpAddr>);
+
+#[instrument(level = "trace", ret)]
+fn load_ip_cache<P: AsRef<Path> + Debug>(path: P) -> Result<CacheFile> {
+    let file = File::options()
+        .create(true)
+        .read(true)
+        .write(true)
+        .open(path)
+        .context("while opening the ip cache file")?;
+    let data = std::io::read_to_string(file).context("while reading the ip cache file")?;
+    Ok(match serde_json::from_str(&data) {
         Ok(cache) => cache,
         Err(e) => {
             warn!("Failed to parse the ip cache file; assuming empty: {e}");
             CacheFile::default()
         }
-    };
-
-    if matches!((cache.ipv4, cur_ipv4), (Some(_), None)) {
-        warn!("Cache previously reported an IPv4 address, but we didn't find one. Not updating IPv4 entries");
-    }
-
-    let update_ipv4 = match (cache.ipv4, cur_ipv4) {
-        (Some(cached), Some(current)) => cached != current,
-        (cached, current) => cached.xor(current).is_some(),
-    };
-
-    if matches!((cache.ipv6, cur_ipv6), (Some(_), None)) {
-        warn!("Cache previously reported an IPv6 address, but we didn't find one. Not updating IPv6 entries");
-    }
-
-    let update_ipv6 = match (cache.ipv6, cur_ipv6) {
-        (Some(cached), Some(current)) => cached != current,
-        (cached, current) => cached.xor(current).is_some(),
-    };
-
-    Ok(update_ipv4 || update_ipv6)
+    })
 }

 fn ip_cache_path() -> Result<PathBuf> {
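The cache moves from one optional IPv4 plus one optional IPv6 address to a CacheFile newtype over HashMap<String, IpAddr>, keyed by DNS record id, so each record's last pushed address is tracked separately. A sketch of the resulting on-disk JSON, mirroring the derives from the diff; the record id and address below are made up for illustration:

use std::collections::HashMap;
use std::net::IpAddr;

use serde::{Deserialize, Serialize};

// Same shape as the new CacheFile newtype: record id -> last address pushed to Cloudflare.
#[derive(Serialize, Deserialize, Default, Debug)]
struct CacheFile(HashMap<String, IpAddr>);

fn main() {
    let mut cache = CacheFile::default();
    // Made-up record id and address, purely for illustration.
    let addr: IpAddr = "203.0.113.7".parse().unwrap();
    cache.0.insert("372e67954025e0ba6aaa6d586b9e0b59".into(), addr);

    // Prints a flat JSON object, e.g. {"372e67954025e0ba6aaa6d586b9e0b59":"203.0.113.7"}
    println!("{}", serde_json::to_string(&cache).unwrap());
}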
@@ -220,11 +306,12 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
                 if known_zones.contains(&maybe_zone_id) {
                     return Some(maybe_zone_id);
                 }

                 if let Some(zone) = conf.zone.get(&maybe_zone_id) {
                     return Some(zone.id.clone());
                 }

-                eprintln!("Unknown zone {maybe_zone_id}, skipping");
+                warn!("Unknown zone {maybe_zone_id}, skipping");
                 None
             })
             .collect()
@@ -232,6 +319,8 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
         None => known_zones.into_iter().cloned().collect(),
     };

+    let mut output = BTreeMap::new();
+    let mut rate_limit = time::interval(Duration::from_millis(250));
     for zone in zones {
         #[derive(Deserialize, Debug)]
         #[allow(dead_code)]
@@ -242,7 +331,7 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
             result: Vec<DnsResponse>,
         }

-        #[derive(Deserialize, Debug, Tabled)]
+        #[derive(Serialize, Deserialize, Debug, Tabled)]
         #[tabled(rename_all = "PascalCase")]
         struct DnsResponse {
             name: String,
@@ -258,6 +347,8 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {
         for page_no in 1.. {
             // This technically requests one more than optimal, but tbh it
             // doesn't really matter
+
+            rate_limit.tick().await;
             let resp: ListZoneResponse = reqwest::Client::new()
                 .get(format!(
                     "https://api.cloudflare.com/client/v4/zones/{zone}/dns_records?type=A,AAAA&page={page_no}"
@@ -275,18 +366,50 @@ async fn handle_list(conf: Config, args: List) -> Result<()> {

             if resp.result.is_empty() {
                 break;
-            } else {
-                entries.extend(resp.result);
             }
+
+            entries.extend(resp.result);
         }

         // Sort by subdomain, with higher level subdomains taking higher precedence than lower ones.
         entries.sort_unstable_by(|a, b| a.name.split('.').rev().cmp(b.name.split('.').rev()));

-        println!(
-            "{}",
-            Table::new(entries).with(Modify::new(Column::from(0)).with(Alignment::right()))
-        );
+        output.insert(zone, entries);
+    }
+
+    let human_readable_mapping: HashMap<_, _> = conf
+        .zone
+        .into_iter()
+        .map(|(human, zone)| (zone.id, human))
+        .collect();
+    match args.output {
+        OutputFormat::Table => {
+            for (zone_id, data) in output {
+                println!(
+                    "{} ({zone_id})\n{}",
+                    human_readable_mapping.get(&zone_id).unwrap(),
+                    Table::new(data).with(Modify::new(Column::from(0)).with(Alignment::right()))
+                );
+            }
+        }
+        OutputFormat::Json => {
+            let map: serde_json::Map<String, serde_json::Value> = output
+                .into_iter()
+                .map(|(zone_id, data)| {
+                    (
+                        human_readable_mapping.get(&zone_id).unwrap().clone(),
+                        json!({
+                            "id": zone_id,
+                            "records": data,
+                        }),
+                    )
+                })
+                .collect();
+            println!(
+                "{}",
+                serde_json::to_string(&map).expect("serialization to work")
+            );
+        }
     }

     Ok(())
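With --output json, handle_list now serializes a map keyed by the human-readable zone name from the config, each value holding the zone id and its records. A sketch of the emitted shape built with serde_json's json! macro; the zone name, zone id, and record fields here are hypothetical placeholders, since the real values are serialized DnsResponse structs whose full field list is not shown in this diff:

use serde_json::json;

fn main() {
    // Hypothetical zone and records, shaped like the map built for OutputFormat::Json:
    // human-readable zone name -> { "id": <zone id>, "records": [serialized DnsResponse] }.
    let output = json!({
        "example.com": {
            "id": "023e105f4ecef8ad9ca31a8372d0c353",
            "records": [
                { "name": "www.example.com" }
            ]
        }
    });

    println!("{output}");
}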
@@ -316,24 +439,45 @@ async fn get_ipv6(url: Url) -> Result<Ipv6Addr> {

 fn load_config(user_provided_path: Option<PathBuf>) -> Option<Config> {
     if let Some(path) = user_provided_path {
-        return load_config_from_path(path);
+        tracing::trace!("User provided path to config");
+        let maybe_config = load_config_from_path(&path);
+        if maybe_config.is_some() {
+            tracing::info!(
+                path = %path.to_string_lossy(),
+                "Loaded config file"
+            );
+        }
+        return maybe_config;
     }

     let file_path = Path::new("./cloudflare-ddns.toml");
     let resolved_path = file_path.canonicalize();
     let resolved_path = resolved_path.as_deref().unwrap_or(file_path);
     if let Some(config) = load_config_from_path(resolved_path) {
+        tracing::info!(
+            path = %resolved_path.to_string_lossy(),
+            "Loaded config file"
+        );
         return Some(config);
     }

-    if let Some(config) = dirs::config_dir()
+    if let Some((path, config)) = dirs::config_dir()
         .map(|path| path.join(file_path))
-        .and_then(load_config_from_path)
+        .and_then(|path| load_config_from_path(&path).map(|conf| (path, conf)))
     {
+        tracing::info!(
+            path = %path.to_string_lossy(),
+            "Loaded config file"
+        );
         return Some(config);
     }

-    load_config_from_path("/etc/cloudflare-ddns.toml")
+    if let Some(config) = load_config_from_path("/etc/cloudflare-ddns.toml") {
+        tracing::info!(path = "/etc/cloudflare-ddns.toml", "Loaded config file");
+        return Some(config);
+    }
+
+    None
 }

 fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {

@@ -345,7 +489,7 @@ fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {
                     "Failed to parse config file at {}: {}",
                     path.as_ref().to_string_lossy(),
                     err
-                )
+                );
             }
         },
         Err(err) => {

@@ -353,7 +497,7 @@ fn load_config_from_path<P: AsRef<Path>>(path: P) -> Option<Config> {
                     "Unable to read the config file at {}: {}",
                     path.as_ref().to_string_lossy(),
                     err
-                )
+                );
             }
         }
         None