tmp.persist(path) .map_err(|e| AllowedSignersError::FileWrite { path: path.to_path_buf(), source: e.error, })?; Ok(()) } } diff --git a/crates/auths-cli/src/adapters/config_store.rs b/crates/auths-cli/src/adapters/config_store.rs new file mode 100644 index 00000000..789c52c2 --- /dev/null +++ b/crates/auths-cli/src/adapters/config_store.rs @@ -0,0 +1,34 @@ +//! File-based adapter for the `ConfigStore` port. + +use std::path::Path; + +use auths_core::ports::config_store::{ConfigStore, ConfigStoreError}; + +/// Reads and writes config files from the local filesystem. +pub struct FileConfigStore; + +impl ConfigStore for FileConfigStore { + fn read(&self, path: &Path) -> Result<Option<String>, ConfigStoreError> { + match std::fs::read_to_string(path) { + Ok(content) => Ok(Some(content)), + Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None), + Err(e) => Err(ConfigStoreError::Read { + path: path.to_path_buf(), + source: e, + }), + } + } + + fn write(&self, path: &Path, content: &str) -> Result<(), ConfigStoreError> { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent).map_err(|e| ConfigStoreError::Write { + path: path.to_path_buf(), + source: e, + })?; + } + std::fs::write(path, content).map_err(|e| ConfigStoreError::Write { + path: path.to_path_buf(), + source: e, + }) + } +} diff --git a/crates/auths-cli/src/adapters/doctor_fixes.rs b/crates/auths-cli/src/adapters/doctor_fixes.rs index f39bd32c..6b768868 100644 --- a/crates/auths-cli/src/adapters/doctor_fixes.rs +++ b/crates/auths-cli/src/adapters/doctor_fixes.rs @@ -43,13 +43,14 @@ impl DiagnosticFix for AllowedSignersFix { let signers_path = ssh_dir.join("allowed_signers"); let storage = RegistryAttestationStorage::new(&self.repo_path); - let mut signers = AllowedSigners::load(&signers_path) + let store = super::allowed_signers_store::FileAllowedSignersStore; + let mut signers = AllowedSigners::load(&signers_path, &store) .unwrap_or_else(|_| AllowedSigners::new(&signers_path)); let
report = signers .sync(&storage) .map_err(|e| DiagnosticError::ExecutionFailed(format!("sync signers: {e}")))?; signers - .save() + .save(&store) .map_err(|e| DiagnosticError::ExecutionFailed(format!("save signers: {e}")))?; let signers_str = signers_path diff --git a/crates/auths-cli/src/adapters/mod.rs b/crates/auths-cli/src/adapters/mod.rs index a0a3714c..292a0895 100644 --- a/crates/auths-cli/src/adapters/mod.rs +++ b/crates/auths-cli/src/adapters/mod.rs @@ -1,4 +1,6 @@ pub mod agent; +pub mod allowed_signers_store; +pub mod config_store; pub mod doctor_fixes; pub mod git_config; pub mod local_file; diff --git a/crates/auths-cli/src/bin/sign.rs b/crates/auths-cli/src/bin/sign.rs index c59ca218..783fffd8 100644 --- a/crates/auths-cli/src/bin/sign.rs +++ b/crates/auths-cli/src/bin/sign.rs @@ -34,6 +34,7 @@ use std::sync::Arc; use anyhow::{Context, Result, anyhow, bail}; use clap::Parser; +use auths_cli::adapters::config_store::FileConfigStore; use auths_cli::core::pubkey_cache::get_cached_pubkey; use auths_cli::factories::build_agent_provider; use auths_core::config::{EnvironmentConfig, load_config}; @@ -117,7 +118,7 @@ fn build_signing_context(alias: &str) -> Result { if let Some(passphrase) = env_config.keychain.passphrase.clone() { Arc::new(auths_core::PrefilledPassphraseProvider::new(&passphrase)) } else { - let config = load_config(); + let config = load_config(&FileConfigStore); let cache = get_passphrase_cache(config.passphrase.biometric); let ttl_secs = config .passphrase diff --git a/crates/auths-cli/src/commands/config.rs b/crates/auths-cli/src/commands/config.rs index 0506cd7b..e73b035b 100644 --- a/crates/auths-cli/src/commands/config.rs +++ b/crates/auths-cli/src/commands/config.rs @@ -4,6 +4,8 @@ use crate::commands::executable::ExecutableCommand; use crate::config::CliConfig; use anyhow::{Result, bail}; use auths_core::config::{AuthsConfig, PassphraseCachePolicy, load_config, save_config}; + +use crate::adapters::config_store::FileConfigStore; use 
clap::{Parser, Subcommand}; /// Manage Auths configuration. @@ -44,7 +46,8 @@ impl ExecutableCommand for ConfigCommand { } fn execute_set(key: &str, value: &str) -> Result<()> { - let mut config = load_config(); + let store = FileConfigStore; + let mut config = load_config(&store); match key { "passphrase.cache" => { @@ -68,13 +71,13 @@ fn execute_set(key: &str, value: &str) -> Result<()> { ), } - save_config(&config)?; + save_config(&config, &store)?; println!("Set {} = {}", key, value); Ok(()) } fn execute_get(key: &str) -> Result<()> { - let config = load_config(); + let config = load_config(&FileConfigStore); match key { "passphrase.cache" => { @@ -100,7 +103,7 @@ fn execute_get(key: &str) -> Result<()> { } fn execute_show() -> Result<()> { - let config = load_config(); + let config = load_config(&FileConfigStore); let toml_str = toml::to_string_pretty(&config) .map_err(|e| anyhow::anyhow!("Failed to serialize config: {}", e))?; println!("{}", toml_str); @@ -138,7 +141,8 @@ fn parse_bool(s: &str) -> Result { } fn _ensure_default_config_exists() -> Result { - let config = load_config(); - save_config(&config)?; + let store = FileConfigStore; + let config = load_config(&store); + save_config(&config, &store)?; Ok(config) } diff --git a/crates/auths-cli/src/commands/doctor.rs b/crates/auths-cli/src/commands/doctor.rs index 78ffb076..0e0eb410 100644 --- a/crates/auths-cli/src/commands/doctor.rs +++ b/crates/auths-cli/src/commands/doctor.rs @@ -308,7 +308,10 @@ fn check_allowed_signers_file() -> Check { Some(path_str) => { let file_path = std::path::Path::new(&path_str); if file_path.exists() { - match AllowedSigners::load(file_path) { + match AllowedSigners::load( + file_path, + &crate::adapters::allowed_signers_store::FileAllowedSignersStore, + ) { Ok(signers) => { let entries = signers.list(); let attestation_count = entries diff --git a/crates/auths-cli/src/commands/git.rs b/crates/auths-cli/src/commands/git.rs index 21ac3ac5..e6bb60bd 100644 --- 
a/crates/auths-cli/src/commands/git.rs +++ b/crates/auths-cli/src/commands/git.rs @@ -132,7 +132,9 @@ fn handle_install_hooks( let mut signers = AllowedSigners::new(&cmd.allowed_signers_path); match signers.sync(&storage) { Ok(report) => { - if let Err(e) = signers.save() { + if let Err(e) = + signers.save(&crate::adapters::allowed_signers_store::FileAllowedSignersStore) + { eprintln!("Warning: Could not write allowed_signers: {}", e); } else { println!( diff --git a/crates/auths-cli/src/commands/init/helpers.rs b/crates/auths-cli/src/commands/init/helpers.rs index 25852935..590eb46e 100644 --- a/crates/auths-cli/src/commands/init/helpers.rs +++ b/crates/auths-cli/src/commands/init/helpers.rs @@ -107,13 +107,14 @@ pub(crate) fn write_allowed_signers(key_alias: &str, out: &Output) -> Result<()> std::fs::create_dir_all(&ssh_dir)?; let signers_path = ssh_dir.join("allowed_signers"); - let mut signers = - AllowedSigners::load(&signers_path).unwrap_or_else(|_| AllowedSigners::new(&signers_path)); + let store = crate::adapters::allowed_signers_store::FileAllowedSignersStore; + let mut signers = AllowedSigners::load(&signers_path, &store) + .unwrap_or_else(|_| AllowedSigners::new(&signers_path)); let report = signers .sync(&storage) .map_err(|e| anyhow!("Failed to sync allowed signers: {}", e))?; signers - .save() + .save(&store) .map_err(|e| anyhow!("Failed to write allowed signers: {}", e))?; let signers_str = signers_path diff --git a/crates/auths-cli/src/commands/signers.rs b/crates/auths-cli/src/commands/signers.rs index c1d91f02..a8f8da46 100644 --- a/crates/auths-cli/src/commands/signers.rs +++ b/crates/auths-cli/src/commands/signers.rs @@ -11,6 +11,7 @@ use ssh_key::PublicKey as SshPublicKey; use std::path::PathBuf; use super::git::expand_tilde; +use crate::adapters::allowed_signers_store::FileAllowedSignersStore; #[derive(Parser, Debug, Clone)] #[command(about = "Manage allowed signers for Git commit verification.")] @@ -98,7 +99,7 @@ fn resolve_signers_path() -> 
Result<PathBuf> { fn handle_list(args: &SignersListArgs) -> Result<()> { let path = resolve_signers_path()?; - let signers = AllowedSigners::load(&path) + let signers = AllowedSigners::load(&path, &FileAllowedSignersStore) .with_context(|| format!("Failed to load {}", path.display()))?; if args.json { @@ -134,7 +135,7 @@ fn handle_list(args: &SignersListArgs) -> Result<()> { fn handle_add(args: &SignersAddArgs) -> Result<()> { let path = resolve_signers_path()?; - let mut signers = AllowedSigners::load(&path) + let mut signers = AllowedSigners::load(&path, &FileAllowedSignersStore) .with_context(|| format!("Failed to load {}", path.display()))?; let principal = SignerPrincipal::Email( @@ -147,7 +148,7 @@ fn handle_add(args: &SignersAddArgs) -> Result<()> { .add(principal, pubkey, SignerSource::Manual) .map_err(|e| anyhow::anyhow!("{}", e))?; signers - .save() + .save(&FileAllowedSignersStore) .with_context(|| format!("Failed to write {}", path.display()))?; println!("Added {} to {}", args.email, path.display()); @@ -156,7 +157,7 @@ fn handle_add(args: &SignersAddArgs) -> Result<()> { fn handle_remove(args: &SignersRemoveArgs) -> Result<()> { let path = resolve_signers_path()?; - let mut signers = AllowedSigners::load(&path) + let mut signers = AllowedSigners::load(&path, &FileAllowedSignersStore) .with_context(|| format!("Failed to load {}", path.display()))?; let principal = SignerPrincipal::Email( @@ -166,7 +167,7 @@ fn handle_remove(args: &SignersRemoveArgs) -> Result<()> { match signers.remove(&principal) { Ok(true) => { signers - .save() + .save(&FileAllowedSignersStore) .with_context(|| format!("Failed to write {}", path.display()))?; println!("Removed {} from {}", args.email, path.display()); } @@ -196,7 +197,7 @@ fn handle_sync(args: &SignersSyncArgs) -> Result<()> { resolve_signers_path()?
}; - let mut signers = AllowedSigners::load(&path) + let mut signers = AllowedSigners::load(&path, &FileAllowedSignersStore) .with_context(|| format!("Failed to load {}", path.display()))?; let report = signers @@ -204,7 +205,7 @@ fn handle_sync(args: &SignersSyncArgs) -> Result<()> { .context("Failed to sync attestations")?; signers - .save() + .save(&FileAllowedSignersStore) .with_context(|| format!("Failed to write {}", path.display()))?; println!( @@ -244,7 +245,7 @@ fn handle_add_from_github(args: &SignersAddFromGithubArgs) -> Result<()> { } let path = resolve_signers_path()?; - let mut signers = AllowedSigners::load(&path) + let mut signers = AllowedSigners::load(&path, &FileAllowedSignersStore) .with_context(|| format!("Failed to load {}", path.display()))?; let email = format!("{}@github.com", args.username); @@ -282,7 +283,7 @@ fn handle_add_from_github(args: &SignersAddFromGithubArgs) -> Result<()> { if added > 0 { signers - .save() + .save(&FileAllowedSignersStore) .with_context(|| format!("Failed to write {}", path.display()))?; println!( "Added {} key(s) for {} to {}", diff --git a/crates/auths-core/clippy.toml b/crates/auths-core/clippy.toml new file mode 100644 index 00000000..30374cbb --- /dev/null +++ b/crates/auths-core/clippy.toml @@ -0,0 +1,51 @@ +# Duplicated from workspace clippy.toml — keep in sync +# Clippy does NOT merge per-crate configs with workspace config. +# Any changes to the workspace clippy.toml must be replicated here. 
+ +allow-unwrap-in-tests = true +allow-expect-in-tests = true + +disallowed-methods = [ + # === Workspace rules (duplicated from root clippy.toml) === + { path = "chrono::offset::Utc::now", reason = "inject ClockProvider instead of calling Utc::now() directly", allow-invalid = true }, + { path = "std::time::SystemTime::now", reason = "inject ClockProvider instead of calling SystemTime::now() directly", allow-invalid = true }, + { path = "std::env::var", reason = "use EnvironmentConfig abstraction instead of reading env vars directly", allow-invalid = true }, + { path = "uuid::Uuid::new_v4", reason = "Use UuidProvider::new_id() instead. Inject SystemUuidProvider in production and DeterministicUuidProvider in tests." }, + + # === Sans-IO: filesystem === + { path = "std::fs::read", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_to_string", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::write", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_file", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::copy", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::rename", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::metadata", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::canonicalize", reason = "sans-IO crate — use a port trait" }, + + # === Sans-IO: process === + { path = "std::process::Command::new", reason = "sans-IO crate — use a port trait" }, + { path = "std::process::exit", reason = "sans-IO crate — return errors 
instead" }, + + # === Sans-IO: dirs === + { path = "dirs::home_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::config_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::data_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::data_local_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + + # === Sans-IO: network === + { path = "reqwest::Client::new", reason = "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, + { path = "reqwest::get", reason = "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, +] + +disallowed-types = [ + { path = "std::fs::File", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::OpenOptions", reason = "sans-IO crate — use a port trait" }, + { path = "std::process::Command", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpStream", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpListener", reason = "sans-IO crate — use a port trait" }, +] diff --git a/crates/auths-core/src/agent/handle.rs b/crates/auths-core/src/agent/handle.rs index afdf2676..4ad0c6e2 100644 --- a/crates/auths-core/src/agent/handle.rs +++ b/crates/auths-core/src/agent/handle.rs @@ -240,6 +240,7 @@ impl AgentHandle { /// 1. Clears all keys from the agent core (zeroizing sensitive data) /// 2. Marks the agent as not running /// 3. 
Optionally removes the socket file and PID file + #[allow(clippy::disallowed_methods)] // INVARIANT: daemon lifecycle — socket/PID cleanup is inherently I/O pub fn shutdown(&self) -> Result<(), AgentError> { log::info!("Shutting down agent at {:?}", self.socket_path); @@ -332,6 +333,7 @@ impl Clone for AgentHandle { } #[cfg(test)] +#[allow(clippy::disallowed_methods)] mod tests { use super::*; use ring::rand::SystemRandom; diff --git a/crates/auths-core/src/api/ffi.rs b/crates/auths-core/src/api/ffi.rs index 9e129496..520dd97f 100644 --- a/crates/auths-core/src/api/ffi.rs +++ b/crates/auths-core/src/api/ffi.rs @@ -67,6 +67,7 @@ static FFI_AGENT: LazyLock>>> = LazyLock::new(|| /// - 1 if the socket path is invalid /// - FFI_ERR_PANIC (-127) if a panic occurred #[unsafe(no_mangle)] +#[allow(clippy::disallowed_methods)] // INVARIANT: FFI boundary — home-dir fallback for default socket path pub unsafe extern "C" fn ffi_init_agent(socket_path: *const c_char) -> c_int { let result = panic::catch_unwind(|| { let path_str = match unsafe { c_str_to_str_safe(socket_path) } { diff --git a/crates/auths-core/src/api/runtime.rs b/crates/auths-core/src/api/runtime.rs index bbc4edef..5181a57b 100644 --- a/crates/auths-core/src/api/runtime.rs +++ b/crates/auths-core/src/api/runtime.rs @@ -526,6 +526,9 @@ pub fn get_agent_key_count_with_handle(handle: &AgentHandle) -> Result, @@ -723,6 +726,7 @@ fn register_keys_with_macos_agent_internal( /// - `Ok(())` if the listener starts successfully (runs indefinitely). /// - `Err(AgentError)` if binding/setup fails or the listener loop exits with an error. 
#[cfg(unix)] +#[allow(clippy::disallowed_methods)] // INVARIANT: Unix socket lifecycle — socket dir creation and cleanup is inherently I/O pub async fn start_agent_listener_with_handle(handle: Arc) -> Result<(), AgentError> { let socket_path = handle.socket_path(); info!("Attempting to start agent listener at {:?}", socket_path); diff --git a/crates/auths-core/src/config.rs b/crates/auths-core/src/config.rs index 9234c46a..53ea3d9d 100644 --- a/crates/auths-core/src/config.rs +++ b/crates/auths-core/src/config.rs @@ -304,23 +304,26 @@ pub struct AuthsConfig { /// Loads `~/.auths/config.toml`, returning defaults on any error. /// +/// Args: +/// * `store`: The config store implementation for file I/O. +/// /// Usage: /// ```ignore -/// let config = auths_core::config::load_config(); +/// let config = auths_core::config::load_config(&file_store); /// match config.passphrase.cache { /// PassphraseCachePolicy::Always => { /* ... */ } /// _ => {} /// } /// ``` -pub fn load_config() -> AuthsConfig { +pub fn load_config(store: &dyn crate::ports::config_store::ConfigStore) -> AuthsConfig { let home = match auths_home() { Ok(h) => h, Err(_) => return AuthsConfig::default(), }; let path = home.join("config.toml"); - match std::fs::read_to_string(&path) { - Ok(contents) => toml::from_str(&contents).unwrap_or_default(), - Err(_) => AuthsConfig::default(), + match store.read(&path) { + Ok(Some(contents)) => toml::from_str(&contents).unwrap_or_default(), + _ => AuthsConfig::default(), } } @@ -328,18 +331,28 @@ pub fn load_config() -> AuthsConfig { /// /// Args: /// * `config`: The configuration to persist. +/// * `store`: The config store implementation for file I/O. 
/// /// Usage: /// ```ignore -/// let mut config = load_config(); +/// let mut config = load_config(&file_store); /// config.passphrase.cache = PassphraseCachePolicy::Always; -/// save_config(&config)?; +/// save_config(&config, &file_store)?; /// ``` -pub fn save_config(config: &AuthsConfig) -> Result<(), std::io::Error> { - let home = auths_home().map_err(|e| std::io::Error::other(e.to_string()))?; - std::fs::create_dir_all(&home)?; +pub fn save_config( + config: &AuthsConfig, + store: &dyn crate::ports::config_store::ConfigStore, +) -> Result<(), crate::ports::config_store::ConfigStoreError> { + let home = auths_home().map_err(|e| crate::ports::config_store::ConfigStoreError::Write { + path: PathBuf::from("~/.auths"), + source: std::io::Error::other(e.to_string()), + })?; let path = home.join("config.toml"); - let contents = - toml::to_string_pretty(config).map_err(|e| std::io::Error::other(e.to_string()))?; - std::fs::write(&path, contents) + let contents = toml::to_string_pretty(config).map_err(|e| { + crate::ports::config_store::ConfigStoreError::Write { + path: path.clone(), + source: std::io::Error::other(e.to_string()), + } + })?; + store.write(&path, &contents) } diff --git a/crates/auths-core/src/paths.rs b/crates/auths-core/src/paths.rs index 54d2d373..02a1a7d0 100644 --- a/crates/auths-core/src/paths.rs +++ b/crates/auths-core/src/paths.rs @@ -23,6 +23,7 @@ use crate::config::EnvironmentConfig; /// let env = auths_core::config::EnvironmentConfig::from_env(); /// let dir = auths_core::paths::auths_home_with_config(&env)?; /// ``` +#[allow(clippy::disallowed_methods)] // INVARIANT: designated home-dir resolution boundary — dirs::home_dir is the OS-level fallback pub fn auths_home_with_config(config: &EnvironmentConfig) -> Result { if let Some(ref home) = config.auths_home { return Ok(home.clone()); diff --git a/crates/auths-core/src/ports/config_store.rs b/crates/auths-core/src/ports/config_store.rs new file mode 100644 index 00000000..08876dda --- 
/dev/null +++ b/crates/auths-core/src/ports/config_store.rs @@ -0,0 +1,47 @@ +//! Config file I/O port for reading and writing `config.toml`. + +use std::path::PathBuf; + +use thiserror::Error; + +/// Errors that can occur during config store operations. +#[derive(Debug, Error)] +pub enum ConfigStoreError { + /// Failed to read the config file. + #[error("failed to read config from {path}")] + Read { + /// The path that could not be read. + path: PathBuf, + /// The underlying I/O error. + #[source] + source: std::io::Error, + }, + /// Failed to write the config file. + #[error("failed to write config to {path}")] + Write { + /// The path that could not be written. + path: PathBuf, + /// The underlying I/O error. + #[source] + source: std::io::Error, + }, +} + +/// Abstracts filesystem access for config file operations. + +/// Args: +/// * `path` - The path to the config file. +/// +/// Usage: +/// ```ignore +/// let content = store.read(Path::new("~/.auths/config.toml"))?; +/// store.write(Path::new("~/.auths/config.toml"), "content")?; +/// ``` +pub trait ConfigStore: Send + Sync { + /// Read the config file content. + /// Returns `None` if the file does not exist. + fn read(&self, path: &std::path::Path) -> Result<Option<String>, ConfigStoreError>; + + /// Write content to the config file, creating parent dirs as needed. + fn write(&self, path: &std::path::Path, content: &str) -> Result<(), ConfigStoreError>; +} diff --git a/crates/auths-core/src/ports/mod.rs b/crates/auths-core/src/ports/mod.rs index c5608de0..8e994b8e 100644 --- a/crates/auths-core/src/ports/mod.rs +++ b/crates/auths-core/src/ports/mod.rs @@ -1,6 +1,8 @@ //! Port traits for dependency injection. pub mod clock; +/// Config file I/O port for reading and writing `config.toml`. +pub mod config_store; pub mod id; pub mod network; /// Pairing relay client port for session-based device pairing.
diff --git a/crates/auths-core/src/storage/encrypted_file.rs b/crates/auths-core/src/storage/encrypted_file.rs index 27735cd9..74876cec 100644 --- a/crates/auths-core/src/storage/encrypted_file.rs +++ b/crates/auths-core/src/storage/encrypted_file.rs @@ -13,6 +13,8 @@ use chacha20poly1305::{ }; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +#[allow(clippy::disallowed_types)] +// INVARIANT: file-backed keychain adapter — these types are its core purpose use std::fs::{self, File, OpenOptions}; use std::io::{Read, Write}; use std::path::PathBuf; @@ -65,6 +67,8 @@ pub struct EncryptedFileStorage { password: Mutex>>, } +#[allow(clippy::disallowed_methods)] // INVARIANT: file-backed keychain adapter — I/O is its purpose +#[allow(clippy::disallowed_types)] impl EncryptedFileStorage { /// Create a new EncryptedFileStorage with default path (`/keys.enc`). /// @@ -283,6 +287,8 @@ impl EncryptedFileStorage { } } +#[allow(clippy::disallowed_methods)] // INVARIANT: file-backed keychain adapter +#[allow(clippy::disallowed_types)] impl KeyStorage for EncryptedFileStorage { fn store_key( &self, @@ -386,6 +392,8 @@ impl KeyStorage for EncryptedFileStorage { } #[cfg(test)] +#[allow(clippy::disallowed_methods)] +#[allow(clippy::disallowed_types)] mod tests { use super::*; use tempfile::TempDir; diff --git a/crates/auths-core/src/storage/windows_credential.rs b/crates/auths-core/src/storage/windows_credential.rs index 9360b9ae..59f66268 100644 --- a/crates/auths-core/src/storage/windows_credential.rs +++ b/crates/auths-core/src/storage/windows_credential.rs @@ -64,6 +64,8 @@ struct AliasIndex { aliases: HashMap, } +#[allow(clippy::disallowed_methods)] // INVARIANT: Windows credential adapter — file-based alias index and dirs are inherently I/O +#[allow(clippy::disallowed_types)] impl WindowsCredentialStorage { /// Create a new WindowsCredentialStorage with the given service name. 
/// diff --git a/crates/auths-core/src/testing/builder.rs b/crates/auths-core/src/testing/builder.rs index bec1c795..da6362e8 100644 --- a/crates/auths-core/src/testing/builder.rs +++ b/crates/auths-core/src/testing/builder.rs @@ -180,6 +180,8 @@ impl TestIdentityBuilder { /// - Temporary directory creation fails /// - Git repository initialization fails /// - Key generation or storage fails + #[allow(clippy::disallowed_methods)] // INVARIANT: test helper — needs real git init and temp dirs + #[allow(clippy::disallowed_types)] pub fn build(self) -> Result { // Create temp directory for Git repo let temp_dir = TempDir::new().map_err(|e| { diff --git a/crates/auths-core/src/testing/fake_config_store.rs b/crates/auths-core/src/testing/fake_config_store.rs new file mode 100644 index 00000000..a6efc37c --- /dev/null +++ b/crates/auths-core/src/testing/fake_config_store.rs @@ -0,0 +1,105 @@ +//! In-memory fake for the `ConfigStore` port. + +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::sync::Mutex; + +use crate::ports::config_store::{ConfigStore, ConfigStoreError}; + +/// In-memory fake for [`ConfigStore`]. + +/// +/// Stores file contents in a `HashMap` and records write calls. +/// +/// Usage: +/// ```ignore +/// let store = FakeConfigStore::new(); +/// let store = FakeConfigStore::new().with_content(path, "toml content"); +/// ``` +pub struct FakeConfigStore { + files: Mutex<HashMap<PathBuf, String>>, + write_calls: Mutex<Vec<(PathBuf, String)>>, + fail_on_write: Mutex<Option<String>>, +} + +impl Default for FakeConfigStore { + fn default() -> Self { + Self::new() + } +} + +impl FakeConfigStore { + /// Create an empty fake with no files. + pub fn new() -> Self { + Self { + files: Mutex::new(HashMap::new()), + write_calls: Mutex::new(Vec::new()), + fail_on_write: Mutex::new(None), + } + } + + /// Pre-populate a file with content.
+ pub fn with_content(self, path: &Path, content: &str) -> Self { + self.files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .insert(path.to_path_buf(), content.to_string()); + self + } + + /// Configure all writes to fail with the given message. + pub fn write_fails_with(self, msg: &str) -> Self { + *self.fail_on_write.lock().unwrap_or_else(|e| e.into_inner()) = Some(msg.to_string()); + self + } + + /// Return recorded write calls as `(path, content)` pairs. + pub fn write_calls(&self) -> Vec<(PathBuf, String)> { + self.write_calls + .lock() + .unwrap_or_else(|e| e.into_inner()) + .clone() + } + + /// Read file content from the in-memory store (for test assertions). + pub fn content(&self, path: &Path) -> Option<String> { + self.files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .get(path) + .cloned() + } +} + +impl ConfigStore for FakeConfigStore { + fn read(&self, path: &Path) -> Result<Option<String>, ConfigStoreError> { + Ok(self + .files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .get(path) + .cloned()) + } + + fn write(&self, path: &Path, content: &str) -> Result<(), ConfigStoreError> { + if let Some(msg) = self + .fail_on_write + .lock() + .unwrap_or_else(|e| e.into_inner()) + .as_ref() + { + return Err(ConfigStoreError::Write { + path: path.to_path_buf(), + source: std::io::Error::new(std::io::ErrorKind::PermissionDenied, msg.clone()), + }); + } + self.write_calls + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push((path.to_path_buf(), content.to_string())); + self.files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .insert(path.to_path_buf(), content.to_string()); + Ok(()) + } +} diff --git a/crates/auths-core/src/testing/mod.rs b/crates/auths-core/src/testing/mod.rs index ef23e24c..fd77b705 100644 --- a/crates/auths-core/src/testing/mod.rs +++ b/crates/auths-core/src/testing/mod.rs @@ -38,6 +38,7 @@ mod builder; mod deterministic_uuid; +mod fake_config_store; mod in_memory_storage; // Re-export test utilities from storage::memory @@ -51,4 +52,5 @@ pub 
use builder::{TestIdentity, TestIdentityBuilder, TestPassphraseProvider}; // Re-export deterministic UUID provider and in-memory storage pub use deterministic_uuid::DeterministicUuidProvider; +pub use fake_config_store::FakeConfigStore; pub use in_memory_storage::InMemoryStorage; diff --git a/crates/auths-core/src/trust/pinned.rs b/crates/auths-core/src/trust/pinned.rs index 4141fc1d..683e1e58 100644 --- a/crates/auths-core/src/trust/pinned.rs +++ b/crates/auths-core/src/trust/pinned.rs @@ -100,6 +100,8 @@ pub struct PinnedIdentityStore { path: PathBuf, } +#[allow(clippy::disallowed_methods)] // INVARIANT: PinnedIdentityStore is a file-backed adapter — I/O is its purpose +#[allow(clippy::disallowed_types)] impl PinnedIdentityStore { /// Create a store at the given path. pub fn new(path: PathBuf) -> Self { @@ -107,6 +109,7 @@ impl PinnedIdentityStore { } /// Default path: `~/.auths/known_identities.json` + #[allow(clippy::disallowed_methods)] // INVARIANT: designated home-dir resolution for pin store default pub fn default_path() -> PathBuf { dirs::home_dir() .unwrap_or_else(|| PathBuf::from(".")) @@ -233,10 +236,13 @@ impl PinnedIdentityStore { /// /// The lock file is NOT deleted on drop. Deleting creates a race where two /// threads acquire flock on different inodes simultaneously. +#[allow(clippy::disallowed_types)] // INVARIANT: file-lock guard — holds an open file descriptor struct LockGuard { _file: fs::File, } +#[allow(clippy::disallowed_methods)] // INVARIANT: file locking is inherently I/O +#[allow(clippy::disallowed_types)] impl LockGuard { fn acquire(path: PathBuf) -> Result { let file = fs::OpenOptions::new() diff --git a/crates/auths-core/src/trust/roots_file.rs b/crates/auths-core/src/trust/roots_file.rs index 2246d70b..ba6df6fd 100644 --- a/crates/auths-core/src/trust/roots_file.rs +++ b/crates/auths-core/src/trust/roots_file.rs @@ -56,19 +56,11 @@ pub struct RootEntry { } impl RootsFile { - /// Load and validate a roots.json file. 
- /// - /// # Errors + /// Parse roots from a JSON string (pure — no I/O). /// - /// Returns an error if: - /// - The file cannot be read - /// - The JSON is malformed - /// - The version is not 1 - /// - Any public_key_hex is invalid (not valid hex, wrong length) - pub fn load(path: &Path) -> Result { - let content = std::fs::read_to_string(path)?; - - let file: Self = serde_json::from_str(&content)?; + /// Prefer this over `load` when the caller already has the content. + pub fn parse(content: &str) -> Result { + let file: Self = serde_json::from_str(content)?; if file.version != 1 { return Err(TrustError::InvalidData(format!( @@ -96,6 +88,13 @@ impl RootsFile { Ok(file) } + /// Load and validate a roots.json file. + #[allow(clippy::disallowed_methods)] // INVARIANT: convenience wrapper; prefer parse() for sans-IO callers + pub fn load(path: &Path) -> Result { + let content = std::fs::read_to_string(path)?; + Self::parse(&content) + } + /// Find a root entry by DID. pub fn find(&self, did: &str) -> Option<&RootEntry> { self.roots.iter().find(|r| r.did == did) @@ -116,6 +115,8 @@ impl RootEntry { } #[cfg(test)] +#[allow(clippy::disallowed_methods)] +#[allow(clippy::disallowed_types)] mod tests { use super::*; use std::io::Write; diff --git a/crates/auths-core/tests/integration.rs b/crates/auths-core/tests/integration.rs index d5dc4c4c..4c66f947 100644 --- a/crates/auths-core/tests/integration.rs +++ b/crates/auths-core/tests/integration.rs @@ -1,5 +1,6 @@ #![allow( clippy::disallowed_methods, + clippy::disallowed_types, clippy::print_stdout, clippy::print_stderr, clippy::unwrap_used, diff --git a/crates/auths-crypto/clippy.toml b/crates/auths-crypto/clippy.toml new file mode 100644 index 00000000..a00552bc --- /dev/null +++ b/crates/auths-crypto/clippy.toml @@ -0,0 +1,49 @@ +# Duplicated from workspace clippy.toml — keep in sync +# Clippy does NOT merge per-crate configs with workspace config. +# Any changes to the workspace clippy.toml must be replicated here. 
+ +allow-unwrap-in-tests = true +allow-expect-in-tests = true + +disallowed-methods = [ + # === Workspace rules (duplicated from root clippy.toml) === + { path = "chrono::offset::Utc::now", reason = "inject ClockProvider instead of calling Utc::now() directly", allow-invalid = true }, + { path = "std::time::SystemTime::now", reason = "inject ClockProvider instead of calling SystemTime::now() directly", allow-invalid = true }, + { path = "std::env::var", reason = "use EnvironmentConfig abstraction instead of reading env vars directly", allow-invalid = true }, + { path = "uuid::Uuid::new_v4", reason = "Use UuidProvider::new_id() instead. Inject SystemUuidProvider in production and DeterministicUuidProvider in tests." }, + + # === Sans-IO: filesystem === + { path = "std::fs::read", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_to_string", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::write", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_file", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::copy", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::rename", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::metadata", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::canonicalize", reason = "sans-IO crate — use a port trait" }, + + # === Sans-IO: process === + { path = "std::process::Command::new", reason = "sans-IO crate — use a port trait" }, + { path = "std::process::exit", reason = "sans-IO crate — return errors 
instead" }, + + # === Sans-IO: dirs === + { path = "dirs::home_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::config_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::data_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + + # === Sans-IO: network === + { path = "reqwest::Client::new", reason = "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, + { path = "reqwest::get", reason = "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, +] + +disallowed-types = [ + { path = "std::fs::File", reason = "sans-IO crate — use a port trait" }, + { path = "std::process::Command", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpStream", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpListener", reason = "sans-IO crate — use a port trait" }, +] diff --git a/crates/auths-id/clippy.toml b/crates/auths-id/clippy.toml new file mode 100644 index 00000000..30374cbb --- /dev/null +++ b/crates/auths-id/clippy.toml @@ -0,0 +1,51 @@ +# Duplicated from workspace clippy.toml — keep in sync +# Clippy does NOT merge per-crate configs with workspace config. +# Any changes to the workspace clippy.toml must be replicated here. + +allow-unwrap-in-tests = true +allow-expect-in-tests = true + +disallowed-methods = [ + # === Workspace rules (duplicated from root clippy.toml) === + { path = "chrono::offset::Utc::now", reason = "inject ClockProvider instead of calling Utc::now() directly", allow-invalid = true }, + { path = "std::time::SystemTime::now", reason = "inject ClockProvider instead of calling SystemTime::now() directly", allow-invalid = true }, + { path = "std::env::var", reason = "use EnvironmentConfig abstraction instead of reading env vars directly", allow-invalid = true }, + { path = "uuid::Uuid::new_v4", reason = "Use UuidProvider::new_id() instead. 
Inject SystemUuidProvider in production and DeterministicUuidProvider in tests." }, + + # === Sans-IO: filesystem === + { path = "std::fs::read", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_to_string", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::write", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_file", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::copy", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::rename", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::metadata", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::canonicalize", reason = "sans-IO crate — use a port trait" }, + + # === Sans-IO: process === + { path = "std::process::Command::new", reason = "sans-IO crate — use a port trait" }, + { path = "std::process::exit", reason = "sans-IO crate — return errors instead" }, + + # === Sans-IO: dirs === + { path = "dirs::home_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::config_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::data_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::data_local_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + + # === Sans-IO: network === + { path = "reqwest::Client::new", reason = "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, + { path = "reqwest::get", reason 
= "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, +] + +disallowed-types = [ + { path = "std::fs::File", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::OpenOptions", reason = "sans-IO crate — use a port trait" }, + { path = "std::process::Command", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpStream", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpListener", reason = "sans-IO crate — use a port trait" }, +] diff --git a/crates/auths-id/src/agent_identity.rs b/crates/auths-id/src/agent_identity.rs index 35f31286..2d2ee16a 100644 --- a/crates/auths-id/src/agent_identity.rs +++ b/crates/auths-id/src/agent_identity.rs @@ -187,6 +187,7 @@ fn resolve_repo_path(mode: &AgentStorageMode) -> Result<(PathBuf, bool), AgentPr } } +#[allow(clippy::disallowed_methods)] // INVARIANT: agent repo setup — directory creation before git init fn ensure_git_repo(path: &Path) -> Result<(), AgentProvisioningError> { if !path.exists() { std::fs::create_dir_all(path)?; @@ -197,6 +198,7 @@ fn ensure_git_repo(path: &Path) -> Result<(), AgentProvisioningError> { Ok(()) } +#[allow(clippy::disallowed_methods)] // INVARIANT: designated home-dir resolution for agent repo default path fn default_agent_repo_path() -> Result { let home = dirs::home_dir().ok_or_else(|| { AgentProvisioningError::ConfigWrite(std::io::Error::new( @@ -337,6 +339,7 @@ fn build_attestation_meta( // ── Config File ───────────────────────────────────────────────────────────── +#[allow(clippy::disallowed_methods)] // INVARIANT: agent config file write — one-shot file creation during provisioning fn write_agent_toml( repo_path: &Path, did: &str, @@ -383,6 +386,7 @@ pub fn format_agent_toml(did: &str, key_alias: &str, config: &AgentProvisioningC // ── Tests ─────────────────────────────────────────────────────────────────── #[cfg(test)] +#[allow(clippy::disallowed_methods)] mod tests { use super::*; diff --git 
a/crates/auths-id/src/freeze.rs b/crates/auths-id/src/freeze.rs index 78516657..05d52457 100644 --- a/crates/auths-id/src/freeze.rs +++ b/crates/auths-id/src/freeze.rs @@ -72,6 +72,7 @@ pub fn freeze_file_path(repo_path: &Path) -> PathBuf { /// Args: /// * `repo_path`: Path to the identity repository. /// * `now`: The reference time used to check if the freeze is still active. +#[allow(clippy::disallowed_methods)] // INVARIANT: freeze state is a simple file — extracting a port adds complexity for 2 lines of I/O pub fn load_active_freeze( repo_path: &Path, now: DateTime, @@ -93,6 +94,7 @@ pub fn load_active_freeze( } /// Write a freeze state to disk. +#[allow(clippy::disallowed_methods)] // INVARIANT: freeze file write — simple file persistence pub fn store_freeze(repo_path: &Path, state: &FreezeState) -> Result<(), FreezeError> { let path = freeze_file_path(repo_path); let json = serde_json::to_string_pretty(state)?; @@ -101,6 +103,7 @@ pub fn store_freeze(repo_path: &Path, state: &FreezeState) -> Result<(), FreezeE } /// Remove the freeze file (unfreeze). 
+#[allow(clippy::disallowed_methods)] // INVARIANT: freeze file removal — simple file cleanup pub fn remove_freeze(repo_path: &Path) -> Result { let path = freeze_file_path(repo_path); if path.exists() { diff --git a/crates/auths-id/src/keri/cache.rs b/crates/auths-id/src/keri/cache.rs index 249d3c3d..5d39e33d 100644 --- a/crates/auths-id/src/keri/cache.rs +++ b/crates/auths-id/src/keri/cache.rs @@ -15,6 +15,8 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +#[allow(clippy::disallowed_types)] +// INVARIANT: file-based cache adapter — fs types are core to this module use std::fs::{self, OpenOptions}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; diff --git a/crates/auths-id/src/keri/mod.rs b/crates/auths-id/src/keri/mod.rs index 5e9b7b04..ece15e7e 100644 --- a/crates/auths-id/src/keri/mod.rs +++ b/crates/auths-id/src/keri/mod.rs @@ -98,6 +98,8 @@ #[cfg(feature = "git-storage")] pub mod anchor; +#[allow(clippy::disallowed_methods, clippy::disallowed_types)] +// INVARIANT: file-based KEL cache — entire module is an I/O adapter pub mod cache; pub mod event; #[cfg(feature = "git-storage")] diff --git a/crates/auths-id/src/storage/layout.rs b/crates/auths-id/src/storage/layout.rs index 08085590..9c4d017d 100644 --- a/crates/auths-id/src/storage/layout.rs +++ b/crates/auths-id/src/storage/layout.rs @@ -265,6 +265,7 @@ pub fn sanitize_did_for_ref(did: &str) -> String { /// `~/.auths` work correctly (the shell does not expand tildes when they /// arrive via clap default values or programmatic callers). 
#[cfg(feature = "git-storage")] +#[allow(clippy::disallowed_methods)] // INVARIANT: designated home-dir resolution for repo path pub fn resolve_repo_path(repo_arg: Option) -> Result { match repo_arg { Some(pathbuf) if !pathbuf.as_os_str().is_empty() => Ok(expand_tilde(&pathbuf)?), @@ -278,6 +279,7 @@ pub fn resolve_repo_path(repo_arg: Option) -> Result Result { let s = path.to_string_lossy(); if s.starts_with("~/") || s == "~" { @@ -336,6 +338,7 @@ pub fn default_attestation_prefixes(config: &StorageLayoutConfig) -> Vec } #[cfg(test)] +#[allow(clippy::disallowed_methods)] mod tests { use super::*; diff --git a/crates/auths-id/src/storage/registry/mod.rs b/crates/auths-id/src/storage/registry/mod.rs index 878347c2..4f48366e 100644 --- a/crates/auths-id/src/storage/registry/mod.rs +++ b/crates/auths-id/src/storage/registry/mod.rs @@ -3,6 +3,8 @@ //! Concrete Git/Postgres implementations have moved to `auths-storage`. pub mod backend; +#[allow(clippy::disallowed_methods)] +// INVARIANT: entire module is an I/O adapter — installs Git hooks to disk pub mod hooks; pub mod org_member; pub mod schemas; diff --git a/crates/auths-sdk/Cargo.toml b/crates/auths-sdk/Cargo.toml index 9ed9102c..c630927d 100644 --- a/crates/auths-sdk/Cargo.toml +++ b/crates/auths-sdk/Cargo.toml @@ -21,6 +21,7 @@ ring.workspace = true thiserror.workspace = true serde = { version = "1", features = ["derive"] } serde_json = "1" +sha2 = "0.10" json-canon.workspace = true base64 = "0.22" chrono = "0.4" @@ -38,12 +39,12 @@ mcp = ["dep:reqwest"] lan-pairing = ["dep:auths-pairing-daemon"] [dev-dependencies] +auths-sdk = { path = ".", features = ["test-utils"] } auths-core = { workspace = true, features = ["test-utils"] } auths-id = { workspace = true, features = ["test-utils"] } auths-verifier = { workspace = true, features = ["test-utils"] } auths-storage = { workspace = true, features = ["backend-git"] } git2.workspace = true -sha2 = "0.10" tempfile = "3" [lints] diff --git 
a/crates/auths-sdk/clippy.toml b/crates/auths-sdk/clippy.toml new file mode 100644 index 00000000..a00552bc --- /dev/null +++ b/crates/auths-sdk/clippy.toml @@ -0,0 +1,49 @@ +# Duplicated from workspace clippy.toml — keep in sync +# Clippy does NOT merge per-crate configs with workspace config. +# Any changes to the workspace clippy.toml must be replicated here. + +allow-unwrap-in-tests = true +allow-expect-in-tests = true + +disallowed-methods = [ + # === Workspace rules (duplicated from root clippy.toml) === + { path = "chrono::offset::Utc::now", reason = "inject ClockProvider instead of calling Utc::now() directly", allow-invalid = true }, + { path = "std::time::SystemTime::now", reason = "inject ClockProvider instead of calling SystemTime::now() directly", allow-invalid = true }, + { path = "std::env::var", reason = "use EnvironmentConfig abstraction instead of reading env vars directly", allow-invalid = true }, + { path = "uuid::Uuid::new_v4", reason = "Use UuidProvider::new_id() instead. Inject SystemUuidProvider in production and DeterministicUuidProvider in tests." 
}, + + # === Sans-IO: filesystem === + { path = "std::fs::read", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_to_string", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::write", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::create_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_file", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::remove_dir_all", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::copy", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::rename", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::metadata", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::read_dir", reason = "sans-IO crate — use a port trait" }, + { path = "std::fs::canonicalize", reason = "sans-IO crate — use a port trait" }, + + # === Sans-IO: process === + { path = "std::process::Command::new", reason = "sans-IO crate — use a port trait" }, + { path = "std::process::exit", reason = "sans-IO crate — return errors instead" }, + + # === Sans-IO: dirs === + { path = "dirs::home_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::config_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + { path = "dirs::data_dir", reason = "sans-IO crate — inject paths via config", allow-invalid = true }, + + # === Sans-IO: network === + { path = "reqwest::Client::new", reason = "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, + { path = "reqwest::get", reason = "sans-IO crate — use a port trait for HTTP", allow-invalid = true }, +] + +disallowed-types = [ + { path = "std::fs::File", reason = "sans-IO crate — use a port trait" }, + { path = 
"std::process::Command", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpStream", reason = "sans-IO crate — use a port trait" }, + { path = "std::net::TcpListener", reason = "sans-IO crate — use a port trait" }, +] diff --git a/crates/auths-sdk/src/ports/allowed_signers.rs b/crates/auths-sdk/src/ports/allowed_signers.rs new file mode 100644 index 00000000..04e27b57 --- /dev/null +++ b/crates/auths-sdk/src/ports/allowed_signers.rs @@ -0,0 +1,25 @@ +//! Allowed signers file I/O port for reading and writing SSH allowed_signers files. + +use std::path::Path; + +use crate::workflows::allowed_signers::AllowedSignersError; + +/// Abstracts filesystem access for allowed_signers file operations. +/// +/// Args: +/// * `path` - The path to the allowed_signers file. +/// +/// Usage: +/// ```ignore +/// let content = store.read(Path::new("~/.ssh/allowed_signers"))?; +/// store.write(Path::new("~/.ssh/allowed_signers"), "content")?; +/// ``` +pub trait AllowedSignersStore: Send + Sync { + /// Read the allowed_signers file content. + /// Returns `None` if the file does not exist. + fn read(&self, path: &Path) -> Result, AllowedSignersError>; + + /// Write content to the allowed_signers file, creating parent dirs as needed. + /// Should use atomic writes where possible. + fn write(&self, path: &Path, content: &str) -> Result<(), AllowedSignersError>; +} diff --git a/crates/auths-sdk/src/ports/mod.rs b/crates/auths-sdk/src/ports/mod.rs index 74c19768..cc04b2ca 100644 --- a/crates/auths-sdk/src/ports/mod.rs +++ b/crates/auths-sdk/src/ports/mod.rs @@ -1,5 +1,7 @@ /// Agent-based signing port for delegating operations to a running agent process. pub mod agent; +/// Allowed signers file I/O port for reading and writing SSH allowed_signers files. +pub mod allowed_signers; /// Artifact source port for computing digests and metadata. pub mod artifact; /// Diagnostic provider ports for system health checks. 
diff --git a/crates/auths-sdk/src/testing/contracts/artifact.rs b/crates/auths-sdk/src/testing/contracts/artifact.rs new file mode 100644 index 00000000..260eae08 --- /dev/null +++ b/crates/auths-sdk/src/testing/contracts/artifact.rs @@ -0,0 +1,57 @@ +/// Contract test suite for [`ArtifactSource`] implementations. +/// +/// Args: +/// * `$name` — identifier for the generated module. +/// * `$setup` — expression that returns `(impl ArtifactSource, _guard)`. +/// +/// Usage: +/// ```ignore +/// artifact_source_contract_tests!( +/// fake, +/// { (FakeArtifactSource::new("test.bin", "sha256", "abc123", 42), ()) }, +/// ); +/// ``` +#[macro_export] +macro_rules! artifact_source_contract_tests { + ($name:ident, $setup:expr $(,)?) => { + mod $name { + use $crate::ports::artifact::ArtifactSource as _; + + use super::*; + + #[test] + fn contract_digest_returns_valid_result() { + let (source, _guard) = $setup; + let digest = source.digest().unwrap(); + assert!( + !digest.algorithm.is_empty(), + "digest algorithm should not be empty" + ); + assert!(!digest.hex.is_empty(), "digest hex should not be empty"); + } + + #[test] + fn contract_digest_is_deterministic() { + let (source, _guard) = $setup; + let d1 = source.digest().unwrap(); + let d2 = source.digest().unwrap(); + assert_eq!(d1, d2, "consecutive digest calls should return same value"); + } + + #[test] + fn contract_metadata_returns_valid_result() { + let (source, _guard) = $setup; + let meta = source.metadata().unwrap(); + assert!( + !meta.artifact_type.is_empty(), + "artifact_type should not be empty" + ); + assert_eq!( + meta.digest, + source.digest().unwrap(), + "metadata digest should match direct digest call" + ); + } + } + }; +} diff --git a/crates/auths-sdk/src/testing/contracts/diagnostics.rs b/crates/auths-sdk/src/testing/contracts/diagnostics.rs new file mode 100644 index 00000000..41601608 --- /dev/null +++ b/crates/auths-sdk/src/testing/contracts/diagnostics.rs @@ -0,0 +1,78 @@ +/// Contract test suite for 
[`GitDiagnosticProvider`] implementations. +/// +/// Args: +/// * `$name` — identifier for the generated module. +/// * `$setup` — expression that returns `(impl GitDiagnosticProvider, _guard)`. +/// +/// Usage: +/// ```ignore +/// git_diagnostic_provider_contract_tests!( +/// fake, +/// { (FakeGitDiagnosticProvider::new(true, vec![]), ()) }, +/// ); +/// ``` +#[macro_export] +macro_rules! git_diagnostic_provider_contract_tests { + ($name:ident, $setup:expr $(,)?) => { + mod $name { + use $crate::ports::diagnostics::GitDiagnosticProvider as _; + + use super::*; + + #[test] + fn contract_check_git_version_returns_result() { + let (provider, _guard) = $setup; + let result = provider.check_git_version(); + assert!(result.is_ok(), "check_git_version should return Ok"); + let check = result.unwrap(); + assert!(!check.name.is_empty(), "check name should not be empty"); + } + + #[test] + fn contract_get_git_config_returns_result() { + let (provider, _guard) = $setup; + let result = provider.get_git_config("gpg.format"); + assert!( + result.is_ok(), + "get_git_config should return Ok even for missing keys" + ); + } + } + }; +} + +/// Contract test suite for [`CryptoDiagnosticProvider`] implementations. +/// +/// Args: +/// * `$name` — identifier for the generated module. +/// * `$setup` — expression that returns `(impl CryptoDiagnosticProvider, _guard)`. +/// +/// Usage: +/// ```ignore +/// crypto_diagnostic_provider_contract_tests!( +/// fake, +/// { (FakeCryptoDiagnosticProvider::new(true), ()) }, +/// ); +/// ``` +#[macro_export] +macro_rules! crypto_diagnostic_provider_contract_tests { + ($name:ident, $setup:expr $(,)?) 
=> { + mod $name { + use $crate::ports::diagnostics::CryptoDiagnosticProvider as _; + + use super::*; + + #[test] + fn contract_check_ssh_keygen_returns_result() { + let (provider, _guard) = $setup; + let result = provider.check_ssh_keygen_available(); + assert!( + result.is_ok(), + "check_ssh_keygen_available should return Ok" + ); + let check = result.unwrap(); + assert!(!check.name.is_empty(), "check name should not be empty"); + } + } + }; +} diff --git a/crates/auths-sdk/src/testing/contracts/git_config.rs b/crates/auths-sdk/src/testing/contracts/git_config.rs new file mode 100644 index 00000000..393af5da --- /dev/null +++ b/crates/auths-sdk/src/testing/contracts/git_config.rs @@ -0,0 +1,49 @@ +/// Contract test suite for [`GitConfigProvider`] implementations. +/// +/// Generates a module with `#[test]` cases that verify behavioural correctness +/// for any [`GitConfigProvider`] implementation. +/// +/// Args: +/// * `$name` — identifier for the generated module. +/// * `$setup` — expression that returns `(impl GitConfigProvider, _guard)`. +/// +/// Usage: +/// ```ignore +/// git_config_provider_contract_tests!( +/// fake, +/// { (FakeGitConfigProvider::new(), ()) }, +/// ); +/// ``` +#[macro_export] +macro_rules! git_config_provider_contract_tests { + ($name:ident, $setup:expr $(,)?) 
=> { + mod $name { + use $crate::ports::git_config::GitConfigProvider as _; + + use super::*; + + #[test] + fn contract_set_stores_value() { + let (provider, _guard) = $setup; + let result = provider.set("gpg.format", "ssh"); + assert!(result.is_ok(), "set should succeed"); + } + + #[test] + fn contract_set_overwrites_existing() { + let (provider, _guard) = $setup; + provider.set("gpg.format", "ssh").unwrap(); + let result = provider.set("gpg.format", "gpg"); + assert!(result.is_ok(), "overwriting an existing key should succeed"); + } + + #[test] + fn contract_set_different_keys() { + let (provider, _guard) = $setup; + provider.set("gpg.format", "ssh").unwrap(); + let result = provider.set("user.signingkey", "/path/to/key"); + assert!(result.is_ok(), "setting a different key should succeed"); + } + } + }; +} diff --git a/crates/auths-sdk/src/testing/contracts/mod.rs b/crates/auths-sdk/src/testing/contracts/mod.rs index 0e5914fb..8495e8dd 100644 --- a/crates/auths-sdk/src/testing/contracts/mod.rs +++ b/crates/auths-sdk/src/testing/contracts/mod.rs @@ -1,2 +1,9 @@ +/// Contract tests for [`ArtifactSource`](crate::ports::artifact::ArtifactSource) implementations. +pub mod artifact; +/// Contract tests for [`GitDiagnosticProvider`](crate::ports::diagnostics::GitDiagnosticProvider) +/// and [`CryptoDiagnosticProvider`](crate::ports::diagnostics::CryptoDiagnosticProvider) implementations. +pub mod diagnostics; +/// Contract tests for [`GitConfigProvider`](crate::ports::git_config::GitConfigProvider) implementations. +pub mod git_config; /// Contract tests for [`GitLogProvider`](crate::ports::git::GitLogProvider) implementations. 
pub mod git_log; diff --git a/crates/auths-sdk/src/testing/fakes/allowed_signers_store.rs b/crates/auths-sdk/src/testing/fakes/allowed_signers_store.rs new file mode 100644 index 00000000..5addd327 --- /dev/null +++ b/crates/auths-sdk/src/testing/fakes/allowed_signers_store.rs @@ -0,0 +1,105 @@ +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::sync::Mutex; + +use crate::ports::allowed_signers::AllowedSignersStore; +use crate::workflows::allowed_signers::AllowedSignersError; + +/// In-memory fake for [`AllowedSignersStore`]. +/// +/// Stores file contents in a `HashMap` and records calls +/// for test assertions. +/// +/// Usage: +/// ```ignore +/// let store = FakeAllowedSignersStore::new(); +/// let store = FakeAllowedSignersStore::new().with_file(path, "content"); +/// ``` +pub struct FakeAllowedSignersStore { + files: Mutex>, + write_calls: Mutex>, + fail_on_write: Mutex>, +} + +impl Default for FakeAllowedSignersStore { + fn default() -> Self { + Self::new() + } +} + +impl FakeAllowedSignersStore { + /// Create an empty fake with no files. + pub fn new() -> Self { + Self { + files: Mutex::new(HashMap::new()), + write_calls: Mutex::new(Vec::new()), + fail_on_write: Mutex::new(None), + } + } + + /// Pre-populate a file with content. + pub fn with_file(self, path: &Path, content: &str) -> Self { + self.files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .insert(path.to_path_buf(), content.to_string()); + self + } + + /// Configure all writes to fail with the given message. + pub fn write_fails_with(self, msg: &str) -> Self { + *self.fail_on_write.lock().unwrap_or_else(|e| e.into_inner()) = Some(msg.to_string()); + self + } + + /// Return recorded write calls as `(path, content)` pairs. + pub fn write_calls(&self) -> Vec<(PathBuf, String)> { + self.write_calls + .lock() + .unwrap_or_else(|e| e.into_inner()) + .clone() + } + + /// Read file content from the in-memory store (for test assertions). 
+ pub fn content(&self, path: &Path) -> Option { + self.files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .get(path) + .cloned() + } +} + +impl AllowedSignersStore for FakeAllowedSignersStore { + fn read(&self, path: &Path) -> Result, AllowedSignersError> { + Ok(self + .files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .get(path) + .cloned()) + } + + fn write(&self, path: &Path, content: &str) -> Result<(), AllowedSignersError> { + if let Some(msg) = self + .fail_on_write + .lock() + .unwrap_or_else(|e| e.into_inner()) + .as_ref() + { + return Err(AllowedSignersError::FileWrite { + path: path.to_path_buf(), + source: std::io::Error::new(std::io::ErrorKind::PermissionDenied, msg.clone()), + }); + } + self.write_calls + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push((path.to_path_buf(), content.to_string())); + self.files + .lock() + .unwrap_or_else(|e| e.into_inner()) + .insert(path.to_path_buf(), content.to_string()); + Ok(()) + } +} diff --git a/crates/auths-sdk/src/testing/fakes/artifact.rs b/crates/auths-sdk/src/testing/fakes/artifact.rs new file mode 100644 index 00000000..9b9de582 --- /dev/null +++ b/crates/auths-sdk/src/testing/fakes/artifact.rs @@ -0,0 +1,133 @@ +use std::sync::Mutex; + +use sha2::{Digest, Sha256}; + +use crate::ports::artifact::{ArtifactDigest, ArtifactError, ArtifactMetadata, ArtifactSource}; + +/// Configurable fake for [`ArtifactSource`]. +/// +/// Returns canned digest/metadata or configurable errors for failure-path testing. +/// +/// Usage: +/// ```ignore +/// let fake = FakeArtifactSource::new("release.tar.gz", "sha256", "abcdef...", 1024); +/// let fake = FakeArtifactSource::digest_fails_with("read error"); +/// ``` +pub struct FakeArtifactSource { + digest: ArtifactDigest, + name: String, + size: u64, + fail_digest: Mutex>, + fail_metadata: Mutex>, +} + +impl FakeArtifactSource { + /// Create a fake that returns the given digest and metadata. 
+ pub fn new(name: &str, algorithm: &str, hex: &str, size: u64) -> Self { + Self { + digest: ArtifactDigest { + algorithm: algorithm.to_string(), + hex: hex.to_string(), + }, + name: name.to_string(), + size, + fail_digest: Mutex::new(None), + fail_metadata: Mutex::new(None), + } + } + + /// Create a fake where `digest()` always returns an error. + pub fn digest_fails_with(msg: &str) -> Self { + Self { + digest: ArtifactDigest { + algorithm: String::new(), + hex: String::new(), + }, + name: String::new(), + size: 0, + fail_digest: Mutex::new(Some(msg.to_string())), + fail_metadata: Mutex::new(None), + } + } + + /// Create a fake from raw bytes, computing a real SHA-256 digest. + /// + /// Args: + /// * `name`: Artifact name for metadata. + /// * `data`: Raw bytes to hash. + /// + /// Usage: + /// ```ignore + /// let fake = FakeArtifactSource::from_data("release.bin", b"binary content"); + /// ``` + pub fn from_data(name: &str, data: &[u8]) -> Self { + let hash = Sha256::digest(data); + Self { + digest: ArtifactDigest { + algorithm: "sha256".to_string(), + hex: hex::encode(hash), + }, + name: name.to_string(), + size: data.len() as u64, + fail_digest: Mutex::new(None), + fail_metadata: Mutex::new(None), + } + } + + /// Create a fake where `metadata()` always returns an error. 
+ pub fn metadata_fails_with(msg: &str) -> Self { + Self { + digest: ArtifactDigest { + algorithm: String::new(), + hex: String::new(), + }, + name: String::new(), + size: 0, + fail_digest: Mutex::new(None), + fail_metadata: Mutex::new(Some(msg.to_string())), + } + } +} + +impl ArtifactSource for FakeArtifactSource { + fn digest(&self) -> Result { + if let Some(msg) = self + .fail_digest + .lock() + .unwrap_or_else(|e| e.into_inner()) + .as_ref() + { + return Err(ArtifactError::Io(msg.clone())); + } + Ok(self.digest.clone()) + } + + fn metadata(&self) -> Result { + if let Some(msg) = self + .fail_metadata + .lock() + .unwrap_or_else(|e| e.into_inner()) + .as_ref() + { + return Err(ArtifactError::Metadata(msg.clone())); + } + Ok(ArtifactMetadata { + artifact_type: "memory".to_string(), + digest: self.digest.clone(), + name: Some(self.name.clone()), + size: Some(self.size), + }) + } +} + +#[cfg(test)] +mod tests { + use crate::testing::fakes::artifact::FakeArtifactSource; + + crate::artifact_source_contract_tests!(fake, { + ( + FakeArtifactSource::new("test.bin", "sha256", "abc123def456", 42), + (), + ) + },); +} diff --git a/crates/auths-sdk/src/testing/fakes/diagnostics.rs b/crates/auths-sdk/src/testing/fakes/diagnostics.rs index 28b2005a..91b07f83 100644 --- a/crates/auths-sdk/src/testing/fakes/diagnostics.rs +++ b/crates/auths-sdk/src/testing/fakes/diagnostics.rs @@ -73,3 +73,18 @@ impl CryptoDiagnosticProvider for FakeCryptoDiagnosticProvider { }) } } + +#[cfg(test)] +mod tests { + use crate::testing::fakes::diagnostics::{ + FakeCryptoDiagnosticProvider, FakeGitDiagnosticProvider, + }; + + crate::git_diagnostic_provider_contract_tests!(fake_git, { + (FakeGitDiagnosticProvider::new(true, vec![]), ()) + },); + + crate::crypto_diagnostic_provider_contract_tests!(fake_crypto, { + (FakeCryptoDiagnosticProvider::new(true), ()) + },); +} diff --git a/crates/auths-sdk/src/testing/fakes/git_config.rs b/crates/auths-sdk/src/testing/fakes/git_config.rs new file mode 100644 
index 00000000..6c12c6e7 --- /dev/null +++ b/crates/auths-sdk/src/testing/fakes/git_config.rs @@ -0,0 +1,111 @@ +use std::collections::HashMap; +use std::sync::Mutex; + +use crate::ports::git_config::{GitConfigError, GitConfigProvider}; + +/// Recorded call from [`FakeGitConfigProvider`]. +#[derive(Debug, Clone)] +pub struct GitConfigSetCall { + /// The git config key that was set. + pub key: String, + /// The value it was set to. + pub value: String, +} + +/// Configurable fake for [`GitConfigProvider`]. +/// +/// Stores config values in memory and records all calls for assertion. +/// +/// Usage: +/// ```ignore +/// let fake = FakeGitConfigProvider::new(); +/// let fake = FakeGitConfigProvider::new().with_config("gpg.format", "ssh"); +/// let fake = FakeGitConfigProvider::new().set_fails_with("permission denied"); +/// assert_eq!(fake.set_calls().len(), 1); +/// ``` +pub struct FakeGitConfigProvider { + configs: Mutex>, + set_calls: Mutex>, + fail_on_set: Mutex>, +} + +impl Default for FakeGitConfigProvider { + fn default() -> Self { + Self::new() + } +} + +impl FakeGitConfigProvider { + /// Create a fake with empty config state. + pub fn new() -> Self { + Self { + configs: Mutex::new(HashMap::new()), + set_calls: Mutex::new(Vec::new()), + fail_on_set: Mutex::new(None), + } + } + + /// Pre-populate a config key-value pair. + pub fn with_config(self, key: &str, value: &str) -> Self { + self.configs + .lock() + .unwrap_or_else(|e| e.into_inner()) + .insert(key.into(), value.into()); + self + } + + /// Configure all `set` calls to fail with the given message. + pub fn set_fails_with(self, msg: &str) -> Self { + *self.fail_on_set.lock().unwrap_or_else(|e| e.into_inner()) = Some(msg.into()); + self + } + + /// Return all recorded `set` calls. + pub fn set_calls(&self) -> Vec { + self.set_calls + .lock() + .unwrap_or_else(|e| e.into_inner()) + .clone() + } + + /// Read a config value by key (for test assertions). 
+ pub fn get(&self, key: &str) -> Option { + self.configs + .lock() + .unwrap_or_else(|e| e.into_inner()) + .get(key) + .cloned() + } +} + +impl GitConfigProvider for FakeGitConfigProvider { + fn set(&self, key: &str, value: &str) -> Result<(), GitConfigError> { + if let Some(msg) = self + .fail_on_set + .lock() + .unwrap_or_else(|e| e.into_inner()) + .as_ref() + { + return Err(GitConfigError::CommandFailed(msg.clone())); + } + self.set_calls + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push(GitConfigSetCall { + key: key.into(), + value: value.into(), + }); + self.configs + .lock() + .unwrap_or_else(|e| e.into_inner()) + .insert(key.into(), value.into()); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::testing::fakes::git_config::FakeGitConfigProvider; + + crate::git_config_provider_contract_tests!(fake, { (FakeGitConfigProvider::new(), ()) },); +} diff --git a/crates/auths-sdk/src/testing/fakes/mod.rs b/crates/auths-sdk/src/testing/fakes/mod.rs index b7bdb4b9..546134f5 100644 --- a/crates/auths-sdk/src/testing/fakes/mod.rs +++ b/crates/auths-sdk/src/testing/fakes/mod.rs @@ -1,7 +1,15 @@ mod agent; +mod allowed_signers_store; +mod artifact; mod diagnostics; mod git; +mod git_config; +mod signer; pub use agent::FakeAgentProvider; +pub use allowed_signers_store::FakeAllowedSignersStore; +pub use artifact::FakeArtifactSource; pub use diagnostics::{FakeCryptoDiagnosticProvider, FakeGitDiagnosticProvider}; pub use git::FakeGitLogProvider; +pub use git_config::{FakeGitConfigProvider, GitConfigSetCall}; +pub use signer::FakeSecureSigner; diff --git a/crates/auths-sdk/src/testing/fakes/signer.rs b/crates/auths-sdk/src/testing/fakes/signer.rs new file mode 100644 index 00000000..c8e28505 --- /dev/null +++ b/crates/auths-sdk/src/testing/fakes/signer.rs @@ -0,0 +1,33 @@ +use auths_core::AgentError; +use auths_core::signing::{PassphraseProvider, SecureSigner}; +use auths_core::storage::keychain::{IdentityDID, KeyAlias}; + +/// Fake [`SecureSigner`] that returns 
a dummy 64-byte signature. +/// +/// Usage: +/// ```ignore +/// let signer = FakeSecureSigner; +/// let sig = signer.sign_with_alias(&alias, &provider, b"msg").unwrap(); +/// assert_eq!(sig.len(), 64); +/// ``` +pub struct FakeSecureSigner; + +impl SecureSigner for FakeSecureSigner { + fn sign_with_alias( + &self, + _alias: &KeyAlias, + _passphrase_provider: &dyn PassphraseProvider, + _message: &[u8], + ) -> Result, AgentError> { + Ok(vec![1u8; 64]) + } + + fn sign_for_identity( + &self, + _identity_did: &IdentityDID, + _passphrase_provider: &dyn PassphraseProvider, + _message: &[u8], + ) -> Result, AgentError> { + Ok(vec![1u8; 64]) + } +} diff --git a/crates/auths-sdk/src/workflows/allowed_signers.rs b/crates/auths-sdk/src/workflows/allowed_signers.rs index 5e150079..84f90d99 100644 --- a/crates/auths-sdk/src/workflows/allowed_signers.rs +++ b/crates/auths-sdk/src/workflows/allowed_signers.rs @@ -265,68 +265,48 @@ impl AllowedSigners { } } - /// Loads and parses an allowed_signers file. + /// Loads and parses an allowed_signers file via the given store. /// /// If the file doesn't exist, returns an empty instance. /// Files without section markers are treated as all-manual entries. /// /// Args: /// * `path`: Path to the allowed_signers file. + /// * `store`: I/O backend for reading the file. /// /// Usage: /// ```ignore - /// let signers = AllowedSigners::load("~/.ssh/allowed_signers")?; + /// let signers = AllowedSigners::load("~/.ssh/allowed_signers", &store)?; /// ``` - pub fn load(path: impl Into) -> Result { + pub fn load( + path: impl Into, + store: &dyn crate::ports::allowed_signers::AllowedSignersStore, + ) -> Result { let path = path.into(); - let content = match std::fs::read_to_string(&path) { - Ok(c) => c, - Err(e) if e.kind() == std::io::ErrorKind::NotFound => { - return Ok(Self::new(path)); - } - Err(e) => { - return Err(AllowedSignersError::FileRead { path, source: e }); - } + let content = match store.read(&path)? 
{ + Some(c) => c, + None => return Ok(Self::new(path)), }; let mut signers = Self::new(path); signers.parse_content(&content)?; Ok(signers) } - /// Atomically writes the allowed_signers file with section markers. + /// Atomically writes the allowed_signers file via the given store. + /// + /// Args: + /// * `store`: I/O backend for writing the file. /// /// Usage: /// ```ignore - /// signers.save()?; + /// signers.save(&store)?; /// ``` - pub fn save(&self) -> Result<(), AllowedSignersError> { + pub fn save( + &self, + store: &dyn crate::ports::allowed_signers::AllowedSignersStore, + ) -> Result<(), AllowedSignersError> { let content = self.format_content(); - if let Some(parent) = self.file_path.parent() { - std::fs::create_dir_all(parent).map_err(|e| AllowedSignersError::FileWrite { - path: self.file_path.clone(), - source: e, - })?; - } - - use std::io::Write; - let dir = self.file_path.parent().unwrap_or_else(|| Path::new(".")); - let tmp = - tempfile::NamedTempFile::new_in(dir).map_err(|e| AllowedSignersError::FileWrite { - path: self.file_path.clone(), - source: e, - })?; - (&tmp) - .write_all(content.as_bytes()) - .map_err(|e| AllowedSignersError::FileWrite { - path: self.file_path.clone(), - source: e, - })?; - tmp.persist(&self.file_path) - .map_err(|e| AllowedSignersError::FileWrite { - path: self.file_path.clone(), - source: e.error, - })?; - Ok(()) + store.write(&self.file_path, &content) } /// Returns all signer entries. 
diff --git a/crates/auths-sdk/tests/cases/allowed_signers.rs b/crates/auths-sdk/tests/cases/allowed_signers.rs index 2c8782f9..9123614f 100644 --- a/crates/auths-sdk/tests/cases/allowed_signers.rs +++ b/crates/auths-sdk/tests/cases/allowed_signers.rs @@ -1,3 +1,4 @@ +use auths_sdk::testing::fakes::FakeAllowedSignersStore; use auths_sdk::workflows::allowed_signers::*; use auths_verifier::core::Ed25519PublicKey; use auths_verifier::types::DeviceDID; @@ -42,7 +43,8 @@ fn signer_principal_display_did() { #[test] fn load_nonexistent_file_returns_empty() { - let signers = AllowedSigners::load("/tmp/auths-test-nonexistent-12345").unwrap(); + let store = FakeAllowedSignersStore::new(); + let signers = AllowedSigners::load("/tmp/auths-test-nonexistent-12345", &store).unwrap(); assert!(signers.list().is_empty()); } @@ -123,9 +125,10 @@ fn save_and_load_roundtrip() { SignerSource::Attestation, ) .unwrap(); - signers.save().unwrap(); + let store = FakeAllowedSignersStore::new(); + signers.save(&store).unwrap(); - let loaded = AllowedSigners::load(&path).unwrap(); + let loaded = AllowedSigners::load(&path, &store).unwrap(); assert_eq!(loaded.list().len(), 2); let manual = loaded @@ -152,9 +155,9 @@ fn load_unmarked_file_treats_as_manual() { let key = Ed25519PublicKey::from_bytes([1u8; 32]); let ssh_key = auths_sdk::workflows::git_integration::public_key_to_ssh(key.as_bytes()).unwrap(); let content = format!("user@example.com namespaces=\"git\" {}\n", ssh_key); - std::fs::write(&path, content).unwrap(); + let store = FakeAllowedSignersStore::new().with_file(&path, &content); - let loaded = AllowedSigners::load(&path).unwrap(); + let loaded = AllowedSigners::load(&path, &store).unwrap(); assert_eq!(loaded.list().len(), 1); assert_eq!(loaded.list()[0].source, SignerSource::Manual); } diff --git a/crates/auths-sdk/tests/cases/artifact.rs b/crates/auths-sdk/tests/cases/artifact.rs index de5ab28f..99266ccd 100644 --- a/crates/auths-sdk/tests/cases/artifact.rs +++ 
b/crates/auths-sdk/tests/cases/artifact.rs @@ -3,55 +3,15 @@ use auths_sdk::ports::artifact::{ArtifactDigest, ArtifactError, ArtifactMetadata use auths_sdk::signing::{ ArtifactSigningError, ArtifactSigningParams, SigningKeyMaterial, sign_artifact, }; +use auths_sdk::testing::fakes::FakeArtifactSource; use auths_sdk::workflows::artifact::compute_digest; use std::sync::Arc; use crate::cases::helpers::{build_empty_test_context, setup_signed_artifact_context}; -struct InMemoryArtifact { - data: Vec, - name: String, -} - -impl ArtifactSource for InMemoryArtifact { - fn digest(&self) -> Result { - use sha2::{Digest, Sha256}; - let hash = Sha256::digest(&self.data); - Ok(ArtifactDigest { - algorithm: "sha256".to_string(), - hex: hex::encode(hash), - }) - } - - fn metadata(&self) -> Result { - let digest = self.digest()?; - Ok(ArtifactMetadata { - artifact_type: "memory".to_string(), - digest, - name: Some(self.name.clone()), - size: Some(self.data.len() as u64), - }) - } -} - -struct FailingArtifact; - -impl ArtifactSource for FailingArtifact { - fn digest(&self) -> Result { - Err(ArtifactError::Io("simulated read failure".to_string())) - } - - fn metadata(&self) -> Result { - Err(ArtifactError::Metadata("no metadata available".to_string())) - } -} - #[test] -fn in_memory_artifact_digest_is_deterministic() { - let artifact = InMemoryArtifact { - data: b"hello world".to_vec(), - name: "test.bin".to_string(), - }; +fn fake_artifact_from_data_digest_is_deterministic() { + let artifact = FakeArtifactSource::from_data("test.bin", b"hello world"); let d1 = artifact.digest().unwrap(); let d2 = artifact.digest().unwrap(); @@ -65,11 +25,8 @@ fn in_memory_artifact_digest_is_deterministic() { } #[test] -fn in_memory_artifact_metadata_includes_name_and_size() { - let artifact = InMemoryArtifact { - data: b"some content".to_vec(), - name: "payload.tar.gz".to_string(), - }; +fn fake_artifact_from_data_metadata_includes_name_and_size() { + let artifact = 
FakeArtifactSource::from_data("payload.tar.gz", b"some content"); let meta = artifact.metadata().unwrap(); @@ -81,10 +38,7 @@ fn in_memory_artifact_metadata_includes_name_and_size() { #[test] fn compute_digest_delegates_to_source() { - let artifact = InMemoryArtifact { - data: b"test data".to_vec(), - name: "test.bin".to_string(), - }; + let artifact = FakeArtifactSource::from_data("test.bin", b"test data"); let direct = artifact.digest().unwrap(); let via_workflow = compute_digest(&artifact).unwrap(); @@ -94,7 +48,7 @@ fn compute_digest_delegates_to_source() { #[test] fn failing_artifact_returns_io_error() { - let artifact = FailingArtifact; + let artifact = FakeArtifactSource::digest_fails_with("simulated read failure"); let result = artifact.digest(); assert!(result.is_err()); @@ -108,7 +62,7 @@ fn failing_artifact_returns_io_error() { #[test] fn failing_artifact_metadata_returns_error() { - let artifact = FailingArtifact; + let artifact = FakeArtifactSource::metadata_fails_with("no metadata available"); let result = artifact.metadata(); assert!(result.is_err()); @@ -163,10 +117,10 @@ fn artifact_metadata_serialization_roundtrip() { fn sign_artifact_with_alias_keys_produces_valid_json() { let (_tmp, key_alias, ctx) = setup_signed_artifact_context(); - let artifact = Arc::new(InMemoryArtifact { - data: b"release binary content".to_vec(), - name: "release.bin".to_string(), - }); + let artifact = Arc::new(FakeArtifactSource::from_data( + "release.bin", + b"release binary content", + )); let params = ArtifactSigningParams { artifact, @@ -193,10 +147,10 @@ fn sign_artifact_with_direct_device_key_produces_valid_json() { let (_tmp, key_alias, ctx) = setup_signed_artifact_context(); let device_seed = SecureSeed::new([42u8; 32]); - let artifact = Arc::new(InMemoryArtifact { - data: b"release binary content v2".to_vec(), - name: "release-v2.bin".to_string(), - }); + let artifact = Arc::new(FakeArtifactSource::from_data( + "release-v2.bin", + b"release binary content v2", 
+ )); let params = ArtifactSigningParams { artifact, @@ -218,10 +172,7 @@ fn sign_artifact_identity_not_found_returns_error() { let (_tmp, empty_ctx) = build_empty_test_context(); let device_seed = SecureSeed::new([1u8; 32]); - let artifact = Arc::new(InMemoryArtifact { - data: b"data".to_vec(), - name: "file.bin".to_string(), - }); + let artifact = Arc::new(FakeArtifactSource::from_data("file.bin", b"data")); let params = ArtifactSigningParams { artifact, diff --git a/crates/auths-sdk/tests/cases/org.rs b/crates/auths-sdk/tests/cases/org.rs index faab3b8e..b22c59e6 100644 --- a/crates/auths-sdk/tests/cases/org.rs +++ b/crates/auths-sdk/tests/cases/org.rs @@ -1,11 +1,11 @@ -use auths_core::AgentError; use auths_core::ports::id::UuidProvider; -use auths_core::signing::{PassphraseProvider, SecureSigner}; +use auths_core::signing::{PassphraseProvider, PrefilledPassphraseProvider, SecureSigner}; use auths_core::storage::keychain::KeyAlias; use auths_core::testing::DeterministicUuidProvider; use auths_id::ports::registry::RegistryBackend; use auths_id::testing::fakes::FakeRegistryBackend; use auths_sdk::error::OrgError; +use auths_sdk::testing::fakes::FakeSecureSigner; use auths_sdk::workflows::org::{ AddMemberCommand, OrgContext, RevokeMemberCommand, Role, UpdateCapabilitiesCommand, add_organization_member, revoke_organization_member, update_member_capabilities, @@ -37,36 +37,6 @@ fn org_issuer() -> IdentityDID { IdentityDID::new(format!("did:keri:{ORG}")) } -struct FakeSecureSigner; - -impl SecureSigner for FakeSecureSigner { - fn sign_with_alias( - &self, - _alias: &KeyAlias, - _passphrase_provider: &dyn PassphraseProvider, - _message: &[u8], - ) -> Result, AgentError> { - Ok(vec![1u8; 64]) - } - - fn sign_for_identity( - &self, - _identity_did: &auths_core::storage::keychain::IdentityDID, - _passphrase_provider: &dyn PassphraseProvider, - _message: &[u8], - ) -> Result, AgentError> { - Ok(vec![1u8; 64]) - } -} - -struct FakePassphraseProvider; - -impl 
PassphraseProvider for FakePassphraseProvider { - fn get_passphrase(&self, _prompt: &str) -> Result, AgentError> { - Ok(zeroize::Zeroizing::new("test".to_string())) - } -} - fn base_admin_attestation() -> Attestation { Attestation { version: 1, @@ -146,7 +116,7 @@ fn find_admin_returns_attestation_when_admin_exists() { let backend = FakeRegistryBackend::new(); seed_admin(&backend); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -176,7 +146,7 @@ fn find_admin_returns_not_found_when_pubkey_mismatch() { let backend = FakeRegistryBackend::new(); seed_admin(&backend); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -206,7 +176,7 @@ fn find_admin_returns_not_found_when_no_manage_members_capability() { backend.store_org_member(ORG, &att).unwrap(); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -239,7 +209,7 @@ fn add_member_stores_signed_attestation_with_injected_clock_and_uuid() { let id_provider = DeterministicUuidProvider::new(); let expected_rid = "00000000-0000-0000-0000-000000000000"; let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let ctx = make_ctx(&backend, &clock, &id_provider, &signer, &pp); let result = add_organization_member( @@ -268,7 +238,7 @@ fn add_member_creates_attestation_with_signatures() { seed_admin(&backend); let signer = 
FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -300,7 +270,7 @@ fn add_member_creates_attestation_with_signatures() { fn add_member_fails_when_admin_not_found() { let backend = FakeRegistryBackend::new(); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -327,7 +297,7 @@ fn add_member_fails_with_invalid_capability() { let backend = FakeRegistryBackend::new(); seed_admin(&backend); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -360,7 +330,7 @@ fn revoke_member_creates_signed_revocation_with_injected_clock() { let fixed_time = chrono::Utc.with_ymd_and_hms(2025, 6, 2, 12, 0, 0).unwrap(); let clock = MockClock(fixed_time); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -389,7 +359,7 @@ fn revoke_member_fails_when_member_not_found() { let backend = FakeRegistryBackend::new(); seed_admin(&backend); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); @@ -419,7 +389,7 @@ fn revoke_member_fails_when_already_revoked() { backend.store_org_member(ORG, 
&att).unwrap(); let signer = FakeSecureSigner; - let pp = FakePassphraseProvider; + let pp = PrefilledPassphraseProvider::new("test"); let uuid = DeterministicUuidProvider::new(); let clock = MockClock(chrono::Utc::now()); let ctx = make_ctx(&backend, &clock, &uuid, &signer, &pp); diff --git a/crates/auths-sdk/tests/cases/rotation.rs b/crates/auths-sdk/tests/cases/rotation.rs index edef2ac0..f5332831 100644 --- a/crates/auths-sdk/tests/cases/rotation.rs +++ b/crates/auths-sdk/tests/cases/rotation.rs @@ -47,7 +47,8 @@ fn setup_test_identity(registry_path: &std::path::Path) -> KeyAlias { result.key_alias } -/// A `KeyStorage` implementation that always fails on `store_key`. +/// Test-local: failure-mode `KeyStorage` for testing error paths. +/// Not shared because no other test file needs a universally-failing keychain. struct FailingKeyStorage; impl KeyStorage for FailingKeyStorage { diff --git a/docs/plans/NITS_focus.md b/docs/plans/NITS_focus.md deleted file mode 100644 index d22a782b..00000000 --- a/docs/plans/NITS_focus.md +++ /dev/null @@ -1,160 +0,0 @@ -# NIST NCCoE Alignment: Documentation Gaps - -Assessment of README and docs against the NIST "Software and AI Agent Identity and Authorization" concept paper. 
- ---- - -## What already maps well (but isn't framed for the NIST audience) - -| NIST Concern | Auths Feature | Where it lives | -|---|---|---| -| Agent identification | `did:keri` (stable), `did:key` (per-device), `signer_type: Agent/Human/Workload` | `docs/architecture/identity-model.md`, `docs/architecture/attestation-format.md` | -| Key management lifecycle | KERI inception, rotation, revocation, pre-rotation | `docs/getting-started/identity-lifecycle.md`, `docs/getting-started/trust-model.md` | -| Delegation of authority | `delegated_by` field, attestation chains, `verify_chain()` | `docs/architecture/attestation-format.md` (briefly) | -| Least privilege | Capability-scoped attestations, expiration | `docs/architecture/attestation-format.md` | -| Non-repudiation / audit | KEL is tamper-evident hash chain, dual signatures, Git commit history | `docs/getting-started/trust-model.md`, `docs/architecture/git-as-storage.md` | -| Offline/zero-trust verification | Stateless `auths-verifier`, no server needed | `docs/architecture/crates/auths-verifier.md` | - -The building blocks exist. The problem is they're framed entirely around "developers signing commits" -- the NIST paper cares about **agents acting autonomously in enterprise systems**. - ---- - -## Gaps: What's missing or buried - -### 1. No agent-specific framing anywhere - -**Problem:** The README says "Decentralized identity for individuals, AI agents, and their organizations" in the first line, then never mentions agents again. The NIST audience needs to see agents as first-class citizens, not an afterthought. - -The `signer_type` enum (`Human`, `Agent`, `Workload`) exists in code and is mentioned once in `attestation-format.md` but is never explained or motivated. - -**Tasks:** - -- [x] **README.md** -- Add a section "Agent & Workload Identity" after "What can you do with Auths?" 
showing: - - How a CI runner or AI agent gets a `did:keri` identity - - How an org issues a scoped attestation to an agent (`signer_type: Agent`, time-limited, capability-restricted) - - How a human delegates authority to an agent with `delegated_by` - - Keep it to ~20 lines with a code example - -- [x] **docs/architecture/attestation-format.md** -- Expand the `signer_type` field documentation. Currently it's one cell in a table. Add a subsection "Signer Types: Human, Agent, Workload" explaining: - - When to use each type - - How `signer_type` enables policy engines to distinguish human vs. automated actions - - Brief example of an agent attestation JSON - -### 2. No delegation walkthrough - -**Problem:** The NIST paper asks: "How do we handle delegation of authority for 'on behalf of' scenarios?" and "How do we bind agent identity with human identity to support 'human-in-the-loop' authorizations?" - -Auths has `delegated_by`, attestation chains, and `verify_chain()` -- but there's no doc showing the full delegation flow. - -**Tasks:** - -- [x] **docs/getting-started/delegation.md** (new file) -- A short guide: - - Human creates identity, links device - - Human issues attestation to an AI agent with `delegated_by` pointing to the human's attestation - - Agent acts, signs artifacts - - Verifier walks the chain back to the human - - Show the JSON at each step - - Explain how capabilities narrow at each delegation hop - - Link from README's new "Agent & Workload Identity" section - - Add to mkdocs.yml 's navigation - -### 3. No OIDC / OAuth bridge documentation - -**Problem:** The NIST paper lists OAuth 2.0/2.1 and OIDC as primary standards. Auths already does GitHub OAuth for platform claims. But there's no doc explaining how Auths identities bridge to enterprise OIDC -- how an org can issue attestations based on OIDC tokens, or how an Auths `did:keri` can be presented alongside an OIDC flow. 
- -**Tasks:** - -- [x] **docs/architecture/oidc-bridge.md** (new file) -- Explain the design pattern: - - base this on the /Users/bordumb/workspace/repositories/auths-base/auths/crates/auths-oidc-bridge crate - - Auths identity is the root; OIDC is a claim/proof that can be linked - - GitHub OAuth flow already works this way (show it) - - How an enterprise IdP (Okta, Azure AD) could issue attestations after OIDC verification - - How MCP's OAuth requirement maps: the MCP server verifies an OAuth token, then the Auths attestation chain provides the cryptographic identity behind it - - Keep it architectural, not implementation-heavy -- this is a "here's how it fits" doc - -### 4. No zero-trust framing - -**Problem:** The NIST paper explicitly asks about zero-trust principles for agent authorization (SP 800-207). Auths IS zero-trust by design -- no implicit trust, verify every attestation, no central authority -- but the docs never use the phrase or map to zero-trust concepts. - -**Tasks:** - -- [x] **docs/getting-started/trust-model.md** -- Add a section "Zero-Trust by Design" near the top, mapping: - - "Never trust, always verify" = every attestation is verified cryptographically, no server trust - - "Least privilege" = capability-scoped attestations with expiration - - "Assume breach" = pre-rotation means key compromise is survivable - - "Verify explicitly" = dual signatures, chain verification, witness receipts - - Keep it to ~15 lines, referencing SP 800-207 in passing - -### 5. No enterprise deployment or CI/CD agent story - -**Problem:** The NIST paper's use case #3 is "Enterprise AI agents for software development and deployment." The vision doc mentions CI/CD but the actual docs don't show how a CI runner gets an identity and signs under an org policy. 
- -**Tasks:** - -- [x] **docs/getting-started/sharing-your-identity.md** -- Expand the "Export an identity bundle for CI" section into a fuller "CI/CD & Automated Agent Identity" section: - - How to create a dedicated agent identity (not just export a human's bundle) - - How to issue a time-limited, capability-restricted attestation to a CI agent - - How the CI agent signs artifacts and the org verifies them - - Show the `signer_type: Workload` usage - -### 6. No MCP integration story - -**Problem:** MCP is the first standard listed in the NIST paper. There's no mention of MCP anywhere in Auths docs. - -**Tasks:** - -- [x] **README.md** -- In the new "Agent & Workload Identity" section, add one sentence: "Auths attestations can serve as the cryptographic identity layer behind MCP's OAuth-based authorization, providing verifiable delegation chains from human principals to AI agents." - -- [x] **docs/architecture/oidc-bridge.md** -- Include an "MCP Integration" subsection showing where Auths fits in the MCP auth flow (MCP uses OAuth; Auths provides the identity that the OAuth token represents) - -### 7. No comparison to SPIFFE/SPIRE - -**Problem:** The NIST paper mentions SPIFFE/SPIRE for workload identity. Auths solves a similar problem differently. A brief comparison would help the NIST reader understand positioning. - -**Tasks:** - -- [x] **docs/architecture/oidc-bridge.md** (or rename to `enterprise-integration.md`) -- Add a "Comparison with SPIFFE/SPIRE" subsection: - - SPIFFE: centralized SPIRE server issues SVIDs, runtime attestation - - Auths: self-certifying, no central issuer, Git-native, works offline - - Complementary: SPIFFE for service mesh workload identity, Auths for developer/agent identity with delegation chains - - 10-15 lines max - -### 8. No logging/audit trail documentation - -**Problem:** The NIST paper asks: "How can we ensure that agents log their actions and intent in a tamper-proof and verifiable manner?" - -The KEL IS a tamper-proof log. 
Attestation lifecycle is tracked in Git commits. But there's no doc that explicitly frames this as an audit capability. - -**Tasks:** - -- [x] **docs/getting-started/trust-model.md** -- Add a section "Audit Trail" after the trust boundaries summary: - - KEL = tamper-evident history of every key operation - - Attestation Git refs = lifecycle audit (creation, extension, revocation as commits) - - Seals = cryptographic binding of external events to the identity timeline - - Every agent action that produces a signature is traceable through the attestation chain back to the authorizing human - - 10-15 lines - ---- - -## Summary of file changes - -| File | Action | Priority | -|---|---|---| -| `README.md` | Add "Agent & Workload Identity" section with MCP mention | High | -| `docs/getting-started/delegation.md` | New file: delegation walkthrough with agent examples | High | -| `docs/getting-started/trust-model.md` | Add "Zero-Trust by Design" and "Audit Trail" sections | High | -| `docs/architecture/attestation-format.md` | Expand `signer_type` documentation | Medium | -| `docs/architecture/oidc-bridge.md` (or `enterprise-integration.md`) | New file: OIDC bridge, MCP integration, SPIFFE comparison | Medium | -| `docs/getting-started/sharing-your-identity.md` | Expand CI/CD section for agent identities | Medium | - ---- - -## Tone guidance - -The NIST audience is enterprise security architects and standards people. They don't need to be sold on decentralization -- they need to see: - -1. How Auths maps to their existing frameworks (OAuth, OIDC, SPIFFE, zero-trust) -2. How it solves the specific problems the paper raises (delegation, audit, non-repudiation, key management for agents) -3. Concrete examples, not philosophy - -The vision doc is great for investors/community. The NIST-facing docs should be drier, more standards-aware, and show the mapping explicitly. 
diff --git a/docs/plans/cli_cleanup.md b/docs/plans/cli_cleanup.md new file mode 100644 index 00000000..3ccf9cf9 --- /dev/null +++ b/docs/plans/cli_cleanup.md @@ -0,0 +1,890 @@ +# CLI Cleanup Plan + +## Design Principle + +> **All business logic lives in `auths-sdk`. The `auths-cli` is a thin presentation layer.** +> +> SDK workflows return structured results (reports, status enums). CLI calls SDK, then formats and prints the results. CLI never does file I/O, git operations, or config parsing directly — it delegates to SDK. + +## Source Map + +Key files an implementer needs to know about: + +| Area | File | What it does | +|------|------|-------------| +| `auths init` handler | `crates/auths-cli/src/commands/init/mod.rs` | Entry point: `handle_init()` (L141), `run_developer_setup()` (L166) | +| `auths init` helpers | `crates/auths-cli/src/commands/init/helpers.rs` | `write_allowed_signers()` (L99), `set_git_config()` (L137) | +| `auths doctor` handler | `crates/auths-cli/src/commands/doctor.rs` | `handle_doctor()` (L43), `run_checks()` (L97) | +| Doctor fix adapters | `crates/auths-cli/src/adapters/doctor_fixes.rs` | `GitSigningConfigFix::apply()` (L99), `AllowedSignersFix::apply()` (L37) | +| Allowed signers workflow | `crates/auths-sdk/src/workflows/allowed_signers.rs` | `AllowedSigners` struct (L254), `sync()` (L376), `save()` (L302) | +| SDK setup / init | `crates/auths-sdk/src/setup.rs` | `initialize()` (L49) — orchestrates identity creation | +| Registry ref store | `crates/auths-infra-git/src/ref_store.rs` | `GitRefStore` — reads/writes `refs/auths/registry` | +| Identity init | `crates/auths-id/src/identity/initialize.rs` | `initialize_registry_identity()` (L104) | +| Git hooks | `crates/auths-id/src/storage/registry/hooks.rs` | `install_cache_hooks()` (L60), `install_linearity_hook()` (L271) | +| Diagnostics workflow | `crates/auths-sdk/src/workflows/diagnostics.rs` | `DiagnosticsWorkflow` — used by `auths doctor` | +| SSH config workflow | 
`crates/auths-sdk/src/workflows/ssh_config.rs` | **New.** `SshConfigWorkflow::ensure_config()`, `check_config()` | +| Registry sync workflow | `crates/auths-sdk/src/workflows/registry_sync.rs` | **New.** `RegistrySyncWorkflow::sync_to_repo()` | +| Key backup workflow | `crates/auths-sdk/src/workflows/key_backup.rs` | **New.** `KeyBackupWorkflow::export()`, `is_backed_up()` | + +## Execution Order + +Tasks have dependencies. Do them in this order: + +1. **Task 1** (SSH config) — standalone, no deps +2. **Task 3** (repo allowed_signers) — standalone, no deps +3. **Task 6** (auto-push registry on init) — standalone, no deps +4. **Task 2** (pre-push hook) — after task 6 (same area, don't want conflicts) +5. **Task 5** (doctor checks) — after tasks 1, 3, 6 (doctor needs to check what they write) +6. **Task 4** (identity reset) — after tasks 3, 5, 6 (uses all the new cleanup logic) +7. **Task 7** (umbrella: single-command onboarding) — after all above (integration) +8. **Task 8** (pre-rotation backup nudge) — independent, can be done anytime + +## Testing + +For each task, verify by running the end-to-end flow on a **clean machine** (or with `rm -rf ~/.auths`): + +```bash +# 1. Fresh init +auths init + +# 2. Check everything was set up +auths doctor # should pass all checks +cat ~/.ssh/config # should have IgnoreUnknown UseKeychain +cat .auths/allowed_signers # should have the new key +git for-each-ref refs/auths/ # should have registry ref + +# 3. Make a signed commit and push +git commit --allow-empty -m "test: signed commit" +git push origin main # should also push refs/auths/registry + +# 4. Verify the commit +auths verify HEAD # should pass +``` + +--- + +## Tasks + +### 1. SSH config: add `IgnoreUnknown UseKeychain` + +## Problem + +`auths init` writes `UseKeychain yes` to `~/.ssh/config` under a `Host *` block. This is a macOS-specific OpenSSH option. 
If the user's SSH version doesn't recognize it, **all git+SSH operations fail**: + +``` +/Users/.../.ssh/config: line 7: Bad configuration option: usekeychain +/Users/.../.ssh/config: terminating, 1 bad configuration options +fatal: Could not read from remote repository. +``` + +## Fix + +### 1. `auths init` (onboarding) +When writing the SSH config, prepend `IgnoreUnknown UseKeychain` on the same `Host *` block: + +``` +Host * + IgnoreUnknown UseKeychain + AddKeysToAgent yes + UseKeychain yes + IdentityFile ~/.ssh/id_ed25519_... +``` + +This tells SSH to silently skip `UseKeychain` if unsupported, rather than failing. + +### 2. `auths doctor` (diagnostics) +`auths doctor` should check for this condition: +- If `~/.ssh/config` contains `UseKeychain` without a preceding `IgnoreUnknown UseKeychain`, flag it as a warning +- Print the location of the SSH config and suggest adding the directive +- Users who break their auths setup will likely reach for `auths doctor` first, so this is an important diagnostic to surface + +## Implementation + +### Design note + +All business logic goes in **auths-sdk**. The CLI is a thin presentation layer that calls SDK functions and prints output. + +### `auths init` — write SSH config (SDK) + +**File:** `crates/auths-sdk/src/workflows/ssh_config.rs` (new file) + +There is currently **no function that writes `~/.ssh/config`**. The existing `write_allowed_signers()` in CLI helpers (L99) only writes `~/.ssh/allowed_signers`. Create a new SDK workflow: + +```rust +pub struct SshConfigWorkflow; + +impl SshConfigWorkflow { + /// Ensures ~/.ssh/config has IgnoreUnknown UseKeychain and the identity file. + /// Returns a description of what was changed, or None if no change needed. 
+ pub fn ensure_config(identity_file: &Path) -> Result<Option<String>> { + let home = dirs::home_dir().context("no home directory")?; + let ssh_dir = home.join(".ssh"); + std::fs::create_dir_all(&ssh_dir)?; + let config_path = ssh_dir.join("config"); + + let existing = std::fs::read_to_string(&config_path).unwrap_or_default(); + + // Skip if IgnoreUnknown UseKeychain already present + if existing.contains("IgnoreUnknown UseKeychain") { + return Ok(None); + } + + let block = format!( + "\nHost *\n  IgnoreUnknown UseKeychain\n  AddKeysToAgent yes\n  UseKeychain yes\n  IdentityFile {}\n", + identity_file.display() + ); + + let mut f = std::fs::OpenOptions::new().create(true).append(true).open(&config_path)?; + f.write_all(block.as_bytes())?; + Ok(Some(format!("Added IgnoreUnknown UseKeychain to {}", config_path.display()))) + } + + /// Checks if UseKeychain exists without IgnoreUnknown. Returns diagnostic info. + pub fn check_config() -> Result<SshConfigCheck> { /* ... */ } +} +``` + +Register the module in `crates/auths-sdk/src/workflows/mod.rs`. + +**CLI caller** (`crates/auths-cli/src/commands/init/mod.rs`, post-setup phase ~L236): +```rust +if let Some(msg) = SshConfigWorkflow::ensure_config(&ssh_key_path)? 
{ + out.println(&format!("✓ {msg}")); +} +``` + +### `auths doctor` — check SSH config + +**File:** `crates/auths-sdk/src/workflows/diagnostics.rs` + +Add a new check method alongside `check_git_signing_config()` (L72): + +```rust +fn check_ssh_config(&self, checks: &mut Vec<CheckResult>) -> Result<(), DiagnosticError> { + let home = dirs::home_dir().ok_or_else(|| DiagnosticError::ExecutionFailed("no home".into()))?; + let config_path = home.join(".ssh").join("config"); + let content = std::fs::read_to_string(&config_path).unwrap_or_default(); + + let has_usekeychain = content.lines().any(|l| l.trim().eq_ignore_ascii_case("usekeychain yes")); + let has_ignore = content.lines().any(|l| l.trim().starts_with("IgnoreUnknown") && l.contains("UseKeychain")); + + if has_usekeychain && !has_ignore { + checks.push(CheckResult { + name: "ssh_config_usekeychain".into(), + passed: false, + message: Some(format!( + "~/.ssh/config has UseKeychain without IgnoreUnknown UseKeychain. Add 'IgnoreUnknown UseKeychain' to the Host * block in {}", + config_path.display() + )), + config_issues: vec![ConfigIssue::Absent("IgnoreUnknown UseKeychain".into())], + }); + } else { + checks.push(CheckResult { name: "ssh_config_usekeychain".into(), passed: true, message: None, config_issues: vec![] }); + } + Ok(()) +} +``` + +Register in `available_checks()` (L31) and call from `run()` (L61). + +**File:** `crates/auths-cli/src/adapters/doctor_fixes.rs` + +Add `SshConfigFix` implementing `DiagnosticFix` (same pattern as `AllowedSignersFix`). Register it in `build_available_fixes()` in `crates/auths-cli/src/commands/doctor.rs` (L193). + +## Context + +Discovered while dogfooding the `@auths-dev/verify` widget. After wiping and re-creating an identity, `git push` failed due to this SSH config issue. + + +### 2. Pre-push hook to sync `refs/auths/registry` + +## Problem + +After `auths init`, the registry (`refs/auths/registry`) is written to `~/.auths/.git`, not to the current project repo. 
Users must manually run: + +```bash +git fetch ~/.auths refs/auths/registry:refs/auths/registry +git push origin refs/auths/registry --force +``` + +This is undiscoverable — nothing in the CLI tells users they need to do this, and downstream tools (e.g., the `@auths-dev/verify` widget) silently fail because the project repo on GitHub has no `refs/auths/registry`. + +## Proposal + +Add a **pre-push Git hook** that automatically syncs `refs/auths/registry` from `~/.auths` into the project repo before pushing. + +### Why pre-push (not pre-commit) + +- Not every commit needs the registry synced — only when pushing to a remote +- Catches all pushes including direct-to-main workflows +- Pre-commit would be too frequent and noisy + +### Suggested behavior + +1. On `git push`, the hook checks if `~/.auths/.git/refs/auths/registry` exists +2. If so, fetch it into the local repo: `git fetch ~/.auths refs/auths/registry:refs/auths/registry` +3. Include `refs/auths/registry` in the push +4. If `~/.auths` has no registry, skip silently (user hasn't run `auths init`) + +### Installation + +The hook could be installed automatically by `auths init` or `auths git setup`, similar to how git signing is configured. + +## Implementation + +**File:** `crates/auths-id/src/storage/registry/hooks.rs` + +Follow the existing pattern from `install_cache_hooks()` (L60) and `install_linearity_hook()` (L271): + +1. Add a constant for the hook marker: +```rust +const REGISTRY_SYNC_MARKER: &str = "# auths-registry-sync"; +``` + +2. Add hook script: + +Does this work for Mac, Linux and Windows? (e.g. 
`$HOME`) +```rust +const REGISTRY_SYNC_HOOK: &str = r#"#!/bin/sh +# auths-registry-sync +# Syncs refs/auths/registry from ~/.auths into this repo before pushing + +AUTHS_HOME="$HOME/.auths" +REGISTRY_REF="refs/auths/registry" + +if [ -d "$AUTHS_HOME/.git" ] && git --git-dir="$AUTHS_HOME/.git" rev-parse --verify "$REGISTRY_REF" >/dev/null 2>&1; then + git fetch "$AUTHS_HOME" "$REGISTRY_REF:$REGISTRY_REF" --quiet 2>/dev/null || true + # Read push args (pre-push receives <remote-name> <remote-url> as arguments; ref updates arrive on stdin) + # After the normal push completes, push the registry ref too + REMOTE="$1" + git push "$REMOTE" "$REGISTRY_REF" --force --quiet 2>/dev/null || true +fi +"#; +``` + +3. Add installation function following the same pattern as `install_cache_hooks()`: +```rust +pub fn install_pre_push_hook(repo_path: &Path) -> Result<()> { + let git_dir = find_git_dir(repo_path)?; + let hooks_dir = git_dir.join("hooks"); + std::fs::create_dir_all(&hooks_dir)?; + install_hook(&hooks_dir, "pre-push", REGISTRY_SYNC_HOOK, REGISTRY_SYNC_MARKER)?; + Ok(()) +} +``` + +The `install_hook()` helper (L87) already handles idempotency (checks for marker), appending to existing hooks, and setting `0o755` permissions. + +**Caller:** Add to `run_developer_setup()` in `crates/auths-cli/src/commands/init/mod.rs` (L236, post-setup phase). Only install when running inside a git repo (check `.git` exists in cwd or parents). + +## Context + +Discovered while dogfooding the verify widget (`@auths-dev/verify`) with the [example-verify-badge](https://github.com/auths-dev/example-verify-badge) repo. The widget fetches `refs/auths/registry` from the GitHub API to verify attestations, but the ref was missing from the remote because the manual sync step was not documented or automated. + +### 3. Auto-populate `.auths/allowed_signers` in repo + +## Problem + +After running `auths init`, the user's signing key is added to `~/.ssh/allowed_signers` (global), but the repo's `.auths/allowed_signers` is not created or updated. 
This means: + +1. The GitHub Action (`auths-verify-github-action`) can't verify commits because it reads `.auths/allowed_signers` from the repo +2. The user has to manually figure out the correct format (`<principal> namespaces="git" ssh-ed25519 <key>`) +3. New contributors have no obvious way to add their key + +## Expected behavior + +`auths init` should: +- Create `.auths/allowed_signers` in the current repo if it doesn't exist +- Append the user's device DID principal + SSH public key in the correct format +- Match the format used in `~/.ssh/allowed_signers` (e.g., `z6Mk...@auths.local namespaces="git" ssh-ed25519 AAAA...`) + +## Implementation + +### Design note + +All business logic goes in **auths-sdk**. The CLI is a thin presentation layer. + +**File:** `crates/auths-sdk/src/workflows/allowed_signers.rs` + +The `AllowedSigners` struct (L254) and `sync()` (L376) already exist and handle the correct format: `<did>@auths.local namespaces="git" ssh-ed25519 <key>`. Add a convenience method to the existing workflow: + +```rust +impl AllowedSigners { + /// Sync allowed_signers for a specific repo's .auths/ directory. + /// Creates .auths/allowed_signers if it doesn't exist. + /// Returns the number of signers added. + pub fn sync_repo(repo_root: &Path) -> Result<SyncReport> { + let auths_dir = repo_root.join(".auths"); + std::fs::create_dir_all(&auths_dir)?; + let signers_path = auths_dir.join("allowed_signers"); + + let home = auths_core::paths::auths_home()?; + let storage = RegistryAttestationStorage::new(&home); + let mut signers = AllowedSigners::load(&signers_path) + .unwrap_or_else(|_| AllowedSigners::new(&signers_path)); + let report = signers.sync(&storage)?; + signers.save()?; + Ok(report) + } +} +``` + +This belongs in the SDK because it reuses `AllowedSigners`, `RegistryAttestationStorage`, and `sync()` — all SDK types. 
+ +**CLI caller** (`crates/auths-cli/src/commands/init/mod.rs`, post-setup phase ~L236): +```rust +if let Ok(repo_root) = detect_repo_root() { + let report = AllowedSigners::sync_repo(&repo_root)?; + out.println(&format!("✓ Wrote {} signer(s) to .auths/allowed_signers", report.added)); +} +``` + +## Context + +Discovered during dogfooding. The example repos had placeholder keys in `.auths/allowed_signers` that had to be manually replaced with real keys before the GitHub Action would pass. + +### 4. Identity reset (`auths init --reset`) + +## Problem + +When a user needs to wipe and recreate their identity (e.g., during development or after key compromise), the process is manual and error-prone: + +1. Must manually `rm -rf ~/.auths` to remove the old identity +2. `auths init --force` creates a new identity but doesn't clean up stale data: + - Old `refs/auths/registry` refs remain in repos with mismatched attestations + - Old entries in `~/.ssh/allowed_signers` accumulate (though this is harmless) + - Old SSH key files remain in `~/.ssh/` + - `.auths/allowed_signers` in repos still references the old key +3. Must manually `git update-ref -d refs/auths/registry` in each repo, then re-push +4. Multiple `auths init` runs can accumulate broken attestations in the registry + +## Expected behavior + +Provide a clean reset path: + +- `auths init --reset` that: + - Removes the old identity from `~/.auths` + - Cleans up `refs/auths/registry` in the current repo + - Updates `~/.ssh/allowed_signers` (removes old entry, adds new) + - Updates `.auths/allowed_signers` in the current repo + - Warns about other repos that may still reference the old identity + +## Implementation + +### Design note + +All business logic goes in **auths-sdk**. The CLI only adds the `--reset` flag and calls SDK. + +### SDK — reset workflow + +**File:** `crates/auths-sdk/src/setup.rs` + +Add `reset()` alongside the existing `initialize()` (L49). 
It's the inverse operation: + +```rust +/// Result of resetting an identity. CLI uses this to display what happened. +pub struct ResetReport { + pub identity_removed: bool, + pub registry_cleaned: bool, + pub global_signers_cleaned: usize, // number of entries removed + pub repo_signers_cleaned: usize, // number of entries removed +} + +/// Wipe the current identity and clean up all artifacts. +/// Call this before `initialize()` to do a full reset+reinit. +pub fn reset(repo_root: Option<&Path>) -> Result<ResetReport> { + let mut report = ResetReport { identity_removed: false, registry_cleaned: false, global_signers_cleaned: 0, repo_signers_cleaned: 0 }; + let home = auths_core::paths::auths_home()?; + + // 1. Remove old identity + if home.exists() { + std::fs::remove_dir_all(&home)?; + report.identity_removed = true; + } + + // 2. Clean refs/auths/registry in current repo + if let Some(root) = repo_root { + let status = Command::new("git") + .current_dir(root) + .args(["update-ref", "-d", "refs/auths/registry"]) + .status(); + report.registry_cleaned = status.map(|s| s.success()).unwrap_or(false); + } + + // 3. Clean old entries from ~/.ssh/allowed_signers + let ssh_signers = dirs::home_dir().unwrap().join(".ssh/allowed_signers"); + if ssh_signers.exists() { + let content = std::fs::read_to_string(&ssh_signers)?; + let original_count = content.lines().count(); + let filtered: Vec<&str> = content.lines() + .filter(|l| !l.contains("@auths.local")) + .collect(); + report.global_signers_cleaned = original_count - filtered.len(); + std::fs::write(&ssh_signers, filtered.join("\n") + "\n")?; + } + + // 4. 
Clean .auths/allowed_signers in current repo + if let Some(root) = repo_root { + let repo_signers = root.join(".auths/allowed_signers"); + if repo_signers.exists() { + let content = std::fs::read_to_string(&repo_signers)?; + let original_count = content.lines().count(); + let filtered: Vec<&str> = content.lines() + .filter(|l| !l.contains("@auths.local")) + .collect(); + report.repo_signers_cleaned = original_count - filtered.len(); + std::fs::write(&repo_signers, filtered.join("\n") + "\n")?; + } + } + + Ok(report) +} +``` + +### CLI — thin wrapper + +**File:** `crates/auths-cli/src/commands/init/mod.rs` + +1. Add `--reset` flag to `InitCommand` struct (around L101): +```rust +/// Reset and reinitialize identity (implies --force) +#[clap(long)] +pub reset: bool, +``` + +2. Add reset logic at the top of `handle_init()` (L141), before profile selection: +```rust +if cmd.reset { + cmd.force = true; + let repo_root = detect_repo_root().ok(); + let report = auths_sdk::setup::reset(repo_root.as_deref())?; + + // CLI only does presentation + if report.identity_removed { out.println("Removed old identity."); } + if report.registry_cleaned { out.println("Cleaned refs/auths/registry."); } + if report.global_signers_cleaned > 0 { out.println(&format!("Removed {} old entries from ~/.ssh/allowed_signers.", report.global_signers_cleaned)); } + if report.repo_signers_cleaned > 0 { out.println(&format!("Removed {} old entries from .auths/allowed_signers.", report.repo_signers_cleaned)); } + out.println("Warning: other repos may still reference the old identity. Run 'auths doctor' in each repo."); +} +``` + +After reset, the normal `auths init` flow continues and creates a fresh identity. + +## Context + +During dogfooding, multiple identity recreations left stale attestations in the registry. The widget showed "InvalidSignature" because old attestations referenced a different identity's key. Had to manually `git update-ref -d refs/auths/registry` and re-init to fix. + + +### 5. 
Expand `auths doctor` checks + +## Problem + +`auths doctor` is the natural place users go when things break, but it currently doesn't catch several common issues discovered during dogfooding: + +## Checks to add + +### SSH config +- Detect `UseKeychain` without `IgnoreUnknown UseKeychain` (see #74) +- Verify the SSH identity file referenced in config actually exists +- Check `gpg.format = ssh` and `commit.gpgsign = true` in git config + +### Registry +- Check if `refs/auths/registry` exists in the current repo +- Verify the identity in the registry matches the current active identity +- Warn if the registry has attestations signed by a different identity (stale data from identity recreation) +- Check if registry is pushed to the remote + +### Allowed signers +- Check if `~/.ssh/allowed_signers` exists and contains the current device's key +- Check if `.auths/allowed_signers` exists in the current repo +- Warn if repo's allowed_signers has placeholder/example keys +- Verify format is correct (`<principal> namespaces="git" ssh-ed25519 <key>`) + +### Signing +- Verify a test signature can be created and verified (round-trip check) +- Check that `git log --show-signature` works for recent commits + +## Implementation + +### Architecture + +The diagnostics system has three layers: + +1. **Provider traits** (`crates/auths-sdk/src/ports/diagnostics.rs`): `GitDiagnosticProvider` (L64) and `CryptoDiagnosticProvider` (L78) — define what the system can check +2. **Workflow** (`crates/auths-sdk/src/workflows/diagnostics.rs`): `DiagnosticsWorkflow` — orchestrates checks, returns `DiagnosticReport` +3. **Fix adapters** (`crates/auths-cli/src/adapters/doctor_fixes.rs`): Implement `DiagnosticFix` trait — each fix addresses a specific `CheckResult` + +Currently only 3 checks exist: `git_version`, `ssh_keygen`, `git_signing_config`. Add the new ones below. 
+ +### New checks to add + +For each check, add a method to `DiagnosticsWorkflow` following the pattern of `check_git_signing_config()` (L72): + +**1. `check_ssh_config`** — See Task 1 implementation above. + +**2. `check_ssh_identity_file`** — Verify the SSH key file referenced in `~/.ssh/config` exists: +```rust +// Read ~/.ssh/config, find IdentityFile lines, check each file exists +``` + +**3. `check_registry_exists`** — Check `refs/auths/registry` in current repo: +```rust +fn check_registry(&self, checks: &mut Vec<CheckResult>) -> Result<(), DiagnosticError> { + let output = Command::new("git") + .args(["rev-parse", "--verify", "refs/auths/registry"]) + .output(); + // If fails, push ConfigIssue::Absent("refs/auths/registry") + // Also: compare identity in registry with active identity from ~/.auths +} +``` + +**4. `check_repo_allowed_signers`** — Check `.auths/allowed_signers` exists and has current key: +```rust +// Read .auths/allowed_signers, check for current device DID principal +// Warn if contains placeholder keys (e.g., "ssh-ed25519 AAAA..." with no real principal) +``` + +**5. `check_signing_roundtrip`** — Verify sign + verify works: +```rust +// Create a temp file, sign it with ssh-keygen, verify it — confirms the full chain works +``` + +**6. `check_pre_rotation_backup`** (Task 8) — Gentle nudge about backup. + +### Extending the provider traits + +Some new checks (registry, allowed signers) don't fit neatly into `GitDiagnosticProvider` or `CryptoDiagnosticProvider`. Options: +- Add methods to the existing traits +- Add a new `IdentityDiagnosticProvider` trait +- Keep the checks as standalone methods in `DiagnosticsWorkflow` that use `Command::new("git")` directly (simplest, matches the pattern of `check_git_signing_config`) + +Recommended: keep them as private methods on `DiagnosticsWorkflow` (simplest). Only add new traits if the checks need mocking in tests. 
+ +### Fix adapters + +For each new check that has a fix, add a struct implementing `DiagnosticFix` in `doctor_fixes.rs` and register it in `build_available_fixes()` (doctor.rs L193). Follow the pattern: + +```rust +pub struct RegistryFix { /* fields */ } + +impl DiagnosticFix for RegistryFix { + fn name(&self) -> &str { "registry_sync" } + fn is_safe(&self) -> bool { true } + fn can_fix(&self, check: &CheckResult) -> bool { check.name == "registry_exists" && !check.passed } + fn apply(&self) -> Result<String> { + // git fetch ~/.auths refs/auths/registry:refs/auths/registry + Ok("Synced registry from ~/.auths".into()) + } +} +``` + +### Updating `available_checks()` + +Update the static slice in `available_checks()` (L31) to include all new check names, and add dispatch branches in `run_single()` (L38). + +## Context + +During dogfooding, every one of these issues was hit. `auths doctor` surfacing them with actionable fix commands would have saved significant debugging time. + +### 6. Auto-push registry on `auths init` + +## Problem + +`auths init` creates the identity and writes attestations to `refs/auths/registry` in `~/.auths/.git`, but the user must manually: + +1. `git fetch ~/.auths refs/auths/registry:refs/auths/registry` — pull registry into the project repo +2. `git push origin refs/auths/registry` — push to remote + +This is non-obvious and undiscoverable. New users don't know the registry exists in `~/.auths`, and the fetch-from-local-path pattern is uncommon. + +## Expected behavior + +After `auths init` (when run inside a git repo): +- Automatically copy `refs/auths/registry` from `~/.auths` into the current repo +- Prompt or auto-push to the remote + +This is related to but distinct from #73 (pre-push hook for ongoing sync). This issue is about the **initial setup** experience. + +## Implementation + +### Design note + +All business logic goes in **auths-sdk**. The CLI is a thin presentation layer. 
+ +### SDK — registry sync workflow + +**File:** `crates/auths-sdk/src/workflows/registry_sync.rs` (new file) + +```rust +pub struct RegistrySyncReport { + pub fetched: bool, + pub pushed: bool, + pub skipped_reason: Option<String>, +} + +pub struct RegistrySyncWorkflow; + +impl RegistrySyncWorkflow { + /// Sync refs/auths/registry from ~/.auths into the given repo, optionally push to remote. + pub fn sync_to_repo(repo_root: &Path) -> Result<RegistrySyncReport> { + let home = auths_core::paths::auths_home()?; + let mut report = RegistrySyncReport { fetched: false, pushed: false, skipped_reason: None }; + + // Fetch registry from ~/.auths into this repo + let status = Command::new("git") + .current_dir(repo_root) + .args(["fetch", &home.to_string_lossy(), "refs/auths/registry:refs/auths/registry"]) + .status()?; + if !status.success() { + report.skipped_reason = Some("could not fetch registry from ~/.auths".into()); + return Ok(report); + } + report.fetched = true; + + // Push to remote (if remote exists) + let remote_check = Command::new("git") + .current_dir(repo_root) + .args(["remote", "get-url", "origin"]) + .output()?; + if remote_check.status.success() { + let push_status = Command::new("git") + .current_dir(repo_root) + .args(["push", "origin", "refs/auths/registry", "--force"]) + .status()?; + report.pushed = push_status.success(); + } + + Ok(report) + } +} +``` + +Register the module in `crates/auths-sdk/src/workflows/mod.rs`. 
+ +### CLI — thin wrapper + +**CLI caller** (`crates/auths-cli/src/commands/init/mod.rs`, post-setup phase ~L236): +```rust +if let Ok(repo_root) = detect_repo_root() { + let report = RegistrySyncWorkflow::sync_to_repo(&repo_root)?; + if report.fetched { out.println("✓ Synced refs/auths/registry into this repo."); } + if report.pushed { out.println("✓ Pushed refs/auths/registry to origin."); } + if let Some(reason) = report.skipped_reason { out.println(&format!("⚠ Registry sync skipped: {reason}")); } +} +``` + +**Note:** This is related to Task 2 (pre-push hook) but handles the **initial** sync. Task 2 handles **ongoing** sync on subsequent pushes. Both should be implemented. + +## Context + +During dogfooding, `auths init --force` completed successfully but the widget showed errors because the registry was never pushed to the remote. Required manual git plumbing to fix. + +### 7. Single-command onboarding (`auths init` in a repo) + +## Problem + +The current onboarding flow requires multiple manual steps that aren't documented in sequence: + +1. `auths init` — create identity, configure git signing +2. Manually create/update `.auths/allowed_signers` in the repo +3. Manually fetch registry from `~/.auths` into the project repo +4. Manually push `refs/auths/registry` to the remote +5. Manually add `.github/workflows/verify-commits.yml` +6. Manually fix SSH config if `UseKeychain` breaks + +A first-time user hitting any of these steps without guidance will get stuck. + +## Expected behavior + +`auths init` (when run in a git repo) should handle the full happy path: + +1. Create identity + configure signing (already works) +2. Write `.auths/allowed_signers` with the new key (#77) +3. Copy registry into the repo and push to remote (#80) +4. Fix SSH config issues (#74) +5. Optionally scaffold the CI workflow (or print the command to do so) + +Each step should have clear output showing what was done. If any step fails, `auths doctor` (#79) should catch it. 
+ +## Non-goals + +- Don't force GitHub Pages setup (that's for the widget, not core signing) +- Don't require network access for the identity creation itself + +## Implementation + +### Design note + +This is an **integration task**. The CLI orchestrates SDK functions and displays results. All business logic lives in SDK workflows (Tasks 1, 3, 6) and auths-id (Task 2). + +**File:** `crates/auths-cli/src/commands/init/mod.rs` + +Update `run_developer_setup()` (L166) to add new steps after identity creation. The current flow has 5 phases. Add to the POST-SETUP phase (L236): + +```rust +// === POST-SETUP (existing) === +offer_shell_completions(interactive, &out)?; +write_allowed_signers(&config)?; // existing: writes ~/.ssh/allowed_signers + +// === NEW STEPS — CLI calls SDK, then prints results === + +// Task 1: SSH config (SDK: SshConfigWorkflow) +if let Some(msg) = SshConfigWorkflow::ensure_config(&ssh_key_path)? { + out.println(&format!("✓ {msg}")); +} + +// Task 3: Repo allowed_signers (SDK: AllowedSigners::sync_repo) +if let Ok(repo_root) = detect_repo_root() { + let report = AllowedSigners::sync_repo(&repo_root)?; + out.println(&format!("✓ Wrote {} signer(s) to .auths/allowed_signers", report.added)); + + // Task 6: Registry sync (SDK: RegistrySyncWorkflow) + let sync = RegistrySyncWorkflow::sync_to_repo(&repo_root)?; + if sync.fetched { out.println("✓ Synced refs/auths/registry into this repo."); } + if sync.pushed { out.println("✓ Pushed refs/auths/registry to origin."); } + if let Some(reason) = sync.skipped_reason { out.println(&format!("⚠ Registry sync: {reason}")); } + + // Task 2: Pre-push hook (auths-id: install_pre_push_hook) + install_pre_push_hook(&repo_root)?; + out.println("✓ Pre-push hook installed"); +} + +// Optional: print CI workflow instructions +out.println("\nTo add CI verification, create .github/workflows/verify-commits.yml:"); +out.println(" See: https://github.com/marketplace/actions/verify-commit-signatures-with-auths"); +``` + +Each 
step should be wrapped in error handling that warns but doesn't fail the overall init (non-fatal). The init should complete even if, e.g., the user has no remote configured. + +**Output:** Each step prints what it did. On failure, print a warning and suggest running `auths doctor` for diagnosis. + +## Context + +End-to-end dogfooding session: took ~2 hours to get from `auths init` to a working verification badge, mostly due to undocumented manual steps between the init and the verification actually working. + +### 8. Pre-rotation key backup nudge + +## Problem + +KERI pre-rotation is one of the strongest features of the identity model — the next rotation key is committed to at inception, so key compromise doesn't mean identity loss. But currently, users are never prompted to back up or even know about their pre-rotation key. + +We shouldn't surface this during onboarding. The `auths init` flow should stay fast and frictionless — like how `ssh-keygen` lets you skip the passphrase and most tutorials tell you to. Security-conscious users set one later. Same principle: don't front-load complexity that blocks adoption. + +## Proposed behavior + +### 1. `auths doctor` — gentle nudge +After identity creation, `auths doctor` should include a check: +> "You have a pre-rotation key but haven't backed it up. Run `auths key backup` to export it." + +Low severity, informational — not a blocker. + +### 2. `auths key backup` / `auths recovery export` — explicit command +A dedicated command to export the pre-rotation key material when the user is ready. Clear warnings about what it is and how to store it safely. + +### 3. Post-rotation prompt +After a user performs their first key rotation (`auths key rotate`), prompt them: +> "You just rotated keys. Your new pre-rotation commitment is set. Run `auths key backup` to save your recovery key." + +This is the natural moment where pre-rotation becomes concrete and meaningful. + +### 4. 
Enterprise/team docs
+For organizations that need formal key ceremony procedures, document the pre-rotation backup as part of team onboarding — but keep it out of the individual developer fast path.
+
+## Implementation
+
+### `auths doctor` — backup check
+
+**File:** `crates/auths-sdk/src/workflows/diagnostics.rs`
+
+Add `check_pre_rotation_backup()` as a private method on `DiagnosticsWorkflow`:
+
+```rust
+fn check_pre_rotation_backup(&self, checks: &mut Vec<CheckResult>) -> Result<(), DiagnosticError> {
+    let home = auths_core::paths::auths_home()
+        .map_err(|e| DiagnosticError::ExecutionFailed(e.to_string()))?;
+
+    // Check if a backup marker file exists (e.g., ~/.auths/.backup_exported)
+    let backup_marker = home.join(".backup_exported");
+    if home.exists() && !backup_marker.exists() {
+        checks.push(CheckResult {
+            name: "pre_rotation_backup".into(),
+            passed: true, // informational, not a failure
+            message: Some(
+                "You have a pre-rotation key but haven't backed it up. Run `auths key backup` to export it.".into()
+            ),
+            config_issues: vec![],
+        });
+    }
+    Ok(())
+}
+```
+
+This is informational only — `passed: true` means it won't fail the doctor run, but the message will be displayed.
+
+### `auths key backup` — new command
+
+#### SDK — export logic
+
+**File:** `crates/auths-sdk/src/workflows/key_backup.rs` (new file)
+
+```rust
+pub struct KeyBackupResult {
+    pub key_material: Vec<u8>, // the exported pre-rotation private key
+    pub key_hash: String, // the pre-rotation commitment hash
+}
+
+pub struct KeyBackupWorkflow;
+
+impl KeyBackupWorkflow {
+    /// Export the pre-rotation key material. Marks backup as completed.
+    pub fn export() -> Result<KeyBackupResult, KeyBackupError> {
+        let home = auths_core::paths::auths_home()?;
+        // Read the next_key_hash from state.json (the pre-rotation commitment)
+        // Export the pre-rotation private key from the keychain
+        // Touch ~/.auths/.backup_exported as marker
+        todo!()
+    }
+
+    /// Check if backup has been performed.
+    pub fn is_backed_up() -> Result<bool, KeyBackupError> {
+        let home = auths_core::paths::auths_home()?;
+        Ok(home.join(".backup_exported").exists())
+    }
+}
+```
+
+Register the module in `crates/auths-sdk/src/workflows/mod.rs`.
+
+#### CLI — thin wrapper
+
+**File:** `crates/auths-cli/src/commands/mod.rs` — register new subcommand
+**File:** `crates/auths-cli/src/commands/key/backup.rs` — new file
+
+```rust
+pub fn handle_key_backup() -> Result<()> {
+    let out = Output::new();
+    out.println("⚠ This exports your pre-rotation recovery key.");
+    out.println("  Store it securely (password manager, hardware token, etc.).");
+    out.println("  Anyone with this key can recover your identity after key rotation.\n");
+
+    let result = KeyBackupWorkflow::export()?;
+    // Display key material to user
+    out.println(&format!("Pre-rotation key hash: {}", result.key_hash));
+    // ... display key_material in a safe format
+    Ok(())
+}
+```
+
+### Post-rotation prompt
+
+**File:** wherever `auths key rotate` is handled — after successful rotation, print:
+```
+You just rotated keys. Your new pre-rotation commitment is set.
+Run `auths key backup` to save your recovery key.
+```
+
+## Non-goals
+
+- Don't require backup during `auths init`
+- Don't block any workflow on missing backup
+- Don't make the user think about key management before they've signed their first commit
diff --git a/docs/plans/dependency-architecture-refactor.md b/docs/plans/dependency-architecture-refactor.md
deleted file mode 100644
index d25e403b..00000000
--- a/docs/plans/dependency-architecture-refactor.md
+++ /dev/null
@@ -1,403 +0,0 @@
-# Dependency Architecture Refactor
-
-**Status**: Complete
-**Scope**: Workspace-wide restructuring of crate dependencies, test utilities, and publish pipeline
-**Breaking changes**: Yes (pre-launch, acceptable)
-
----
-
-## Problem Statement
-
-Publishing any crate to crates.io requires manually removing dev-dependencies, inlining test helpers, and publishing with `--no-verify` or `--allow-dirty`.
This is because: - -1. **`auths-test-utils` is a monolith** that depends on 7 workspace crates (`auths-core`, `auths-crypto`, `auths-id`, `auths-storage`, `auths-sdk`, `auths-telemetry`, `auths-verifier`). Any crate that dev-depends on it cannot publish until it's on crates.io — but it can't be published until all its dependencies are. - -2. **`auths-id` ↔ `auths-storage` circular dev-dependency**: `auths-storage` depends on `auths-id` (for traits), `auths-id` dev-depends on `auths-storage` (for testing with real Git backend). Neither can publish first. - -3. **`auths-id` has a `git-storage` feature** that pulls in `git2`, `dirs`, `tempfile`, `tokio` — mixing domain logic with infrastructure concerns. Storage implementation code is split between `auths-id` and `auths-storage`. - -4. **No automated publish ordering** — manual `sleep 60` between publishes, fragile and error-prone. - ---- - -## Principles - -1. **Dependency flow is strictly downward.** Foundation → Domain → Infrastructure → Orchestration → Presentation. No reverse dependencies, not even dev-deps pointing upward. -2. **Each crate owns its own test helpers.** Feature-gated `test-utils` modules replace the monolithic test-utils crate. This is the pattern used by reth (150+ crates), alloy, and tokio. -3. **Traits live with their domain, implementations live in infrastructure.** `auths-id` defines what storage looks like; `auths-storage` provides the implementations. Tests in `auths-id` use in-memory fakes, not real backends. -4. **Contract tests live with the trait they verify.** Exported as macros so implementations can pull them in. 
- ---- - -## Target Architecture - -``` -Layer 0 — Foundation (no workspace deps) -┌─────────────┐ ┌──────────────┐ ┌─────────────────┐ ┌─────────────┐ -│ auths-crypto│ │ auths-policy │ │ auths-telemetry │ │ auths-index │ -└─────────────┘ └──────────────┘ └─────────────────┘ └─────────────┘ - -Layer 1 — Domain (depends only on Layer 0) -┌──────────────────┐ ┌──────────┐ -│ auths-verifier │ │ auths-id │ -│ (crypto) │ │ (crypto, policy, verifier) -└──────────────────┘ └──────────┘ - -Layer 2 — Infrastructure (depends on Layer 0 + 1) -┌────────────────┐ ┌────────────────┐ ┌──────────────────┐ -│ auths-storage │ │ auths-infra-git│ │ auths-infra-http │ -│ (id, core, │ │ (core, sdk, │ │ (core, verifier) │ -│ verifier) │ │ verifier) │ │ │ -└────────────────┘ └────────────────┘ └──────────────────┘ - -Layer 3 — Orchestration (depends on all above) -┌───────────┐ -│ auths-sdk │ -│ (core, id, policy, crypto, verifier) -└───────────┘ - -Layer 4 — Presentation (depends on all above) -┌───────────┐ -│ auths-cli │ -└───────────┘ -``` - -**Key change**: No arrows point upward. No dev-dependencies cross layer boundaries upward. - ---- - -## Phase 1: Distribute test utilities into per-crate `test-utils` features - -This is the highest-impact change. It eliminates the `auths-test-utils` monolith and all circular dev-dependency issues. - -### 1a. 
`auths-crypto` — add `test-utils` feature - -Move from `auths-test-utils/src/crypto.rs`: -- `create_test_keypair(seed: &[u8; 32]) -> (Ed25519KeyPair, [u8; 32])` -- `get_shared_keypair() -> &'static [u8]` -- `gen_keypair() -> Ed25519KeyPair` - -```toml -# auths-crypto/Cargo.toml -[features] -test-utils = ["dep:ring"] # ring is already an optional dep -``` - -```rust -// auths-crypto/src/testing.rs -#[cfg(feature = "test-utils")] -pub mod testing { - use ring::signature::{Ed25519KeyPair, KeyPair}; - use std::sync::OnceLock; - - pub fn create_test_keypair(seed: &[u8; 32]) -> (Ed25519KeyPair, [u8; 32]) { - let keypair = Ed25519KeyPair::from_seed_unchecked(seed).unwrap(); - let public_key: [u8; 32] = keypair.public_key().as_ref().try_into().unwrap(); - (keypair, public_key) - } - - pub fn get_shared_keypair() -> &'static [u8] { /* OnceLock pattern */ } - pub fn gen_keypair() -> Ed25519KeyPair { /* random seed */ } -} -``` - -**Consumers**: Every crate that currently imports `auths_test_utils::crypto::*` switches to: -```toml -[dev-dependencies] -auths-crypto = { workspace = true, features = ["test-utils"] } -``` - -### 1b. 
`auths-id` — add `test-utils` feature - -Move from `auths-test-utils/src/fakes/`, `contracts/`, `fixtures/`, `mocks/`, `storage_fakes.rs`: - -**Fakes** (implement traits defined in `auths-id` itself — no cross-crate dependency needed): -- `FakeRegistryBackend` (implements `RegistryBackend`) -- `FakeAttestationSink` / `FakeAttestationSource` (implements `AttestationSink` / `AttestationSource`) -- `FakeIdentityStorage` (implements `IdentityStorage`) -- `InMemoryStorage` (implements `BlobReader`, `BlobWriter`, `RefReader`, `RefWriter`, `EventLogReader`, `EventLogWriter`) -- `MockClock` (implements `ClockProvider`) -- `MockCryptoProvider` (implements `CryptoProvider`) -- `DeterministicUuidProvider` (implements `UuidProvider`) -- `FakeGitDiagnosticProvider`, `FakeCryptoDiagnosticProvider` -- `FakeGitLogProvider` (implements `GitLogProvider`) - -**Contract test macros**: -- `registry_backend_contract_tests!` -- `git_log_provider_contract_tests!` -- `session_store_contract_tests!` -- `event_sink_contract_tests!` - -**Fixtures**: -- `test_inception_event(key_seed: &str) -> Event` -- `test_attestation(device_did, issuer) -> Attestation` - -**Mockall mocks**: -- `MockIdentityStorage` -- `MockAttestationSource` - -```toml -# auths-id/Cargo.toml -[features] -test-utils = [ - "auths-crypto/test-utils", # chain the feature - "dep:mockall", - "dep:rand", - "dep:tempfile", -] -``` - -```rust -// auths-id/src/testing/mod.rs -#[cfg(feature = "test-utils")] -pub mod testing { - pub mod fakes; // FakeRegistryBackend, FakeAttestationSource, etc. - pub mod contracts; // contract test macros - pub mod fixtures; // test_inception_event, test_attestation - pub mod mocks; // MockIdentityStorage, MockAttestationSource -} -``` - -**Why this works**: All the fakes implement traits defined in `auths-id` itself. The mock implementations use only types from `auths-id` and its dependencies (Layer 0). No upward dependency on `auths-storage` or `auths-sdk`. - -### 1c. 
`auths-telemetry` — add `test-utils` feature - -Move from `auths-test-utils/src/fakes/telemetry.rs`: -- `MemoryEventSink` (implements `EventSink`) - -```toml -# auths-telemetry/Cargo.toml -[features] -test-utils = [] -``` - -### 1d. `auths-core` — expand existing `test-utils` feature - -`auths-core` already declares `test-utils = []` as a feature. Populate it with any test helpers specific to core (if any exist beyond what's in `auths-crypto`). - -### 1e. Git test helpers — move to `auths-infra-git` - -Move from `auths-test-utils/src/git.rs`: -- `init_test_repo() -> (TempDir, git2::Repository)` -- `get_cloned_test_repo() -> TempDir` -- `copy_directory(src, dst)` - -```toml -# auths-infra-git/Cargo.toml -[features] -test-utils = ["dep:tempfile"] -``` - -These are only needed by crates that test against real Git repositories. - -### 1f. Delete `auths-test-utils` - -After all helpers are distributed, remove `crates/auths-test-utils/` entirely: -- Remove from workspace `members` in root `Cargo.toml` -- Remove from `[workspace.dependencies]` -- Remove all `auths-test-utils.workspace = true` lines from every crate - ---- - -## Phase 2: Clean up `auths-id` — remove infrastructure dependencies - -Currently `auths-id` has a `git-storage` feature that brings in `git2`, `dirs`, `tempfile`, `tokio`. This mixes domain logic with infrastructure. - -### 2a. Audit what `git-storage` feature provides in `auths-id` - -Identify all code gated behind `#[cfg(feature = "git-storage")]` in `auths-id/src/`. This likely includes: -- Git-based `IdentityStorage` implementation -- Local `~/.auths` directory management -- Git ref reading/writing for identity data - -### 2b. 
Move git-storage code from `auths-id` to `auths-storage` - -All Git-based storage implementations should live in `auths-storage`: -- Move the git-gated code to `auths-storage/src/git/` -- `auths-storage` already depends on `auths-id` — it can implement the traits -- Remove `git-storage` feature from `auths-id` -- Remove `git2`, `dirs`, `tempfile` from `auths-id`'s dependencies - -### 2c. Remove `auths-storage` dev-dependency from `auths-id` - -After Phase 1, `auths-id` tests use in-memory fakes (from its own `test-utils` feature) instead of `GitRegistryBackend`. The real Git backend is tested in `auths-storage` using the contract test macros exported by `auths-id/test-utils`. - -```rust -// auths-storage/tests/cases/registry_contract.rs -// Import the contract test macro from auths-id -auths_id::testing::contracts::registry_backend_contract_tests!( - git_backend, - { /* construct GitRegistryBackend */ }, -); -``` - -This is how reth does it: the trait crate exports contract tests, the implementation crate runs them. - -### 2d. Result — `auths-id` becomes a pure domain crate - -After this phase, `auths-id`'s dependencies are: -```toml -[dependencies] -auths-core.workspace = true -auths-crypto.workspace = true -auths-policy.workspace = true -auths-verifier.workspace = true -# ... plus pure Rust deps (chrono, serde, etc.) -# NO git2, NO dirs, NO tempfile, NO tokio -``` - -No dev-dependencies on infrastructure crates. Clean Layer 1 crate. - ---- - -## Phase 3: Consolidate `auths-core` role - -`auths-core` currently depends on `auths-crypto` and `auths-verifier`. It provides: -- Platform keychains (macOS, Linux, Windows) -- Agent/passphrase management -- Encryption primitives -- Config management - -### 3a. Evaluate whether `auths-core` should depend on `auths-verifier` - -`auths-verifier` is designed as a minimal, embeddable crate. 
If `auths-core` pulls it in as a dependency, that adds `auths-core` to `auths-verifier`'s reverse dependency tree, which complicates the layer model. - -If the dependency is only used in a few places, consider: -- Making it optional: `auths-verifier = { workspace = true, optional = true }` -- Or duplicating the minimal verification logic needed - -### 3b. Ensure `auths-core` stays at Layer 0 - -`auths-core` should only depend on `auths-crypto` (Layer 0). If it needs types from `auths-id`, that's a sign those types should be in a lower layer. - ---- - -## Phase 4: Automate publishing - -### 4a. Adopt `cargo publish --workspace` (Rust 1.90+) - -Since Rust 1.90 (September 2025), Cargo natively supports workspace publishing: -```bash -cargo publish --workspace -``` - -This topologically sorts crates and publishes in dependency order. After Phases 1-3 eliminate all circular dev-deps, this works out of the box. - -### 4b. Consider `release-plz` for CI - -For automated releases via GitHub PRs: -- Auto-generates changelogs from conventional commits -- Integrates `cargo-semver-checks` for breaking change detection -- Opens a Release PR, publishes on merge -- Handles `sleep` between publishes automatically - -### 4c. Define publish order explicitly - -After the refactor, the publish order is deterministic: -``` -Tier 0 (parallel): auths-crypto, auths-policy, auths-telemetry, auths-index -Tier 1 (parallel): auths-verifier, auths-core -Tier 2 (sequential): auths-id (after verifier, core) -Tier 3 (parallel): auths-storage, auths-infra-git, auths-infra-http -Tier 4: auths-sdk -Tier 5: auths-cli -``` - -No tier depends on a crate in the same or later tier. No circular dependencies. - ---- - -## Phase 5: Cleanup and verification - -### 5a. 
Remove all temporary inlined helpers - -Remove the `create_test_keypair` functions that were inlined in: -- `auths-crypto/tests/cases/provider.rs` -- `auths-verifier/src/verify.rs` -- `auths-verifier/src/witness.rs` -- `auths-verifier/tests/cases/expiration_skew.rs` -- `auths-verifier/tests/cases/revocation_adversarial.rs` - -Replace with: -```rust -use auths_crypto::testing::create_test_keypair; -``` - -### 5b. Re-add dev-dependencies that were removed for publishing - -Restore any dev-deps that were stripped purely for the initial publish (e.g., `auths-storage` in `auths-id` — though after Phase 2, this should no longer be needed). - -### 5c. Full workspace verification - -```bash -cargo fmt --check --all -cargo clippy --all-targets --all-features -- -D warnings -cargo nextest run --workspace -cargo test --all --doc -cargo publish --workspace --dry-run -``` - -### 5d. WASM verification - -```bash -cd crates/auths-verifier && cargo check --target wasm32-unknown-unknown --no-default-features --features wasm -``` - ---- - -## Migration Map - -| Current location | Target location | What | -|---|---|---| -| `auths-test-utils/src/crypto.rs` | `auths-crypto/src/testing.rs` | `create_test_keypair`, `get_shared_keypair`, `gen_keypair` | -| `auths-test-utils/src/git.rs` | `auths-infra-git/src/testing.rs` | `init_test_repo`, `get_cloned_test_repo` | -| `auths-test-utils/src/fakes/*.rs` | `auths-id/src/testing/fakes/*.rs` | All fake trait implementations | -| `auths-test-utils/src/contracts/*.rs` | `auths-id/src/testing/contracts/*.rs` | All contract test macros | -| `auths-test-utils/src/fixtures/*.rs` | `auths-id/src/testing/fixtures/*.rs` | `test_inception_event`, `test_attestation` | -| `auths-test-utils/src/mocks/*.rs` | `auths-id/src/testing/mocks/*.rs` | `MockIdentityStorage`, `MockAttestationSource` | -| `auths-test-utils/src/storage_fakes.rs` | `auths-id/src/testing/fakes/storage.rs` | `InMemoryStorage` | -| `auths-test-utils/src/fakes/telemetry.rs` | 
`auths-telemetry/src/testing.rs` | `MemoryEventSink` | -| `auths-id` git-storage code | `auths-storage/src/git/` | Git-based identity storage | -| `crates/auths-test-utils/` | **deleted** | — | - ---- - -## Consumer Migration - -Every crate that currently has `auths-test-utils.workspace = true` in dev-dependencies gets replaced: - -```toml -# Before -[dev-dependencies] -auths-test-utils.workspace = true - -# After — only enable the features you actually use -[dev-dependencies] -auths-crypto = { workspace = true, features = ["test-utils"] } -auths-id = { workspace = true, features = ["test-utils"] } -``` - -The `test-utils` features chain transitively — `auths-id/test-utils` enables `auths-crypto/test-utils` automatically. - ---- - -## Risks and Mitigations - -| Risk | Mitigation | -|---|---| -| Large diff touching many files | Execute in phases; each phase is independently shippable | -| Contract test macros may have complex dependencies | Audit macro expansions before moving; may need to simplify | -| `auths-id` git-storage removal may break `auths-cli` | `auths-cli` already depends on `auths-storage`; rewire imports | -| Feature flag proliferation | Only two feature flags per crate max (`test-utils` + one domain feature) | -| `mockall` and `rand` become regular deps (optional) of published crates | Gated behind `test-utils` feature; not compiled by default consumers | - ---- - -## Success Criteria - -1. `cargo publish --workspace --dry-run` passes with zero manual intervention -2. `auths-test-utils` crate no longer exists -3. No crate has dev-dependencies on crates in the same or higher layer -4. All 1395+ tests pass -5. WASM build passes -6. 
Each crate's dependency list fits its architectural layer diff --git a/docs/plans/frontend_identity_unification.md b/docs/plans/frontend_identity_unification.md deleted file mode 100644 index bf9ed87f..00000000 --- a/docs/plans/frontend_identity_unification.md +++ /dev/null @@ -1,170 +0,0 @@ -# Detailed Plan: Frontend Identity Unification (did:keri) - -This plan outlines the multi-repository effort to unify decentralized multi-device identity under a single `did:keri` profile in the Radicle frontend. - -## Required Repositories -1. `/Users/bordumb/workspace/repositories/auths-base/auths/crates`: The identity and bridge logic. -2. `/Users/bordumb/workspace/repositories/heartwood/crates`: The Radicle node, storage, and API. -3. `/Users/bordumb/workspace/repositories/radicle.xyz`: The Svelte-based frontend (or relevant UI repo). -4. `/Users/bordumb/workspace/repositories/radicle-base/radicle-explorer/radicle-httpd`: crate that holds the API logic from heartwood to radicle.xyz - -## Phase 1: Bridge & Logic (Repository: `auths-base/auths`) - -**Objective:** Finalize the resolution logic and provide the WASM-ready verifier for the frontend. - -### 1.1 Complete `did:keri` resolution in `auths-radicle` -* **File:** `crates/auths-radicle/src/identity.rs` -* **Change:** Implement the `resolve_keri` arm to replay the KEL and derive the controller DID. 
-* **Pseudo-code:**
-    ```rust
-    // identity.rs
-    impl RadicleIdentityResolver {
-        pub fn resolve(&self, did: &str) -> Result<RadicleIdentity, IdentityError> {
-            match did {
-                d if d.starts_with("did:key:") => self.resolve_key(d),
-                d if d.starts_with("did:keri:") => self.resolve_keri(d), // <--- NEW
-                _ => Err(IdentityError::UnsupportedMethod),
-            }
-        }
-
-        fn resolve_keri(&self, did: &str) -> Result<RadicleIdentity, IdentityError> {
-            let prefix = extract_prefix(did);
-            let kel = self.storage.load_kel(prefix)?;
-            let state = validate_kel(&kel)?; // From auths-id
-            Ok(RadicleIdentity {
-                did: did.to_string(),
-                keys: state.current_keys.iter().map(|k| k.to_did()).collect(),
-                metadata: state.metadata,
-            })
-        }
-    }
-    ```
-
-### 1.2 Prepare WASM Verifier
-* **File:** `crates/auths-verifier/Cargo.toml`
-* **Action:** Ensure the `wasm` feature flag is robust and exports necessary bindings for `auths-verifier-ts`.
-* **Pseudo-code:**
-    ```rust
-    #[wasm_bindgen]
-    pub fn verify_device_link(kel_json: &str, attestation_json: &str, device_did: &str) -> bool {
-        let kel = deserialize_kel(kel_json);
-        let attestation = deserialize_attestation(attestation_json);
-        auths_verifier::core::verify(&kel, &attestation, device_did).is_ok()
-    }
-    ```
-
----
-
-## Phase 2: API & Data Serving (Repository: `heartwood`)
-
-**Objective:** Update the Radicle node to serve identity data and recognize the `did:keri` hierarchy.
-
-### 2.1 Update Node API to expose Controller
-* **File:** `radicle-node/src/api/v1/users.rs` (in `heartwood`)
-* **Action:** When a user profile (`did:key`) is requested, look up its controller.
-* **Pseudo-code:**
-    ```rust
-    // Users API handler
-    #[get("/users/:did")]
-    fn get_user(did: Did) -> Json<UserResponse> {
-        let controller = bridge.find_identity_for_device(&did, &repo_id);
-        UserResponse {
-            did: did,
-            controller_did: controller, // <--- NEW: did:keri:...
- is_keri: did.is_keri(), - devices: if did.is_keri() { bridge.list_devices(did) } else { vec![] } - } - } - ``` - -### 2.2 Serve KEL and Attestations over HTTP -* **File:** `radicle-node/src/api/v1/identity.rs` -* **Action:** Add raw blob access for the frontend verifier. -* **Endpoints:** - - `GET /v1/identity/:did/kel` -> returns `refs/keri/kel` commit chain - - `GET /v1/identity/:did/attestations` -> returns all `refs/keys/*/signatures/*` blobs - ---- - -## Phase 3: Client-Side UI (Repository: `radicle.xyz`) - -**Objective:** Unify the UI around the controller identity and verify links locally. - -### 3.1 Unify Profile Rendering -* **File:** `src/lib/views/User.svelte` -* **Action:** If a `did:key` is visited, check if it has a `controller_did`. If so, render the controller's profile name/bio. -* **Pseudo-code:** - ```javascript - // User.svelte - async function loadProfile(did) { - const user = await api.get(`/users/${did}`); - if (user.controller_did) { - // Unify: Fetch the Person's profile instead of the Device's - this.profile = await api.get(`/users/${user.controller_did}`); - this.is_device = true; - this.device_did = did; - } - } - ``` - -### 3.2 Implement Client-Side Verification -* **File:** `src/lib/auths.ts` -* **Action:** Use `auths-verifier-ts` to prove the identity link. -* **Pseudo-code:** - ```javascript - import { verify_device_link } from 'auths-verifier-ts'; - - async function verifyUser(did_keri, did_key) { - const kel = await api.get(`/identity/${did_keri}/kel`); - const attest = await api.get(`/identity/${did_key}/attestations`); - const isValid = verify_device_link(kel, attest, did_key); - return isValid; // UI shows a "Verified" badge if true - } - ``` - ---- - -## Phase 4: Integration & E2E (Script: `radicle-e2e.sh`) - -**Objective:** Validate the 3-repo cycle using the existing E2E framework. - -### 4.1 Update E2E script to check API and UI -* **File:** `scripts/radicle-e2e.sh` -* **Action:** - 1. Perform the multi-device link. - 2. 
Query the local `radicle-node` API for `NODE1_DID`. - 3. **Assert:** The `controller_did` in the JSON response equals `CONTROLLER_DID`. - 4. (Optional) Run a headless browser check to ensure the same Profile Name appears on both device pages. - -### 4.2 Validate Revocation UI -* **Action:** - 1. Revoke `NODE2_DID` via `auths device revoke`. - 2. Query API for `NODE2_DID`. - 3. **Assert:** The response marks the device as `revoked: true` or the `controller_did` lookup now returns `None`. - ---- - -## Remaining Tasks (Detailed) - -### Phase 1: Logic & WASM (In Progress) -- [ ] **Refactor `RadicleIdentity`**: Update the struct in `auths-radicle/src/identity.rs` to include KERI-specific fields (current key set, sequence number) to support the unified profile view. -- [ ] **Expose `resolve_keri`**: Make the KERI resolution logic public and ensure it returns the enriched `RadicleIdentity` instead of a flat `ResolvedDid`. -- [ ] **WASM Binding Audit**: Ensure `wasm_verify_device_link` in `auths-verifier` returns exactly the JSON structure required by Phase 3.2. - -### Phase 2: Heartwood API (Pending) -- Repo: /Users/bordumb/workspace/repositories/radicle-base/radicle-explorer/radicle-httpd -- [ ] **Locate API Routes**: update `radicle-httpd` API v1 implementation -- [ ] **Extend User Endpoint**: Modify `GET /v1/users/:did` to perform a bridge lookup for the controller identity. -- [ ] **New Identity Endpoints**: - - Implement `GET /v1/identity/:did/kel` to serve the full KERI Event Log from the identity repo. - - Implement `GET /v1/identity/:did/attestations` to serve all device signatures for that identity. - -### Phase 3: Frontend / UI (Pending) -- Repo: /Users/bordumb/workspace/repositories/radicle-base/radicle.xyz -- [ ] **`auths-verifier-ts` Integration**: Add the WASM verifier dependency to the frontend `package.json`. -- [ ] **Profile Unification**: Update the User profile component to check for `controller_did` and toggle between "Device View" and "Person View". 
-- [ ] **Local Verification Link**: Implement the `auths.ts` helper to fetch KEL/Attestations and run the WASM verifier on page load. - -### Phase 4: Verification & E2E (Pending) -- [ ] **API Assertions**: Add `curl` checks to `scripts/radicle-e2e.sh` to verify that `controller_did` is correctly populated after a `pair` operation. -- [ ] **UI Integration Test**: (Optional) Add a basic Playwright test to verify the "Verified" badge appears in the browser. diff --git a/docs/plans/full_radicle_integration.md b/docs/plans/full_radicle_integration.md deleted file mode 100644 index faa24fc4..00000000 --- a/docs/plans/full_radicle_integration.md +++ /dev/null @@ -1,90 +0,0 @@ -# Full Radicle + Auths Integration Plan - -## Executive Summary - -The goal is to provide **invisible multi-device identity** for Radicle users. A user should be able to manage their identity across multiple machines (laptop, desktop, server) using standard Radicle commands, without needing to understand KERI, attestations, or the underlying `auths` infrastructure. - -Currently, the integration is at the "Bridge" level (verification only). This plan outlines the path to full "Lifecycle" integration (creation, pairing, and management). - -## Feedback on Current Approach - -1. **Bridge Architecture (Excellent):** The "Zero New Crypto" principle and the clean trait-based boundary between Heartwood and `auths-radicle` are correct. It prevents dependency bloat and keeps Radicle's core cryptographic assumptions intact. -2. **Two-CLI Barrier (Needs Improvement):** Requiring users to install and run both `rad` and `auths` (as seen in `radicle-e2e.sh`) is a significant friction point. -3. **Manual Key Import (Needs Automation):** The manual step of `auths key import --seed-file` is error-prone and exposes sensitive data. `rad` already has the seed; it should handle this internally. - -## Proposed "Invisible" User Workflow - -### 1. 
Initial Identity Creation -**Command:** `rad auth` -- **Current:** Creates an Ed25519 keypair and a `did:key:z6Mk...`. -- **Integrated:** - 1. Creates the Radicle keypair. - 2. Automatically initializes a KERI identity (Controller DID). - 3. Links the Radicle key as the first "device". - 4. Stores all state in the standard Radicle storage location (`~/.radicle/storage`). - -### 2. Adding a Second Device -**Command:** `rad auth device pair` (on the new machine) -- **Workflow:** - 1. The new machine generates its own `did:key`. - 2. It starts a local discovery service (mDNS/LAN). - 3. The user runs `rad auth device pair --accept` on the *existing* machine. - 4. The machines perform a secure handshake. - 5. The existing machine (controller) signs a linking attestation for the new device and gossips it. - -### 3. Revocation -**Command:** `rad auth device revoke ` -- **Workflow:** Marks the device as revoked in the KERI log. All subsequent fetches by peers will automatically reject signatures from that NID. - ---- - -## Technical Integration Plan - -### Phase 1: Bridge Finalization (Current Focus) -- Complete `auths-radicle` implementation. -- Finalize `CompositeAuthorityChecker` in Heartwood to use the bridge. -- Align RIP-X ref paths across both codebases. -- **Status:** In progress (Tasks fn-1.x and fn-2.x). - -### Phase 2: CLI & SDK Embedding -- **Goal:** Move `auths` logic into the `rad` binary. -- **Action:** Heartwood's `rad` CLI should depend on `auths-sdk` and `auths-id`. -- **Action:** Implement `rad auth` subcommands that wrap `auths-sdk` functionality. -- **Action:** Automate "Seed to Keychain" flow. `rad` should pass the seed directly to `auths-id`'s keychain without writing to temporary files. - -### Phase 3: Storage Layout Unification -- **Goal:** Single source of truth for identity state. 
-- **Action:** Default all `auths` operations to use Radicle's storage layout: - - KEL: `refs/keri/kel` - - Attestations: `refs/keys//link-attestation.json` - - Identity Repo: A dedicated Radicle repo (RID) for the user's identity state. - -### Phase 4: Identity Document Evolution -- **Goal:** Formalize `did:keri` in Radicle. -- **Action:** Update Radicle's Identity Document schema to allow `did:keri:...` as a delegate. -- **Action:** When a `did:keri` is a delegate, Radicle's fetch pipeline automatically invokes the bridge to verify which `did:key` is currently authorized. - -### Phase 5: P2P Gossip & Sync -- **Goal:** Instant revocation propagation. -- **Action:** Integrate KERI event propagation into Radicle's gossip protocol. -- **Action:** Ensure `rad sync` prioritized fetching the identity repository to prevent "Quarantine" states during fetch. - ---- - -## Strategic Feedback: The "Device" as a Delegate - -The most important conceptual shift is treating the **Radicle Node (NID)** as a transient "Device" and the **KERI Identity** as the permanent "User". - -- **Legacy Mode:** `Doc.delegates = [did:key:z6Mk...node1]` -- **Multi-Device Mode:** `Doc.delegates = [did:keri:E...user]` - -By making this change, Radicle can support: -- Rotating keys without updating every project's ID document. -- Revoking lost laptops instantly. -- Shared organization accounts with threshold signatures (M-of-N devices). - -## Next Steps for Development - -1. **Refactor `auths-cli` commands** into reusable library functions in `auths-sdk` that take `AuthsStorage` and `Keychain` as arguments. -2. **Create a `rad-auths-integration` crate** (or expand `auths-radicle`) that provides the high-level `pair`, `link`, and `revoke` logic specifically for the `rad` CLI. -3. **Update `scripts/radicle-e2e.sh`** to use the unified `rad` commands once they are implemented, slowly phasing out the use of the `auths` binary. 
diff --git a/docs/plans/gemini_feedback.md b/docs/plans/gemini_feedback.md deleted file mode 100644 index 3e7f9ff0..00000000 --- a/docs/plans/gemini_feedback.md +++ /dev/null @@ -1,143 +0,0 @@ -# Gemini Feedback: A CTO's Playbook for the Auths v0.1.0 Launch - -**To:** Auths Leadership -**From:** Gemini (CTO / DX Lead Persona) -**Date:** 2026-03-06 -**Subject:** An Actionable Roadmap for the Auths v0.1.0 Launch - -## 1. Executive Summary & Restructured Plan - -Our goal for the v0.1.0 launch is to establish `auths` as the most polished, trustworthy, and developer-obsessed identity platform on the market. Our current codebase is functionally powerful but lacks the stability and seamless developer experience (DX) required for a public launch. - -This document has been restructured from a simple list of issues into a **chronological, dependency-aware roadmap**. It is organized into four distinct phases of work. Each phase builds upon the last, ensuring that we solidify our foundation before building upon it. This is the critical path to a successful v0.1.0 launch. - ---- - -## Phase 1: Solidify the Core (Rust SDK & Verifier) - -**Objective:** Create a stable, predictable, and secure foundation. All work in this phase is a prerequisite for subsequent phases. - -### 1.1. Implement Native Commit Verification -* **Why:** The current Python-based commit verification shells out to `ssh-keygen`, which is slow, brittle, and not portable. This is our biggest reliability risk. -* **The Problem:** - ```python - # in packages/auths-python/python/auths/git.py - proc = subprocess.run( - ["ssh-keygen", "-Y", "verify", ...], ... - ) - ``` -* **Action:** Implement the entire commit signature verification logic in pure Rust within the `crates/auths-verifier` crate. This single change will dramatically improve performance and reliability for a key feature. - -### 1.2. Refactor SDK Configuration for Compile-Time Safety -* **Why:** The SDK must be impossible to misconfigure. 
We can prevent entire classes of runtime errors at compile time. -* **The Problem:** The `AuthsContextBuilder` in `crates/auths-sdk/src/context.rs` uses a `NoopPassphraseProvider` that causes a runtime error if signing is attempted without a real provider. - ```rust - // This defers a configuration error to a runtime crash. - passphrase_provider: self - .passphrase_provider - .unwrap_or_else(|| Arc::new(NoopPassphraseProvider)), - ``` -* **Action:** Eliminate the `Noop` providers. Use the typestate pattern to create distinct `AuthsContext` types, such as `AuthsContext` and `AuthsContext`. Workflows that require signing must take the `SigningReady` context as an argument, making it a compile-time error to call them without the correct configuration. - -### 1.3. Eradicate Panics from the Public API -* **Why:** A library that can `panic` is a library that cannot be trusted in production. It is the most hostile behavior an SDK can exhibit. -* **The Problem:** The codebase is littered with `.unwrap()` and `.expect()` calls that can crash the host application. - ```rust - // in crates/auths-sdk/src/workflows/mcp.rs:80 - .expect("failed to build HTTP client") // Will crash if host TLS is misconfigured. - ``` -* **Action:** Audit and refactor every `.unwrap()` and `.expect()` in the `auths-sdk` crate's public-facing workflows. Replace them with proper, descriptive error variants (e.g., `McpAuthError::HttpClientBuildFailed`). - -### 1.4. Unify and Seal the Public API Surface -* **Why:** We are making a promise of stability with `v0.1.0`. The API we launch with is the API we must support. -* **Action:** - 1. **Unify:** Refactor the `initialize_developer`, `initialize_ci`, and `initialize_agent` functions in `crates/auths-sdk/src/setup.rs` into private helpers. The single public entry point must be `pub fn initialize(config: IdentityConfig, ...)`. - 2. **Seal:** Run `cargo public-api` to generate a definitive list of our public API. 
Anything we are not ready to commit to for the long term must be hidden (`pub(crate)` or `#[doc(hidden)]`). - ---- - -## Phase 2: Refine the Developer Experience (Python FFI & SDK) - -**Objective:** Create an idiomatic, robust, and effortless experience for Python developers. This phase depends heavily on the stability provided by Phase 1. - -### 2.1. Implement Robust FFI Error Handling -* **Dependency:** Phase 1.3 (Eradicate Panics). The Rust layer must return errors, not panic. -* **Why:** The current error handling is based on string-matching messages from Rust, which is extremely fragile. -* **The Problem:** - ```python - # in packages/auths-python/python/auths/_client.py - def _map_verify_error(exc: Exception) -> Exception: - msg = str(exc) - if "public key" in msg.lower(): # This will break silently. - return CryptoError(msg, code="invalid_key") - ``` -* **Action:** Modify the Rust FFI layer to return a stable, machine-readable error code (a C-style enum or integer). The Python `_map_verify_error` function must be rewritten to dispatch on this reliable code. -This MUST be consistent across all such files - -### 2.2. Consume Native Commit Verification -* **Dependency:** Phase 1.1 (Native Commit Verification). -* **Why:** To eliminate the slow and brittle `subprocess` calls. -* **Action:** Remove the `verify_commit_range` implementation from `packages/auths-python/python/auths/git.py` and replace its body with a single call to the new native Rust function (exposed via the `auths._native` module). - -### 2.3. Adopt Pythonic Types and Conventions -* **Why:** The Python SDK must respect the conventions of its ecosystem to feel natural to developers. -* **The Problem:** The API uses strings for timestamps and may not return idiomatic `dataclass` instances. - ```python - # in packages/auths-python/python/auths/_client.py - def verify(self, ..., at: str | None = None) -> VerificationResult: - # ... - ``` -* **Action:** - 1. 
Modify methods like `verify` to accept `datetime.datetime` objects. The implementation can then convert them to Unix timestamps (integers) to pass to the Rust layer. - 2. Audit all functions that return data from Rust. Ensure they return proper `@dataclass` instances, not raw dictionaries or tuples. - 3. Ensure all public methods and parameters follow `snake_case` conventions. - ---- - -## Phase 3: Polish the Public Integrations (JS Ecosystem) - -**Objective:** Ensure our integrations are seamless, easy to use, and inspire confidence. This can run in parallel with Phase 2. - -### 3.1. Manage External Dependencies for Independent Repos -* **Correction & Context:** My previous analysis incorrectly assumed a monorepo structure. Understanding these are independent repositories makes the dependency management even more critical. The current build scripts have hardcoded relative paths that will fail in any standard CI/CD environment or for any external contributor. -* **Action (`auths-verify-widget`):** The `build:wasm` script in `package.json` (`"cd ../auths/crates/auths-verifier && wasm-pack build ..."`) is a critical flaw. It relies on a local file structure that will not exist in a clean checkout. The WASM verifier *must* be treated as a versioned, third-party dependency. - 1. The `auths/crates/auths-verifier` project must be configured to compile to WASM and be published to `npm` as a standalone package (e.g., `@auths/verifier-wasm`). - 2. The `auths-verify-widget` must remove the `build:wasm` script and add `@auths/verifier-wasm` as a standard `devDependency` in its `package.json`. - This ensures the widget can be built, tested, and released independently. -* **Action (`auths-verify-github-action`):** The action correctly treats the `auths` CLI as an external dependency by downloading it at runtime. However, for a v0.1.0 launch, this introduces too much variability. - 1. 
For the v0.1.0 release, the action *must bundle a specific, known-good version* of the `auths` native binary for Linux x64 (the standard GitHub runner environment). This guarantees performance and reliability. - 2. This can be accomplished by adding a script to the `auths-verify-github-action` repo that downloads a specific versioned release of the `auths` CLI from its GitHub Releases page and places it in the `dist` directory as part of the build process. - -### 3.2. Improve DX for Integrations -* **Why:** These integrations are the "front door" to our product for many developers. The experience must be flawless. -* **Action (`auths-verify-widget`):** - 1. **Clarify Variants:** The `README.md` must clearly explain the "full" vs. "slim" builds. - 2. **No-Build Option:** Create a UMD bundle and publish it to a CDN (`unpkg`, `jsdelivr`) so the widget can be used with a simple `