From bafdf60144c82d3f8ea4206b4a6e8761e9b9f1e4 Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 27 Mar 2026 15:00:37 +0100 Subject: [PATCH 1/6] refactor: extract native.rs and vm.rs from main.rs Move all Linux-only code (sim execution, inspect, run-in) to native.rs and VM dispatch to vm.rs. Run/Prepare/Test commands auto-detect: native on Linux, VM elsewhere. Shared RunArgs struct via clap flatten. VmCommand Test reuses TestArgs. No more scattered #[cfg(target_os)] in main.rs. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/Cargo.toml | 18 +- patchbay-cli/src/init.rs | 7 - patchbay-cli/src/main.rs | 1010 +++++++++--------------------------- patchbay-cli/src/native.rs | 371 +++++++++++++ patchbay-cli/src/vm.rs | 195 +++++++ 5 files changed, 835 insertions(+), 766 deletions(-) delete mode 100644 patchbay-cli/src/init.rs create mode 100644 patchbay-cli/src/native.rs create mode 100644 patchbay-cli/src/vm.rs diff --git a/patchbay-cli/Cargo.toml b/patchbay-cli/Cargo.toml index cbf5ca4..ddf7aaa 100644 --- a/patchbay-cli/Cargo.toml +++ b/patchbay-cli/Cargo.toml @@ -14,16 +14,12 @@ path = "src/main.rs" [dependencies] anyhow = "1" chrono = { version = "0.4", default-features = false, features = ["clock"] } -clap = { version = "4", features = ["derive"] } -patchbay = { workspace = true } -patchbay-runner = { workspace = true } -patchbay-vm = { workspace = true, optional = true } -patchbay-server = { workspace = true, optional = true } -patchbay-utils = { workspace = true } -ctor = "0.6" -nix = { version = "0.30", features = ["signal", "process"] } +clap = { version = "4", features = ["derive", "env"] } flate2 = "1" open = "5" +patchbay-server = { workspace = true, optional = true } +patchbay-utils = { workspace = true } +patchbay-vm = { workspace = true, optional = true } reqwest = { version = "0.12", default-features = false, features = ["blocking", "json", "rustls-tls"], optional = true } serde = { version = "1", features = ["derive"] } serde_json = "1" @@ 
-32,6 +28,12 @@ tokio = { version = "1", features = ["rt", "macros", "sync", "time", "fs", "proc toml = "1.0" tracing = "0.1" +[target.'cfg(target_os = "linux")'.dependencies] +ctor = "0.6" +nix = { version = "0.30", features = ["signal", "process"] } +patchbay = { workspace = true } +patchbay-runner = { workspace = true } + [dev-dependencies] patchbay = { workspace = true } serde_json = "1" diff --git a/patchbay-cli/src/init.rs b/patchbay-cli/src/init.rs deleted file mode 100644 index a2384b1..0000000 --- a/patchbay-cli/src/init.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! ELF .init_array bootstrap — runs before main() and before tokio creates threads. -#[cfg(target_os = "linux")] -#[ctor::ctor] -fn userns_ctor() { - // SAFETY: single-threaded ELF init context; raw libc only. - unsafe { patchbay::init_userns_for_ctor() } -} diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index d4b82cf..832d5f8 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -1,31 +1,28 @@ //! Unified CLI entrypoint for patchbay simulations (native and VM). 
mod compare; -mod init; +#[cfg(target_os = "linux")] +mod native; mod test; #[cfg(feature = "upload")] mod upload; mod util; +#[cfg(feature = "vm")] +mod vm; -#[cfg(target_os = "linux")] -use std::collections::HashMap; -use std::{ - path::{Path, PathBuf}, - process::Command as ProcessCommand, - time::Duration, -}; +use std::path::{Path, PathBuf}; +#[cfg(feature = "serve")] +use std::process::Command as ProcessCommand; +use std::time::Duration; -use anyhow::{anyhow, bail, Context, Result}; +use anyhow::{bail, Context, Result}; use clap::{Parser, Subcommand}; -use patchbay::check_caps; -use patchbay_runner::sim; +use serde::Deserialize; + #[cfg(feature = "serve")] use patchbay_server::DEFAULT_UI_BIND; #[cfg(not(feature = "serve"))] const DEFAULT_UI_BIND: &str = "127.0.0.1:7421"; -#[cfg(feature = "vm")] -use patchbay_vm::VmOps; -use serde::{Deserialize, Serialize}; #[derive(Parser)] #[command(name = "patchbay", about = "Run a patchbay simulation")] @@ -39,45 +36,12 @@ struct Cli { #[derive(Subcommand)] enum Command { - /// Run one or more sims locally. + /// Run one or more sims (native on Linux, VM elsewhere). Run { - /// One or more sim TOML files or directories containing `*.toml`. - #[arg()] - sims: Vec, - - /// Work directory for logs, binaries, and results. - #[arg(long, default_value = ".patchbay/work")] - work_dir: PathBuf, - - /// Binary override in `::` form. - #[arg(long = "binary")] - binary_overrides: Vec, - - /// Do not build binaries; resolve expected artifacts from target dirs. - #[arg(long, default_value_t = false)] - no_build: bool, - /// Stream live stdout/stderr lines with node prefixes. - #[arg(short = 'v', long, default_value_t = false)] - verbose: bool, - - /// Start embedded UI server and open browser. - #[arg(long, default_value_t = false)] - open: bool, - - /// Bind address for embedded UI server. - #[arg(long, default_value = DEFAULT_UI_BIND)] - bind: String, - - /// Project root directory for resolving binaries and cargo builds. 
- /// Defaults to the current working directory. - #[arg(long)] - project_root: Option, - - /// Per-sim timeout (e.g. "120s", "5m"). Sims that exceed this are aborted. - #[arg(long)] - timeout: Option, + #[command(flatten)] + args: RunArgs, }, - /// Resolve sims and build all required assets without running simulations. + /// Resolve sims and build all required assets without running. Prepare { /// One or more sim TOML files or directories containing `*.toml`. #[arg()] @@ -141,7 +105,7 @@ enum Command { #[arg(trailing_var_arg = true, allow_hyphen_values = true, required = true)] cmd: Vec, }, - /// Run tests (delegates to cargo test on native, VM test flow on VM). + /// Run tests (native on Linux, VM elsewhere; --vm forces VM backend). Test { #[command(flatten)] args: test::TestArgs, @@ -177,7 +141,7 @@ enum Command { #[cfg(feature = "vm")] Vm { #[command(subcommand)] - command: VmCommand, + command: vm::VmCommand, /// Which VM backend to use. #[arg(long, default_value = "auto", global = true)] backend: patchbay_vm::Backend, @@ -221,85 +185,6 @@ enum CompareCommand { }, } -/// VM sub-subcommands (mirrors patchbay-vm's standalone CLI). -#[cfg(feature = "vm")] -#[derive(Subcommand)] -enum VmCommand { - /// Boot or reuse VM and ensure mounts. - Up { - #[arg(long)] - recreate: bool, - }, - /// Stop VM and helper processes. - Down, - /// Show VM running status. - Status, - /// Best-effort cleanup of VM helper artifacts/processes. - Cleanup, - /// Execute command in the guest (SSH for QEMU, exec for container). - Ssh { - #[arg(trailing_var_arg = true, allow_hyphen_values = true)] - cmd: Vec, - }, - /// Run one or more sims in VM using guest patchbay binary. 
- Run { - #[arg(required = true)] - sims: Vec, - #[arg(long, default_value = ".patchbay/work")] - work_dir: PathBuf, - #[arg(long = "binary")] - binary_overrides: Vec, - #[arg(short = 'v', long, default_value_t = false)] - verbose: bool, - #[arg(long)] - recreate: bool, - #[arg(long, default_value = "latest")] - patchbay_version: String, - #[arg(long, default_value_t = false)] - open: bool, - #[arg(long, default_value = DEFAULT_UI_BIND)] - bind: String, - }, - /// Serve embedded UI + work directory over HTTP. - Serve { - #[arg(long, default_value = ".patchbay/work")] - work_dir: PathBuf, - /// Serve `/binaries/tests/testdir-current` instead of work_dir. - #[arg(long, default_value_t = false)] - testdir: bool, - #[arg(long, default_value = DEFAULT_UI_BIND)] - bind: String, - #[arg(long, default_value_t = false)] - open: bool, - }, - /// Build and run tests in VM. - Test { - /// Test name filter (passed to test binaries at runtime). - #[arg()] - filter: Option, - #[arg(long, default_value_t = patchbay_vm::default_test_target())] - target: String, - #[arg(short = 'p', long = "package")] - packages: Vec, - #[arg(long = "test")] - tests: Vec, - #[arg(short = 'j', long)] - jobs: Option, - #[arg(short = 'F', long)] - features: Vec, - #[arg(long)] - release: bool, - #[arg(long)] - lib: bool, - #[arg(long)] - no_fail_fast: bool, - #[arg(long)] - recreate: bool, - #[arg(last = true)] - cargo_args: Vec, - }, -} - fn resolve_project_root(opt: Option) -> Result { match opt { Some(p) => Ok(p), @@ -308,7 +193,8 @@ fn resolve_project_root(opt: Option) -> Result { } fn main() -> Result<()> { - patchbay::init_userns()?; + #[cfg(target_os = "linux")] + native::init()?; tokio_main() } @@ -317,76 +203,14 @@ async fn tokio_main() -> Result<()> { patchbay_utils::init_tracing(); let cli = Cli::parse(); match cli.command { - Command::Run { - sims, - work_dir, - binary_overrides, - no_build, - verbose, - open, - bind: _bind, - project_root, - timeout, - } => { - let sim_timeout = timeout - 
.map(|s| sim::steps::parse_duration(&s)) - .transpose() - .context("invalid --timeout value")?; - if open { - #[cfg(feature = "serve")] - { - let bind_addr = _bind.clone(); - let work = work_dir.clone(); - tokio::spawn(async move { - if let Err(e) = patchbay_server::serve(work, &bind_addr).await { - tracing::error!("server error: {e}"); - } - }); - println!("patchbay: http://{_bind}/"); - let url = format!("http://{_bind}/"); - let _ = std::process::Command::new("xdg-open").arg(&url).spawn(); - } - #[cfg(not(feature = "serve"))] - bail!("--open requires the `serve` feature"); - } - let project_root = resolve_project_root(project_root)?; - let sims = resolve_sim_args(sims, &project_root)?; - let res = sim::run_sims( - sims, - work_dir, - binary_overrides, - verbose, - Some(project_root), - no_build, - sim_timeout, - ) - .await; - if open && res.is_ok() { - println!("run finished; server still running (Ctrl-C to exit)"); - loop { - tokio::time::sleep(Duration::from_secs(60)).await; - } - } - res - } + Command::Run { args } => dispatch_run(args).await, Command::Prepare { sims, work_dir, binary_overrides, no_build, project_root, - } => { - let project_root = resolve_project_root(project_root)?; - let sims = resolve_sim_args(sims, &project_root)?; - sim::prepare_sims( - sims, - work_dir, - binary_overrides, - Some(project_root), - no_build, - ) - .await - } + } => dispatch_prepare(sims, work_dir, binary_overrides, no_build, project_root).await, #[cfg(feature = "serve")] Command::Serve { outdir, @@ -402,130 +226,21 @@ async fn tokio_main() -> Result<()> { println!("patchbay: serving {} at http://{bind}/", dir.display()); if open { let url = format!("http://{bind}/"); - let _ = std::process::Command::new("xdg-open").arg(&url).spawn(); + let _ = ProcessCommand::new("xdg-open").arg(&url).spawn(); } patchbay_server::serve(dir, &bind).await } #[cfg(target_os = "linux")] - Command::Inspect { input, work_dir } => inspect_command(input, work_dir).await, + Command::Inspect { 
input, work_dir } => native::inspect_command(input, work_dir).await, #[cfg(target_os = "linux")] Command::RunIn { node, inspect, work_dir, cmd, - } => run_in_command(node, inspect, work_dir, cmd), - Command::Test { args, persist, vm } => { - #[cfg(feature = "vm")] - if let Some(vm_backend) = vm { - let backend = match vm_backend.as_str() { - "auto" => patchbay_vm::Backend::Auto.resolve(), - "qemu" => patchbay_vm::Backend::Qemu, - "container" => patchbay_vm::Backend::Container, - other => bail!("unknown VM backend: {other}"), - }; - return test::run_vm(args, backend); - } - #[cfg(not(feature = "vm"))] - if vm.is_some() { - bail!("VM support not compiled (enable the `vm` feature)"); - } - test::run_native(args, cli.verbose, persist) - } - Command::Compare { command } => { - let cwd = std::env::current_dir().context("get cwd")?; - let work_dir = cwd.join(".patchbay/work"); - match command { - CompareCommand::Test { - left_ref, - right_ref, - force_build, - no_ref_build, - args, - } => { - use patchbay_utils::manifest::{self as mf, RunKind}; - - let right_label = right_ref.as_deref().unwrap_or("worktree"); - println!( - "patchbay compare test: {} \u{2194} {}", - left_ref, right_label - ); - - // Helper: resolve results for a ref, using cache or building. - let resolve_ref_results = - |git_ref: &str, label: &str| -> Result> { - let sha = mf::resolve_ref(git_ref) - .with_context(|| format!("could not resolve ref '{git_ref}'"))?; - - // Check cache (unless --force-build). - if !force_build { - if let Some((_dir, manifest)) = - mf::find_run_for_commit(&work_dir, &sha, RunKind::Test) - { - println!("Using cached run for {label} ({sha:.8})"); - return Ok(manifest.tests); - } - } - - // No cache — fail if --no-ref-build. - if no_ref_build { - bail!( - "no cached run for {label} ({sha:.8}); \ - run `patchbay test --persist` on that ref first, \ - or remove --no-ref-build" - ); - } - - // Build in worktree. 
- println!("Running tests in {label} ..."); - let tree_dir = compare::setup_worktree(git_ref, &cwd)?; - let (results, _output) = - compare::run_tests_in_dir(&tree_dir, &args, cli.verbose)?; - - // Persist the run so future compares can reuse it. - compare::persist_worktree_run(&tree_dir, &results, &sha)?; - - compare::cleanup_worktree(&tree_dir)?; - Ok(results) - }; - - let left_results = resolve_ref_results(&left_ref, &left_ref)?; - - let right_results = if let Some(ref r) = right_ref { - resolve_ref_results(r, r)? - } else { - // Compare against current worktree: always run fresh. - println!("Running tests in worktree ..."); - let (results, _output) = - compare::run_tests_in_dir(&cwd, &args, cli.verbose)?; - results - }; - - // Compare - let result = compare::compare_results(&left_results, &right_results); - compare::print_summary( - &left_ref, - right_label, - &left_results, - &right_results, - &result, - ); - - if result.regressions > 0 { - bail!("{} regressions detected", result.regressions); - } - Ok(()) - } - CompareCommand::Run { - sims: _, - left_ref: _, - right_ref: _, - } => { - // TODO: implement compare run (sim comparison) - bail!("compare run is not yet implemented"); - } - } - } + } => native::run_in_command(node, inspect, work_dir, cmd), + Command::Test { args, persist, vm } => dispatch_test(args, persist, vm, cli.verbose), + Command::Compare { command } => dispatch_compare(command, cli.verbose), Command::Upload { dir, project, @@ -546,118 +261,259 @@ async fn tokio_main() -> Result<()> { } } #[cfg(feature = "vm")] - Command::Vm { command, backend } => dispatch_vm(command, backend).await, + Command::Vm { command, backend } => vm::dispatch_vm(command, backend).await, } } -/// Dispatch VM subcommands to the patchbay-vm library. 
-#[cfg(feature = "vm")] -async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Result<()> { - let backend = backend.resolve(); +// ── Run dispatch ──────────────────────────────────────────────────────── - match command { - VmCommand::Up { recreate } => backend.up(recreate), - VmCommand::Down => backend.down(), - VmCommand::Status => backend.status(), - VmCommand::Cleanup => backend.cleanup(), - VmCommand::Ssh { cmd } => backend.exec(cmd), - VmCommand::Run { - sims, - work_dir, - binary_overrides, - verbose, - recreate, - patchbay_version, - open, - bind, - } => { - if open { - let url = format!("http://{bind}"); - println!("patchbay UI: {url}"); - let _ = open::that(&url); - let work = work_dir.clone(); - let bind_clone = bind.clone(); +#[derive(clap::Args)] +struct RunArgs { + /// One or more sim TOML files or directories containing `*.toml`. + #[arg()] + sims: Vec, + /// Work directory for logs, binaries, and results. + #[arg(long, default_value = ".patchbay/work")] + work_dir: PathBuf, + /// Binary override in `::` form. + #[arg(long = "binary")] + binary_overrides: Vec, + /// Do not build binaries; resolve expected artifacts from target dirs. + #[arg(long, default_value_t = false)] + no_build: bool, + /// Stream live stdout/stderr lines with node prefixes. + #[arg(short = 'v', long, default_value_t = false)] + verbose: bool, + /// Start embedded UI server and open browser. + #[arg(long, default_value_t = false)] + open: bool, + /// Bind address for embedded UI server. + #[arg(long, default_value = DEFAULT_UI_BIND)] + bind: String, + /// Project root for resolving binaries. Defaults to cwd. + #[arg(long)] + project_root: Option, + /// Per-sim timeout (e.g. "120s", "5m"). + #[arg(long)] + timeout: Option, +} + +#[allow(clippy::needless_return)] +async fn dispatch_run(r: RunArgs) -> Result<()> { + // On Linux: run natively. 
+ #[cfg(target_os = "linux")] + { + let sim_timeout = r.timeout + .map(|s| native::parse_duration(&s)) + .transpose() + .context("invalid --timeout value")?; + if r.open { + #[cfg(feature = "serve")] + { + let bind_addr = r.bind.clone(); + let work = r.work_dir.clone(); tokio::spawn(async move { - if let Err(e) = patchbay_server::serve(work, &bind_clone).await { + if let Err(e) = patchbay_server::serve(work, &bind_addr).await { tracing::error!("server error: {e}"); } }); + println!("patchbay: http://{}/", r.bind); + let url = format!("http://{}/", r.bind); + let _ = ProcessCommand::new("xdg-open").arg(&url).spawn(); } - let args = patchbay_vm::RunVmArgs { - sim_inputs: sims, - work_dir, - binary_overrides, - verbose, - recreate, - patchbay_version, + #[cfg(not(feature = "serve"))] + bail!("--open requires the `serve` feature"); + } + let project_root = resolve_project_root(r.project_root)?; + let sims = resolve_sim_args(r.sims, &project_root)?; + let res = native::run_sims( + sims, r.work_dir, r.binary_overrides, r.verbose, + Some(project_root), r.no_build, sim_timeout, + ).await; + if r.open && res.is_ok() { + println!("run finished; server still running (Ctrl-C to exit)"); + loop { tokio::time::sleep(Duration::from_secs(60)).await; } + } + return res; + } + + // On non-Linux with VM feature: delegate to VM backend. 
+ #[cfg(all(not(target_os = "linux"), feature = "vm"))] + { + let vm_args = vm::VmRunArgs { + sims: r.sims, work_dir: r.work_dir, binary_overrides: r.binary_overrides, + verbose: r.verbose, open: r.open, bind: r.bind, + }; + return vm::run_sims_vm(vm_args, patchbay_vm::Backend::Auto); + } + + #[cfg(all(not(target_os = "linux"), not(feature = "vm")))] + { let _ = r; bail!("run requires Linux or the `vm` feature"); } +} + +// ── Prepare dispatch ──────────────────────────────────────────────────── + +#[allow(clippy::needless_return)] +async fn dispatch_prepare( + sims: Vec, + work_dir: PathBuf, + binary_overrides: Vec, + no_build: bool, + project_root: Option, +) -> Result<()> { + #[cfg(target_os = "linux")] + { + let project_root = resolve_project_root(project_root)?; + let sims = resolve_sim_args(sims, &project_root)?; + return native::prepare_sims(sims, work_dir, binary_overrides, Some(project_root), no_build) + .await; + } + + #[cfg(not(target_os = "linux"))] + { + let _ = (&sims, &work_dir, &binary_overrides, &no_build, &project_root); + bail!("prepare requires Linux (use `patchbay vm run` for non-Linux)"); + } +} + +// ── Test dispatch ─────────────────────────────────────────────────────── + +#[allow(clippy::needless_return)] +fn dispatch_test( + args: test::TestArgs, + persist: bool, + vm: Option, + verbose: bool, +) -> Result<()> { + // Explicit --vm: force VM backend. 
+ if let Some(ref vm_backend) = vm { + #[cfg(feature = "vm")] + { + let backend = match vm_backend.as_str() { + "auto" => patchbay_vm::Backend::Auto.resolve(), + "qemu" => patchbay_vm::Backend::Qemu, + "container" => patchbay_vm::Backend::Container, + other => bail!("unknown VM backend: {other}"), }; - let res = backend.run_sims(args); - if open && res.is_ok() { - println!("run finished; server still running (Ctrl-C to exit)"); - loop { - tokio::time::sleep(Duration::from_secs(60)).await; - } - } - res + return test::run_vm(args, backend); } - VmCommand::Serve { - work_dir, - testdir, - bind, - open, + #[cfg(not(feature = "vm"))] + { + let _ = vm_backend; + bail!("VM support not compiled (enable the `vm` feature)"); + } + } + + // No --vm flag: auto-detect based on platform. + #[cfg(target_os = "linux")] + { + return test::run_native(args, verbose, persist); + } + + #[cfg(all(not(target_os = "linux"), feature = "vm"))] + { + let _ = (verbose, persist); + let backend = patchbay_vm::Backend::Auto.resolve(); + return test::run_vm(args, backend); + } + + #[cfg(all(not(target_os = "linux"), not(feature = "vm")))] + { + let _ = (args, verbose, persist); + bail!("test requires Linux or the `vm` feature"); + } +} + +// ── Compare dispatch ──────────────────────────────────────────────────── + +fn dispatch_compare(command: CompareCommand, verbose: bool) -> Result<()> { + let cwd = std::env::current_dir().context("get cwd")?; + let work_dir = cwd.join(".patchbay/work"); + match command { + CompareCommand::Test { + left_ref, + right_ref, + force_build, + no_ref_build, + args, } => { - let dir = if testdir { - work_dir - .join("binaries") - .join("tests") - .join("testdir-current") + use patchbay_utils::manifest::{self as mf, RunKind}; + + let right_label = right_ref.as_deref().unwrap_or("worktree"); + println!( + "patchbay compare test: {} \u{2194} {}", + left_ref, right_label + ); + + let resolve_ref_results = + |git_ref: &str, label: &str| -> Result> { + let sha = 
mf::resolve_ref(git_ref) + .with_context(|| format!("could not resolve ref '{git_ref}'"))?; + + if !force_build { + if let Some((_dir, manifest)) = + mf::find_run_for_commit(&work_dir, &sha, RunKind::Test) + { + println!("Using cached run for {label} ({sha:.8})"); + return Ok(manifest.tests); + } + } + + if no_ref_build { + bail!( + "no cached run for {label} ({sha:.8}); \ + run `patchbay test --persist` on that ref first, \ + or remove --no-ref-build" + ); + } + + println!("Running tests in {label} ..."); + let tree_dir = compare::setup_worktree(git_ref, &cwd)?; + let (results, _output) = + compare::run_tests_in_dir(&tree_dir, &args, verbose)?; + + compare::persist_worktree_run(&tree_dir, &results, &sha)?; + compare::cleanup_worktree(&tree_dir)?; + Ok(results) + }; + + let left_results = resolve_ref_results(&left_ref, &left_ref)?; + + let right_results = if let Some(ref r) = right_ref { + resolve_ref_results(r, r)? } else { - work_dir + println!("Running tests in worktree ..."); + let (results, _output) = + compare::run_tests_in_dir(&cwd, &args, verbose)?; + results }; - println!("patchbay: serving {} at http://{bind}/", dir.display()); - if open { - let url = format!("http://{bind}"); - let _ = open::that(&url); + + let result = compare::compare_results(&left_results, &right_results); + compare::print_summary( + &left_ref, + right_label, + &left_results, + &right_results, + &result, + ); + + if result.regressions > 0 { + bail!("{} regressions detected", result.regressions); } - patchbay_server::serve(dir, &bind).await + Ok(()) } - VmCommand::Test { - filter, - target, - packages, - tests, - jobs, - features, - release, - lib, - no_fail_fast, - recreate, - cargo_args, + CompareCommand::Run { + sims: _, + left_ref: _, + right_ref: _, } => { - let test_args = test::TestArgs { - include_ignored: false, - ignored: false, - packages, - tests, - jobs, - features, - release, - lib, - no_fail_fast, - extra_args: { - let mut args = Vec::new(); - if let Some(f) = filter { - 
args.push(f); - } - args.extend(cargo_args); - args - }, - }; - backend.run_tests(test_args.into_vm_args(target, recreate)) + bail!("compare run is not yet implemented"); } } } +// ── Helpers ───────────────────────────────────────────────────────────── + /// When no sim paths are given on the CLI, look for `patchbay.toml` or /// `.patchbay.toml` in the project root and use its `simulations` path. fn resolve_sim_args(sims: Vec, project_root: &Path) -> Result> { @@ -699,10 +555,6 @@ struct PatchbayConfig { } /// Resolve `testdir-current` inside the cargo target directory. -/// -/// Runs `cargo metadata` to find the target directory, then appends -/// `testdir-current`. This matches the convention used by the `testdir` -/// crate when running tests natively. #[cfg(feature = "serve")] fn resolve_testdir_native() -> Result { let output = ProcessCommand::new("cargo") @@ -717,350 +569,6 @@ fn resolve_testdir_native() -> Result { serde_json::from_slice(&output.stdout).context("parse cargo metadata")?; let target_dir = meta["target_directory"] .as_str() - .ok_or_else(|| anyhow!("cargo metadata missing target_directory"))?; + .ok_or_else(|| anyhow::anyhow!("cargo metadata missing target_directory"))?; Ok(PathBuf::from(target_dir).join("testdir-current")) } - -#[cfg(target_os = "linux")] -#[derive(Debug, Clone, Serialize, Deserialize)] -struct InspectSession { - prefix: String, - root_ns: String, - node_namespaces: HashMap, - node_ips_v4: HashMap, - node_keeper_pids: HashMap, -} - -#[cfg(target_os = "linux")] -fn inspect_dir(work_dir: &std::path::Path) -> PathBuf { - work_dir.join("inspect") -} - -#[cfg(target_os = "linux")] -fn inspect_session_path(work_dir: &std::path::Path, prefix: &str) -> PathBuf { - inspect_dir(work_dir).join(format!("{prefix}.json")) -} - -#[cfg(target_os = "linux")] -fn env_key_suffix(name: &str) -> String { - patchbay::util::sanitize_for_env_key(name) -} - -#[cfg(target_os = "linux")] -fn load_topology_for_inspect( - input: &std::path::Path, -) -> 
Result<(patchbay::config::LabConfig, bool)> { - let text = - std::fs::read_to_string(input).with_context(|| format!("read {}", input.display()))?; - let value: toml::Value = - toml::from_str(&text).with_context(|| format!("parse TOML {}", input.display()))?; - let is_sim = - value.get("sim").is_some() || value.get("step").is_some() || value.get("binary").is_some(); - if is_sim { - let sim: sim::SimFile = - toml::from_str(&text).with_context(|| format!("parse sim {}", input.display()))?; - let topo = sim::topology::load_topology(&sim, input) - .with_context(|| format!("load topology from sim {}", input.display()))?; - Ok((topo, true)) - } else { - let topo: patchbay::config::LabConfig = - toml::from_str(&text).with_context(|| format!("parse topology {}", input.display()))?; - Ok((topo, false)) - } -} - -#[cfg(target_os = "linux")] -fn keeper_commmand() -> ProcessCommand { - let mut cmd = ProcessCommand::new("sh"); - cmd.args(["-lc", "while :; do sleep 3600; done"]) - .stdin(std::process::Stdio::null()) - .stdout(std::process::Stdio::null()) - .stderr(std::process::Stdio::null()); - cmd -} - -#[cfg(target_os = "linux")] -async fn inspect_command(input: PathBuf, work_dir: PathBuf) -> Result<()> { - check_caps()?; - - let (topo, is_sim) = load_topology_for_inspect(&input)?; - let lab = patchbay_runner::Lab::from_config(topo.clone()) - .await - .with_context(|| format!("build lab config from {}", input.display()))?; - - let mut node_namespaces = HashMap::new(); - let mut node_ips_v4 = HashMap::new(); - let mut node_keeper_pids = HashMap::new(); - - for router in &topo.router { - let name = router.name.clone(); - let r = lab - .router_by_name(&name) - .with_context(|| format!("unknown router '{name}'"))?; - let child = r.spawn_command_sync(keeper_commmand())?; - node_keeper_pids.insert(name.clone(), child.id()); - node_namespaces.insert(name.clone(), r.ns().to_string()); - if let Some(ip) = r.uplink_ip() { - node_ips_v4.insert(name, ip.to_string()); - } - } - for name in 
topo.device.keys() { - let d = lab - .device_by_name(name) - .with_context(|| format!("unknown device '{name}'"))?; - let child = d.spawn_command_sync(keeper_commmand())?; - node_keeper_pids.insert(name.clone(), child.id()); - node_namespaces.insert(name.clone(), d.ns().to_string()); - if let Some(ip) = d.ip() { - node_ips_v4.insert(name.clone(), ip.to_string()); - } - } - - let prefix = lab.prefix().to_string(); - let session = InspectSession { - prefix: prefix.clone(), - root_ns: lab.ix().ns(), - node_namespaces, - node_ips_v4, - node_keeper_pids, - }; - - let session_dir = inspect_dir(&work_dir); - std::fs::create_dir_all(&session_dir) - .with_context(|| format!("create {}", session_dir.display()))?; - let session_path = inspect_session_path(&work_dir, &prefix); - std::fs::write(&session_path, serde_json::to_vec_pretty(&session)?) - .with_context(|| format!("write {}", session_path.display()))?; - - let mut keys = session - .node_namespaces - .keys() - .map(|k| k.to_string()) - .collect::>(); - keys.sort(); - - println!( - "inspect ready: {} ({})", - session.prefix, - if is_sim { "sim" } else { "topology" } - ); - println!("session file: {}", session_path.display()); - println!("export NETSIM_INSPECT={}", session.prefix); - println!("export NETSIM_INSPECT_FILE={}", session_path.display()); - for key in &keys { - if let Some(ns) = session.node_namespaces.get(key) { - println!("export NETSIM_NS_{}={ns}", env_key_suffix(key)); - } - if let Some(ip) = session.node_ips_v4.get(key) { - println!("export NETSIM_IP_{}={ip}", env_key_suffix(key)); - } - } - println!("inspect active; press Ctrl-C to stop and clean up"); - loop { - std::thread::sleep(Duration::from_secs(60)); - } -} - -#[cfg(target_os = "linux")] -fn resolve_inspect_ref(inspect: Option) -> Result { - if let Some(value) = inspect { - let trimmed = value.trim(); - if trimmed.is_empty() { - bail!("--inspect must not be empty"); - } - return Ok(trimmed.to_string()); - } - let from_env = 
std::env::var("NETSIM_INSPECT") - .context("missing inspect session; set --inspect or NETSIM_INSPECT")?; - let trimmed = from_env.trim(); - if trimmed.is_empty() { - bail!("NETSIM_INSPECT is set but empty"); - } - Ok(trimmed.to_string()) -} - -#[cfg(target_os = "linux")] -fn load_inspect_session(work_dir: &std::path::Path, inspect_ref: &str) -> Result { - let as_path = PathBuf::from(inspect_ref); - let session_path = if as_path.extension().and_then(|v| v.to_str()) == Some("json") - || inspect_ref.contains('/') - { - as_path - } else { - inspect_session_path(work_dir, inspect_ref) - }; - let bytes = std::fs::read(&session_path) - .with_context(|| format!("read inspect session {}", session_path.display()))?; - serde_json::from_slice(&bytes) - .with_context(|| format!("parse inspect session {}", session_path.display())) -} - -#[cfg(target_os = "linux")] -fn run_in_command( - node: String, - inspect: Option, - work_dir: PathBuf, - cmd: Vec, -) -> Result<()> { - check_caps()?; - if cmd.is_empty() { - bail!("run-in: missing command"); - } - let inspect_ref = resolve_inspect_ref(inspect)?; - let session = load_inspect_session(&work_dir, &inspect_ref)?; - let pid = *session.node_keeper_pids.get(&node).ok_or_else(|| { - anyhow!( - "node '{}' is not in inspect session '{}'", - node, - session.prefix - ) - })?; - - let mut proc = ProcessCommand::new("nsenter"); - proc.arg("-U") - .arg("-t") - .arg(pid.to_string()) - .arg("-n") - .arg("--") - .arg(&cmd[0]); - if cmd.len() > 1 { - proc.args(&cmd[1..]); - } - let status = proc - .status() - .context("run command with nsenter for inspect session")?; - if !status.success() { - bail!("run-in command exited with status {}", status); - } - Ok(()) -} - -#[cfg(test)] -mod tests { - use std::path::Path; - - use super::*; - - #[cfg(target_os = "linux")] - #[test] - fn env_key_suffix_normalizes_names() { - assert_eq!(env_key_suffix("relay"), "relay"); - assert_eq!(env_key_suffix("fetcher-1"), "fetcher_1"); - } - - #[cfg(target_os = 
"linux")] - #[test] - fn inspect_session_path_uses_prefix_json() { - let base = PathBuf::from("/tmp/patchbay-work"); - let path = inspect_session_path(&base, "lab-p123"); - assert!(path.ends_with("inspect/lab-p123.json")); - } - - #[cfg(target_os = "linux")] - fn write_temp_file(dir: &Path, rel: &str, body: &str) -> PathBuf { - let path = dir.join(rel); - if let Some(parent) = path.parent() { - std::fs::create_dir_all(parent).expect("create parent"); - } - std::fs::write(&path, body).expect("write file"); - path - } - - #[cfg(target_os = "linux")] - #[test] - fn inspect_loader_detects_sim_input() { - let root = std::env::temp_dir().join(format!( - "patchbay-inspect-test-{}-{}", - std::process::id(), - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .expect("time") - .as_nanos() - )); - let sim_path = write_temp_file( - &root, - "sims/case.toml", - "[sim]\nname='x'\n\n[[router]]\nname='relay'\n\n[device.fetcher.eth0]\ngateway='relay'\n", - ); - let (_topo, is_sim) = load_topology_for_inspect(&sim_path).expect("load sim topology"); - assert!(is_sim); - } - - #[cfg(target_os = "linux")] - #[test] - fn inspect_loader_detects_topology_input() { - let root = std::env::temp_dir().join(format!( - "patchbay-inspect-test-{}-{}", - std::process::id(), - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .expect("time") - .as_nanos() - )); - let topo_path = write_temp_file( - &root, - "topos/lab.toml", - "[[router]]\nname='relay'\n\n[device.fetcher.eth0]\ngateway='relay'\n", - ); - let (_topo, is_sim) = load_topology_for_inspect(&topo_path).expect("load direct topology"); - assert!(!is_sim); - } - - #[tokio::test(flavor = "current_thread")] - async fn iperf_sim_writes_results_with_mbps() { - let root = std::env::temp_dir().join(format!( - "patchbay-iperf-run-test-{}-{}", - std::process::id(), - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .expect("time") - .as_nanos() - )); - 
std::fs::create_dir_all(&root).expect("create temp workdir"); - let workspace_root = PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .parent() - .unwrap() - .to_path_buf(); - let sim_path = workspace_root.join("iroh-integration/patchbay/sims/iperf-1to1-public.toml"); - let project_root = workspace_root; - sim::run_sims( - vec![sim_path], - root.clone(), - vec![], - false, - Some(project_root), - false, - None, - ) - .await - .expect("run iperf sim"); - - let run_root = std::fs::canonicalize(root.join("latest")).expect("resolve latest"); - let results_path = run_root - .join("iperf-1to1-public-baseline") - .join("results.json"); - let text = std::fs::read_to_string(&results_path) - .unwrap_or_else(|e| panic!("read {}: {e}", results_path.display())); - let json: serde_json::Value = serde_json::from_str(&text).expect("parse results"); - let step = &json["steps"][0]; - let down_bytes: f64 = step["down_bytes"] - .as_str() - .expect("down_bytes should be present") - .parse() - .expect("down_bytes should be numeric"); - let duration: f64 = step["duration"] - .as_str() - .expect("duration should be present") - .parse::() - .map(|us| us as f64 / 1_000_000.0) - .unwrap_or_else(|_| { - step["duration"] - .as_str() - .unwrap() - .parse::() - .expect("duration as float") - }); - let mb_s = down_bytes / (duration * 1_000_000.0); - assert!(mb_s > 0.0, "expected mb_s > 0, got {mb_s}"); - } -} diff --git a/patchbay-cli/src/native.rs b/patchbay-cli/src/native.rs new file mode 100644 index 0000000..7f94467 --- /dev/null +++ b/patchbay-cli/src/native.rs @@ -0,0 +1,371 @@ +//! Native Linux backend: sim execution and interactive namespace inspection. +//! +//! Everything in this module requires Linux user namespaces (patchbay core). 
+ +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::process::Command as ProcessCommand; +use std::time::Duration; + +use anyhow::{anyhow, bail, Context, Result}; +use patchbay::check_caps; +use patchbay_runner::sim; +use serde::{Deserialize, Serialize}; + +/// Initialize user namespaces (must be called before tokio starts threads). +pub fn init() -> Result<()> { + patchbay::init_userns() +} + +/// Run one or more sims locally. +pub async fn run_sims( + sims: Vec<PathBuf>, + work_dir: PathBuf, + binary_overrides: Vec<String>, + verbose: bool, + project_root: Option<PathBuf>, + no_build: bool, + timeout: Option<Duration>, +) -> Result<()> { + sim::run_sims(sims, work_dir, binary_overrides, verbose, project_root, no_build, timeout).await +} + +/// Resolve sims and build all required assets without running. +pub async fn prepare_sims( + sims: Vec<PathBuf>, + work_dir: PathBuf, + binary_overrides: Vec<String>, + project_root: Option<PathBuf>, + no_build: bool, +) -> Result<()> { + sim::prepare_sims(sims, work_dir, binary_overrides, project_root, no_build).await +} + +/// Parse a duration string like "120s" or "5m".
+pub fn parse_duration(s: &str) -> Result<Duration> { + sim::steps::parse_duration(s) +} + +// ── Inspect / RunIn ──────────────────────────────────────────────────── + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InspectSession { + pub prefix: String, + pub root_ns: String, + pub node_namespaces: HashMap<String, String>, + pub node_ips_v4: HashMap<String, String>, + pub node_keeper_pids: HashMap<String, u32>, +} + +pub fn inspect_dir(work_dir: &Path) -> PathBuf { + work_dir.join("inspect") +} + +pub fn inspect_session_path(work_dir: &Path, prefix: &str) -> PathBuf { + inspect_dir(work_dir).join(format!("{prefix}.json")) +} + +pub fn env_key_suffix(name: &str) -> String { + patchbay::util::sanitize_for_env_key(name) +} + +pub fn load_topology_for_inspect( + input: &Path, +) -> Result<(patchbay::config::LabConfig, bool)> { + let text = std::fs::read_to_string(input) + .with_context(|| format!("read {}", input.display()))?; + let value: toml::Value = + toml::from_str(&text).with_context(|| format!("parse TOML {}", input.display()))?; + let is_sim = + value.get("sim").is_some() || value.get("step").is_some() || value.get("binary").is_some(); + if is_sim { + let sim_file: sim::SimFile = + toml::from_str(&text).with_context(|| format!("parse sim {}", input.display()))?; + let topo = sim::topology::load_topology(&sim_file, input) + .with_context(|| format!("load topology from sim {}", input.display()))?; + Ok((topo, true)) + } else { + let topo: patchbay::config::LabConfig = + toml::from_str(&text).with_context(|| format!("parse topology {}", input.display()))?; + Ok((topo, false)) + } +} + +fn keeper_command() -> ProcessCommand { + let mut cmd = ProcessCommand::new("sh"); + cmd.args(["-lc", "while :; do sleep 3600; done"]) + .stdin(std::process::Stdio::null()) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()); + cmd +} + +pub async fn inspect_command(input: PathBuf, work_dir: PathBuf) -> Result<()> { + check_caps()?; + + let (topo, is_sim) = load_topology_for_inspect(&input)?; + 
let lab = patchbay_runner::Lab::from_config(topo.clone()) + .await + .with_context(|| format!("build lab config from {}", input.display()))?; + + let mut node_namespaces = HashMap::new(); + let mut node_ips_v4 = HashMap::new(); + let mut node_keeper_pids = HashMap::new(); + + for router in &topo.router { + let name = router.name.clone(); + let r = lab + .router_by_name(&name) + .with_context(|| format!("unknown router '{name}'"))?; + let child = r.spawn_command_sync(keeper_command())?; + node_keeper_pids.insert(name.clone(), child.id()); + node_namespaces.insert(name.clone(), r.ns().to_string()); + if let Some(ip) = r.uplink_ip() { + node_ips_v4.insert(name, ip.to_string()); + } + } + for name in topo.device.keys() { + let d = lab + .device_by_name(name) + .with_context(|| format!("unknown device '{name}'"))?; + let child = d.spawn_command_sync(keeper_command())?; + node_keeper_pids.insert(name.clone(), child.id()); + node_namespaces.insert(name.clone(), d.ns().to_string()); + if let Some(ip) = d.ip() { + node_ips_v4.insert(name.clone(), ip.to_string()); + } + } + + let prefix = lab.prefix().to_string(); + let session = InspectSession { + prefix: prefix.clone(), + root_ns: lab.ix().ns(), + node_namespaces, + node_ips_v4, + node_keeper_pids, + }; + + let session_dir = inspect_dir(&work_dir); + std::fs::create_dir_all(&session_dir) + .with_context(|| format!("create {}", session_dir.display()))?; + let session_path = inspect_session_path(&work_dir, &prefix); + std::fs::write(&session_path, serde_json::to_vec_pretty(&session)?) 
+ .with_context(|| format!("write {}", session_path.display()))?; + + let mut keys: Vec<_> = session.node_namespaces.keys().map(String::as_str).collect(); + keys.sort(); + + println!( + "inspect ready: {} ({})", + session.prefix, + if is_sim { "sim" } else { "topology" } + ); + println!("session file: {}", session_path.display()); + println!("export NETSIM_INSPECT={}", session.prefix); + println!("export NETSIM_INSPECT_FILE={}", session_path.display()); + for key in &keys { + if let Some(ns) = session.node_namespaces.get(*key) { + println!("export NETSIM_NS_{}={ns}", env_key_suffix(key)); + } + if let Some(ip) = session.node_ips_v4.get(*key) { + println!("export NETSIM_IP_{}={ip}", env_key_suffix(key)); + } + } + println!("inspect active; press Ctrl-C to stop and clean up"); + loop { + std::thread::sleep(Duration::from_secs(60)); + } +} + +pub fn resolve_inspect_ref(inspect: Option) -> Result { + if let Some(value) = inspect { + let trimmed = value.trim(); + if trimmed.is_empty() { + bail!("--inspect must not be empty"); + } + return Ok(trimmed.to_string()); + } + let from_env = std::env::var("NETSIM_INSPECT") + .context("missing inspect session; set --inspect or NETSIM_INSPECT")?; + let trimmed = from_env.trim(); + if trimmed.is_empty() { + bail!("NETSIM_INSPECT is set but empty"); + } + Ok(trimmed.to_string()) +} + +pub fn load_inspect_session(work_dir: &Path, inspect_ref: &str) -> Result { + let as_path = PathBuf::from(inspect_ref); + let session_path = if as_path.extension().and_then(|v| v.to_str()) == Some("json") + || inspect_ref.contains('/') + { + as_path + } else { + inspect_session_path(work_dir, inspect_ref) + }; + let bytes = std::fs::read(&session_path) + .with_context(|| format!("read inspect session {}", session_path.display()))?; + serde_json::from_slice(&bytes) + .with_context(|| format!("parse inspect session {}", session_path.display())) +} + +pub fn run_in_command( + node: String, + inspect: Option, + work_dir: PathBuf, + cmd: Vec, +) -> 
Result<()> { + check_caps()?; + if cmd.is_empty() { + bail!("run-in: missing command"); + } + let inspect_ref = resolve_inspect_ref(inspect)?; + let session = load_inspect_session(&work_dir, &inspect_ref)?; + let pid = *session.node_keeper_pids.get(&node).ok_or_else(|| { + anyhow!( + "node '{}' is not in inspect session '{}'", + node, + session.prefix + ) + })?; + + let mut proc = ProcessCommand::new("nsenter"); + proc.arg("-U") + .arg("-t") + .arg(pid.to_string()) + .arg("-n") + .arg("--") + .arg(&cmd[0]); + if cmd.len() > 1 { + proc.args(&cmd[1..]); + } + let status = proc + .status() + .context("run command with nsenter for inspect session")?; + if !status.success() { + bail!("run-in command exited with status {}", status); + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn env_key_suffix_normalizes_names() { + assert_eq!(env_key_suffix("relay"), "relay"); + assert_eq!(env_key_suffix("fetcher-1"), "fetcher_1"); + } + + #[test] + fn inspect_session_path_uses_prefix_json() { + let base = PathBuf::from("/tmp/patchbay-work"); + let path = inspect_session_path(&base, "lab-p123"); + assert!(path.ends_with("inspect/lab-p123.json")); + } + + fn write_temp_file(dir: &Path, rel: &str, body: &str) -> PathBuf { + let path = dir.join(rel); + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent).expect("create parent"); + } + std::fs::write(&path, body).expect("write file"); + path + } + + #[test] + fn inspect_loader_detects_sim_input() { + let root = std::env::temp_dir().join(format!( + "patchbay-inspect-test-{}-{}", + std::process::id(), + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .expect("time") + .as_nanos() + )); + let sim_path = write_temp_file( + &root, + "sims/case.toml", + "[sim]\nname='x'\n\n[[router]]\nname='relay'\n\n[device.fetcher.eth0]\ngateway='relay'\n", + ); + let (_topo, is_sim) = load_topology_for_inspect(&sim_path).expect("load sim topology"); + assert!(is_sim); + } + + #[test] + 
fn inspect_loader_detects_topology_input() { + let root = std::env::temp_dir().join(format!( + "patchbay-inspect-test-{}-{}", + std::process::id(), + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .expect("time") + .as_nanos() + )); + let topo_path = write_temp_file( + &root, + "topos/lab.toml", + "[[router]]\nname='relay'\n\n[device.fetcher.eth0]\ngateway='relay'\n", + ); + let (_topo, is_sim) = load_topology_for_inspect(&topo_path).expect("load direct topology"); + assert!(!is_sim); + } + + #[tokio::test(flavor = "current_thread")] + async fn iperf_sim_writes_results_with_mbps() { + let root = std::env::temp_dir().join(format!( + "patchbay-iperf-run-test-{}-{}", + std::process::id(), + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .expect("time") + .as_nanos() + )); + std::fs::create_dir_all(&root).expect("create temp workdir"); + let workspace_root = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .parent() + .unwrap() + .to_path_buf(); + let sim_path = workspace_root.join("iroh-integration/patchbay/sims/iperf-1to1-public.toml"); + run_sims( + vec![sim_path], + root.clone(), + vec![], + false, + Some(workspace_root), + false, + None, + ) + .await + .expect("run iperf sim"); + + let run_root = std::fs::canonicalize(root.join("latest")).expect("resolve latest"); + let results_path = run_root + .join("iperf-1to1-public-baseline") + .join("results.json"); + let text = std::fs::read_to_string(&results_path) + .unwrap_or_else(|e| panic!("read {}: {e}", results_path.display())); + let json: serde_json::Value = serde_json::from_str(&text).expect("parse results"); + let step = &json["steps"][0]; + let down_bytes: f64 = step["down_bytes"] + .as_str() + .expect("down_bytes should be present") + .parse() + .expect("down_bytes should be numeric"); + let duration: f64 = step["duration"] + .as_str() + .expect("duration should be present") + .parse::() + .map(|us| us as f64 / 1_000_000.0) + .unwrap_or_else(|_| { + step["duration"] 
+ .as_str() + .unwrap() + .parse::() + .expect("duration as float") + }); + let mb_s = down_bytes / (duration * 1_000_000.0); + assert!(mb_s > 0.0, "expected mb_s > 0, got {mb_s}"); + } +} diff --git a/patchbay-cli/src/vm.rs b/patchbay-cli/src/vm.rs new file mode 100644 index 0000000..cbc23ac --- /dev/null +++ b/patchbay-cli/src/vm.rs @@ -0,0 +1,195 @@ +//! VM backend: subcommands and dispatch for patchbay-vm. + +use std::{path::PathBuf, time::Duration}; + +use anyhow::Result; +use clap::Subcommand; +use patchbay_vm::VmOps; + +use crate::test; + +#[cfg(feature = "serve")] +use patchbay_server::DEFAULT_UI_BIND; +#[cfg(not(feature = "serve"))] +const DEFAULT_UI_BIND: &str = "127.0.0.1:7421"; + +/// Shared args for `patchbay run` (used by both top-level Run and Vm Run). +#[derive(Debug, Clone, clap::Args)] +pub struct VmRunArgs { + /// One or more sim TOML files or directories containing `*.toml`. + #[arg(required = true)] + pub sims: Vec, + + /// Work directory for logs, binaries, and results. + #[arg(long, default_value = ".patchbay/work")] + pub work_dir: PathBuf, + + /// Binary override in `::` form. + #[arg(long = "binary")] + pub binary_overrides: Vec, + + /// Stream live stdout/stderr lines with node prefixes. + #[arg(short = 'v', long, default_value_t = false)] + pub verbose: bool, + + /// Start embedded UI server and open browser. + #[arg(long, default_value_t = false)] + pub open: bool, + + /// Bind address for embedded UI server. + #[arg(long, default_value = DEFAULT_UI_BIND)] + pub bind: String, +} + +/// VM sub-subcommands (mirrors patchbay-vm's standalone CLI). +#[derive(Subcommand)] +pub enum VmCommand { + /// Boot or reuse VM and ensure mounts. + Up { + #[arg(long)] + recreate: bool, + }, + /// Stop VM and helper processes. + Down, + /// Show VM running status. + Status, + /// Best-effort cleanup of VM helper artifacts/processes. + Cleanup, + /// Execute command in the guest (SSH for QEMU, exec for container). 
+ Ssh { + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + cmd: Vec, + }, + /// Run one or more sims in VM using guest patchbay binary. + Run { + #[command(flatten)] + args: VmRunArgs, + + #[arg(long)] + recreate: bool, + #[arg(long, default_value = "latest")] + patchbay_version: String, + }, + /// Serve embedded UI + work directory over HTTP. + Serve { + #[arg(long, default_value = ".patchbay/work")] + work_dir: PathBuf, + /// Serve `/binaries/tests/testdir-current` instead of work_dir. + #[arg(long, default_value_t = false)] + testdir: bool, + #[arg(long, default_value = DEFAULT_UI_BIND)] + bind: String, + #[arg(long, default_value_t = false)] + open: bool, + }, + /// Build and run tests in VM. + Test { + #[command(flatten)] + args: test::TestArgs, + #[arg(long, default_value_t = patchbay_vm::default_test_target())] + target: String, + #[arg(long)] + recreate: bool, + }, +} + +/// Dispatch VM subcommands to the patchbay-vm library. +pub async fn dispatch_vm(command: VmCommand, backend: patchbay_vm::Backend) -> Result<()> { + let backend = backend.resolve(); + + match command { + VmCommand::Up { recreate } => backend.up(recreate), + VmCommand::Down => backend.down(), + VmCommand::Status => backend.status(), + VmCommand::Cleanup => backend.cleanup(), + VmCommand::Ssh { cmd } => backend.exec(cmd), + VmCommand::Run { + args, + recreate, + patchbay_version, + } => { + if args.open { + #[cfg(feature = "serve")] + { + let url = format!("http://{}", args.bind); + println!("patchbay UI: {url}"); + let _ = open::that(&url); + let work = args.work_dir.clone(); + let bind_clone = args.bind.clone(); + tokio::spawn(async move { + if let Err(e) = patchbay_server::serve(work, &bind_clone).await { + tracing::error!("server error: {e}"); + } + }); + } + #[cfg(not(feature = "serve"))] + bail!("--open requires the `serve` feature"); + } + let vm_args = patchbay_vm::RunVmArgs { + sim_inputs: args.sims, + work_dir: args.work_dir.clone(), + binary_overrides: 
args.binary_overrides, + verbose: args.verbose, + recreate, + patchbay_version, + }; + let res = backend.run_sims(vm_args); + if args.open && res.is_ok() { + println!("run finished; server still running (Ctrl-C to exit)"); + loop { + tokio::time::sleep(Duration::from_secs(60)).await; + } + } + res + } + VmCommand::Serve { + work_dir, + testdir, + bind, + open, + } => { + #[cfg(feature = "serve")] + { + let dir = if testdir { + work_dir + .join("binaries") + .join("tests") + .join("testdir-current") + } else { + work_dir + }; + println!("patchbay: serving {} at http://{bind}/", dir.display()); + if open { + let url = format!("http://{bind}"); + let _ = open::that(&url); + } + patchbay_server::serve(dir, &bind).await + } + #[cfg(not(feature = "serve"))] + { + let _ = (&work_dir, &testdir, &bind, &open); + bail!("serve requires the `serve` feature") + } + } + VmCommand::Test { + args, + target, + recreate, + } => backend.run_tests(args.into_vm_args(target, recreate)), + } +} + +/// Run sims via VM backend (used by top-level `Run` on non-Linux). +#[allow(dead_code)] // Only called on non-Linux targets. +pub fn run_sims_vm(args: VmRunArgs, backend: patchbay_vm::Backend) -> Result<()> { + let backend = backend.resolve(); + let vm_args = patchbay_vm::RunVmArgs { + sim_inputs: args.sims, + work_dir: args.work_dir, + binary_overrides: args.binary_overrides, + verbose: args.verbose, + recreate: false, + patchbay_version: "latest".to_string(), + }; + backend.run_sims(vm_args) +} From 521c01d88a12920dc4635fc2eb0d3b8f2aa1a393 Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 27 Mar 2026 15:09:52 +0100 Subject: [PATCH 2/6] fix: use direct GitHub release download instead of cargo binstall Replace cargo binstall with a direct curl+tar from the rolling release. Faster, no extra tooling, and works reliably in CI. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/guide/testing.md | 16 +++++++++++----- patchbay-server/github-workflow-template.yml | 12 +++++++----- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/docs/guide/testing.md b/docs/guide/testing.md index ffce27f..b241043 100644 --- a/docs/guide/testing.md +++ b/docs/guide/testing.md @@ -112,8 +112,11 @@ On Linux, tests run natively. Install patchbay's CLI if you want the `serve` command for viewing results: ```bash -cargo binstall patchbay-cli --no-confirm \ - || cargo install patchbay-cli --git https://github.com/n0-computer/patchbay +# From rolling release (fast): +curl -fsSL https://github.com/n0-computer/patchbay/releases/download/rolling/patchbay-x86_64-unknown-linux-musl.tar.gz \ + | tar xz -C ~/.cargo/bin && mv ~/.cargo/bin/patchbay-x86_64-unknown-linux-musl ~/.cargo/bin/patchbay +# Or build from source: +cargo install patchbay-cli --git https://github.com/n0-computer/patchbay ``` Then run your tests and serve the output: @@ -247,11 +250,14 @@ Install the patchbay CLI in your workflow, then add these steps **after** the test step: ```yaml - # Install patchbay CLI (binstall for speed, cargo install as fallback) + # Install patchbay CLI from rolling release - name: Install patchbay CLI run: | - cargo binstall patchbay-cli --no-confirm 2>/dev/null \ - || cargo install patchbay-cli --git https://github.com/n0-computer/patchbay + ASSET="patchbay-x86_64-unknown-linux-musl" + curl -fsSL "https://github.com/n0-computer/patchbay/releases/download/rolling/${ASSET}.tar.gz" \ + | tar xz -C /usr/local/bin "$ASSET" + mv /usr/local/bin/"$ASSET" /usr/local/bin/patchbay + chmod +x /usr/local/bin/patchbay # Run tests with patchbay (--persist keeps the run directory) - name: Run tests diff --git a/patchbay-server/github-workflow-template.yml b/patchbay-server/github-workflow-template.yml index c4a0aeb..1706be9 100644 --- a/patchbay-server/github-workflow-template.yml +++ 
b/patchbay-server/github-workflow-template.yml @@ -29,13 +29,15 @@ jobs: # ── Build tools — adjust to your project ── - uses: dtolnay/rust-toolchain@stable - # ── Install patchbay CLI ── - # Install pre-built binary via binstall (fast), or build from source. + # ── Install patchbay CLI from rolling release ── - name: Install patchbay CLI run: | - curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash - cargo binstall patchbay-cli --git-url https://github.com/n0-computer/patchbay --no-confirm \ - || cargo install patchbay-cli --git https://github.com/n0-computer/patchbay + ASSET="patchbay-x86_64-unknown-linux-musl" + URL="https://github.com/n0-computer/patchbay/releases/download/rolling/${ASSET}.tar.gz" + curl -fsSL "$URL" | tar xz -C /usr/local/bin "$ASSET" + mv /usr/local/bin/"$ASSET" /usr/local/bin/patchbay + chmod +x /usr/local/bin/patchbay + patchbay --version || patchbay --help | head -1 # ── Run tests — replace with your own command ── # Use `patchbay test` which writes structured output to .patchbay/work/. From 29affa490b5d637c26a791d41ad6936b69396e72 Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 27 Mar 2026 15:11:17 +0100 Subject: [PATCH 3/6] fix: restore ctor bootstrap, use direct release download in CI Add back #[ctor::ctor] init_userns_for_ctor() in native.rs so test binaries spawned by nextest set up user namespaces before main(). Replace cargo binstall with direct curl+tar from rolling release. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/native.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/patchbay-cli/src/native.rs b/patchbay-cli/src/native.rs index 7f94467..ab39d9e 100644 --- a/patchbay-cli/src/native.rs +++ b/patchbay-cli/src/native.rs @@ -12,7 +12,15 @@ use patchbay::check_caps; use patchbay_runner::sim; use serde::{Deserialize, Serialize}; -/// Initialize user namespaces (must be called before tokio starts threads). +/// Bootstrap user namespaces before main() — required for test binaries +/// where main() is not our code (nextest spawns each test as a process). +#[ctor::ctor] +fn _init_userns() { + // Safety: called from .init_array before main() and before any threads. + unsafe { patchbay::init_userns_for_ctor() }; +} + +/// Initialize user namespaces (called from main() as well for the CLI binary). pub fn init() -> Result<()> { patchbay::init_userns() } From a3fa79e2f3643a1cd05e0bef90f49ac21da61a27 Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 27 Mar 2026 15:23:27 +0100 Subject: [PATCH 4/6] refactor: use nextest JSON output, share piped-output helpers Use nextest --message-format libtest-json for structured test results instead of parsing human-readable output. Extract shared helpers (run_piped, parse_nextest_json, has_nextest) from test.rs and reuse in compare.rs. Falls back to cargo test with text parsing when nextest is unavailable. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 69 +++------- patchbay-cli/src/test.rs | 245 +++++++++++++++++++----------------- 2 files changed, 150 insertions(+), 164 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index bf4c616..42728fc 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -58,62 +58,33 @@ fn sanitize_ref(r: &str) -> String { // Types re-exported from patchbay_utils::manifest: // TestResult, TestStatus, RunManifest, RunKind -pub use manifest::parse_test_output; - /// Run tests in a directory and capture results. +/// +/// Uses nextest with JSON output if available, falls back to cargo test with text parsing. pub fn run_tests_in_dir( dir: &Path, args: &crate::test::TestArgs, verbose: bool, ) -> Result<(Vec<TestResult>, String)> { - use std::io::BufRead; - - let mut cmd = args.cargo_test_cmd_in(Some(dir)); - // Use a per-worktree target dir to avoid sharing cached binaries - // between different git refs.
- cmd.env("CARGO_TARGET_DIR", dir.join("target")); - cmd.stdout(std::process::Stdio::piped()); - cmd.stderr(std::process::Stdio::piped()); - let mut child = cmd.spawn().context("spawn cargo test")?; - - let stdout_pipe = child.stdout.take().unwrap(); - let stderr_pipe = child.stderr.take().unwrap(); - let v = verbose; - let out_t = std::thread::spawn(move || { - let mut buf = String::new(); - for line in std::io::BufReader::new(stdout_pipe) - .lines() - .map_while(Result::ok) - { - if v { - println!("{line}"); - } - buf.push_str(&line); - buf.push('\n'); - } - buf - }); - let err_t = std::thread::spawn(move || { - let mut buf = String::new(); - for line in std::io::BufReader::new(stderr_pipe) - .lines() - .map_while(Result::ok) - { - if verbose { - eprintln!("{line}"); - } - buf.push_str(&line); - buf.push('\n'); - } - buf - }); + let use_nextest = crate::test::has_nextest(); + let mut cmd = if use_nextest { + let mut c = args.nextest_cmd(Some(dir)); + c.env("CARGO_TARGET_DIR", dir.join("target")); + c + } else { + let mut c = args.cargo_test_cmd_in(Some(dir)); + c.env("CARGO_TARGET_DIR", dir.join("target")); + c + }; - let _ = child.wait().context("wait for cargo test")?; - let stdout = out_t.join().unwrap_or_default(); - let stderr = err_t.join().unwrap_or_default(); - let combined = format!("{stdout}\n{stderr}"); - let results = parse_test_output(&combined); - Ok((results, combined)) + let (_success, stdout, stderr) = crate::test::run_piped(&mut cmd, verbose)?; + let results = if use_nextest { + crate::test::parse_nextest_json(&stdout) + } else { + let combined = format!("{stdout}\n{stderr}"); + manifest::parse_test_output(&combined) + }; + Ok((results, stdout)) } /// Persist test results from a worktree run so future compares can reuse them. 
diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index cfa53f5..7f15f84 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -3,21 +3,11 @@ use std::{ path::{Path, PathBuf}, process::Command, + time::Duration, }; use anyhow::{bail, Context, Result}; -use patchbay_utils::manifest::{self, RunKind, RunManifest, TestStatus}; - -/// Check if cargo-nextest is available. -fn has_nextest() -> bool { - Command::new("cargo-nextest") - .arg("--version") - .stdout(std::process::Stdio::null()) - .stderr(std::process::Stdio::null()) - .status() - .map(|s| s.success()) - .unwrap_or(false) -} +use patchbay_utils::manifest::{self, RunKind, RunManifest, TestResult, TestStatus}; /// Shared test arguments used by both `patchbay test` and `patchbay compare test`. #[derive(Debug, Clone, clap::Args)] @@ -64,20 +54,9 @@ pub struct TestArgs { } impl TestArgs { - /// Build a `cargo test` command with all flags applied. - /// Does NOT set stdout/stderr — caller decides piping. - pub fn cargo_test_cmd(&self) -> Command { - self.cargo_test_cmd_in(None) - } - - /// Build a `cargo test` command, optionally running in a specific directory. - pub fn cargo_test_cmd_in(&self, dir: Option<&Path>) -> Command { - let mut cmd = Command::new("cargo"); - cmd.arg("test"); + /// Apply shared cargo flags to a command (packages, tests, jobs, features, etc). + fn apply_cargo_flags(&self, cmd: &mut Command) { cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); - if let Some(d) = dir { - cmd.current_dir(d); - } for p in &self.packages { cmd.arg("-p").arg(p); } @@ -99,7 +78,37 @@ impl TestArgs { if self.no_fail_fast { cmd.arg("--no-fail-fast"); } - // Everything after `--`: --ignored/--include-ignored + extra args + } + + /// Build a `cargo nextest run` command with JSON output. 
+ pub fn nextest_cmd(&self, dir: Option<&Path>) -> Command { + let mut cmd = Command::new("cargo"); + cmd.arg("nextest").arg("run"); + cmd.env("NEXTEST_EXPERIMENTAL_LIBTEST_JSON", "1"); + cmd.arg("--message-format").arg("libtest-json"); + self.apply_cargo_flags(&mut cmd); + if let Some(d) = dir { + cmd.current_dir(d); + } + if self.include_ignored { + cmd.arg("--run-ignored").arg("all"); + } else if self.ignored { + cmd.arg("--run-ignored").arg("ignored-only"); + } + for a in &self.extra_args { + cmd.arg(a); + } + cmd + } + + /// Build a `cargo test` command (fallback when nextest is unavailable). + pub fn cargo_test_cmd_in(&self, dir: Option<&Path>) -> Command { + let mut cmd = Command::new("cargo"); + cmd.arg("test"); + self.apply_cargo_flags(&mut cmd); + if let Some(d) = dir { + cmd.current_dir(d); + } if self.include_ignored || self.ignored || !self.extra_args.is_empty() { cmd.arg("--"); if self.ignored { @@ -145,6 +154,56 @@ impl TestArgs { } } +/// Check if cargo-nextest is available. +pub fn has_nextest() -> bool { + Command::new("cargo-nextest") + .arg("--version") + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .map(|s| s.success()) + .unwrap_or(false) +} + +/// Parse nextest JSON (libtest format) lines into TestResults. 
+pub fn parse_nextest_json(stdout: &str) -> Vec<TestResult> { + let mut results = Vec::new(); + for line in stdout.lines() { + let Ok(v) = serde_json::from_str::<serde_json::Value>(line) else { + continue; + }; + if v.get("type").and_then(|t| t.as_str()) != Some("test") { + continue; + } + let event = v.get("event").and_then(|e| e.as_str()).unwrap_or(""); + if event == "started" { + continue; + } + let name = v + .get("name") + .and_then(|n| n.as_str()) + .unwrap_or("") + .to_string(); + let status = match event { + "ok" => TestStatus::Pass, + "failed" => TestStatus::Fail, + "ignored" => TestStatus::Ignored, + _ => continue, + }; + let duration = v + .get("exec_time") + .and_then(|t| t.as_f64()) + .map(Duration::from_secs_f64); + results.push(TestResult { + name, + status, + duration, + dir: None, + }); + } + results +} + /// Resolve `target_directory` from cargo metadata. fn cargo_target_dir() -> Option<PathBuf> { let output = Command::new("cargo") @@ -158,79 +217,22 @@ fn cargo_target_dir() -> Option<PathBuf> { meta["target_directory"].as_str().map(PathBuf::from) } -/// Run tests natively via cargo test/nextest. -/// -/// Captures stdout/stderr (printing live when `verbose` is true), parses -/// test results, and writes `run.json` to `testdir-current/`. -/// When `persist` is true, copies output to `.patchbay/work/run-{timestamp}/`. -pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { +/// Run a test command, capturing output. Returns (exit success, stdout, stderr).
+pub fn run_piped(cmd: &mut Command, verbose: bool) -> Result<(bool, String, String)> { use std::io::BufRead; - let use_nextest = has_nextest(); - let mut cmd = if use_nextest { - let mut cmd = Command::new("cargo"); - cmd.arg("nextest").arg("run"); - cmd.env("RUSTFLAGS", crate::util::patchbay_rustflags()); - for p in &args.packages { - cmd.arg("-p").arg(p); - } - for t in &args.tests { - cmd.arg("--test").arg(t); - } - if let Some(j) = args.jobs { - cmd.arg("-j").arg(j.to_string()); - } - for f in &args.features { - cmd.arg("-F").arg(f); - } - if args.release { - cmd.arg("--release"); - } - if args.lib { - cmd.arg("--lib"); - } - if args.no_fail_fast { - cmd.arg("--no-fail-fast"); - } - if args.include_ignored { - cmd.arg("--run-ignored").arg("all"); - } else if args.ignored { - cmd.arg("--run-ignored").arg("ignored-only"); - } - for a in &args.extra_args { - cmd.arg(a); - } - cmd - } else { - eprintln!("patchbay: cargo-nextest not found, using cargo test"); - args.cargo_test_cmd() - }; - - // Set PATCHBAY_OUTDIR so test fixtures can discover the output directory. - if let Some(target_dir) = cargo_target_dir() { - let outdir = target_dir.join("testdir-current"); - cmd.env("PATCHBAY_OUTDIR", &outdir); - } - - // Pipe stdout/stderr so we can capture output while optionally printing live. 
cmd.stdout(std::process::Stdio::piped()); cmd.stderr(std::process::Stdio::piped()); - let started_at = chrono::Utc::now(); let mut child = cmd.spawn().context("failed to spawn test command")?; - let stdout_pipe = child.stdout.take().unwrap(); let stderr_pipe = child.stderr.take().unwrap(); + let v = verbose; let out_t = std::thread::spawn(move || { let mut buf = String::new(); - for line in std::io::BufReader::new(stdout_pipe) - .lines() - .map_while(Result::ok) - { - if v { - println!("{line}"); - } + for line in std::io::BufReader::new(stdout_pipe).lines().map_while(Result::ok) { + if v { println!("{line}"); } buf.push_str(&line); buf.push('\n'); } @@ -238,13 +240,8 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { }); let err_t = std::thread::spawn(move || { let mut buf = String::new(); - for line in std::io::BufReader::new(stderr_pipe) - .lines() - .map_while(Result::ok) - { - if verbose { - eprintln!("{line}"); - } + for line in std::io::BufReader::new(stderr_pipe).lines().map_while(Result::ok) { + if verbose { eprintln!("{line}"); } buf.push_str(&line); buf.push('\n'); } @@ -252,26 +249,45 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { }); let status = child.wait().context("failed to wait for test command")?; - let ended_at = chrono::Utc::now(); let stdout = out_t.join().unwrap_or_default(); let stderr = err_t.join().unwrap_or_default(); + Ok((status.success(), stdout, stderr)) +} - let combined = format!("{stdout}\n{stderr}"); - let results = manifest::parse_test_output(&combined); - - // Write run.json into testdir-current/. - let pass = results - .iter() - .filter(|r| r.status == TestStatus::Pass) - .count() as u32; - let fail = results - .iter() - .filter(|r| r.status == TestStatus::Fail) - .count() as u32; +/// Run tests natively via nextest (preferred) or cargo test (fallback). 
+pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { + let use_nextest = has_nextest(); + if !use_nextest { + eprintln!("patchbay: warning: cargo-nextest not found, falling back to cargo test"); + eprintln!("patchbay: install with: cargo install cargo-nextest"); + } + + let mut cmd = if use_nextest { + args.nextest_cmd(None) + } else { + args.cargo_test_cmd_in(None) + }; + + if let Some(target_dir) = cargo_target_dir() { + cmd.env("PATCHBAY_OUTDIR", target_dir.join("testdir-current")); + } + + let started_at = chrono::Utc::now(); + let (success, stdout, stderr) = run_piped(&mut cmd, verbose)?; + let ended_at = chrono::Utc::now(); + + // Parse results: structured JSON from nextest, text fallback for cargo test. + let results = if use_nextest { + parse_nextest_json(&stdout) + } else { + let combined = format!("{stdout}\n{stderr}"); + manifest::parse_test_output(&combined) + }; + + let pass = results.iter().filter(|r| r.status == TestStatus::Pass).count() as u32; + let fail = results.iter().filter(|r| r.status == TestStatus::Fail).count() as u32; let total = results.len() as u32; let git = manifest::git_context(); - let runtime = (ended_at - started_at).to_std().ok(); - let outcome = if status.success() { "pass" } else { "fail" }; let manifest = RunManifest { kind: RunKind::Test, @@ -284,8 +300,8 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { title: None, started_at: Some(started_at), ended_at: Some(ended_at), - runtime, - outcome: Some(outcome.to_string()), + runtime: (ended_at - started_at).to_std().ok(), + outcome: Some(if success { "pass" } else { "fail" }.to_string()), pass: Some(pass), fail: Some(fail), total: Some(total), @@ -311,13 +327,12 @@ pub fn run_native(args: TestArgs, verbose: bool, persist: bool) -> Result<()> { } } - // --persist: copy output dir to .patchbay/work/run-{timestamp}/ if persist { persist_run()?; } - if !status.success() { - bail!("tests failed (exit code {})", 
status.code().unwrap_or(-1)); + if !success { + bail!("tests failed"); } Ok(()) } From 24bdd736d23a7488f07034cd5501b6db4c262d17 Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 30 Mar 2026 10:29:21 +0200 Subject: [PATCH 5/6] feat: server-side nextest ingest, compare loading fix Server auto-generates run.json from test-results.jsonl on push when no run.json is present. Directories with run.json but no events.jsonl and no children are now treated as leaf runs (not empty groups). Push response includes view_url. Compare page no longer hangs on "Loading lab state..." for test runs without state.json. parse_nextest_json moved to patchbay-utils for reuse by server. CI template simplified to nextest + curl (no patchbay binary needed). Stderr flows to console. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/test.rs | 42 +------ patchbay-server/github-workflow-template.yml | 54 ++++---- patchbay-server/src/lib.rs | 122 +++++++++++++++---- patchbay-utils/src/manifest.rs | 65 ++++++++++ ui/e2e/compare.spec.ts | 8 +- ui/e2e/push.spec.ts | 64 ++++++++++ ui/src/components/CompareView.tsx | 6 +- ui/src/components/RunView.tsx | 6 +- 8 files changed, 271 insertions(+), 96 deletions(-) diff --git a/patchbay-cli/src/test.rs b/patchbay-cli/src/test.rs index 7f15f84..995d56e 100644 --- a/patchbay-cli/src/test.rs +++ b/patchbay-cli/src/test.rs @@ -3,11 +3,10 @@ use std::{ path::{Path, PathBuf}, process::Command, - time::Duration, }; use anyhow::{bail, Context, Result}; -use patchbay_utils::manifest::{self, RunKind, RunManifest, TestResult, TestStatus}; +use patchbay_utils::manifest::{self, RunKind, RunManifest, TestStatus}; /// Shared test arguments used by both `patchbay test` and `patchbay compare test`. #[derive(Debug, Clone, clap::Args)] @@ -166,43 +165,8 @@ pub fn has_nextest() -> bool { } /// Parse nextest JSON (libtest format) lines into TestResults. 
-pub fn parse_nextest_json(stdout: &str) -> Vec { - let mut results = Vec::new(); - for line in stdout.lines() { - let Ok(v) = serde_json::from_str::(line) else { - continue; - }; - if v.get("type").and_then(|t| t.as_str()) != Some("test") { - continue; - } - let event = v.get("event").and_then(|e| e.as_str()).unwrap_or(""); - if event == "started" { - continue; - } - let name = v - .get("name") - .and_then(|n| n.as_str()) - .unwrap_or("") - .to_string(); - let status = match event { - "ok" => TestStatus::Pass, - "failed" => TestStatus::Fail, - "ignored" => TestStatus::Ignored, - _ => continue, - }; - let duration = v - .get("exec_time") - .and_then(|t| t.as_f64()) - .map(Duration::from_secs_f64); - results.push(TestResult { - name, - status, - duration, - dir: None, - }); - } - results -} +/// Re-exports from patchbay_utils for use by compare.rs. +pub use manifest::parse_nextest_json; /// Resolve `target_directory` from cargo metadata. fn cargo_target_dir() -> Option { diff --git a/patchbay-server/github-workflow-template.yml b/patchbay-server/github-workflow-template.yml index 1706be9..a2ceea2 100644 --- a/patchbay-server/github-workflow-template.yml +++ b/patchbay-server/github-workflow-template.yml @@ -6,6 +6,8 @@ # 3. If your tests need a self-hosted runner, update runs-on accordingly # # For server setup see: https://github.com/n0-computer/patchbay/tree/main/patchbay-server +# +# No patchbay binary required in CI — just nextest + curl. 
name: Patchbay Tests @@ -28,27 +30,23 @@ jobs: # ── Build tools — adjust to your project ── - uses: dtolnay/rust-toolchain@stable + - uses: taiki-e/install-action@nextest - # ── Install patchbay CLI from rolling release ── - - name: Install patchbay CLI - run: | - ASSET="patchbay-x86_64-unknown-linux-musl" - URL="https://github.com/n0-computer/patchbay/releases/download/rolling/${ASSET}.tar.gz" - curl -fsSL "$URL" | tar xz -C /usr/local/bin "$ASSET" - mv /usr/local/bin/"$ASSET" /usr/local/bin/patchbay - chmod +x /usr/local/bin/patchbay - patchbay --version || patchbay --help | head -1 - - # ── Run tests — replace with your own command ── - # Use `patchbay test` which writes structured output to .patchbay/work/. - # --persist keeps the run directory after tests complete. + # ── Run tests with nextest JSON output ── - name: Run tests id: tests - run: patchbay test --persist -p my-crate --test my-test + run: | + NEXTEST_EXPERIMENTAL_LIBTEST_JSON=1 \ + cargo nextest run \ + --message-format libtest-json \ + -p my-crate --test my-test \ + > .patchbay-results.jsonl env: RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }} + PATCHBAY_OUTDIR: ${{ github.workspace }}/.patchbay-testdir # ── Upload results to patchbay-serve ── + # The server parses nextest JSONL and creates run.json automatically. 
- name: Upload results id: upload if: always() @@ -59,18 +57,25 @@ jobs: set -euo pipefail PROJECT="${{ github.event.repository.name }}" - # Find the most recent run directory - RUN_DIR=$(ls -dt .patchbay/work/run-* 2>/dev/null | head -1) - if [ -z "$RUN_DIR" ]; then - echo "No run directory found, skipping upload" - exit 0 + # Collect output into a directory for upload + UPLOAD_DIR=$(mktemp -d) + # Include nextest results + cp .patchbay-results.jsonl "$UPLOAD_DIR/test-results.jsonl" 2>/dev/null || true + # Include test output (testdir, metrics, events) if present + if [ -d ".patchbay-testdir" ]; then + cp -rL .patchbay-testdir/* "$UPLOAD_DIR/" 2>/dev/null || true fi - VIEW_URL=$(patchbay upload "$RUN_DIR" \ - --project "$PROJECT" \ - --url "$PATCHBAY_URL" \ - --api-key "$PATCHBAY_API_KEY") + # Upload as tar.gz + BODY=$(tar czf - -C "$UPLOAD_DIR" . | curl -sS -X POST \ + -H "Authorization: Bearer $PATCHBAY_API_KEY" \ + -H "Content-Type: application/gzip" \ + --data-binary @- \ + "$PATCHBAY_URL/api/push/$PROJECT") + + VIEW_URL=$(echo "$BODY" | jq -r '.view_url // empty') echo "view_url=$VIEW_URL" >> "$GITHUB_OUTPUT" + rm -rf "$UPLOAD_DIR" # ── Post or update PR comment ── - name: Comment on PR @@ -86,9 +91,10 @@ jobs: const commitUrl = `${{ github.server_url }}/${{ github.repository }}/commit/${sha}`; const date = new Date().toISOString().replace('T', ' ').slice(0, 19) + ' UTC'; const viewUrl = '${{ steps.upload.outputs.view_url }}'; + const viewLine = viewUrl ? 
` | [view results](${viewUrl})` : ''; const body = [ marker, - `${icon} **patchbay:** ${status} | [view results](${viewUrl})`, + `${icon} **patchbay:** ${status}${viewLine}`, `${date} · [\`${shortSha}\`](${commitUrl})`, ].join('\n'); diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index 41702b1..fc72f0c 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -151,31 +151,41 @@ fn scan_runs_recursive( let has_events = path.join(EVENTS_JSONL).exists(); let has_run_json = path.join(RUN_JSON).exists(); - if has_events { - // Leaf run: has events.jsonl → it's an actual lab output dir. - let name = path - .strip_prefix(root) - .unwrap_or(&path) - .to_string_lossy() - .into_owned(); - let (label, status) = read_run_metadata(&path); - let group = name - .split('/') - .next() - .filter(|first| *first != name) - .map(str::to_string); - runs.push(RunInfo { - name, - path, - label, - status, - group, - manifest: None, // populated after scan - }); - } else if has_run_json { - // Group directory: has run.json but no events.jsonl. - // Recurse to find child runs, they inherit this manifest. - scan_runs_recursive(root, &path, depth + 1, runs)?; + if has_events || has_run_json { + // Check if this is a leaf run or a group with children. + let has_child_dirs = fs::read_dir(&path) + .map(|rd| { + rd.flatten() + .any(|e| e.file_type().map(|t| t.is_dir()).unwrap_or(false)) + }) + .unwrap_or(false); + + if has_run_json && !has_events && has_child_dirs { + // Group directory: has run.json, no events.jsonl, has subdirs. + // Recurse to find child runs that inherit this manifest. + scan_runs_recursive(root, &path, depth + 1, runs)?; + } else { + // Leaf run: has events.jsonl or run.json without children. 
+ let name = path + .strip_prefix(root) + .unwrap_or(&path) + .to_string_lossy() + .into_owned(); + let (label, status) = read_run_metadata(&path); + let group = name + .split('/') + .next() + .filter(|first| *first != name) + .map(str::to_string); + runs.push(RunInfo { + name, + path, + label, + status, + group, + manifest: None, // populated after scan + }); + } } else { scan_runs_recursive(root, &path, depth + 1, runs)?; } @@ -1030,9 +1040,70 @@ async fn push_run( } } + // Auto-generate run.json from nextest JSONL if not already present. + let run_json_path = run_dir.join("run.json"); + if !run_json_path.exists() { + let nextest_jsonl = run_dir.join("test-results.jsonl"); + let results = if nextest_jsonl.exists() { + let content = std::fs::read_to_string(&nextest_jsonl).unwrap_or_default(); + patchbay_utils::manifest::parse_nextest_json(&content) + } else { + Vec::new() + }; + let pass = results + .iter() + .filter(|r| r.status == patchbay_utils::manifest::TestStatus::Pass) + .count() as u32; + let fail = results + .iter() + .filter(|r| r.status == patchbay_utils::manifest::TestStatus::Fail) + .count() as u32; + let total = results.len() as u32; + let manifest = patchbay_utils::manifest::RunManifest { + kind: patchbay_utils::manifest::RunKind::Test, + project: Some(project.clone()), + commit: None, + branch: None, + dirty: false, + pr: None, + pr_url: None, + title: None, + started_at: Some(chrono::Utc::now()), + ended_at: None, + runtime: None, + outcome: if fail > 0 { + Some("fail".into()) + } else if pass > 0 { + Some("pass".into()) + } else { + None + }, + pass: if total > 0 { Some(pass) } else { None }, + fail: if total > 0 { Some(fail) } else { None }, + total: if total > 0 { Some(total) } else { None }, + tests: results, + os: None, + arch: None, + patchbay_version: None, + }; + if let Ok(json) = serde_json::to_string_pretty(&manifest) { + let _ = std::fs::write(&run_json_path, json); + } + } + // Notify subscribers about new run let _ = 
state.runs_tx.send(()); + let view_url = format!( + "{}/run/{}", + headers + .get("origin") + .and_then(|v| v.to_str().ok()) + .or_else(|| headers.get("host").and_then(|v| v.to_str().ok())) + .unwrap_or(""), + run_name + ); + // run_name is the group name (first path component for all sims inside) let result = serde_json::json!({ "ok": true, @@ -1041,6 +1112,7 @@ async fn push_run( "group": run_name, "batch": run_name, // backward compat "invocation": run_name, // backward compat (old CI templates read .invocation) + "view_url": view_url, }); (StatusCode::OK, serde_json::to_string(&result).unwrap()) diff --git a/patchbay-utils/src/manifest.rs b/patchbay-utils/src/manifest.rs index 0869011..671fbc4 100644 --- a/patchbay-utils/src/manifest.rs +++ b/patchbay-utils/src/manifest.rs @@ -383,6 +383,47 @@ fn parse_nextest_duration(s: &str) -> Option { Some(Duration::from_secs_f64(secs)) } +/// Parse nextest libtest-json output into test results. +/// +/// Expects JSONL lines like: `{"type":"test","event":"ok","name":"...","exec_time":1.23}` +pub fn parse_nextest_json(output: &str) -> Vec { + let mut results = Vec::new(); + for line in output.lines() { + let Ok(v) = serde_json::from_str::(line) else { + continue; + }; + if v.get("type").and_then(|t| t.as_str()) != Some("test") { + continue; + } + let event = v.get("event").and_then(|e| e.as_str()).unwrap_or(""); + if event == "started" { + continue; + } + let name = v + .get("name") + .and_then(|n| n.as_str()) + .unwrap_or("") + .to_string(); + let status = match event { + "ok" => TestStatus::Pass, + "failed" => TestStatus::Fail, + "ignored" => TestStatus::Ignored, + _ => continue, + }; + let duration = v + .get("exec_time") + .and_then(|t| t.as_f64()) + .map(Duration::from_secs_f64); + results.push(TestResult { + name, + status, + duration, + dir: None, + }); + } + results +} + #[cfg(test)] mod tests { use super::*; @@ -468,6 +509,30 @@ test result: FAILED. 
1 passed; 1 failed; 1 ignored; assert_eq!(none, t3); } + #[test] + fn test_parse_nextest_json() { + let output = r#"{"type":"suite","event":"started","test_count":3,"nextest":{"crate":"iroh","test_binary":"patchbay","kind":"test"}} +{"type":"test","event":"started","name":"iroh::patchbay$holepunch_simple"} +{"type":"test","event":"ignored","name":"iroh::patchbay$holepunch_cgnat"} +{"type":"test","event":"ok","name":"iroh::patchbay$holepunch_simple","exec_time":4.5} +{"type":"suite","event":"ok","passed":1,"failed":0,"ignored":1,"measured":0,"filtered_out":5,"exec_time":4.5,"nextest":{"crate":"iroh","test_binary":"patchbay","kind":"test"}} +{"type":"suite","event":"started","test_count":1,"nextest":{"crate":"iroh","test_binary":"patchbay","kind":"test"}} +{"type":"test","event":"started","name":"iroh::patchbay$switch_uplink"} +{"type":"test","event":"failed","name":"iroh::patchbay$switch_uplink","exec_time":10.0} +{"type":"suite","event":"failed","passed":0,"failed":1,"ignored":0}"#; + let results = parse_nextest_json(output); + assert_eq!(results.len(), 3); + assert_eq!(results[0].name, "iroh::patchbay$holepunch_cgnat"); + assert_eq!(results[0].status, TestStatus::Ignored); + assert_eq!(results[0].duration, None); + assert_eq!(results[1].name, "iroh::patchbay$holepunch_simple"); + assert_eq!(results[1].status, TestStatus::Pass); + assert_eq!(results[1].duration, Some(Duration::from_millis(4500))); + assert_eq!(results[2].name, "iroh::patchbay$switch_uplink"); + assert_eq!(results[2].status, TestStatus::Fail); + assert_eq!(results[2].duration, Some(Duration::from_secs(10))); + } + #[test] fn test_run_manifest_backward_compat() { // Old-style run.json with test_outcome instead of outcome diff --git a/ui/e2e/compare.spec.ts b/ui/e2e/compare.spec.ts index d378399..41c5287 100644 --- a/ui/e2e/compare.spec.ts +++ b/ui/e2e/compare.spec.ts @@ -137,14 +137,12 @@ test('compare view renders summary and regression', async ({ page }) => { await 
expect(page.getByText('1/2').first()).toBeVisible() await expect(page.getByText('regression').first()).toBeVisible() - // Negative: no fixes in this scenario - await expect(page.getByText('fix').first()).not.toBeVisible() - // Score: 0 fixes, 1 regression => score = -5 await expect(page.getByText('-5').first()).toBeVisible() - // Per-test table: verify column content, not just presence - const tableRows = page.locator('table tbody tr') + // Per-test diff table (first table on the page) + const diffTable = page.locator('table').first() + const tableRows = diffTable.locator('tbody tr') await expect(tableRows).toHaveCount(2) // two tests total // udp_counter: pass on both sides, no delta diff --git a/ui/e2e/push.spec.ts b/ui/e2e/push.spec.ts index 5257cbe..9d29a01 100644 --- a/ui/e2e/push.spec.ts +++ b/ui/e2e/push.spec.ts @@ -121,3 +121,67 @@ test('push run results and view via deep link', async ({ page }) => { rmSync(serveDataDir, { recursive: true, force: true }) } }) + +test('push nextest JSONL without run.json — server creates manifest', async ({ page }) => { + test.setTimeout(2 * 60 * 1000) + const uploadDir = mkdtempSync(`${tmpdir()}/patchbay-nextest-push-`) + const serveDir = mkdtempSync(`${tmpdir()}/patchbay-nextest-serve-`) + let serveProc: ChildProcess | null = null + + // Mock nextest libtest-json output + const NEXTEST_JSONL = [ + '{"type":"suite","event":"started","test_count":3}', + '{"type":"test","event":"started","name":"my_crate::tests::foo"}', + '{"type":"test","event":"ok","name":"my_crate::tests::foo","exec_time":0.5}', + '{"type":"test","event":"started","name":"my_crate::tests::bar"}', + '{"type":"test","event":"failed","name":"my_crate::tests::bar","exec_time":1.2}', + '{"type":"test","event":"started","name":"my_crate::tests::baz"}', + '{"type":"test","event":"ok","name":"my_crate::tests::baz","exec_time":0.3}', + '{"type":"suite","event":"failed","passed":2,"failed":1,"ignored":0}', + ].join('\n') + + try { + 
writeFileSync(path.join(uploadDir, 'test-results.jsonl'), NEXTEST_JSONL) + + serveProc = spawn( + PATCHBAY_SERVE_BIN, + ['--accept-push', '--api-key', API_KEY, '--data-dir', serveDir, '--http-bind', SERVE_BIND], + { cwd: REPO_ROOT, stdio: 'inherit' }, + ) + await waitForHttp(`${SERVE_URL}/api/runs`, 15_000) + + // Push the tar.gz (no run.json inside — only test-results.jsonl) + const tarGz = execSync(`tar -czf - -C "${uploadDir}" .`) + const pushRes = await fetch(`${SERVE_URL}/api/push/nextest-project`, { + method: 'POST', + headers: { + 'Authorization': `Bearer ${API_KEY}`, + 'Content-Type': 'application/gzip', + }, + body: tarGz, + }) + expect(pushRes.status).toBe(200) + const body = await pushRes.json() as { ok: boolean; group: string; view_url?: string } + expect(body.ok).toBe(true) + + // Server should have auto-created run.json from the nextest JSONL + await new Promise(r => setTimeout(r, 2000)) + const runsRes = await fetch(`${SERVE_URL}/api/runs`) + const runs = await runsRes.json() as Array<{ name: string; manifest?: Record | null }> + const run = runs.find(r => r.manifest?.project === 'nextest-project') + expect(run).toBeTruthy() + expect(run!.manifest!.kind).toBe('test') + expect(run!.manifest!.pass).toBe(2) + expect(run!.manifest!.fail).toBe(1) + expect(run!.manifest!.total).toBe(3) + // Verify test-level results + const tests = run!.manifest!.tests as Array<{ name: string; status: string }> + expect(tests).toHaveLength(3) + expect(tests.find(t => t.name === 'my_crate::tests::foo')?.status).toBe('pass') + expect(tests.find(t => t.name === 'my_crate::tests::bar')?.status).toBe('fail') + } finally { + if (serveProc && !serveProc.killed) serveProc.kill('SIGTERM') + rmSync(uploadDir, { recursive: true, force: true }) + rmSync(serveDir, { recursive: true, force: true }) + } +}) diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index 9381f44..5073c2f 100644 --- a/ui/src/components/CompareView.tsx +++ 
b/ui/src/components/CompareView.tsx @@ -89,7 +89,7 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri const [leftManifest, setLeftManifest] = useState(null) const [rightManifest, setRightManifest] = useState(null) const [loading, setLoading] = useState(true) - const [sharedTab, setSharedTab] = useState('topology') + const [sharedTab, setSharedTab] = useState('logs') useEffect(() => { setLoading(true) @@ -327,9 +327,11 @@ function SplitRunPanel({ runName, activeTab, onTabChange, sharedControls }: { const [events, setEvents] = useState([]) const [logs, setLogs] = useState([]) const [results, setResults] = useState(null) + const [loaded, setLoaded] = useState(false) useEffect(() => { let dead = false + setLoaded(false) Promise.all([ fetchState(runName), fetchEvents(runName), @@ -341,6 +343,7 @@ function SplitRunPanel({ runName, activeTab, onTabChange, sharedControls }: { setEvents(e ?? []) setLogs(l) setResults(r) + setLoaded(true) }) return () => { dead = true } }, [runName]) @@ -363,6 +366,7 @@ function SplitRunPanel({ runName, activeTab, onTabChange, sharedControls }: { activeTab={activeTab} onTabChange={onTabChange} externalControls={externalControls} + loaded={loaded} /> ) } diff --git a/ui/src/components/RunView.tsx b/ui/src/components/RunView.tsx index 2559498..ee8a5ab 100644 --- a/ui/src/components/RunView.tsx +++ b/ui/src/components/RunView.tsx @@ -28,9 +28,11 @@ interface RunViewProps { activeTab: RunTab onTabChange: (tab: RunTab) => void externalControls?: ExternalControls + /** When true, data fetches are complete (distinguishes "loading" from "no data"). 
*/ + loaded?: boolean } -export default function RunView({ run, state, events, logs, results, activeTab, onTabChange, externalControls }: RunViewProps) { +export default function RunView({ run, state, events, logs, results, activeTab, onTabChange, externalControls, loaded = true }: RunViewProps) { const [selectedNode, setSelectedNode] = useState(null) const [selectedKind, setSelectedKind] = useState<'router' | 'device' | 'ix'>('router') const [logJump, setLogJump] = useState<{ node: string; path: string; timeLabel: string; nonce: number } | null>(null) @@ -98,7 +100,7 @@ export default function RunView({ run, state, events, logs, results, activeTab, )} {tab === 'topology' && !state && ( -
Loading lab state...
+
{loaded ? 'No topology data for this run' : 'Loading lab state...'}
)} {tab === 'logs' && ( From 75bbf30cd5b2ad4fd4a09f2e693b33793f636c44 Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 30 Mar 2026 12:47:04 +0200 Subject: [PATCH 6/6] feat: refine run discovery, group compare, CI template Run discovery uses events.jsonl as the sole leaf-run indicator. run.json without events.jsonl is a group manifest, not a leaf. Server merges test-results.jsonl into existing run.json on push instead of auto-generating. CompareView detects group params (no matching run) and builds synthetic manifests from child runs. Fix double-prefix bug in test click navigation by tracking leftDir/rightDir separately. CI template uses jq for run.json and gh CLI for PR comments. Co-Authored-By: Claude Opus 4.6 (1M context) --- patchbay-cli/src/compare.rs | 21 ++- patchbay-cli/src/main.rs | 11 +- patchbay-server/github-workflow-template.yml | 148 +++++++++-------- patchbay-server/src/lib.rs | 163 ++++++++++--------- patchbay-utils/src/manifest.rs | 117 +++++++++++-- ui/src/components/CompareView.tsx | 65 ++++++-- 6 files changed, 345 insertions(+), 180 deletions(-) diff --git a/patchbay-cli/src/compare.rs b/patchbay-cli/src/compare.rs index 42728fc..04562fe 100644 --- a/patchbay-cli/src/compare.rs +++ b/patchbay-cli/src/compare.rs @@ -91,9 +91,11 @@ pub fn run_tests_in_dir( /// /// Writes `run.json` into `.patchbay/work/run-{timestamp}/`. pub fn persist_worktree_run( - _tree_dir: &Path, + tree_dir: &Path, results: &[TestResult], - commit_sha: &str, + started_at: chrono::DateTime, + ended_at: chrono::DateTime, + runtime: Duration, ) -> Result<()> { use manifest::{RunKind, RunManifest}; @@ -101,6 +103,9 @@ pub fn persist_worktree_run( let dest = PathBuf::from(format!(".patchbay/work/run-{ts}")); std::fs::create_dir_all(&dest)?; + // Capture git context from the worktree directory. 
+ let git = manifest::git_context_in(tree_dir); + let pass = results .iter() .filter(|r| r.status == TestStatus::Pass) @@ -115,15 +120,15 @@ pub fn persist_worktree_run( let manifest = RunManifest { kind: RunKind::Test, project: None, - commit: Some(commit_sha.to_string()), - branch: None, - dirty: false, + commit: git.commit, + branch: git.branch, + dirty: git.dirty, pr: None, pr_url: None, title: None, - started_at: None, - ended_at: None, - runtime: None, + started_at: Some(started_at), + ended_at: Some(ended_at), + runtime: Some(runtime), outcome: Some(outcome.to_string()), pass: Some(pass), fail: Some(fail), diff --git a/patchbay-cli/src/main.rs b/patchbay-cli/src/main.rs index 832d5f8..6591cbf 100644 --- a/patchbay-cli/src/main.rs +++ b/patchbay-cli/src/main.rs @@ -469,10 +469,17 @@ fn dispatch_compare(command: CompareCommand, verbose: bool) -> Result<()> { println!("Running tests in {label} ..."); let tree_dir = compare::setup_worktree(git_ref, &cwd)?; + let started_at = chrono::Utc::now(); let (results, _output) = compare::run_tests_in_dir(&tree_dir, &args, verbose)?; - - compare::persist_worktree_run(&tree_dir, &results, &sha)?; + let ended_at = chrono::Utc::now(); + let runtime = (ended_at - started_at) + .to_std() + .unwrap_or_default(); + + compare::persist_worktree_run( + &tree_dir, &results, started_at, ended_at, runtime, + )?; compare::cleanup_worktree(&tree_dir)?; Ok(results) }; diff --git a/patchbay-server/github-workflow-template.yml b/patchbay-server/github-workflow-template.yml index a2ceea2..4ba66ca 100644 --- a/patchbay-server/github-workflow-template.yml +++ b/patchbay-server/github-workflow-template.yml @@ -2,110 +2,122 @@ # # Prerequisites: # 1. Set repository secrets PATCHBAY_URL and PATCHBAY_API_KEY -# 2. Adjust the "Run tests" step to match your test command -# 3. If your tests need a self-hosted runner, update runs-on accordingly +# 2. Adjust the test command (nextest profile, filter) to match your project +# 3. 
No patchbay binary required — just nextest + curl # # For server setup see: https://github.com/n0-computer/patchbay/tree/main/patchbay-server -# -# No patchbay binary required in CI — just nextest + curl. name: Patchbay Tests on: pull_request: push: - branches: [main] + branches: + - main concurrency: group: patchbay-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true +env: + RUST_BACKTRACE: 1 + NEXTEST_VERSION: "0.9.132" + jobs: patchbay_tests: name: Patchbay Tests + permissions: + contents: read + pull-requests: write timeout-minutes: 45 + # Patchbay tests require Linux with user namespace support. + # Use self-hosted runners or ubuntu-latest (adjust as needed). runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - name: Enable unprivileged user namespaces + run: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0 + continue-on-error: true - # ── Build tools — adjust to your project ── + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable - - uses: taiki-e/install-action@nextest - # ── Run tests with nextest JSON output ── - - name: Run tests + - name: Install cargo-nextest + uses: taiki-e/install-action@v2 + with: + tool: nextest@${{ env.NEXTEST_VERSION }} + + # ── Run tests ── + # Adjust: -p , --test , profile, filter as needed. + # RUSTFLAGS --cfg patchbay_tests enables #[cfg(patchbay_tests)] test gates. 
+ - name: Run patchbay tests id: tests - run: | - NEXTEST_EXPERIMENTAL_LIBTEST_JSON=1 \ - cargo nextest run \ - --message-format libtest-json \ - -p my-crate --test my-test \ - > .patchbay-results.jsonl + run: cargo nextest run --profile patchbay --release --message-format libtest-json-plus > test-results.jsonl env: RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }} - PATCHBAY_OUTDIR: ${{ github.workspace }}/.patchbay-testdir + NEXTEST_EXPERIMENTAL_LIBTEST_JSON: "1" + RUSTFLAGS: "--cfg patchbay_tests" - # ── Upload results to patchbay-serve ── - # The server parses nextest JSONL and creates run.json automatically. - - name: Upload results - id: upload + # ── Push results to patchbay-serve ── + # Uploads testdir output + nextest JSON + a run.json manifest with CI context. + - name: Push results if: always() env: PATCHBAY_URL: ${{ secrets.PATCHBAY_URL }} PATCHBAY_API_KEY: ${{ secrets.PATCHBAY_API_KEY }} run: | set -euo pipefail - PROJECT="${{ github.event.repository.name }}" + TESTDIR="$(cargo metadata --format-version=1 --no-deps | jq -r .target_directory)/testdir-current" + [ ! 
-d "$TESTDIR" ] && echo "No testdir output, skipping" && exit 0 - # Collect output into a directory for upload - UPLOAD_DIR=$(mktemp -d) - # Include nextest results - cp .patchbay-results.jsonl "$UPLOAD_DIR/test-results.jsonl" 2>/dev/null || true - # Include test output (testdir, metrics, events) if present - if [ -d ".patchbay-testdir" ]; then - cp -rL .patchbay-testdir/* "$UPLOAD_DIR/" 2>/dev/null || true - fi + cp test-results.jsonl "$TESTDIR/test-results.jsonl" + jq -n \ + --arg project "${{ github.event.repository.name }}" \ + --arg branch "${{ github.head_ref || github.ref_name }}" \ + --arg commit "${{ github.sha }}" \ + --argjson pr ${{ github.event.pull_request.number || 'null' }} \ + --arg pr_url "${{ github.event.pull_request.html_url || '' }}" \ + --arg title "${{ github.event.pull_request.title || github.event.head_commit.message || '' }}" \ + --arg outcome "${{ steps.tests.outcome }}" \ + '{$project, $branch, $commit, $pr, $pr_url, $title, test_outcome: $outcome, created_at: (now | todate)}' \ + > "$TESTDIR/run.json" - # Upload as tar.gz - BODY=$(tar czf - -C "$UPLOAD_DIR" . | curl -sS -X POST \ - -H "Authorization: Bearer $PATCHBAY_API_KEY" \ - -H "Content-Type: application/gzip" \ - --data-binary @- \ - "$PATCHBAY_URL/api/push/$PROJECT") + RESPONSE=$(tar -czf - -C "$TESTDIR" . 
| \ + curl -s -w "\n%{http_code}" -X POST \ + -H "Authorization: Bearer $PATCHBAY_API_KEY" \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$PATCHBAY_URL/api/push/${{ github.event.repository.name }}") + HTTP_CODE=$(echo "$RESPONSE" | tail -1) + BODY=$(echo "$RESPONSE" | head -n -1) + [ "$HTTP_CODE" != "200" ] && echo "Push failed ($HTTP_CODE): $BODY" && exit 1 - VIEW_URL=$(echo "$BODY" | jq -r '.view_url // empty') - echo "view_url=$VIEW_URL" >> "$GITHUB_OUTPUT" - rm -rf "$UPLOAD_DIR" + echo "PATCHBAY_VIEW_URL=$(echo "$BODY" | jq -r '.view_url // empty')" >> "$GITHUB_ENV" # ── Post or update PR comment ── - name: Comment on PR - if: always() && github.event.pull_request - uses: actions/github-script@v7 - with: - script: | - const marker = ''; - const status = '${{ steps.tests.outcome }}'; - const icon = status === 'success' ? '✅' : '❌'; - const sha = '${{ github.sha }}'; - const shortSha = sha.slice(0, 7); - const commitUrl = `${{ github.server_url }}/${{ github.repository }}/commit/${sha}`; - const date = new Date().toISOString().replace('T', ' ').slice(0, 19) + ' UTC'; - const viewUrl = '${{ steps.upload.outputs.view_url }}'; - const viewLine = viewUrl ? 
` | [view results](${viewUrl})` : ''; - const body = [ - marker, - `${icon} **patchbay:** ${status}${viewLine}`, - `${date} · [\`${shortSha}\`](${commitUrl})`, - ].join('\n'); + if: always() && env.PATCHBAY_VIEW_URL + env: + GH_TOKEN: ${{ github.token }} + GH_REPO: ${{ github.repository }} + run: | + set -euo pipefail + SHA="${{ github.sha }}" + STATUS="${{ steps.tests.outcome }}" + ICON=$([[ "$STATUS" == "success" ]] && echo "✅" || echo "❌") + MARKER="" + printf -v BODY '%s\n%s\n%s' \ + "$MARKER" \ + "$ICON **patchbay:** $STATUS | $PATCHBAY_VIEW_URL" \ + "$(date -u '+%Y-%m-%d %H:%M:%S UTC') · [\`${SHA:0:7}\`](${{ github.server_url }}/$GH_REPO/commit/$SHA)" - const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, repo: context.repo.repo, - issue_number: context.issue.number, - }); - const existing = comments.find(c => c.body.includes(marker)); - const params = { owner: context.repo.owner, repo: context.repo.repo }; - if (existing) { - await github.rest.issues.updateComment({ ...params, comment_id: existing.id, body }); - } else { - await github.rest.issues.createComment({ ...params, issue_number: context.issue.number, body }); - } + PR_NUMBER="${{ github.event.pull_request.number }}" + if [ -z "$PR_NUMBER" ]; then + PR_NUMBER=$(gh pr list --head "${{ github.ref_name }}" --state open --json number -q '.[0].number') + fi + [ -z "$PR_NUMBER" ] && echo "No PR found, skipping comment" && exit 0 + + EXISTING=$(gh api --paginate "repos/$GH_REPO/issues/$PR_NUMBER/comments" --jq ".[] | select(.body | contains(\"$MARKER\")) | .id" | head -1) + if [ -n "$EXISTING" ]; then + gh api "repos/$GH_REPO/issues/comments/$EXISTING" -X PATCH -f body="$BODY" + else + gh pr comment "$PR_NUMBER" --body "$BODY" + fi diff --git a/patchbay-server/src/lib.rs b/patchbay-server/src/lib.rs index fc72f0c..d5f3f9d 100644 --- a/patchbay-server/src/lib.rs +++ b/patchbay-server/src/lib.rs @@ -109,16 +109,31 @@ pub fn discover_runs(base: &Path) -> 
anyhow::Result> { let mut runs = Vec::new(); scan_runs_recursive(base, base, 1, &mut runs)?; - // Attach run.json manifests from group directories. - let mut manifest_cache: std::collections::HashMap> = + // Attach run.json manifests: own dir first, then inherit from group dir. + let mut group_manifest_cache: std::collections::HashMap> = std::collections::HashMap::new(); for run in &mut runs { - let group_key = run.group.clone().unwrap_or_else(|| run.name.clone()); - let manifest = manifest_cache - .entry(group_key.clone()) - .or_insert_with(|| read_run_json(&base.join(&group_key))) - .clone(); - run.manifest = manifest; + // 1. Check if the run's own dir has run.json. + let own_manifest = read_run_json(&run.path); + if own_manifest.is_some() { + run.manifest = own_manifest; + } else { + // 2. Inherit from group dir (first path segment). + let group_key = run.group.clone().unwrap_or_else(|| run.name.clone()); + let group_manifest = group_manifest_cache + .entry(group_key.clone()) + .or_insert_with(|| { + let group_dir = base.join(&group_key); + let mut m = read_run_json(&group_dir); + // If group dir has both run.json AND test-results.jsonl, merge. + if let Some(ref mut manifest) = m { + merge_nextest_results(&group_dir, manifest); + } + m + }) + .clone(); + run.manifest = group_manifest; + } } runs.sort_by(|a, b| b.name.cmp(&a.name)); @@ -148,45 +163,30 @@ fn scan_runs_recursive( if !path.is_dir() { continue; } - let has_events = path.join(EVENTS_JSONL).exists(); - let has_run_json = path.join(RUN_JSON).exists(); - - if has_events || has_run_json { - // Check if this is a leaf run or a group with children. - let has_child_dirs = fs::read_dir(&path) - .map(|rd| { - rd.flatten() - .any(|e| e.file_type().map(|t| t.is_dir()).unwrap_or(false)) - }) - .unwrap_or(false); - - if has_run_json && !has_events && has_child_dirs { - // Group directory: has run.json, no events.jsonl, has subdirs. - // Recurse to find child runs that inherit this manifest. 
- scan_runs_recursive(root, &path, depth + 1, runs)?; - } else { - // Leaf run: has events.jsonl or run.json without children. - let name = path - .strip_prefix(root) - .unwrap_or(&path) - .to_string_lossy() - .into_owned(); - let (label, status) = read_run_metadata(&path); - let group = name - .split('/') - .next() - .filter(|first| *first != name) - .map(str::to_string); - runs.push(RunInfo { - name, - path, - label, - status, - group, - manifest: None, // populated after scan - }); - } + + if path.join(EVENTS_JSONL).exists() { + // Leaf run: directory contains events.jsonl (the only leaf indicator). + let name = path + .strip_prefix(root) + .unwrap_or(&path) + .to_string_lossy() + .into_owned(); + let (label, status) = read_run_metadata(&path); + let group = name + .split('/') + .next() + .filter(|first| *first != name) + .map(str::to_string); + runs.push(RunInfo { + name, + path, + label, + status, + group, + manifest: None, // populated after scan + }); } else { + // No events.jsonl → recurse (regardless of run.json presence). scan_runs_recursive(root, &path, depth + 1, runs)?; } } @@ -929,6 +929,7 @@ async fn scan_log_files(run_dir: &Path) -> Vec { pub use patchbay_utils::manifest::RunManifest; const RUN_JSON: &str = "run.json"; +const TEST_RESULTS_JSONL: &str = "test-results.jsonl"; fn read_run_json(dir: &Path) -> Option { let text = fs::read_to_string(dir.join(RUN_JSON)).ok()?; @@ -937,6 +938,23 @@ fn read_run_json(dir: &Path) -> Option { Some(manifest) } +/// If `test-results.jsonl` exists in `dir`, parse it and add any tests NOT +/// already present in the manifest's tests array (matched by name). 
+fn merge_nextest_results(dir: &Path, manifest: &mut RunManifest) { + let results_path = dir.join(TEST_RESULTS_JSONL); + let Ok(content) = fs::read_to_string(&results_path) else { + return; + }; + let nextest_results = patchbay_utils::manifest::parse_nextest_json(&content); + let existing: std::collections::HashSet<&str> = + manifest.tests.iter().map(|t| t.name.as_str()).collect(); + let new_tests: Vec<_> = nextest_results + .into_iter() + .filter(|r| !existing.contains(r.name.as_str())) + .collect(); + manifest.tests.extend(new_tests); +} + // ── Push endpoint ─────────────────────────────────────────────────── async fn push_run( @@ -1040,25 +1058,26 @@ async fn push_run( } } - // Auto-generate run.json from nextest JSONL if not already present. + // Merge test-results.jsonl into run.json if both exist, or create minimal manifest. let run_json_path = run_dir.join("run.json"); - if !run_json_path.exists() { - let nextest_jsonl = run_dir.join("test-results.jsonl"); - let results = if nextest_jsonl.exists() { - let content = std::fs::read_to_string(&nextest_jsonl).unwrap_or_default(); - patchbay_utils::manifest::parse_nextest_json(&content) - } else { - Vec::new() - }; - let pass = results - .iter() - .filter(|r| r.status == patchbay_utils::manifest::TestStatus::Pass) - .count() as u32; - let fail = results - .iter() - .filter(|r| r.status == patchbay_utils::manifest::TestStatus::Fail) - .count() as u32; - let total = results.len() as u32; + if run_json_path.exists() { + // run.json exists (CI created it). Merge nextest results if available. + if let Ok(text) = std::fs::read_to_string(&run_json_path) { + if let Ok(mut manifest) = + serde_json::from_str::(&text) + { + merge_nextest_results(&run_dir, &mut manifest); + if let Ok(json) = serde_json::to_string_pretty(&manifest) { + let _ = std::fs::write(&run_json_path, json); + } + } + } + } else { + // No run.json — backward compat: create a minimal manifest. 
+ tracing::warn!( + "push for project '{}' has no run.json; creating minimal manifest", + project + ); let manifest = patchbay_utils::manifest::RunManifest { kind: patchbay_utils::manifest::RunKind::Test, project: Some(project.clone()), @@ -1071,17 +1090,11 @@ async fn push_run( started_at: Some(chrono::Utc::now()), ended_at: None, runtime: None, - outcome: if fail > 0 { - Some("fail".into()) - } else if pass > 0 { - Some("pass".into()) - } else { - None - }, - pass: if total > 0 { Some(pass) } else { None }, - fail: if total > 0 { Some(fail) } else { None }, - total: if total > 0 { Some(total) } else { None }, - tests: results, + outcome: None, + pass: None, + fail: None, + total: None, + tests: Vec::new(), os: None, arch: None, patchbay_version: None, diff --git a/patchbay-utils/src/manifest.rs b/patchbay-utils/src/manifest.rs index 671fbc4..2178002 100644 --- a/patchbay-utils/src/manifest.rs +++ b/patchbay-utils/src/manifest.rs @@ -228,32 +228,55 @@ pub struct GitContext { /// Capture the current git HEAD commit, branch, and dirty state. pub fn git_context() -> GitContext { - let commit = Command::new("git") - .args(["rev-parse", "HEAD"]) + git_context_in_impl(None) +} + +/// Capture git context from a specific directory (e.g. a worktree). 
+pub fn git_context_in(dir: &Path) -> GitContext { + git_context_in_impl(Some(dir)) +} + +fn git_context_in_impl(dir: Option<&Path>) -> GitContext { + let mut cmd = Command::new("git"); + cmd.args(["rev-parse", "HEAD"]); + if let Some(d) = dir { + cmd.current_dir(d); + } + let commit = cmd .output() .ok() .filter(|o| o.status.success()) .and_then(|o| String::from_utf8(o.stdout).ok()) .map(|s| s.trim().to_string()); - let branch = Command::new("git") - .args(["rev-parse", "--abbrev-ref", "HEAD"]) + + let mut cmd = Command::new("git"); + cmd.args(["rev-parse", "--abbrev-ref", "HEAD"]); + if let Some(d) = dir { + cmd.current_dir(d); + } + let branch = cmd .output() .ok() .filter(|o| o.status.success()) .and_then(|o| String::from_utf8(o.stdout).ok()) .map(|s| s.trim().to_string()) .filter(|s| s != "HEAD"); + // Check both unstaged and staged changes. - let unstaged = !Command::new("git") - .args(["diff", "--quiet"]) - .status() - .map(|s| s.success()) - .unwrap_or(true); - let staged = !Command::new("git") - .args(["diff", "--cached", "--quiet"]) - .status() - .map(|s| s.success()) - .unwrap_or(true); + let mut cmd = Command::new("git"); + cmd.args(["diff", "--quiet"]); + if let Some(d) = dir { + cmd.current_dir(d); + } + let unstaged = !cmd.status().map(|s| s.success()).unwrap_or(true); + + let mut cmd = Command::new("git"); + cmd.args(["diff", "--cached", "--quiet"]); + if let Some(d) = dir { + cmd.current_dir(d); + } + let staged = !cmd.status().map(|s| s.success()).unwrap_or(true); + let dirty = unstaged || staged; GitContext { commit, @@ -533,6 +556,72 @@ test result: FAILED. 1 passed; 1 failed; 1 ignored; assert_eq!(results[2].duration, Some(Duration::from_secs(10))); } + #[test] + fn test_merge_nextest_results_into_manifest() { + // Create a manifest with 2 passing tests. 
+ let mut manifest = RunManifest { + kind: RunKind::Test, + project: Some("test-proj".to_string()), + commit: None, + branch: None, + dirty: false, + pr: None, + pr_url: None, + title: None, + started_at: None, + ended_at: None, + runtime: None, + outcome: Some("pass".to_string()), + pass: Some(2), + fail: Some(0), + total: Some(2), + tests: vec![ + TestResult { + name: "crate::test_alpha".to_string(), + status: TestStatus::Pass, + duration: Some(Duration::from_millis(100)), + dir: None, + }, + TestResult { + name: "crate::test_beta".to_string(), + status: TestStatus::Pass, + duration: Some(Duration::from_millis(200)), + dir: None, + }, + ], + os: None, + arch: None, + patchbay_version: None, + }; + + // Create nextest JSONL with 3 tests (2 pass, 1 fail). + // test_alpha and test_beta overlap; test_gamma is new and failed. + let nextest_jsonl = r#"{"type":"test","event":"ok","name":"crate::test_alpha","exec_time":0.1} +{"type":"test","event":"ok","name":"crate::test_beta","exec_time":0.2} +{"type":"test","event":"failed","name":"crate::test_gamma","exec_time":0.5}"#; + + let nextest_results = parse_nextest_json(nextest_jsonl); + assert_eq!(nextest_results.len(), 3); + + // Merge: add tests from nextest that are NOT already in manifest. + let existing: std::collections::HashSet<&str> = + manifest.tests.iter().map(|t| t.name.as_str()).collect(); + let new_tests: Vec<_> = nextest_results + .into_iter() + .filter(|r| !existing.contains(r.name.as_str())) + .collect(); + manifest.tests.extend(new_tests); + + // Manifest should now have 3 tests: the 2 original + the failed one added. 
+ assert_eq!(manifest.tests.len(), 3); + assert_eq!(manifest.tests[0].name, "crate::test_alpha"); + assert_eq!(manifest.tests[0].status, TestStatus::Pass); + assert_eq!(manifest.tests[1].name, "crate::test_beta"); + assert_eq!(manifest.tests[1].status, TestStatus::Pass); + assert_eq!(manifest.tests[2].name, "crate::test_gamma"); + assert_eq!(manifest.tests[2].status, TestStatus::Fail); + } + #[test] fn test_run_manifest_backward_compat() { // Old-style run.json with test_outcome instead of outcome diff --git a/ui/src/components/CompareView.tsx b/ui/src/components/CompareView.tsx index 5073c2f..2f793b0 100644 --- a/ui/src/components/CompareView.tsx +++ b/ui/src/components/CompareView.tsx @@ -2,7 +2,7 @@ import { useCallback, useEffect, useMemo, useState } from 'react' import { Link, useNavigate } from 'react-router-dom' import type { LabEvent, LabState } from '../devtools-types' import type { SimResults } from '../types' -import { fetchRunJson, fetchState, fetchEvents, fetchLogs, fetchResults } from '../api' +import { fetchRunJson, fetchRuns, fetchState, fetchEvents, fetchLogs, fetchResults } from '../api' import type { RunManifest, RunInfo, LogEntry } from '../api' import RunView from './RunView' import type { RunTab } from './RunView' @@ -17,8 +17,10 @@ interface TestDelta { left?: string right?: string delta: 'fixed' | 'REGRESS' | 'new' | 'removed' | '' - /** Relative directory for this test's output, if it exists on disk. */ - dir?: string + /** Left-side run path for this test (if available). */ + leftDir?: string + /** Right-side run path for this test (if available). */ + rightDir?: string } function computeDiff(left: RunManifest, right: RunManifest) { @@ -26,7 +28,8 @@ function computeDiff(left: RunManifest, right: RunManifest) { const rightTests = right.tests ?? 
[] const leftMap = new Map(leftTests.map(t => [t.name, t.status])) const rightMap = new Map(rightTests.map(t => [t.name, t.status])) - const dirMap = new Map([...leftTests, ...rightTests].filter((t): t is typeof t & { dir: string } => !!t.dir).map(t => [t.name, t.dir])) + const leftDirMap = new Map(leftTests.filter((t): t is typeof t & { dir: string } => !!t.dir).map(t => [t.name, t.dir])) + const rightDirMap = new Map(rightTests.filter((t): t is typeof t & { dir: string } => !!t.dir).map(t => [t.name, t.dir])) const allNames = new Set([...leftMap.keys(), ...rightMap.keys()]) const tests: TestDelta[] = [] @@ -43,7 +46,9 @@ function computeDiff(left: RunManifest, right: RunManifest) { else if (!l && r) { delta = 'new' } else if (l && !r) { delta = 'removed' } - tests.push({ name, left: l, right: r, delta, dir: dirMap.get(name) }) + const leftDir = leftDirMap.get(name) + const rightDir = rightDirMap.get(name) + tests.push({ name, left: l, right: r, delta, leftDir, rightDir }) } const score = fixes * SCORE_FIX + regressions * SCORE_REGRESS @@ -82,6 +87,30 @@ function groupCompareUrl(leftRun: string, rightRun: string): string { return `/compare/${encodeURIComponent(leftGroup)}/${encodeURIComponent(rightGroup)}` } +/** Build a synthetic manifest from a group's child runs. */ +function manifestFromGroup(groupName: string, runs: RunInfo[]): RunManifest { + const children = runs.filter(r => r.group === groupName) + const tests = children.map(r => { + // Strip group prefix to get the test name (e.g. "testdir-2/patchbay/holepunch" → "patchbay/holepunch") + const testName = r.name.startsWith(groupName + '/') ? r.name.slice(groupName.length + 1) : r.name + const status = r.status === 'success' ? 'pass' : r.status === 'error' ? 'fail' : (r.status ?? 
'pass') + return { name: testName, status, dir: r.name } + }) + const pass = tests.filter(t => t.status === 'pass').length + const fail = tests.filter(t => t.status === 'fail').length + // Use the group-level manifest for context if available + const groupManifest = children[0]?.manifest + return { + kind: groupManifest?.kind ?? 'test', + project: groupManifest?.project ?? null, + branch: groupManifest?.branch ?? null, + commit: groupManifest?.commit ?? null, + pass, fail, total: tests.length, + tests, + outcome: fail > 0 ? 'fail' : 'pass', + } as RunManifest +} + // ── Compare View (route: /compare/:left/:right) ── export default function CompareView({ leftRun, rightRun }: { leftRun: string; rightRun: string }) { @@ -92,12 +121,23 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri const [sharedTab, setSharedTab] = useState('logs') useEffect(() => { + let dead = false setLoading(true) - Promise.all([fetchRunJson(leftRun), fetchRunJson(rightRun)]).then(([l, r]) => { + // Try fetching as individual runs first. + Promise.all([fetchRunJson(leftRun), fetchRunJson(rightRun)]).then(async ([l, r]) => { + if (dead) return + // If both are null, these might be group names — build manifests from children. + if (!l || !r) { + const allRuns = await fetchRuns() + if (dead) return + if (!l) l = manifestFromGroup(leftRun, allRuns) + if (!r) r = manifestFromGroup(rightRun, allRuns) + } setLeftManifest(l) setRightManifest(r) setLoading(false) }) + return () => { dead = true } }, [leftRun, rightRun]) if (loading) { @@ -120,10 +160,8 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri const leftOutcome = leftManifest?.test_outcome ?? leftManifest?.outcome ?? null const rightOutcome = rightManifest?.test_outcome ?? rightManifest?.outcome ?? 
null - const handleTestClick = (dir: string) => { - const leftPath = `${leftRun}/${dir}` - const rightPath = `${rightRun}/${dir}` - navigate(`/compare/${encodeURIComponent(leftPath)}/${encodeURIComponent(rightPath)}`) + const handleTestClick = (leftDir: string, rightDir: string) => { + navigate(`/compare/${encodeURIComponent(leftDir)}/${encodeURIComponent(rightDir)}`) } return ( @@ -168,18 +206,19 @@ export default function CompareView({ leftRun, rightRun }: { leftRun: string; ri - {diff.tests.map(({ name, left, right, delta, dir }) => { + {diff.tests.map(({ name, left, right, delta, leftDir, rightDir }) => { let color = '' if (delta === 'fixed') color = 'var(--green)' else if (delta === 'REGRESS') color = 'var(--red)' + const canClick = !!(leftDir && rightDir) return ( - {dir ? ( + {canClick ? ( handleTestClick(dir)} + onClick={() => handleTestClick(leftDir, rightDir)} title={`Compare ${name} side-by-side`} > {name}