diff --git a/Cargo.lock b/Cargo.lock index 8a38542..d996bb1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,7 +112,7 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "avocadoctl" -version = "0.5.0" +version = "0.6.0" dependencies = [ "base64", "clap", diff --git a/Cargo.toml b/Cargo.toml index ff4aab5..165bc67 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "avocadoctl" -version = "0.5.0" +version = "0.6.0" edition = "2021" description = "Avocado Linux control CLI tool" authors = ["Avocado"] diff --git a/docs/features/varlink-ipc-interface.md b/docs/features/varlink-ipc-interface.md index f8d4745..7e7f224 100644 --- a/docs/features/varlink-ipc-interface.md +++ b/docs/features/varlink-ipc-interface.md @@ -172,9 +172,6 @@ After=avocadoctl.socket [Service] Type=simple ExecStart=/usr/bin/avocadoctl serve -ProtectHome=yes -ProtectSystem=full -PrivateTmp=yes NoNewPrivileges=yes [Install] diff --git a/src/commands/ext.rs b/src/commands/ext.rs index f2e34f0..b3d949b 100644 --- a/src/commands/ext.rs +++ b/src/commands/ext.rs @@ -24,30 +24,6 @@ struct Extension { merge_index: Option, } -/// Print a colored success message -fn print_colored_success(message: &str) { - // Use auto-detection but fallback gracefully - let color_choice = - if std::env::var("NO_COLOR").is_ok() || std::env::var("AVOCADO_TEST_MODE").is_ok() { - ColorChoice::Never - } else { - ColorChoice::Auto - }; - - let mut stdout = StandardStream::stdout(color_choice); - let mut color_spec = ColorSpec::new(); - color_spec.set_fg(Some(Color::Green)).set_bold(true); - - if stdout.set_color(&color_spec).is_ok() && color_choice != ColorChoice::Never { - let _ = write!(&mut stdout, "[SUCCESS]"); - let _ = stdout.reset(); - println!(" {message}"); - } else { - // Fallback for environments without color support - println!("[SUCCESS] {message}"); - } -} - /// Print a colored info message fn print_colored_info(message: &str) { // Use 
auto-detection but fallback gracefully @@ -402,7 +378,7 @@ pub(crate) fn merge_extensions_internal( handle_systemd_output("systemd-confext merge", &confext_result, output)?; // Process post-merge tasks only for enabled extensions - process_post_merge_tasks_for_extensions(&enabled_extensions)?; + process_post_merge_tasks_for_extensions(&enabled_extensions, output)?; Ok(()) } @@ -455,7 +431,7 @@ pub(crate) fn unmerge_extensions_internal_with_options( // Execute AVOCADO_ON_UNMERGE commands before unmerging extensions // These commands are executed while extensions are still merged - if let Err(e) = process_pre_unmerge_tasks() { + if let Err(e) = process_pre_unmerge_tasks(output) { output.progress(&format!( "Warning: Failed to process pre-unmerge tasks: {e}" )); @@ -479,7 +455,7 @@ pub(crate) fn unmerge_extensions_internal_with_options( // Run depmod after unmerge if requested if call_depmod { - run_depmod()?; + run_depmod(output)?; } // Unmount persistent loops if requested @@ -1007,6 +983,128 @@ pub fn status_extensions(config: &Config, output: &OutputManager) { } } +/// Collect extension status data for the varlink Status RPC. +/// +/// This gathers the same data as `show_enhanced_status` but returns it as +/// structured `ExtensionStatus` values instead of printing to stdout. 
+pub(crate) fn collect_extension_status( + config: &Config, +) -> Result, SystemdError> { + use crate::varlink::org_avocado_Extensions::ExtensionStatus; + + let base_dir = config.get_avocado_base_dir(); + let base_path = std::path::Path::new(&base_dir); + let active_manifest = crate::manifest::RuntimeManifest::load_active(base_path); + let manifest_extensions = active_manifest + .as_ref() + .map(|m| m.extensions.as_slice()) + .unwrap_or(&[]); + + let available_extensions = scan_extensions_from_all_sources_with_verbosity(false)?; + let mounted_sysext = get_mounted_systemd_extensions("systemd-sysext")?; + let mounted_confext = get_mounted_systemd_extensions("systemd-confext")?; + + // Collect all unique extension names (with versions if present) + let mut all_names = std::collections::HashSet::new(); + for ext in &available_extensions { + if let Some(ver) = &ext.version { + all_names.insert(format!("{}-{}", ext.name, ver)); + } else { + all_names.insert(ext.name.clone()); + } + } + for ext in &mounted_sysext { + all_names.insert(ext.name.clone()); + } + for ext in &mounted_confext { + all_names.insert(ext.name.clone()); + } + + let mut result: Vec = all_names + .into_iter() + .map(|ext_name| { + let available_ext = available_extensions.iter().find(|e| { + if let Some(ver) = &e.version { + format!("{}-{}", e.name, ver) == ext_name + } else { + e.name == ext_name + } + }); + + let is_sysext_mounted = mounted_sysext.iter().any(|e| e.name == ext_name); + let is_confext_mounted = mounted_confext.iter().any(|e| e.name == ext_name); + let is_merged = is_sysext_mounted || is_confext_mounted; + + let (is_sysext, is_confext) = if let Some(ext) = available_ext { + (ext.is_sysext, ext.is_confext) + } else { + (is_sysext_mounted, is_confext_mounted) + }; + + let origin = available_ext.map(get_extension_origin_short); + + let image_id_str = lookup_extension_short_id(&ext_name, manifest_extensions); + let image_id = if image_id_str == "-" { + None + } else { + Some(image_id_str) + 
}; + + let (name, version) = if let Some(ext) = available_ext { + (ext.name.clone(), ext.version.clone()) + } else { + (ext_name, None) + }; + + ExtensionStatus { + name, + version, + isSysext: is_sysext, + isConfext: is_confext, + isMerged: is_merged, + origin, + imageId: image_id, + } + }) + .collect(); + + // Sort descending by merge_index (highest priority / top layer first). + // Extensions without a merge_index sort to the bottom, then alphabetically. + result.sort_by(|a, b| { + let versioned_a = match &a.version { + Some(v) => format!("{}-{}", a.name, v), + None => a.name.clone(), + }; + let versioned_b = match &b.version { + Some(v) => format!("{}-{}", b.name, v), + None => b.name.clone(), + }; + let idx_a = available_extensions + .iter() + .find(|e| { + if let Some(ver) = &e.version { + format!("{}-{}", e.name, ver) == versioned_a + } else { + e.name == versioned_a + } + }) + .and_then(|e| e.merge_index); + let idx_b = available_extensions + .iter() + .find(|e| { + if let Some(ver) = &e.version { + format!("{}-{}", e.name, ver) == versioned_b + } else { + e.name == versioned_b + } + }) + .and_then(|e| e.merge_index); + idx_b.cmp(&idx_a).then_with(|| a.name.cmp(&b.name)) + }); + + Ok(result) +} + /// Show enhanced status with extension origins and HITL information pub(crate) fn show_enhanced_status( config: &Config, @@ -1086,7 +1184,7 @@ fn display_active_runtime(config: &Config, output: &OutputManager) { }; println!("Active Runtime:"); println!( - " {} v{} (build {short_id})", + " {} {} ({short_id})", manifest.runtime.name, manifest.runtime.version ); println!(" Built: {}", manifest.built_at); @@ -1095,7 +1193,7 @@ fn display_active_runtime(config: &Config, output: &OutputManager) { println!(" Build ID: {}", manifest.id); for ext in &manifest.extensions { let id_display = ext.image_id.as_deref().unwrap_or("?"); - println!(" - {} v{} ({})", ext.name, ext.version, id_display); + println!(" - {} {} ({})", ext.name, ext.version, id_display); } } println!(); @@ 
-1873,8 +1971,10 @@ fn scan_extensions_from_all_sources_with_verbosity( let used_manifest = if let Some(ref manifest) = active_manifest { if verbose { println!( - "Found active runtime manifest: {} v{} (build {})", - manifest.runtime.name, manifest.runtime.version, manifest.id + "Found active runtime manifest: {} {} ({})", + manifest.runtime.name, + manifest.runtime.version, + &manifest.id[..8.min(manifest.id.len())] ); } @@ -3437,6 +3537,7 @@ fn scan_directory_for_release_files( /// Process post-merge tasks for only the enabled extensions fn process_post_merge_tasks_for_extensions( enabled_extensions: &[Extension], + output: &OutputManager, ) -> Result<(), SystemdError> { let (on_merge_commands, modprobe_modules) = scan_release_files_for_enabled_extensions(enabled_extensions)?; @@ -3451,12 +3552,12 @@ fn process_post_merge_tasks_for_extensions( // Execute accumulated AVOCADO_ON_MERGE commands if !unique_commands.is_empty() { - run_avocado_on_merge_commands(&unique_commands)?; + run_avocado_on_merge_commands(&unique_commands, output)?; } // Call modprobe for each module after commands complete if !modprobe_modules.is_empty() { - run_modprobe(&modprobe_modules)?; + run_modprobe(&modprobe_modules, output)?; } Ok(()) @@ -3632,7 +3733,7 @@ fn scan_directory_for_on_unmerge_commands( } /// Process pre-unmerge tasks: execute AVOCADO_ON_UNMERGE commands -fn process_pre_unmerge_tasks() -> Result<(), SystemdError> { +fn process_pre_unmerge_tasks(output: &OutputManager) -> Result<(), SystemdError> { let on_unmerge_commands = scan_merged_extensions_for_on_unmerge_commands()?; // Remove duplicates while preserving order @@ -3645,7 +3746,7 @@ fn process_pre_unmerge_tasks() -> Result<(), SystemdError> { // Execute accumulated AVOCADO_ON_UNMERGE commands if !unique_commands.is_empty() { - run_avocado_on_unmerge_commands(&unique_commands)?; + run_avocado_on_unmerge_commands(&unique_commands, output)?; } Ok(()) @@ -3706,8 +3807,8 @@ pub fn parse_avocado_enable_services(content: 
&str) -> Vec { } /// Run the depmod command -fn run_depmod() -> Result<(), SystemdError> { - print_colored_info("Running depmod to update kernel module dependencies..."); +fn run_depmod(out: &OutputManager) -> Result<(), SystemdError> { + out.log_info("Running depmod to update kernel module dependencies..."); // Check if we're in test mode and should use mock commands let command_name = if std::env::var("AVOCADO_TEST_MODE").is_ok() { @@ -3734,17 +3835,17 @@ fn run_depmod() -> Result<(), SystemdError> { }); } - print_colored_success("depmod completed successfully."); + out.log_success("depmod completed successfully."); Ok(()) } /// Run modprobe for a list of modules -fn run_modprobe(modules: &[String]) -> Result<(), SystemdError> { +fn run_modprobe(modules: &[String], out: &OutputManager) -> Result<(), SystemdError> { if modules.is_empty() { return Ok(()); } - print_colored_info(&format!("Loading kernel modules: {}", modules.join(", "))); + out.log_info(&format!("Loading kernel modules: {}", modules.join(", "))); for module in modules { // Check if we're in test mode and should use mock commands @@ -3770,16 +3871,16 @@ fn run_modprobe(modules: &[String]) -> Result<(), SystemdError> { // Don't fail the entire operation for individual module failures // Just log the warning and continue with other modules } else { - print_colored_success(&format!("Module {module} loaded successfully.")); + out.log_success(&format!("Module {module} loaded successfully.")); } } - print_colored_success("Module loading completed."); + out.log_success("Module loading completed."); Ok(()) } /// Execute a single command with its arguments -fn execute_single_command(command_str: &str) -> Result<(), SystemdError> { +fn execute_single_command(command_str: &str, out: &OutputManager) -> Result<(), SystemdError> { // Parse the command string to handle commands with arguments // Commands may be quoted or contain spaces let parts: Vec<&str> = if command_str.starts_with('"') && 
command_str.ends_with('"') { @@ -3834,22 +3935,25 @@ fn execute_single_command(command_str: &str) -> Result<(), SystemdError> { // Log warning but don't fail the entire operation // This matches the behavior of modprobe failures } else { - print_colored_success(&format!("Command '{command_str}' completed successfully")); + out.log_success(&format!("Command '{command_str}' completed successfully")); } Ok(()) } /// Run accumulated AVOCADO_ON_MERGE commands -fn run_avocado_on_merge_commands(commands: &[String]) -> Result<(), SystemdError> { +fn run_avocado_on_merge_commands( + commands: &[String], + out: &OutputManager, +) -> Result<(), SystemdError> { if commands.is_empty() { return Ok(()); } - print_colored_info(&format!("Executing {} post-merge commands", commands.len())); + out.log_info(&format!("Executing {} post-merge commands", commands.len())); for command_str in commands { - print_colored_info(&format!("Running command: {command_str}")); + out.log_info(&format!("Running command: {command_str}")); // Check if the command contains shell operators like semicolons if command_str.contains(';') { @@ -3858,33 +3962,36 @@ fn run_avocado_on_merge_commands(commands: &[String]) -> Result<(), SystemdError for sub_command in sub_commands { if !sub_command.is_empty() { - print_colored_info(&format!("Running sub-command: {sub_command}")); - execute_single_command(sub_command)?; + out.log_info(&format!("Running sub-command: {sub_command}")); + execute_single_command(sub_command, out)?; } } } else { // Execute as a single command - execute_single_command(command_str)?; + execute_single_command(command_str, out)?; } } - print_colored_success("Post-merge command execution completed."); + out.log_success("Post-merge command execution completed."); Ok(()) } /// Run accumulated AVOCADO_ON_UNMERGE commands -fn run_avocado_on_unmerge_commands(commands: &[String]) -> Result<(), SystemdError> { +fn run_avocado_on_unmerge_commands( + commands: &[String], + out: &OutputManager, +) -> 
Result<(), SystemdError> { if commands.is_empty() { return Ok(()); } - print_colored_info(&format!( + out.log_info(&format!( "Executing {} pre-unmerge commands", commands.len() )); for command_str in commands { - print_colored_info(&format!("Running command: {command_str}")); + out.log_info(&format!("Running command: {command_str}")); // Check if the command contains shell operators like semicolons if command_str.contains(';') { @@ -3893,17 +4000,17 @@ fn run_avocado_on_unmerge_commands(commands: &[String]) -> Result<(), SystemdErr for sub_command in sub_commands { if !sub_command.is_empty() { - print_colored_info(&format!("Running sub-command: {sub_command}")); - execute_single_command(sub_command)?; + out.log_info(&format!("Running sub-command: {sub_command}")); + execute_single_command(sub_command, out)?; } } } else { // Execute as a single command - execute_single_command(command_str)?; + execute_single_command(command_str, out)?; } } - print_colored_success("Pre-unmerge command execution completed."); + out.log_success("Pre-unmerge command execution completed."); Ok(()) } diff --git a/src/commands/runtime.rs b/src/commands/runtime.rs index f42ff37..dee711a 100644 --- a/src/commands/runtime.rs +++ b/src/commands/runtime.rs @@ -88,7 +88,14 @@ fn handle_add(matches: &ArgMatches, config: &Config, output: &OutputManager) { println!(" Adding runtime from {url}"); println!(); - match update::perform_update(url, base_path, output.is_verbose()) { + let auth_token = std::env::var("AVOCADO_TUF_AUTH_TOKEN").ok(); + match update::perform_update( + url, + base_path, + auth_token.as_deref(), + None, + output.is_verbose(), + ) { Ok(()) => { crate::commands::ext::refresh_extensions(config, output); println!(); @@ -138,11 +145,10 @@ fn handle_add(matches: &ArgMatches, config: &Config, output: &OutputManager) { std::process::exit(1); } + let short_id = &manifest.id[..8.min(manifest.id.len())]; println!( - " Activated runtime: {} v{} ({})", - manifest.runtime.name, - 
manifest.runtime.version, - &manifest.id[..8.min(manifest.id.len())] + " Activated runtime: {} {} ({short_id})", + manifest.runtime.name, manifest.runtime.version, ); crate::commands::ext::refresh_extensions(config, output); @@ -167,14 +173,13 @@ fn handle_remove(matches: &ArgMatches, config: &Config, output: &OutputManager) std::process::exit(1); } + let short_id = &matched.id[..8.min(matched.id.len())]; println!(); output.success( "Runtime Remove", &format!( - "Removed runtime: {} v{} ({})", - matched.runtime.name, - matched.runtime.version, - &matched.id[..8.min(matched.id.len())] + "Removed runtime: {} {} ({short_id})", + matched.runtime.name, matched.runtime.version, ), ); } @@ -190,14 +195,14 @@ fn handle_activate(matches: &ArgMatches, config: &Config, output: &OutputManager None => return, }; + let short_id = &matched.id[..8.min(matched.id.len())]; + if is_active { output.info( "Runtime Activate", &format!( - "Runtime {} v{} ({}) is already active.", - matched.runtime.name, - matched.runtime.version, - &matched.id[..8.min(matched.id.len())] + "Runtime {} {} ({short_id}) is already active.", + matched.runtime.name, matched.runtime.version, ), ); return; @@ -209,10 +214,8 @@ fn handle_activate(matches: &ArgMatches, config: &Config, output: &OutputManager } println!( - " Activated runtime: {} v{} ({})", - matched.runtime.name, - matched.runtime.version, - &matched.id[..8.min(matched.id.len())] + " Activated runtime: {} {} ({short_id})", + matched.runtime.name, matched.runtime.version, ); crate::commands::ext::refresh_extensions(config, output); @@ -220,10 +223,8 @@ fn handle_activate(matches: &ArgMatches, config: &Config, output: &OutputManager output.success( "Runtime Activate", &format!( - "Switched to runtime: {} v{} ({})", - matched.runtime.name, - matched.runtime.version, - &matched.id[..8.min(matched.id.len())] + "Switched to runtime: {} {} ({short_id})", + matched.runtime.name, matched.runtime.version, ), ); } @@ -260,10 +261,10 @@ fn 
handle_inspect(matches: &ArgMatches, config: &Config, output: &OutputManager) println!(); println!( - " Runtime: {} v{}{active_marker}", + " Runtime: {} {} ({short_id}){active_marker}", matched.runtime.name, matched.runtime.version ); - println!(" Build ID: {} ({short_id})", matched.id); + println!(" Build ID: {}", matched.id); println!(" Built: {}", matched.built_at); println!(" Manifest: v{}", matched.manifest_version); println!(); @@ -309,7 +310,7 @@ fn handle_inspect(matches: &ArgMatches, config: &Config, output: &OutputManager) println!(" Full image IDs:"); for ext in &matched.extensions { let id_display = ext.image_id.as_deref().unwrap_or("-"); - println!(" {} v{}: {}", ext.name, ext.version, id_display); + println!(" {} {}: {}", ext.name, ext.version, id_display); } println!(); } @@ -341,12 +342,10 @@ fn resolve_runtime_id<'a>( .iter() .map(|(m, active)| { let marker = if *active { " (active)" } else { "" }; + let sid = &m.id[..8.min(m.id.len())]; format!( - " {} {} v{}{}", - &m.id[..8.min(m.id.len())], - m.runtime.name, - m.runtime.version, - marker + " {} {} ({sid}){}", + m.runtime.name, m.runtime.version, marker ) }) .collect(); @@ -396,25 +395,21 @@ fn list_runtimes(config: &Config, output: &OutputManager) { } println!(); - println!( - " {:<16} {:<12} {:<10} {:<24} STATUS", - "NAME", "VERSION", "BUILD ID", "BUILT AT" - ); + println!(" {:<32} {:<12} BUILT AT", "RUNTIME", "ACTIVE"); for (manifest, is_active) in &runtimes { - let short_id = if manifest.id.len() >= 8 { - &manifest.id[..8] - } else { - &manifest.id - }; + let short_id = &manifest.id[..8.min(manifest.id.len())]; + let runtime_label = format!( + "{} {} ({short_id})", + manifest.runtime.name, manifest.runtime.version + ); let built_at_display = manifest.built_at.replace('T', " ").replace('Z', ""); - - let status = if *is_active { "active" } else { "" }; + let status = if *is_active { "* active" } else { "" }; println!( - " {:<16} {:<12} {:<10} {:<24} {}", - manifest.runtime.name, 
manifest.runtime.version, short_id, built_at_display, status + " {:<32} {:<12} {}", + runtime_label, status, built_at_display ); } diff --git a/src/config.rs b/src/config.rs index b96f52c..9158818 100644 --- a/src/config.rs +++ b/src/config.rs @@ -20,6 +20,10 @@ pub struct AvocadoConfig { /// Override for the avocado base directory (default: /var/lib/avocado) #[serde(default, skip_serializing_if = "Option::is_none")] pub runtimes_dir: Option, + /// Varlink socket address for daemon communication + /// (default: unix:/run/avocado/avocadoctl.sock) + #[serde(default, skip_serializing_if = "Option::is_none")] + pub socket: Option, } /// Extension configuration @@ -47,6 +51,7 @@ impl Default for Config { mutable: None, }, runtimes_dir: None, + socket: None, }, } } @@ -81,6 +86,15 @@ impl Config { Self::load(config_path) } + /// Get the varlink socket address for daemon communication. + /// Resolution order: config file → hardcoded default. + pub fn socket_address(&self) -> &str { + self.avocado + .socket + .as_deref() + .unwrap_or("unix:/run/avocado/avocadoctl.sock") + } + /// Get the extensions directory, checking environment variable first pub fn get_extensions_dir(&self) -> String { // Environment variable takes precedence (for testing) diff --git a/src/main.rs b/src/main.rs index 565a55f..b595e61 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,12 +6,20 @@ pub mod service; pub mod staging; pub mod update; mod varlink; +mod varlink_client; mod varlink_server; use clap::{Arg, Command}; use commands::{ext, hitl, root_authority, runtime}; use config::Config; use output::OutputManager; +use varlink::org_avocado_Extensions as vl_ext; +use varlink::org_avocado_Hitl as vl_hitl; +use varlink::org_avocado_RootAuthority as vl_ra; +use varlink::org_avocado_Runtimes as vl_rt; +use varlink_client::{ + ExtClientInterface, HitlClientInterface, RaClientInterface, RtClientInterface, +}; fn main() { let app = Command::new(env!("CARGO_PKG_NAME")) @@ -43,10 +51,17 @@ fn main() { 
.global(true) .default_value("table"), ) - .subcommand(ext::create_command()) - .subcommand(hitl::create_command()) - .subcommand(root_authority::create_command()) - .subcommand(runtime::create_command()) + .arg( + Arg::new("socket") + .long("socket") + .value_name("ADDRESS") + .help("Varlink daemon socket address (overrides config)") + .global(true), + ) + .subcommand(commands::ext::create_command()) + .subcommand(commands::hitl::create_command()) + .subcommand(commands::root_authority::create_command()) + .subcommand(commands::runtime::create_command()) .subcommand( Command::new("status").about("Show overall system status including extensions"), ) @@ -144,19 +159,262 @@ fn main() { } }; + // Resolve socket address: CLI flag > config > default + let socket_address = matches + .get_one::("socket") + .cloned() + .unwrap_or_else(|| config.socket_address().to_string()); + + // In test mode, skip the varlink daemon and call service functions directly. + // This allows existing integration tests (which use AVOCADO_TEST_MODE=1 with mock + // executables) to keep running without needing a live daemon. 
+ if std::env::var("AVOCADO_TEST_MODE").is_ok() { + handle_direct(&matches, &config, &output); + return; + } + match matches.subcommand() { + // ── ext subcommands ────────────────────────────────────────────────── Some(("ext", ext_matches)) => { - ext::handle_command(ext_matches, &config, &output); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + match ext_matches.subcommand() { + Some(("list", _)) => { + let mut client = vl_ext::VarlinkClient::new(conn); + match client.list().call() { + Ok(reply) => varlink_client::print_extensions(&reply.extensions, &output), + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + Some(("merge", _)) => { + let mut client = vl_ext::VarlinkClient::new(conn); + match client.merge().more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Merge", "Extensions merged successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + Some(("unmerge", unmerge_matches)) => { + let unmount = unmerge_matches.get_flag("unmount"); + let mut client = vl_ext::VarlinkClient::new(conn); + match client.unmerge(Some(unmount)).more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Unmerge", "Extensions unmerged successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + Some(("refresh", _)) => { + let mut client = vl_ext::VarlinkClient::new(conn); + match client.refresh().more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) 
=> varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Refresh", "Extensions refreshed successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + Some(("status", _)) => { + let mut client = vl_ext::VarlinkClient::new(conn); + match client.status().call() { + Ok(reply) => { + varlink_client::print_extension_status(&reply.extensions, &output) + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + _ => { + println!("Use 'avocadoctl ext --help' for available extension commands"); + } + } } + + // ── hitl subcommands ───────────────────────────────────────────────── Some(("hitl", hitl_matches)) => { - hitl::handle_command(hitl_matches, &output); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + match hitl_matches.subcommand() { + Some(("mount", mount_matches)) => { + let server_ip = mount_matches + .get_one::("server-ip") + .expect("server-ip is required") + .clone(); + let server_port = mount_matches.get_one::("server-port").cloned(); + let extensions: Vec = mount_matches + .get_many::("extension") + .expect("at least one extension is required") + .cloned() + .collect(); + let mut client = vl_hitl::VarlinkClient::new(conn); + match client.mount(server_ip, server_port, extensions).call() { + Ok(_) => output.success("HITL Mount", "Extensions mounted successfully"), + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + Some(("unmount", unmount_matches)) => { + let extensions: Vec = unmount_matches + .get_many::("extension") + .expect("at least one extension is required") + .cloned() + .collect(); + let mut client = vl_hitl::VarlinkClient::new(conn); + match client.unmount(extensions).call() { + Ok(_) => { + output.success("HITL Unmount", "Extensions unmounted successfully") + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + _ => { + println!("Use 'avocadoctl hitl --help' for 
available HITL commands"); + } + } } + + // ── root-authority ─────────────────────────────────────────────────── Some(("root-authority", _)) => { - root_authority::handle_command(&config, &output); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + let mut client = vl_ra::VarlinkClient::new(conn); + match client.show().call() { + Ok(reply) => varlink_client::print_root_authority(&reply.authority, &output), + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } } + + // ── runtime subcommands ────────────────────────────────────────────── Some(("runtime", runtime_matches)) => { - runtime::handle_command(runtime_matches, &config, &output); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + match runtime_matches.subcommand() { + Some(("list", _)) => { + let mut client = vl_rt::VarlinkClient::new(conn); + match client.list().call() { + Ok(reply) => varlink_client::print_runtimes(&reply.runtimes, &output), + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + Some(("add", add_matches)) => { + if let Some(url) = add_matches.get_one::("url") { + let auth_token = std::env::var("AVOCADO_TUF_AUTH_TOKEN").ok(); + let mut client = vl_rt::VarlinkClient::new(conn); + match client.add_from_url(url.clone(), auth_token, None).more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Runtime Add", "Runtime added successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } else if let Some(manifest) = add_matches.get_one::("manifest") { + let mut client = vl_rt::VarlinkClient::new(conn); + match client.add_from_manifest(manifest.clone()).more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + 
Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Runtime Add", "Runtime added successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + json_ok(&output); + } + Some(("remove", remove_matches)) => { + let id = remove_matches + .get_one::("id") + .expect("id is required") + .clone(); + let mut client = vl_rt::VarlinkClient::new(conn); + match client.remove(id).call() { + Ok(_) => output.success("Runtime Remove", "Runtime removed successfully"), + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + Some(("activate", activate_matches)) => { + let id = activate_matches + .get_one::("id") + .expect("id is required") + .clone(); + let mut client = vl_rt::VarlinkClient::new(conn); + match client.activate(id).more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Runtime Activate", "Runtime activated successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + Some(("inspect", inspect_matches)) => { + let id = inspect_matches + .get_one::("id") + .expect("id is required") + .clone(); + let mut client = vl_rt::VarlinkClient::new(conn); + match client.inspect(id).call() { + Ok(reply) => varlink_client::print_runtime_detail(&reply.runtime, &output), + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + _ => { + println!("Use 'runtime list' to see available runtimes."); + println!("Run 'avocadoctl runtime --help' for more information."); + } + } } + + // ── serve (starts the daemon — direct, no varlink client) ──────────── Some(("serve", serve_matches)) => { let address = serve_matches .get_one::("address") @@ -166,23 +424,187 @@ fn main() { std::process::exit(1); } } + + // ── status (top-level) 
─────────────────────────────────────────────── Some(("status", _)) => { - show_system_status(&config, &output); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + let mut client = vl_ext::VarlinkClient::new(conn); + match client.status().call() { + Ok(reply) => { + output.status_header("System Status"); + varlink_client::print_extension_status(&reply.extensions, &output); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } } - // Top-level command aliases + + // ── Top-level aliases ──────────────────────────────────────────────── Some(("merge", _)) => { - ext::merge_extensions_direct(&output); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + let mut client = vl_ext::VarlinkClient::new(conn); + match client.merge().more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Merge", "Extensions merged successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } json_ok(&output); } Some(("unmerge", unmerge_matches)) => { let unmount = unmerge_matches.get_flag("unmount"); - ext::unmerge_extensions_direct(unmount, &output); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + let mut client = vl_ext::VarlinkClient::new(conn); + match client.unmerge(Some(unmount)).more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Unmerge", "Extensions unmerged successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } json_ok(&output); } Some(("refresh", _)) => { - ext::refresh_extensions_direct(&output); + let conn = varlink_client::connect_or_exit(&socket_address, 
&output); + let mut client = vl_ext::VarlinkClient::new(conn); + match client.refresh().more() { + Ok(iter) => { + for reply in iter { + match reply { + Ok(r) if !r.done => { + varlink_client::print_single_log(&r.message, &output) + } + Ok(_) => {} + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + } + output.success("Refresh", "Extensions refreshed successfully"); + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } json_ok(&output); } + Some(("enable", enable_matches)) => { + let os_release = enable_matches.get_one::("os_release").cloned(); + let extensions: Vec = enable_matches + .get_many::("extensions") + .unwrap() + .cloned() + .collect(); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + let mut client = vl_ext::VarlinkClient::new(conn); + match client.enable(extensions, os_release).call() { + Ok(reply) => { + if !output.is_json() { + output.success( + "Enable", + &format!( + "{} extension(s) enabled, {} failed", + reply.enabled, reply.failed + ), + ); + } + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + Some(("disable", disable_matches)) => { + let os_release = disable_matches.get_one::("os_release").cloned(); + let all = disable_matches.get_flag("all"); + let extensions: Option> = disable_matches + .get_many::("extensions") + .map(|values| values.cloned().collect()); + let conn = varlink_client::connect_or_exit(&socket_address, &output); + let mut client = vl_ext::VarlinkClient::new(conn); + match client.disable(extensions, Some(all), os_release).call() { + Ok(reply) => { + if !output.is_json() { + output.success( + "Disable", + &format!( + "{} extension(s) disabled, {} failed", + reply.disabled, reply.failed + ), + ); + } + } + Err(e) => varlink_client::exit_with_rpc_error(e, &output), + } + json_ok(&output); + } + + _ => { + println!( + "{} - {}", + env!("CARGO_PKG_NAME"), + env!("CARGO_PKG_DESCRIPTION") + ); + println!("Use --help for more information 
or --version for version details"); + } + } +} + +/// Direct dispatch used when AVOCADO_TEST_MODE is set. +/// Calls service functions directly, bypassing the varlink daemon. +/// This keeps existing integration tests (with mock executables) working +/// without needing a live daemon process. +fn handle_direct(matches: &clap::ArgMatches, config: &Config, output: &OutputManager) { + match matches.subcommand() { + Some(("ext", ext_matches)) => { + ext::handle_command(ext_matches, config, output); + } + Some(("hitl", hitl_matches)) => { + hitl::handle_command(hitl_matches, output); + } + Some(("root-authority", _)) => { + root_authority::handle_command(config, output); + } + Some(("runtime", runtime_matches)) => { + runtime::handle_command(runtime_matches, config, output); + } + Some(("serve", serve_matches)) => { + let address = serve_matches + .get_one::("address") + .expect("address has a default value"); + if let Err(e) = varlink_server::run_server(address, config.clone()) { + output.error("Server Error", &format!("Varlink server failed: {e}")); + std::process::exit(1); + } + } + Some(("status", _)) => { + output.status_header("System Status"); + ext::status_extensions(config, output); + } + Some(("merge", _)) => { + ext::merge_extensions_direct(output); + json_ok(output); + } + Some(("unmerge", unmerge_matches)) => { + let unmount = unmerge_matches.get_flag("unmount"); + ext::unmerge_extensions_direct(unmount, output); + json_ok(output); + } + Some(("refresh", _)) => { + ext::refresh_extensions_direct(output); + json_ok(output); + } Some(("enable", enable_matches)) => { let os_release = enable_matches .get_one::("os_release") @@ -192,8 +614,8 @@ fn main() { .unwrap() .map(|s| s.as_str()) .collect(); - ext::enable_extensions(os_release, &extensions, &config, &output); - json_ok(&output); + ext::enable_extensions(os_release, &extensions, config, output); + json_ok(output); } Some(("disable", disable_matches)) => { let os_release = disable_matches @@ -203,8 +625,8 
@@ fn main() { let extensions: Option> = disable_matches .get_many::("extensions") .map(|values| values.map(|s| s.as_str()).collect()); - ext::disable_extensions(os_release, extensions.as_deref(), all, &config, &output); - json_ok(&output); + ext::disable_extensions(os_release, extensions.as_deref(), all, config, output); + json_ok(output); } _ => { println!( @@ -217,12 +639,6 @@ fn main() { } } -/// Show overall system status including extensions -fn show_system_status(config: &Config, output: &OutputManager) { - output.info("System Status", "Checking overall system status"); - ext::status_extensions(config, output); -} - /// Emit a JSON success result when in JSON mode (no-op otherwise). /// Action commands that exit(1) on failure never reach this, /// so it only runs on success. diff --git a/src/manifest.rs b/src/manifest.rs index 65ef7fb..329930c 100644 --- a/src/manifest.rs +++ b/src/manifest.rs @@ -109,12 +109,11 @@ impl RuntimeManifest { } } - results.sort_by(|(a, _), (b, _)| { - a.runtime - .name - .cmp(&b.runtime.name) - .then_with(|| a.runtime.version.cmp(&b.runtime.version)) - .then_with(|| b.built_at.cmp(&a.built_at)) // newest first + results.sort_by(|(a, a_active), (b, b_active)| { + // Active runtime always first, then newest-built first + b_active + .cmp(a_active) + .then_with(|| b.built_at.cmp(&a.built_at)) }); results @@ -207,13 +206,12 @@ mod tests { let list = RuntimeManifest::list_all(tmp.path()); assert_eq!(list.len(), 3); - // Same name+version group: newest first + // Active first, then newest-built first assert_eq!(list[0].0.id, "bbb"); assert!(list[0].1); // active - assert_eq!(list[1].0.id, "aaa"); + assert_eq!(list[1].0.id, "aaa"); // 2026-02-17 assert!(!list[1].1); - // Different version - assert_eq!(list[2].0.id, "ccc"); + assert_eq!(list[2].0.id, "ccc"); // 2026-02-16 assert!(!list[2].1); } diff --git a/src/output.rs b/src/output.rs index f4326cf..7bc9dcf 100644 --- a/src/output.rs +++ b/src/output.rs @@ -4,18 +4,36 @@ //! 
handling verbosity levels and formatting consistently across all commands. use std::io::Write; +use std::sync::mpsc::SyncSender; use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; /// Output manager that handles verbosity and formatting consistently pub struct OutputManager { verbose: bool, json: bool, + /// When set, messages are streamed through this channel as they are produced. + /// Used by the varlink streaming handlers for real-time progress. + sender: Option>, } impl OutputManager { /// Create a new output manager with the specified verbosity and format level pub fn new(verbose: bool, json: bool) -> Self { - Self { verbose, json } + Self { + verbose, + json, + sender: None, + } + } + + /// Create an output manager that streams messages through a channel. + /// Each `log_info` / `log_success` call sends a message immediately. + pub fn new_streaming(sender: SyncSender) -> Self { + Self { + verbose: false, + json: false, + sender: Some(sender), + } } /// Whether output should be machine-readable JSON @@ -23,14 +41,18 @@ impl OutputManager { self.json } + /// Determine the color choice for terminal output + fn color_choice() -> ColorChoice { + if std::env::var("NO_COLOR").is_ok() || std::env::var("AVOCADO_TEST_MODE").is_ok() { + ColorChoice::Never + } else { + ColorChoice::Auto + } + } + /// Print a colored prefix with message fn print_colored_prefix(&self, prefix: &str, color: Color, message: &str) { - let color_choice = - if std::env::var("NO_COLOR").is_ok() || std::env::var("AVOCADO_TEST_MODE").is_ok() { - ColorChoice::Never - } else { - ColorChoice::Auto - }; + let color_choice = Self::color_choice(); let mut stdout = StandardStream::stdout(color_choice); let mut color_spec = ColorSpec::new(); @@ -54,12 +76,7 @@ impl OutputManager { operation: &str, message: &str, ) { - let color_choice = - if std::env::var("NO_COLOR").is_ok() || std::env::var("AVOCADO_TEST_MODE").is_ok() { - ColorChoice::Never - } else { - ColorChoice::Auto - }; + 
let color_choice = Self::color_choice(); let mut stdout = StandardStream::stdout(color_choice); let mut color_spec = ColorSpec::new(); @@ -93,12 +110,7 @@ impl OutputManager { /// Print an error message /// Always shows detailed error information for developers pub fn error(&self, operation: &str, message: &str) { - let color_choice = - if std::env::var("NO_COLOR").is_ok() || std::env::var("AVOCADO_TEST_MODE").is_ok() { - ColorChoice::Never - } else { - ColorChoice::Auto - }; + let color_choice = Self::color_choice(); let mut stderr = StandardStream::stderr(color_choice); let mut color_spec = ColorSpec::new(); @@ -184,4 +196,29 @@ impl OutputManager { } println!("{message}"); } + + /// Log an informational message. + /// + /// In normal mode: prints to stdout with color (always, regardless of verbosity). + /// In streaming mode: sends through channel immediately. + /// In capture mode: captures to the message buffer for returning via varlink. + pub fn log_info(&self, message: &str) { + if let Some(ref tx) = self.sender { + let _ = tx.send(format!("[INFO] {message}")); + } else if !self.json { + self.print_colored_prefix("INFO", Color::Blue, message); + } + } + + /// Log a success message. + /// + /// In normal mode: prints to stdout with color (always, regardless of verbosity). + /// In streaming mode: sends through channel immediately. + pub fn log_success(&self, message: &str) { + if let Some(ref tx) = self.sender { + let _ = tx.send(format!("[SUCCESS] {message}")); + } else if !self.json { + self.print_colored_prefix("SUCCESS", Color::Green, message); + } + } } diff --git a/src/service/ext.rs b/src/service/ext.rs index 8a56386..c1df3d4 100644 --- a/src/service/ext.rs +++ b/src/service/ext.rs @@ -6,18 +6,21 @@ use crate::service::types::{DisableResult, EnableResult, ExtensionInfo}; use std::fs; use std::os::unix::fs as unix_fs; use std::path::Path; - -/// A quiet OutputManager for service-layer calls (no terminal output). 
-fn quiet_output() -> OutputManager { - OutputManager::new(false, false) -} +use std::sync::mpsc; +use std::thread; /// List all available extensions from the extensions directory. pub fn list_extensions(config: &Config) -> Result, AvocadoError> { let extensions_path = config.get_extensions_dir(); - let entries = fs::read_dir(&extensions_path).map_err(|e| AvocadoError::ConfigurationError { - message: format!("Cannot read extensions directory '{extensions_path}': {e}"), - })?; + let entries = match fs::read_dir(&extensions_path) { + Ok(e) => e, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()), + Err(e) => { + return Err(AvocadoError::ConfigurationError { + message: format!("Cannot read extensions directory '{extensions_path}': {e}"), + }) + } + }; let mut result = Vec::new(); for entry in entries.flatten() { @@ -50,34 +53,106 @@ pub fn list_extensions(config: &Config) -> Result, AvocadoErr Ok(result) } -/// Merge extensions using systemd-sysext and systemd-confext. -pub fn merge_extensions(config: &Config) -> Result<(), AvocadoError> { - let output = quiet_output(); - ext::merge_extensions_internal(config, &output).map_err(AvocadoError::from) +// ── Streaming service functions ────────────────────────────────────────────── + +/// Merge extensions with streaming output. +/// Returns a receiver that yields log messages as they are produced, +/// and a join handle for the worker thread. +pub fn merge_extensions_streaming( + config: &Config, +) -> ( + mpsc::Receiver, + thread::JoinHandle>, +) { + let (tx, rx) = mpsc::sync_channel(4); + let config = config.clone(); + let handle = thread::spawn(move || { + let output = OutputManager::new_streaming(tx); + ext::merge_extensions_internal(&config, &output).map_err(AvocadoError::from) + }); + (rx, handle) } -/// Unmerge extensions using systemd-sysext and systemd-confext. 
-pub fn unmerge_extensions(unmount: bool) -> Result<(), AvocadoError> { - let output = quiet_output(); - ext::unmerge_extensions_internal_with_options(true, unmount, &output) - .map_err(AvocadoError::from) +/// Unmerge extensions with streaming output. +pub fn unmerge_extensions_streaming( + unmount: bool, +) -> ( + mpsc::Receiver, + thread::JoinHandle>, +) { + let (tx, rx) = mpsc::sync_channel(4); + let handle = thread::spawn(move || { + let output = OutputManager::new_streaming(tx); + ext::unmerge_extensions_internal_with_options(true, unmount, &output) + .map_err(AvocadoError::from) + }); + (rx, handle) } -/// Refresh extensions (unmerge then merge). -pub fn refresh_extensions(config: &Config) -> Result<(), AvocadoError> { - let output = quiet_output(); +/// Refresh extensions (unmerge then merge) with streaming output. +pub fn refresh_extensions_streaming( + config: &Config, +) -> ( + mpsc::Receiver, + thread::JoinHandle>, +) { + let (tx, rx) = mpsc::sync_channel(4); + let config = config.clone(); + let handle = thread::spawn(move || { + let output = OutputManager::new_streaming(tx); + + // First unmerge (skip depmod since we'll call it after merge, don't unmount loops) + ext::unmerge_extensions_internal_with_options(false, false, &output) + .map_err(AvocadoError::from)?; + + // Invalidate NFS caches for any HITL-mounted extensions + ext::invalidate_hitl_caches(&output); + + // Then merge (this will call depmod via post-merge processing) + ext::merge_extensions_internal(&config, &output).map_err(AvocadoError::from) + }); + (rx, handle) +} - // First unmerge (skip depmod since we'll call it after merge, don't unmount loops) - ext::unmerge_extensions_internal_with_options(false, false, &output) - .map_err(AvocadoError::from)?; +// ── Batch service functions (used by non-streaming clients and tests) ──────── - // Invalidate NFS caches for any HITL-mounted extensions - ext::invalidate_hitl_caches(&output); +/// Merge extensions using systemd-sysext and 
systemd-confext. +/// Returns log messages produced during the operation. +pub fn merge_extensions(config: &Config) -> Result, AvocadoError> { + let (rx, handle) = merge_extensions_streaming(config); + let messages: Vec = rx.into_iter().collect(); + handle.join().unwrap_or_else(|_| { + Err(AvocadoError::MergeFailed { + reason: "internal panic".into(), + }) + })?; + Ok(messages) +} - // Then merge (this will call depmod via post-merge processing) - ext::merge_extensions_internal(config, &output).map_err(AvocadoError::from)?; +/// Unmerge extensions using systemd-sysext and systemd-confext. +/// Returns log messages produced during the operation. +pub fn unmerge_extensions(unmount: bool) -> Result, AvocadoError> { + let (rx, handle) = unmerge_extensions_streaming(unmount); + let messages: Vec = rx.into_iter().collect(); + handle.join().unwrap_or_else(|_| { + Err(AvocadoError::UnmergeFailed { + reason: "internal panic".into(), + }) + })?; + Ok(messages) +} - Ok(()) +/// Refresh extensions (unmerge then merge). +/// Returns log messages produced during the operation. +pub fn refresh_extensions(config: &Config) -> Result, AvocadoError> { + let (rx, handle) = refresh_extensions_streaming(config); + let messages: Vec = rx.into_iter().collect(); + handle.join().unwrap_or_else(|_| { + Err(AvocadoError::MergeFailed { + reason: "internal panic".into(), + }) + })?; + Ok(messages) } /// Enable extensions for a specific OS release version. 
@@ -266,7 +341,5 @@ pub fn disable_extensions( pub fn status_extensions( config: &Config, ) -> Result, AvocadoError> { - let output = quiet_output(); - ext::show_enhanced_status(config, &output).map_err(AvocadoError::from)?; - Ok(vec![]) + ext::collect_extension_status(config).map_err(AvocadoError::from) } diff --git a/src/service/hitl.rs b/src/service/hitl.rs index fcd7d1a..52ca3e7 100644 --- a/src/service/hitl.rs +++ b/src/service/hitl.rs @@ -120,12 +120,22 @@ pub fn unmount(extensions: &[String]) -> Result<(), AvocadoError> { } } - // Step 2: Clean up service drop-ins + // Step 2: Unmerge extensions before unmounting NFS shares. + // Extensions must be unmerged first so the sysext/confext overlay no longer + // references the HITL mount points we are about to remove. + let _ = crate::service::ext::unmerge_extensions(false); + + // Step 3: Clean up service drop-ins for (extension, services) in &extension_services { let _ = hitl::cleanup_service_dropins(extension, services, &output); } - // Step 3: Unmount each extension + // Step 4: Reload systemd to apply drop-in removals + if !extension_services.is_empty() { + let _ = hitl::systemd_daemon_reload(&output); + } + + // Step 5: Unmount each extension for extension in extensions { let mount_point = format!("{extensions_base_dir}/{extension}"); @@ -160,12 +170,9 @@ pub fn unmount(extensions: &[String]) -> Result<(), AvocadoError> { } } - // Reload systemd - let _ = hitl::systemd_daemon_reload(&output); - - // Refresh extensions + // Step 6: Merge remaining extensions (without the removed HITL ones) let config = Config::default(); - let _ = crate::service::ext::refresh_extensions(&config); + let _ = crate::service::ext::merge_extensions(&config); Ok(()) } diff --git a/src/service/runtime.rs b/src/service/runtime.rs index 9bbe055..5b74d61 100644 --- a/src/service/runtime.rs +++ b/src/service/runtime.rs @@ -4,6 +4,14 @@ use crate::service::error::AvocadoError; use crate::service::types::{RuntimeEntry, 
RuntimeExtensionInfo}; use crate::{staging, update}; use std::path::Path; +use std::sync::mpsc; +use std::thread; + +/// A streaming operation: a channel receiver for log messages and a join handle for the result. +type StreamHandle = ( + mpsc::Receiver, + thread::JoinHandle>, +); /// Convert a RuntimeManifest + active flag to a RuntimeEntry. fn manifest_to_entry(manifest: &RuntimeManifest, active: bool) -> RuntimeEntry { @@ -38,16 +46,88 @@ pub fn list_runtimes(config: &Config) -> Result, AvocadoError> .collect()) } +// ── Streaming service functions ────────────────────────────────────────────── + +/// Add a runtime from a TUF repository URL with streaming output. +/// Performs the TUF update synchronously, then streams the refresh operation. +pub fn add_from_url_streaming( + url: &str, + auth_token: Option<&str>, + artifacts_url: Option<&str>, + config: &Config, +) -> Result { + let base_dir = config.get_avocado_base_dir(); + let base_path = Path::new(&base_dir); + update::perform_update(url, base_path, auth_token, artifacts_url, false)?; + Ok(super::ext::refresh_extensions_streaming(config)) +} + +/// Add a runtime from a local manifest file with streaming output. +/// Performs staging synchronously, then streams the refresh operation. 
+pub fn add_from_manifest_streaming( + manifest_path: &str, + config: &Config, +) -> Result { + let base_dir = config.get_avocado_base_dir(); + let base_path = Path::new(&base_dir); + + let manifest_content = + std::fs::read_to_string(manifest_path).map_err(|e| AvocadoError::StagingFailed { + reason: format!("Failed to read manifest: {e}"), + })?; + + let manifest: RuntimeManifest = + serde_json::from_str(&manifest_content).map_err(|e| AvocadoError::StagingFailed { + reason: format!("Invalid manifest.json: {e}"), + })?; + + staging::validate_manifest_images(&manifest, base_path)?; + staging::stage_manifest(&manifest, &manifest_content, base_path, false)?; + staging::activate_runtime(&manifest.id, base_path)?; + Ok(super::ext::refresh_extensions_streaming(config)) +} + +/// Activate a staged runtime by ID (or prefix) with streaming output. +/// Performs activation synchronously, then streams the refresh operation. +pub fn activate_runtime_streaming( + id_prefix: &str, + config: &Config, +) -> Result, AvocadoError> { + let base_dir = config.get_avocado_base_dir(); + let base_path = Path::new(&base_dir); + let runtimes = RuntimeManifest::list_all(base_path); + + let (matched, is_active) = resolve_runtime_with_active(id_prefix, &runtimes)?; + if is_active { + return Ok(None); // Already active, nothing to do + } + + staging::activate_runtime(&matched.id, base_path)?; + Ok(Some(super::ext::refresh_extensions_streaming(config))) +} + +// ── Batch service functions ────────────────────────────────────────────────── + /// Add a runtime from a TUF repository URL. -pub fn add_from_url(url: &str, config: &Config) -> Result<(), AvocadoError> { +/// Returns log messages from the refresh operation. 
+pub fn add_from_url( + url: &str, + auth_token: Option<&str>, + artifacts_url: Option<&str>, + config: &Config, +) -> Result, AvocadoError> { let base_dir = config.get_avocado_base_dir(); let base_path = Path::new(&base_dir); - update::perform_update(url, base_path, false)?; - Ok(()) + update::perform_update(url, base_path, auth_token, artifacts_url, false)?; + super::ext::refresh_extensions(config) } /// Add a runtime from a local manifest file. -pub fn add_from_manifest(manifest_path: &str, config: &Config) -> Result<(), AvocadoError> { +/// Returns log messages from the refresh operation. +pub fn add_from_manifest( + manifest_path: &str, + config: &Config, +) -> Result, AvocadoError> { let base_dir = config.get_avocado_base_dir(); let base_path = Path::new(&base_dir); @@ -64,8 +144,7 @@ pub fn add_from_manifest(manifest_path: &str, config: &Config) -> Result<(), Avo staging::validate_manifest_images(&manifest, base_path)?; staging::stage_manifest(&manifest, &manifest_content, base_path, false)?; staging::activate_runtime(&manifest.id, base_path)?; - - Ok(()) + super::ext::refresh_extensions(config) } /// Remove a staged runtime by ID (or prefix). @@ -80,18 +159,19 @@ pub fn remove_runtime(id_prefix: &str, config: &Config) -> Result<(), AvocadoErr } /// Activate a staged runtime by ID (or prefix). -pub fn activate_runtime(id_prefix: &str, config: &Config) -> Result<(), AvocadoError> { +/// Returns log messages from the refresh operation. 
+pub fn activate_runtime(id_prefix: &str, config: &Config) -> Result, AvocadoError> { let base_dir = config.get_avocado_base_dir(); let base_path = Path::new(&base_dir); let runtimes = RuntimeManifest::list_all(base_path); let (matched, is_active) = resolve_runtime_with_active(id_prefix, &runtimes)?; if is_active { - return Ok(()); // Already active, nothing to do + return Ok(Vec::new()); // Already active, nothing to do } staging::activate_runtime(&matched.id, base_path)?; - Ok(()) + super::ext::refresh_extensions(config) } /// Inspect a runtime's details by ID (or prefix). diff --git a/src/staging.rs b/src/staging.rs index 33d13a2..48d352f 100644 --- a/src/staging.rs +++ b/src/staging.rs @@ -39,7 +39,7 @@ pub fn validate_manifest_images( None } else { Some(MissingImage { - extension_name: format!("{} v{}", ext.name, ext.version), + extension_name: format!("{} {}", ext.name, ext.version), expected_path: path.display().to_string(), }) } @@ -75,7 +75,7 @@ pub fn stage_manifest( if verbose { println!( - " Staged runtime: {} v{} (build {})", + " Staged runtime: {} {} ({})", manifest.runtime.name, manifest.runtime.version, &manifest.id[..8.min(manifest.id.len())] @@ -96,12 +96,17 @@ pub fn install_images_from_staging( let images_dir = base_dir.join(IMAGES_DIR_NAME); let _ = fs::create_dir_all(&images_dir); + let mut missing = Vec::new(); + for ext in &manifest.extensions { if let Some(ref image_id) = ext.image_id { let dest = images_dir.join(format!("{image_id}.raw")); if dest.exists() { if verbose { - println!(" Image already present: {} ({})", ext.name, image_id); + println!( + " Image already present: {} {} ({})", + ext.name, ext.version, image_id + ); } continue; } @@ -114,12 +119,29 @@ pub fn install_images_from_staging( )) })?; if verbose { - println!(" Installed image: {} -> {}.raw", ext.name, image_id); + println!( + " Installed image: {} {} -> {}.raw", + ext.name, ext.version, image_id + ); } + } else { + println!( + " WARNING: Image not in staging and not on 
disk: {} {} ({})", + ext.name, ext.version, image_id + ); + missing.push(format!("{} {} ({})", ext.name, ext.version, image_id)); } } } + if !missing.is_empty() { + let details = missing.join(", "); + return Err(StagingError::StagingFailed(format!( + "{} extension image(s) missing after staging: {details}", + missing.len() + ))); + } + Ok(()) } @@ -216,7 +238,7 @@ mod tests { let result = validate_manifest_images(&manifest, tmp.path()); assert!(result.is_err()); let err = result.unwrap_err().to_string(); - assert!(err.contains("app v0.1.0")); + assert!(err.contains("app 0.1.0")); assert!(err.contains("a1b2c3d4-e5f6-5789-abcd-ef0123456789.raw")); } diff --git a/src/update.rs b/src/update.rs index dc18ffb..ca2f7e7 100644 --- a/src/update.rs +++ b/src/update.rs @@ -32,7 +32,13 @@ pub enum UpdateError { MetadataError(String), } -pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), UpdateError> { +pub fn perform_update( + url: &str, + base_dir: &Path, + auth_token: Option<&str>, + artifacts_url: Option<&str>, + verbose: bool, +) -> Result<(), UpdateError> { let url = url.trim_end_matches('/'); // 1. Load the local trust anchor @@ -47,19 +53,17 @@ pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), U let root = &signed_root.signed; let trusted_keys = extract_trusted_keys(root)?; - if verbose { - println!( - " Loaded trust anchor: version {}, {} trusted key(s)", - root.version, - trusted_keys.len() - ); - } + println!( + " Trust anchor: version {}, {} trusted key(s)", + root.version, + trusted_keys.len() + ); // 2. 
Fetch and verify remote metadata (TUF order: timestamp -> snapshot -> targets) println!(" Fetching update metadata..."); let timestamp_url = format!("{url}/metadata/timestamp.json"); - let timestamp_raw = fetch_url(×tamp_url)?; + let timestamp_raw = fetch_url(×tamp_url, auth_token)?; let timestamp: tough::schema::Signed = parse_metadata("timestamp.json", ×tamp_raw)?; verify_signatures( @@ -71,15 +75,13 @@ pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), U &tough::schema::RoleType::Timestamp, )?; - if verbose { - println!( - " Verified timestamp.json (version {})", - timestamp.signed.version - ); - } + println!( + " Verified timestamp.json (version {})", + timestamp.signed.version + ); let snapshot_url = format!("{url}/metadata/snapshot.json"); - let snapshot_raw = fetch_url(&snapshot_url)?; + let snapshot_raw = fetch_url(&snapshot_url, auth_token)?; let snapshot: tough::schema::Signed = parse_metadata("snapshot.json", &snapshot_raw)?; verify_signatures( @@ -91,15 +93,13 @@ pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), U &tough::schema::RoleType::Snapshot, )?; - if verbose { - println!( - " Verified snapshot.json (version {})", - snapshot.signed.version - ); - } + println!( + " Verified snapshot.json (version {})", + snapshot.signed.version + ); let targets_url = format!("{url}/metadata/targets.json"); - let targets_raw = fetch_url(&targets_url)?; + let targets_raw = fetch_url(&targets_url, auth_token)?; let targets: tough::schema::Signed = parse_metadata("targets.json", &targets_raw)?; verify_signatures( @@ -111,16 +111,78 @@ pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), U &tough::schema::RoleType::Targets, )?; + let inline_count = targets.signed.targets.len(); + println!( + " Verified targets.json (version {}, {} inline target(s))", + targets.signed.version, inline_count + ); if verbose { + for (name, _) in targets.signed.targets.iter() { + println!(" inline target: 
{}", name.raw()); + } + } + + // 3a. Walk delegations if present — collect delegated targets + let mut delegated_targets: Vec<(String, tough::schema::Target)> = Vec::new(); + + if let Some(delegations) = &targets.signed.delegations { println!( - " Verified targets.json (version {})", - targets.signed.version + " Found {} delegation(s) in targets.json", + delegations.roles.len() ); + for role in &delegations.roles { + let role_path = format!("delegations/{}.json", role.name); + let delegation_url = format!("{url}/metadata/{role_path}"); + println!(" Fetching delegation: {}", role.name); + let delegation_raw = fetch_url(&delegation_url, auth_token)?; + + // Verify hash + length against snapshot meta entry + verify_delegation_hash(&role_path, &delegation_raw, &snapshot)?; + + // Parse and verify signature against content key from targets.json delegations.keys + let delegation: tough::schema::Signed = + parse_metadata(&role_path, &delegation_raw)?; + verify_delegation_signatures( + &role_path, + &delegation_raw, + &delegation.signatures, + &delegations.keys, + &role.keyids, + role.threshold, + )?; + + println!( + " Verified delegation {} ({} target(s))", + role.name, + delegation.signed.targets.len() + ); + if verbose { + for (name, _) in delegation.signed.targets.iter() { + println!(" delegated target: {}", name.raw()); + } + } + if delegation.signed.targets.is_empty() { + println!(" WARNING: Delegation '{}' has no targets — extension images will not be downloaded!", role.name); + } + + for (name, info) in &delegation.signed.targets { + delegated_targets.push((name.raw().to_string(), info.clone())); + } + } + } else { + println!(" No delegations found in targets.json"); } - // 3. Enumerate and download targets - let target_map = &targets.signed.targets; - println!(" Downloading {} target(s)...", target_map.len()); + // 3b. 
Enumerate and download targets (inline + delegated) + let inline_targets: Vec<(String, &tough::schema::Target)> = targets + .signed + .targets + .iter() + .map(|(k, v)| (k.raw().to_string(), v)) + .collect(); + + let all_count = inline_targets.len() + delegated_targets.len(); + println!(" Downloading {all_count} target(s)..."); let staging_dir = base_dir.join(".update-staging"); fs::create_dir_all(&staging_dir).map_err(|e| { @@ -144,49 +206,32 @@ pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), U }) .unwrap_or_default(); - for (target_name, target_info) in target_map { - let name_str = target_name.raw(); - - // Content-addressable skip: if this target is an image that already - // exists locally, the UUIDv5 name guarantees identical content. - if name_str != "manifest.json" && existing_images.contains(name_str) { - if verbose { - println!(" Already present, skipping: {name_str}"); - } - continue; - } - - let target_url = format!("{url}/targets/{name_str}"); - - if verbose { - println!(" Downloading {name_str}..."); - } - - let data = fetch_url_bytes(&target_url)?; - - // Verify length - if data.len() as u64 != target_info.length { - return Err(UpdateError::HashMismatch { - target: name_str.to_string(), - expected: format!("{} bytes", target_info.length), - actual: format!("{} bytes", data.len()), - }); - } - - // Verify sha256 hash - let expected_hex = hex_encode(target_info.hashes.sha256.as_ref()); - let actual_hash = sha256_hex(&data); - if actual_hash != expected_hex { - return Err(UpdateError::HashMismatch { - target: name_str.to_string(), - expected: expected_hex, - actual: actual_hash, - }); - } + // Download inline targets (empty for delegated format; kept for backward compat) + for (name_str, target_info) in &inline_targets { + download_target( + url, + name_str, + target_info, + &staging_dir, + &existing_images, + auth_token, + artifacts_url, + verbose, + )?; + } - let target_path = staging_dir.join(name_str); - 
fs::write(&target_path, &data) - .map_err(|e| UpdateError::StagingFailed(format!("Failed to write {name_str}: {e}")))?; + // Download delegated targets + for (name_str, target_info) in &delegated_targets { + download_target( + url, + name_str, + target_info, + &staging_dir, + &existing_images, + auth_token, + artifacts_url, + verbose, + )?; } // 4. Parse the downloaded manifest and stage the update @@ -200,13 +245,18 @@ pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), U let new_manifest: RuntimeManifest = serde_json::from_str(&manifest_content) .map_err(|e| UpdateError::StagingFailed(format!("Invalid manifest.json: {e}")))?; - if verbose { - println!( - " New runtime: {} v{} (build {})", - new_manifest.runtime.name, - new_manifest.runtime.version, - &new_manifest.id[..8.min(new_manifest.id.len())] - ); + let short_id = &new_manifest.id[..8.min(new_manifest.id.len())]; + println!( + " New runtime: {} {} ({short_id})", + new_manifest.runtime.name, new_manifest.runtime.version, + ); + println!( + " Manifest lists {} extension(s):", + new_manifest.extensions.len() + ); + for ext in &new_manifest.extensions { + let img = ext.image_id.as_deref().unwrap_or("none"); + println!(" {} {} (image: {})", ext.name, ext.version, img); } staging::install_images_from_staging(&new_manifest, &staging_dir, base_dir, verbose) @@ -218,11 +268,10 @@ pub fn perform_update(url: &str, base_dir: &Path, verbose: bool) -> Result<(), U staging::activate_runtime(&new_manifest.id, base_dir) .map_err(|e| UpdateError::StagingFailed(e.to_string()))?; + let short_id = &new_manifest.id[..8.min(new_manifest.id.len())]; println!( - " Activated runtime: {} v{} ({})", - new_manifest.runtime.name, - new_manifest.runtime.version, - &new_manifest.id[..8.min(new_manifest.id.len())] + " Activated runtime: {} {} ({short_id})", + new_manifest.runtime.name, new_manifest.runtime.version, ); // Clean up staging directory @@ -232,6 +281,171 @@ pub fn perform_update(url: &str, base_dir: 
&Path, verbose: bool) -> Result<(), U Ok(()) } +/// Download a single target file into the staging directory, verifying hash and length. +/// Skips content-addressable image files that already exist on disk. +#[allow(clippy::too_many_arguments)] +fn download_target( + url: &str, + name_str: &str, + target_info: &tough::schema::Target, + staging_dir: &Path, + existing_images: &std::collections::HashSet, + auth_token: Option<&str>, + artifacts_url: Option<&str>, + verbose: bool, +) -> Result<(), UpdateError> { + // Content-addressable skip: if this target is an image that already + // exists locally, the UUIDv5 name guarantees identical content. + if name_str != "manifest.json" && existing_images.contains(name_str) { + if verbose { + println!(" Already present, skipping: {name_str}"); + } + return Ok(()); + } + + // .raw image files are fetched from the artifacts URL (shared blob storage) + // rather than the per-device TUF repo, but still verified against TUF hashes. + let target_url = if name_str.ends_with(".raw") { + if let Some(art_url) = artifacts_url { + let art_url = art_url.trim_end_matches('/'); + format!("{art_url}/{name_str}") + } else { + format!("{url}/targets/{name_str}") + } + } else { + format!("{url}/targets/{name_str}") + }; + if verbose { + println!(" Downloading {name_str}..."); + } + + let data = fetch_url_bytes(&target_url, auth_token)?; + + // Verify length + if data.len() as u64 != target_info.length { + return Err(UpdateError::HashMismatch { + target: name_str.to_string(), + expected: format!("{} bytes", target_info.length), + actual: format!("{} bytes", data.len()), + }); + } + + // Verify sha256 hash + let expected_hex = hex_encode(target_info.hashes.sha256.as_ref()); + let actual_hash = sha256_hex(&data); + if actual_hash != expected_hex { + return Err(UpdateError::HashMismatch { + target: name_str.to_string(), + expected: expected_hex, + actual: actual_hash, + }); + } + + let target_path = staging_dir.join(name_str); + 
fs::write(&target_path, &data) + .map_err(|e| UpdateError::StagingFailed(format!("Failed to write {name_str}: {e}")))?; + + Ok(()) +} + +/// Verify a delegation file's hash and length against the snapshot metadata. +fn verify_delegation_hash( + role_path: &str, + raw_json: &str, + snapshot: &tough::schema::Signed, +) -> Result<(), UpdateError> { + // The snapshot meta key uses the full path like "delegations/runtime-.json" + let meta_entry = snapshot.signed.meta.get(role_path).ok_or_else(|| { + UpdateError::MetadataError(format!( + "Delegation '{role_path}' not found in snapshot.json meta" + )) + })?; + + let actual_len = raw_json.len() as u64; + if let Some(expected_len) = meta_entry.length { + if actual_len != expected_len { + return Err(UpdateError::MetadataError(format!( + "Length mismatch for '{role_path}': snapshot says {expected_len}, got {actual_len}" + ))); + } + } + + let actual_hash = sha256_hex(raw_json.as_bytes()); + let hashes = meta_entry.hashes.as_ref().ok_or_else(|| { + UpdateError::MetadataError(format!("No hashes in snapshot.json for '{role_path}'")) + })?; + let expected_hash = hex_encode(hashes.sha256.as_ref()); + if actual_hash != expected_hash { + return Err(UpdateError::MetadataError(format!( + "Hash mismatch for '{role_path}': snapshot says {expected_hash}, got {actual_hash}" + ))); + } + + Ok(()) +} + +/// Verify signatures on a delegation file using the keys declared in the +/// parent targets.json `delegations.keys` map. 
+fn verify_delegation_signatures>( + name: &str, + raw_json: &str, + signatures: &[tough::schema::Signature], + delegation_keys: &std::collections::HashMap, + authorized_keyids: &[K], + threshold: std::num::NonZeroU64, +) -> Result<(), UpdateError> { + let authorized_hex: Vec = authorized_keyids + .iter() + .map(|id| hex_encode(id.as_ref())) + .collect(); + + let threshold = threshold.get() as usize; + + // Build a map of keyid-hex → PublicKey from the delegation keys + let mut key_map: Vec<(String, PublicKey)> = Vec::new(); + for (key_id, key) in delegation_keys { + let key_id_hex = hex_encode(key_id.as_ref()); + if let tough::schema::key::Key::Ed25519 { keyval, .. } = key { + let public_hex = hex_encode(keyval.public.as_ref()); + if let Ok(public_bytes) = hex_decode(&public_hex) { + if let Ok(pk) = PublicKey::from_slice(&public_bytes) { + key_map.push((key_id_hex, pk)); + } + } + } + } + + let canonical = extract_signed_canonical(raw_json) + .map_err(|e| UpdateError::SignatureVerification(name.to_string(), e))?; + + let mut valid_count = 0; + + for sig in signatures { + let sig_key_id = hex_encode(sig.keyid.as_ref()); + + if !authorized_hex.contains(&sig_key_id) { + continue; + } + + if let Some((_, pk)) = key_map.iter().find(|(id, _)| *id == sig_key_id) { + if let Ok(signature) = ed25519_compact::Signature::from_slice(sig.sig.as_ref()) { + if pk.verify(canonical.as_bytes(), &signature).is_ok() { + valid_count += 1; + } + } + } + } + + if valid_count < threshold { + return Err(UpdateError::SignatureVerification( + name.to_string(), + format!("Insufficient valid signatures: got {valid_count}, need {threshold}"), + )); + } + + Ok(()) +} + fn extract_trusted_keys( root: &tough::schema::Root, ) -> Result, UpdateError> { @@ -331,10 +545,14 @@ fn extract_signed_canonical(raw_json: &str) -> Result { serde_json::to_string(signed).map_err(|e| format!("Failed to serialize: {e}")) } -fn fetch_url(url: &str) -> Result { - let response = ureq::get(url) - .call() - 
.map_err(|e| UpdateError::FetchFailed(url.to_string(), e.to_string()))?; +fn fetch_url(url: &str, auth_token: Option<&str>) -> Result { + let req = ureq::get(url); + let response = match auth_token { + Some(token) => req.header("Authorization", format!("Bearer {token}")), + None => req, + } + .call() + .map_err(|e| UpdateError::FetchFailed(url.to_string(), e.to_string()))?; let mut body = String::new(); response @@ -346,10 +564,14 @@ fn fetch_url(url: &str) -> Result { Ok(body) } -fn fetch_url_bytes(url: &str) -> Result, UpdateError> { - let response = ureq::get(url) - .call() - .map_err(|e| UpdateError::FetchFailed(url.to_string(), e.to_string()))?; +fn fetch_url_bytes(url: &str, auth_token: Option<&str>) -> Result, UpdateError> { + let req = ureq::get(url); + let response = match auth_token { + Some(token) => req.header("Authorization", format!("Bearer {token}")), + None => req, + } + .call() + .map_err(|e| UpdateError::FetchFailed(url.to_string(), e.to_string()))?; let mut body = Vec::new(); response @@ -406,6 +628,11 @@ mod tests { ed25519_compact::KeyPair::from_seed(ed25519_compact::Seed::from(seed_bytes)) } + fn content_keypair() -> ed25519_compact::KeyPair { + let seed_bytes = [99u8; 32]; + ed25519_compact::KeyPair::from_seed(ed25519_compact::Seed::from(seed_bytes)) + } + fn make_test_root_json() -> (String, ed25519_compact::KeyPair) { let kp = test_keypair(); let pk_hex = hex_encode(kp.pk.as_ref()); @@ -449,6 +676,25 @@ mod tests { (serde_json::to_string_pretty(&root).unwrap(), kp) } + /// Build a signed TUF metadata envelope. 
+ fn sign_json(payload: &serde_json::Value, kp: &ed25519_compact::KeyPair) -> (String, String) { + let pk_hex = hex_encode(kp.pk.as_ref()); + let key_id = sha256_hex( + format!( + r#"{{"keytype":"ed25519","keyval":{{"public":"{pk_hex}"}},"scheme":"ed25519"}}"# + ) + .as_bytes(), + ); + let canonical = serde_json::to_string(payload).unwrap(); + let sig = kp.sk.sign(canonical.as_bytes(), None); + let sig_hex = hex_encode(sig.as_ref()); + let envelope = serde_json::json!({ + "signatures": [{ "keyid": &key_id, "sig": sig_hex }], + "signed": payload + }); + (serde_json::to_string_pretty(&envelope).unwrap(), key_id) + } + #[test] fn test_extract_trusted_keys() { let (root_json, _kp) = make_test_root_json(); @@ -477,7 +723,7 @@ mod tests { #[test] fn test_no_trust_anchor() { let tmp = tempfile::TempDir::new().unwrap(); - let result = perform_update("http://localhost:9999", tmp.path(), false); + let result = perform_update("http://localhost:9999", tmp.path(), None, None, false); assert!(matches!(result, Err(UpdateError::NoTrustAnchor))); } @@ -541,4 +787,256 @@ mod tests { "Signature verification should succeed: {result:?}" ); } + + // ---- Delegation tests ---- + + fn make_delegated_targets_json( + runtime_uuid: &str, + content_kp: &ed25519_compact::KeyPair, + targets: &[(&str, &str, u64)], // (name, sha256_hex, size) + ) -> String { + let mut targets_map = serde_json::Map::new(); + for (name, hash, size) in targets { + targets_map.insert( + name.to_string(), + serde_json::json!({ + "hashes": { "sha256": hash }, + "length": size + }), + ); + } + let payload = serde_json::json!({ + "_type": "targets", + "expires": "2030-01-01T00:00:00Z", + "spec_version": "1.0.0", + "targets": targets_map, + "version": 1, + "_delegation_name": format!("runtime-{runtime_uuid}") + }); + let (json, _) = sign_json(&payload, content_kp); + json + } + + fn make_targets_with_delegation( + runtime_uuid: &str, + content_kp: &ed25519_compact::KeyPair, + signer_kp: &ed25519_compact::KeyPair, + ) -> 
String { + let content_pk_hex = hex_encode(content_kp.pk.as_ref()); + let content_key_id = sha256_hex( + format!( + r#"{{"keytype":"ed25519","keyval":{{"public":"{content_pk_hex}"}},"scheme":"ed25519"}}"# + ) + .as_bytes(), + ); + + let payload = serde_json::json!({ + "_type": "targets", + "expires": "2030-01-01T00:00:00Z", + "spec_version": "1.0.0", + "targets": {}, + "delegations": { + "keys": { + &content_key_id: { + "keytype": "ed25519", + "keyval": { "public": content_pk_hex }, + "scheme": "ed25519" + } + }, + "roles": [ + { + "name": format!("runtime-{runtime_uuid}"), + "keyids": [&content_key_id], + "threshold": 1, + "paths": ["manifest.json", "*.raw"], + "terminating": true + } + ] + }, + "version": 1 + }); + let (json, _) = sign_json(&payload, signer_kp); + json + } + + fn make_snapshot_with_delegation( + targets_json: &str, + delegation_json: &str, + runtime_uuid: &str, + signer_kp: &ed25519_compact::KeyPair, + ) -> String { + let targets_hash = sha256_hex(targets_json.as_bytes()); + let targets_len = targets_json.len() as u64; + let del_hash = sha256_hex(delegation_json.as_bytes()); + let del_len = delegation_json.len() as u64; + let del_path = format!("delegations/runtime-{runtime_uuid}.json"); + + let payload = serde_json::json!({ + "_type": "snapshot", + "expires": "2030-01-01T00:00:00Z", + "spec_version": "1.0.0", + "meta": { + "targets.json": { + "hashes": { "sha256": targets_hash }, + "length": targets_len, + "version": 1 + }, + del_path: { + "hashes": { "sha256": del_hash }, + "length": del_len, + "version": 1 + } + }, + "version": 1 + }); + let (json, _) = sign_json(&payload, signer_kp); + json + } + + #[test] + fn test_verify_delegation_hash_ok() { + let kp = test_keypair(); + let ckp = content_keypair(); + let uuid = "550e8400-e29b-41d4-a716-446655440000"; + let del_json = make_delegated_targets_json(uuid, &ckp, &[]); + let targets_json = make_targets_with_delegation(uuid, &ckp, &kp); + let snapshot_json = 
make_snapshot_with_delegation(&targets_json, &del_json, uuid, &kp); + let snapshot: tough::schema::Signed = + serde_json::from_str(&snapshot_json).unwrap(); + + let role_path = format!("delegations/runtime-{uuid}.json"); + assert!(verify_delegation_hash(&role_path, &del_json, &snapshot).is_ok()); + } + + #[test] + fn test_verify_delegation_hash_mismatch() { + let kp = test_keypair(); + let ckp = content_keypair(); + let uuid = "550e8400-e29b-41d4-a716-446655440000"; + let del_json = make_delegated_targets_json(uuid, &ckp, &[]); + let targets_json = make_targets_with_delegation(uuid, &ckp, &kp); + let snapshot_json = make_snapshot_with_delegation(&targets_json, &del_json, uuid, &kp); + let snapshot: tough::schema::Signed = + serde_json::from_str(&snapshot_json).unwrap(); + + let role_path = format!("delegations/runtime-{uuid}.json"); + let tampered = del_json.replace("runtime", "TAMPERED"); + assert!(verify_delegation_hash(&role_path, &tampered, &snapshot).is_err()); + } + + #[test] + fn test_verify_delegation_signatures_ok() { + let ckp = content_keypair(); + let uuid = "550e8400-e29b-41d4-a716-446655440000"; + let del_json = + make_delegated_targets_json(uuid, &ckp, &[("manifest.json", &"aa".repeat(32), 10)]); + + let del: tough::schema::Signed = + serde_json::from_str(&del_json).unwrap(); + + // Build keys + keyids matching the content keypair + let content_pk_hex = hex_encode(ckp.pk.as_ref()); + let content_key_id_hex = sha256_hex( + format!( + r#"{{"keytype":"ed25519","keyval":{{"public":"{content_pk_hex}"}},"scheme":"ed25519"}}"# + ) + .as_bytes(), + ); + + // Parse from a full targets.json with delegation block to get proper tough types + let kp = test_keypair(); + let targets_json = make_targets_with_delegation(uuid, &ckp, &kp); + let targets: tough::schema::Signed = + serde_json::from_str(&targets_json).unwrap(); + let delegations = targets.signed.delegations.unwrap(); + let role = &delegations.roles[0]; + + let role_path = 
format!("delegations/runtime-{uuid}.json"); + let result = verify_delegation_signatures( + &role_path, + &del_json, + &del.signatures, + &delegations.keys, + &role.keyids, + role.threshold, + ); + assert!( + result.is_ok(), + "Delegation signature verification should succeed: {result:?}" + ); + let _ = content_key_id_hex; + } + + #[test] + fn test_verify_delegation_signatures_wrong_key() { + let ckp = content_keypair(); + let wrong_kp = test_keypair(); // different key + let uuid = "550e8400-e29b-41d4-a716-446655440000"; + + // Sign with the content key, but declare a different key in delegation + let del_json = make_delegated_targets_json(uuid, &wrong_kp, &[]); + let del: tough::schema::Signed = + serde_json::from_str(&del_json).unwrap(); + + // targets.json delegates to ckp, but the file is signed by wrong_kp + let targets_json = make_targets_with_delegation(uuid, &ckp, &ckp); + let targets: tough::schema::Signed = + serde_json::from_str(&targets_json).unwrap(); + let delegations = targets.signed.delegations.unwrap(); + let role = &delegations.roles[0]; + + let role_path = format!("delegations/runtime-{uuid}.json"); + let result = verify_delegation_signatures( + &role_path, + &del_json, + &del.signatures, + &delegations.keys, + &role.keyids, + role.threshold, + ); + assert!(result.is_err(), "Should fail with wrong signing key"); + } + + #[test] + fn test_flat_targets_no_delegation() { + // Without a delegations block, delegated_targets should be empty + // and processing continues using inline targets only. 
+ let kp = test_keypair(); + let pk_hex = hex_encode(kp.pk.as_ref()); + let key_id = sha256_hex( + format!( + r#"{{"keytype":"ed25519","keyval":{{"public":"{pk_hex}"}},"scheme":"ed25519"}}"# + ) + .as_bytes(), + ); + + // Build a flat targets.json without delegations + let payload = serde_json::json!({ + "_type": "targets", + "expires": "2030-01-01T00:00:00Z", + "spec_version": "1.0.0", + "targets": { + "manifest.json": { + "hashes": { "sha256": "aa".repeat(32) }, + "length": 10 + } + }, + "version": 1 + }); + let canonical = serde_json::to_string(&payload).unwrap(); + let sig = kp.sk.sign(canonical.as_bytes(), None); + let sig_hex = hex_encode(sig.as_ref()); + let targets_json = serde_json::to_string(&serde_json::json!({ + "signatures": [{ "keyid": key_id, "sig": sig_hex }], + "signed": payload + })) + .unwrap(); + + let targets: tough::schema::Signed = + serde_json::from_str(&targets_json).unwrap(); + + // No delegations block → no delegation walking + assert!(targets.signed.delegations.is_none()); + assert_eq!(targets.signed.targets.len(), 1); + } } diff --git a/src/varlink/org.avocado.Extensions.varlink b/src/varlink/org.avocado.Extensions.varlink index 66b3fe2..89ffabb 100644 --- a/src/varlink/org.avocado.Extensions.varlink +++ b/src/varlink/org.avocado.Extensions.varlink @@ -24,13 +24,16 @@ type ExtensionStatus ( method List() -> (extensions: []Extension) # Merge extensions using systemd-sysext and systemd-confext -method Merge() -> () +# Supports streaming: client may set more=true to receive per-message progress +method Merge() -> (message: string, done: bool) # Unmerge extensions -method Unmerge(unmount: ?bool) -> () +# Supports streaming: client may set more=true to receive per-message progress +method Unmerge(unmount: ?bool) -> (message: string, done: bool) # Refresh extensions (unmerge then merge) -method Refresh() -> () +# Supports streaming: client may set more=true to receive per-message progress +method Refresh() -> (message: string, done: bool) # 
Enable extensions for a specific OS release version method Enable(extensions: []string, osRelease: ?string) -> (enabled: int, failed: int) diff --git a/src/varlink/org.avocado.Runtimes.varlink b/src/varlink/org.avocado.Runtimes.varlink index 94ee00c..98b7957 100644 --- a/src/varlink/org.avocado.Runtimes.varlink +++ b/src/varlink/org.avocado.Runtimes.varlink @@ -24,17 +24,20 @@ type Runtime ( # List all available runtimes method List() -> (runtimes: []Runtime) -# Add a runtime from a TUF repository URL -method AddFromUrl(url: string) -> () +# Add a runtime from a TUF repository URL (authToken: optional bearer token for protected endpoints) +# Supports streaming: client may set more=true to receive per-message progress +method AddFromUrl(url: string, authToken: ?string, artifactsUrl: ?string) -> (message: string, done: bool) # Add a runtime from a local manifest file -method AddFromManifest(manifestPath: string) -> () +# Supports streaming: client may set more=true to receive per-message progress +method AddFromManifest(manifestPath: string) -> (message: string, done: bool) # Remove a staged runtime by ID (or prefix) method Remove(id: string) -> () # Activate a staged runtime by ID (or prefix) -method Activate(id: string) -> () +# Supports streaming: client may set more=true to receive per-message progress +method Activate(id: string) -> (message: string, done: bool) # Inspect a runtime's details method Inspect(id: string) -> (runtime: Runtime) diff --git a/src/varlink/org_avocado_Extensions.rs b/src/varlink/org_avocado_Extensions.rs index 4e2962c..16a4e7f 100644 --- a/src/varlink/org_avocado_Extensions.rs +++ b/src/varlink/org_avocado_Extensions.rs @@ -356,26 +356,32 @@ pub trait Call_List: VarlinkCallError { } impl Call_List for varlink::Call<'_> {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -pub struct Merge_Reply {} +pub struct Merge_Reply { + pub r#message: String, + pub r#done: bool, +} impl varlink::VarlinkReply for Merge_Reply {} 
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub struct Merge_Args {} #[allow(dead_code)] pub trait Call_Merge: VarlinkCallError { - fn reply(&mut self) -> varlink::Result<()> { - self.reply_struct(varlink::Reply::parameters(None)) + fn reply(&mut self, r#message: String, r#done: bool) -> varlink::Result<()> { + self.reply_struct(Merge_Reply { r#message, r#done }.into()) } } impl Call_Merge for varlink::Call<'_> {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -pub struct Refresh_Reply {} +pub struct Refresh_Reply { + pub r#message: String, + pub r#done: bool, +} impl varlink::VarlinkReply for Refresh_Reply {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub struct Refresh_Args {} #[allow(dead_code)] pub trait Call_Refresh: VarlinkCallError { - fn reply(&mut self) -> varlink::Result<()> { - self.reply_struct(varlink::Reply::parameters(None)) + fn reply(&mut self, r#message: String, r#done: bool) -> varlink::Result<()> { + self.reply_struct(Refresh_Reply { r#message, r#done }.into()) } } impl Call_Refresh for varlink::Call<'_> {} @@ -394,7 +400,10 @@ pub trait Call_Status: VarlinkCallError { } impl Call_Status for varlink::Call<'_> {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -pub struct Unmerge_Reply {} +pub struct Unmerge_Reply { + pub r#message: String, + pub r#done: bool, +} impl varlink::VarlinkReply for Unmerge_Reply {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub struct Unmerge_Args { @@ -403,8 +412,8 @@ pub struct Unmerge_Args { } #[allow(dead_code)] pub trait Call_Unmerge: VarlinkCallError { - fn reply(&mut self) -> varlink::Result<()> { - self.reply_struct(varlink::Reply::parameters(None)) + fn reply(&mut self, r#message: String, r#done: bool) -> varlink::Result<()> { + self.reply_struct(Unmerge_Reply { r#message, r#done }.into()) } } impl Call_Unmerge for varlink::Call<'_> {} @@ -548,7 +557,7 @@ pub fn new(inner: Box) -> VarlinkInterfacePr } impl varlink::Interface for 
VarlinkInterfaceProxy { fn get_description(&self) -> &'static str { - "# Extension management for Avocado Linux system extensions\ninterface org.avocado.Extensions\n\ntype Extension (\n name: string,\n version: ?string,\n path: string,\n isSysext: bool,\n isConfext: bool,\n isDirectory: bool\n)\n\ntype ExtensionStatus (\n name: string,\n version: ?string,\n isSysext: bool,\n isConfext: bool,\n isMerged: bool,\n origin: ?string,\n imageId: ?string\n)\n\n# List all available extensions in the extensions directory\nmethod List() -> (extensions: []Extension)\n\n# Merge extensions using systemd-sysext and systemd-confext\nmethod Merge() -> ()\n\n# Unmerge extensions\nmethod Unmerge(unmount: ?bool) -> ()\n\n# Refresh extensions (unmerge then merge)\nmethod Refresh() -> ()\n\n# Enable extensions for a specific OS release version\nmethod Enable(extensions: []string, osRelease: ?string) -> (enabled: int, failed: int)\n\n# Disable extensions for a specific OS release version\nmethod Disable(extensions: ?[]string, all: ?bool, osRelease: ?string) -> (disabled: int, failed: int)\n\n# Show status of merged extensions\nmethod Status() -> (extensions: []ExtensionStatus)\n\nerror ExtensionNotFound (name: string)\nerror MergeFailed (reason: string)\nerror UnmergeFailed (reason: string)\nerror ConfigurationError (message: string)\nerror CommandFailed (command: string, message: string)\n" + "# Extension management for Avocado Linux system extensions\ninterface org.avocado.Extensions\n\ntype Extension (\n name: string,\n version: ?string,\n path: string,\n isSysext: bool,\n isConfext: bool,\n isDirectory: bool\n)\n\ntype ExtensionStatus (\n name: string,\n version: ?string,\n isSysext: bool,\n isConfext: bool,\n isMerged: bool,\n origin: ?string,\n imageId: ?string\n)\n\n# List all available extensions in the extensions directory\nmethod List() -> (extensions: []Extension)\n\n# Merge extensions using systemd-sysext and systemd-confext\n# Supports streaming: client may set more=true to 
receive per-message progress\nmethod Merge() -> (message: string, done: bool)\n\n# Unmerge extensions\n# Supports streaming: client may set more=true to receive per-message progress\nmethod Unmerge(unmount: ?bool) -> (message: string, done: bool)\n\n# Refresh extensions (unmerge then merge)\n# Supports streaming: client may set more=true to receive per-message progress\nmethod Refresh() -> (message: string, done: bool)\n\n# Enable extensions for a specific OS release version\nmethod Enable(extensions: []string, osRelease: ?string) -> (enabled: int, failed: int)\n\n# Disable extensions for a specific OS release version\nmethod Disable(extensions: ?[]string, all: ?bool, osRelease: ?string) -> (disabled: int, failed: int)\n\n# Show status of merged extensions\nmethod Status() -> (extensions: []ExtensionStatus)\n\nerror ExtensionNotFound (name: string)\nerror MergeFailed (reason: string)\nerror UnmergeFailed (reason: string)\nerror ConfigurationError (message: string)\nerror CommandFailed (command: string, message: string)\n" } fn get_name(&self) -> &'static str { "org.avocado.Extensions" diff --git a/src/varlink/org_avocado_Runtimes.rs b/src/varlink/org_avocado_Runtimes.rs index 97b3f9b..f97296d 100644 --- a/src/varlink/org_avocado_Runtimes.rs +++ b/src/varlink/org_avocado_Runtimes.rs @@ -280,7 +280,10 @@ pub struct UpdateFailed_Args { pub r#reason: String, } #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -pub struct Activate_Reply {} +pub struct Activate_Reply { + pub r#message: String, + pub r#done: bool, +} impl varlink::VarlinkReply for Activate_Reply {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub struct Activate_Args { @@ -288,13 +291,16 @@ pub struct Activate_Args { } #[allow(dead_code)] pub trait Call_Activate: VarlinkCallError { - fn reply(&mut self) -> varlink::Result<()> { - self.reply_struct(varlink::Reply::parameters(None)) + fn reply(&mut self, r#message: String, r#done: bool) -> varlink::Result<()> { + 
self.reply_struct(Activate_Reply { r#message, r#done }.into()) } } impl Call_Activate for varlink::Call<'_> {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -pub struct AddFromManifest_Reply {} +pub struct AddFromManifest_Reply { + pub r#message: String, + pub r#done: bool, +} impl varlink::VarlinkReply for AddFromManifest_Reply {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub struct AddFromManifest_Args { @@ -302,22 +308,29 @@ pub struct AddFromManifest_Args { } #[allow(dead_code)] pub trait Call_AddFromManifest: VarlinkCallError { - fn reply(&mut self) -> varlink::Result<()> { - self.reply_struct(varlink::Reply::parameters(None)) + fn reply(&mut self, r#message: String, r#done: bool) -> varlink::Result<()> { + self.reply_struct(AddFromManifest_Reply { r#message, r#done }.into()) } } impl Call_AddFromManifest for varlink::Call<'_> {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] -pub struct AddFromUrl_Reply {} +pub struct AddFromUrl_Reply { + pub r#message: String, + pub r#done: bool, +} impl varlink::VarlinkReply for AddFromUrl_Reply {} #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub struct AddFromUrl_Args { pub r#url: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub r#authToken: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub r#artifactsUrl: Option, } #[allow(dead_code)] pub trait Call_AddFromUrl: VarlinkCallError { - fn reply(&mut self) -> varlink::Result<()> { - self.reply_struct(varlink::Reply::parameters(None)) + fn reply(&mut self, r#message: String, r#done: bool) -> varlink::Result<()> { + self.reply_struct(AddFromUrl_Reply { r#message, r#done }.into()) } } impl Call_AddFromUrl for varlink::Call<'_> {} @@ -373,7 +386,13 @@ pub trait VarlinkInterface { call: &mut dyn Call_AddFromManifest, r#manifestPath: String, ) -> varlink::Result<()>; - fn add_from_url(&self, call: &mut dyn Call_AddFromUrl, r#url: String) -> varlink::Result<()>; + fn add_from_url( + &self, + 
call: &mut dyn Call_AddFromUrl, + r#url: String, + r#authToken: Option, + r#artifactsUrl: Option, + ) -> varlink::Result<()>; fn inspect(&self, call: &mut dyn Call_Inspect, r#id: String) -> varlink::Result<()>; fn list(&self, call: &mut dyn Call_List) -> varlink::Result<()>; fn remove(&self, call: &mut dyn Call_Remove, r#id: String) -> varlink::Result<()>; @@ -398,6 +417,8 @@ pub trait VarlinkClientInterface { fn add_from_url( &mut self, r#url: String, + r#authToken: Option, + r#artifactsUrl: Option, ) -> varlink::MethodCall; fn inspect(&mut self, r#id: String) -> varlink::MethodCall; fn list(&mut self) -> varlink::MethodCall; @@ -437,11 +458,17 @@ impl VarlinkClientInterface for VarlinkClient { fn add_from_url( &mut self, r#url: String, + r#authToken: Option, + r#artifactsUrl: Option, ) -> varlink::MethodCall { varlink::MethodCall::::new( self.connection.clone(), "org.avocado.Runtimes.AddFromUrl", - AddFromUrl_Args { r#url }, + AddFromUrl_Args { + r#url, + r#authToken, + r#artifactsUrl, + }, ) } fn inspect(&mut self, r#id: String) -> varlink::MethodCall { @@ -476,7 +503,7 @@ pub fn new(inner: Box) -> VarlinkInterfacePr } impl varlink::Interface for VarlinkInterfaceProxy { fn get_description(&self) -> &'static str { - "# Runtime lifecycle management for Avocado Linux\ninterface org.avocado.Runtimes\n\ntype RuntimeInfo (\n name: string,\n version: string\n)\n\ntype ManifestExtension (\n name: string,\n version: string,\n imageId: ?string\n)\n\ntype Runtime (\n id: string,\n manifestVersion: int,\n builtAt: string,\n runtime: RuntimeInfo,\n extensions: []ManifestExtension,\n active: bool\n)\n\n# List all available runtimes\nmethod List() -> (runtimes: []Runtime)\n\n# Add a runtime from a TUF repository URL\nmethod AddFromUrl(url: string) -> ()\n\n# Add a runtime from a local manifest file\nmethod AddFromManifest(manifestPath: string) -> ()\n\n# Remove a staged runtime by ID (or prefix)\nmethod Remove(id: string) -> ()\n\n# Activate a staged runtime by ID (or 
prefix)\nmethod Activate(id: string) -> ()\n\n# Inspect a runtime's details\nmethod Inspect(id: string) -> (runtime: Runtime)\n\nerror RuntimeNotFound (id: string)\nerror AmbiguousRuntimeId (id: string, candidates: []string)\nerror RemoveActiveRuntime ()\nerror StagingFailed (reason: string)\nerror UpdateFailed (reason: string)\n" + "# Runtime lifecycle management for Avocado Linux\ninterface org.avocado.Runtimes\n\ntype RuntimeInfo (\n name: string,\n version: string\n)\n\ntype ManifestExtension (\n name: string,\n version: string,\n imageId: ?string\n)\n\ntype Runtime (\n id: string,\n manifestVersion: int,\n builtAt: string,\n runtime: RuntimeInfo,\n extensions: []ManifestExtension,\n active: bool\n)\n\n# List all available runtimes\nmethod List() -> (runtimes: []Runtime)\n\n# Add a runtime from a TUF repository URL (authToken: optional bearer token for protected endpoints)\n# Supports streaming: client may set more=true to receive per-message progress\nmethod AddFromUrl(url: string, authToken: ?string, artifactsUrl: ?string) -> (message: string, done: bool)\n\n# Add a runtime from a local manifest file\n# Supports streaming: client may set more=true to receive per-message progress\nmethod AddFromManifest(manifestPath: string) -> (message: string, done: bool)\n\n# Remove a staged runtime by ID (or prefix)\nmethod Remove(id: string) -> ()\n\n# Activate a staged runtime by ID (or prefix)\n# Supports streaming: client may set more=true to receive per-message progress\nmethod Activate(id: string) -> (message: string, done: bool)\n\n# Inspect a runtime's details\nmethod Inspect(id: string) -> (runtime: Runtime)\n\nerror RuntimeNotFound (id: string)\nerror AmbiguousRuntimeId (id: string, candidates: []string)\nerror RemoveActiveRuntime ()\nerror StagingFailed (reason: string)\nerror UpdateFailed (reason: string)\n" } fn get_name(&self) -> &'static str { "org.avocado.Runtimes" @@ -535,8 +562,12 @@ impl varlink::Interface for VarlinkInterfaceProxy { return 
Err(varlink::context!(varlink::ErrorKind::SerdeJsonDe(es))); } }; - self.inner - .add_from_url(call as &mut dyn Call_AddFromUrl, args.r#url) + self.inner.add_from_url( + call as &mut dyn Call_AddFromUrl, + args.r#url, + args.r#authToken, + args.r#artifactsUrl, + ) } else { call.reply_invalid_parameter("parameters".into()) } diff --git a/src/varlink_client.rs b/src/varlink_client.rs new file mode 100644 index 0000000..00a0760 --- /dev/null +++ b/src/varlink_client.rs @@ -0,0 +1,296 @@ +use crate::output::OutputManager; +use crate::varlink::{ + org_avocado_Extensions as vl_ext, org_avocado_Hitl as vl_hitl, + org_avocado_RootAuthority as vl_ra, org_avocado_Runtimes as vl_rt, +}; +use std::sync::{Arc, RwLock}; +use varlink::Connection; + +pub use vl_ext::VarlinkClientInterface as ExtClientInterface; +pub use vl_hitl::VarlinkClientInterface as HitlClientInterface; +pub use vl_ra::VarlinkClientInterface as RaClientInterface; +pub use vl_rt::VarlinkClientInterface as RtClientInterface; + +/// Connect to the varlink daemon socket. +/// Prints an error and exits with code 1 if the daemon is not reachable. +pub fn connect_or_exit(address: &str, output: &OutputManager) -> Arc> { + match varlink::Connection::with_address(address) { + Ok(conn) => conn, + Err(e) => { + output.error( + "Daemon Not Running", + &format!( + "Cannot connect to avocadoctl daemon at {address}: {e}\n \ + Start it with: systemctl start avocadoctl" + ), + ); + std::process::exit(1); + } + } +} + +/// Print an RPC error and exit with code 1. +pub fn exit_with_rpc_error( + err: impl std::fmt::Display + std::fmt::Debug, + output: &OutputManager, +) -> ! { + if output.is_verbose() { + output.error("RPC Error", &format!("{err:?}")); + } else { + output.error("RPC Error", &err.to_string()); + } + std::process::exit(1); +} + +// ── Log output helpers ─────────────────────────────────────────────────────── + +/// Print a single log message from a streaming varlink reply. 
+/// Re-colorizes `[INFO]` and `[SUCCESS]` prefixes for the caller's terminal. +pub fn print_single_log(message: &str, output: &OutputManager) { + if message.is_empty() { + return; + } + if let Some(rest) = message.strip_prefix("[INFO] ") { + output.log_info(rest); + } else if let Some(rest) = message.strip_prefix("[SUCCESS] ") { + output.log_success(rest); + } else { + println!("{message}"); + } +} + +// ── Extension output helpers ───────────────────────────────────────────────── + +pub fn print_extensions(extensions: &[vl_ext::Extension], output: &OutputManager) { + if output.is_json() { + match serde_json::to_string(extensions) { + Ok(json) => println!("{json}"), + Err(e) => { + output.error("Output", &format!("JSON serialization failed: {e}")); + std::process::exit(1); + } + } + return; + } + + if extensions.is_empty() { + println!("No extensions found."); + return; + } + + let name_width = extensions + .iter() + .map(|e| e.name.len() + e.version.as_ref().map(|v| v.len() + 1).unwrap_or(0)) + .max() + .unwrap_or(9) + .max(9); + + println!("{: ext.name.clone(), + }; + + let mut types = Vec::new(); + if ext.isSysext { + types.push("sys"); + } + if ext.isConfext { + types.push("conf"); + } + let type_str = if types.is_empty() { + "?".to_string() + } else { + types.join("+") + }; + + println!( + "{: println!("{json}"), + Err(e) => { + output.error("Output", &format!("JSON serialization failed: {e}")); + std::process::exit(1); + } + } + return; + } + + if extensions.is_empty() { + println!("No extensions currently merged."); + return; + } + + let name_width = extensions + .iter() + .map(|e| e.name.len() + e.version.as_ref().map(|v| v.len() + 1).unwrap_or(0)) + .max() + .unwrap_or(9) + .max(9); + + println!( + "{: ext.name.clone(), + }; + + let mut types = Vec::new(); + if ext.isSysext { + types.push("sys"); + } + if ext.isConfext { + types.push("conf"); + } + let type_str = if types.is_empty() { + "?".to_string() + } else { + types.join("+") + }; + + let merged_str 
= if ext.isMerged { "yes" } else { "no" }; + let origin = ext.origin.as_deref().unwrap_or("-"); + + println!("{versioned_name: println!("{json}"), + Err(e) => { + output.error("Output", &format!("JSON serialization failed: {e}")); + std::process::exit(1); + } + } + return; + } + + if runtimes.is_empty() { + println!("No runtimes found."); + return; + } + + println!("{:<32} {:<12} Built At", "Runtime", "Active",); + println!("{}", "=".repeat(32 + 1 + 12 + 1 + 20)); + + for rt in runtimes { + let short_id = &rt.id[..rt.id.len().min(8)]; + let runtime_label = format!("{} {} ({short_id})", rt.runtime.name, rt.runtime.version); + let active_str = if rt.active { "* active" } else { "" }; + + println!("{:<32} {:<12} {}", runtime_label, active_str, rt.builtAt,); + } + + println!(); + println!("Total: {} runtime(s)", runtimes.len()); +} + +pub fn print_runtime_detail(rt: &vl_rt::Runtime, output: &OutputManager) { + if output.is_json() { + match serde_json::to_string(rt) { + Ok(json) => println!("{json}"), + Err(e) => { + output.error("Output", &format!("JSON serialization failed: {e}")); + std::process::exit(1); + } + } + return; + } + + let short_id = &rt.id[..rt.id.len().min(8)]; + println!(); + println!( + " Runtime: {} {} ({short_id})", + rt.runtime.name, rt.runtime.version + ); + println!(" ID: {}", rt.id); + println!(" Built: {}", rt.builtAt); + println!(" Active: {}", if rt.active { "yes" } else { "no" }); + + if !rt.extensions.is_empty() { + println!(); + println!(" Extensions:"); + for ext in &rt.extensions { + let img = ext.imageId.as_deref().unwrap_or("-"); + println!(" {} {} (image: {})", ext.name, ext.version, img); + } + } + println!(); +} + +// ── Root authority output helper ────────────────────────────────────────────── + +pub fn print_root_authority(info: &Option, output: &OutputManager) { + if output.is_json() { + match serde_json::to_string(info) { + Ok(json) => println!("{json}"), + Err(e) => { + output.error("Output", &format!("JSON serialization 
failed: {e}")); + std::process::exit(1); + } + } + return; + } + + match info { + None => { + output.info( + "Root Authority", + "No root authority configured. Build and provision a runtime with avocado build to enable verified updates.", + ); + } + Some(ra) => { + println!(); + println!(" Root authority:"); + println!(); + println!(" Version: {}", ra.version); + println!(" Expires: {}", ra.expires); + println!(); + println!(" Trusted signing keys:"); + println!(); + println!(" {:<18} {:<12} ROLES", "KEY ID", "TYPE"); + for key in &ra.keys { + let short_id = &key.keyId[..key.keyId.len().min(16)]; + let roles_str = key.roles.join(", "); + println!(" {short_id:<18} {:<12} {roles_str}", key.keyType); + } + println!(); + } + } +} diff --git a/src/varlink_server.rs b/src/varlink_server.rs index ae28bff..3ef52c3 100644 --- a/src/varlink_server.rs +++ b/src/varlink_server.rs @@ -7,6 +7,51 @@ use crate::varlink::{ org_avocado_Extensions as vl_ext, org_avocado_Hitl as vl_hitl, org_avocado_RootAuthority as vl_ra, org_avocado_Runtimes as vl_rt, }; +use std::sync::mpsc; +use std::thread; +use varlink::CallTrait; + +// ── Streaming helper ─────────────────────────────────────────────── + +/// Drain a streaming channel, sending each message as a varlink reply with +/// `continues: true`. After the channel closes, join the worker thread and +/// send a final reply (success or error). +/// +/// The `reply_fn` sends one intermediate message. The `done_fn` sends the +/// final success reply. The `error_fn` sends an error reply. 
+fn drain_stream( + call: &mut C, + rx: mpsc::Receiver, + handle: thread::JoinHandle>, + reply_fn: R, + done_fn: D, + error_fn: E, +) -> varlink::Result<()> +where + C: CallTrait + ?Sized, + R: Fn(&mut C, String) -> varlink::Result<()>, + D: Fn(&mut C) -> varlink::Result<()>, + E: Fn(&mut C, AvocadoError) -> varlink::Result<()>, +{ + call.set_continues(true); + for message in rx { + reply_fn(call, message)?; + } + // Channel closed — worker thread is done + let result = handle.join().unwrap_or_else(|_| { + Err(AvocadoError::MergeFailed { + reason: "internal panic".into(), + }) + }); + call.set_continues(false); + match result { + Ok(()) => done_fn(call), + Err(e) => { + eprintln!(" Error: {e}"); + error_fn(call, e) + } + } +} // ── Extensions handler ────────────────────────────────────────────── @@ -50,9 +95,21 @@ impl vl_ext::VarlinkInterface for ExtensionsHandler { } fn merge(&self, call: &mut dyn vl_ext::Call_Merge) -> varlink::Result<()> { - match service::ext::merge_extensions(&self.config) { - Ok(()) => call.reply(), - Err(e) => map_ext_error!(call, e), + if call.wants_more() { + let (rx, handle) = service::ext::merge_extensions_streaming(&self.config); + drain_stream( + call, + rx, + handle, + |c, msg| c.reply(msg, false), + |c| c.reply(String::new(), true), + |c, e| map_ext_error!(c, e), + ) + } else { + match service::ext::merge_extensions(&self.config) { + Ok(log) => call.reply(log.join("\n"), true), + Err(e) => map_ext_error!(call, e), + } } } @@ -61,16 +118,40 @@ impl vl_ext::VarlinkInterface for ExtensionsHandler { call: &mut dyn vl_ext::Call_Unmerge, r#unmount: Option, ) -> varlink::Result<()> { - match service::ext::unmerge_extensions(unmount.unwrap_or(false)) { - Ok(()) => call.reply(), - Err(e) => map_ext_error!(call, e), + if call.wants_more() { + let (rx, handle) = service::ext::unmerge_extensions_streaming(unmount.unwrap_or(false)); + drain_stream( + call, + rx, + handle, + |c, msg| c.reply(msg, false), + |c| c.reply(String::new(), true), + |c, 
e| map_ext_error!(c, e), + ) + } else { + match service::ext::unmerge_extensions(unmount.unwrap_or(false)) { + Ok(log) => call.reply(log.join("\n"), true), + Err(e) => map_ext_error!(call, e), + } } } fn refresh(&self, call: &mut dyn vl_ext::Call_Refresh) -> varlink::Result<()> { - match service::ext::refresh_extensions(&self.config) { - Ok(()) => call.reply(), - Err(e) => map_ext_error!(call, e), + if call.wants_more() { + let (rx, handle) = service::ext::refresh_extensions_streaming(&self.config); + drain_stream( + call, + rx, + handle, + |c, msg| c.reply(msg, false), + |c| c.reply(String::new(), true), + |c, e| map_ext_error!(c, e), + ) + } else { + match service::ext::refresh_extensions(&self.config) { + Ok(log) => call.reply(log.join("\n"), true), + Err(e) => map_ext_error!(call, e), + } } } @@ -122,7 +203,8 @@ pub struct RuntimesHandler { } macro_rules! map_rt_error { - ($call:expr, $err:expr) => { + ($call:expr, $err:expr) => {{ + eprintln!(" Error: {}", $err); match $err { AvocadoError::RuntimeNotFound { id } => $call.reply_runtime_not_found(id), AvocadoError::AmbiguousRuntimeId { id, candidates } => { @@ -133,7 +215,7 @@ macro_rules! 
map_rt_error { AvocadoError::UpdateFailed { reason } => $call.reply_update_failed(reason), e => $call.reply_staging_failed(e.to_string()), } - }; + }}; } fn runtime_entry_to_varlink(entry: crate::service::types::RuntimeEntry) -> vl_rt::Runtime { @@ -174,10 +256,36 @@ impl vl_rt::VarlinkInterface for RuntimesHandler { &self, call: &mut dyn vl_rt::Call_AddFromUrl, r#url: String, + r#authToken: Option, + r#artifactsUrl: Option, ) -> varlink::Result<()> { - match service::runtime::add_from_url(&url, &self.config) { - Ok(()) => call.reply(), - Err(e) => map_rt_error!(call, e), + if call.wants_more() { + match service::runtime::add_from_url_streaming( + &url, + authToken.as_deref(), + artifactsUrl.as_deref(), + &self.config, + ) { + Ok((rx, handle)) => drain_stream( + call, + rx, + handle, + |c, msg| c.reply(msg, false), + |c| c.reply(String::new(), true), + |c, e| map_rt_error!(c, e), + ), + Err(e) => map_rt_error!(call, e), + } + } else { + match service::runtime::add_from_url( + &url, + authToken.as_deref(), + artifactsUrl.as_deref(), + &self.config, + ) { + Ok(log) => call.reply(log.join("\n"), true), + Err(e) => map_rt_error!(call, e), + } } } @@ -186,9 +294,23 @@ impl vl_rt::VarlinkInterface for RuntimesHandler { call: &mut dyn vl_rt::Call_AddFromManifest, r#manifestPath: String, ) -> varlink::Result<()> { - match service::runtime::add_from_manifest(&manifestPath, &self.config) { - Ok(()) => call.reply(), - Err(e) => map_rt_error!(call, e), + if call.wants_more() { + match service::runtime::add_from_manifest_streaming(&manifestPath, &self.config) { + Ok((rx, handle)) => drain_stream( + call, + rx, + handle, + |c, msg| c.reply(msg, false), + |c| c.reply(String::new(), true), + |c, e| map_rt_error!(c, e), + ), + Err(e) => map_rt_error!(call, e), + } + } else { + match service::runtime::add_from_manifest(&manifestPath, &self.config) { + Ok(log) => call.reply(log.join("\n"), true), + Err(e) => map_rt_error!(call, e), + } } } @@ -200,9 +322,27 @@ impl 
vl_rt::VarlinkInterface for RuntimesHandler { } fn activate(&self, call: &mut dyn vl_rt::Call_Activate, r#id: String) -> varlink::Result<()> { - match service::runtime::activate_runtime(&id, &self.config) { - Ok(()) => call.reply(), - Err(e) => map_rt_error!(call, e), + if call.wants_more() { + match service::runtime::activate_runtime_streaming(&id, &self.config) { + Ok(Some((rx, handle))) => drain_stream( + call, + rx, + handle, + |c, msg| c.reply(msg, false), + |c| c.reply(String::new(), true), + |c, e| map_rt_error!(c, e), + ), + Ok(None) => { + // Already active, nothing to stream + call.reply(String::new(), true) + } + Err(e) => map_rt_error!(call, e), + } + } else { + match service::runtime::activate_runtime(&id, &self.config) { + Ok(log) => call.reply(log.join("\n"), true), + Err(e) => map_rt_error!(call, e), + } } } diff --git a/systemd/avocadoctl.service b/systemd/avocadoctl.service index 5b9f8ab..282ead8 100644 --- a/systemd/avocadoctl.service +++ b/systemd/avocadoctl.service @@ -1,14 +1,12 @@ [Unit] Description=Avocado Control Daemon (Varlink) +DefaultDependencies=no Requires=avocadoctl.socket -After=avocadoctl.socket +After=local-fs.target avocadoctl.socket [Service] Type=simple ExecStart=/usr/bin/avocadoctl serve -ProtectHome=yes -ProtectSystem=full -PrivateTmp=yes NoNewPrivileges=yes [Install] diff --git a/systemd/avocadoctl.socket b/systemd/avocadoctl.socket index 913352e..cd573a7 100644 --- a/systemd/avocadoctl.socket +++ b/systemd/avocadoctl.socket @@ -1,5 +1,8 @@ [Unit] Description=Avocado Control Varlink Socket +DefaultDependencies=no +After=local-fs.target +Before=avocado-extension.service avocado-extension-initrd.service [Socket] ListenStream=/run/avocado/avocadoctl.sock diff --git a/tests/ext_integration_tests.rs b/tests/ext_integration_tests.rs index fa99a33..7b50163 100644 --- a/tests/ext_integration_tests.rs +++ b/tests/ext_integration_tests.rs @@ -62,7 +62,7 @@ fn run_avocadoctl(args: &[&str]) -> std::process::Output { /// Test ext list 
with non-existent default directory #[test] fn test_ext_list_nonexistent_directory() { - let output = run_avocadoctl(&["ext", "list"]); + let output = run_avocadoctl_with_env(&["ext", "list"], &[("AVOCADO_TEST_MODE", "1")]); // The scanner handles missing directories gracefully — exits 0 and reports no extensions assert!( output.status.success(), @@ -199,7 +199,10 @@ fn test_invalid_config_file() { /// Test -c flag with nonexistent config file (should use defaults) #[test] fn test_nonexistent_config_file() { - let output = run_avocadoctl(&["-c", "/nonexistent/config.toml", "ext", "list"]); + let output = run_avocadoctl_with_env( + &["-c", "/nonexistent/config.toml", "ext", "list"], + &[("AVOCADO_TEST_MODE", "1")], + ); // Nonexistent config falls back to defaults; missing extensions directories are handled // gracefully by the scanner — the command should succeed and report no extensions. @@ -224,7 +227,10 @@ fn test_ext_list_empty_directory() { // Run avocadoctl ext list with empty extensions directory let output = run_avocadoctl_with_env( &["ext", "list"], - &[("AVOCADO_EXTENSIONS_PATH", extensions_dir.to_str().unwrap())], + &[ + ("AVOCADO_EXTENSIONS_PATH", extensions_dir.to_str().unwrap()), + ("AVOCADO_TEST_MODE", "1"), + ], ); assert!( diff --git a/tests/varlink_interface_tests.rs b/tests/varlink_interface_tests.rs new file mode 100644 index 0000000..f3649f8 --- /dev/null +++ b/tests/varlink_interface_tests.rs @@ -0,0 +1,284 @@ +//! Integration tests that exercise the full varlink client → daemon → service path. +//! +//! Each test starts a real `avocadoctl serve` daemon on a temporary Unix socket, +//! then invokes CLI commands with `--socket` pointing at it. This proves that: +//! - The CLI routes requests through the daemon (not direct service calls) +//! - The daemon serialises concurrent callers on the socket +//! 
- Error messages are correct when the daemon is not running + +use std::fs; +use std::path::PathBuf; +use std::process::{Child, Command}; +use std::time::{Duration, Instant}; +use tempfile::TempDir; + +// ── helpers ────────────────────────────────────────────────────────────────── + +fn get_binary_path() -> PathBuf { + let mut path = std::env::current_dir().expect("Failed to get current directory"); + path.push("target"); + path.push("debug"); + path.push("avocadoctl"); + path +} + +fn fixtures_path() -> PathBuf { + std::env::current_dir().expect("cwd").join("tests/fixtures") +} + +/// A running `avocadoctl serve` process bound to a temp socket. +/// Killed automatically when dropped. +struct TestDaemon { + child: Child, + socket_path: PathBuf, + /// Keep temp dir alive so the socket directory isn't removed while the daemon runs. + _temp_dir: TempDir, +} + +impl TestDaemon { + /// Start a daemon with `AVOCADO_TEST_MODE=1` and the test fixtures on PATH. + /// Blocks until the socket file appears (up to 5 s) or panics. 
+ fn start() -> Self { + let temp_dir = TempDir::new().expect("temp dir"); + let socket_path = temp_dir.path().join("avocadoctl-test.sock"); + let socket_address = format!("unix:{}", socket_path.display()); + + let original_path = std::env::var("PATH").unwrap_or_default(); + let test_path = format!("{}:{}", fixtures_path().display(), original_path); + + let child = Command::new(get_binary_path()) + .args(["serve", "--address", &socket_address]) + .env("AVOCADO_TEST_MODE", "1") + .env("PATH", &test_path) + .spawn() + .expect("Failed to spawn daemon"); + + // Wait for the socket to be created (up to 5 s) + let deadline = Instant::now() + Duration::from_secs(5); + while Instant::now() < deadline { + if socket_path.exists() { + break; + } + std::thread::sleep(Duration::from_millis(50)); + } + assert!( + socket_path.exists(), + "Daemon socket should appear within 5 s at {}", + socket_path.display() + ); + + TestDaemon { + child, + socket_path, + _temp_dir: temp_dir, + } + } + + fn socket_address(&self) -> String { + format!("unix:{}", self.socket_path.display()) + } + + /// Run a CLI command routed through this daemon. + fn run(&self, args: &[&str]) -> std::process::Output { + let socket = self.socket_address(); + + let mut all_args = vec!["--socket", socket.as_str()]; + all_args.extend_from_slice(args); + + // Do NOT set AVOCADO_TEST_MODE here: the client must use the varlink path. + // The daemon (started above with AVOCADO_TEST_MODE=1) handles the actual + // service calls with mock executables. + Command::new(get_binary_path()) + .args(&all_args) + .output() + .expect("Failed to execute avocadoctl") + } +} + +impl Drop for TestDaemon { + fn drop(&mut self) { + let _ = self.child.kill(); + let _ = self.child.wait(); + } +} + +// ── tests ───────────────────────────────────────────────────────────────────── + +/// The daemon starts and the socket is reachable. 
+#[test] +fn test_daemon_starts_and_accepts_connections() { + let daemon = TestDaemon::start(); + // If TestDaemon::start() returned, the socket exists and the daemon is running. + assert!(daemon.socket_path.exists(), "socket should exist"); +} + +/// `ext list` routed through the daemon returns extension data (or "no extensions"). +#[test] +fn test_ext_list_via_daemon() { + let daemon = TestDaemon::start(); + let output = daemon.run(&["ext", "list"]); + + assert!( + output.status.success(), + "ext list via daemon should succeed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let stdout = String::from_utf8_lossy(&output.stdout); + // Either shows extensions or a "No extensions found" message — both are valid. + let valid = stdout.contains("Extension") || stdout.contains("No extensions found"); + assert!( + valid, + "ext list should produce a table or empty message; got: {stdout}" + ); +} + +/// `ext list` with an extensions directory populates the table. +#[test] +fn test_ext_list_with_extensions_via_daemon() { + let temp_dir = TempDir::new().expect("temp dir"); + let ext_dir = temp_dir.path().join("images"); + fs::create_dir_all(&ext_dir).expect("create ext dir"); + fs::create_dir(ext_dir.join("my-app")).expect("create extension dir"); + fs::create_dir(ext_dir.join("base-tools")).expect("create extension dir"); + + let socket_path = temp_dir.path().join("avocadoctl.sock"); + let socket_address = format!("unix:{}", socket_path.display()); + let original_path = std::env::var("PATH").unwrap_or_default(); + let test_path = format!("{}:{}", fixtures_path().display(), original_path); + + let mut child = Command::new(get_binary_path()) + .args(["serve", "--address", &socket_address]) + .env("AVOCADO_TEST_MODE", "1") + .env("AVOCADO_EXTENSIONS_PATH", ext_dir.to_str().unwrap()) + .env("PATH", &test_path) + .spawn() + .expect("spawn daemon"); + + let deadline = Instant::now() + Duration::from_secs(5); + while Instant::now() < deadline { + if socket_path.exists() { 
+ break; + } + std::thread::sleep(Duration::from_millis(50)); + } + assert!(socket_path.exists(), "socket should appear"); + + // Client must NOT have AVOCADO_TEST_MODE set so it routes through varlink. + let output = Command::new(get_binary_path()) + .args(["--socket", &socket_address, "ext", "list"]) + .output() + .expect("run cli"); + + let _ = child.kill(); + let _ = child.wait(); + + assert!( + output.status.success(), + "ext list should succeed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!( + stdout.contains("my-app"), + "Should list my-app extension; got: {stdout}" + ); + assert!( + stdout.contains("base-tools"), + "Should list base-tools extension; got: {stdout}" + ); +} + +/// `ext merge` routed through the daemon calls the mock systemd-sysext. +#[test] +fn test_ext_merge_via_daemon() { + let daemon = TestDaemon::start(); + let output = daemon.run(&["ext", "merge"]); + + // In AVOCADO_TEST_MODE, merge uses mock-systemd-sysext which succeeds. + assert!( + output.status.success(), + "ext merge via daemon should succeed: {}", + String::from_utf8_lossy(&output.stderr) + ); +} + +/// `ext status` routed through the daemon returns status data. +#[test] +fn test_ext_status_via_daemon() { + let daemon = TestDaemon::start(); + let output = daemon.run(&["ext", "status"]); + + assert!( + output.status.success(), + "ext status via daemon should succeed: {}", + String::from_utf8_lossy(&output.stderr) + ); +} + +/// Top-level `merge` alias is routed through the daemon. +#[test] +fn test_merge_alias_via_daemon() { + let daemon = TestDaemon::start(); + let output = daemon.run(&["merge"]); + + assert!( + output.status.success(), + "merge alias via daemon should succeed: {}", + String::from_utf8_lossy(&output.stderr) + ); +} + +/// When no daemon is running, the CLI prints a helpful "Daemon Not Running" error. 
#[test]
fn test_no_daemon_shows_helpful_error() {
    let temp_dir = TempDir::new().expect("temp dir");
    // Point the client at a socket path that nothing is listening on.
    let nonexistent_socket = format!("unix:{}/nonexistent.sock", temp_dir.path().display());

    let cli_output = Command::new(get_binary_path())
        .args(["--socket", &nonexistent_socket, "ext", "list"])
        .output()
        .expect("run cli");

    assert!(
        !cli_output.status.success(),
        "Should fail when daemon is not running"
    );

    // Either the friendly header or the raw connect failure is acceptable.
    let stderr = String::from_utf8_lossy(&cli_output.stderr);
    let mentions_daemon_down =
        stderr.contains("Daemon Not Running") || stderr.contains("Cannot connect");
    assert!(
        mentions_daemon_down,
        "Should show daemon-not-running error; got: {stderr}"
    );
}

/// Two concurrent CLI invocations both succeed — the daemon serialises them.
#[test]
fn test_concurrent_requests_serialised_by_daemon() {
    let daemon = TestDaemon::start();
    let socket = daemon.socket_address();

    // Spawn two merge requests at the same time.
    // Clients must NOT set AVOCADO_TEST_MODE — they must route through varlink.
    let workers: Vec<_> = (0..2)
        .map(|_| {
            let socket_clone = socket.clone();
            let bin = get_binary_path();
            std::thread::spawn(move || {
                Command::new(bin)
                    .args(["--socket", &socket_clone, "ext", "merge"])
                    .output()
                    .expect("run cli")
            })
        })
        .collect();

    for worker in workers {
        let output = worker.join().expect("thread panicked");
        assert!(
            output.status.success(),
            "Concurrent merge should succeed: {}",
            String::from_utf8_lossy(&output.stderr)
        );
    }
}