From 8c0ab58ecc63f02316cb9d8486b8f63cfec9acd2 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 10 Mar 2026 12:32:38 -0400 Subject: [PATCH 01/11] Add stone bundle command for OS update bundles (.aos) Implements the `stone bundle` command that replaces the separate validate + create workflow with a single operation that validates inputs, copies manifest artifacts, builds FAT images, generates a self-describing bundle.json, and packages everything into a content-addressable .aos (tar.zst) archive for OTA and provisioning. - Add `update` section to manifest schema (SlotDetection, OsArtifactRef, SlotAction) - Add `update_strategy` field to Runtime - Add sha2, tar, zstd dependencies - bundle.json includes platform info, update strategy, slot targets, partition layout, and artifact checksums - Build dir output is compatible with stone provision (same paths) --- Cargo.lock | 179 ++++++++ Cargo.toml | 3 + src/commands/stone/bundle.rs | 823 +++++++++++++++++++++++++++++++++++ src/commands/stone/mod.rs | 5 + src/main.rs | 1 + src/manifest.rs | 41 ++ 6 files changed, 1052 insertions(+) create mode 100644 src/commands/stone/bundle.rs diff --git a/Cargo.lock b/Cargo.lock index 1d45a5c..5d8dba5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -110,6 +110,15 @@ version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + [[package]] name = "bstr" version = "1.12.0" @@ -139,6 +148,8 @@ version = "1.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" dependencies = [ + "jobserver", + "libc", "shlex", ] @@ -214,12 +225,41 @@ version = "0.8.7" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + [[package]] name = "difflib" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + [[package]] name = "doc-comment" version = "0.3.3" @@ -254,6 +294,17 @@ dependencies = [ "log", ] +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + [[package]] name = "float-cmp" version = "0.10.0" @@ -263,6 +314,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "getrandom" version = "0.3.3" @@ -317,6 +378,16 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom", + "libc", +] + [[package]] name = "js-sys" version = "0.3.77" @@ -333,6 +404,18 @@ version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +[[package]] +name = "libredox" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" +dependencies = [ + "bitflags 2.9.1", + "libc", + "plain", + "redox_syscall", +] + [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -378,6 +461,18 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + [[package]] name = "predicates" version = "3.1.3" @@ -432,6 +527,15 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "redox_syscall" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" +dependencies = [ + "bitflags 2.9.1", +] + [[package]] name = "regex" version = "1.11.1" @@ -518,6 +622,17 @@ 
dependencies = [ "serde", ] +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "shlex" version = "1.3.0" @@ -540,8 +655,11 @@ dependencies = [ "predicates", "serde", "serde_json", + "sha2", "simply_colored", + "tar", "tempfile", + "zstd", ] [[package]] @@ -561,6 +679,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + [[package]] name = "tempfile" version = "3.20.0" @@ -580,6 +709,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + [[package]] name = "unicode-ident" version = "1.0.18" @@ -592,6 +727,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + [[package]] name = "wait-timeout" version = "0.2.1" @@ -808,3 +949,41 @@ checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.1", ] + +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix", +] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml index 658dcb8..649abf0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,10 @@ clap = { version = "4.5", features = ["derive"] } fatfs = "0.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +sha2 = "0.10" simply_colored = "0.1" +tar = "0.4" +zstd = "0.13" [dev-dependencies] predicates = "3.0" diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs new file mode 100644 index 0000000..492438d --- /dev/null +++ b/src/commands/stone/bundle.rs @@ -0,0 +1,823 @@ +use crate::fat; +use crate::log::*; +use crate::manifest::{BuildArgs, FatVariant, FileEntry, Image, Manifest}; +use clap::Args; +use sha2::{Digest, Sha256}; + +use std::collections::HashMap; +use std::fs; +use std::io::Read; +use std::path::{Path, PathBuf}; + +#[derive(Args, Debug)] +pub struct BundleArgs { + /// Path to the stone manifest JSON file + #[arg( + short = 'm', + long = "manifest-path", + value_name = "PATH", + default_value = "manifest.json" + )] + pub manifest: PathBuf, + + /// Path to the OS release file to include + #[arg(long = "os-release", value_name = "PATH")] + pub os_release: PathBuf, + + /// Path 
to the input directory (can be specified multiple times for search priority) + #[arg( + short = 'i', + long = "input-dir", + value_name = "DIR", + default_value = "." + )] + pub input_dirs: Vec, + + /// Path to the output .aos bundle file + #[arg( + short = 'o', + long = "output", + value_name = "PATH", + default_value = "os-bundle.aos" + )] + pub output: PathBuf, + + /// Directory for intermediate build artifacts + #[arg(long = "build-dir", value_name = "DIR")] + pub build_dir: Option, + + /// Enable verbose output + #[arg(short = 'v', long = "verbose")] + pub verbose: bool, +} + +impl BundleArgs { + pub fn execute(&self) -> Result<(), String> { + bundle_command( + &self.manifest, + &self.os_release, + &self.input_dirs, + &self.output, + self.build_dir.as_deref(), + self.verbose, + ) + } +} + +/// Find a file in multiple input directories, searching in order +fn find_file_in_dirs(filename: &str, input_dirs: &[PathBuf]) -> Option { + for dir in input_dirs { + let candidate = dir.join(filename); + if candidate.exists() { + return Some(candidate); + } + } + None +} + +/// Compute SHA256 hash of a file, returning the hex string +fn sha256_file(path: &Path) -> Result { + let mut file = fs::File::open(path) + .map_err(|e| format!("Failed to open '{}' for hashing: {}", path.display(), e))?; + let mut hasher = Sha256::new(); + let mut buf = [0u8; 8192]; + loop { + let n = file + .read(&mut buf) + .map_err(|e| format!("Failed to read '{}': {}", path.display(), e))?; + if n == 0 { + break; + } + hasher.update(&buf[..n]); + } + Ok(format!("{:x}", hasher.finalize())) +} + +pub fn bundle_command( + manifest_path: &Path, + os_release_path: &Path, + input_dirs: &[PathBuf], + output_path: &Path, + build_dir_override: Option<&Path>, + verbose: bool, +) -> Result<(), String> { + // Validate inputs exist + if !manifest_path.exists() { + return Err(format!( + "Manifest file '{}' not found.", + manifest_path.display() + )); + } + if !os_release_path.exists() { + return Err(format!( + 
"OS release file '{}' not found.", + os_release_path.display() + )); + } + + let manifest = Manifest::from_file(manifest_path)?; + + // Determine build directory + let default_build_dir = output_path + .parent() + .unwrap_or(Path::new(".")) + .join("_build"); + let build_dir = build_dir_override.unwrap_or(&default_build_dir); + + fs::create_dir_all(build_dir).map_err(|e| { + format!( + "Failed to create build directory '{}': {}", + build_dir.display(), + e + ) + })?; + + let images_dir = build_dir.join("images"); + fs::create_dir_all(&images_dir).map_err(|e| { + format!( + "Failed to create images directory '{}': {}", + images_dir.display(), + e + ) + })?; + + log_info(&format!( + "Building OS bundle.\n Manifest: {}\n Build dir: {}\n Output: {}", + manifest_path.display(), + build_dir.display(), + output_path.display() + )); + + // Step 1: Copy all manifest inputs to build dir (like stone create) + copy_manifest_inputs( + &manifest, + manifest_path, + os_release_path, + input_dirs, + build_dir, + verbose, + )?; + + // Step 2: Build FAT images and collect built image artifacts + let built_images = build_all_images(&manifest, input_dirs, build_dir, &images_dir, verbose)?; + + // Step 3: Collect all artifacts (built images + pre-existing images) + let artifacts = collect_artifacts(&manifest, &built_images, input_dirs, &images_dir, verbose)?; + + // Step 4: Parse os-release for OS build ID + let os_build_id = parse_os_release_field(os_release_path, "BUILD_ID")?; + + // Step 5: Generate bundle.json + let bundle_json = generate_bundle_json(&manifest, &artifacts, &os_build_id)?; + let bundle_json_path = build_dir.join("bundle.json"); + let bundle_json_str = serde_json::to_string_pretty(&bundle_json) + .map_err(|e| format!("Failed to serialize bundle.json: {e}"))?; + fs::write(&bundle_json_path, &bundle_json_str) + .map_err(|e| format!("Failed to write bundle.json: {e}"))?; + + if verbose { + log_debug(&format!("Generated bundle.json:\n{bundle_json_str}")); + } + + // Step 
6: Package into .aos (tar.zst) + package_aos(output_path, &bundle_json_path, &artifacts, verbose)?; + + log_success(&format!("OS bundle created: {}", output_path.display())); + Ok(()) +} + +/// Represents a built/collected artifact ready for packaging +struct BundleArtifact { + /// Name of the artifact (e.g., "boot", "rootfs") + name: String, + /// Path to the artifact file on disk + path: PathBuf, + /// Relative path inside the .aos archive (e.g., "images/boot.img") + archive_path: String, + /// SHA256 hash + sha256: String, +} + +/// Copy manifest inputs to the build directory (mirrors stone create behavior) +fn copy_manifest_inputs( + manifest: &Manifest, + manifest_path: &Path, + os_release_path: &Path, + input_dirs: &[PathBuf], + build_dir: &Path, + verbose: bool, +) -> Result<(), String> { + // Copy the manifest itself + let manifest_dest = build_dir.join("manifest.json"); + copy_file(manifest_path, &manifest_dest, verbose)?; + + // Copy os-release + let os_release_dest = build_dir.join("os-release"); + copy_file(os_release_path, &os_release_dest, verbose)?; + + // Copy fwup templates and provision scripts for provision compatibility + for device in manifest.storage_devices.values() { + if let Some(build_args) = &device.build_args + && let Some(template) = build_args.fwup_template() + && let Some(src) = find_file_in_dirs(template, input_dirs) + { + copy_file(&src, &build_dir.join(template), verbose)?; + } + + // Copy image source files that are simple string references + for image in device.images.values() { + if let Image::String(filename) = image + && let Some(src) = find_file_in_dirs(filename, input_dirs) + { + let dest = build_dir.join(filename); + copy_path(&src, &dest, verbose)?; + } + // Copy fwup templates for images + if let Some(ba) = image.build_args() + && let Some(template) = ba.fwup_template() + && let Some(src) = find_file_in_dirs(template, input_dirs) + { + copy_file(&src, &build_dir.join(template), verbose)?; + } + } + } + + // Copy provision 
file + if let Some(provision_file) = &manifest.runtime.provision + && let Some(src) = find_file_in_dirs(provision_file, input_dirs) + { + copy_file(&src, &build_dir.join(provision_file), verbose)?; + } + + // Copy provision profile scripts + if let Some(provision) = &manifest.provision { + for profile in provision.profiles.values() { + if let Some(src) = find_file_in_dirs(&profile.script, input_dirs) { + copy_file(&src, &build_dir.join(&profile.script), verbose)?; + } + } + } + + Ok(()) +} + +/// Build all images that have build_args (FAT images) +fn build_all_images( + manifest: &Manifest, + input_dirs: &[PathBuf], + build_dir: &Path, + images_dir: &Path, + verbose: bool, +) -> Result, String> { + let mut built = HashMap::new(); + + for device in manifest.storage_devices.values() { + for (image_name, image) in &device.images { + match image { + Image::Object { + out, + build_args: Some(BuildArgs::Fat { variant, files }), + size, + size_unit, + .. + } => { + log_info(&format!("Building FAT image '{image_name}' -> '{out}'.")); + + let size_mb = convert_size_to_mb(*size, size_unit)?; + let fat_type = match variant { + FatVariant::Fat12 => fat::FatType::Fat12, + FatVariant::Fat16 => fat::FatType::Fat16, + FatVariant::Fat32 => fat::FatType::Fat32, + }; + + let fat_manifest = create_fat_manifest_with_resolved_paths(files, input_dirs)?; + let temp_manifest_path = + build_dir.join(format!("temp_manifest_{image_name}.json")); + let manifest_json = serde_json::to_string_pretty(&fat_manifest) + .map_err(|e| format!("Failed to serialize FAT manifest: {e}"))?; + fs::write(&temp_manifest_path, manifest_json) + .map_err(|e| format!("Failed to write temporary manifest: {e}"))?; + + // Build into images/ dir for the bundle, and also into build_dir for provision + let output_in_images = images_dir.join(out); + let output_in_build = build_dir.join(out); + let base_path = PathBuf::from("."); + + let options = fat::FatImageOptions::new() + .with_manifest_path(&temp_manifest_path) + 
.with_base_path(&base_path) + .with_output_path(&output_in_images) + .with_size_mebibytes(size_mb) + .with_fat_type(fat_type) + .with_verbose(verbose); + + fat::create_fat_image(&options)?; + let _ = fs::remove_file(&temp_manifest_path); + + // Also copy to build_dir so provision can find it at the same path as before + fs::copy(&output_in_images, &output_in_build) + .map_err(|e| format!("Failed to copy built image to build dir: {e}"))?; + + log_success(&format!("Built FAT image '{out}'.")); + built.insert(image_name.clone(), output_in_images); + } + _ => { + // Non-FAT images (string refs, fwup, or no build_args) are handled in collect_artifacts + } + } + } + } + + Ok(built) +} + +/// Collect all artifacts that should go into the bundle. +/// Uses the update.os_artifacts section to determine which images to include. +fn collect_artifacts( + manifest: &Manifest, + built_images: &HashMap, + input_dirs: &[PathBuf], + images_dir: &Path, + verbose: bool, +) -> Result, String> { + let mut artifacts = Vec::new(); + + let update = match &manifest.update { + Some(u) => u, + None => { + // No update section - collect all images as artifacts + log_warning("No 'update' section in manifest. 
Bundle will include all images."); + return collect_all_images_as_artifacts( + manifest, + built_images, + input_dirs, + images_dir, + verbose, + ); + } + }; + + // Collect only the images referenced in os_artifacts + for (artifact_name, artifact_ref) in &update.os_artifacts { + let image_key = &artifact_ref.image_key; + + // Find this image in the manifest's storage_devices + let image_path = if let Some(path) = built_images.get(image_key) { + // Already built (FAT image) + path.clone() + } else { + // Look for it as a pre-existing file + let image = find_image_in_manifest(manifest, image_key)?; + let filename = image.out(); + + // Check if it's already in images_dir + let in_images = images_dir.join(filename); + if in_images.exists() { + in_images + } else { + // Find in input dirs and copy to images/ + let src = find_file_in_dirs(filename, input_dirs).ok_or_else(|| { + format!( + "Image file '{}' for artifact '{}' not found in any input directory", + filename, artifact_name + ) + })?; + let dest = images_dir.join(filename); + copy_file(&src, &dest, verbose)?; + dest + } + }; + + let filename = image_path + .file_name() + .ok_or_else(|| format!("Invalid image path for artifact '{artifact_name}'"))? 
+ .to_string_lossy() + .to_string(); + let archive_path = format!("images/{filename}"); + let sha256 = sha256_file(&image_path)?; + + if verbose { + log_debug(&format!( + "Artifact '{artifact_name}': {archive_path} (sha256: {sha256})" + )); + } + + artifacts.push(BundleArtifact { + name: artifact_name.clone(), + path: image_path, + archive_path, + sha256, + }); + } + + Ok(artifacts) +} + +/// Fallback: collect all images when no update section is present +fn collect_all_images_as_artifacts( + manifest: &Manifest, + built_images: &HashMap<String, PathBuf>, + input_dirs: &[PathBuf], + images_dir: &Path, + verbose: bool, +) -> Result<Vec<BundleArtifact>, String> { + let mut artifacts = Vec::new(); + + for device in manifest.storage_devices.values() { + for (image_name, image) in &device.images { + let image_path = if let Some(path) = built_images.get(image_name) { + path.clone() + } else { + let filename = image.out(); + let in_images = images_dir.join(filename); + if in_images.exists() { + in_images + } else if let Some(src) = find_file_in_dirs(filename, input_dirs) { + let dest = images_dir.join(filename); + copy_file(&src, &dest, verbose)?; + dest + } else { + if verbose { + log_debug(&format!( + "Skipping image '{image_name}' - file '{}' not found", + filename + )); + } + continue; + } + }; + + let filename = image_path + .file_name() + .unwrap() + .to_string_lossy() + .to_string(); + let archive_path = format!("images/{filename}"); + let sha256 = sha256_file(&image_path)?; + + artifacts.push(BundleArtifact { + name: image_name.clone(), + path: image_path, + archive_path, + sha256, + }); + } + } + + Ok(artifacts) +} + +/// Find an image by key across all storage devices in the manifest +fn find_image_in_manifest<'a>( + manifest: &'a Manifest, + image_key: &str, +) -> Result<&'a Image, String> { + for device in manifest.storage_devices.values() { + if let Some(image) = device.images.get(image_key) { + return Ok(image); + } + } + Err(format!( + "Image key '{image_key}' not found in any storage device
in the manifest" + )) +} + +/// Parse a field from an os-release file (KEY=VALUE format) +fn parse_os_release_field(path: &Path, field: &str) -> Result { + let content = fs::read_to_string(path) + .map_err(|e| format!("Failed to read os-release '{}': {}", path.display(), e))?; + + for line in content.lines() { + let line = line.trim(); + if let Some(value) = line.strip_prefix(&format!("{field}=")) { + // Strip surrounding quotes if present + let value = value.trim_matches('"').trim_matches('\''); + return Ok(value.to_string()); + } + } + + // Not fatal - return empty string + Ok(String::new()) +} + +/// Generate the bundle.json structure +fn generate_bundle_json( + manifest: &Manifest, + artifacts: &[BundleArtifact], + os_build_id: &str, +) -> Result { + let update = manifest.update.as_ref(); + + // Build the update.artifacts array for bundle.json + let mut bundle_artifacts = Vec::new(); + for artifact in artifacts { + let mut artifact_entry = serde_json::json!({ + "name": artifact.name, + "file": artifact.archive_path, + "sha256": artifact.sha256, + }); + + // Add slot_targets from the manifest's os_artifacts + if let Some(update) = update + && let Some(os_artifact) = update.os_artifacts.get(&artifact.name) + { + let slot_partitions = &os_artifact.slot_partitions; + let mut slot_targets = serde_json::Map::new(); + + // Determine slot identifiers based on update strategy + let strategy = manifest + .runtime + .update_strategy + .as_deref() + .unwrap_or("uboot-ab"); + let slot_ids: Vec<&str> = match strategy { + "tegra-ab" => vec!["0", "1"], + _ => vec!["a", "b"], + }; + + for (idx, slot_id) in slot_ids.iter().enumerate() { + if let Some(partition) = slot_partitions.get(idx) { + slot_targets.insert( + slot_id.to_string(), + serde_json::json!({ "partition": partition }), + ); + } + } + + artifact_entry["slot_targets"] = serde_json::Value::Object(slot_targets); + } + + bundle_artifacts.push(artifact_entry); + } + + // Build the top-level bundle.json + let mut bundle = 
serde_json::json!({ + "format_version": 1, + "platform": manifest.runtime.platform, + "architecture": manifest.runtime.architecture, + "os_build_id": os_build_id, + }); + + // Add update section if manifest has one + if let Some(update) = update { + let strategy = manifest + .runtime + .update_strategy + .as_deref() + .unwrap_or("uboot-ab"); + + let mut update_section = serde_json::json!({ + "strategy": strategy, + "slot_detection": serde_json::to_value(&update.slot_detection) + .map_err(|e| format!("Failed to serialize slot_detection: {e}"))?, + "artifacts": bundle_artifacts, + "activate": serde_json::to_value(&update.activate) + .map_err(|e| format!("Failed to serialize activate: {e}"))?, + }); + + if let Some(rollback) = &update.rollback { + update_section["rollback"] = serde_json::to_value(rollback) + .map_err(|e| format!("Failed to serialize rollback: {e}"))?; + } + + bundle["update"] = update_section; + } + + // Add layout section from storage_devices partitions + for device in manifest.storage_devices.values() { + if !device.partitions.is_empty() { + let partitions: Vec = device + .partitions + .iter() + .map(|p| { + let mut part = serde_json::json!({}); + if let Some(name) = &p.name { + part["name"] = serde_json::json!(name); + } + part["size"] = serde_json::json!(p.size); + part["size_unit"] = serde_json::json!(p.size_unit); + if let Some(offset) = p.offset { + part["offset"] = serde_json::json!(offset); + } + if let Some(offset_unit) = &p.offset_unit { + part["offset_unit"] = serde_json::json!(offset_unit); + } + if let Some(expand) = &p.expand { + part["expand"] = serde_json::json!(expand); + } + part + }) + .collect(); + + bundle["layout"] = serde_json::json!({ + "device": device.devpath, + "partitions": partitions, + }); + + if let Some(block_size) = device.block_size { + bundle["layout"]["block_size"] = serde_json::json!(block_size); + } + + // Only include the first device's layout + break; + } + } + + // Add verify section + if 
!os_build_id.is_empty() { + bundle["verify"] = serde_json::json!({ + "type": "os-release", + "field": "BUILD_ID", + "expected": os_build_id, + }); + } + + Ok(bundle) +} + +/// Package everything into a .aos tar.zst archive +fn package_aos( + output_path: &Path, + bundle_json_path: &Path, + artifacts: &[BundleArtifact], + verbose: bool, +) -> Result<(), String> { + // Create output directory if needed + if let Some(parent) = output_path.parent() { + fs::create_dir_all(parent).map_err(|e| { + format!( + "Failed to create output directory '{}': {}", + parent.display(), + e + ) + })?; + } + + let output_file = fs::File::create(output_path).map_err(|e| { + format!( + "Failed to create output file '{}': {}", + output_path.display(), + e + ) + })?; + + let zst_encoder = zstd::Encoder::new(output_file, 3) + .map_err(|e| format!("Failed to create zstd encoder: {e}"))?; + + let mut tar_builder = tar::Builder::new(zst_encoder); + + // Add bundle.json at the root + if verbose { + log_debug("Adding bundle.json to archive."); + } + tar_builder + .append_path_with_name(bundle_json_path, "bundle.json") + .map_err(|e| format!("Failed to add bundle.json to archive: {e}"))?; + + // Add each artifact + for artifact in artifacts { + if verbose { + log_debug(&format!( + "Adding {} -> {}", + artifact.path.display(), + artifact.archive_path + )); + } + tar_builder + .append_path_with_name(&artifact.path, &artifact.archive_path) + .map_err(|e| { + format!( + "Failed to add '{}' to archive: {}", + artifact.archive_path, e + ) + })?; + } + + // Finish the tar, then finish zstd + let zst_encoder = tar_builder + .into_inner() + .map_err(|e| format!("Failed to finalize tar archive: {e}"))?; + zst_encoder + .finish() + .map_err(|e| format!("Failed to finalize zstd compression: {e}"))?; + + Ok(()) +} + +/// Convert size value to mebibytes based on unit string +fn convert_size_to_mb(size: i64, size_unit: &str) -> Result { + let size_mb = match size_unit.to_lowercase().as_str() { + "bytes" | "byte" 
| "b" => size as f64 / (1024.0 * 1024.0), + "kilobytes" | "kilobyte" | "kb" => size as f64 / 1024.0, + "kibibytes" | "kibibyte" | "kib" => size as f64 / 1024.0, + "megabytes" | "megabyte" | "mb" => size as f64, + "mebibytes" | "mebibyte" | "mib" => size as f64, + "gigabytes" | "gigabyte" | "gb" => size as f64 * 1024.0, + "gibibytes" | "gibibyte" | "gib" => size as f64 * 1024.0, + _ => return Err(format!("Unsupported size unit: {size_unit}")), + }; + + if size_mb <= 0.0 { + return Err("Image size must be positive".to_string()); + } + + Ok(size_mb.ceil() as u64) +} + +/// Resolve file paths for FAT manifest entries +fn create_fat_manifest_with_resolved_paths( + files: &[FileEntry], + input_dirs: &[PathBuf], +) -> Result { + let mut fat_files = Vec::new(); + + for entry in files { + let (input_filename, output_name) = match entry { + FileEntry::String(filename) => (filename.as_str(), filename.clone()), + FileEntry::Object { input, output } => (input.as_str(), output.clone()), + }; + + let resolved_path = find_file_in_dirs(input_filename, input_dirs).ok_or_else(|| { + format!("File '{input_filename}' not found in any input directory for FAT image") + })?; + + fat_files.push(fat::FileEntry { + filename: Some(resolved_path.to_string_lossy().to_string()), + output: Some(output_name), + }); + } + + Ok(fat::Manifest { + files: fat_files, + directories: None, + }) +} + +fn copy_path(input_path: &Path, output_path: &Path, verbose: bool) -> Result<(), String> { + if !input_path.exists() { + return Err(format!("Input path '{}' not found.", input_path.display())); + } + + if input_path.is_dir() { + copy_directory(input_path, output_path, verbose) + } else { + copy_file(input_path, output_path, verbose) + } +} + +fn copy_directory(input_dir: &Path, output_dir: &Path, verbose: bool) -> Result<(), String> { + fs::create_dir_all(output_dir).map_err(|e| { + format!( + "Failed to create directory '{}': {}", + output_dir.display(), + e + ) + })?; + + let entries = 
fs::read_dir(input_dir) + .map_err(|e| format!("Failed to read directory '{}': {}", input_dir.display(), e))?; + + for entry in entries { + let entry = entry.map_err(|e| format!("Failed to read directory entry: {e}"))?; + let input_child = entry.path(); + let output_child = output_dir.join(entry.file_name()); + + if input_child.is_dir() { + copy_directory(&input_child, &output_child, verbose)?; + } else { + copy_file(&input_child, &output_child, verbose)?; + } + } + + Ok(()) +} + +fn copy_file(input_path: &Path, output_path: &Path, verbose: bool) -> Result<(), String> { + if !input_path.exists() { + return Err(format!("Input file '{}' not found.", input_path.display())); + } + + if let Some(parent) = output_path.parent() { + fs::create_dir_all(parent) + .map_err(|e| format!("Failed to create directory '{}': {}", parent.display(), e))?; + } + + fs::copy(input_path, output_path).map_err(|e| { + format!( + "Failed to copy '{}' to '{}': {}", + input_path.display(), + output_path.display(), + e + ) + })?; + + if verbose { + log_debug(&format!( + "Copied:\n {}\n {}", + input_path.display(), + output_path.display() + )); + } + + Ok(()) +} diff --git a/src/commands/stone/mod.rs b/src/commands/stone/mod.rs index 5f002bb..dcd4887 100644 --- a/src/commands/stone/mod.rs +++ b/src/commands/stone/mod.rs @@ -1,10 +1,12 @@ use clap::Subcommand; +pub mod bundle; pub mod create; pub mod describe_manifest; pub mod provision; pub mod validate; +use bundle::BundleArgs; use create::CreateArgs; use describe_manifest::DescribeManifestArgs; use provision::ProvisionArgs; @@ -22,6 +24,9 @@ pub enum Commands { /// Create the artifacts specified in the manifest. Create(CreateArgs), + /// Build an OS bundle (.aos) containing all boot/OS artifacts for OTA and provisioning. + Bundle(BundleArgs), + /// Provision by actually building the artifacts specified in the manifest. 
Provision(ProvisionArgs), } diff --git a/src/main.rs b/src/main.rs index b41ba72..3f9b790 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,6 +30,7 @@ fn run() -> Result<(), String> { Commands::Validate(args) => args.execute(), Commands::DescribeManifest(args) => args.execute(), Commands::Create(args) => args.execute(), + Commands::Bundle(args) => args.execute(), Commands::Provision(args) => args.execute(), } } diff --git a/src/manifest.rs b/src/manifest.rs index 449ac55..ec81aab 100644 --- a/src/manifest.rs +++ b/src/manifest.rs @@ -63,6 +63,43 @@ pub struct Manifest { pub storage_devices: std::collections::HashMap, #[serde(skip_serializing_if = "Option::is_none")] pub provision: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub update: Option, +} + +// --- Update section: declares how OS artifacts map to A/B slots for OTA --- + +#[derive(Debug, Deserialize, Serialize)] +pub struct Update { + pub slot_detection: SlotDetection, + pub os_artifacts: HashMap, + pub activate: SlotAction, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub rollback: Option, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +#[serde(tag = "type")] +pub enum SlotDetection { + #[serde(rename = "uboot-env")] + UbootEnv { var: String }, + #[serde(rename = "command")] + Command { command: Vec }, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct OsArtifactRef { + pub image_key: String, + pub slot_partitions: Vec, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +#[serde(tag = "type")] +pub enum SlotAction { + #[serde(rename = "uboot-env")] + UbootEnv { set: HashMap }, + #[serde(rename = "command")] + Command { command: Vec }, } #[derive(Debug, Deserialize, Serialize)] @@ -73,6 +110,8 @@ pub struct Runtime { pub provision: Option, #[serde(skip_serializing_if = "Option::is_none")] pub provision_default: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub update_strategy: Option, } #[derive(Debug, Deserialize, 
Serialize)] @@ -506,6 +545,7 @@ mod tests { architecture: "x86_64".to_string(), provision: Some("provision.sh".to_string()), provision_default: None, + update_strategy: None, }; let serialized = serde_json::to_value(&runtime).unwrap(); @@ -527,6 +567,7 @@ mod tests { architecture: "x86_64".to_string(), provision: None, provision_default: None, + update_strategy: None, }; let serialized = serde_json::to_value(&runtime).unwrap(); From 982df34b2a26b703ee2798d348d67066dad6536c Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 10 Mar 2026 12:58:18 -0400 Subject: [PATCH 02/11] fix: copy FAT source files in stone bundle for provision compatibility stone bundle was not copying files referenced in FAT build_args.files (e.g., initramfs cpio, bzImage) to the build directory. This caused stone provision to fail when trying to rebuild the FAT image since it could not find these source files in any input directory. --- src/commands/stone/bundle.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs index 492438d..73da9ce 100644 --- a/src/commands/stone/bundle.rs +++ b/src/commands/stone/bundle.rs @@ -239,6 +239,14 @@ fn copy_manifest_inputs( { copy_file(&src, &build_dir.join(template), verbose)?; } + // Copy FAT source files (e.g., initramfs, bzImage) so provision can rebuild FAT images + for file_entry in image.files() { + let input_filename = file_entry.input_filename(); + if let Some(src) = find_file_in_dirs(input_filename, input_dirs) { + let dest = build_dir.join(input_filename); + copy_path(&src, &dest, verbose)?; + } + } } } From 7b6bac4510c62b0e128ab88038ed3ffb741ab8e5 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 10 Mar 2026 15:25:06 -0400 Subject: [PATCH 03/11] fix: use AVOCADO_OS_BUILD_ID for OS update verify check Read AVOCADO_OS_BUILD_ID instead of BUILD_ID from os-release when generating bundle.json. 
BUILD_ID is a monotonic counter set by the production release pipeline, while AVOCADO_OS_BUILD_ID is the deterministic content-addressable UUID computed from installed packages. --- src/commands/stone/bundle.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs index 73da9ce..2c236bc 100644 --- a/src/commands/stone/bundle.rs +++ b/src/commands/stone/bundle.rs @@ -165,7 +165,7 @@ pub fn bundle_command( let artifacts = collect_artifacts(&manifest, &built_images, input_dirs, &images_dir, verbose)?; // Step 4: Parse os-release for OS build ID - let os_build_id = parse_os_release_field(os_release_path, "BUILD_ID")?; + let os_build_id = parse_os_release_field(os_release_path, "AVOCADO_OS_BUILD_ID")?; // Step 5: Generate bundle.json let bundle_json = generate_bundle_json(&manifest, &artifacts, &os_build_id)?; @@ -635,7 +635,7 @@ fn generate_bundle_json( if !os_build_id.is_empty() { bundle["verify"] = serde_json::json!({ "type": "os-release", - "field": "BUILD_ID", + "field": "AVOCADO_OS_BUILD_ID", "expected": os_build_id, }); } From 7e8148b65b2b25da4254079b955441f2ebd89941 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 10 Mar 2026 18:02:43 -0400 Subject: [PATCH 04/11] feat: add --os-release-initrd flag for initramfs build ID in bundle Add optional --os-release-initrd CLI arg to stone bundle. When provided, parses AVOCADO_OS_BUILD_ID from the initramfs os-release file and includes initramfs_build_id and verify_initramfs sections in the generated bundle.json. 
--- src/commands/stone/bundle.rs | 39 +++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs index 2c236bc..91f1980 100644 --- a/src/commands/stone/bundle.rs +++ b/src/commands/stone/bundle.rs @@ -24,6 +24,10 @@ pub struct BundleArgs { #[arg(long = "os-release", value_name = "PATH")] pub os_release: PathBuf, + /// Path to the initramfs OS release file (optional, for initramfs build ID) + #[arg(long = "os-release-initrd", value_name = "PATH")] + pub os_release_initrd: Option, + /// Path to the input directory (can be specified multiple times for search priority) #[arg( short = 'i', @@ -56,6 +60,7 @@ impl BundleArgs { bundle_command( &self.manifest, &self.os_release, + self.os_release_initrd.as_deref(), &self.input_dirs, &self.output, self.build_dir.as_deref(), @@ -96,6 +101,7 @@ fn sha256_file(path: &Path) -> Result { pub fn bundle_command( manifest_path: &Path, os_release_path: &Path, + os_release_initrd_path: Option<&Path>, input_dirs: &[PathBuf], output_path: &Path, build_dir_override: Option<&Path>, @@ -153,6 +159,7 @@ pub fn bundle_command( &manifest, manifest_path, os_release_path, + os_release_initrd_path, input_dirs, build_dir, verbose, @@ -167,8 +174,16 @@ pub fn bundle_command( // Step 4: Parse os-release for OS build ID let os_build_id = parse_os_release_field(os_release_path, "AVOCADO_OS_BUILD_ID")?; + // Step 4b: Parse initramfs os-release for initramfs build ID (if provided) + let initramfs_build_id = if let Some(initrd_path) = os_release_initrd_path { + let id = parse_os_release_field(initrd_path, "AVOCADO_OS_BUILD_ID")?; + if id.is_empty() { None } else { Some(id) } + } else { + None + }; + // Step 5: Generate bundle.json - let bundle_json = generate_bundle_json(&manifest, &artifacts, &os_build_id)?; + let bundle_json = generate_bundle_json(&manifest, &artifacts, &os_build_id, initramfs_build_id.as_deref())?; let bundle_json_path = 
build_dir.join("bundle.json"); let bundle_json_str = serde_json::to_string_pretty(&bundle_json) .map_err(|e| format!("Failed to serialize bundle.json: {e}"))?; @@ -203,6 +218,7 @@ fn copy_manifest_inputs( manifest: &Manifest, manifest_path: &Path, os_release_path: &Path, + os_release_initrd_path: Option<&Path>, input_dirs: &[PathBuf], build_dir: &Path, verbose: bool, @@ -215,6 +231,12 @@ fn copy_manifest_inputs( let os_release_dest = build_dir.join("os-release"); copy_file(os_release_path, &os_release_dest, verbose)?; + // Copy os-release-initrd (if provided) + if let Some(initrd_path) = os_release_initrd_path { + let initrd_dest = build_dir.join("os-release-initrd"); + copy_file(initrd_path, &initrd_dest, verbose)?; + } + // Copy fwup templates and provision scripts for provision compatibility for device in manifest.storage_devices.values() { if let Some(build_args) = &device.build_args @@ -513,6 +535,7 @@ fn generate_bundle_json( manifest: &Manifest, artifacts: &[BundleArtifact], os_build_id: &str, + initramfs_build_id: Option<&str>, ) -> Result { let update = manifest.update.as_ref(); @@ -566,6 +589,10 @@ fn generate_bundle_json( "os_build_id": os_build_id, }); + if let Some(initramfs_id) = initramfs_build_id { + bundle["initramfs_build_id"] = serde_json::json!(initramfs_id); + } + // Add update section if manifest has one if let Some(update) = update { let strategy = manifest @@ -640,6 +667,16 @@ fn generate_bundle_json( }); } + // Add initramfs verify section + if let Some(initramfs_id) = initramfs_build_id { + bundle["verify_initramfs"] = serde_json::json!({ + "type": "os-release", + "field": "AVOCADO_OS_BUILD_ID", + "path": "/etc/os-release-initrd", + "expected": initramfs_id, + }); + } + Ok(bundle) } From aa7603100885a56b27f9016fd6e1d2ff0f139765 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 10 Mar 2026 21:00:19 -0400 Subject: [PATCH 05/11] feat: add MbrSwitch slot action and array activate/rollback support Add MbrSwitch variant to SlotAction for 
MBR-based partition table switching (RPi). Add SlotActions untagged enum to support both single action and array of actions for activate/rollback. Bundle serialization now always emits arrays for forward compatibility. --- src/commands/stone/bundle.rs | 11 ++++++++--- src/manifest.rs | 25 +++++++++++++++++++++++-- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs index 91f1980..c5689bc 100644 --- a/src/commands/stone/bundle.rs +++ b/src/commands/stone/bundle.rs @@ -183,7 +183,12 @@ pub fn bundle_command( }; // Step 5: Generate bundle.json - let bundle_json = generate_bundle_json(&manifest, &artifacts, &os_build_id, initramfs_build_id.as_deref())?; + let bundle_json = generate_bundle_json( + &manifest, + &artifacts, + &os_build_id, + initramfs_build_id.as_deref(), + )?; let bundle_json_path = build_dir.join("bundle.json"); let bundle_json_str = serde_json::to_string_pretty(&bundle_json) .map_err(|e| format!("Failed to serialize bundle.json: {e}"))?; @@ -606,12 +611,12 @@ fn generate_bundle_json( "slot_detection": serde_json::to_value(&update.slot_detection) .map_err(|e| format!("Failed to serialize slot_detection: {e}"))?, "artifacts": bundle_artifacts, - "activate": serde_json::to_value(&update.activate) + "activate": serde_json::to_value(update.activate.as_vec()) .map_err(|e| format!("Failed to serialize activate: {e}"))?, }); if let Some(rollback) = &update.rollback { - update_section["rollback"] = serde_json::to_value(rollback) + update_section["rollback"] = serde_json::to_value(rollback.as_vec()) .map_err(|e| format!("Failed to serialize rollback: {e}"))?; } diff --git a/src/manifest.rs b/src/manifest.rs index ec81aab..1d5f529 100644 --- a/src/manifest.rs +++ b/src/manifest.rs @@ -73,9 +73,9 @@ pub struct Manifest { pub struct Update { pub slot_detection: SlotDetection, pub os_artifacts: HashMap, - pub activate: SlotAction, + pub activate: SlotActions, #[serde(default, skip_serializing_if 
= "Option::is_none")] - pub rollback: Option, + pub rollback: Option, } #[derive(Debug, Deserialize, Serialize, Clone)] @@ -100,6 +100,27 @@ pub enum SlotAction { UbootEnv { set: HashMap }, #[serde(rename = "command")] Command { command: Vec }, + #[serde(rename = "mbr-switch")] + MbrSwitch { + devpath: String, + slot_layouts: HashMap>, + }, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +#[serde(untagged)] +pub enum SlotActions { + Single(SlotAction), + Multiple(Vec), +} + +impl SlotActions { + pub fn as_vec(&self) -> Vec<&SlotAction> { + match self { + SlotActions::Single(a) => vec![a], + SlotActions::Multiple(v) => v.iter().collect(), + } + } } #[derive(Debug, Deserialize, Serialize)] From 75180714e7b43978f07719ac8d462435ea072c16 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 10 Mar 2026 21:14:58 -0400 Subject: [PATCH 06/11] fix: compute sequential partition offsets in bundle layout Partitions without explicit offsets now get their byte offsets computed sequentially at bundle time, so bundle.json always includes complete offset information for all partitions. 
--- src/commands/stone/bundle.rs | 39 ++++++++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs index c5689bc..b8eba9e 100644 --- a/src/commands/stone/bundle.rs +++ b/src/commands/stone/bundle.rs @@ -624,8 +624,10 @@ fn generate_bundle_json( } // Add layout section from storage_devices partitions + // Compute sequential offsets for partitions that don't have explicit ones for device in manifest.storage_devices.values() { if !device.partitions.is_empty() { + let mut cursor_bytes: u64 = 0; let partitions: Vec = device .partitions .iter() @@ -636,12 +638,21 @@ fn generate_bundle_json( } part["size"] = serde_json::json!(p.size); part["size_unit"] = serde_json::json!(p.size_unit); - if let Some(offset) = p.offset { - part["offset"] = serde_json::json!(offset); - } - if let Some(offset_unit) = &p.offset_unit { - part["offset_unit"] = serde_json::json!(offset_unit); - } + + // Use explicit offset if provided, otherwise use sequential cursor + let offset_bytes = if let Some(offset) = p.offset { + let unit = p.offset_unit.as_deref(); + to_bytes(offset as u64, unit) + } else { + cursor_bytes + }; + part["offset"] = serde_json::json!(offset_bytes); + part["offset_unit"] = serde_json::json!("bytes"); + + // Advance cursor past this partition + let size_bytes = to_bytes(p.size as u64, Some(&p.size_unit)); + cursor_bytes = offset_bytes + size_bytes; + if let Some(expand) = &p.expand { part["expand"] = serde_json::json!(expand); } @@ -685,6 +696,22 @@ fn generate_bundle_json( Ok(bundle) } +/// Convert a size value to bytes based on its unit. 
+fn to_bytes(value: u64, unit: Option<&str>) -> u64 { + match unit { + Some("tebibytes") => value * 1024 * 1024 * 1024 * 1024, + Some("gibibytes") => value * 1024 * 1024 * 1024, + Some("mebibytes") => value * 1024 * 1024, + Some("kibibytes") => value * 1024, + Some("terabytes") => value * 1_000_000_000_000, + Some("gigabytes") => value * 1_000_000_000, + Some("megabytes") => value * 1_000_000, + Some("kilobytes") => value * 1_000, + Some("bytes") | None => value, + _ => value, + } +} + /// Package everything into a .aos tar.zst archive fn package_aos( output_path: &Path, From ac12a4c73aeb9afe8b098e1302b3ad508bc82150 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Tue, 10 Mar 2026 21:33:38 -0400 Subject: [PATCH 07/11] feat: embed git commit hash in --version output Add build.rs to capture git rev-parse --short HEAD at compile time. stone --version now shows e.g. "stone 1.9.0 (b2c24ba)". --- build.rs | 11 +++++++++++ src/main.rs | 1 + 2 files changed, 12 insertions(+) create mode 100644 build.rs diff --git a/build.rs b/build.rs new file mode 100644 index 0000000..5ab89d0 --- /dev/null +++ b/build.rs @@ -0,0 +1,11 @@ +fn main() { + // Embed git commit hash for version identification + let git_hash = std::process::Command::new("git") + .args(["rev-parse", "--short", "HEAD"]) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_else(|| "unknown".to_string()); + println!("cargo:rustc-env=GIT_HASH={git_hash}"); +} diff --git a/src/main.rs b/src/main.rs index 3f9b790..4a8cea0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,6 +11,7 @@ mod manifest; #[derive(Parser, Debug)] #[command(name = "stone")] #[command(about = "A CLI for managing Avocado stones.")] +#[command(version = concat!(env!("CARGO_PKG_VERSION"), " (", env!("GIT_HASH"), ")"))] struct Cli { #[command(subcommand)] command: Commands, From de8555fbcf2e069271dd49d21415fd9cf8b5cf96 Mon Sep 17 00:00:00 2001 From: Justin Schneck 
Date: Wed, 11 Mar 2026 12:26:14 -0400 Subject: [PATCH 08/11] feat: include artifact size in bundle.json Add size field to BundleArtifact populated from file metadata, included in the generated bundle.json for progress reporting and validation during streaming OS updates. --- src/commands/stone/bundle.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs index b8eba9e..5d3f234 100644 --- a/src/commands/stone/bundle.rs +++ b/src/commands/stone/bundle.rs @@ -216,6 +216,8 @@ struct BundleArtifact { archive_path: String, /// SHA256 hash sha256: String, + /// File size in bytes + size: u64, } /// Copy manifest inputs to the build directory (mirrors stone create behavior) @@ -430,10 +432,13 @@ .to_string(); let archive_path = format!("images/{filename}"); let sha256 = sha256_file(&image_path)?; + let size = std::fs::metadata(&image_path) + .map(|m| m.len()) + .map_err(|e| format!("Failed to get size of '{}': {e}", image_path.display()))?; if verbose { log_debug(&format!( - "Artifact '{artifact_name}': {archive_path} (sha256: {sha256})" + "Artifact '{artifact_name}': {archive_path} (sha256: {sha256}, size: {size})" )); } @@ -442,6 +447,7 @@ path: image_path, archive_path, sha256, + size, }); } @@ -489,12 +495,16 @@ .to_string(); let archive_path = format!("images/{filename}"); let sha256 = sha256_file(&image_path)?; + let size = std::fs::metadata(&image_path) + .map(|m| m.len()) + .map_err(|e| format!("Failed to get size of '{}': {e}", image_path.display()))?; artifacts.push(BundleArtifact { name: image_name.clone(), path: image_path, archive_path, sha256, + size, }); } } @@ -551,6 +561,7 @@ "name": artifact.name, "file": artifact.archive_path, "sha256": artifact.sha256, + "size": artifact.size, }); // Add slot_targets from the manifest's os_artifacts From 
70094af7680b8bb2eba054c3277c2483ff52c518 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Wed, 11 Mar 2026 19:00:04 -0400 Subject: [PATCH 09/11] fix: always copy fresh artifacts instead of reusing cached build dir Stone's collect_artifacts() checked if an image already existed in the build dir's images/ subdirectory before looking in input dirs. Since the build directory persists between builds, this caused stale rootfs images to be bundled into the .aos even when a fresh rootfs was available in the input directory. The manifest's os_build_id was correct (read from the sysroot) but the actual rootfs binary was from a previous build. Remove the images_dir cache check so pre-existing images are always overwritten from input_dirs. --- src/commands/stone/bundle.rs | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/commands/stone/bundle.rs b/src/commands/stone/bundle.rs index 5d3f234..b453f1c 100644 --- a/src/commands/stone/bundle.rs +++ b/src/commands/stone/bundle.rs @@ -407,22 +407,17 @@ fn collect_artifacts( let image = find_image_in_manifest(manifest, image_key)?; let filename = image.out(); - // Check if it's already in images_dir - let in_images = images_dir.join(filename); - if in_images.exists() { - in_images - } else { - // Find in input dirs and copy to images/ - let src = find_file_in_dirs(filename, input_dirs).ok_or_else(|| { - format!( - "Image file '{}' for artifact '{}' not found in any input directory", - filename, artifact_name - ) - })?; - let dest = images_dir.join(filename); - copy_file(&src, &dest, verbose)?; - dest - } + // Always copy fresh from input_dirs to ensure we don't reuse + // stale cached artifacts from a previous build + let dest = images_dir.join(filename); + let src = find_file_in_dirs(filename, input_dirs).ok_or_else(|| { + format!( + "Image file '{}' for artifact '{}' not found in any input directory", + filename, artifact_name + ) + })?; + copy_file(&src, &dest, verbose)?; + dest 
}; + + let filename = image_path From 015e4b1d9288bdc4a704d5848f68ea6cf0c873c1 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Wed, 11 Mar 2026 21:36:03 -0400 Subject: [PATCH 10/11] feat: add sdboot-efi slot detection and efibootmgr slot action Add SdbootEfi variant for detecting active slot via GPT partition UUIDs, Efibootmgr variant for managing EFI boot entries per slot, and partition_type/partition_uuid fields on Partition struct. --- src/manifest.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/manifest.rs b/src/manifest.rs index 1d5f529..bd107fd 100644 --- a/src/manifest.rs +++ b/src/manifest.rs @@ -85,6 +85,11 @@ pub enum SlotDetection { UbootEnv { var: String }, #[serde(rename = "command")] Command { command: Vec<String> }, + #[serde(rename = "sdboot-efi")] + SdbootEfi { + /// Map from GPT partition UUID -> slot name (e.g. {"<uuid-a>": "a", "<uuid-b>": "b"}) + partitions: HashMap<String, String>, + }, } #[derive(Debug, Deserialize, Serialize)] @@ -105,6 +110,11 @@ pub enum SlotAction { devpath: String, slot_layouts: HashMap>, }, + #[serde(rename = "efibootmgr")] + Efibootmgr { + /// Map from slot name -> EFI boot entry label (e.g. 
{"a": "boot-a", "b": "boot-b"}) + slot_entries: HashMap, + }, } #[derive(Debug, Deserialize, Serialize, Clone)] @@ -278,6 +288,10 @@ pub struct Partition { #[serde(skip_serializing_if = "Option::is_none")] pub image: Option, #[serde(skip_serializing_if = "Option::is_none")] + pub partition_type: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub partition_uuid: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub offset: Option, #[serde(skip_serializing_if = "Option::is_none")] pub offset_unit: Option, From 7f85a856f1085760805a38724f0b1055ab3dbb26 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Wed, 11 Mar 2026 22:21:23 -0400 Subject: [PATCH 11/11] chore: bump version to 2.0.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d8dba5..6dc6198 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -647,7 +647,7 @@ checksum = "9d26fcce2f397e5488affdf681b20c030aa9faa877b92b1825e5d66b08d2fc33" [[package]] name = "stone" -version = "1.10.0" +version = "2.0.0" dependencies = [ "assert_cmd", "clap", diff --git a/Cargo.toml b/Cargo.toml index 649abf0..fba136e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "stone" -version = "1.10.0" +version = "2.0.0" edition = "2024" description = "A CLI for managing Avocado stones." homepage = "https://github.com/avocado-linux/stone"