diff --git a/crates/karapace-core/Cargo.toml b/crates/karapace-core/Cargo.toml new file mode 100644 index 0000000..ff234b5 --- /dev/null +++ b/crates/karapace-core/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "karapace-core" +description = "Build engine, lifecycle state machine, drift control, and concurrency for Karapace" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +blake3.workspace = true +serde.workspace = true +serde_json.workspace = true +thiserror.workspace = true +chrono.workspace = true +ctrlc.workspace = true +tracing.workspace = true +fs2.workspace = true +libc.workspace = true +karapace-schema = { path = "../karapace-schema" } +karapace-store = { path = "../karapace-store" } +karapace-runtime = { path = "../karapace-runtime" } +karapace-remote = { path = "../karapace-remote" } +tempfile.workspace = true + +[dev-dependencies] +criterion.workspace = true + +[[bin]] +name = "stress_test" +path = "src/bin/stress_test.rs" + +[[bench]] +name = "engine_benchmarks" +harness = false diff --git a/crates/karapace-core/benches/engine_benchmarks.rs b/crates/karapace-core/benches/engine_benchmarks.rs new file mode 100644 index 0000000..6e80069 --- /dev/null +++ b/crates/karapace-core/benches/engine_benchmarks.rs @@ -0,0 +1,202 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use std::fs; +use std::path::Path; + +fn create_test_manifest(dir: &Path) -> std::path::PathBuf { + let manifest_path = dir.join("karapace.toml"); + fs::write( + &manifest_path, + r#" +manifest_version = 1 +[base] +image = "rolling" +[system] +packages = ["git", "clang"] +[runtime] +backend = "mock" +"#, + ) + .unwrap(); + manifest_path +} + +fn bench_build(c: &mut Criterion) { + c.bench_function("engine_build_mock_2pkg", |b| { + b.iter_with_setup( + || { + let store_dir = tempfile::tempdir().unwrap(); + let project_dir = tempfile::tempdir().unwrap(); + let manifest = 
create_test_manifest(project_dir.path()); + let engine = karapace_core::Engine::new(store_dir.path()); + (store_dir, project_dir, manifest, engine) + }, + |(_sd, _pd, manifest, engine)| { + engine.build(&manifest).unwrap(); + }, + ); + }); +} + +fn bench_rebuild_unchanged(c: &mut Criterion) { + c.bench_function("engine_rebuild_unchanged", |b| { + b.iter_with_setup( + || { + let store_dir = tempfile::tempdir().unwrap(); + let project_dir = tempfile::tempdir().unwrap(); + let manifest = create_test_manifest(project_dir.path()); + let engine = karapace_core::Engine::new(store_dir.path()); + let result = engine.build(&manifest).unwrap(); + (store_dir, project_dir, manifest, engine, result) + }, + |(_sd, _pd, manifest, engine, _result)| { + engine.build(&manifest).unwrap(); + }, + ); + }); +} + +fn bench_commit(c: &mut Criterion) { + c.bench_function("engine_commit_100files", |b| { + b.iter_with_setup( + || { + let store_dir = tempfile::tempdir().unwrap(); + let project_dir = tempfile::tempdir().unwrap(); + let manifest = create_test_manifest(project_dir.path()); + let engine = karapace_core::Engine::new(store_dir.path()); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.to_string(); + + // Create 100 files in the upper directory to simulate drift + let upper = store_dir.path().join("env").join(&env_id).join("upper"); + fs::create_dir_all(&upper).unwrap(); + for i in 0..100 { + fs::write( + upper.join(format!("file_{i:03}.txt")), + format!("content {i}"), + ) + .unwrap(); + } + + (store_dir, project_dir, engine, env_id) + }, + |(_sd, _pd, engine, env_id)| { + engine.commit(&env_id).unwrap(); + }, + ); + }); +} + +fn bench_restore(c: &mut Criterion) { + c.bench_function("engine_restore_snapshot", |b| { + b.iter_with_setup( + || { + let store_dir = tempfile::tempdir().unwrap(); + let project_dir = tempfile::tempdir().unwrap(); + let manifest = create_test_manifest(project_dir.path()); + let engine = 
karapace_core::Engine::new(store_dir.path()); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.to_string(); + + // Create files and commit a snapshot + let upper = store_dir.path().join("env").join(&env_id).join("upper"); + fs::create_dir_all(&upper).unwrap(); + for i in 0..50 { + fs::write( + upper.join(format!("file_{i:03}.txt")), + format!("content {i}"), + ) + .unwrap(); + } + let snapshot_hash = engine.commit(&env_id).unwrap(); + + (store_dir, project_dir, engine, env_id, snapshot_hash) + }, + |(_sd, _pd, engine, env_id, snapshot_hash)| { + engine.restore(&env_id, &snapshot_hash).unwrap(); + }, + ); + }); +} + +fn bench_gc(c: &mut Criterion) { + c.bench_function("gc_50envs", |b| { + b.iter_with_setup( + || { + let store_dir = tempfile::tempdir().unwrap(); + let layout = karapace_store::StoreLayout::new(store_dir.path()); + layout.initialize().unwrap(); + let meta_store = karapace_store::MetadataStore::new(layout.clone()); + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + + // Create 50 environments: 25 live (ref_count=1), 25 dead (ref_count=0) + for i in 0..50 { + let obj_hash = obj_store.put(format!("obj-{i}").as_bytes()).unwrap(); + let meta = karapace_store::EnvMetadata { + env_id: format!("env_{i:04}").into(), + short_id: format!("env_{i:04}").into(), + name: None, + state: karapace_store::EnvState::Built, + manifest_hash: obj_hash.into(), + base_layer: "".into(), + dependency_layers: vec![], + policy_layer: None, + created_at: "2026-01-01T00:00:00Z".to_owned(), + updated_at: "2026-01-01T00:00:00Z".to_owned(), + ref_count: u32::from(i < 25), + checksum: None, + }; + meta_store.put(&meta).unwrap(); + } + + // Create 200 orphan objects + for i in 0..200 { + obj_store + .put(format!("orphan-object-{i}").as_bytes()) + .unwrap(); + } + + (store_dir, layout) + }, + |(_sd, layout)| { + let gc = karapace_store::GarbageCollector::new(layout); + gc.collect(false).unwrap(); + }, + ); + }); +} + +fn 
bench_verify_store(c: &mut Criterion) { + c.bench_function("verify_store_200objects", |b| { + b.iter_with_setup( + || { + let store_dir = tempfile::tempdir().unwrap(); + let layout = karapace_store::StoreLayout::new(store_dir.path()); + layout.initialize().unwrap(); + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + + // Create 200 objects + for i in 0..200 { + obj_store + .put(format!("verify-object-{i}").as_bytes()) + .unwrap(); + } + + (store_dir, layout) + }, + |(_sd, layout)| { + karapace_store::verify_store_integrity(&layout).unwrap(); + }, + ); + }); +} + +criterion_group!( + benches, + bench_build, + bench_rebuild_unchanged, + bench_commit, + bench_restore, + bench_gc, + bench_verify_store, +); +criterion_main!(benches); diff --git a/crates/karapace-core/karapace-core.cdx.json b/crates/karapace-core/karapace-core.cdx.json new file mode 100644 index 0000000..440cfc8 --- /dev/null +++ b/crates/karapace-core/karapace-core.cdx.json @@ -0,0 +1,2521 @@ +{ + "bomFormat": "CycloneDX", + "specVersion": "1.3", + "version": 1, + "serialNumber": "urn:uuid:7b41cdd6-f072-482a-ac5d-2bdad545998d", + "metadata": { + "timestamp": "2026-02-22T14:03:10.565669265Z", + "tools": [ + { + "vendor": "CycloneDX", + "name": "cargo-cyclonedx", + "version": "0.5.5" + } + ], + "component": { + "type": "application", + "bom-ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-core#0.1.0", + "name": "karapace-core", + "version": "0.1.0", + "description": "Build engine, lifecycle state machine, drift control, and concurrency for Karapace", + "scope": "required", + "licenses": [ + { + "expression": "EUPL-1.2" + } + ], + "purl": "pkg:cargo/karapace-core@0.1.0?download_url=file://.", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/marcoallegretti/karapace" + } + ], + "components": [ + { + "type": "library", + "bom-ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-core#0.1.0 bin-target-0", + "name": "karapace_core", + 
"version": "0.1.0", + "purl": "pkg:cargo/karapace-core@0.1.0?download_url=file://.#src/lib.rs" + }, + { + "type": "application", + "bom-ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-core#0.1.0 bin-target-1", + "name": "stress_test", + "version": "0.1.0", + "purl": "pkg:cargo/karapace-core@0.1.0?download_url=file://.#src/bin/stress_test.rs" + } + ] + } + }, + "components": [ + { + "type": "library", + "bom-ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-remote#0.1.0", + "name": "karapace-remote", + "version": "0.1.0", + "description": "Remote content-addressable store for Karapace environment sharing", + "scope": "required", + "licenses": [ + { + "expression": "EUPL-1.2" + } + ], + "purl": "pkg:cargo/karapace-remote@0.1.0?download_url=file://../karapace-remote", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/marcoallegretti/karapace" + } + ] + }, + { + "type": "library", + "bom-ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-runtime#0.1.0", + "name": "karapace-runtime", + "version": "0.1.0", + "description": "Container runtime backends, image management, sandbox, and host integration for Karapace", + "scope": "required", + "licenses": [ + { + "expression": "EUPL-1.2" + } + ], + "purl": "pkg:cargo/karapace-runtime@0.1.0?download_url=file://../karapace-runtime", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/marcoallegretti/karapace" + } + ] + }, + { + "type": "library", + "bom-ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-schema#0.1.0", + "name": "karapace-schema", + "version": "0.1.0", + "description": "Manifest parsing, normalization, identity hashing, and lock file for Karapace", + "scope": "required", + "licenses": [ + { + "expression": "EUPL-1.2" + } + ], + "purl": "pkg:cargo/karapace-schema@0.1.0?download_url=file://../karapace-schema", + "externalReferences": [ + { + "type": "vcs", + "url": 
"https://github.com/marcoallegretti/karapace" + } + ] + }, + { + "type": "library", + "bom-ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-store#0.1.0", + "name": "karapace-store", + "version": "0.1.0", + "description": "Content-addressable store, metadata, layers, GC, and integrity for Karapace", + "scope": "required", + "licenses": [ + { + "expression": "EUPL-1.2" + } + ], + "purl": "pkg:cargo/karapace-store@0.1.0?download_url=file://../karapace-store", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/marcoallegretti/karapace" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#adler2@2.0.1", + "name": "adler2", + "version": "2.0.1", + "description": "A simple clean-room implementation of the Adler-32 checksum", + "scope": "required", + "licenses": [ + { + "expression": "0BSD OR MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/adler2@2.0.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/adler2/" + }, + { + "type": "vcs", + "url": "https://github.com/oyvindln/adler2" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#arrayref@0.3.9", + "name": "arrayref", + "version": "0.3.9", + "description": "Macros to take array references of slices", + "scope": "required", + "licenses": [ + { + "expression": "BSD-2-Clause" + } + ], + "purl": "pkg:cargo/arrayref@0.3.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/arrayref" + }, + { + "type": "vcs", + "url": "https://github.com/droundy/arrayref" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#arrayvec@0.7.6", + "name": "arrayvec", + "version": "0.7.6", + "description": "A vector with fixed capacity, backed by an array (it can be stored on the stack too). 
Implements fixed capacity ArrayVec and ArrayString.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/arrayvec@0.7.6", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/arrayvec/" + }, + { + "type": "vcs", + "url": "https://github.com/bluss/arrayvec" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0", + "name": "autocfg", + "version": "1.5.0", + "description": "Automatic cfg for Rust compiler features", + "scope": "excluded", + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/autocfg@1.5.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/autocfg/" + }, + { + "type": "vcs", + "url": "https://github.com/cuviper/autocfg" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "name": "base64", + "version": "0.22.1", + "description": "encodes and decodes base64 as bytes or utf8", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/base64@0.22.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/base64" + }, + { + "type": "vcs", + "url": "https://github.com/marshallpierce/rust-base64" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.11.0", + "name": "bitflags", + "version": "2.11.0", + "description": "A macro to generate structures which behave like bitflags. 
", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/bitflags@2.11.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/bitflags" + }, + { + "type": "website", + "url": "https://github.com/bitflags/bitflags" + }, + { + "type": "vcs", + "url": "https://github.com/bitflags/bitflags" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "name": "blake3", + "version": "1.8.3", + "description": "the BLAKE3 hash function", + "scope": "required", + "licenses": [ + { + "expression": "CC0-1.0 OR Apache-2.0 OR Apache-2.0 WITH LLVM-exception" + } + ], + "purl": "pkg:cargo/blake3@1.8.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/blake3" + }, + { + "type": "vcs", + "url": "https://github.com/BLAKE3-team/BLAKE3" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "name": "bytes", + "version": "1.11.1", + "description": "Types and traits for working with bytes", + "scope": "required", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/bytes@1.11.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/tokio-rs/bytes" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.56", + "name": "cc", + "version": "1.2.56", + "description": "A build-time dependency for Cargo build scripts to assist in invoking the native C compiler to compile native C code into a static archive to be linked into Rust code. 
", + "scope": "excluded", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cc@1.2.56", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cc" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/cc-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/cc-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "name": "cfg-if", + "version": "1.0.4", + "description": "A macro to ergonomically define an item depending on a large number of #[cfg] parameters. Structured like an if-else chain, the first matching branch is the item that gets emitted. ", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cfg-if@1.0.4", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/cfg-if" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cfg_aliases@0.2.1", + "name": "cfg_aliases", + "version": "0.2.1", + "description": "A tiny utility to help save you a lot of effort with long winded `#[cfg()]` checks.", + "scope": "excluded", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/cfg_aliases@0.2.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cfg_aliases" + }, + { + "type": "website", + "url": "https://github.com/katharostech/cfg_aliases" + }, + { + "type": "vcs", + "url": "https://github.com/katharostech/cfg_aliases" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "name": "chrono", + "version": "0.4.43", + "description": "Date and time library for Rust", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/chrono@0.4.43", + "externalReferences": [ + { + "type": "documentation", + 
"url": "https://docs.rs/chrono/" + }, + { + "type": "website", + "url": "https://github.com/chronotope/chrono" + }, + { + "type": "vcs", + "url": "https://github.com/chronotope/chrono" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#constant_time_eq@0.4.2", + "name": "constant_time_eq", + "version": "0.4.2", + "description": "Compares two equal-sized byte strings in constant time.", + "scope": "required", + "licenses": [ + { + "expression": "CC0-1.0 OR MIT-0 OR Apache-2.0" + } + ], + "purl": "pkg:cargo/constant_time_eq@0.4.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/constant_time_eq" + }, + { + "type": "vcs", + "url": "https://github.com/cesarb/constant_time_eq" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17", + "name": "cpufeatures", + "version": "0.2.17", + "description": "Lightweight runtime CPU feature detection for aarch64, loongarch64, and x86/x86_64 targets, with no_std support and support for mobile targets including Android and iOS ", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cpufeatures@0.2.17", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cpufeatures" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/utils" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#crc32fast@1.5.0", + "name": "crc32fast", + "version": "1.5.0", + "description": "Fast, SIMD-accelerated CRC32 (IEEE) checksum computation", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/crc32fast@1.5.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/srijs/rust-crc32fast" + } + ] + }, + { + "type": "library", + "bom-ref": 
"registry+https://github.com/rust-lang/crates.io-index#ctrlc@3.5.2", + "name": "ctrlc", + "version": "3.5.2", + "description": "Easy Ctrl-C handler for Rust projects", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/ctrlc@3.5.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://detegr.github.io/doc/ctrlc" + }, + { + "type": "website", + "url": "https://github.com/Detegr/rust-ctrlc" + }, + { + "type": "vcs", + "url": "https://github.com/Detegr/rust-ctrlc.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#equivalent@1.0.2", + "name": "equivalent", + "version": "1.0.2", + "description": "Traits for key comparison in maps.", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/equivalent@1.0.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/indexmap-rs/equivalent" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#errno@0.3.14", + "name": "errno", + "version": "0.3.14", + "description": "Cross-platform interface to the `errno` variable.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/errno@0.3.14", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/errno" + }, + { + "type": "vcs", + "url": "https://github.com/lambda-fairy/rust-errno" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#fastrand@2.3.0", + "name": "fastrand", + "version": "2.3.0", + "description": "A simple and fast random number generator", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/fastrand@2.3.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/smol-rs/fastrand" + } + ] + }, + { + 
"type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#filetime@0.2.27", + "name": "filetime", + "version": "0.2.27", + "description": "Platform-agnostic accessors of timestamps in File metadata ", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/filetime@0.2.27", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/filetime" + }, + { + "type": "website", + "url": "https://github.com/alexcrichton/filetime" + }, + { + "type": "vcs", + "url": "https://github.com/alexcrichton/filetime" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#find-msvc-tools@0.1.9", + "name": "find-msvc-tools", + "version": "0.1.9", + "description": "Find windows-specific tools, read MSVC versions from the registry and from COM interfaces", + "scope": "excluded", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/find-msvc-tools@0.1.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/find-msvc-tools" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/cc-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#flate2@1.1.9", + "name": "flate2", + "version": "1.1.9", + "description": "DEFLATE compression and decompression exposed as Read/BufRead/Write streams. Supports miniz_oxide and multiple zlib implementations. Supports zlib, gzip, and raw deflate streams. 
", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/flate2@1.1.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/flate2" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/flate2-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/flate2-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#fs2@0.4.3", + "name": "fs2", + "version": "0.4.3", + "description": "Cross-platform file locks and file duplication.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/fs2@0.4.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/fs2" + }, + { + "type": "vcs", + "url": "https://github.com/danburkert/fs2-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.2.17", + "name": "getrandom", + "version": "0.2.17", + "description": "A small cross-platform library for retrieving random data from system source", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/getrandom@0.2.17", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/getrandom" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/getrandom" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.4.1", + "name": "getrandom", + "version": "0.4.1", + "description": "A small cross-platform library for retrieving random data from system source", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/getrandom@0.4.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/getrandom" + }, + { + "type": "vcs", + "url": 
"https://github.com/rust-random/getrandom" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#hashbrown@0.16.1", + "name": "hashbrown", + "version": "0.16.1", + "description": "A Rust port of Google's SwissTable hash map", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/hashbrown@0.16.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/hashbrown" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "name": "http", + "version": "1.4.0", + "description": "A set of types for representing HTTP requests and responses. ", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/http@1.4.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/http" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/http" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#httparse@1.10.1", + "name": "httparse", + "version": "1.10.1", + "description": "A tiny, safe, speedy, zero-copy HTTP/1.x parser.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/httparse@1.10.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/httparse" + }, + { + "type": "vcs", + "url": "https://github.com/seanmonstar/httparse" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#iana-time-zone@0.1.65", + "name": "iana-time-zone", + "version": "0.1.65", + "description": "get the IANA time zone for the current system", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/iana-time-zone@0.1.65", + "externalReferences": [ + { + "type": "vcs", + "url": 
"https://github.com/strawlab/iana-time-zone" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#indexmap@2.13.0", + "name": "indexmap", + "version": "2.13.0", + "description": "A hash table with consistent order and fast iteration.", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/indexmap@2.13.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/indexmap/" + }, + { + "type": "vcs", + "url": "https://github.com/indexmap-rs/indexmap" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "name": "itoa", + "version": "1.0.17", + "description": "Fast integer primitive to string conversion", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/itoa@1.0.17", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/itoa" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/itoa" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180", + "name": "libc", + "version": "0.2.180", + "description": "Raw FFI bindings to platform libraries like libc.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/libc@0.2.180", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/libc" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#linux-raw-sys@0.11.0", + "name": "linux-raw-sys", + "version": "0.11.0", + "description": "Generated bindings for Linux's userspace API", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/linux-raw-sys@0.11.0", + "externalReferences": [ + { + "type": 
"documentation", + "url": "https://docs.rs/linux-raw-sys" + }, + { + "type": "vcs", + "url": "https://github.com/sunfishcode/linux-raw-sys" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "name": "log", + "version": "0.4.29", + "description": "A lightweight logging facade for Rust ", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/log@0.4.29", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/log" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/log" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "name": "memchr", + "version": "2.8.0", + "description": "Provides extremely fast (uses SIMD on x86_64, aarch64 and wasm32) routines for 1, 2 or 3 byte search and single substring search. ", + "scope": "required", + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/memchr@2.8.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/memchr/" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/memchr" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/memchr" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#miniz_oxide@0.8.9", + "name": "miniz_oxide", + "version": "0.8.9", + "description": "DEFLATE compression and decompression library rewritten in Rust based on miniz", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Zlib OR Apache-2.0" + } + ], + "purl": "pkg:cargo/miniz_oxide@0.8.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/miniz_oxide" + }, + { + "type": "website", + "url": "https://github.com/Frommi/miniz_oxide/tree/master/miniz_oxide" + }, + { + "type": "vcs", + "url": 
"https://github.com/Frommi/miniz_oxide/tree/master/miniz_oxide" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#nix@0.31.1", + "name": "nix", + "version": "0.31.1", + "description": "Rust friendly bindings to *nix APIs", + "scope": "required", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/nix@0.31.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/nix-rust/nix" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "name": "num-traits", + "version": "0.2.19", + "description": "Numeric traits for generic mathematics", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/num-traits@0.2.19", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/num-traits" + }, + { + "type": "website", + "url": "https://github.com/rust-num/num-traits" + }, + { + "type": "vcs", + "url": "https://github.com/rust-num/num-traits" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "name": "once_cell", + "version": "1.21.3", + "description": "Single assignment cells and lazy values.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/once_cell@1.21.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/once_cell" + }, + { + "type": "vcs", + "url": "https://github.com/matklad/once_cell" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "name": "percent-encoding", + "version": "2.3.2", + "description": "Percent encoding and decoding", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/percent-encoding@2.3.2", + 
"externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/servo/rust-url/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "name": "pin-project-lite", + "version": "0.2.16", + "description": "A lightweight version of pin-project written with declarative macros. ", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/pin-project-lite@0.2.16", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/taiki-e/pin-project-lite" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "name": "proc-macro2", + "version": "1.0.106", + "description": "A substitute implementation of the compiler's `proc_macro` API to decouple token-based libraries from the procedural macro use case.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/proc-macro2@1.0.106", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/proc-macro2" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/proc-macro2" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "name": "quote", + "version": "1.0.44", + "description": "Quasi-quoting macro quote!(...)", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/quote@1.0.44", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/quote/" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/quote" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14", + "name": "ring", + "version": "0.17.14", + "description": "An experiment.", + "scope": "required", + "licenses": [ + { + "expression": 
"Apache-2.0 AND ISC" + } + ], + "purl": "pkg:cargo/ring@0.17.14", + "externalReferences": [ + { + "type": "other", + "url": "ring_core_0_17_14_" + }, + { + "type": "vcs", + "url": "https://github.com/briansmith/ring" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustix@1.1.3", + "name": "rustix", + "version": "1.1.3", + "description": "Safe Rust bindings to POSIX/Unix/Linux/Winsock-like syscalls", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/rustix@1.1.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rustix" + }, + { + "type": "vcs", + "url": "https://github.com/bytecodealliance/rustix" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "name": "rustls-pki-types", + "version": "1.14.0", + "description": "Shared types for the rustls PKI ecosystem", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rustls-pki-types@1.14.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rustls-pki-types" + }, + { + "type": "website", + "url": "https://github.com/rustls/pki-types" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/pki-types" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-webpki@0.103.9", + "name": "rustls-webpki", + "version": "0.103.9", + "description": "Web PKI X.509 Certificate Verification.", + "scope": "required", + "licenses": [ + { + "expression": "ISC" + } + ], + "purl": "pkg:cargo/rustls-webpki@0.103.9", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rustls/webpki" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + 
"name": "rustls", + "version": "0.23.36", + "description": "Rustls is a modern TLS library written in Rust.", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 OR ISC OR MIT" + } + ], + "purl": "pkg:cargo/rustls@0.23.36", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/rustls/rustls" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/rustls" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "name": "serde", + "version": "1.0.228", + "description": "A generic serialization/deserialization framework", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde@1.0.228", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde" + }, + { + "type": "website", + "url": "https://serde.rs" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/serde" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "name": "serde_core", + "version": "1.0.228", + "description": "Serde traits only, with no support for derive -- use the `serde` crate instead", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_core@1.0.228", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde_core" + }, + { + "type": "website", + "url": "https://serde.rs" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/serde" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_derive@1.0.228", + "name": "serde_derive", + "version": "1.0.228", + "description": "Macros 1.1 implementation of #[derive(Serialize, Deserialize)]", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": 
"pkg:cargo/serde_derive@1.0.228", + "externalReferences": [ + { + "type": "documentation", + "url": "https://serde.rs/derive.html" + }, + { + "type": "website", + "url": "https://serde.rs" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/serde" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "name": "serde_json", + "version": "1.0.149", + "description": "A JSON serialization file format", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_json@1.0.149", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde_json" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/json" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_spanned@0.6.9", + "name": "serde_spanned", + "version": "0.6.9", + "description": "Serde-compatible spanned Value", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_spanned@0.6.9", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/toml-rs/toml" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#shlex@1.3.0", + "name": "shlex", + "version": "1.3.0", + "description": "Split a string into shell words, like Python's shlex.", + "scope": "excluded", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/shlex@1.3.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/comex/rust-shlex" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#simd-adler32@0.3.8", + "name": "simd-adler32", + "version": "0.3.8", + "description": "A SIMD-accelerated Adler-32 hash algorithm implementation.", + "scope": "required", + "licenses": [ + { + "expression": 
"MIT" + } + ], + "purl": "pkg:cargo/simd-adler32@0.3.8", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/mcountryman/simd-adler32" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#subtle@2.6.1", + "name": "subtle", + "version": "2.6.1", + "description": "Pure-Rust traits and utilities for constant-time cryptographic implementations.", + "scope": "required", + "licenses": [ + { + "expression": "BSD-3-Clause" + } + ], + "purl": "pkg:cargo/subtle@2.6.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/subtle" + }, + { + "type": "website", + "url": "https://dalek.rs/" + }, + { + "type": "vcs", + "url": "https://github.com/dalek-cryptography/subtle" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.117", + "name": "syn", + "version": "2.0.117", + "description": "Parser for Rust source code", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/syn@2.0.117", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/syn" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/syn" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tar@0.4.44", + "name": "tar", + "version": "0.4.44", + "description": "A Rust implementation of a TAR file reader and writer. This library does not currently handle compression, but it is abstract over all I/O readers and writers. Additionally, great lengths are taken to ensure that the entire contents are never required to be entirely resident in memory all at once. 
", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/tar@0.4.44", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/tar" + }, + { + "type": "website", + "url": "https://github.com/alexcrichton/tar-rs" + }, + { + "type": "vcs", + "url": "https://github.com/alexcrichton/tar-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "name": "tempfile", + "version": "3.25.0", + "description": "A library for managing temporary files and directories.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/tempfile@3.25.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/tempfile" + }, + { + "type": "website", + "url": "https://stebalien.com/projects/tempfile-rs/" + }, + { + "type": "vcs", + "url": "https://github.com/Stebalien/tempfile" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@2.0.18", + "name": "thiserror-impl", + "version": "2.0.18", + "description": "Implementation detail of the `thiserror` crate", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/thiserror-impl@2.0.18", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/dtolnay/thiserror" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "name": "thiserror", + "version": "2.0.18", + "description": "derive(Error)", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/thiserror@2.0.18", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/thiserror" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/thiserror" + } + ] + }, + { + "type": 
"library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#toml@0.8.23", + "name": "toml", + "version": "0.8.23", + "description": "A native Rust encoder and decoder of TOML-formatted files and streams. Provides implementations of the standard Serialize/Deserialize traits for TOML data to facilitate deserializing and serializing Rust structures. ", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/toml@0.8.23", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/toml-rs/toml" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#toml_datetime@0.6.11", + "name": "toml_datetime", + "version": "0.6.11", + "description": "A TOML-compatible datetime type", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/toml_datetime@0.6.11", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/toml-rs/toml" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#toml_edit@0.22.27", + "name": "toml_edit", + "version": "0.22.27", + "description": "Yet another format-preserving TOML parser.", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/toml_edit@0.22.27", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/toml-rs/toml" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#toml_write@0.1.2", + "name": "toml_write", + "version": "0.1.2", + "description": "A low-level interface for writing out TOML ", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/toml_write@0.1.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/toml-rs/toml" + } + ] + }, + { + "type": "library", + "bom-ref": 
"registry+https://github.com/rust-lang/crates.io-index#tracing-attributes@0.1.31", + "name": "tracing-attributes", + "version": "0.1.31", + "description": "Procedural macro attributes for automatically instrumenting functions. ", + "scope": "required", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-attributes@0.1.31", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36", + "name": "tracing-core", + "version": "0.1.36", + "description": "Core primitives for application-level tracing. ", + "scope": "required", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-core@0.1.36", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "name": "tracing", + "version": "0.1.44", + "description": "Application-level tracing for Rust. 
", + "scope": "required", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing@0.1.44", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.24", + "name": "unicode-ident", + "version": "1.0.24", + "description": "Determine whether characters have the XID_Start or XID_Continue properties according to Unicode Standard Annex #31", + "scope": "required", + "licenses": [ + { + "expression": "(MIT OR Apache-2.0) AND Unicode-3.0" + } + ], + "purl": "pkg:cargo/unicode-ident@1.0.24", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/unicode-ident" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/unicode-ident" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0", + "name": "untrusted", + "version": "0.9.0", + "description": "Safe, fast, zero-panic, zero-crashing, zero-allocation parsing of untrusted inputs in Rust.", + "scope": "required", + "licenses": [ + { + "expression": "ISC" + } + ], + "purl": "pkg:cargo/untrusted@0.9.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://briansmith.org/rustdoc/untrusted/" + }, + { + "type": "vcs", + "url": "https://github.com/briansmith/untrusted" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ureq-proto@0.5.3", + "name": "ureq-proto", + "version": "0.5.3", + "description": "ureq support crate", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/ureq-proto@0.5.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/algesten/ureq-proto" + } + ] + }, + { + "type": "library", + "bom-ref": 
"registry+https://github.com/rust-lang/crates.io-index#ureq@3.2.0", + "name": "ureq", + "version": "3.2.0", + "description": "Simple, safe HTTP client", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/ureq@3.2.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/algesten/ureq" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#utf-8@0.7.6", + "name": "utf-8", + "version": "0.7.6", + "description": "Incremental, zero-copy UTF-8 decoding with error handling", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/utf-8@0.7.6", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/SimonSapin/rust-utf8" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#webpki-roots@1.0.6", + "name": "webpki-roots", + "version": "1.0.6", + "description": "Mozilla's CA root certificates for use with webpki", + "scope": "required", + "licenses": [ + { + "expression": "CDLA-Permissive-2.0" + } + ], + "purl": "pkg:cargo/webpki-roots@1.0.6", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/rustls/webpki-roots" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/webpki-roots" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#winnow@0.7.14", + "name": "winnow", + "version": "0.7.14", + "description": "A byte-oriented, zero-copy, parser combinators library", + "scope": "required", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/winnow@0.7.14", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/winnow-rs/winnow" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#xattr@1.6.1", + "name": "xattr", + "version": "1.6.1", + 
"description": "unix extended filesystem attributes", + "scope": "required", + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/xattr@1.6.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/xattr" + }, + { + "type": "vcs", + "url": "https://github.com/Stebalien/xattr" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2", + "name": "zeroize", + "version": "1.8.2", + "description": "Securely clear secrets from memory with a simple trait built on stable Rust primitives which guarantee memory is zeroed using an operation will not be 'optimized away' by the compiler. Uses a portable pure Rust implementation that works everywhere, even WASM! ", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/zeroize@1.8.2", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/RustCrypto/utils/tree/master/zeroize" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/utils" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zmij@1.0.21", + "name": "zmij", + "version": "1.0.21", + "description": "A double-to-string conversion algorithm based on Schubfach and yy", + "scope": "required", + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/zmij@1.0.21", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/zmij" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/zmij" + } + ] + } + ], + "dependencies": [ + { + "ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-core#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "registry+https://github.com/rust-lang/crates.io-index#ctrlc@3.5.2", + 
"registry+https://github.com/rust-lang/crates.io-index#fs2@0.4.3", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-remote#0.1.0", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-runtime#0.1.0", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-schema#0.1.0", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-store#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-remote#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-store#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "registry+https://github.com/rust-lang/crates.io-index#ureq@3.2.0" + ] + }, + { + "ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-runtime#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-schema#0.1.0", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-store#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180", + 
"registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-schema#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#toml@0.8.23" + ] + }, + { + "ref": "path+file:///home/lateuf/Projects/Karapace/crates/karapace-store#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "registry+https://github.com/rust-lang/crates.io-index#fs2@0.4.3", + "path+file:///home/lateuf/Projects/Karapace/crates/karapace-schema#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#tar@0.4.44", + "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#adler2@2.0.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#arrayref@0.3.9", + "dependsOn": [] + 
}, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#arrayvec@0.7.6", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.11.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#arrayref@0.3.9", + "registry+https://github.com/rust-lang/crates.io-index#arrayvec@0.7.6", + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.56", + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#constant_time_eq@0.4.2", + "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.56", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#find-msvc-tools@0.1.9", + "registry+https://github.com/rust-lang/crates.io-index#shlex@1.3.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cfg_aliases@0.2.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#iana-time-zone@0.1.65", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": 
"registry+https://github.com/rust-lang/crates.io-index#constant_time_eq@0.4.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#crc32fast@1.5.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ctrlc@3.5.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#nix@0.31.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#equivalent@1.0.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#errno@0.3.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#fastrand@2.3.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#filetime@0.2.27", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#find-msvc-tools@0.1.9", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#flate2@1.1.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#crc32fast@1.5.0", + "registry+https://github.com/rust-lang/crates.io-index#miniz_oxide@0.8.9" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#fs2@0.4.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.2.17", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + 
"registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.4.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#hashbrown@0.16.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#httparse@1.10.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#iana-time-zone@0.1.65", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#indexmap@2.13.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#equivalent@1.0.2", + "registry+https://github.com/rust-lang/crates.io-index#hashbrown@0.16.1", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#linux-raw-sys@0.11.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#miniz_oxide@0.8.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#adler2@2.0.1", + 
"registry+https://github.com/rust-lang/crates.io-index#simd-adler32@0.3.8" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#nix@0.31.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.11.0", + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#cfg_aliases@0.2.1", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.24" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.56", + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.2.17", + "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustix@1.1.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.11.0", + 
"registry+https://github.com/rust-lang/crates.io-index#errno@0.3.14", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180", + "registry+https://github.com/rust-lang/crates.io-index#linux-raw-sys@0.11.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-webpki@0.103.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14", + "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14", + "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "registry+https://github.com/rust-lang/crates.io-index#rustls-webpki@0.103.9", + "registry+https://github.com/rust-lang/crates.io-index#subtle@2.6.1", + "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_derive@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_derive@1.0.228", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + 
"registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.117" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#zmij@1.0.21" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_spanned@0.6.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#shlex@1.3.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#simd-adler32@0.3.8", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#subtle@2.6.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.117", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.24" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tar@0.4.44", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#filetime@0.2.27", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.180", + "registry+https://github.com/rust-lang/crates.io-index#xattr@1.6.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#fastrand@2.3.0", + "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.4.1", + 
"registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#rustix@1.1.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@2.0.18", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.117" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@2.0.18" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#toml@0.8.23", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_spanned@0.6.9", + "registry+https://github.com/rust-lang/crates.io-index#toml_datetime@0.6.11", + "registry+https://github.com/rust-lang/crates.io-index#toml_edit@0.22.27" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#toml_datetime@0.6.11", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#toml_edit@0.22.27", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#indexmap@2.13.0", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_spanned@0.6.9", + "registry+https://github.com/rust-lang/crates.io-index#toml_datetime@0.6.11", + "registry+https://github.com/rust-lang/crates.io-index#toml_write@0.1.2", + "registry+https://github.com/rust-lang/crates.io-index#winnow@0.7.14" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#toml_write@0.1.2", + "dependsOn": [] + }, + { + "ref": 
"registry+https://github.com/rust-lang/crates.io-index#tracing-attributes@0.1.31", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.117" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#tracing-attributes@0.1.31", + "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.24", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ureq-proto@0.5.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#httparse@1.10.1", + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ureq@3.2.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "registry+https://github.com/rust-lang/crates.io-index#flate2@1.1.9", + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + 
"registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "registry+https://github.com/rust-lang/crates.io-index#ureq-proto@0.5.3", + "registry+https://github.com/rust-lang/crates.io-index#utf-8@0.7.6", + "registry+https://github.com/rust-lang/crates.io-index#webpki-roots@1.0.6" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#utf-8@0.7.6", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#webpki-roots@1.0.6", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#winnow@0.7.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#xattr@1.6.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#rustix@1.1.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zmij@1.0.21", + "dependsOn": [] + } + ] +} \ No newline at end of file diff --git a/crates/karapace-core/src/bin/stress_test.rs b/crates/karapace-core/src/bin/stress_test.rs new file mode 100644 index 0000000..f7c2608 --- /dev/null +++ b/crates/karapace-core/src/bin/stress_test.rs @@ -0,0 +1,238 @@ +//! Long-running stress test for the Karapace engine. +//! +//! Runs hundreds of build/commit/destroy/gc cycles with the mock backend, +//! checking for resource leaks (orphaned files, stale WAL entries, metadata +//! corruption) after every cycle. +//! +//! Usage: +//! 
cargo run --bin stress_test -- [--cycles N]

use karapace_core::Engine;
use karapace_store::{verify_store_integrity, GarbageCollector, StoreLayout};
use std::fs;
use std::path::Path;
use std::time::{Duration, Instant};

/// Write a minimal manifest that selects the mock runtime backend into `dir`,
/// returning the path of the created `karapace.toml`.
fn write_manifest(dir: &Path) -> std::path::PathBuf {
    let manifest_path = dir.join("karapace.toml");
    fs::write(
        &manifest_path,
        r#"manifest_version = 1
[base]
image = "rolling"
[system]
packages = ["git", "clang"]
[runtime]
backend = "mock"
"#,
    )
    .expect("write manifest");
    manifest_path
}

/// Count regular files directly inside `dir` (non-recursive).
///
/// A missing or unreadable directory counts as empty — the stress test only
/// cares about leaks, and "nothing there" is the healthy state.
fn count_files_in(dir: &Path) -> usize {
    if !dir.exists() {
        return 0;
    }
    fs::read_dir(dir)
        .map(|rd| {
            rd.filter_map(Result::ok)
                .filter(|e| e.file_type().map(|t| t.is_file()).unwrap_or(false))
                .count()
        })
        .unwrap_or(0)
}

/// Cumulative wall-clock time spent in each engine operation across all cycles.
struct Timings {
    build: Duration,
    commit: Duration,
    destroy: Duration,
    gc: Duration,
}

/// Run one build → commit → destroy cycle (plus GC every 10th cycle),
/// accumulating per-operation timings.
///
/// Build, destroy, and GC failures abort the cycle with `Err`; a commit
/// failure is only logged because the rest of the cycle can still proceed.
fn run_cycle(
    engine: &Engine,
    manifest_path: &Path,
    layout: &StoreLayout,
    cycle: usize,
    timings: &mut Timings,
) -> Result<(), String> {
    let t0 = Instant::now();
    let build_result = engine
        .build(manifest_path)
        .map_err(|e| format!("cycle {cycle}: BUILD FAILED: {e}"))?;
    timings.build += t0.elapsed();
    let env_id = build_result.identity.env_id.to_string();

    let t0 = Instant::now();
    if let Err(e) = engine.commit(&env_id) {
        eprintln!(" cycle {cycle}: COMMIT FAILED: {e}");
    }
    timings.commit += t0.elapsed();

    let t0 = Instant::now();
    engine
        .destroy(&env_id)
        .map_err(|e| format!("cycle {cycle}: DESTROY FAILED: {e}"))?;
    timings.destroy += t0.elapsed();

    if cycle.is_multiple_of(10) {
        let gc = GarbageCollector::new(layout.clone());
        let t0 = Instant::now();
        match gc.collect(false) {
            Ok(report) => {
                timings.gc += t0.elapsed();
                if cycle.is_multiple_of(100) {
                    println!(
                        " cycle {cycle}: GC collected {} objects, {} layers",
                        report.removed_objects, report.removed_layers
                    );
                }
            }
            Err(e) => return Err(format!("cycle {cycle}: GC FAILED: {e}")),
        }
    }
    Ok(())
}

/// Verify store integrity and look for leaked WAL/metadata entries.
///
/// Returns the number of hard failures found. A metadata leak is printed but
/// intentionally NOT counted as a failure — it is advisory only.
fn check_health(layout: &StoreLayout, wal_dir: &Path, cycle: usize) -> u64 {
    let mut failures = 0u64;
    match verify_store_integrity(layout) {
        Ok(report) => {
            if !report.failed.is_empty() {
                eprintln!(
                    " cycle {cycle}: INTEGRITY FAILURE: {} objects failed",
                    report.failed.len()
                );
                failures += 1;
            }
        }
        Err(e) => {
            eprintln!(" cycle {cycle}: INTEGRITY CHECK ERROR: {e}");
            failures += 1;
        }
    }
    // Every cycle destroys its environment, so any WAL entry still on disk
    // here is stale by definition.
    let wal_files = count_files_in(wal_dir);
    if wal_files > 0 {
        eprintln!(" cycle {cycle}: WAL LEAK: {wal_files} stale entries");
        failures += 1;
    }
    let meta_count = count_files_in(&layout.metadata_dir());
    if meta_count > 0 {
        eprintln!(" cycle {cycle}: METADATA LEAK: {meta_count} entries after full destroy+gc");
    }
    failures
}

/// Print the final summary (timings, leak counters, a last integrity pass)
/// and exit with status 1 when any failure or WAL leak was observed.
fn print_report(
    cycles: usize,
    failures: u64,
    timings: &Timings,
    layout: &StoreLayout,
    wal_dir: &Path,
) {
    let final_integrity = verify_store_integrity(layout);
    let wal_files = count_files_in(wal_dir);

    println!();
    println!("============================================");
    println!("Results: {cycles} cycles, {failures} failures");
    println!(
        " build: {:.3}s total, {:.3}ms avg",
        timings.build.as_secs_f64(),
        timings.build.as_secs_f64() * 1000.0 / cycles as f64
    );
    println!(
        " commit: {:.3}s total, {:.3}ms avg",
        timings.commit.as_secs_f64(),
        timings.commit.as_secs_f64() * 1000.0 / cycles as f64
    );
    println!(
        " destroy: {:.3}s total, {:.3}ms avg",
        timings.destroy.as_secs_f64(),
        timings.destroy.as_secs_f64() * 1000.0 / cycles as f64
    );
    println!(" gc: {:.3}s total", timings.gc.as_secs_f64());
    println!(" WAL entries remaining: {wal_files}");
    println!(
        " metadata remaining: {}",
        count_files_in(&layout.metadata_dir())
    );
    println!(
        " objects remaining: {}",
        count_files_in(&layout.objects_dir())
    );
    println!(
        " layers remaining: {}",
        count_files_in(&layout.layers_dir())
    );
    match final_integrity {
        Ok(report) => println!(
            " integrity: {} checked, {} passed, {} failed",
            report.checked,
            report.passed,
            report.failed.len()
        ),
        Err(e) => println!(" integrity: ERROR: {e}"),
    }

    if failures > 0 || wal_files > 0 {
        eprintln!("\nSTRESS TEST FAILED");
        std::process::exit(1);
    } else {
        println!("\nSTRESS TEST PASSED");
    }
}

fn main() {
    // Minimal hand-rolled flag parsing: `--cycles N`, defaulting to 500.
    let args: Vec<String> = std::env::args().collect();
    let cycles: usize = args
        .iter()
        .position(|a| a == "--cycles")
        .and_then(|i| args.get(i + 1))
        .and_then(|s| s.parse().ok())
        .unwrap_or(500);

    println!("Karapace stress test: {cycles} cycles");
    println!("============================================");

    // Both the store and the project live in temp dirs so the test is
    // self-cleaning even on panic.
    let store_dir = tempfile::tempdir().expect("create temp dir");
    let project_dir = tempfile::tempdir().expect("create project dir");
    let manifest_path = write_manifest(project_dir.path());

    let layout = StoreLayout::new(store_dir.path());
    layout.initialize().expect("initialize store");
    let engine = Engine::new(store_dir.path());
    let wal_dir = store_dir.path().join("store").join("wal");

    let mut timings = Timings {
        build: Duration::ZERO,
        commit: Duration::ZERO,
        destroy: Duration::ZERO,
        gc: Duration::ZERO,
    };
    let mut failures = 0u64;

    for cycle in 1..=cycles {
        if let Err(msg) = run_cycle(&engine, &manifest_path, &layout, cycle, &mut timings) {
            eprintln!(" {msg}");
            failures += 1;
            continue;
        }
        if cycle.is_multiple_of(50) {
            failures += check_health(&layout, &wal_dir, cycle);
        }
        if cycle.is_multiple_of(100) {
            let elapsed = timings.build + timings.commit + timings.destroy + timings.gc;
            println!(
                " cycle {cycle}/{cycles}: {:.1}s elapsed, {failures} failures",
                elapsed.as_secs_f64()
            );
        }
    }

    // Final sweep so leftover objects/layers don't skew the leak report.
    let gc = GarbageCollector::new(layout.clone());
    let _ = gc.collect(false);

    print_report(cycles, failures, &timings, &layout, &wal_dir);
}
/dev/null +++ b/crates/karapace-core/src/concurrency.rs @@ -0,0 +1,106 @@ +use crate::CoreError; +use fs2::FileExt; +use std::fs::{File, OpenOptions}; +use std::path::Path; +use std::sync::atomic::{AtomicBool, Ordering}; + +pub struct StoreLock { + lock_file: File, +} + +impl StoreLock { + pub fn acquire(lock_path: &Path) -> Result { + if let Some(parent) = lock_path.parent() { + std::fs::create_dir_all(parent)?; + } + + let file = OpenOptions::new() + .create(true) + .write(true) + .truncate(false) + .open(lock_path)?; + + file.lock_exclusive() + .map_err(|e| CoreError::Io(std::io::Error::new(std::io::ErrorKind::WouldBlock, e)))?; + + Ok(Self { lock_file: file }) + } + + pub fn try_acquire(lock_path: &Path) -> Result, CoreError> { + if let Some(parent) = lock_path.parent() { + std::fs::create_dir_all(parent)?; + } + + let file = OpenOptions::new() + .create(true) + .write(true) + .truncate(false) + .open(lock_path)?; + + match file.try_lock_exclusive() { + Ok(()) => Ok(Some(Self { lock_file: file })), + Err(_) => Ok(None), + } + } +} + +impl Drop for StoreLock { + fn drop(&mut self) { + let _ = self.lock_file.unlock(); + } +} + +static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false); + +pub fn install_signal_handler() { + let _ = ctrlc::set_handler(move || { + if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) { + std::process::exit(1); + } + SHUTDOWN_REQUESTED.store(true, Ordering::SeqCst); + eprintln!("\nshutdown requested, finishing current operation..."); + }); +} + +pub fn shutdown_requested() -> bool { + SHUTDOWN_REQUESTED.load(Ordering::SeqCst) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn lock_acquire_and_release() { + let dir = tempfile::tempdir().unwrap(); + let lock_path = dir.path().join("test.lock"); + + { + let _lock = StoreLock::acquire(&lock_path).unwrap(); + assert!(lock_path.exists()); + } + } + + #[test] + fn try_acquire_returns_none_when_held() { + let dir = tempfile::tempdir().unwrap(); + let lock_path = 
dir.path().join("test.lock"); + + let _lock = StoreLock::acquire(&lock_path).unwrap(); + let result = StoreLock::try_acquire(&lock_path).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn lock_released_on_drop() { + let dir = tempfile::tempdir().unwrap(); + let lock_path = dir.path().join("test.lock"); + + { + let _lock = StoreLock::acquire(&lock_path).unwrap(); + } + + let lock2 = StoreLock::try_acquire(&lock_path).unwrap(); + assert!(lock2.is_some()); + } +} diff --git a/crates/karapace-core/src/drift.rs b/crates/karapace-core/src/drift.rs new file mode 100644 index 0000000..a89dd49 --- /dev/null +++ b/crates/karapace-core/src/drift.rs @@ -0,0 +1,277 @@ +use crate::CoreError; +use karapace_store::StoreLayout; +use serde::Serialize; +use std::fs; +use std::path::Path; + +const WHITEOUT_PREFIX: &str = ".wh."; + +/// Report of filesystem drift detected in an environment's overlay upper layer. +#[derive(Debug, Serialize)] +pub struct DriftReport { + pub env_id: String, + pub added: Vec, + pub modified: Vec, + pub removed: Vec, + pub has_drift: bool, +} + +/// Scan the overlay upper directory for added, modified, and removed files. 
+pub fn diff_overlay(layout: &StoreLayout, env_id: &str) -> Result { + let upper_dir = layout.upper_dir(env_id); + let lower_dir = layout.env_path(env_id).join("lower"); + + let mut added = Vec::new(); + let mut modified = Vec::new(); + let mut removed = Vec::new(); + + if upper_dir.exists() { + collect_drift( + &upper_dir, + &upper_dir, + &lower_dir, + &mut added, + &mut modified, + &mut removed, + )?; + } + + added.sort(); + modified.sort(); + removed.sort(); + + let has_drift = !added.is_empty() || !modified.is_empty() || !removed.is_empty(); + + Ok(DriftReport { + env_id: env_id.to_owned(), + added, + modified, + removed, + has_drift, + }) +} + +fn collect_drift( + upper_base: &Path, + current: &Path, + lower_base: &Path, + added: &mut Vec, + modified: &mut Vec, + removed: &mut Vec, +) -> Result<(), CoreError> { + if !current.is_dir() { + return Ok(()); + } + for entry in fs::read_dir(current)? { + let entry = entry?; + let path = entry.path(); + let file_name = entry.file_name(); + let name_str = file_name.to_string_lossy(); + + let rel = path + .strip_prefix(upper_base) + .unwrap_or(&path) + .to_string_lossy() + .to_string(); + + // Overlayfs whiteout files indicate deletion of the corresponding + // file in the lower layer. + if name_str.starts_with(WHITEOUT_PREFIX) { + let deleted_name = name_str.strip_prefix(WHITEOUT_PREFIX).unwrap_or(&name_str); + let deleted_rel = if let Some(parent) = path.parent() { + let parent_rel = parent + .strip_prefix(upper_base) + .unwrap_or(parent) + .to_string_lossy(); + if parent_rel.is_empty() { + deleted_name.to_string() + } else { + format!("{parent_rel}/{deleted_name}") + } + } else { + deleted_name.to_string() + }; + removed.push(deleted_rel); + continue; + } + + if path.is_dir() { + collect_drift(upper_base, &path, lower_base, added, modified, removed)?; + } else { + // If the same relative path exists in the lower layer, + // this is a modification; otherwise it's a new file. 
+ let lower_path = lower_base.join(&rel); + if lower_path.exists() { + modified.push(rel); + } else { + added.push(rel); + } + } + } + Ok(()) +} + +pub fn export_overlay(layout: &StoreLayout, env_id: &str, dest: &Path) -> Result { + let upper_dir = layout.upper_dir(env_id); + if !upper_dir.exists() { + return Ok(0); + } + + fs::create_dir_all(dest)?; + let mut count = 0; + copy_recursive(&upper_dir, dest, &mut count)?; + Ok(count) +} + +fn copy_recursive(src: &Path, dst: &Path, count: &mut usize) -> Result<(), CoreError> { + for entry in fs::read_dir(src)? { + let entry = entry?; + let src_path = entry.path(); + let dst_path = dst.join(entry.file_name()); + + if src_path.is_dir() { + fs::create_dir_all(&dst_path)?; + copy_recursive(&src_path, &dst_path, count)?; + } else { + fs::copy(&src_path, &dst_path)?; + *count += 1; + } + } + Ok(()) +} + +pub fn commit_overlay( + layout: &StoreLayout, + env_id: &str, + obj_store: &karapace_store::ObjectStore, +) -> Result, CoreError> { + let upper_dir = layout.upper_dir(env_id); + if !upper_dir.exists() { + return Ok(Vec::new()); + } + + let mut committed = Vec::new(); + commit_files(&upper_dir, obj_store, &mut committed)?; + Ok(committed) +} + +fn commit_files( + current: &Path, + obj_store: &karapace_store::ObjectStore, + committed: &mut Vec, +) -> Result<(), CoreError> { + if !current.is_dir() { + return Ok(()); + } + for entry in fs::read_dir(current)? 
{ + let entry = entry?; + let path = entry.path(); + + if path.is_dir() { + commit_files(&path, obj_store, committed)?; + } else { + let data = fs::read(&path)?; + let hash = obj_store.put(&data)?; + committed.push(hash); + } + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn setup() -> (tempfile::TempDir, StoreLayout) { + let dir = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(dir.path()); + layout.initialize().unwrap(); + (dir, layout) + } + + #[test] + fn empty_overlay_reports_no_drift() { + let (_dir, layout) = setup(); + let report = diff_overlay(&layout, "test-env").unwrap(); + assert!(!report.has_drift); + assert!(report.added.is_empty()); + } + + #[test] + fn files_in_overlay_detected_as_drift() { + let (_dir, layout) = setup(); + let upper = layout.upper_dir("test-env"); + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("new_file.txt"), "content").unwrap(); + + let report = diff_overlay(&layout, "test-env").unwrap(); + assert!(report.has_drift); + assert_eq!(report.added.len(), 1); + assert!(report.added.contains(&"new_file.txt".to_owned())); + } + + #[test] + fn whiteout_files_detected_as_removed() { + let (_dir, layout) = setup(); + let upper = layout.upper_dir("test-env"); + fs::create_dir_all(&upper).unwrap(); + // Overlayfs whiteout: .wh.deleted_file marks "deleted_file" as removed + fs::write(upper.join(".wh.deleted_file"), "").unwrap(); + + let report = diff_overlay(&layout, "test-env").unwrap(); + assert!(report.has_drift); + assert_eq!(report.removed.len(), 1); + assert!(report.removed.contains(&"deleted_file".to_owned())); + assert!(report.added.is_empty()); + } + + #[test] + fn modified_files_classified_correctly() { + let (_dir, layout) = setup(); + // Create a "lower" layer with an existing file + let env_dir = layout.env_path("test-env"); + let lower = env_dir.join("lower"); + fs::create_dir_all(&lower).unwrap(); + fs::write(lower.join("existing.txt"), "original").unwrap(); + + // Same file in 
upper = modification + let upper = layout.upper_dir("test-env"); + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("existing.txt"), "changed").unwrap(); + // New file in upper only = added + fs::write(upper.join("brand_new.txt"), "new").unwrap(); + + let report = diff_overlay(&layout, "test-env").unwrap(); + assert!(report.has_drift); + assert_eq!(report.modified.len(), 1); + assert!(report.modified.contains(&"existing.txt".to_owned())); + assert_eq!(report.added.len(), 1); + assert!(report.added.contains(&"brand_new.txt".to_owned())); + } + + #[test] + fn export_copies_overlay_files() { + let (_dir, layout) = setup(); + let upper = layout.upper_dir("test-env"); + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("file.txt"), "data").unwrap(); + + let export_dir = tempfile::tempdir().unwrap(); + let count = export_overlay(&layout, "test-env", export_dir.path()).unwrap(); + assert_eq!(count, 1); + assert!(export_dir.path().join("file.txt").exists()); + } + + #[test] + fn commit_stores_overlay_as_objects() { + let (_dir, layout) = setup(); + let upper = layout.upper_dir("test-env"); + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("file.txt"), "data").unwrap(); + + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + let committed = commit_overlay(&layout, "test-env", &obj_store).unwrap(); + assert_eq!(committed.len(), 1); + assert!(obj_store.exists(&committed[0])); + } +} diff --git a/crates/karapace-core/src/engine.rs b/crates/karapace-core/src/engine.rs new file mode 100644 index 0000000..9f74f46 --- /dev/null +++ b/crates/karapace-core/src/engine.rs @@ -0,0 +1,1049 @@ +use crate::concurrency::StoreLock; +use crate::lifecycle::validate_transition; +use crate::CoreError; +use karapace_runtime::backend::{select_backend, RuntimeSpec}; +use karapace_runtime::SecurityPolicy; +use karapace_schema::types::{LayerHash, ObjectHash}; +use karapace_schema::{ + compute_env_id, parse_manifest_file, EnvIdentity, LockFile, 
ManifestV1, NormalizedManifest, + ResolutionResult, +}; +use karapace_store::{ + pack_layer, unpack_layer, EnvMetadata, EnvState, LayerKind, LayerManifest, LayerStore, + MetadataStore, ObjectStore, RollbackStep, StoreLayout, WalOpKind, WriteAheadLog, +}; +use std::path::{Path, PathBuf}; +use tracing::{debug, info, warn}; + +/// Central orchestration engine for Karapace environment lifecycle. +/// +/// Coordinates manifest parsing, object storage, layer management, and runtime +/// backends to provide build, enter, exec, stop, destroy, and inspection operations. +pub struct Engine { + layout: StoreLayout, + /// Cached lossy UTF-8 representation of the store root, avoiding repeated + /// `to_string_lossy()` allocations on every engine operation. + store_root_str: String, + meta_store: MetadataStore, + obj_store: ObjectStore, + layer_store: LayerStore, + wal: WriteAheadLog, +} + +/// Result of a successful environment build. +pub struct BuildResult { + pub identity: EnvIdentity, + pub lock_file: LockFile, +} + +impl Engine { + /// Create a new engine rooted at the given store directory. + /// + /// On construction, the WAL is scanned for incomplete entries from + /// previous runs and any orphaned state is rolled back automatically. + pub fn new(store_root: impl Into) -> Self { + let root: PathBuf = store_root.into(); + let layout = StoreLayout::new(&root); + let meta_store = MetadataStore::new(layout.clone()); + let obj_store = ObjectStore::new(layout.clone()); + let layer_store = LayerStore::new(layout.clone()); + let wal = WriteAheadLog::new(&layout); + + // Recover any incomplete operations from a previous crash + if let Err(e) = wal.recover() { + warn!("WAL recovery failed: {e}"); + } + + // Clean up stale .running markers left by a crash during enter/exec. + // After WAL recovery, any env still marked Running was mid-operation. 
+ let env_base = layout.env_dir(); + if env_base.exists() { + if let Ok(entries) = std::fs::read_dir(&env_base) { + for entry in entries.flatten() { + let running_marker = entry.path().join(".running"); + if running_marker.exists() { + debug!( + "removing stale .running marker: {}", + running_marker.display() + ); + let _ = std::fs::remove_file(&running_marker); + } + } + } + } + + let store_root_str = root.to_string_lossy().into_owned(); + Self { + layout, + store_root_str, + meta_store, + obj_store, + layer_store, + wal, + } + } + + /// Initialize an environment from a manifest without building it. + pub fn init(&self, manifest_path: &Path) -> Result { + info!("initializing environment from {}", manifest_path.display()); + self.layout.initialize()?; + + let manifest = parse_manifest_file(manifest_path)?; + let normalized = manifest.normalize()?; + + // Use preliminary identity from manifest (not resolved yet). + // This is sufficient for the Defined state; build will re-resolve. + let identity = compute_env_id(&normalized); + + if !self.meta_store.exists(&identity.env_id) { + let manifest_json = normalized.canonical_json(); + let manifest_hash = self.obj_store.put(manifest_json.as_bytes())?; + + let now = chrono::Utc::now().to_rfc3339(); + let meta = EnvMetadata { + env_id: identity.env_id.clone(), + short_id: identity.short_id.clone(), + name: None, + state: EnvState::Defined, + manifest_hash: ObjectHash::new(manifest_hash), + base_layer: LayerHash::new(""), + dependency_layers: Vec::new(), + policy_layer: None, + created_at: now.clone(), + updated_at: now, + ref_count: 1, + checksum: None, + }; + self.meta_store.put(&meta)?; + } + + // Generate a preliminary lock with mock resolution + // (no real image digest or package versions yet). 
+ let preliminary_resolution = ResolutionResult { + base_image_digest: blake3::hash( + format!("unresolved:{}", normalized.base_image).as_bytes(), + ) + .to_hex() + .to_string(), + resolved_packages: normalized + .system_packages + .iter() + .map(|name| karapace_schema::ResolvedPackage { + name: name.clone(), + version: "unresolved".to_owned(), + }) + .collect(), + }; + let lock = LockFile::from_resolved(&normalized, &preliminary_resolution); + + let lock_path = manifest_path + .parent() + .unwrap_or(Path::new(".")) + .join("karapace.lock"); + lock.write_to_file(&lock_path)?; + + Ok(BuildResult { + identity, + lock_file: lock, + }) + } + + #[allow(clippy::too_many_lines)] + pub fn build(&self, manifest_path: &Path) -> Result { + info!("building environment from {}", manifest_path.display()); + self.layout.initialize()?; + + let manifest = parse_manifest_file(manifest_path)?; + let normalized = manifest.normalize()?; + + let policy = SecurityPolicy::from_manifest(&normalized); + policy.validate_mounts(&normalized)?; + policy.validate_devices(&normalized)?; + policy.validate_resource_limits(&normalized)?; + + let store_str = self.store_root_str.clone(); + let backend = select_backend(&normalized.runtime_backend, &store_str)?; + + // Phase 1: Resolve dependencies through the backend. + // This downloads the base image, computes its content digest, + // and queries the package manager for exact versions. 
+ let preliminary_id = compute_env_id(&normalized); + let preliminary_spec = RuntimeSpec { + env_id: preliminary_id.env_id.to_string(), + root_path: self + .layout + .env_path(&preliminary_id.env_id) + .to_string_lossy() + .to_string(), + overlay_path: self + .layout + .env_path(&preliminary_id.env_id) + .to_string_lossy() + .to_string(), + store_root: store_str.clone(), + manifest: normalized.clone(), + }; + let resolution = backend.resolve(&preliminary_spec)?; + debug!( + "resolved {} packages, base digest {}", + resolution.resolved_packages.len(), + &resolution.base_image_digest[..12] + ); + + // Phase 2: Generate the lock file from resolved data. + // The env_id is computed from the locked state — content digest + // + pinned package versions — not from unresolved names. + let lock = LockFile::from_resolved(&normalized, &resolution); + let identity = lock.compute_identity(); + + info!( + "canonical env_id: {} ({})", + identity.env_id, identity.short_id + ); + + // Phase 3: Build the environment, then capture real filesystem layers. + let manifest_hash = self.obj_store.put(normalized.canonical_json().as_bytes())?; + + let env_dir = self.layout.env_path(&identity.env_id); + + // Begin WAL entry before creating side effects + self.wal.initialize()?; + let wal_op = self.wal.begin(WalOpKind::Build, &identity.env_id)?; + + // Register rollback BEFORE the side-effect so a crash between + // create_dir_all and add_rollback_step cannot orphan the directory. + // rollback_entry already checks path.exists(), so a no-op if dir was never created. 
+ self.wal + .add_rollback_step(&wal_op, RollbackStep::RemoveDir(env_dir.clone()))?; + std::fs::create_dir_all(&env_dir)?; + + let spec = RuntimeSpec { + env_id: identity.env_id.to_string(), + root_path: env_dir.to_string_lossy().to_string(), + overlay_path: env_dir.to_string_lossy().to_string(), + store_root: store_str, + manifest: normalized.clone(), + }; + if let Err(e) = backend.build(&spec) { + let _ = std::fs::remove_dir_all(&env_dir); + let _ = self.wal.commit(&wal_op); + return Err(e.into()); + } + + // Capture the overlay upper directory as a real tar layer. + // The upper dir contains all filesystem changes made during build + // (installed packages, config files, etc.). + let upper_dir = self.layout.upper_dir(&identity.env_id); + let build_tar = if upper_dir.exists() { + pack_layer(&upper_dir)? + } else { + // No upper dir (shouldn't happen with real backends, but handle gracefully) + Vec::new() + }; + let build_tar_hash = self.obj_store.put(&build_tar)?; + debug!( + "captured build layer: {} bytes, hash {}", + build_tar.len(), + &build_tar_hash[..12] + ); + + let base_layer = LayerManifest { + hash: build_tar_hash.clone(), + kind: LayerKind::Base, + parent: None, + object_refs: vec![build_tar_hash.clone()], + read_only: true, + tar_hash: build_tar_hash.clone(), + }; + let base_layer_hash = self.layer_store.put(&base_layer)?; + + // No separate dependency layers — the build tar captures everything. + // Individual package tracking is in the lock file, not the layer store. 
+ let dep_layers = Vec::new(); + + let now = chrono::Utc::now().to_rfc3339(); + let meta = EnvMetadata { + env_id: identity.env_id.clone(), + short_id: identity.short_id.clone(), + name: None, + state: EnvState::Built, + manifest_hash: ObjectHash::new(manifest_hash), + base_layer: LayerHash::new(base_layer_hash), + dependency_layers: dep_layers, + policy_layer: None, + created_at: now.clone(), + updated_at: now, + ref_count: 1, + checksum: None, + }; + + // Phase 4: Write metadata and lock file. + // If either fails after a successful build, clean up the orphaned env_dir. + let finalize = || -> Result<(), CoreError> { + if let Ok(existing) = self.meta_store.get(&identity.env_id) { + validate_transition(existing.state, EnvState::Built)?; + } + self.meta_store.put(&meta)?; + + let lock_path = manifest_path + .parent() + .unwrap_or(Path::new(".")) + .join("karapace.lock"); + lock.write_to_file(&lock_path)?; + Ok(()) + }; + + if let Err(e) = finalize() { + warn!("post-build finalization failed, cleaning up env_dir: {e}"); + let _ = std::fs::remove_dir_all(&env_dir); + let _ = self.wal.commit(&wal_op); + return Err(e); + } + + // Build succeeded — commit WAL (removes entry) + self.wal.commit(&wal_op)?; + + Ok(BuildResult { + identity, + lock_file: lock, + }) + } + + fn load_manifest(&self, manifest_hash: &str) -> Result { + let data = self.obj_store.get(manifest_hash)?; + Ok(serde_json::from_slice(&data)?) 
+ } + + fn prepare_spec(&self, env_id: &str, manifest: NormalizedManifest) -> RuntimeSpec { + let env_path_str = self.layout.env_path(env_id).to_string_lossy().into_owned(); + RuntimeSpec { + env_id: env_id.to_owned(), + root_path: env_path_str.clone(), + overlay_path: env_path_str, + store_root: self.store_root_str.clone(), + manifest, + } + } + + pub fn enter(&self, env_id: &str) -> Result<(), CoreError> { + info!("entering environment {env_id}"); + let meta = self + .meta_store + .get(env_id) + .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?; + + validate_transition(meta.state, EnvState::Running)?; + + let normalized = self.load_manifest(&meta.manifest_hash)?; + let store_str = self.store_root_str.clone(); + let backend = select_backend(&normalized.runtime_backend, &store_str)?; + let spec = self.prepare_spec(env_id, normalized); + + // WAL: if we crash while Running, recover back to Built + self.wal.initialize()?; + let wal_op = self.wal.begin(WalOpKind::Enter, env_id)?; + self.wal.add_rollback_step( + &wal_op, + RollbackStep::ResetState { + env_id: env_id.to_owned(), + target_state: "Built".to_owned(), + }, + )?; + + self.meta_store.update_state(env_id, EnvState::Running)?; + if let Err(e) = backend.enter(&spec) { + let _ = self.meta_store.update_state(env_id, EnvState::Built); + let _ = self.wal.commit(&wal_op); + return Err(e.into()); + } + self.meta_store.update_state(env_id, EnvState::Built)?; + self.wal.commit(&wal_op)?; + + Ok(()) + } + + pub fn exec(&self, env_id: &str, command: &[String]) -> Result<(), CoreError> { + info!("exec in environment {env_id}: {command:?}"); + let meta = self + .meta_store + .get(env_id) + .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?; + + validate_transition(meta.state, EnvState::Running)?; + + let normalized = self.load_manifest(&meta.manifest_hash)?; + let store_str = self.store_root_str.clone(); + let backend = select_backend(&normalized.runtime_backend, &store_str)?; + let spec = 
self.prepare_spec(env_id, normalized); + + // WAL: if we crash while Running, recover back to Built + self.wal.initialize()?; + let wal_op = self.wal.begin(WalOpKind::Exec, env_id)?; + self.wal.add_rollback_step( + &wal_op, + RollbackStep::ResetState { + env_id: env_id.to_owned(), + target_state: "Built".to_owned(), + }, + )?; + + self.meta_store.update_state(env_id, EnvState::Running)?; + let result = backend.exec(&spec, command); + let _ = self.meta_store.update_state(env_id, EnvState::Built); + let _ = self.wal.commit(&wal_op); + + match result { + Ok(output) => { + use std::io::Write; + let _ = std::io::stdout().write_all(&output.stdout); + let _ = std::io::stderr().write_all(&output.stderr); + if output.status.success() { + Ok(()) + } else { + let detail = if let Some(code) = output.status.code() { + format!("command exited with code {code}") + } else { + #[cfg(unix)] + { + use std::os::unix::process::ExitStatusExt; + match output.status.signal() { + Some(sig) => format!("command killed by signal {sig}"), + None => "command failed with unknown status".to_owned(), + } + } + #[cfg(not(unix))] + { + "command failed with unknown status".to_owned() + } + }; + Err(CoreError::Runtime( + karapace_runtime::RuntimeError::ExecFailed(detail), + )) + } + } + Err(e) => Err(e.into()), + } + } + + pub fn stop(&self, env_id: &str) -> Result<(), CoreError> { + info!("stopping environment {env_id}"); + let meta = self + .meta_store + .get(env_id) + .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?; + + if meta.state != EnvState::Running { + return Err(CoreError::Runtime( + karapace_runtime::RuntimeError::NotRunning(env_id.to_owned()), + )); + } + + let normalized = self.load_manifest(&meta.manifest_hash)?; + let store_str = self.store_root_str.clone(); + let backend = select_backend(&normalized.runtime_backend, &store_str)?; + let status = backend.status(env_id)?; + + if let Some(pid) = status.pid { + let pid_i32 = i32::try_from(pid).map_err(|_| { + 
CoreError::Runtime(karapace_runtime::RuntimeError::ExecFailed(format!( + "invalid pid {pid}: exceeds i32 range" + ))) + })?; + debug!("sending SIGTERM to pid {pid}"); + // SAFETY: kill() with a valid pid and signal is safe; pid validated via i32::try_from above. + #[allow(unsafe_code)] + let term_ret = unsafe { libc::kill(pid_i32, libc::SIGTERM) }; + if term_ret != 0 { + let errno = std::io::Error::last_os_error(); + if errno.raw_os_error() == Some(libc::ESRCH) { + debug!("pid {pid} already exited before SIGTERM"); + } else { + return Err(CoreError::Runtime( + karapace_runtime::RuntimeError::ExecFailed(format!( + "failed to send SIGTERM to pid {pid}: {errno}" + )), + )); + } + } else { + // Give it a moment to clean up + std::thread::sleep(std::time::Duration::from_millis(500)); + // Force kill if still running + if Path::new(&format!("/proc/{pid}")).exists() { + warn!("process {pid} did not exit after SIGTERM, sending SIGKILL"); + // SAFETY: same as above — valid pid and signal. + #[allow(unsafe_code)] + let kill_ret = unsafe { libc::kill(pid_i32, libc::SIGKILL) }; + if kill_ret != 0 { + let errno = std::io::Error::last_os_error(); + if errno.raw_os_error() != Some(libc::ESRCH) { + warn!("failed to send SIGKILL to pid {pid}: {errno}"); + } + } + } + } + } + + // Clean up running marker + let running_file = self.layout.env_path(env_id).join(".running"); + let _ = std::fs::remove_file(running_file); + + self.meta_store.update_state(env_id, EnvState::Built)?; + Ok(()) + } + + pub fn destroy(&self, env_id: &str) -> Result<(), CoreError> { + info!("destroying environment {env_id}"); + let meta = self + .meta_store + .get(env_id) + .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?; + + if meta.state == EnvState::Running { + return Err(CoreError::InvalidTransition { + from: "Running".to_owned(), + to: "cannot destroy a running environment; stop it first".to_owned(), + }); + } + + let normalized = self.load_manifest(&meta.manifest_hash)?; + let store_str = 
+        self.store_root_str.clone();
+        let backend = select_backend(&normalized.runtime_backend, &store_str)?;
+        let spec = self.prepare_spec(env_id, normalized);
+
+        // Begin WAL entry BEFORE any side-effects (including backend.destroy).
+        // If the backend cleans up runtime state but we crash before metadata
+        // removal, recovery will complete the cleanup on next startup.
+        self.wal.initialize()?;
+        let wal_op = self.wal.begin(WalOpKind::Destroy, env_id)?;
+
+        let env_dir = self.layout.env_path(env_id);
+        // Register rollback steps BEFORE side-effects. Destroy rollback is
+        // best-effort: re-running destroy on an already-destroyed env is safe.
+        self.wal
+            .add_rollback_step(&wal_op, RollbackStep::RemoveDir(env_dir.clone()))?;
+
+        if let Err(e) = backend.destroy(&spec) {
+            let _ = self.wal.commit(&wal_op);
+            return Err(e.into());
+        }
+        if env_dir.exists() {
+            std::fs::remove_dir_all(&env_dir)?;
+        }
+
+        let metadata_path = self.layout.metadata_dir().join(env_id);
+        self.wal
+            .add_rollback_step(&wal_op, RollbackStep::RemoveFile(metadata_path))?;
+        let remaining = self.meta_store.decrement_ref(env_id)?;
+        if remaining == 0 {
+            let _ = self.meta_store.remove(env_id);
+        }
+
+        // Destroy succeeded — commit WAL (removes entry)
+        self.wal.commit(&wal_op)?;
+
+        Ok(())
+    }
+
+    pub fn rebuild(&self, manifest_path: &Path) -> Result<BuildResult, CoreError> {
+        // Collect the old env_id(s) to clean up AFTER a successful build.
+        // This ensures we don't lose the old environment if the new build fails.
+        let lock_path = manifest_path
+            .parent()
+            .unwrap_or(Path::new("."))
+            .join("karapace.lock");
+
+        let mut old_env_ids: Vec<String> = Vec::new();
+        if let Ok(lock) = LockFile::read_from_file(&lock_path) {
+            if self.meta_store.exists(&lock.env_id) {
+                old_env_ids.push(lock.env_id);
+            }
+        }
+        if old_env_ids.is_empty() {
+            let manifest = parse_manifest_file(manifest_path)?;
+            let normalized = manifest.normalize()?;
+            let identity = compute_env_id(&normalized);
+            if self.meta_store.exists(&identity.env_id) {
+                old_env_ids.push(identity.env_id.to_string());
+            }
+        }
+
+        // Build first — if this fails, old environment is preserved.
+        let result = self.build(manifest_path)?;
+
+        // Only destroy the old environment(s) after the new build succeeds.
+        for old_id in &old_env_ids {
+            if *old_id != result.identity.env_id {
+                if let Err(e) = self.destroy(old_id) {
+                    warn!("failed to destroy old environment {old_id} during rebuild: {e}");
+                }
+                let _ = self.meta_store.remove(old_id);
+            }
+        }
+
+        Ok(result)
+    }
+
+    pub fn inspect(&self, env_id: &str) -> Result<EnvMetadata, CoreError> {
+        self.meta_store
+            .get(env_id)
+            .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))
+    }
+
+    pub fn list(&self) -> Result<Vec<EnvMetadata>, CoreError> {
+        Ok(self.meta_store.list()?)
+    }
+
+    pub fn freeze(&self, env_id: &str) -> Result<(), CoreError> {
+        info!("freezing environment {env_id}");
+        let meta = self
+            .meta_store
+            .get(env_id)
+            .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?;
+
+        validate_transition(meta.state, EnvState::Frozen)?;
+        self.meta_store.update_state(env_id, EnvState::Frozen)?;
+        Ok(())
+    }
+
+    pub fn archive(&self, env_id: &str) -> Result<(), CoreError> {
+        info!("archiving environment {env_id}");
+        let meta = self
+            .meta_store
+            .get(env_id)
+            .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?;
+
+        validate_transition(meta.state, EnvState::Archived)?;
+        self.meta_store.update_state(env_id, EnvState::Archived)?;
+        Ok(())
+    }
+
+    pub fn set_name(&self, env_id: &str, name: Option<String>) -> Result<(), CoreError> {
+        self.meta_store
+            .get(env_id)
+            .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?;
+        self.meta_store.update_name(env_id, name)?;
+        Ok(())
+    }
+
+    pub fn rename(&self, env_id: &str, new_name: &str) -> Result<(), CoreError> {
+        info!("renaming environment {env_id} to '{new_name}'");
+        self.set_name(env_id, Some(new_name.to_owned()))
+    }
+
+    pub fn commit(&self, env_id: &str) -> Result<String, CoreError> {
+        info!("committing overlay drift for {env_id}");
+        let meta = self
+            .meta_store
+            .get(env_id)
+            .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?;
+
+        if meta.state != EnvState::Built && meta.state != EnvState::Frozen {
+            return Err(CoreError::InvalidTransition {
+                from: meta.state.to_string(),
+                to: "commit requires built or frozen state".to_owned(),
+            });
+        }
+
+        // Begin WAL entry for commit
+        self.wal.initialize()?;
+        let wal_op = self.wal.begin(WalOpKind::Commit, env_id)?;
+
+        // Pack the overlay upper directory as a deterministic tar layer.
+        let upper_dir = self.layout.upper_dir(env_id);
+        let tar_data = if upper_dir.exists() {
+            pack_layer(&upper_dir)?
+ } else { + let _ = self.wal.commit(&wal_op); + return Err(CoreError::EnvNotFound(format!( + "no overlay upper directory for {env_id}" + ))); + }; + + let tar_hash = self.obj_store.put(&tar_data)?; + debug!( + "committed snapshot layer: {} bytes, hash {}", + tar_data.len(), + &tar_hash[..12] + ); + + // Compute a unique layer hash for this snapshot. + // The tar_hash alone may collide with the base layer if the upper + // dir content hasn't changed. Use a composite identity. + let snapshot_id_input = format!("snapshot:{}:{}:{}", env_id, meta.base_layer, tar_hash); + let snapshot_hash = blake3::hash(snapshot_id_input.as_bytes()) + .to_hex() + .to_string(); + + let snapshot_layer = LayerManifest { + hash: snapshot_hash.clone(), + kind: LayerKind::Snapshot, + parent: Some(meta.base_layer.to_string()), + object_refs: vec![tar_hash.clone()], + read_only: true, + tar_hash, + }; + // Compute the content hash before writing so we can register the + // correct rollback path. Uses LayerStore::compute_hash() to ensure + // the hash matches what put() will produce. + let content_hash = LayerStore::compute_hash(&snapshot_layer)?; + + // Register rollback for the snapshot layer manifest BEFORE writing it, + // so a crash after put() but before WAL commit cleans up the orphan. + let layer_path = self.layout.layers_dir().join(&content_hash); + self.wal + .add_rollback_step(&wal_op, RollbackStep::RemoveFile(layer_path))?; + let stored_hash = self.layer_store.put(&snapshot_layer)?; + + // Commit succeeded — remove WAL entry + self.wal.commit(&wal_op)?; + + Ok(stored_hash) + } + + /// Restore an environment's overlay from a snapshot layer. + /// + /// Unpacks the snapshot tar into the overlay upper directory, replacing + /// any current upper content. The operation is atomic: the old upper is + /// only removed after the new content is fully unpacked in a staging dir. 
+ pub fn restore(&self, env_id: &str, snapshot_hash: &str) -> Result<(), CoreError> { + info!("restoring {env_id} from snapshot {snapshot_hash}"); + let meta = self + .meta_store + .get(env_id) + .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?; + + if meta.state != EnvState::Built && meta.state != EnvState::Frozen { + return Err(CoreError::InvalidTransition { + from: meta.state.to_string(), + to: "restore requires built or frozen state".to_owned(), + }); + } + + // Verify the snapshot layer exists and is a Snapshot kind. + let layer = self.layer_store.get(snapshot_hash).map_err(|_| { + CoreError::Store(karapace_store::StoreError::LayerNotFound( + snapshot_hash.to_owned(), + )) + })?; + if layer.kind != LayerKind::Snapshot { + return Err(CoreError::InvalidTransition { + from: format!("{:?}", layer.kind), + to: "restore requires a Snapshot layer".to_owned(), + }); + } + if layer.tar_hash.is_empty() { + return Err(CoreError::Store(karapace_store::StoreError::LayerNotFound( + format!("snapshot {snapshot_hash} has no tar content (legacy layer)"), + ))); + } + + // Retrieve the tar data from the object store. + let tar_data = self.obj_store.get(&layer.tar_hash)?; + + // Begin WAL entry for restore + self.wal.initialize()?; + let wal_op = self.wal.begin(WalOpKind::Restore, env_id)?; + + // Atomic restore: unpack to staging, then swap with current upper. + let staging = self.layout.staging_dir().join(format!("restore-{env_id}")); + + // Register rollback BEFORE any staging dir operations so a crash + // between create and registration cannot orphan the staging dir. + self.wal + .add_rollback_step(&wal_op, RollbackStep::RemoveDir(staging.clone()))?; + + if staging.exists() { + std::fs::remove_dir_all(&staging)?; + } + + unpack_layer(&tar_data, &staging)?; + + // Swap: remove old upper, rename staging to upper. 
+ let upper_dir = self.layout.upper_dir(env_id); + if upper_dir.exists() { + std::fs::remove_dir_all(&upper_dir)?; + } + std::fs::rename(&staging, &upper_dir)?; + + // Restore succeeded — remove WAL entry + self.wal.commit(&wal_op)?; + + debug!("restored upper dir from snapshot {}", &snapshot_hash[..12]); + Ok(()) + } + + /// List all snapshot layers associated with an environment. + /// + /// Returns snapshot `LayerManifest` entries whose parent matches + /// the environment's base layer, ordered by hash. + pub fn list_snapshots(&self, env_id: &str) -> Result, CoreError> { + let meta = self + .meta_store + .get(env_id) + .map_err(|_| CoreError::EnvNotFound(env_id.to_owned()))?; + + let all_hashes = self.layer_store.list()?; + let mut snapshots = Vec::new(); + for hash in &all_hashes { + if let Ok(layer) = self.layer_store.get(hash) { + if layer.kind == LayerKind::Snapshot + && layer.parent.as_deref() == Some(&meta.base_layer) + { + snapshots.push(layer); + } + } + } + snapshots.sort_by(|a, b| a.hash.cmp(&b.hash)); + Ok(snapshots) + } + + /// Run garbage collection on the store. + /// + /// Requires a `&StoreLock` parameter as compile-time proof that the caller + /// holds the store lock. The lock is not used internally — its presence in + /// the signature enforces the invariant at the type level. + pub fn gc( + &self, + _lock: &StoreLock, + dry_run: bool, + ) -> Result { + info!("running garbage collection (dry_run={dry_run})"); + + // WAL marker: track GC in-flight. No rollback steps — GC is + // inherently idempotent (orphaned items re-discovered on next run). + // On recovery, an incomplete GC entry is logged and removed. + self.wal.initialize()?; + let wal_op = self.wal.begin(WalOpKind::Gc, "gc")?; + + let gc = karapace_store::GarbageCollector::new(self.layout.clone()); + let report = gc.collect_with_cancel(dry_run, crate::shutdown_requested)?; + + self.wal.commit(&wal_op)?; + Ok(report) + } + + /// Push an environment to a remote store. 
+ /// + /// Transfers metadata, layers, and objects to the remote backend, + /// skipping blobs that already exist. Optionally publishes under + /// a registry tag (e.g. `"my-env@latest"`). + pub fn push( + &self, + env_id: &str, + backend: &dyn karapace_remote::RemoteBackend, + registry_tag: Option<&str>, + ) -> Result { + info!("pushing environment {env_id}"); + Ok(karapace_remote::push_env( + &self.layout, + env_id, + backend, + registry_tag, + )?) + } + + /// Pull an environment from a remote store into the local store. + /// + /// Downloads metadata, layers, and objects from the remote backend, + /// skipping blobs that already exist locally. Verifies blake3 integrity + /// on all downloaded objects. + pub fn pull( + &self, + env_id: &str, + backend: &dyn karapace_remote::RemoteBackend, + ) -> Result { + info!("pulling environment {env_id}"); + self.layout.initialize()?; + Ok(karapace_remote::pull_env(&self.layout, env_id, backend)?) + } + + /// Resolve a registry reference to an env_id using the remote registry. + pub fn resolve_remote_ref( + backend: &dyn karapace_remote::RemoteBackend, + reference: &str, + ) -> Result { + Ok(karapace_remote::resolve_ref(backend, reference)?) 
+ } + + pub fn store_layout(&self) -> &StoreLayout { + &self.layout + } + + pub fn resolve_manifest( + &self, + manifest_path: &Path, + ) -> Result<(ManifestV1, NormalizedManifest, EnvIdentity), CoreError> { + let manifest = parse_manifest_file(manifest_path)?; + let normalized = manifest.normalize()?; + let identity = compute_env_id(&normalized); + Ok((manifest, normalized, identity)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn test_engine() -> (tempfile::TempDir, Engine, tempfile::TempDir) { + let store_dir = tempfile::tempdir().unwrap(); + let project_dir = tempfile::tempdir().unwrap(); + + let manifest_content = r#" +manifest_version = 1 +[base] +image = "rolling" +[system] +packages = ["git", "clang"] +[runtime] +backend = "mock" +"#; + std::fs::write(project_dir.path().join("karapace.toml"), manifest_content).unwrap(); + + let engine = Engine::new(store_dir.path()); + (store_dir, engine, project_dir) + } + + #[test] + fn init_creates_lock_and_metadata() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let result = engine.init(&manifest_path).unwrap(); + + assert!(!result.identity.env_id.is_empty()); + assert!(project.path().join("karapace.lock").exists()); + } + + #[test] + fn build_creates_environment() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let result = engine.build(&manifest_path).unwrap(); + + let meta = engine.inspect(&result.identity.env_id).unwrap(); + assert_eq!(meta.state, EnvState::Built); + } + + #[test] + fn rebuild_produces_same_id() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let r1 = engine.build(&manifest_path).unwrap(); + let r2 = engine.rebuild(&manifest_path).unwrap(); + assert_eq!(r1.identity.env_id, r2.identity.env_id); + } + + #[test] + fn destroy_cleans_up() { + let (_store, engine, project) = test_engine(); + let 
manifest_path = project.path().join("karapace.toml"); + let result = engine.build(&manifest_path).unwrap(); + engine.destroy(&result.identity.env_id).unwrap(); + } + + #[test] + fn freeze_transitions_correctly() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let result = engine.build(&manifest_path).unwrap(); + engine.freeze(&result.identity.env_id).unwrap(); + + let meta = engine.inspect(&result.identity.env_id).unwrap(); + assert_eq!(meta.state, EnvState::Frozen); + } + + #[test] + fn list_environments() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + engine.build(&manifest_path).unwrap(); + let envs = engine.list().unwrap(); + assert_eq!(envs.len(), 1); + } + + #[test] + fn archive_transitions_correctly() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let result = engine.build(&manifest_path).unwrap(); + engine.freeze(&result.identity.env_id).unwrap(); + engine.archive(&result.identity.env_id).unwrap(); + + let meta = engine.inspect(&result.identity.env_id).unwrap(); + assert_eq!(meta.state, EnvState::Archived); + } + + #[test] + fn set_name_and_rename() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let result = engine.build(&manifest_path).unwrap(); + + engine + .set_name(&result.identity.env_id, Some("test-env".to_owned())) + .unwrap(); + let meta = engine.inspect(&result.identity.env_id).unwrap(); + assert_eq!(meta.name, Some("test-env".to_owned())); + + engine.rename(&result.identity.env_id, "new-name").unwrap(); + let meta = engine.inspect(&result.identity.env_id).unwrap(); + assert_eq!(meta.name, Some("new-name".to_owned())); + } + + #[test] + fn inspect_nonexistent_fails() { + let (_store, engine, _project) = test_engine(); + assert!(engine.inspect("nonexistent").is_err()); + } + + #[test] + fn 
destroy_nonexistent_fails() { + let (_store, engine, _project) = test_engine(); + engine.store_layout().initialize().unwrap(); + assert!(engine.destroy("nonexistent").is_err()); + } + + #[test] + fn list_empty_store() { + let (_store, engine, _project) = test_engine(); + engine.store_layout().initialize().unwrap(); + let envs = engine.list().unwrap(); + assert!(envs.is_empty()); + } + + #[test] + fn init_is_idempotent() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let r1 = engine.init(&manifest_path).unwrap(); + let r2 = engine.init(&manifest_path).unwrap(); + assert_eq!(r1.identity.env_id, r2.identity.env_id); + } + + #[test] + fn build_creates_lock_file() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + engine.build(&manifest_path).unwrap(); + let lock_path = project.path().join("karapace.lock"); + assert!(lock_path.exists()); + let content = std::fs::read_to_string(&lock_path).unwrap(); + assert!(content.contains("lock_version")); + } + + #[test] + fn resolve_manifest_returns_identity() { + let (_store, engine, project) = test_engine(); + let manifest_path = project.path().join("karapace.toml"); + let (manifest, normalized, identity) = engine.resolve_manifest(&manifest_path).unwrap(); + assert_eq!(manifest.manifest_version, 1); + assert_eq!(normalized.base_image, "rolling"); + assert!(!identity.env_id.is_empty()); + } + + #[test] + fn store_layout_accessor() { + let (_store, engine, _project) = test_engine(); + let layout = engine.store_layout(); + assert!(layout.objects_dir().to_string_lossy().contains("objects")); + } +} diff --git a/crates/karapace-core/src/lib.rs b/crates/karapace-core/src/lib.rs new file mode 100644 index 0000000..047e209 --- /dev/null +++ b/crates/karapace-core/src/lib.rs @@ -0,0 +1,40 @@ +//! Core orchestration engine for Karapace environment lifecycle. +//! +//! 
This crate ties together schema parsing, store operations, and runtime backends +//! into the `Engine` — the central API for building, entering, stopping, destroying, +//! and inspecting deterministic container environments. It also provides overlay +//! drift detection, concurrent store locking, and state-machine lifecycle validation. + +pub mod concurrency; +pub mod drift; +pub mod engine; +pub mod lifecycle; + +pub use concurrency::{install_signal_handler, shutdown_requested, StoreLock}; +pub use drift::{commit_overlay, diff_overlay, export_overlay, DriftReport}; +pub use engine::{BuildResult, Engine}; +pub use lifecycle::validate_transition; + +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum CoreError { + #[error("manifest error: {0}")] + Manifest(#[from] karapace_schema::ManifestError), + #[error("lock error: {0}")] + Lock(#[from] karapace_schema::LockError), + #[error("store error: {0}")] + Store(#[from] karapace_store::StoreError), + #[error("runtime error: {0}")] + Runtime(#[from] karapace_runtime::RuntimeError), + #[error("invalid state transition: {from} -> {to}")] + InvalidTransition { from: String, to: String }, + #[error("environment not found: {0}")] + EnvNotFound(String), + #[error("I/O error: {0}")] + Io(#[from] std::io::Error), + #[error("serialization error: {0}")] + Serialization(#[from] serde_json::Error), + #[error("remote error: {0}")] + Remote(#[from] karapace_remote::RemoteError), +} diff --git a/crates/karapace-core/src/lifecycle.rs b/crates/karapace-core/src/lifecycle.rs new file mode 100644 index 0000000..2a9a01c --- /dev/null +++ b/crates/karapace-core/src/lifecycle.rs @@ -0,0 +1,57 @@ +use crate::CoreError; +use karapace_store::EnvState; + +pub fn validate_transition(from: EnvState, to: EnvState) -> Result<(), CoreError> { + let valid = matches!( + (from, to), + ( + EnvState::Defined + | EnvState::Built + | EnvState::Running + | EnvState::Frozen + | EnvState::Archived, + EnvState::Built + ) | ( + EnvState::Built, + 
EnvState::Running | EnvState::Frozen | EnvState::Archived + ) | (EnvState::Running, EnvState::Frozen) + | (EnvState::Frozen, EnvState::Archived) + ); + + if valid { + Ok(()) + } else { + Err(CoreError::InvalidTransition { + from: from.to_string(), + to: to.to_string(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn valid_transitions() { + assert!(validate_transition(EnvState::Defined, EnvState::Built).is_ok()); + assert!(validate_transition(EnvState::Built, EnvState::Built).is_ok()); // idempotent rebuild + assert!(validate_transition(EnvState::Built, EnvState::Running).is_ok()); + assert!(validate_transition(EnvState::Built, EnvState::Frozen).is_ok()); + assert!(validate_transition(EnvState::Running, EnvState::Built).is_ok()); + assert!(validate_transition(EnvState::Running, EnvState::Frozen).is_ok()); + assert!(validate_transition(EnvState::Frozen, EnvState::Built).is_ok()); + assert!(validate_transition(EnvState::Built, EnvState::Archived).is_ok()); + assert!(validate_transition(EnvState::Frozen, EnvState::Archived).is_ok()); + assert!(validate_transition(EnvState::Archived, EnvState::Built).is_ok()); + } + + #[test] + fn invalid_transitions() { + assert!(validate_transition(EnvState::Defined, EnvState::Running).is_err()); + assert!(validate_transition(EnvState::Defined, EnvState::Frozen).is_err()); + assert!(validate_transition(EnvState::Archived, EnvState::Running).is_err()); + assert!(validate_transition(EnvState::Running, EnvState::Defined).is_err()); + assert!(validate_transition(EnvState::Frozen, EnvState::Running).is_err()); + } +} diff --git a/crates/karapace-core/tests/crash.rs b/crates/karapace-core/tests/crash.rs new file mode 100644 index 0000000..8e73fe4 --- /dev/null +++ b/crates/karapace-core/tests/crash.rs @@ -0,0 +1,391 @@ +#![allow(unsafe_code, clippy::undocumented_unsafe_blocks)] +//! Real crash recovery tests using fork + SIGKILL. +//! +//! 
These tests fork a child process that runs Karapace operations in a tight +//! loop, kill it mid-flight with SIGKILL, then verify the store is recoverable +//! and consistent in the parent. +//! +//! This validates that: +//! - WAL recovery cleans up incomplete operations +//! - No partially created environment directories survive +//! - No corrupted metadata remains +//! - Store integrity check passes after recovery +//! - Lock state is released (flock auto-released on process death) + +use karapace_core::{Engine, StoreLock}; +use karapace_store::StoreLayout; +use std::fs; +use std::path::Path; + +fn write_manifest(dir: &Path, content: &str) -> std::path::PathBuf { + let path = dir.join("karapace.toml"); + fs::write(&path, content).unwrap(); + path +} + +fn mock_manifest(packages: &[&str]) -> String { + format!( + r#" +manifest_version = 1 +[base] +image = "rolling" +[system] +packages = [{}] +[runtime] +backend = "mock" +"#, + packages + .iter() + .map(|p| format!("\"{p}\"")) + .collect::>() + .join(", ") + ) +} + +/// Verify that the store is in a consistent state after crash recovery. 
+fn verify_store_healthy(store_path: &Path) { + // Creating a new Engine triggers WAL recovery + let engine = Engine::new(store_path); + let layout = StoreLayout::new(store_path); + + // WAL must be empty after recovery + let wal = karapace_store::WriteAheadLog::new(&layout); + let incomplete = wal.list_incomplete().unwrap(); + assert!( + incomplete.is_empty(), + "WAL must be clean after recovery, found {} incomplete entries", + incomplete.len() + ); + + // Store integrity check must pass + let report = karapace_store::verify_store_integrity(&layout).unwrap(); + assert!( + report.failed.is_empty(), + "store integrity check found {} failures: {:?}", + report.failed.len(), + report.failed + ); + + // All listed environments must be inspectable + let envs = engine.list().unwrap(); + for env in &envs { + let meta = engine.inspect(&env.env_id).unwrap(); + // No environment should be stuck in Running state after crash recovery + // (WAL ResetState rollback should have fixed it) + assert_ne!( + meta.state, + karapace_store::EnvState::Running, + "env {} stuck in Running after crash recovery", + env.env_id + ); + } + + // Lock must be acquirable (proves the dead child released it) + let lock = StoreLock::try_acquire(&layout.lock_file()).unwrap(); + assert!( + lock.is_some(), + "store lock must be acquirable after child death" + ); + + // No orphaned env directories (dirs in env/ without matching metadata) + let env_base = layout.env_dir(); + if env_base.exists() { + if let Ok(entries) = fs::read_dir(&env_base) { + let meta_store = karapace_store::MetadataStore::new(layout.clone()); + for entry in entries.flatten() { + let dir_name = entry.file_name(); + let dir_name_str = dir_name.to_string_lossy(); + // Skip dotfiles + if dir_name_str.starts_with('.') { + continue; + } + // Every env dir should have matching metadata (or be cleaned up by WAL) + // We don't assert this is always true because the build might have + // completed successfully before the kill. 
But if metadata exists, + // it should be readable. + if meta_store.get(&dir_name_str).is_ok() { + // Metadata exists and is valid — good + } else { + // Orphaned env dir — WAL should have cleaned it, but if the + // build completed and was killed before metadata write, + // this is acceptable as long as it doesn't cause errors + } + } + } + } + + // No stale .running markers + if env_base.exists() { + if let Ok(entries) = fs::read_dir(&env_base) { + for entry in entries.flatten() { + let running = entry.path().join(".running"); + assert!( + !running.exists(), + "stale .running marker at {}", + running.display() + ); + } + } + } +} + +/// Fork a child that runs `child_fn` in a loop, kill it after `delay`, +/// then verify store health in the parent. +/// +/// # Safety +/// Uses `libc::fork()` which is inherently unsafe. The child must not +/// return — it either loops forever or exits. +unsafe fn crash_test(store_path: &Path, delay: std::time::Duration, child_fn: fn(&Path)) { + let pid = libc::fork(); + assert!(pid >= 0, "fork() failed"); + + if pid == 0 { + // === CHILD PROCESS === + // Run the operation in a tight loop until killed + child_fn(store_path); + // If child_fn returns, exit immediately + libc::_exit(0); + } + + // === PARENT PROCESS === + std::thread::sleep(delay); + + // Send SIGKILL — no chance for cleanup + let ret = libc::kill(pid, libc::SIGKILL); + assert_eq!(ret, 0, "kill() failed"); + + // Wait for child to die + let mut status: i32 = 0; + let waited = libc::waitpid(pid, &raw mut status, 0); + assert_eq!(waited, pid, "waitpid() failed"); + + // Now verify the store + verify_store_healthy(store_path); +} + +/// Child function: build environments in a tight loop +fn child_build_loop(store_path: &Path) { + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store_path); + + for i in 0u64.. 
{
+        let pkgs: Vec<String> = (0..=(i % 4)).map(|j| format!("pkg{j}")).collect();
+        let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect();
+        let manifest = write_manifest(project.path(), &mock_manifest(&pkg_refs));
+        let _ = engine.build(&manifest);
+    }
+}
+
+/// Child function: build + destroy in a tight loop
+fn child_build_destroy_loop(store_path: &Path) {
+    let project = tempfile::tempdir().unwrap();
+    let engine = Engine::new(store_path);
+
+    for i in 0u64.. {
+        let pkgs: Vec<String> = (0..=(i % 2)).map(|j| format!("pkg{j}")).collect();
+        let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect();
+        let manifest = write_manifest(project.path(), &mock_manifest(&pkg_refs));
+        if let Ok(r) = engine.build(&manifest) {
+            let _ = engine.destroy(&r.identity.env_id);
+        }
+    }
+}
+
+/// Child function: build + commit in a tight loop
+fn child_build_commit_loop(store_path: &Path) {
+    let project = tempfile::tempdir().unwrap();
+    let engine = Engine::new(store_path);
+
+    let manifest = write_manifest(project.path(), &mock_manifest(&["git"]));
+    if let Ok(r) = engine.build(&manifest) {
+        let env_id = r.identity.env_id.to_string();
+        let upper = store_path.join("env").join(&env_id).join("upper");
+        let _ = fs::create_dir_all(&upper);
+
+        for i in 0u64.. 
{
+            let _ = fs::write(upper.join(format!("file_{i}.txt")), format!("data {i}"));
+            let _ = engine.commit(&env_id);
+        }
+    }
+}
+
+/// Child function: build + commit + restore in a tight loop
+fn child_commit_restore_loop(store_path: &Path) {
+    let project = tempfile::tempdir().unwrap();
+    let engine = Engine::new(store_path);
+
+    let manifest = write_manifest(project.path(), &mock_manifest(&["git"]));
+    if let Ok(r) = engine.build(&manifest) {
+        let env_id = r.identity.env_id.to_string();
+        let upper = store_path.join("env").join(&env_id).join("upper");
+        let _ = fs::create_dir_all(&upper);
+
+        // Create initial snapshot
+        let _ = fs::write(upper.join("base.txt"), "base content");
+        if let Ok(snap_hash) = engine.commit(&env_id) {
+            for i in 0u64.. {
+                let _ = fs::write(upper.join(format!("file_{i}.txt")), format!("data {i}"));
+                let _ = engine.commit(&env_id);
+                let _ = engine.restore(&env_id, &snap_hash);
+            }
+        }
+    }
+}
+
+/// Child function: build + GC in a tight loop
+fn child_gc_loop(store_path: &Path) {
+    let project = tempfile::tempdir().unwrap();
+    let engine = Engine::new(store_path);
+
+    // Build several environments
+    let mut env_ids = Vec::new();
+    for i in 0..5 {
+        let pkgs: Vec<String> = (0..=i).map(|j| format!("pkg{j}")).collect();
+        let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect();
+        let manifest = write_manifest(project.path(), &mock_manifest(&pkg_refs));
+        if let Ok(r) = engine.build(&manifest) {
+            env_ids.push(r.identity.env_id.to_string());
+        }
+    }
+
+    let layout = StoreLayout::new(store_path);
+    for i in 0u64.. 
{
+        // Destroy one environment per cycle
+        let idx = (i as usize) % env_ids.len();
+        let _ = engine.destroy(&env_ids[idx]);
+
+        // Run GC
+        if let Ok(Some(lock)) = StoreLock::try_acquire(&layout.lock_file()) {
+            let _ = engine.gc(&lock, false);
+        }
+
+        // Rebuild
+        let pkgs: Vec<String> = (0..=idx).map(|j| format!("pkg{j}")).collect();
+        let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect();
+        let manifest = write_manifest(project.path(), &mock_manifest(&pkg_refs));
+        if let Ok(r) = engine.build(&manifest) {
+            env_ids[idx] = r.identity.env_id.to_string();
+        }
+    }
+}
+
+/// Child function: build + enter in a tight loop (tests ResetState WAL)
+fn child_enter_loop(store_path: &Path) {
+    let project = tempfile::tempdir().unwrap();
+    let engine = Engine::new(store_path);
+
+    let manifest = write_manifest(project.path(), &mock_manifest(&["git"]));
+    if let Ok(r) = engine.build(&manifest) {
+        let env_id = r.identity.env_id.to_string();
+        for _ in 0u64.. {
+            let _ = engine.enter(&env_id);
+        }
+    }
+}
+
+// --- Crash tests ---
+// Each test runs with multiple delay values to increase the chance of hitting
+// different points in the operation lifecycle. 
+ +#[test] +fn crash_during_build() { + for delay_ms in [1, 5, 10, 20, 50] { + let store = tempfile::tempdir().unwrap(); + // Pre-initialize the store + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + unsafe { + crash_test( + store.path(), + std::time::Duration::from_millis(delay_ms), + child_build_loop, + ); + } + } +} + +#[test] +fn crash_during_build_destroy() { + for delay_ms in [1, 5, 10, 20, 50] { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + unsafe { + crash_test( + store.path(), + std::time::Duration::from_millis(delay_ms), + child_build_destroy_loop, + ); + } + } +} + +#[test] +fn crash_during_commit() { + for delay_ms in [1, 5, 10, 20, 50] { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + unsafe { + crash_test( + store.path(), + std::time::Duration::from_millis(delay_ms), + child_build_commit_loop, + ); + } + } +} + +#[test] +fn crash_during_restore() { + for delay_ms in [1, 5, 10, 20, 50] { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + unsafe { + crash_test( + store.path(), + std::time::Duration::from_millis(delay_ms), + child_commit_restore_loop, + ); + } + } +} + +#[test] +fn crash_during_gc() { + for delay_ms in [5, 10, 20, 50, 100] { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + unsafe { + crash_test( + store.path(), + std::time::Duration::from_millis(delay_ms), + child_gc_loop, + ); + } + } +} + +#[test] +fn crash_during_enter() { + for delay_ms in [1, 5, 10, 20, 50] { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + unsafe { + crash_test( + store.path(), + std::time::Duration::from_millis(delay_ms), + child_enter_loop, + ); 
+        }
+    }
+}
diff --git a/crates/karapace-core/tests/e2e.rs b/crates/karapace-core/tests/e2e.rs
new file mode 100644
index 0000000..93f956f
--- /dev/null
+++ b/crates/karapace-core/tests/e2e.rs
@@ -0,0 +1,630 @@
+//! End-to-end tests that exercise the real namespace backend.
+//!
+//! These tests are `#[ignore]` by default because they require:
+//! - Linux with user namespace support
+//! - `fuse-overlayfs` installed
+//! - `curl` installed
+//! - Network access (to download base images)
+//!
+//! Run with: `cargo test --test e2e -- --ignored`
+
+use karapace_core::Engine;
+use karapace_store::{EnvState, StoreLayout};
+use std::fs;
+use std::path::Path;
+
+fn namespace_manifest(packages: &[&str]) -> String {
+    let pkgs = if packages.is_empty() {
+        String::new()
+    } else {
+        let list: Vec<String> = packages.iter().map(|p| format!("\"{p}\"")).collect();
+        format!("\n[system]\npackages = [{}]\n", list.join(", "))
+    };
+    format!(
+        r#"manifest_version = 1
+
+[base]
+image = "rolling"
+{pkgs}
+[runtime]
+backend = "namespace"
+"#
+    )
+}
+
+fn write_manifest(dir: &Path, content: &str) -> std::path::PathBuf {
+    let path = dir.join("karapace.toml");
+    fs::write(&path, content).unwrap();
+    path
+}
+
+fn prereqs_available() -> bool {
+    let ns = karapace_runtime::check_namespace_prereqs();
+    if !ns.is_empty() {
+        let msg = karapace_runtime::format_missing(&ns);
+        assert!(
+            std::env::var("CI").is_err(),
+            "CI FATAL: E2E prerequisites missing — tests cannot silently skip in CI.\n{msg}"
+        );
+        eprintln!("skipping E2E: missing prerequisites: {msg}");
+        return false;
+    }
+    true
+}
+
+/// Build a minimal environment with the namespace backend (no packages). 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_build_minimal_namespace() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + + assert!(!result.identity.env_id.is_empty()); + assert!(!result.identity.short_id.is_empty()); + + let meta = engine.inspect(&result.identity.env_id).unwrap(); + assert_eq!(meta.state, EnvState::Built); + + // Verify the environment directory was created + let layout = StoreLayout::new(store.path()); + assert!(layout.env_path(&result.identity.env_id).exists()); + + // Lock file was written + assert!(project.path().join("karapace.lock").exists()); +} + +/// Exec a command inside a built environment and verify stdout. +#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_exec_in_namespace() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + + // Exec `echo hello` inside the container + let cmd = vec!["echo".to_owned(), "hello".to_owned()]; + // exec() writes to stdout/stderr directly; just verify it doesn't error + engine.exec(&result.identity.env_id, &cmd).unwrap(); +} + +/// Destroy cleans up all overlay directories. 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_destroy_cleans_up() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.clone(); + + let layout = StoreLayout::new(store.path()); + assert!(layout.env_path(&env_id).exists()); + + engine.destroy(&env_id).unwrap(); + + // Environment directory should be gone + assert!(!layout.env_path(&env_id).exists()); +} + +/// Rebuild produces the same env_id for the same manifest. +#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_rebuild_determinism() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let r1 = engine.build(&manifest).unwrap(); + let r2 = engine.rebuild(&manifest).unwrap(); + + assert_eq!( + r1.identity.env_id, r2.identity.env_id, + "rebuild must produce the same env_id" + ); +} + +/// Snapshot and restore round-trip with real namespace backend. 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_snapshot_and_restore() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.clone(); + + // Write a file to the upper dir (simulating user modifications) + let upper = StoreLayout::new(store.path()).upper_dir(&env_id); + if upper.exists() { + // Clear build artifacts first + let _ = fs::remove_dir_all(&upper); + } + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("user_data.txt"), "snapshot baseline").unwrap(); + + // Commit a snapshot + let snapshot_hash = engine.commit(&env_id).unwrap(); + assert!(!snapshot_hash.is_empty()); + + // Verify snapshot is listed + let snapshots = engine.list_snapshots(&env_id).unwrap(); + assert_eq!(snapshots.len(), 1); + + // Mutate upper dir after snapshot + fs::write(upper.join("user_data.txt"), "MODIFIED").unwrap(); + fs::write(upper.join("extra.txt"), "should disappear").unwrap(); + + // Restore from snapshot + engine.restore(&env_id, &snapshot_hash).unwrap(); + + // Verify restore worked + assert_eq!( + fs::read_to_string(upper.join("user_data.txt")).unwrap(), + "snapshot baseline" + ); + assert!( + !upper.join("extra.txt").exists(), + "extra file must be gone after restore" + ); +} + +/// Overlay correctness: files written in upper are visible, base is read-only. 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_overlay_file_visibility() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.clone(); + + let layout = StoreLayout::new(store.path()); + let upper = layout.upper_dir(&env_id); + fs::create_dir_all(&upper).unwrap(); + + // Write a file in upper — should be visible via exec + fs::write(upper.join("test_marker.txt"), "visible").unwrap(); + + // exec `cat /test_marker.txt` should succeed (file visible through overlay) + let cmd = vec!["cat".to_owned(), "/test_marker.txt".to_owned()]; + let result = engine.exec(&env_id, &cmd); + // If overlay is correctly mounted, the file is visible + assert!( + result.is_ok(), + "files in upper dir must be visible through overlay" + ); +} + +/// Enter/exit cycle: repeated enter should not leak state. 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_enter_exit_cycle() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.clone(); + + // Run exec 20 times — should not accumulate state or leak + for i in 0..20 { + let cmd = vec!["echo".to_owned(), format!("cycle-{i}")]; + engine.exec(&env_id, &cmd).unwrap(); + } + + // Environment should still be in Built state + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!( + meta.state, + EnvState::Built, + "env must be Built after enter/exit cycles" + ); +} + +// --- IG-M1: Real Runtime Backend Validation --- + +/// Verify no fuse-overlayfs mounts leak after build + exec + destroy cycle. +#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_mount_leak_detection() { + if !prereqs_available() { + return; + } + + let mounts_before = fs::read_to_string("/proc/mounts").unwrap_or_default(); + let fuse_before = mounts_before + .lines() + .filter(|l| l.contains("fuse-overlayfs")) + .count(); + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.clone(); + + // Exec inside + engine + .exec(&env_id, &["echo".to_owned(), "leak-test".to_owned()]) + .unwrap(); + + // Destroy + engine.destroy(&env_id).unwrap(); + + let mounts_after = fs::read_to_string("/proc/mounts").unwrap_or_default(); + let fuse_after = mounts_after + .lines() + .filter(|l| l.contains("fuse-overlayfs")) + .count(); + + assert_eq!( + fuse_before, 
fuse_after, + "fuse-overlayfs mount count must not change after build+exec+destroy: before={fuse_before}, after={fuse_after}" + ); +} + +/// Repeated build/destroy cycles must not accumulate state or stale mounts. +#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_build_destroy_20_cycles() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + let layout = StoreLayout::new(store.path()); + + for i in 0..20 { + let manifest = write_manifest(project.path(), &namespace_manifest(&[])); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.clone(); + engine.destroy(&env_id).unwrap(); + assert!( + !layout.env_path(&env_id).exists(), + "env dir must be gone after destroy in cycle {i}" + ); + } + + // Final integrity check + let report = karapace_store::verify_store_integrity(&layout).unwrap(); + assert!( + report.failed.is_empty(), + "store integrity must pass after 20 build/destroy cycles: {:?}", + report.failed + ); + + // No stale overlays + let mounts = fs::read_to_string("/proc/mounts").unwrap_or_default(); + let store_path_str = store.path().to_string_lossy(); + let stale: Vec<&str> = mounts + .lines() + .filter(|l| l.contains("fuse-overlayfs") && l.contains(store_path_str.as_ref())) + .collect(); + assert!( + stale.is_empty(), + "no stale overlayfs mounts after 20 cycles: {stale:?}" + ); +} + +/// If an OCI runtime (crun/runc) is available, build and destroy with it. 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_oci_build_if_available() { + if !prereqs_available() { + return; + } + + // Check if crun or runc exists + let has_oci = std::process::Command::new("which") + .arg("crun") + .output() + .map(|o| o.status.success()) + .unwrap_or(false) + || std::process::Command::new("which") + .arg("runc") + .output() + .map(|o| o.status.success()) + .unwrap_or(false); + + if !has_oci { + assert!( + std::env::var("CI").is_err(), + "CI FATAL: OCI test requires crun or runc — install in CI or remove test from CI job" + ); + eprintln!("skipping OCI test: no crun or runc found"); + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest_content = r#"manifest_version = 1 + +[base] +image = "rolling" + +[runtime] +backend = "oci" +"#; + let manifest = write_manifest(project.path(), manifest_content); + let result = engine.build(&manifest).unwrap(); + let env_id = result.identity.env_id.clone(); + + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!(meta.state, EnvState::Built); + + engine.destroy(&env_id).unwrap(); + let layout = StoreLayout::new(store.path()); + assert!( + !layout.env_path(&env_id).exists(), + "OCI env dir must be cleaned up after destroy" + ); +} + +/// Concurrent exec calls on the same environment must all succeed. 
+#[test]
+#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"]
+fn e2e_namespace_concurrent_exec() {
+    if !prereqs_available() {
+        return;
+    }
+
+    let store = tempfile::tempdir().unwrap();
+    let project = tempfile::tempdir().unwrap();
+    let engine = std::sync::Arc::new(Engine::new(store.path()));
+
+    let manifest = write_manifest(project.path(), &namespace_manifest(&[]));
+    let result = engine.build(&manifest).unwrap();
+    let env_id = std::sync::Arc::new(result.identity.env_id.clone());
+
+    let handles: Vec<_> = (0..4)
+        .map(|i| {
+            let eng = std::sync::Arc::clone(&engine);
+            let eid = std::sync::Arc::clone(&env_id);
+            std::thread::spawn(move || {
+                let cmd = vec!["echo".to_owned(), format!("thread-{i}")];
+                eng.exec(&eid, &cmd).unwrap();
+            })
+        })
+        .collect();
+
+    for h in handles {
+        h.join().expect("exec thread must not panic");
+    }
+
+    // No stale .running markers
+    let layout = StoreLayout::new(store.path());
+    let env_path = layout.env_path(&env_id);
+    if env_path.exists() {
+        let running_markers: Vec<_> = fs::read_dir(&env_path)
+            .unwrap()
+            .filter_map(Result::ok)
+            .filter(|e| e.file_name().to_string_lossy().ends_with(".running"))
+            .collect();
+        assert!(
+            running_markers.is_empty(),
+            "no stale .running markers after concurrent exec: {:?}",
+            running_markers
+                .iter()
+                .map(fs::DirEntry::file_name)
+                .collect::<Vec<_>>()
+        );
+    }
+}
+
+// --- IG-M2: Real Package Resolution Validation ---
+
+/// Verify resolved packages have real versions (not mock/unresolved). 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_resolve_pins_exact_versions() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&["curl"])); + let result = engine.build(&manifest).unwrap(); + + for pkg in &result.lock_file.resolved_packages { + assert!( + !pkg.version.is_empty(), + "package {} has empty version", + pkg.name + ); + assert_ne!( + pkg.version, "0.0.0-mock", + "package {} has mock version — real resolver not running", + pkg.name + ); + // Version should contain at least one digit + assert!( + pkg.version.chars().any(|c| c.is_ascii_digit()), + "package {} version '{}' contains no digits — suspect", + pkg.name, + pkg.version + ); + } +} + +/// Rebuild same manifest must produce identical env_id and resolved versions. +#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_resolve_deterministic_across_rebuilds() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &namespace_manifest(&["curl"])); + let r1 = engine.build(&manifest).unwrap(); + let r2 = engine.rebuild(&manifest).unwrap(); + + assert_eq!( + r1.identity.env_id, r2.identity.env_id, + "same manifest must produce same env_id" + ); + assert_eq!( + r1.lock_file.resolved_packages, r2.lock_file.resolved_packages, + "resolved packages must be identical across rebuilds" + ); +} + +/// Building with a non-existent package must fail cleanly. 
+#[test]
+#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"]
+fn e2e_resolve_nonexistent_package_fails() {
+    if !prereqs_available() {
+        return;
+    }
+
+    let store = tempfile::tempdir().unwrap();
+    let project = tempfile::tempdir().unwrap();
+    let engine = Engine::new(store.path());
+
+    let manifest = write_manifest(
+        project.path(),
+        &namespace_manifest(&["nonexistent-pkg-that-does-not-exist-xyz"]),
+    );
+    let result = engine.build(&manifest);
+
+    assert!(result.is_err(), "build with non-existent package must fail");
+
+    // No orphaned env directories
+    let layout = StoreLayout::new(store.path());
+    let env_dir = layout.env_dir();
+    if env_dir.exists() {
+        let entries: Vec<_> = fs::read_dir(&env_dir)
+            .unwrap()
+            .filter_map(Result::ok)
+            .collect();
+        assert!(
+            entries.is_empty(),
+            "no orphaned env dirs after failed build: {:?}",
+            entries
+                .iter()
+                .map(fs::DirEntry::file_name)
+                .collect::<Vec<_>>()
+        );
+    }
+}
+
+/// Build with multiple packages — all must have non-empty resolved versions.
+#[test]
+#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"]
+fn e2e_resolve_multiple_packages() {
+    if !prereqs_available() {
+        return;
+    }
+
+    let store = tempfile::tempdir().unwrap();
+    let project = tempfile::tempdir().unwrap();
+    let engine = Engine::new(store.path());
+
+    let manifest = write_manifest(project.path(), &namespace_manifest(&["curl", "git"]));
+    let result = engine.build(&manifest).unwrap();
+
+    assert!(
+        result.lock_file.resolved_packages.len() >= 2,
+        "at least 2 resolved packages expected, got {}",
+        result.lock_file.resolved_packages.len()
+    );
+    for pkg in &result.lock_file.resolved_packages {
+        assert!(
+            !pkg.version.is_empty() && pkg.version != "unresolved",
+            "package {} has unresolved version",
+            pkg.name
+        );
+    }
+}
+
+/// Build with packages (requires network to download image + install). 
+#[test] +#[ignore = "requires Linux user namespaces, fuse-overlayfs, curl, and network"] +fn e2e_build_with_packages() { + if !prereqs_available() { + return; + } + + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + // Use a package whose zypper name matches its RPM name on openSUSE + let manifest = write_manifest(project.path(), &namespace_manifest(&["curl"])); + let result = engine.build(&manifest).unwrap(); + + assert!(!result.identity.env_id.is_empty()); + + // Lock file should have resolved packages with real versions + let lock = result.lock_file; + assert_eq!(lock.lock_version, 2); + assert!(!lock.resolved_packages.is_empty()); + // At least one package should have a resolved version. + // Note: some package names may not match their RPM names exactly, + // causing fallback to "unresolved". This is a known limitation. + let resolved_count = lock + .resolved_packages + .iter() + .filter(|p| p.version != "unresolved") + .count(); + assert!( + resolved_count > 0, + "at least one package should have a resolved version, got: {:?}", + lock.resolved_packages + ); +} diff --git a/crates/karapace-core/tests/enospc.rs b/crates/karapace-core/tests/enospc.rs new file mode 100644 index 0000000..d4497d0 --- /dev/null +++ b/crates/karapace-core/tests/enospc.rs @@ -0,0 +1,444 @@ +//! IG-M4: True disk-full (ENOSPC) simulation tests. +//! +//! These tests mount a tiny tmpfs to trigger real ENOSPC conditions. +//! They require root (or equivalent) to mount tmpfs, so they are ignored +//! by default and run in CI with: `sudo -E cargo test --test enospc -- --ignored` + +use std::path::{Path, PathBuf}; +use std::process::Command; + +/// Mount a tmpfs of the given size (in KB) at `path`. +/// Returns true if successful. Requires root. 
+fn mount_tiny_tmpfs(path: &Path, size_kb: u64) -> bool {
+    std::fs::create_dir_all(path).unwrap();
+    let status = Command::new("mount")
+        .args(["-t", "tmpfs", "-o", &format!("size={size_kb}k"), "tmpfs"])
+        .arg(path)
+        .status();
+    matches!(status, Ok(s) if s.success())
+}
+
+/// Unmount the tmpfs at `path`.
+fn unmount(path: &Path) {
+    let _ = Command::new("umount").arg(path).status();
+}
+
+/// RAII guard that unmounts on drop.
+struct TmpfsGuard {
+    path: PathBuf,
+}
+
+impl TmpfsGuard {
+    fn mount(path: &Path, size_kb: u64) -> Option<Self> {
+        if mount_tiny_tmpfs(path, size_kb) {
+            Some(Self {
+                path: path.to_path_buf(),
+            })
+        } else {
+            None
+        }
+    }
+}
+
+impl Drop for TmpfsGuard {
+    fn drop(&mut self) {
+        unmount(&self.path);
+    }
+}
+
+#[test]
+#[ignore = "requires root for tmpfs mount"]
+fn enospc_object_put_returns_io_error() {
+    let base = tempfile::tempdir().unwrap();
+    let mount_point = base.path().join("tiny");
+    let _guard = TmpfsGuard::mount(&mount_point, 64)
+        .expect("failed to mount tmpfs — are you running as root?");
+
+    let layout = karapace_store::StoreLayout::new(&mount_point);
+    layout.initialize().unwrap();
+    let obj_store = karapace_store::ObjectStore::new(layout);
+
+    // Write objects until we hit ENOSPC
+    let mut hit_error = false;
+    for i in 0..10_000 {
+        let data = format!("object-data-{i}-padding-to-fill-disk-quickly").repeat(10);
+        match obj_store.put(data.as_bytes()) {
+            Ok(_) => {}
+            Err(e) => {
+                let msg = format!("{e}");
+                eprintln!("ENOSPC triggered at object {i}: {msg}");
+                hit_error = true;
+                // Must be an Io error, never a panic
+                assert!(
+                    matches!(e, karapace_store::StoreError::Io(_)),
+                    "expected StoreError::Io, got: {e}"
+                );
+                break;
+            }
+        }
+    }
+    assert!(
+        hit_error,
+        "should have hit ENOSPC within 10000 objects on 64KB tmpfs"
+    );
+}
+
+#[test]
+#[ignore = "requires root for tmpfs mount"]
+fn enospc_build_fails_cleanly() {
+    use karapace_core::Engine;
+    use karapace_store::StoreLayout;
+
+    let base = 
tempfile::tempdir().unwrap();
+    let mount_point = base.path().join("tiny");
+    let _guard = TmpfsGuard::mount(&mount_point, 64)
+        .expect("failed to mount tmpfs — are you running as root?");
+
+    let layout = StoreLayout::new(&mount_point);
+    layout.initialize().unwrap();
+
+    let manifest = r#"
+manifest_version = 1
+[base]
+image = "rolling"
+[system]
+packages = ["curl", "git", "vim", "wget", "htop"]
+"#;
+
+    let manifest_path = mount_point.join("karapace.toml");
+    std::fs::write(&manifest_path, manifest).unwrap();
+
+    let engine = Engine::new(&mount_point);
+    let result = engine.build(&manifest_path);
+
+    // Build must fail (ENOSPC), not panic
+    assert!(result.is_err(), "build on 64KB tmpfs must fail");
+
+    // WAL must have no incomplete entries after error cleanup
+    let wal = karapace_store::WriteAheadLog::new(&layout);
+    let incomplete = wal.list_incomplete().unwrap_or_default();
+    assert!(
+        incomplete.is_empty(),
+        "WAL must be clean after failed build, found {} incomplete entries",
+        incomplete.len()
+    );
+
+    // No orphaned env directories
+    let env_dir = layout.env_dir();
+    if env_dir.exists() {
+        let entries: Vec<_> = std::fs::read_dir(&env_dir)
+            .unwrap()
+            .filter_map(Result::ok)
+            .collect();
+        assert!(
+            entries.is_empty(),
+            "no orphaned env dirs after failed build, found: {:?}",
+            entries
+                .iter()
+                .map(std::fs::DirEntry::file_name)
+                .collect::<Vec<_>>()
+        );
+    }
+}
+
+#[test]
+#[ignore = "requires root for tmpfs mount"]
+fn enospc_wal_write_fails() {
+    let base = tempfile::tempdir().unwrap();
+    let mount_point = base.path().join("tiny");
+    let _guard = TmpfsGuard::mount(&mount_point, 4)
+        .expect("failed to mount tmpfs — are you running as root?");
+
+    // Create minimal store structure
+    let store_dir = mount_point.join("store");
+    std::fs::create_dir_all(store_dir.join("wal")).unwrap();
+
+    // Fill the tmpfs with dummy data until nearly full
+    for i in 0..100 {
+        let path = mount_point.join(format!("filler_{i}"));
+        if std::fs::write(&path, [0u8; 
512]).is_err() { + break; + } + } + + let layout = karapace_store::StoreLayout::new(&mount_point); + let wal = karapace_store::WriteAheadLog::new(&layout); + + // WAL begin should fail due to ENOSPC + let result = wal.begin(karapace_store::WalOpKind::Build, "test_env"); + assert!( + result.is_err(), + "WAL begin on full disk must fail, not panic" + ); +} + +#[test] +#[ignore = "requires root for tmpfs mount"] +fn enospc_commit_fails_cleanly() { + use karapace_core::Engine; + use karapace_store::StoreLayout; + + let base = tempfile::tempdir().unwrap(); + let mount_point = base.path().join("medium"); + // 256KB — enough for build, but commit with large upper should fail + let _guard = TmpfsGuard::mount(&mount_point, 256) + .expect("failed to mount tmpfs — are you running as root?"); + + let layout = StoreLayout::new(&mount_point); + layout.initialize().unwrap(); + + let manifest = r#" +manifest_version = 1 +[base] +image = "rolling" +"#; + let manifest_path = mount_point.join("karapace.toml"); + std::fs::write(&manifest_path, manifest).unwrap(); + + let engine = Engine::new(&mount_point); + + // Build must succeed on 256KB — if it doesn't, the test setup is wrong + let build_result = engine.build(&manifest_path); + assert!( + build_result.is_ok(), + "build on 256KB tmpfs must succeed for commit test to be valid: {:?}", + build_result.err() + ); + let env_id = build_result.unwrap().identity.env_id; + + // Write enough data to the upper dir to fill the disk + let upper = layout.upper_dir(&env_id); + std::fs::create_dir_all(&upper).unwrap(); + let mut filled = false; + for i in 0..500 { + let path = upper.join(format!("bigfile_{i}")); + if std::fs::write(&path, [0xAB; 1024]).is_err() { + filled = true; + break; + } + } + assert!( + filled, + "must fill disk before commit — 256KB tmpfs should be exhaustible" + ); + + // Commit MUST fail due to ENOSPC during layer packing + let commit_result = engine.commit(&env_id); + assert!( + commit_result.is_err(), + "commit on full 
disk MUST fail — test is invalid if it succeeds" + ); + + // Verify env state is still Built (not corrupted) + let meta = karapace_store::MetadataStore::new(layout.clone()) + .get(&env_id) + .unwrap(); + assert_eq!( + meta.state, + karapace_store::EnvState::Built, + "env state must remain Built after failed commit" + ); + + // No partial commit artifacts + let layers_dir = layout.layers_dir(); + if layers_dir.exists() { + let staging = layout.staging_dir(); + if staging.exists() { + let staging_entries: Vec<_> = std::fs::read_dir(&staging) + .unwrap() + .filter_map(Result::ok) + .collect(); + assert!( + staging_entries.is_empty(), + "no partial staging artifacts after failed commit: {:?}", + staging_entries + .iter() + .map(std::fs::DirEntry::file_name) + .collect::>() + ); + } + } +} + +#[test] +#[ignore = "requires root for tmpfs mount"] +fn enospc_recovery_after_freeing_space() { + use karapace_store::{ObjectStore, StoreLayout}; + + let base = tempfile::tempdir().unwrap(); + let mount_point = base.path().join("recov"); + let _guard = TmpfsGuard::mount(&mount_point, 128) + .expect("failed to mount tmpfs — are you running as root?"); + + let layout = StoreLayout::new(&mount_point); + layout.initialize().unwrap(); + let obj_store = ObjectStore::new(layout); + + // Fill with objects + let mut hashes = Vec::new(); + for i in 0..500 { + let data = format!("fill-data-{i}").repeat(5); + match obj_store.put(data.as_bytes()) { + Ok(h) => hashes.push(h), + Err(_) => break, + } + } + assert!(!hashes.is_empty(), "should have stored at least one object"); + + // Attempt one more write — MUST fail (disk full) + let big_data = [0xCD; 4096]; + let err_result = obj_store.put(&big_data); + assert!( + err_result.is_err(), + "128KB tmpfs must be full after filling — test setup invalid if write succeeds" + ); + + // Delete half the objects to free space + let objects_dir = mount_point.join("store").join("objects"); + let half = hashes.len() / 2; + for h in &hashes[..half] { + let _ = 
std::fs::remove_file(objects_dir.join(h)); + } + + // Now writes should succeed again + let recovery_result = obj_store.put(b"recovery data after freeing space"); + assert!( + recovery_result.is_ok(), + "write must succeed after freeing space: {:?}", + recovery_result.err() + ); +} + +#[test] +#[ignore = "requires root for tmpfs mount"] +fn enospc_layer_put_fails_cleanly() { + use karapace_store::{LayerKind, LayerManifest, LayerStore, StoreLayout}; + + let base = tempfile::tempdir().unwrap(); + let mount_point = base.path().join("tiny_layer"); + let _guard = TmpfsGuard::mount(&mount_point, 8) + .expect("failed to mount tmpfs — are you running as root?"); + + let layout = StoreLayout::new(&mount_point); + layout.initialize().unwrap(); + + // Fill the tmpfs + for i in 0..200 { + let path = mount_point.join(format!("filler_{i}")); + if std::fs::write(&path, [0u8; 256]).is_err() { + break; + } + } + + let layer_store = LayerStore::new(layout.clone()); + let manifest = LayerManifest { + hash: "test_layer_enospc".to_owned(), + kind: LayerKind::Base, + parent: None, + object_refs: vec!["obj1".to_owned(), "obj2".to_owned()], + read_only: true, + tar_hash: String::new(), + }; + + let result = layer_store.put(&manifest); + assert!( + result.is_err(), + "layer put on full disk MUST fail, not succeed" + ); + assert!( + matches!( + result.as_ref().unwrap_err(), + karapace_store::StoreError::Io(_) + ), + "expected StoreError::Io, got: {:?}", + result.unwrap_err() + ); +} + +#[test] +#[ignore = "requires root for tmpfs mount"] +fn enospc_metadata_put_fails_cleanly() { + use karapace_store::{EnvMetadata, EnvState, MetadataStore, StoreLayout}; + + let base = tempfile::tempdir().unwrap(); + let mount_point = base.path().join("tiny_meta"); + let _guard = TmpfsGuard::mount(&mount_point, 8) + .expect("failed to mount tmpfs — are you running as root?"); + + let layout = StoreLayout::new(&mount_point); + layout.initialize().unwrap(); + + // Fill the tmpfs + for i in 0..200 { + let path = 
mount_point.join(format!("filler_{i}")); + if std::fs::write(&path, [0u8; 256]).is_err() { + break; + } + } + + let meta_store = MetadataStore::new(layout); + let meta = EnvMetadata { + env_id: "enospc_test_env".into(), + short_id: "enospc_test".into(), + name: Some("enospc-test".to_owned()), + state: EnvState::Built, + base_layer: "fake_layer".into(), + dependency_layers: vec![], + policy_layer: None, + manifest_hash: "fake_hash".into(), + ref_count: 1, + created_at: "2025-01-01T00:00:00Z".to_owned(), + updated_at: "2025-01-01T00:00:00Z".to_owned(), + checksum: None, + }; + + let result = meta_store.put(&meta); + assert!( + result.is_err(), + "metadata put on full disk MUST fail, not succeed" + ); + assert!( + matches!( + result.as_ref().unwrap_err(), + karapace_store::StoreError::Io(_) + ), + "expected StoreError::Io, got: {:?}", + result.unwrap_err() + ); +} + +#[test] +#[ignore = "requires root for tmpfs mount"] +fn enospc_version_file_write_fails() { + use karapace_store::StoreLayout; + + let base = tempfile::tempdir().unwrap(); + let mount_point = base.path().join("tiny_ver"); + // Very small: just enough for dirs but not for version file after fill + let _guard = TmpfsGuard::mount(&mount_point, 4) + .expect("failed to mount tmpfs — are you running as root?"); + + // Manually create minimal dirs (initialize writes version file, we want it to fail) + let store_dir = mount_point.join("store"); + std::fs::create_dir_all(store_dir.join("objects")).unwrap(); + std::fs::create_dir_all(store_dir.join("layers")).unwrap(); + std::fs::create_dir_all(store_dir.join("metadata")).unwrap(); + std::fs::create_dir_all(store_dir.join("staging")).unwrap(); + std::fs::create_dir_all(mount_point.join("env")).unwrap(); + + // Fill the tmpfs completely + for i in 0..200 { + let path = mount_point.join(format!("filler_{i}")); + if std::fs::write(&path, [0u8; 256]).is_err() { + break; + } + } + + // Now try to initialize (which writes the version file) — must fail + let layout = 
StoreLayout::new(&mount_point);
    let result = layout.initialize();
    assert!(
        result.is_err(),
        "StoreLayout::initialize on full disk MUST fail when writing version file"
    );
}
diff --git a/crates/karapace-core/tests/integration.rs b/crates/karapace-core/tests/integration.rs
new file mode 100644
index 0000000..70560fd
--- /dev/null
+++ b/crates/karapace-core/tests/integration.rs
@@ -0,0 +1,2337 @@
#![allow(unsafe_code)]

use karapace_core::{Engine, StoreLock};
use karapace_store::{EnvState, StoreLayout};
use std::fs;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::sync::{Arc, Barrier};
use std::thread;

/// Writes `content` as `karapace.toml` inside `dir` and returns its path.
fn write_manifest(dir: &Path, content: &str) -> std::path::PathBuf {
    let path = dir.join("karapace.toml");
    fs::write(&path, content).unwrap();
    path
}

/// Renders a minimal manifest using the mock runtime backend with the
/// given system packages. Used by most tests below to avoid real builds.
fn mock_manifest(packages: &[&str]) -> String {
    format!(
        r#"
manifest_version = 1
[base]
image = "rolling"
[system]
packages = [{}]
[runtime]
backend = "mock"
"#,
        packages
            .iter()
            .map(|p| format!("\"{p}\""))
            // FIX: restored stripped turbofish — source had `collect::>()`,
            // which is a syntax error; we need a Vec<String> to join.
            .collect::<Vec<_>>()
            .join(", ")
    )
}

// §11.2: Build → Destroy → Rebuild equality
#[test]
fn build_destroy_rebuild_produces_identical_env_id() {
    let store = tempfile::tempdir().unwrap();
    let project = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    let manifest = write_manifest(project.path(), &mock_manifest(&["git", "clang"]));

    let r1 = engine.build(&manifest).unwrap();
    let id1 = r1.identity.env_id.clone();

    engine.destroy(&id1).unwrap();

    let meta_store = karapace_store::MetadataStore::new(StoreLayout::new(store.path()));
    meta_store.remove(&id1).unwrap();

    let r2 = engine.build(&manifest).unwrap();
    assert_eq!(
        id1, r2.identity.env_id,
        "rebuild after destroy must produce identical env_id"
    );
}

// §11.2: Multi-environment isolation
#[test]
fn multi_environment_isolation() {
    let store = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    let p1 =
tempfile::tempdir().unwrap();
    let p2 = tempfile::tempdir().unwrap();

    let m1 = write_manifest(p1.path(), &mock_manifest(&["git"]));
    let m2 = write_manifest(p2.path(), &mock_manifest(&["cmake"]));

    let r1 = engine.build(&m1).unwrap();
    let r2 = engine.build(&m2).unwrap();

    assert_ne!(r1.identity.env_id, r2.identity.env_id);

    let envs = engine.list().unwrap();
    assert_eq!(envs.len(), 2);

    engine.destroy(&r1.identity.env_id).unwrap();
    let envs = engine.list().unwrap();
    assert_eq!(envs.len(), 1);

    let meta = engine.inspect(&r2.identity.env_id).unwrap();
    assert_eq!(meta.state, EnvState::Built);
}

// §11.2: GC safety under load
#[test]
fn gc_safety_with_multiple_environments() {
    let store = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    let mut env_ids = Vec::new();
    for i in 0..10 {
        let p = tempfile::tempdir().unwrap();
        // FIX: restored stripped generic — source had `let pkgs: Vec = …`;
        // the collected items are `format!` results, i.e. Strings.
        let pkgs: Vec<String> = (0..=i).map(|j| format!("pkg{j}")).collect();
        let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect();
        let m = write_manifest(p.path(), &mock_manifest(&pkg_refs));
        let r = engine.build(&m).unwrap();
        env_ids.push(r.identity.env_id);
    }

    assert_eq!(engine.list().unwrap().len(), 10);

    // Destroy half the environments, then run GC under the store lock;
    // the surviving half must be untouched.
    for id in &env_ids[..5] {
        engine.destroy(id).unwrap();
    }

    let layout = StoreLayout::new(store.path());
    let lock = StoreLock::acquire(&layout.lock_file()).unwrap();
    let _report = engine.gc(&lock, false).unwrap();

    for id in &env_ids[5..]
{ + let meta = engine.inspect(id).unwrap(); + assert_eq!(meta.state, EnvState::Built, "active env should survive GC"); + } +} + +// §11.2: Concurrent build safety +#[test] +fn concurrent_builds_are_safe() { + let store = tempfile::tempdir().unwrap(); + let store_path = store.path().to_path_buf(); + + let barrier = Arc::new(Barrier::new(4)); + let mut handles = Vec::new(); + + for i in 0..4 { + let sp = store_path.clone(); + let b = Arc::clone(&barrier); + handles.push(thread::spawn(move || { + let engine = Engine::new(&sp); + let p = tempfile::tempdir().unwrap(); + let pkgs = [format!("thread-pkg-{i}")]; + let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect(); + let m = write_manifest(p.path(), &mock_manifest(&pkg_refs)); + + b.wait(); + engine.build(&m).unwrap() + })); + } + + let results: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect(); + + let engine = Engine::new(&store_path); + let envs = engine.list().unwrap(); + assert_eq!(envs.len(), 4, "all 4 concurrent builds should succeed"); + + let ids: std::collections::HashSet<_> = + results.iter().map(|r| r.identity.env_id.clone()).collect(); + assert_eq!(ids.len(), 4, "all env IDs should be unique"); +} + +// §11.3: Reproducibility test +#[test] +fn same_manifest_produces_identical_env_id_across_engines() { + let manifest_content = mock_manifest(&["git", "clang", "cmake"]); + + let store1 = tempfile::tempdir().unwrap(); + let store2 = tempfile::tempdir().unwrap(); + let p1 = tempfile::tempdir().unwrap(); + let p2 = tempfile::tempdir().unwrap(); + + let m1 = write_manifest(p1.path(), &manifest_content); + let m2 = write_manifest(p2.path(), &manifest_content); + + let engine1 = Engine::new(store1.path()); + let engine2 = Engine::new(store2.path()); + + let r1 = engine1.build(&m1).unwrap(); + let r2 = engine2.build(&m2).unwrap(); + + assert_eq!( + r1.identity.env_id, r2.identity.env_id, + "same manifest on different stores must produce identical env_id" + ); + 
assert_eq!(r1.lock_file.env_id, r2.lock_file.env_id); + assert_eq!( + r1.lock_file.base_image_digest, + r2.lock_file.base_image_digest + ); +} + +// §16 Core invariant: host system remains untouched +#[test] +fn host_system_untouched_after_lifecycle() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let home_before: Vec<_> = fs::read_dir(std::env::var("HOME").unwrap_or("/tmp".to_owned())) + .unwrap() + .filter_map(Result::ok) + .map(|e| e.path()) + .collect(); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + engine.freeze(&r.identity.env_id).unwrap(); + engine.destroy(&r.identity.env_id).unwrap(); + + let home_after: Vec<_> = fs::read_dir(std::env::var("HOME").unwrap_or("/tmp".to_owned())) + .unwrap() + .filter_map(Result::ok) + .map(|e| e.path()) + .collect(); + + assert_eq!(home_before, home_after, "host HOME must not be modified"); +} + +// §16 Core invariant: no silent drift +#[test] +fn drift_always_observable() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Clear the upper dir to simulate a clean post-build state. + // The mock backend creates files there during build; a real backend's + // overlay upper would also have content, but drift is measured from + // the post-build snapshot baseline. 
+ let upper = engine.store_layout().upper_dir(&r.identity.env_id); + if upper.exists() { + fs::remove_dir_all(&upper).unwrap(); + fs::create_dir_all(&upper).unwrap(); + } + + let report = karapace_core::diff_overlay(engine.store_layout(), &r.identity.env_id).unwrap(); + assert!(!report.has_drift, "fresh build should have no drift"); + + fs::write(upper.join("injected.txt"), "mutation").unwrap(); + + let report = karapace_core::diff_overlay(engine.store_layout(), &r.identity.env_id).unwrap(); + assert!(report.has_drift, "mutation must be detected"); + assert!(report.added.contains(&"injected.txt".to_owned())); +} + +// §15: Crash safety — interrupted write leaves no partial objects +#[test] +fn store_integrity_after_partial_operations() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + obj_store.put(b"valid1").unwrap(); + obj_store.put(b"valid2").unwrap(); + + let stray_path = layout.objects_dir().join("not_a_real_hash"); + fs::write(&stray_path, b"corrupted data").unwrap(); + + let report = karapace_store::verify_store_integrity(&layout).unwrap(); + assert_eq!(report.failed.len(), 1, "corrupted object must be detected"); + assert_eq!(report.passed, 2); +} + +// §5.2: commit persists overlay into store +#[test] +fn commit_persists_overlay_drift() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Clear mock backend artifacts from upper, then add user files + let upper = engine.store_layout().upper_dir(&r.identity.env_id); + if upper.exists() { + fs::remove_dir_all(&upper).unwrap(); + } + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("user_file.txt"), "user data").unwrap(); + 
fs::create_dir_all(upper.join("subdir")).unwrap(); + fs::write(upper.join("subdir").join("nested.txt"), "nested data").unwrap(); + + let snapshot_hash = engine.commit(&r.identity.env_id).unwrap(); + assert!( + !snapshot_hash.is_empty(), + "commit should return a snapshot hash" + ); + + // Look up the snapshot layer to get the tar_hash + let layer_store = karapace_store::LayerStore::new(StoreLayout::new(store.path())); + let layer = layer_store.get(&snapshot_hash).unwrap(); + assert_eq!(layer.kind, karapace_store::LayerKind::Snapshot); + assert!(!layer.tar_hash.is_empty()); + + let obj_store = karapace_store::ObjectStore::new(StoreLayout::new(store.path())); + assert!( + obj_store.exists(&layer.tar_hash), + "committed tar object must exist in store" + ); + + // Verify the committed tar can be unpacked and contains the user files + let tar_data = obj_store.get(&layer.tar_hash).unwrap(); + let unpack_dir = tempfile::tempdir().unwrap(); + karapace_store::unpack_layer(&tar_data, unpack_dir.path()).unwrap(); + assert!(unpack_dir.path().join("user_file.txt").exists()); + assert!(unpack_dir.path().join("subdir").join("nested.txt").exists()); +} + +// INV-S1: build → modify → commit → restore → diff = zero drift +#[test] +fn restore_roundtrip() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Clear mock artifacts and add user files + let upper = engine.store_layout().upper_dir(&r.identity.env_id); + if upper.exists() { + fs::remove_dir_all(&upper).unwrap(); + } + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("user_file.txt"), "snapshot content").unwrap(); + fs::create_dir_all(upper.join("data")).unwrap(); + fs::write(upper.join("data").join("config.json"), r#"{"key":"val"}"#).unwrap(); + + // Commit the snapshot + let snapshot_hash = 
engine.commit(&r.identity.env_id).unwrap(); + + // Mutate the upper dir (simulating user modifications after snapshot) + fs::write(upper.join("user_file.txt"), "MODIFIED AFTER SNAPSHOT").unwrap(); + fs::write(upper.join("extra.txt"), "extra file").unwrap(); + + // Restore from the snapshot + engine.restore(&r.identity.env_id, &snapshot_hash).unwrap(); + + // Verify the upper dir matches the snapshot content exactly + assert_eq!( + fs::read_to_string(upper.join("user_file.txt")).unwrap(), + "snapshot content" + ); + assert_eq!( + fs::read_to_string(upper.join("data").join("config.json")).unwrap(), + r#"{"key":"val"}"# + ); + // The extra file should NOT exist after restore + assert!( + !upper.join("extra.txt").exists(), + "extra file must be gone after restore" + ); +} + +// INV-S2: restore nonexistent snapshot → error +#[test] +fn restore_nonexistent_snapshot_fails() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + let result = engine.restore(&r.identity.env_id, "nonexistent_hash"); + assert!(result.is_err()); +} + +// list_snapshots returns committed snapshots +#[test] +fn list_snapshots_after_commit() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // No snapshots initially + let snaps = engine.list_snapshots(&r.identity.env_id).unwrap(); + assert!(snaps.is_empty()); + + // Commit a snapshot + let _hash = engine.commit(&r.identity.env_id).unwrap(); + + let snaps = engine.list_snapshots(&r.identity.env_id).unwrap(); + assert_eq!(snaps.len(), 1); + assert_eq!(snaps[0].kind, karapace_store::LayerKind::Snapshot); + assert!(!snaps[0].tar_hash.is_empty()); 
}

// §12: GC scales to at least 100 environments
#[test]
fn gc_scales_to_100_environments() {
    let store = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    for i in 0..100 {
        let p = tempfile::tempdir().unwrap();
        // FIX: restored stripped generic — source had `let pkgs: Vec = …`;
        // the collected items are `format!` results, i.e. Strings.
        let pkgs: Vec<String> = (0..=i).map(|j| format!("scale-pkg-{j}")).collect();
        let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect();
        let m = write_manifest(p.path(), &mock_manifest(&pkg_refs));
        engine.build(&m).unwrap();
    }

    assert_eq!(engine.list().unwrap().len(), 100);

    let layout = StoreLayout::new(store.path());
    let lock = StoreLock::acquire(&layout.lock_file()).unwrap();
    let start = std::time::Instant::now();
    let report = engine.gc(&lock, true).unwrap();
    let elapsed = start.elapsed();

    assert!(
        elapsed.as_secs() < 10,
        "GC dry-run on 100 envs took {elapsed:?}, must be under 10s"
    );
    assert_eq!(report.orphaned_envs.len(), 0, "all envs have ref_count > 0");
}

// §12: Warm cache build under 10 seconds
#[test]
fn warm_build_under_10_seconds() {
    let store = tempfile::tempdir().unwrap();
    let project = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    let manifest = write_manifest(project.path(), &mock_manifest(&["git", "clang", "cmake"]));

    // Cold build
    engine.build(&manifest).unwrap();

    // Warm rebuild (store already populated)
    let start = std::time::Instant::now();
    engine.rebuild(&manifest).unwrap();
    let elapsed = start.elapsed();

    assert!(
        elapsed.as_secs() < 10,
        "warm rebuild took {elapsed:?}, must be under 10s"
    );
}

// §3.1: Build fails deterministically — different manifests get different IDs
#[test]
fn different_manifests_different_ids() {
    let store = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    let p1 = tempfile::tempdir().unwrap();
    let p2 = tempfile::tempdir().unwrap();
    let m1 = write_manifest(p1.path(), &mock_manifest(&["git"]));
    let m2 = write_manifest(p2.path(),
&mock_manifest(&["git", "cmake"])); + + let r1 = engine.build(&m1).unwrap(); + let r2 = engine.build(&m2).unwrap(); + assert_ne!(r1.identity.env_id, r2.identity.env_id); +} + +// §5.1: Whiteout files detected as removed +#[test] +fn whiteout_files_detected_as_removals() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Clear mock backend artifacts, then add only the whiteout + let upper = engine.store_layout().upper_dir(&r.identity.env_id); + if upper.exists() { + fs::remove_dir_all(&upper).unwrap(); + } + fs::create_dir_all(&upper).unwrap(); + // Overlayfs whiteout: .wh.filename means filename was deleted + fs::write(upper.join(".wh.deleted_config"), "").unwrap(); + + let report = karapace_core::diff_overlay(engine.store_layout(), &r.identity.env_id).unwrap(); + assert!(report.has_drift); + assert!(report.removed.contains(&"deleted_config".to_owned())); + assert!(report.added.is_empty()); +} + +// §5.1: Modified files classified correctly when lower layer exists +#[test] +fn modified_files_detected_against_lower_layer() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Clear mock backend artifacts from upper + let upper = engine.store_layout().upper_dir(&r.identity.env_id); + if upper.exists() { + fs::remove_dir_all(&upper).unwrap(); + } + fs::create_dir_all(&upper).unwrap(); + + // Simulate a lower layer file + let env_dir = engine.store_layout().env_path(&r.identity.env_id); + let lower = env_dir.join("lower"); + fs::create_dir_all(&lower).unwrap(); + fs::write(lower.join("config.txt"), "original").unwrap(); + + // Same file in upper = 
modification + fs::write(upper.join("config.txt"), "modified content").unwrap(); + // New file = added + fs::write(upper.join("new_script.sh"), "#!/bin/sh").unwrap(); + + let report = karapace_core::diff_overlay(engine.store_layout(), &r.identity.env_id).unwrap(); + assert!(report.has_drift); + assert_eq!(report.modified, vec!["config.txt"]); + assert_eq!(report.added, vec!["new_script.sh"]); + assert!(report.removed.is_empty()); +} + +// §6.1: exec works via mock backend +#[test] +fn exec_via_mock_backend() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + let result = engine.exec(&r.identity.env_id, &["echo".to_owned(), "hello".to_owned()]); + assert!(result.is_ok()); +} + +// §3.2: Lock file integrity verifiable after build +#[test] +fn lock_file_integrity_after_build() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git", "clang"])); + let r = engine.build(&manifest).unwrap(); + + // Lock file was written + let lock_path = project.path().join("karapace.lock"); + assert!(lock_path.exists()); + + // Verify integrity + assert!(r.lock_file.verify_integrity().is_ok()); + + // Verify manifest intent + let parsed = karapace_schema::parse_manifest_file(&manifest).unwrap(); + let normalized = parsed.normalize().unwrap(); + assert!(r.lock_file.verify_manifest_intent(&normalized).is_ok()); +} + +// §5.2: Frozen environment cannot be entered +#[test] +fn frozen_env_cannot_be_entered() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = 
engine.build(&manifest).unwrap(); + engine.freeze(&r.identity.env_id).unwrap(); + + let result = engine.enter(&r.identity.env_id); + assert!(result.is_err(), "entering a frozen env must fail"); +} + +// §15: Crash injection — partial write must not corrupt store +#[test] +fn crash_injection_partial_write_detected() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + + // Write valid objects + let h1 = obj_store.put(b"valid-object-1").unwrap(); + let h2 = obj_store.put(b"valid-object-2").unwrap(); + + // Simulate a crash during write: create a file with a valid hash name + // but corrupted content (as if the process died mid-write and the + // atomic rename somehow partially completed — or manual tampering) + let fake_hash = blake3::hash(b"original-content").to_hex().to_string(); + let fake_path = layout.objects_dir().join(&fake_hash); + fs::write(&fake_path, b"truncated-garbage").unwrap(); + + // Store integrity check must detect all corruption + let report = karapace_store::verify_store_integrity(&layout).unwrap(); + assert_eq!(report.checked, 3); + assert_eq!(report.passed, 2, "two valid objects should pass"); + assert_eq!( + report.failed.len(), + 1, + "corrupted object should be detected" + ); + assert_eq!(report.failed[0].hash, fake_hash); + + // Valid objects must still be readable + assert!(obj_store.get(&h1).is_ok()); + assert!(obj_store.get(&h2).is_ok()); + + // Corrupted object must fail on read + assert!(obj_store.get(&fake_hash).is_err()); +} + +// §15: Crash injection — version file corruption detected +#[test] +fn crash_injection_version_corruption_detected() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + // Corrupt the version file + let version_path = store.path().join("store").join("version"); + fs::write(&version_path, 
r#"{"format_version": 999}"#).unwrap(); + + // Re-initializing should detect version mismatch + let result = layout.initialize(); + assert!(result.is_err(), "mismatched version must be rejected"); +} + +// §12: Environment entry under 1 second (mock backend) +#[test] +fn environment_entry_under_1_second() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + let start = std::time::Instant::now(); + // Mock enter is effectively instant — this tests the overhead of + // metadata lookup, state transition, backend dispatch, and cleanup. + engine.enter(&r.identity.env_id).unwrap(); + let elapsed = start.elapsed(); + + assert!( + elapsed.as_secs() < 1, + "environment entry took {elapsed:?}, must be under 1s" + ); +} + +// §4.1: Archive lifecycle — archive preserves but prevents entry +#[test] +fn archive_lifecycle() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Archive from Built state + engine.archive(&r.identity.env_id).unwrap(); + let meta = engine.inspect(&r.identity.env_id).unwrap(); + assert_eq!(meta.state, EnvState::Archived); + + // Archived env cannot be entered + let result = engine.enter(&r.identity.env_id); + assert!(result.is_err(), "entering an archived env must fail"); + + // Archived env can be rebuilt + let r2 = engine.rebuild(&manifest).unwrap(); + let meta2 = engine.inspect(&r2.identity.env_id).unwrap(); + assert_eq!(meta2.state, EnvState::Built); +} + +// §4.1: Freeze then archive lifecycle +#[test] +fn freeze_then_archive() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = 
Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + engine.freeze(&r.identity.env_id).unwrap(); + engine.archive(&r.identity.env_id).unwrap(); + + let meta = engine.inspect(&r.identity.env_id).unwrap(); + assert_eq!(meta.state, EnvState::Archived); +} + +// §4.1: Destroy of non-existent env fails gracefully +#[test] +fn destroy_nonexistent_env_fails_gracefully() { + let store = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + let result = engine.destroy("0000000000000000000000000000000000000000000000000000000000000000"); + assert!(result.is_err()); +} + +// §3.2: Lock file v2 contains resolved package versions (not just names) +#[test] +fn lock_file_v2_has_resolved_versions() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git", "clang"])); + let r = engine.build(&manifest).unwrap(); + + assert_eq!(r.lock_file.lock_version, 2); + assert_eq!(r.lock_file.resolved_packages.len(), 2); + for pkg in &r.lock_file.resolved_packages { + assert!(!pkg.name.is_empty()); + assert!(!pkg.version.is_empty()); + assert_ne!(pkg.version, "unresolved"); + } +} + +// §6.2: Cannot destroy a running environment (must stop first) +#[test] +fn destroy_running_env_is_rejected() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Simulate entering (mock backend sets state to Running) + engine.enter(&r.identity.env_id).unwrap(); + + // Now try to destroy — should fail because mock leaves it in Running + // Note: mock enter() sets internal state but engine resets to Built on success, + // so we manually set it to 
Running to test the guard
    let meta_store = karapace_store::MetadataStore::new(engine.store_layout().clone());
    meta_store
        .update_state(&r.identity.env_id, EnvState::Running)
        .unwrap();

    let result = engine.destroy(&r.identity.env_id);
    assert!(result.is_err(), "destroy must reject Running environments");
    let err_msg = result.unwrap_err().to_string();
    assert!(
        err_msg.contains("running") || err_msg.contains("Running"),
        "error should mention Running state: {err_msg}"
    );
}

// Quick-style manifest generation + build (tests the flow used by `karapace quick`)
#[test]
fn quick_style_generated_manifest_builds_correctly() {
    let store = tempfile::tempdir().unwrap();
    let project = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    // Simulate what cmd_quick does: generate a manifest from CLI flags
    let image = "rolling";
    let packages = ["git", "curl"];
    let mut manifest = String::new();
    manifest.push_str("manifest_version = 1\n\n");
    manifest.push_str("[base]\n");
    {
        use std::fmt::Write as _;
        let _ = write!(manifest, "image = \"{image}\"\n\n");
    }
    manifest.push_str("[system]\npackages = [");
    // FIX: restored stripped generic — source had `let pkg_list: Vec = …`;
    // the collected items are `format!` results, i.e. Strings.
    let pkg_list: Vec<String> = packages.iter().map(|p| format!("\"{p}\"")).collect();
    manifest.push_str(&pkg_list.join(", "));
    manifest.push_str("]\n\n");
    manifest.push_str("[runtime]\nbackend = \"mock\"\n");

    let manifest_path = write_manifest(project.path(), &manifest);
    let result = engine.build(&manifest_path).unwrap();

    assert!(!result.identity.env_id.is_empty());
    assert!(!result.identity.short_id.is_empty());

    let meta = engine.inspect(&result.identity.env_id).unwrap();
    assert_eq!(meta.state, EnvState::Built);
}

// Quick-style minimal manifest (no packages, no hardware)
#[test]
fn quick_style_minimal_manifest_builds() {
    let store = tempfile::tempdir().unwrap();
    let project = tempfile::tempdir().unwrap();
    let engine = Engine::new(store.path());

    let manifest =
        "manifest_version = 
1\n\n[base]\nimage = \"rolling\"\n\n[runtime]\nbackend = \"mock\"\n"; + let manifest_path = write_manifest(project.path(), manifest); + let result = engine.build(&manifest_path).unwrap(); + + assert!(!result.identity.env_id.is_empty()); +} + +// §16: No hidden mutable state — init + build produce consistent lock +#[test] +fn init_then_build_produces_consistent_state() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let init_result = engine.init(&manifest).unwrap(); + + // Init creates metadata in Defined state + let meta = engine.inspect(&init_result.identity.env_id).unwrap(); + assert_eq!(meta.state, EnvState::Defined); + + // Build resolves and creates a new identity (different from init's preliminary) + let build_result = engine.build(&manifest).unwrap(); + + // The lock file from build is verifiable + assert!(build_result.lock_file.verify_integrity().is_ok()); + assert_eq!(build_result.lock_file.lock_version, 2); +} + +// INV-S2: Restore atomicity — original upper dir preserved if snapshot invalid +#[test] +fn restore_preserves_upper_on_invalid_snapshot() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Create a file in upper that we want to verify survives a failed restore + let upper = store.path().join("env").join(&env_id).join("upper"); + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("sentinel.txt"), "must survive").unwrap(); + + // Attempt restore with a nonexistent snapshot — should fail + let result = engine.restore(&env_id, "nonexistent_hash_abc123"); + assert!(result.is_err()); + + // Original upper content must still 
exist + assert!(upper.join("sentinel.txt").exists()); + assert_eq!( + fs::read_to_string(upper.join("sentinel.txt")).unwrap(), + "must survive" + ); +} + +// INV-S3: Multiple snapshots listed in deterministic hash order +#[test] +fn snapshot_ordering_is_deterministic() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Create upper dir with different content for each commit + let upper = store.path().join("env").join(&env_id).join("upper"); + + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("v1.txt"), "version 1").unwrap(); + let h1 = engine.commit(&env_id).unwrap(); + + fs::write(upper.join("v2.txt"), "version 2").unwrap(); + let h2 = engine.commit(&env_id).unwrap(); + + fs::write(upper.join("v3.txt"), "version 3").unwrap(); + let h3 = engine.commit(&env_id).unwrap(); + + // All hashes must be different + assert_ne!(h1, h2); + assert_ne!(h2, h3); + assert_ne!(h1, h3); + + // list_snapshots returns sorted by hash — verify determinism + let snaps = engine.list_snapshots(&env_id).unwrap(); + assert_eq!(snaps.len(), 3); + let hashes: Vec<&str> = snaps.iter().map(|s| s.hash.as_str()).collect(); + let mut sorted = hashes.clone(); + sorted.sort_unstable(); + assert_eq!(hashes, sorted, "snapshots must be sorted by hash"); + + // Calling again must return same order + let snaps2 = engine.list_snapshots(&env_id).unwrap(); + let hashes2: Vec<&str> = snaps2.iter().map(|s| s.hash.as_str()).collect(); + assert_eq!(hashes, hashes2, "snapshot ordering must be deterministic"); +} + +// INV-W1: WAL recovery cleans orphaned env_dir after simulated build crash +#[test] +fn wal_recovery_cleans_orphaned_env_dir() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + 
layout.initialize().unwrap(); + + // Simulate a crash during build: + // 1. Create a WAL entry for a build operation + // 2. Create an orphaned env_dir (as if build started but crashed) + // 3. Verify that Engine::new() recovery cleans it up + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + let fake_env_id = "crash_test_env_abc123"; + let orphan_dir = store.path().join("env").join(fake_env_id); + fs::create_dir_all(&orphan_dir).unwrap(); + fs::write(orphan_dir.join("partial_build_artifact"), "data").unwrap(); + + let op_id = wal + .begin(karapace_store::WalOpKind::Build, fake_env_id) + .unwrap(); + wal.add_rollback_step( + &op_id, + karapace_store::RollbackStep::RemoveDir(orphan_dir.clone()), + ) + .unwrap(); + + // Do NOT commit the WAL — simulates a crash + + // Now create a new Engine, which should trigger WAL recovery + let _engine = Engine::new(store.path()); + + // The orphaned env_dir must be cleaned up + assert!( + !orphan_dir.exists(), + "WAL recovery must remove orphaned env_dir" + ); + + // WAL must be empty after recovery + let wal2 = karapace_store::WriteAheadLog::new(&layout); + assert!(wal2.list_incomplete().unwrap().is_empty()); +} + +// --- A3: WAL & Crash Safety Hardening --- + +// Simulate crash during commit: WAL entry exists, staging dir left behind +#[test] +fn wal_crash_during_commit_leaves_recoverable_state() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + // Simulate a commit that created a staging dir but crashed before completion + let staging_dir = layout.staging_dir().join("restore-crash_test"); + fs::create_dir_all(&staging_dir).unwrap(); + fs::write(staging_dir.join("partial_data.txt"), "partial").unwrap(); + + let op_id = wal + .begin(karapace_store::WalOpKind::Commit, "crash_env") + .unwrap(); + wal.add_rollback_step( + &op_id, 
+ karapace_store::RollbackStep::RemoveDir(staging_dir.clone()), + ) + .unwrap(); + + // Do NOT commit — simulates crash + + // Recovery via new Engine must clean up + let _engine = Engine::new(store.path()); + + assert!( + !staging_dir.exists(), + "WAL recovery must remove orphaned staging dir" + ); + let wal2 = karapace_store::WriteAheadLog::new(&layout); + assert!(wal2.list_incomplete().unwrap().is_empty()); +} + +// Simulate crash during restore: old upper removed but new upper not yet renamed +#[test] +fn wal_crash_during_restore_rollback() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + let fake_env_id = "restore_crash_env"; + + // Create orphaned staging dir (as if restore was in progress) + let staging = layout.staging_dir().join(format!("restore-{fake_env_id}")); + fs::create_dir_all(&staging).unwrap(); + fs::write(staging.join("snapshot_file.txt"), "snapshot data").unwrap(); + + let op_id = wal + .begin(karapace_store::WalOpKind::Restore, fake_env_id) + .unwrap(); + wal.add_rollback_step( + &op_id, + karapace_store::RollbackStep::RemoveDir(staging.clone()), + ) + .unwrap(); + + // Crash — WAL entry remains + + // Recovery should clean up staging + let count = wal.recover().unwrap(); + assert_eq!(count, 1); + assert!(!staging.exists()); +} + +// Multiple incomplete WAL entries all rolled back in order +#[test] +fn wal_multi_entry_recovery() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + // Create 3 orphaned directories with WAL entries + let mut dirs = Vec::new(); + for i in 0..3 { + let dir = store.path().join(format!("orphan_{i}")); + fs::create_dir_all(&dir).unwrap(); + fs::write(dir.join("data"), 
format!("data_{i}")).unwrap(); + + let op_id = wal + .begin(karapace_store::WalOpKind::Build, &format!("env_{i}")) + .unwrap(); + wal.add_rollback_step(&op_id, karapace_store::RollbackStep::RemoveDir(dir.clone())) + .unwrap(); + dirs.push(dir); + } + + assert_eq!(wal.list_incomplete().unwrap().len(), 3); + + let count = wal.recover().unwrap(); + assert_eq!(count, 3); + + for dir in &dirs { + assert!(!dir.exists(), "all orphaned dirs must be cleaned up"); + } + assert!(wal.list_incomplete().unwrap().is_empty()); +} + +// Incomplete temp file in objects dir should not be visible +#[test] +fn incomplete_temp_file_invisible_to_store() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + + // Write a valid object + let hash = obj_store.put(b"valid data").unwrap(); + + // Create a temp file (simulating interrupted atomic write) + let temp_path = layout.objects_dir().join(".tmp_partial_write"); + fs::write(&temp_path, b"incomplete").unwrap(); + + // list() should only return the valid object (skips dotfiles) + let list = obj_store.list().unwrap(); + assert_eq!(list.len(), 1); + assert_eq!(list[0], hash); + + // Integrity check should only check the valid object + let report = karapace_store::verify_store_integrity(&layout).unwrap(); + assert_eq!(report.checked, 1); + assert_eq!(report.passed, 1); + assert!(report.failed.is_empty()); +} + +// Verify atomic rename is used: concurrent writes to same hash don't corrupt +#[test] +fn concurrent_object_writes_are_safe() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let store_path = store.path().to_path_buf(); + let barrier = Arc::new(Barrier::new(8)); + let mut handles = Vec::new(); + + for _ in 0..8 { + let sp = store_path.clone(); + let b = Arc::clone(&barrier); + handles.push(thread::spawn(move || { + 
let layout = StoreLayout::new(&sp); + let obj_store = karapace_store::ObjectStore::new(layout); + b.wait(); + // All threads write the same data — should deduplicate safely + obj_store.put(b"concurrent-data").unwrap() + })); + } + + let hashes: Vec = handles.into_iter().map(|h| h.join().unwrap()).collect(); + // All must produce the same hash + let first = &hashes[0]; + for h in &hashes { + assert_eq!(h, first); + } + + // Data must be readable and intact + let obj_store = karapace_store::ObjectStore::new(StoreLayout::new(store.path())); + let data = obj_store.get(first).unwrap(); + assert_eq!(data, b"concurrent-data"); +} + +// --- M1: WAL Crash Safety Hardening (2.0) --- + +// M1.1: Verify rollback step is registered before side-effect in build +#[test] +fn wal_rollback_registered_before_build_side_effect() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + // Simulate the new build pattern: register rollback step, then create dir + let fake_env_id = "m1_test_build_order"; + let op_id = wal + .begin(karapace_store::WalOpKind::Build, fake_env_id) + .unwrap(); + + let env_dir = store.path().join("env").join(fake_env_id); + // Rollback step registered BEFORE dir creation + wal.add_rollback_step( + &op_id, + karapace_store::RollbackStep::RemoveDir(env_dir.clone()), + ) + .unwrap(); + + // Verify WAL entry has the rollback step before dir even exists + let entries = wal.list_incomplete().unwrap(); + assert_eq!(entries.len(), 1); + assert!(!entries[0].rollback_steps.is_empty()); + assert!(!env_dir.exists(), "dir should not exist yet"); + + // Now create the dir (simulating actual build) + fs::create_dir_all(&env_dir).unwrap(); + + // Simulate crash: don't commit. Recovery should clean up. 
+ let _engine = Engine::new(store.path()); + assert!( + !env_dir.exists(), + "WAL recovery must remove orphaned dir even when rollback was registered before creation" + ); +} + +// M1.1: Verify rollback of nonexistent path is a safe no-op +#[test] +fn wal_rollback_nonexistent_path_is_noop() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + let op_id = wal + .begin(karapace_store::WalOpKind::Build, "noop_test") + .unwrap(); + + let nonexistent = store.path().join("env").join("does_not_exist"); + wal.add_rollback_step( + &op_id, + karapace_store::RollbackStep::RemoveDir(nonexistent.clone()), + ) + .unwrap(); + + // Recovery should succeed without error even though the path doesn't exist + let wal2 = karapace_store::WriteAheadLog::new(&layout); + let recovered = wal2.recover().unwrap(); + assert_eq!(recovered, 1); + + // WAL should be clean + assert!(wal2.list_incomplete().unwrap().is_empty()); +} + +// M1.2: Destroy with WAL protection — normal path +#[test] +fn destroy_with_wal_commits_cleanly() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Destroy should succeed and leave no WAL entries + engine.destroy(&env_id).unwrap(); + + let layout = StoreLayout::new(store.path()); + let wal = karapace_store::WriteAheadLog::new(&layout); + assert!( + wal.list_incomplete().unwrap().is_empty(), + "WAL must be clean after successful destroy" + ); +} + +// M1.2: Destroy WAL crash recovery — crash between env_dir removal and metadata removal +#[test] +fn wal_crash_during_destroy_is_recoverable() { + let store = tempfile::tempdir().unwrap(); + let layout = 
StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + let fake_env_id = "destroy_crash_env"; + + // Create env dir and metadata to simulate a built environment + let env_dir = store.path().join("env").join(fake_env_id); + fs::create_dir_all(&env_dir).unwrap(); + fs::write(env_dir.join("some_file"), "data").unwrap(); + + let meta_dir = layout.metadata_dir(); + fs::create_dir_all(&meta_dir).unwrap(); + fs::write( + meta_dir.join(fake_env_id), + r#"{"env_id":"destroy_crash_env"}"#, + ) + .unwrap(); + + // Simulate a destroy that crashed: WAL entry with rollback steps, not committed + let op_id = wal + .begin(karapace_store::WalOpKind::Destroy, fake_env_id) + .unwrap(); + wal.add_rollback_step( + &op_id, + karapace_store::RollbackStep::RemoveDir(env_dir.clone()), + ) + .unwrap(); + wal.add_rollback_step( + &op_id, + karapace_store::RollbackStep::RemoveFile(meta_dir.join(fake_env_id)), + ) + .unwrap(); + + // Recovery via Engine::new should clean up + let _engine = Engine::new(store.path()); + + assert!( + !env_dir.exists(), + "WAL recovery must remove orphaned env_dir from incomplete destroy" + ); + + let wal2 = karapace_store::WriteAheadLog::new(&layout); + assert!(wal2.list_incomplete().unwrap().is_empty()); +} + +// M1.3: Commit layer rollback — crash after layer write, before WAL commit +#[test] +fn wal_crash_during_commit_cleans_orphaned_snapshot_layer() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + // Create a fake snapshot layer manifest file + let layers_dir = layout.layers_dir(); + fs::create_dir_all(&layers_dir).unwrap(); + let fake_hash = "orphaned_snapshot_hash_abc123"; + let layer_path = layers_dir.join(fake_hash); + fs::write( + &layer_path, + 
r#"{"hash":"orphaned_snapshot_hash_abc123","kind":"Snapshot"}"#, + ) + .unwrap(); + + // Simulate commit crash: WAL entry with layer rollback, not committed + let op_id = wal + .begin(karapace_store::WalOpKind::Commit, "commit_crash_env") + .unwrap(); + wal.add_rollback_step( + &op_id, + karapace_store::RollbackStep::RemoveFile(layer_path.clone()), + ) + .unwrap(); + + // Recovery should remove the orphaned snapshot layer + let _engine = Engine::new(store.path()); + + assert!( + !layer_path.exists(), + "WAL recovery must remove orphaned snapshot layer from incomplete commit" + ); +} + +// M1.4: GC with WAL marker — normal path +#[test] +fn gc_with_wal_commits_cleanly() { + let store = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + // Run GC (nothing to collect, but WAL should be clean after) + let lock = StoreLock::acquire(&layout.lock_file()).unwrap(); + let report = engine.gc(&lock, true).unwrap(); + assert_eq!(report.orphaned_envs.len(), 0); + + let wal = karapace_store::WriteAheadLog::new(&layout); + assert!( + wal.list_incomplete().unwrap().is_empty(), + "WAL must be clean after successful GC" + ); +} + +// M1.4: GC incomplete WAL entry recovered safely +#[test] +fn gc_incomplete_wal_entry_recovered_safely() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + // Simulate an incomplete GC: WAL entry exists, GC didn't finish + let _op_id = wal.begin(karapace_store::WalOpKind::Gc, "gc").unwrap(); + + // Recovery via Engine::new should clean up the WAL entry (no rollback steps needed) + let _engine = Engine::new(store.path()); + + let wal2 = karapace_store::WriteAheadLog::new(&layout); + assert!( + wal2.list_incomplete().unwrap().is_empty(), + "incomplete GC WAL entry must be cleaned up on 
recovery" + ); +} + +// M1.4: GC is idempotent after partial run +#[test] +fn gc_is_idempotent_after_partial_run() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Destroy to create orphaned objects/layers + engine.destroy(&env_id).unwrap(); + + // First GC run + let layout = StoreLayout::new(store.path()); + let lock = StoreLock::acquire(&layout.lock_file()).unwrap(); + let report1 = engine.gc(&lock, false).unwrap(); + // Second GC run — should be a no-op (idempotent) + let report2 = engine.gc(&lock, false).unwrap(); + assert_eq!( + report2.removed_envs + report2.removed_layers + report2.removed_objects, + 0, + "second GC run should find nothing to collect: {report2:?}" + ); + + // First run should have found something to collect + assert!( + report1.removed_envs + report1.removed_layers + report1.removed_objects > 0, + "first GC run should have collected orphans: {report1:?}" + ); +} + +// --- M6: Failure Mode Testing (2.0) --- + +// M6.2: Object read fails gracefully on permission denied +#[test] +fn object_get_fails_on_permission_denied() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + let hash = obj_store.put(b"test data").unwrap(); + + // Remove read permission on the object file + let obj_path = layout.objects_dir().join(&hash); + fs::set_permissions(&obj_path, fs::Permissions::from_mode(0o000)).unwrap(); + + let result = obj_store.get(&hash); + assert!(result.is_err(), "get must fail on permission denied"); + + // Restore permissions for cleanup + fs::set_permissions(&obj_path, fs::Permissions::from_mode(0o644)).unwrap(); +} + +// M6.2: Metadata write fails 
gracefully on read-only store +#[test] +fn metadata_put_fails_on_read_only_dir() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let meta_store = karapace_store::MetadataStore::new(layout.clone()); + + // Make metadata dir read-only + let meta_dir = layout.metadata_dir(); + fs::set_permissions(&meta_dir, fs::Permissions::from_mode(0o555)).unwrap(); + + let meta = karapace_store::EnvMetadata { + env_id: "test_ro".into(), + short_id: "test_ro".into(), + name: None, + state: EnvState::Defined, + manifest_hash: "mhash".into(), + base_layer: "base".into(), + dependency_layers: vec![], + policy_layer: None, + created_at: "2025-01-01T00:00:00Z".to_owned(), + updated_at: "2025-01-01T00:00:00Z".to_owned(), + ref_count: 1, + checksum: None, + }; + let result = meta_store.put(&meta); + assert!(result.is_err(), "put must fail on read-only metadata dir"); + + // Restore permissions for cleanup + fs::set_permissions(&meta_dir, fs::Permissions::from_mode(0o755)).unwrap(); +} + +// M6.3: Concurrent GC blocked by store lock +#[test] +fn concurrent_gc_blocked_by_lock() { + let store = tempfile::tempdir().unwrap(); + let _engine = Engine::new(store.path()); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + // Acquire lock in main thread + let lock = StoreLock::acquire(&layout.lock_file()).unwrap(); + + // Try to acquire lock again — should return None (non-blocking try) + let result = StoreLock::try_acquire(&layout.lock_file()).unwrap(); + assert!( + result.is_none(), + "second lock acquisition must return None while first is held" + ); + + drop(lock); + + // After drop, lock should be available again + let lock2 = StoreLock::try_acquire(&layout.lock_file()).unwrap(); + assert!(lock2.is_some(), "lock must be available after drop"); +} + +// M6: Layer corruption detected on get +#[test] +fn layer_corruption_detected_on_get() { + let store = tempfile::tempdir().unwrap(); + 
let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let layer_store = karapace_store::LayerStore::new(layout.clone()); + let layer = karapace_store::LayerManifest { + hash: "test_layer".to_owned(), + kind: karapace_store::LayerKind::Base, + parent: None, + object_refs: vec![], + read_only: true, + tar_hash: String::new(), + }; + let content_hash = layer_store.put(&layer).unwrap(); + + // Corrupt the layer file (flip bytes but keep it valid-ish) + let layer_path = layout.layers_dir().join(&content_hash); + fs::write(&layer_path, b"corrupted data that is not the original").unwrap(); + + let result = layer_store.get(&content_hash); + assert!( + result.is_err(), + "corrupted layer must be detected by hash verification" + ); +} + +// M6: Metadata corruption detected on get +#[test] +fn metadata_corruption_detected_on_get() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let meta_store = karapace_store::MetadataStore::new(layout.clone()); + let meta = karapace_store::EnvMetadata { + env_id: "corrupt_test".into(), + short_id: "corrupt_test".into(), + name: None, + state: EnvState::Built, + manifest_hash: "mhash".into(), + base_layer: "base".into(), + dependency_layers: vec![], + policy_layer: None, + created_at: "2025-01-01T00:00:00Z".to_owned(), + updated_at: "2025-01-01T00:00:00Z".to_owned(), + ref_count: 1, + checksum: None, + }; + meta_store.put(&meta).unwrap(); + + // Corrupt the metadata file while keeping valid JSON with wrong checksum + let meta_path = layout.metadata_dir().join("corrupt_test"); + let mut content = fs::read_to_string(&meta_path).unwrap(); + content = content.replace("corrupt_test", "tampered_val"); + fs::write(&meta_path, &content).unwrap(); + + let result = meta_store.get("corrupt_test"); + assert!( + result.is_err(), + "tampered metadata must be detected by checksum verification" + ); +} + +// M6: Destroy non-existent environment returns 
clean error +#[test] +fn destroy_nonexistent_env_returns_error() { + let store = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let result = engine.destroy("does_not_exist_abc123"); + assert!(result.is_err()); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("not found") || err.contains("NotFound") || err.contains("does_not_exist"), + "error should indicate env not found: {err}" + ); +} + +// M6: Build with invalid manifest returns clean error +#[test] +fn build_invalid_manifest_returns_error() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = project.path().join("karapace.toml"); + fs::write(&manifest, "this is not valid toml [[[").unwrap(); + + let result = engine.build(&manifest); + assert!(result.is_err(), "build with invalid manifest must fail"); +} + +// stop() on non-Running env returns correct error +#[test] +fn stop_non_running_env_returns_error() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + + // Environment is in Built state, not Running + let result = engine.stop(&r.identity.env_id); + assert!(result.is_err(), "stop on Built env must fail"); + let err_msg = result.unwrap_err().to_string(); + assert!( + err_msg.to_lowercase().contains("running") + || err_msg.to_lowercase().contains("not running"), + "error should mention running state: {err_msg}" + ); +} + +#[test] +fn stale_running_marker_cleaned_on_engine_new() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + + // Build an environment so we have an env_dir + let engine = Engine::new(store.path()); + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let result = 
engine.build(&manifest); + assert!(result.is_ok()); + let env_id = result.unwrap().identity.env_id; + + // Manually create a stale .running marker (simulates a crash) + let env_dir = store.path().join("env").join(&*env_id); + fs::create_dir_all(&env_dir).unwrap(); + let running_marker = env_dir.join(".running"); + fs::write(&running_marker, "stale").unwrap(); + assert!(running_marker.exists()); + + // Creating a new Engine should clean up the stale marker + let _engine2 = Engine::new(store.path()); + assert!( + !running_marker.exists(), + "stale .running marker must be removed by Engine::new()" + ); +} + +// --- §7: Disk-full / write-failure simulation --- + +#[test] +fn build_on_readonly_objects_dir_returns_error() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + + // Initialize the store, then make objects dir read-only + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + let objects_dir = layout.objects_dir(); + fs::set_permissions(&objects_dir, fs::Permissions::from_mode(0o444)).unwrap(); + + let engine = Engine::new(store.path()); + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let result = engine.build(&manifest); + + // Restore permissions for cleanup + fs::set_permissions(&objects_dir, fs::Permissions::from_mode(0o755)).unwrap(); + + assert!( + result.is_err(), + "build must fail when objects dir is read-only" + ); +} + +#[test] +fn build_on_readonly_metadata_dir_returns_error() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + let meta_dir = layout.metadata_dir(); + fs::set_permissions(&meta_dir, fs::Permissions::from_mode(0o444)).unwrap(); + + let engine = Engine::new(store.path()); + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let result = engine.build(&manifest); + + fs::set_permissions(&meta_dir, 
fs::Permissions::from_mode(0o755)).unwrap(); + + assert!( + result.is_err(), + "build must fail when metadata dir is read-only" + ); +} + +#[test] +fn commit_on_readonly_layers_dir_returns_error() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Create upper dir with content + let upper = store.path().join("env").join(&env_id).join("upper"); + fs::create_dir_all(&upper).unwrap(); + fs::write(upper.join("file.txt"), "data").unwrap(); + + // Make layers dir read-only + let layout = StoreLayout::new(store.path()); + let layers_dir = layout.layers_dir(); + fs::set_permissions(&layers_dir, fs::Permissions::from_mode(0o444)).unwrap(); + + let result = engine.commit(&env_id); + + fs::set_permissions(&layers_dir, fs::Permissions::from_mode(0o755)).unwrap(); + + assert!( + result.is_err(), + "commit must fail when layers dir is read-only" + ); + + // Store should still be usable after the error + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!( + meta.state, + EnvState::Built, + "env must still be in Built state after failed commit" + ); +} + +#[test] +fn write_failure_never_panics() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + // Test ObjectStore::put on read-only dir + let objects_dir = layout.objects_dir(); + fs::set_permissions(&objects_dir, fs::Permissions::from_mode(0o444)).unwrap(); + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + let result = obj_store.put(b"test data"); + fs::set_permissions(&objects_dir, fs::Permissions::from_mode(0o755)).unwrap(); + assert!( + result.is_err(), + "ObjectStore::put must return Err, not panic" + ); + + // Test LayerStore::put on read-only dir + let layers_dir = 
layout.layers_dir(); + fs::set_permissions(&layers_dir, fs::Permissions::from_mode(0o444)).unwrap(); + let layer_store = karapace_store::LayerStore::new(layout.clone()); + let layer = karapace_store::LayerManifest { + hash: "test".into(), + kind: karapace_store::LayerKind::Base, + parent: None, + object_refs: vec![], + read_only: true, + tar_hash: "test".into(), + }; + let result = layer_store.put(&layer); + fs::set_permissions(&layers_dir, fs::Permissions::from_mode(0o755)).unwrap(); + assert!( + result.is_err(), + "LayerStore::put must return Err, not panic" + ); + + // Test MetadataStore::put on read-only dir + let meta_dir = layout.metadata_dir(); + fs::set_permissions(&meta_dir, fs::Permissions::from_mode(0o444)).unwrap(); + let meta_store = karapace_store::MetadataStore::new(layout.clone()); + let meta = karapace_store::EnvMetadata { + env_id: "test123".into(), + short_id: "test123".into(), + name: None, + state: EnvState::Defined, + manifest_hash: "mh".into(), + base_layer: "bl".into(), + dependency_layers: vec![], + policy_layer: None, + created_at: "2025-01-01T00:00:00Z".to_owned(), + updated_at: "2025-01-01T00:00:00Z".to_owned(), + ref_count: 1, + checksum: None, + }; + let result = meta_store.put(&meta); + fs::set_permissions(&meta_dir, fs::Permissions::from_mode(0o755)).unwrap(); + assert!( + result.is_err(), + "MetadataStore::put must return Err, not panic" + ); +} + +// --- §2: Bit-flip corruption detection --- + +#[test] +fn bitflip_corruption_detected_on_objects() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + + // Write several objects + let h1 = obj_store.put(b"object-alpha").unwrap(); + let h2 = obj_store.put(b"object-beta-longer-content-here").unwrap(); + let h3 = obj_store.put(b"object-gamma").unwrap(); + + // Flip a random byte in object 2 + let path = layout.objects_dir().join(&h2); + let mut data 
= fs::read(&path).unwrap(); + let flip_idx = data.len() / 2; + data[flip_idx] ^= 0xFF; + fs::write(&path, &data).unwrap(); + + // Intact objects must still be readable + assert!(obj_store.get(&h1).is_ok()); + assert!(obj_store.get(&h3).is_ok()); + + // Corrupted object must fail with typed error + let result = obj_store.get(&h2); + assert!(result.is_err(), "bit-flipped object must be detected"); + + // verify_store_integrity must report exactly 1 failure + let report = karapace_store::verify_store_integrity(&layout).unwrap(); + assert_eq!(report.checked, 3); + assert_eq!(report.passed, 2); + assert_eq!(report.failed.len(), 1); + assert_eq!(report.failed[0].hash, h2); +} + +// --- §6: GC dry-run equivalence --- + +#[test] +fn gc_dry_run_matches_real_gc() { + let store = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + // Build 5 environments, destroy 3 + let mut env_ids = Vec::new(); + for i in 0..5 { + let p = tempfile::tempdir().unwrap(); + let pkgs: Vec = (0..=i).map(|j| format!("pkg{j}")).collect(); + let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect(); + let m = write_manifest(p.path(), &mock_manifest(&pkg_refs)); + let r = engine.build(&m).unwrap(); + env_ids.push(r.identity.env_id.to_string()); + } + for id in &env_ids[..3] { + engine.destroy(id).unwrap(); + } + + let layout = StoreLayout::new(store.path()); + let lock = StoreLock::acquire(&layout.lock_file()).unwrap(); + + // Dry run — populates orphaned_* but doesn't remove + let dry_report = engine.gc(&lock, true).unwrap(); + assert_eq!( + dry_report.removed_envs, 0, + "dry-run must not remove anything" + ); + + // Real GC — actually removes + let real_report = engine.gc(&lock, false).unwrap(); + + // Dry-run orphaned counts must match real removed counts + assert_eq!( + dry_report.orphaned_envs.len(), + real_report.removed_envs, + "dry-run orphaned_envs must match real removed_envs" + ); + assert_eq!( + dry_report.orphaned_layers.len(), + 
real_report.removed_layers, + "dry-run orphaned_layers must match real removed_layers" + ); + assert_eq!( + dry_report.orphaned_objects.len(), + real_report.removed_objects, + "dry-run orphaned_objects must match real removed_objects" + ); +} + +// --- §15: Soak test — 100 build/destroy/GC cycles --- + +#[test] +fn soak_100_build_destroy_gc_cycles() { + let store = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + let layout = StoreLayout::new(store.path()); + + for cycle in 0..100 { + let p = tempfile::tempdir().unwrap(); + let pkgs: Vec = (0..=(cycle % 5)).map(|j| format!("pkg{j}")).collect(); + let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect(); + let m = write_manifest(p.path(), &mock_manifest(&pkg_refs)); + + let r = engine.build(&m).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Verify it's inspectable + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!(meta.state, EnvState::Built); + + // Destroy + engine.destroy(&env_id).unwrap(); + + // GC every 10 cycles + if cycle % 10 == 9 { + let lock = StoreLock::acquire(&layout.lock_file()).unwrap(); + let _report = engine.gc(&lock, false).unwrap(); + } + } + + // Final state: no environments should remain + let envs = engine.list().unwrap(); + assert_eq!( + envs.len(), + 0, + "all environments must be destroyed after soak" + ); + + // Final GC — store should be clean + let lock = StoreLock::acquire(&layout.lock_file()).unwrap(); + let _report = engine.gc(&lock, false).unwrap(); + + // WAL must be clean + let wal = karapace_store::WriteAheadLog::new(&layout); + assert!(wal.list_incomplete().unwrap().is_empty()); + + // Store integrity must pass + let integrity = karapace_store::verify_store_integrity(&layout).unwrap(); + assert!( + integrity.failed.is_empty(), + "store integrity must be clean after 100 cycles" + ); +} + +// --- §11: Store v1 rejection test --- + +#[test] +fn old_store_version_rejected_with_clear_error() { + let store = 
tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + // Overwrite the version file with v1 (old format) + let version_path = store.path().join("store").join("version"); + fs::write(&version_path, r#"{"format_version": 1}"#).unwrap(); + + // Re-initializing must reject v1 + let result = layout.initialize(); + assert!( + result.is_err(), + "store v1 must be rejected by the current binary" + ); + + // The error message should mention the version + let err_msg = result.unwrap_err().to_string(); + assert!( + err_msg.contains("version") || err_msg.contains("format"), + "error should mention version mismatch: {err_msg}" + ); +} + +// --- §4: Resolver / lockfile determinism --- + +#[test] +fn build_with_invalid_manifest_returns_manifest_error() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest( + project.path(), + r" +manifest_version = 1 +[base] +", + ); + let result = engine.build(&manifest); + assert!(result.is_err(), "build with missing image must fail"); +} + +#[test] +fn build_with_invalid_backend_returns_error() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest( + project.path(), + r#" +manifest_version = 1 +[base] +image = "rolling" +[runtime] +backend = "nonexistent_backend" +"#, + ); + let result = engine.build(&manifest); + assert!(result.is_err(), "build with unknown backend must fail"); +} + +#[test] +fn lockfile_determinism_same_inputs_same_env_id() { + let store = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + // Build twice with identical manifests in different project dirs + let p1 = tempfile::tempdir().unwrap(); + let m1 = write_manifest(p1.path(), &mock_manifest(&["git", "vim"])); + let r1 = engine.build(&m1).unwrap(); + + // Destroy 
first, then rebuild with same manifest + engine.destroy(&r1.identity.env_id).unwrap(); + + let p2 = tempfile::tempdir().unwrap(); + let m2 = write_manifest(p2.path(), &mock_manifest(&["git", "vim"])); + let r2 = engine.build(&m2).unwrap(); + + assert_eq!( + r1.identity.env_id, r2.identity.env_id, + "same manifest must produce same env_id" + ); +} + +#[test] +fn different_packages_produce_different_env_id() { + let store = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let p1 = tempfile::tempdir().unwrap(); + let m1 = write_manifest(p1.path(), &mock_manifest(&["git"])); + let r1 = engine.build(&m1).unwrap(); + + let p2 = tempfile::tempdir().unwrap(); + let m2 = write_manifest(p2.path(), &mock_manifest(&["vim"])); + let r2 = engine.build(&m2).unwrap(); + + assert_ne!( + r1.identity.env_id, r2.identity.env_id, + "different packages must produce different env_id" + ); +} + +// --- §6: GC 1000-object stress test --- + +#[test] +fn gc_stress_1000_objects() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + let obj_store = karapace_store::ObjectStore::new(layout.clone()); + + // Create 1000 orphaned objects (no layer or metadata references them) + for i in 0..1000 { + obj_store + .put(format!("orphaned-object-{i}").as_bytes()) + .unwrap(); + } + + // Also create a live environment so GC has reachable refs to trace + let engine = Engine::new(store.path()); + let p = tempfile::tempdir().unwrap(); + let m = write_manifest(p.path(), &mock_manifest(&["git"])); + let _r = engine.build(&m).unwrap(); + + // Run GC + let lock = StoreLock::acquire(&layout.lock_file()).unwrap(); + let report = engine.gc(&lock, false).unwrap(); + + // All 1000 orphans must be collected + assert_eq!( + report.removed_objects, 1000, + "GC must collect all 1000 orphaned objects, got {}", + report.removed_objects + ); + + // Live env must survive + assert_eq!(engine.list().unwrap().len(), 1); + + // 
Store integrity must still pass + let integrity = karapace_store::verify_store_integrity(&layout).unwrap(); + assert!( + integrity.failed.is_empty(), + "store integrity must pass after GC stress" + ); +} + +// --- §5: Concurrent build operations --- + +#[test] +fn concurrent_builds_do_not_corrupt_store() { + let store = tempfile::tempdir().unwrap(); + let store_path = store.path().to_owned(); + + let barrier = Arc::new(Barrier::new(4)); + let mut handles = Vec::new(); + + for thread_idx in 0..4 { + let path = store_path.clone(); + let b = Arc::clone(&barrier); + handles.push(thread::spawn(move || { + let engine = Engine::new(&path); + b.wait(); + + let p = tempfile::tempdir().unwrap(); + let pkgs: Vec = (0..=(thread_idx % 3)) + .map(|j| format!("t{thread_idx}pkg{j}")) + .collect(); + let pkg_refs: Vec<&str> = pkgs.iter().map(String::as_str).collect(); + let m_content = format!( + r#" +manifest_version = 1 +[base] +image = "rolling" +[system] +packages = [{}] +[runtime] +backend = "mock" +"#, + pkg_refs + .iter() + .map(|p| format!("\"{p}\"")) + .collect::>() + .join(", ") + ); + let manifest_path = p.path().join("karapace.toml"); + fs::write(&manifest_path, &m_content).unwrap(); + engine.build(&manifest_path).unwrap(); + })); + } + + for h in handles { + h.join().unwrap(); + } + + // Verify store is healthy after concurrent builds + let engine = Engine::new(&store_path); + let envs = engine.list().unwrap(); + assert_eq!(envs.len(), 4, "all 4 concurrent builds must succeed"); + + let layout = StoreLayout::new(&store_path); + let integrity = karapace_store::verify_store_integrity(&layout).unwrap(); + assert!( + integrity.failed.is_empty(), + "store must be intact after concurrent builds" + ); +} + +// --- M6.1: WAL write failure simulation --- + +#[test] +fn wal_write_fails_on_readonly_dir() { + let store = tempfile::tempdir().unwrap(); + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + + let wal = 
karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + // Make WAL dir read-only + let wal_dir = store.path().join("store").join("wal"); + fs::set_permissions(&wal_dir, fs::Permissions::from_mode(0o444)).unwrap(); + + let result = wal.begin(karapace_store::WalOpKind::Build, "test-env"); + + fs::set_permissions(&wal_dir, fs::Permissions::from_mode(0o755)).unwrap(); + + assert!( + result.is_err(), + "WAL begin must fail when WAL dir is read-only" + ); +} + +#[test] +fn build_fails_cleanly_when_wal_dir_is_readonly() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + + // Initialize layout and WAL so the dir exists + let layout = StoreLayout::new(store.path()); + layout.initialize().unwrap(); + let wal = karapace_store::WriteAheadLog::new(&layout); + wal.initialize().unwrap(); + + let engine = Engine::new(store.path()); + + // Now make WAL dir read-only to simulate disk full + let wal_dir = layout.root().join("store").join("wal"); + fs::set_permissions(&wal_dir, fs::Permissions::from_mode(0o444)).unwrap(); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let result = engine.build(&manifest); + + fs::set_permissions(&wal_dir, fs::Permissions::from_mode(0o755)).unwrap(); + + assert!( + result.is_err(), + "build must fail cleanly when WAL dir is read-only (simulates disk full)" + ); +} + +// --- M6.4: stop() SIGTERM/SIGKILL path tests --- + +#[test] +fn stop_sends_sigterm_to_real_process() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Spawn a real sleep process and wait on it later to avoid zombie + let mut child = std::process::Command::new("sleep") + .arg("60") + .spawn() + .expect("spawn sleep"); + let pid = 
child.id(); + let pid_i32 = i32::try_from(pid).expect("pid fits in i32"); + + // Set metadata to Running state and write .running marker with real PID + let layout = StoreLayout::new(store.path()); + let meta_store = karapace_store::MetadataStore::new(layout.clone()); + meta_store.update_state(&env_id, EnvState::Running).unwrap(); + + let env_dir = store.path().join("env").join(&env_id); + fs::create_dir_all(&env_dir).unwrap(); + fs::write(env_dir.join(".running"), pid.to_string()).unwrap(); + + // Verify the process is alive + assert!( + Path::new(&format!("/proc/{pid}")).exists(), + "sleep process must be alive before stop" + ); + + // Stop should send SIGTERM (the mock backend returns pid=99999 which won't exist, + // but the real process PID was written to .running). The mock backend's status() + // returns pid=99999 for running envs, so stop() will try to kill 99999 (ESRCH). + // This tests the ESRCH handling path. + let result = engine.stop(&env_id); + + // Clean up the real process regardless of result + unsafe { + libc::kill(pid_i32, libc::SIGKILL); + } + let _ = child.wait(); + + assert!( + result.is_ok(), + "stop must succeed even when backend PID no longer exists (ESRCH path): {:?}", + result.err() + ); + + // Verify state was reset to Built + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!( + meta.state, + EnvState::Built, + "state must be reset to Built after stop" + ); +} + +#[test] +fn stop_invalid_pid_handled_gracefully() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Manually set state to Running (the mock backend will report pid=99999) + let layout = StoreLayout::new(store.path()); + let meta_store = karapace_store::MetadataStore::new(layout.clone()); + 
meta_store.update_state(&env_id, EnvState::Running).unwrap(); + + // stop() will try to kill pid 99999 which doesn't exist → ESRCH + let result = engine.stop(&env_id); + + // Must handle gracefully (ESRCH = process already exited) + assert!( + result.is_ok(), + "stop with non-existent PID must succeed (ESRCH handled): {:?}", + result.err() + ); + + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!(meta.state, EnvState::Built); +} + +// --- M7: Coverage expansion --- + +#[test] +fn freeze_and_archive_state_transitions() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Built → Frozen + engine.freeze(&env_id).unwrap(); + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!(meta.state, EnvState::Frozen); + + // Frozen → Archived + engine.archive(&env_id).unwrap(); + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!(meta.state, EnvState::Archived); +} + +#[test] +fn rename_environment_works() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r = engine.build(&manifest).unwrap(); + let env_id = r.identity.env_id.to_string(); + + // Rename + engine.rename(&env_id, "my-dev-env").unwrap(); + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!(meta.name, Some("my-dev-env".to_owned())); + + // Rename again + engine.rename(&env_id, "new-name").unwrap(); + let meta = engine.inspect(&env_id).unwrap(); + assert_eq!(meta.name, Some("new-name".to_owned())); +} + +#[test] +fn verify_store_reports_all_clean_after_fresh_build() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = 
Engine::new(store.path()); + + let manifest = write_manifest(project.path(), &mock_manifest(&["git", "vim"])); + let _r = engine.build(&manifest).unwrap(); + + let layout = StoreLayout::new(store.path()); + let report = karapace_store::verify_store_integrity(&layout).unwrap(); + + assert!(report.checked > 0, "must check at least some objects"); + assert_eq!(report.passed, report.checked); + assert!(report.failed.is_empty()); + assert!(report.layers_checked > 0, "must check at least some layers"); + assert_eq!(report.layers_passed, report.layers_checked); + assert!( + report.metadata_checked > 0, + "must check at least some metadata" + ); + assert_eq!(report.metadata_passed, report.metadata_checked); +} + +#[test] +fn rebuild_preserves_new_and_cleans_old() { + let store = tempfile::tempdir().unwrap(); + let project = tempfile::tempdir().unwrap(); + let engine = Engine::new(store.path()); + + // Initial build + let manifest = write_manifest(project.path(), &mock_manifest(&["git"])); + let r1 = engine.build(&manifest).unwrap(); + let old_id = r1.identity.env_id.to_string(); + + // Rebuild with different packages → different env_id + let manifest2 = write_manifest(project.path(), &mock_manifest(&["git", "vim"])); + let r2 = engine.rebuild(&manifest2).unwrap(); + let new_id = r2.identity.env_id.to_string(); + + assert_ne!( + old_id, new_id, + "different packages must produce different env_id" + ); + + // New env must be inspectable + let meta = engine.inspect(&new_id).unwrap(); + assert_eq!(meta.state, EnvState::Built); + + // Old env must be gone (destroyed by rebuild) + assert!(engine.inspect(&old_id).is_err()); +}