Merge pull request #2 from marcoallegretti/fix/cli-workflow-audit

Fix/cli workflow audit
This commit is contained in:
Marco Allegretti 2026-02-25 14:24:42 +01:00 committed by GitHub
commit e6d06d6472
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
29 changed files with 748 additions and 262 deletions

View file

@ -60,6 +60,10 @@ karapace commit <env_id>
karapace restore <env_id> <snapshot_hash>
```
![Create a manifest](docs/imgs/Screenshot_20260225_105438.png)
![Build and inspect an environment](docs/imgs/Screenshot_20260225_110322.png)
See `examples/` for more manifests: `minimal.toml`, `dev.toml`, `gui-dev.toml`, `ubuntu-dev.toml`, `rust-dev.toml`.
## Commands
@ -120,6 +124,7 @@ karapace-server Reference HTTP server
## Documentation
- [Getting Started](docs/getting-started.md)
- [Architecture](docs/architecture.md)
- [CLI Reference](docs/cli-reference.md)
- [Storage Format](docs/storage-format.md)

View file

@ -40,23 +40,26 @@ pub fn json_pretty(value: &impl serde::Serialize) -> Result<String, String> {
pub fn spinner(msg: &str) -> ProgressBar {
let pb = ProgressBar::new_spinner();
pb.set_style(
ProgressStyle::with_template("{spinner:.cyan} {msg}")
.expect("valid template")
.tick_strings(&["", "", "", "", "", "", "", "", "", ""]),
);
let style = ProgressStyle::with_template("{spinner:.cyan} {msg}")
.unwrap_or_else(|_| ProgressStyle::default_spinner())
.tick_strings(&["", "", "", "", "", "", "", "", "", ""]);
pb.set_style(style);
pb.set_message(msg.to_owned());
pb.enable_steady_tick(Duration::from_millis(80));
pb
}
pub fn spin_ok(pb: &ProgressBar, msg: &str) {
pb.set_style(ProgressStyle::with_template("{msg}").expect("valid template"));
if let Ok(style) = ProgressStyle::with_template("{msg}") {
pb.set_style(style);
}
pb.finish_with_message(format!("{msg}"));
}
pub fn spin_fail(pb: &ProgressBar, msg: &str) {
pb.set_style(ProgressStyle::with_template("{msg}").expect("valid template"));
if let Ok(style) = ProgressStyle::with_template("{msg}") {
pb.set_style(style);
}
pb.finish_with_message(format!("{msg}"));
}

View file

@ -4,7 +4,7 @@ use karapace_schema::manifest::{
parse_manifest_str, BaseSection, GuiSection, HardwareSection, ManifestV1, MountsSection,
RuntimeSection, SystemSection,
};
use std::io::{stdin, IsTerminal};
use std::io::{stderr, stdin, IsTerminal};
use std::path::{Path, PathBuf};
use tempfile::NamedTempFile;
@ -87,12 +87,14 @@ fn print_result(name: &str, template: Option<&str>, json: bool) -> Result<(), St
pub fn run(name: &str, template: Option<&str>, force: bool, json: bool) -> Result<u8, String> {
let dest = Path::new(DEST_MANIFEST);
let is_tty = stdin().is_terminal();
ensure_can_write(dest, force, is_tty)?;
let is_tty = stdin().is_terminal() && stderr().is_terminal();
let mut manifest = if let Some(tpl) = template {
load_template(tpl)?
let m = load_template(tpl)?;
ensure_can_write(dest, force, is_tty)?;
m
} else {
ensure_can_write(dest, force, is_tty)?;
if !is_tty {
return Err("no --template provided and stdin is not a TTY".to_owned());
}

View file

@ -1,6 +1,6 @@
use super::{json_pretty, resolve_env_id, resolve_env_id_pretty, EXIT_SUCCESS};
use karapace_core::Engine;
use karapace_store::StoreLayout;
use karapace_store::{LayerStore, StoreLayout};
use std::path::Path;
pub fn run(engine: &Engine, store_path: &Path, env_id: &str, json: bool) -> Result<u8, String> {
@ -16,16 +16,16 @@ pub fn run(engine: &Engine, store_path: &Path, env_id: &str, json: bool) -> Resu
.map_err(|e| e.to_string())?;
if json {
let entries: Vec<_> = snapshots
.iter()
.map(|s| {
serde_json::json!({
let mut entries = Vec::new();
for s in &snapshots {
let restore_hash = LayerStore::compute_hash(s).map_err(|e| e.to_string())?;
entries.push(serde_json::json!({
"hash": s.hash,
"restore_hash": restore_hash,
"tar_hash": s.tar_hash,
"parent": s.parent,
})
})
.collect();
}));
}
let payload = serde_json::json!({
"env_id": resolved,
"snapshots": entries,
@ -36,7 +36,8 @@ pub fn run(engine: &Engine, store_path: &Path, env_id: &str, json: bool) -> Resu
} else {
println!("snapshots for {env_id}:");
for s in &snapshots {
println!(" {} (tar: {})", &s.hash[..12], &s.tar_hash[..12]);
let restore_hash = LayerStore::compute_hash(s).map_err(|e| e.to_string())?;
println!(" {} (tar: {})", restore_hash, &s.tar_hash[..12]);
}
}
Ok(EXIT_SUCCESS)

View file

@ -1,12 +1,8 @@
use super::{resolve_env_id_pretty, EXIT_SUCCESS};
use karapace_core::{Engine, StoreLock};
use karapace_store::StoreLayout;
use karapace_core::Engine;
use std::path::Path;
pub fn run(engine: &Engine, store_path: &Path, env_id: &str) -> Result<u8, String> {
let layout = StoreLayout::new(store_path);
let _lock = StoreLock::acquire(&layout.lock_file()).map_err(|e| format!("store lock: {e}"))?;
pub fn run(engine: &Engine, _store_path: &Path, env_id: &str) -> Result<u8, String> {
let resolved = resolve_env_id_pretty(engine, env_id)?;
engine.stop(&resolved).map_err(|e| e.to_string())?;
println!("stopped environment {env_id}");

View file

@ -212,6 +212,19 @@ enum Commands {
#[allow(clippy::too_many_lines)]
fn main() -> ExitCode {
let default_hook = std::panic::take_hook();
std::panic::set_hook(Box::new(move |info| {
let msg = info.to_string();
if msg.contains("Broken pipe")
|| msg.contains("broken pipe")
|| msg.contains("os error 32")
|| msg.contains("failed printing to stdout")
{
std::process::exit(0);
}
default_hook(info);
}));
let cli = Cli::parse();
let default_level = if cli.trace {

View file

@ -178,6 +178,85 @@ fn cli_build_offline_fails_fast_with_packages() {
);
}
#[test]
fn cli_snapshots_restore_hash_matches_commit() {
let store = temp_store();
let project = tempfile::tempdir().unwrap();
let manifest = write_minimal_manifest(project.path(), "rolling");
let build_out = karapace_bin()
.args([
"--store",
&store.path().to_string_lossy(),
"--json",
"build",
&manifest.to_string_lossy(),
"--name",
"demo",
])
.output()
.unwrap();
assert!(build_out.status.success());
let commit_out = karapace_bin()
.args([
"--store",
&store.path().to_string_lossy(),
"--json",
"commit",
"demo",
])
.output()
.unwrap();
assert!(
commit_out.status.success(),
"commit must exit 0. stderr: {}",
String::from_utf8_lossy(&commit_out.stderr)
);
let commit_stdout = String::from_utf8_lossy(&commit_out.stdout);
let commit_json: serde_json::Value = serde_json::from_str(&commit_stdout)
.unwrap_or_else(|e| panic!("commit --json must produce valid JSON: {e}\n{commit_stdout}"));
let commit_hash = commit_json["snapshot_hash"].as_str().unwrap().to_owned();
let snaps_out = karapace_bin()
.args([
"--store",
&store.path().to_string_lossy(),
"--json",
"snapshots",
"demo",
])
.output()
.unwrap();
assert!(
snaps_out.status.success(),
"snapshots must exit 0. stderr: {}",
String::from_utf8_lossy(&snaps_out.stderr)
);
let snaps_stdout = String::from_utf8_lossy(&snaps_out.stdout);
let snaps_json: serde_json::Value = serde_json::from_str(&snaps_stdout).unwrap_or_else(|e| {
panic!("snapshots --json must produce valid JSON: {e}\nstdout: {snaps_stdout}")
});
let restore_hash = snaps_json["snapshots"][0]["restore_hash"].as_str().unwrap();
assert_eq!(restore_hash, commit_hash);
let restore_out = karapace_bin()
.args([
"--store",
&store.path().to_string_lossy(),
"restore",
"demo",
restore_hash,
])
.output()
.unwrap();
assert!(
restore_out.status.success(),
"restore must exit 0. stderr: {}",
String::from_utf8_lossy(&restore_out.stderr)
);
}
// A5: CLI Validation — list with JSON output
#[test]
fn cli_list_json_output_stable() {

View file

@ -56,13 +56,14 @@ impl Engine {
let layer_store = LayerStore::new(layout.clone());
let wal = WriteAheadLog::new(&layout);
// Recover any incomplete operations from a previous crash
// Recovery mutates the store; avoid running it while the store is locked.
match StoreLock::try_acquire(&layout.lock_file()) {
Ok(Some(_lock)) => {
if let Err(e) = wal.recover() {
warn!("WAL recovery failed: {e}");
}
// Clean up stale .running markers left by a crash during enter/exec.
// After WAL recovery, any env still marked Running was mid-operation.
// Clean up stale .running markers.
let env_base = layout.env_dir();
if env_base.exists() {
if let Ok(entries) = std::fs::read_dir(&env_base) {
@ -78,6 +79,14 @@ impl Engine {
}
}
}
}
Ok(None) => {
debug!("store lock held; skipping WAL recovery and stale marker cleanup");
}
Err(e) => {
warn!("store lock check failed; skipping WAL recovery: {e}");
}
}
let store_root_str = root.to_string_lossy().into_owned();
Self {
@ -98,12 +107,10 @@ impl Engine {
let manifest = parse_manifest_file(manifest_path)?;
let normalized = manifest.normalize()?;
// Use preliminary identity from manifest (not resolved yet).
// This is sufficient for the Defined state; build will re-resolve.
let identity = compute_env_id(&normalized);
let identity = compute_env_id(&normalized)?;
if !self.meta_store.exists(&identity.env_id) {
let manifest_json = normalized.canonical_json();
let manifest_json = normalized.canonical_json()?;
let manifest_hash = self.obj_store.put(manifest_json.as_bytes())?;
let now = chrono::Utc::now().to_rfc3339();
@ -124,8 +131,6 @@ impl Engine {
self.meta_store.put(&meta)?;
}
// Generate a preliminary lock with mock resolution
// (no real image digest or package versions yet).
let preliminary_resolution = ResolutionResult {
base_image_digest: blake3::hash(
format!("unresolved:{}", normalized.base_image).as_bytes(),
@ -210,10 +215,7 @@ impl Engine {
let store_str = self.store_root_str.clone();
let backend = select_backend(&normalized.runtime_backend, &store_str)?;
// Phase 1: Resolve dependencies through the backend.
// This downloads the base image, computes its content digest,
// and queries the package manager for exact versions.
let preliminary_id = compute_env_id(&normalized);
let preliminary_id = compute_env_id(&normalized)?;
let preliminary_spec = RuntimeSpec {
env_id: preliminary_id.env_id.to_string(),
root_path: self
@ -237,9 +239,6 @@ impl Engine {
&resolution.base_image_digest[..12]
);
// Phase 2: Generate the lock file from resolved data.
// The env_id is computed from the locked state — content digest
// + pinned package versions — not from unresolved names.
let lock = LockFile::from_resolved(&normalized, &resolution);
let identity = lock.compute_identity();
@ -259,18 +258,15 @@ impl Engine {
identity.env_id, identity.short_id
);
// Phase 3: Build the environment, then capture real filesystem layers.
let manifest_hash = self.obj_store.put(normalized.canonical_json().as_bytes())?;
let manifest_json = normalized.canonical_json()?;
let manifest_hash = self.obj_store.put(manifest_json.as_bytes())?;
let env_dir = self.layout.env_path(&identity.env_id);
// Begin WAL entry before creating side effects
self.wal.initialize()?;
let wal_op = self.wal.begin(WalOpKind::Build, &identity.env_id)?;
// Register rollback BEFORE the side-effect so a crash between
// create_dir_all and add_rollback_step cannot orphan the directory.
// rollback_entry already checks path.exists(), so a no-op if dir was never created.
// Register rollback before creating side effects.
self.wal
.add_rollback_step(&wal_op, RollbackStep::RemoveDir(env_dir.clone()))?;
std::fs::create_dir_all(&env_dir)?;
@ -289,14 +285,10 @@ impl Engine {
return Err(e.into());
}
// Capture the overlay upper directory as a real tar layer.
// The upper dir contains all filesystem changes made during build
// (installed packages, config files, etc.).
let upper_dir = self.layout.upper_dir(&identity.env_id);
let build_tar = if upper_dir.exists() {
pack_layer(&upper_dir)?
} else {
// No upper dir (shouldn't happen with real backends, but handle gracefully)
Vec::new()
};
let build_tar_hash = self.obj_store.put(&build_tar)?;
@ -316,8 +308,6 @@ impl Engine {
};
let base_layer_hash = self.layer_store.put(&base_layer)?;
// No separate dependency layers — the build tar captures everything.
// Individual package tracking is in the lock file, not the layer store.
let dep_layers = Vec::new();
let now = chrono::Utc::now().to_rfc3339();
@ -336,8 +326,6 @@ impl Engine {
checksum: None,
};
// Phase 4: Write metadata and lock file.
// If either fails after a successful build, clean up the orphaned env_dir.
let finalize = || -> Result<(), CoreError> {
if let Ok(existing) = self.meta_store.get(&identity.env_id) {
validate_transition(existing.state, EnvState::Built)?;
@ -627,7 +615,7 @@ impl Engine {
if old_env_ids.is_empty() {
let manifest = parse_manifest_file(manifest_path)?;
let normalized = manifest.normalize()?;
let identity = compute_env_id(&normalized);
let identity = compute_env_id(&normalized)?;
if self.meta_store.exists(&identity.env_id) {
old_env_ids.push(identity.env_id.to_string());
}
@ -941,7 +929,7 @@ impl Engine {
) -> Result<(ManifestV1, NormalizedManifest, EnvIdentity), CoreError> {
let manifest = parse_manifest_file(manifest_path)?;
let normalized = manifest.normalize()?;
let identity = compute_env_id(&normalized);
let identity = compute_env_id(&normalized)?;
Ok((manifest, normalized, identity))
}
}

View file

@ -1,4 +1,5 @@
use crate::{BlobKind, RemoteBackend, RemoteConfig, RemoteError};
use std::io::Read;
/// HTTP-based remote store backend.
///
@ -55,10 +56,32 @@ impl HttpBackend {
if let Some(ref token) = self.config.auth_token {
req = req.header("Authorization", &format!("Bearer {token}"));
}
let resp = req.call().map_err(|e| RemoteError::Http(e.to_string()))?;
let body = resp
.into_body()
.read_to_vec()
let resp = match req.call() {
Ok(r) => r,
Err(ureq::Error::StatusCode(404)) => {
return Err(RemoteError::NotFound(url.to_owned()));
}
Err(ureq::Error::StatusCode(code)) => {
return Err(RemoteError::Http(format!("HTTP {code} for {url}")));
}
Err(e) => {
return Err(RemoteError::Http(e.to_string()));
}
};
let status = resp.status();
let code = status.as_u16();
if code == 404 {
return Err(RemoteError::NotFound(url.to_owned()));
}
if code >= 400 {
return Err(RemoteError::Http(format!("HTTP {code} for {url}")));
}
let mut reader = resp.into_body().into_reader();
let mut body = Vec::new();
reader
.read_to_end(&mut body)
.map_err(|e| RemoteError::Http(e.to_string()))?;
Ok(body)
}
@ -71,8 +94,11 @@ impl HttpBackend {
if let Some(ref token) = self.config.auth_token {
req = req.header("Authorization", &format!("Bearer {token}"));
}
let resp = req.call().map_err(|e| RemoteError::Http(e.to_string()))?;
Ok(resp.status().into())
match req.call() {
Ok(resp) => Ok(resp.status().into()),
Err(ureq::Error::StatusCode(code)) => Ok(code),
Err(e) => Err(RemoteError::Http(e.to_string())),
}
}
}
@ -92,9 +118,10 @@ impl RemoteBackend for HttpBackend {
fn has_blob(&self, kind: BlobKind, key: &str) -> Result<bool, RemoteError> {
let url = self.url(kind, key);
tracing::debug!("HEAD {url}");
match self.do_head(&url) {
Ok(status) => Ok(status == 200),
Err(_) => Ok(false),
match self.do_head(&url)? {
200 => Ok(true),
404 => Ok(false),
code => Err(RemoteError::Http(format!("HTTP {code} for HEAD {url}"))),
}
}
@ -195,7 +222,7 @@ mod tests {
let mut body = vec![0u8; content_length];
if content_length > 0 {
let _ = std::io::Read::read_exact(&mut reader, &mut body);
let _ = reader.read_exact(&mut body);
}
let mut data = store.lock().unwrap();

View file

@ -86,8 +86,9 @@ pub fn push_env(
// 7. Update registry if key provided
if let Some(key) = registry_key {
let mut registry = match backend.get_registry() {
Ok(data) => Registry::from_bytes(&data).unwrap_or_default(),
Err(_) => Registry::new(),
Ok(data) => Registry::from_bytes(&data)?,
Err(RemoteError::NotFound(_)) => Registry::new(),
Err(e) => return Err(e),
};
registry.publish(
key,

View file

@ -107,14 +107,15 @@ impl RuntimeBackend for MockBackend {
command: &[String],
) -> Result<std::process::Output, RuntimeError> {
let stdout = format!("mock-exec: {}\n", command.join(" "));
// Create a real success ExitStatus portably
let success_status = std::process::Command::new("true")
.status()
.unwrap_or_else(|_| {
std::process::Command::new("/bin/true")
.status()
.expect("cannot execute /bin/true")
});
#[cfg(unix)]
let success_status = {
use std::os::unix::process::ExitStatusExt;
std::process::ExitStatus::from_raw(0)
};
#[cfg(not(unix))]
let success_status = std::process::Command::new("true").status()?;
Ok(std::process::Output {
status: success_status,
stdout: stdout.into_bytes(),

View file

@ -5,12 +5,14 @@ use crate::image::{
parse_version_output, query_versions_command, resolve_image, ImageCache,
};
use crate::sandbox::{
enter_interactive, exec_in_container, install_packages_in_container, mount_overlay,
setup_container_rootfs, unmount_overlay, SandboxConfig,
exec_in_container, install_packages_in_container, mount_overlay, setup_container_rootfs,
spawn_enter_interactive, unmount_overlay, SandboxConfig,
};
use crate::terminal;
use crate::RuntimeError;
use karapace_schema::{ResolutionResult, ResolvedPackage};
use libc::{SIGKILL, SIGTERM};
use std::os::unix::process::ExitStatusExt;
use std::path::{Path, PathBuf};
pub struct NamespaceBackend {
@ -47,7 +49,6 @@ impl RuntimeBackend for NamespaceBackend {
}
fn available(&self) -> bool {
// Check that user namespaces work
let output = std::process::Command::new("unshare")
.args(["--user", "--map-root-user", "--fork", "true"])
.output();
@ -59,12 +60,10 @@ impl RuntimeBackend for NamespaceBackend {
eprintln!("[karapace] {msg}");
};
// Download/cache the base image
let resolved = resolve_image(&spec.manifest.base_image)?;
let image_cache = ImageCache::new(&self.store_root);
let rootfs = image_cache.ensure_image(&resolved, &progress, spec.offline)?;
// Compute content digest of the base image
let base_image_digest = compute_image_digest(&rootfs)?;
if spec.offline && !spec.manifest.system_packages.is_empty() {
@ -73,8 +72,6 @@ impl RuntimeBackend for NamespaceBackend {
));
}
// If there are packages to resolve, set up a temporary overlay
// and install+query to get exact versions
let resolved_packages = if spec.manifest.system_packages.is_empty() {
Vec::new()
} else {
@ -84,13 +81,11 @@ impl RuntimeBackend for NamespaceBackend {
std::fs::create_dir_all(&tmp_env)?;
let mut sandbox = SandboxConfig::new(rootfs.clone(), "resolve-tmp", &tmp_env);
sandbox.isolate_network = false; // need network for package resolution
sandbox.isolate_network = false;
mount_overlay(&sandbox)?;
setup_container_rootfs(&sandbox)?;
// Run resolution inside an inner closure so cleanup always runs,
// even if detect/install/query fails.
let resolve_inner = || -> Result<Vec<(String, String)>, RuntimeError> {
let pkg_mgr = detect_package_manager(&sandbox.overlay_merged)
.or_else(|| detect_package_manager(&rootfs))
@ -111,13 +106,11 @@ impl RuntimeBackend for NamespaceBackend {
let result = resolve_inner();
// Always cleanup: unmount overlay and remove temp directory
let _ = unmount_overlay(&sandbox);
let _ = std::fs::remove_dir_all(&tmp_env);
let versions = result?;
// Map back to ResolvedPackage, falling back to "unresolved" if query failed
spec.manifest
.system_packages
.iter()
@ -148,21 +141,17 @@ impl RuntimeBackend for NamespaceBackend {
eprintln!("[karapace] {msg}");
};
// Resolve and download the base image
let resolved = resolve_image(&spec.manifest.base_image)?;
let image_cache = ImageCache::new(&self.store_root);
let rootfs = image_cache.ensure_image(&resolved, &progress, spec.offline)?;
// Set up overlay filesystem
let mut sandbox = SandboxConfig::new(rootfs.clone(), &spec.env_id, &env_dir);
sandbox.isolate_network = spec.offline || spec.manifest.network_isolation;
mount_overlay(&sandbox)?;
// Set up container rootfs (create dirs, user, etc.)
setup_container_rootfs(&sandbox)?;
// Install system packages if any
if !spec.manifest.system_packages.is_empty() {
if spec.offline {
return Err(RuntimeError::ExecFailed(
@ -190,10 +179,8 @@ impl RuntimeBackend for NamespaceBackend {
progress("packages installed");
}
// Unmount overlay after build (will be re-mounted on enter)
unmount_overlay(&sandbox)?;
// Write state marker
std::fs::write(env_dir.join(".built"), "1")?;
progress(&format!(
@ -214,7 +201,6 @@ impl RuntimeBackend for NamespaceBackend {
)));
}
// Resolve image to get rootfs path
let resolved = resolve_image(&spec.manifest.base_image)?;
let image_cache = ImageCache::new(&self.store_root);
let rootfs = image_cache.rootfs_path(&resolved.cache_key);
@ -225,26 +211,17 @@ impl RuntimeBackend for NamespaceBackend {
));
}
// Create sandbox config
let mut sandbox = SandboxConfig::new(rootfs, &spec.env_id, &env_dir);
sandbox.isolate_network = spec.offline || spec.manifest.network_isolation;
sandbox.hostname = format!("karapace-{}", &spec.env_id[..12.min(spec.env_id.len())]);
// Compute host integration (Wayland, PipeWire, GPU, etc.)
let host = compute_host_integration(&spec.manifest);
sandbox.bind_mounts.extend(host.bind_mounts);
sandbox.env_vars.extend(host.env_vars);
// Mount overlay
mount_overlay(&sandbox)?;
// Set up rootfs
setup_container_rootfs(&sandbox)?;
// Mark as running
std::fs::write(env_dir.join(".running"), format!("{}", std::process::id()))?;
// Emit terminal markers
terminal::emit_container_push(&spec.env_id, &sandbox.hostname);
terminal::print_container_banner(
&spec.env_id,
@ -252,8 +229,37 @@ impl RuntimeBackend for NamespaceBackend {
&sandbox.hostname,
);
// Enter the container interactively
let exit_code = enter_interactive(&sandbox);
let mut child = match spawn_enter_interactive(&sandbox) {
Ok(c) => c,
Err(e) => {
terminal::emit_container_pop();
terminal::print_container_exit(&spec.env_id);
let _ = unmount_overlay(&sandbox);
return Err(e);
}
};
if let Err(e) = std::fs::write(env_dir.join(".running"), format!("{}", child.id())) {
let _ = child.kill();
terminal::emit_container_pop();
terminal::print_container_exit(&spec.env_id);
let _ = unmount_overlay(&sandbox);
return Err(e.into());
}
// Wait for the interactive session to complete.
let exit_code = match child.wait() {
Ok(status) => {
let code = status.code().unwrap_or_else(|| match status.signal() {
Some(sig) if sig == SIGTERM || sig == SIGKILL => 0,
_ => 1,
});
Ok(code)
}
Err(e) => Err(RuntimeError::ExecFailed(format!(
"failed to wait for sandbox: {e}"
))),
};
// Cleanup
terminal::emit_container_pop();
@ -288,7 +294,7 @@ impl RuntimeBackend for NamespaceBackend {
let rootfs = image_cache.rootfs_path(&resolved.cache_key);
let mut sandbox = SandboxConfig::new(rootfs, &spec.env_id, &env_dir);
sandbox.isolate_network = spec.manifest.network_isolation;
sandbox.isolate_network = spec.offline || spec.manifest.network_isolation;
let host = compute_host_integration(&spec.manifest);
sandbox.bind_mounts.extend(host.bind_mounts);
@ -322,7 +328,20 @@ impl RuntimeBackend for NamespaceBackend {
let running_file = env_dir.join(".running");
if running_file.exists() {
let pid_str = std::fs::read_to_string(&running_file).unwrap_or_default();
let pid_str = match std::fs::read_to_string(&running_file) {
Ok(s) => s,
Err(e) => {
tracing::warn!(
"failed to read .running file for {}: {e}",
&env_id[..12.min(env_id.len())]
);
return Ok(RuntimeStatus {
env_id: env_id.to_owned(),
running: false,
pid: None,
});
}
};
let pid = pid_str.trim().parse::<u32>().ok();
if pid.is_none() && !pid_str.trim().is_empty() {
tracing::warn!(
@ -330,6 +349,7 @@ impl RuntimeBackend for NamespaceBackend {
&env_id[..12.min(env_id.len())],
pid_str.trim()
);
let _ = std::fs::remove_file(&running_file);
}
// Check if process is actually alive
if let Some(p) = pid {

View file

@ -11,7 +11,7 @@ use crate::sandbox::{
use crate::terminal;
use crate::RuntimeError;
use karapace_schema::{ResolutionResult, ResolvedPackage};
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::process::Command;
pub struct OciBackend {
@ -416,7 +416,7 @@ impl RuntimeBackend for OciBackend {
let rootfs = image_cache.rootfs_path(&resolved.cache_key);
let mut sandbox = SandboxConfig::new(rootfs, &spec.env_id, &env_dir);
sandbox.isolate_network = spec.manifest.network_isolation;
sandbox.isolate_network = spec.offline || spec.manifest.network_isolation;
let host = compute_host_integration(&spec.manifest);
sandbox.bind_mounts.extend(host.bind_mounts);
@ -451,35 +451,48 @@ impl RuntimeBackend for OciBackend {
}
fn status(&self, env_id: &str) -> Result<RuntimeStatus, RuntimeError> {
let env_dir = self.env_dir(env_id);
let running_file = env_dir.join(".running");
let runtime = Self::find_runtime().ok_or_else(|| {
RuntimeError::BackendUnavailable("no OCI runtime found (crun/runc/youki)".to_owned())
})?;
if running_file.exists() {
let pid_str = std::fs::read_to_string(&running_file).unwrap_or_default();
let pid = pid_str.trim().parse::<u32>().ok();
if pid.is_none() && !pid_str.trim().is_empty() {
tracing::warn!(
"corrupt .running file for {}: could not parse PID from '{}'",
&env_id[..12.min(env_id.len())],
pid_str.trim()
);
}
if let Some(p) = pid {
if Path::new(&format!("/proc/{p}")).exists() {
let container_id = format!("karapace-{}", &env_id[..12.min(env_id.len())]);
let output = Command::new(&runtime)
.args(["state", &container_id])
.output()?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
let msg = stderr.to_lowercase();
if msg.contains("does not exist")
|| msg.contains("not found")
|| msg.contains("no such file or directory")
{
return Ok(RuntimeStatus {
env_id: env_id.to_owned(),
running: true,
pid: Some(p),
});
}
let _ = std::fs::remove_file(&running_file);
}
}
Ok(RuntimeStatus {
env_id: env_id.to_owned(),
running: false,
pid: None,
});
}
return Err(RuntimeError::ExecFailed(format!(
"{runtime} state failed: {}",
stderr.trim()
)));
}
let state: serde_json::Value = serde_json::from_slice(&output.stdout).map_err(|e| {
RuntimeError::ExecFailed(format!("failed to parse {runtime} state output: {e}"))
})?;
let pid = state
.get("pid")
.and_then(serde_json::Value::as_u64)
.and_then(|p| u32::try_from(p).ok())
.filter(|p| *p != 0);
Ok(RuntimeStatus {
env_id: env_id.to_owned(),
running: pid.is_some(),
pid,
})
}
}
@ -506,6 +519,54 @@ mod tests {
#[test]
fn oci_status_reports_not_running() {
use std::ffi::OsString;
use std::os::unix::fs::PermissionsExt;
use std::sync::Mutex;
static ENV_LOCK: Mutex<()> = Mutex::new(());
let _lock = ENV_LOCK.lock().unwrap();
struct PathGuard {
old_path: OsString,
}
impl Drop for PathGuard {
fn drop(&mut self) {
std::env::set_var("PATH", &self.old_path);
}
}
let fake_bin = tempfile::tempdir().unwrap();
let fake_crun = fake_bin.path().join("crun");
std::fs::write(
&fake_crun,
"#!/bin/sh\n\
if [ \"$1\" = \"--version\" ]; then\n\
echo crun-test\n\
exit 0\n\
fi\n\
if [ \"$1\" = \"state\" ]; then\n\
echo \"container does not exist\" 1>&2\n\
exit 1\n\
fi\n\
exit 1\n",
)
.unwrap();
let mut perms = std::fs::metadata(&fake_crun).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&fake_crun, perms).unwrap();
let old_path = std::env::var_os("PATH").unwrap_or_default();
let _guard = PathGuard {
old_path: old_path.clone(),
};
let joined = std::env::join_paths(
std::iter::once(fake_bin.path().to_path_buf()).chain(std::env::split_paths(&old_path)),
)
.unwrap();
std::env::set_var("PATH", joined);
let dir = tempfile::tempdir().unwrap();
let backend = OciBackend::with_store_root(dir.path());
let status = backend.status("oci-test").unwrap();

View file

@ -3,8 +3,6 @@ use std::fmt::Write as _;
use std::path::{Path, PathBuf};
use std::process::Command;
/// Shell-escape a string for safe interpolation into shell scripts.
/// Wraps the value in single quotes, escaping any embedded single quotes.
fn shell_quote(s: &str) -> String {
// Single-quoting in POSIX shell: replace ' with '\'' then wrap in '
format!("'{}'", s.replace('\'', "'\\''"))
@ -80,10 +78,8 @@ impl SandboxConfig {
}
pub fn mount_overlay(config: &SandboxConfig) -> Result<(), RuntimeError> {
// Unmount any stale overlay from a previous failed run
let _ = unmount_overlay(config);
// Clean stale work dir (fuse-overlayfs requires a clean workdir)
if config.overlay_work.exists() {
let _ = std::fs::remove_dir_all(&config.overlay_work);
}
@ -147,7 +143,6 @@ pub fn unmount_overlay(config: &SandboxConfig) -> Result<(), RuntimeError> {
if !config.overlay_merged.exists() {
return Ok(());
}
// Only attempt unmount if actually mounted (avoids spurious errors)
if !is_mounted(&config.overlay_merged) {
return Ok(());
}
@ -156,7 +151,6 @@ pub fn unmount_overlay(config: &SandboxConfig) -> Result<(), RuntimeError> {
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status();
// Fallback if fusermount3 is not available
if is_mounted(&config.overlay_merged) {
let _ = Command::new("fusermount")
.args(["-u", &config.overlay_merged.to_string_lossy()])
@ -170,7 +164,6 @@ pub fn unmount_overlay(config: &SandboxConfig) -> Result<(), RuntimeError> {
pub fn setup_container_rootfs(config: &SandboxConfig) -> Result<PathBuf, RuntimeError> {
let merged = &config.overlay_merged;
// Essential directories inside the container
for subdir in [
"proc", "sys", "dev", "dev/pts", "dev/shm", "tmp", "run", "run/user", "etc", "var",
"var/tmp",
@ -178,11 +171,9 @@ pub fn setup_container_rootfs(config: &SandboxConfig) -> Result<PathBuf, Runtime
std::fs::create_dir_all(merged.join(subdir))?;
}
// Create run/user/<uid> for XDG_RUNTIME_DIR
let user_run = merged.join(format!("run/user/{}", config.uid));
std::fs::create_dir_all(&user_run)?;
// Create home directory
let container_home = merged.join(
config
.home_dir
@ -191,15 +182,12 @@ pub fn setup_container_rootfs(config: &SandboxConfig) -> Result<PathBuf, Runtime
);
std::fs::create_dir_all(&container_home)?;
// Write /etc/hostname
let _ = std::fs::write(merged.join("etc/hostname"), &config.hostname);
// Ensure /etc/resolv.conf exists (copy from host for DNS)
if !merged.join("etc/resolv.conf").exists() && Path::new("/etc/resolv.conf").exists() {
let _ = std::fs::copy("/etc/resolv.conf", merged.join("etc/resolv.conf"));
}
// Ensure user exists in /etc/passwd
ensure_user_in_container(config, merged)?;
Ok(merged.clone())
@ -248,7 +236,14 @@ fn ensure_user_in_container(config: &SandboxConfig, merged: &Path) -> Result<(),
fn build_unshare_command(config: &SandboxConfig) -> Command {
let mut cmd = Command::new("unshare");
cmd.args(["--user", "--map-root-user", "--mount", "--pid", "--fork"]);
cmd.args([
"--user",
"--map-root-user",
"--mount",
"--pid",
"--fork",
"--kill-child=SIGTERM",
]);
if config.isolate_network {
cmd.arg("--net");
@ -262,16 +257,12 @@ fn build_setup_script(config: &SandboxConfig) -> String {
let qm = shell_quote_path(merged);
let mut script = String::new();
// Mount /proc
let _ = writeln!(script, "mount -t proc proc {qm}/proc 2>/dev/null || true");
// Bind mount /sys (read-only)
let _ = writeln!(script, "mount --rbind /sys {qm}/sys 2>/dev/null && mount --make-rslave {qm}/sys 2>/dev/null || true");
// Bind mount /dev
let _ = writeln!(script, "mount --rbind /dev {qm}/dev 2>/dev/null && mount --make-rslave {qm}/dev 2>/dev/null || true");
// Bind mount home directory
let container_home = merged.join(
config
.home_dir
@ -285,13 +276,10 @@ fn build_setup_script(config: &SandboxConfig) -> String {
shell_quote_path(&container_home)
);
// Bind mount /etc/resolv.conf for DNS resolution
let _ = writeln!(script, "touch {qm}/etc/resolv.conf 2>/dev/null; mount --bind /etc/resolv.conf {qm}/etc/resolv.conf 2>/dev/null || true");
// Bind mount /tmp
let _ = writeln!(script, "mount --bind /tmp {qm}/tmp 2>/dev/null || true");
// Bind mounts from config (user-supplied paths — must be quoted)
for bm in &config.bind_mounts {
let target = if bm.target.is_absolute() {
merged.join(bm.target.strip_prefix("/").unwrap_or(&bm.target))
@ -309,7 +297,6 @@ fn build_setup_script(config: &SandboxConfig) -> String {
}
}
// Bind mount XDG_RUNTIME_DIR sockets (Wayland, PipeWire, D-Bus)
if let Ok(xdg_run) = std::env::var("XDG_RUNTIME_DIR") {
let container_run = merged.join(format!("run/user/{}", config.uid));
for socket in &["wayland-0", "pipewire-0", "pulse/native", "bus"] {
@ -325,7 +312,6 @@ fn build_setup_script(config: &SandboxConfig) -> String {
shell_quote_path(parent)
);
}
// For sockets, touch the target first
if src.is_file() || !src.is_dir() {
let _ = writeln!(script, "touch {qd} 2>/dev/null || true");
}
@ -334,7 +320,6 @@ fn build_setup_script(config: &SandboxConfig) -> String {
}
}
// Bind mount X11 socket if present
if Path::new("/tmp/.X11-unix").exists() {
let _ = writeln!(
script,
@ -342,8 +327,7 @@ fn build_setup_script(config: &SandboxConfig) -> String {
);
}
// Chroot and exec
let _ = write!(script, "exec chroot {qm} /bin/sh -c '");
let _ = writeln!(script, "exec chroot {qm} /bin/sh -s <<'__KARAPACE_EOF__'");
script
}
@ -353,16 +337,14 @@ pub fn enter_interactive(config: &SandboxConfig) -> Result<i32, RuntimeError> {
let mut setup = build_setup_script(config);
// Build environment variable exports (all values shell-quoted, keys validated)
let mut env_exports = String::new();
for (key, val) in &config.env_vars {
if !key.bytes().all(|b| b.is_ascii_alphanumeric() || b == b'_') {
continue; // Skip keys with unsafe characters
continue;
}
let _ = write!(env_exports, "export {}={}; ", key, shell_quote(val));
}
// Set standard env vars (all values shell-quoted)
let _ = write!(
env_exports,
"export HOME={}; ",
@ -402,19 +384,20 @@ pub fn enter_interactive(config: &SandboxConfig) -> Result<i32, RuntimeError> {
shell_quote(&config.hostname)
);
// Determine shell
let shell = if merged.join("bin/bash").exists() || merged.join("usr/bin/bash").exists() {
"/bin/bash"
} else {
"/bin/sh"
};
let _ = write!(setup, "{env_exports}cd ~; exec {shell} -l'");
let _ = write!(
setup,
"{env_exports}cd ~; exec {shell} -l </dev/tty >/dev/tty 2>/dev/tty\n__KARAPACE_EOF__\n"
);
let mut cmd = build_unshare_command(config);
cmd.arg("/bin/sh").arg("-c").arg(&setup);
// Pass through stdin/stdout/stderr for interactive use
cmd.stdin(std::process::Stdio::inherit());
cmd.stdout(std::process::Stdio::inherit());
cmd.stderr(std::process::Stdio::inherit());
@ -426,17 +409,92 @@ pub fn enter_interactive(config: &SandboxConfig) -> Result<i32, RuntimeError> {
Ok(status.code().unwrap_or(1))
}
/// Spawn an interactive login shell inside the sandbox without waiting for it.
///
/// Builds the setup script (mounts + chroot into the merged overlay), appends
/// environment exports and a final `exec` of the user's shell wired to
/// /dev/tty, then returns the running `Child` so the caller can manage the
/// session's lifetime itself.
///
/// # Errors
/// Returns `RuntimeError::ExecFailed` when the sandbox process cannot be
/// spawned.
pub fn spawn_enter_interactive(
    config: &SandboxConfig,
) -> Result<std::process::Child, RuntimeError> {
    use std::process::Stdio;

    let merged = &config.overlay_merged;
    let mut setup = build_setup_script(config);

    // Accumulate `export K=V; ` fragments; every value is shell-quoted.
    let mut exports = String::new();

    // User-supplied variables first. Keys are restricted to [A-Za-z0-9_] so a
    // hostile key cannot break out of the generated script.
    for (key, val) in &config.env_vars {
        let key_is_safe = key
            .bytes()
            .all(|b| b.is_ascii_alphanumeric() || b == b'_');
        if key_is_safe {
            let _ = write!(exports, "export {}={}; ", key, shell_quote(val));
        }
    }

    // Standard identity variables.
    let _ = write!(exports, "export HOME={}; ", shell_quote_path(&config.home_dir));
    let _ = write!(exports, "export USER={}; ", shell_quote(&config.username));
    let _ = write!(exports, "export HOSTNAME={}; ", shell_quote(&config.hostname));

    // Forward session/display variables from the host when present so
    // graphical and D-Bus clients keep working inside the sandbox.
    for (name, value) in [
        ("XDG_RUNTIME_DIR", std::env::var("XDG_RUNTIME_DIR")),
        ("DISPLAY", std::env::var("DISPLAY")),
        ("WAYLAND_DISPLAY", std::env::var("WAYLAND_DISPLAY")),
    ] {
        if let Ok(v) = value {
            let _ = write!(exports, "export {name}={}; ", shell_quote(&v));
        }
    }

    // TERM gets a fallback; the ${...:-...} expansion is deliberately left
    // unquoted so it is evaluated by the container shell.
    exports.push_str("export TERM=${TERM:-xterm-256color}; ");
    let _ = write!(
        exports,
        "export KARAPACE_ENV=1; export KARAPACE_HOSTNAME={}; ",
        shell_quote(&config.hostname)
    );

    // Prefer bash when the image ships it; otherwise fall back to POSIX sh.
    let has_bash = merged.join("bin/bash").exists() || merged.join("usr/bin/bash").exists();
    let shell = if has_bash { "/bin/bash" } else { "/bin/sh" };

    // Append the exports plus the final exec line, then close the heredoc
    // opened by build_setup_script.
    let _ = write!(
        setup,
        "{exports}cd ~; exec {shell} -l </dev/tty >/dev/tty 2>/dev/tty\n__KARAPACE_EOF__\n"
    );

    let mut cmd = build_unshare_command(config);
    cmd.arg("/bin/sh").arg("-c").arg(&setup);
    // Hand the caller's terminal straight through for interactive use.
    cmd.stdin(Stdio::inherit())
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit());
    cmd.spawn()
        .map_err(|e| RuntimeError::ExecFailed(format!("failed to spawn sandbox: {e}")))
}
pub fn exec_in_container(
config: &SandboxConfig,
command: &[String],
) -> Result<std::process::Output, RuntimeError> {
let mut setup = build_setup_script(config);
// Environment (all values shell-quoted, keys validated)
let mut env_exports = String::new();
for (key, val) in &config.env_vars {
if !key.bytes().all(|b| b.is_ascii_alphanumeric() || b == b'_') {
continue; // Skip keys with unsafe characters
continue;
}
let _ = write!(env_exports, "export {}={}; ", key, shell_quote(val));
}
@ -453,7 +511,11 @@ pub fn exec_in_container(
env_exports.push_str("export KARAPACE_ENV=1; ");
let escaped_cmd: Vec<String> = command.iter().map(|a| shell_quote(a)).collect();
let _ = write!(setup, "{env_exports}{}'", escaped_cmd.join(" "));
let _ = write!(
setup,
"{env_exports}{}\n__KARAPACE_EOF__\n",
escaped_cmd.join(" ")
);
let mut cmd = build_unshare_command(config);
cmd.arg("/bin/sh").arg("-c").arg(&setup);

View file

@ -22,10 +22,10 @@ pub struct EnvIdentity {
/// - Internal lookup during rebuild (to find old environments)
///
/// After `build`, the env_id stored in metadata comes from the lock file.
pub fn compute_env_id(normalized: &NormalizedManifest) -> EnvIdentity {
pub fn compute_env_id(normalized: &NormalizedManifest) -> Result<EnvIdentity, serde_json::Error> {
let mut hasher = blake3::Hasher::new();
hasher.update(normalized.canonical_json().as_bytes());
hasher.update(normalized.canonical_json()?.as_bytes());
let base_digest = blake3::hash(normalized.base_image.as_bytes())
.to_hex()
@ -71,10 +71,10 @@ pub fn compute_env_id(normalized: &NormalizedManifest) -> EnvIdentity {
let hex = hasher.finalize().to_hex().to_string();
let short = hex[..12].to_owned();
EnvIdentity {
Ok(EnvIdentity {
env_id: EnvId::new(hex),
short_id: ShortId::new(short),
}
})
}
#[cfg(test)]
@ -110,7 +110,7 @@ packages = ["clang", "git"]
.normalize()
.unwrap();
assert_eq!(compute_env_id(&a), compute_env_id(&b));
assert_eq!(compute_env_id(&a).unwrap(), compute_env_id(&b).unwrap());
}
#[test]
@ -141,7 +141,7 @@ packages = ["git", "cmake"]
.normalize()
.unwrap();
assert_ne!(compute_env_id(&a), compute_env_id(&b));
assert_ne!(compute_env_id(&a).unwrap(), compute_env_id(&b).unwrap());
}
#[test]
@ -172,7 +172,7 @@ backend = "oci"
.normalize()
.unwrap();
assert_ne!(compute_env_id(&a), compute_env_id(&b));
assert_ne!(compute_env_id(&a).unwrap(), compute_env_id(&b).unwrap());
}
#[test]
@ -188,7 +188,7 @@ image = "rolling"
.normalize()
.unwrap();
let id = compute_env_id(&n);
let id = compute_env_id(&n).unwrap();
assert_eq!(id.short_id.as_str().len(), 12);
assert!(id.env_id.as_str().starts_with(id.short_id.as_str()));
}

View file

@ -74,8 +74,8 @@ impl ManifestV1 {
}
impl NormalizedManifest {
pub fn canonical_json(&self) -> String {
serde_json::to_string(self).expect("normalized manifest serialization is infallible")
pub fn canonical_json(&self) -> Result<String, serde_json::Error> {
serde_json::to_string(self)
}
}
@ -175,7 +175,7 @@ packages = ["clang", "git"]
.normalize()
.unwrap();
assert_eq!(a.canonical_json(), b.canonical_json());
assert_eq!(a.canonical_json().unwrap(), b.canonical_json().unwrap());
}
#[test]

View file

@ -81,13 +81,19 @@ impl Store {
let reg_path = self.data_dir.join("registry.json");
fs::create_dir_all(&self.data_dir)?;
fs::write(&reg_path, data)?;
let mut reg = self.registry.write().expect("registry lock poisoned");
let mut reg = match self.registry.write() {
Ok(g) => g,
Err(e) => e.into_inner(),
};
*reg = Some(data.to_vec());
Ok(())
}
pub fn get_registry(&self) -> Option<Vec<u8>> {
let reg = self.registry.read().expect("registry lock poisoned");
let reg = match self.registry.read() {
Ok(g) => g,
Err(e) => e.into_inner(),
};
reg.clone()
}
}
@ -152,14 +158,19 @@ fn respond_err(req: tiny_http::Request, code: u16, msg: &str) {
}
fn respond_octet(req: tiny_http::Request, data: Vec<u8>) {
let header =
Header::from_bytes("Content-Type", "application/octet-stream").expect("valid header");
let _ = req.respond(Response::from_data(data).with_header(header));
let mut resp = Response::from_data(data);
if let Ok(header) = Header::from_bytes("Content-Type", "application/octet-stream") {
resp = resp.with_header(header);
}
let _ = req.respond(resp);
}
fn respond_json(req: tiny_http::Request, json: impl Into<Vec<u8>>) {
let header = Header::from_bytes("Content-Type", "application/json").expect("valid header");
let _ = req.respond(Response::from_data(json.into()).with_header(header));
let mut resp = Response::from_data(json.into());
if let Ok(header) = Header::from_bytes("Content-Type", "application/json") {
resp = resp.with_header(header);
}
let _ = req.respond(resp);
}
fn read_body(req: &mut tiny_http::Request) -> Option<Vec<u8>> {
@ -262,7 +273,13 @@ pub fn handle_request(store: &Store, req: tiny_http::Request) {
/// Start the server loop, blocking the current thread.
pub fn run_server(store: &Arc<Store>, addr: &str) {
let server = Server::http(addr).expect("failed to bind HTTP server");
let server = match Server::http(addr) {
Ok(s) => s,
Err(e) => {
error!("failed to bind HTTP server on {addr}: {e}");
return;
}
};
for request in server.incoming_requests() {
handle_request(store, request);
}

View file

@ -3,7 +3,7 @@ use karapace_server::Store;
use std::fs;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::info;
use tracing::{error, info};
#[derive(Parser)]
#[command(name = "karapace-server", about = "Karapace remote protocol v1 server")]
@ -27,7 +27,13 @@ fn main() {
let cli = Cli::parse();
fs::create_dir_all(&cli.data_dir).expect("failed to create data directory");
if let Err(e) = fs::create_dir_all(&cli.data_dir) {
error!(
"failed to create data directory {}: {e}",
cli.data_dir.display()
);
std::process::exit(1);
}
let addr = format!("0.0.0.0:{}", cli.port);
info!("starting karapace-server on {addr}");

View file

@ -48,13 +48,12 @@ pub struct EnvMetadata {
impl EnvMetadata {
/// Compute the checksum over the metadata content (excluding the checksum field itself).
fn compute_checksum(&self) -> String {
fn compute_checksum(&self) -> Result<String, StoreError> {
let mut copy = self.clone();
copy.checksum = None;
// Serialize without the checksum field (skip_serializing_if = None)
let json =
serde_json::to_string_pretty(&copy).expect("infallible: EnvMetadata always serializes");
blake3::hash(json.as_bytes()).to_hex().to_string()
let json = serde_json::to_string_pretty(&copy)?;
Ok(blake3::hash(json.as_bytes()).to_hex().to_string())
}
}
@ -89,7 +88,7 @@ impl MetadataStore {
// Compute and embed checksum before writing
let mut meta_with_checksum = meta.clone();
meta_with_checksum.checksum = Some(meta_with_checksum.compute_checksum());
meta_with_checksum.checksum = Some(meta_with_checksum.compute_checksum()?);
let content = serde_json::to_string_pretty(&meta_with_checksum)?;
let dir = self.layout.metadata_dir();
@ -112,7 +111,7 @@ impl MetadataStore {
// Verify checksum if present (backward-compatible: legacy files have None)
if let Some(ref expected) = meta.checksum {
let actual = meta.compute_checksum();
let actual = meta.compute_checksum()?;
if actual != *expected {
return Err(StoreError::IntegrityFailure {
hash: env_id.to_owned(),

View file

@ -1,4 +1,5 @@
use crate::layout::StoreLayout;
use crate::metadata::{EnvMetadata, EnvState, MetadataStore};
use crate::StoreError;
use serde::{Deserialize, Serialize};
use std::fs;
@ -7,21 +8,27 @@ use std::path::PathBuf;
use tempfile::NamedTempFile;
use tracing::{debug, info, warn};
/// A single rollback step that can undo part of an operation.
/// Parse a persisted environment-state name into an `EnvState`.
///
/// The original matcher only accepted exactly "Defined"/"defined" style
/// spellings; this accepts any ASCII casing (e.g. "BUILT"), which is
/// backward compatible with both spellings previously recognized.
/// Returns `None` for unrecognized names so WAL rollback can skip the entry
/// instead of guessing.
fn parse_env_state(s: &str) -> Option<EnvState> {
    match s.to_ascii_lowercase().as_str() {
        "defined" => Some(EnvState::Defined),
        "built" => Some(EnvState::Built),
        "running" => Some(EnvState::Running),
        "frozen" => Some(EnvState::Frozen),
        "archived" => Some(EnvState::Archived),
        _ => None,
    }
}
/// A single rollback step that can undo part of an in-flight operation.
///
/// Steps are serialized into WAL entries and executed in reverse order
/// during crash recovery.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RollbackStep {
    /// Remove a directory tree (e.g. orphaned env_dir).
    RemoveDir(PathBuf),
    /// Remove a single file (e.g. metadata, layer manifest).
    RemoveFile(PathBuf),
    /// Reset an environment's metadata state (e.g. Running → Built after crash).
    ResetState {
        // Environment whose metadata file should be rewritten.
        env_id: String,
        // State name to restore, e.g. "Built"; unknown names are skipped
        // with a warning during rollback.
        target_state: String,
    },
}
/// The type of mutating operation being tracked.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum WalOpKind {
Build,
@ -49,7 +56,6 @@ impl std::fmt::Display for WalOpKind {
}
}
/// A WAL entry representing an in-flight operation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WalEntry {
pub op_id: String,
@ -59,11 +65,6 @@ pub struct WalEntry {
pub rollback_steps: Vec<RollbackStep>,
}
/// Write-ahead log for crash recovery.
///
/// Mutating engine methods create a WAL entry before starting work,
/// append rollback steps as side effects occur, and remove the entry
/// on successful completion. On startup, incomplete entries are rolled back.
/// Write-ahead log for crash recovery.
///
/// Mutating operations record an entry before doing work and remove it on
/// success; entries left behind by a crash are rolled back on startup via
/// `recover`.
pub struct WriteAheadLog {
    // Directory holding one serialized WalEntry file per in-flight op_id.
    wal_dir: PathBuf,
}
@ -74,13 +75,11 @@ impl WriteAheadLog {
Self { wal_dir }
}
/// Ensure the WAL directory exists.
pub fn initialize(&self) -> Result<(), StoreError> {
fs::create_dir_all(&self.wal_dir)?;
Ok(())
}
/// Begin a new WAL entry for an operation. Returns the op_id.
pub fn begin(&self, kind: WalOpKind, env_id: &str) -> Result<String, StoreError> {
let op_id = format!(
"{}-{}",
@ -99,7 +98,6 @@ impl WriteAheadLog {
Ok(op_id)
}
/// Append a rollback step to an existing WAL entry.
pub fn add_rollback_step(&self, op_id: &str, step: RollbackStep) -> Result<(), StoreError> {
let mut entry = self.read_entry(op_id)?;
entry.rollback_steps.push(step);
@ -107,7 +105,6 @@ impl WriteAheadLog {
Ok(())
}
/// Commit (remove) a WAL entry after successful completion.
pub fn commit(&self, op_id: &str) -> Result<(), StoreError> {
let path = self.entry_path(op_id);
if path.exists() {
@ -117,7 +114,6 @@ impl WriteAheadLog {
Ok(())
}
/// List all incomplete WAL entries.
pub fn list_incomplete(&self) -> Result<Vec<WalEntry>, StoreError> {
if !self.wal_dir.exists() {
return Ok(Vec::new());
@ -132,7 +128,6 @@ impl WriteAheadLog {
Ok(entry) => entries.push(entry),
Err(e) => {
warn!("corrupt WAL entry {}: {e}", path.display());
// Remove corrupt entries
let _ = fs::remove_file(&path);
}
},
@ -147,8 +142,6 @@ impl WriteAheadLog {
Ok(entries)
}
/// Roll back all incomplete WAL entries.
/// Returns the number of entries rolled back.
pub fn recover(&self) -> Result<usize, StoreError> {
let entries = self.list_incomplete()?;
let count = entries.len();
@ -158,7 +151,6 @@ impl WriteAheadLog {
entry.kind, entry.env_id, entry.op_id
);
self.rollback_entry(entry);
// Remove the WAL entry after rollback
let _ = fs::remove_file(self.entry_path(&entry.op_id));
}
if count > 0 {
@ -168,7 +160,6 @@ impl WriteAheadLog {
}
fn rollback_entry(&self, entry: &WalEntry) {
// Execute rollback steps in reverse order
for step in entry.rollback_steps.iter().rev() {
match step {
RollbackStep::RemoveDir(path) => {
@ -196,38 +187,53 @@ impl WriteAheadLog {
env_id,
target_state,
} => {
// Resolve metadata dir from wal_dir (wal_dir = root/store/wal)
if let Some(store_dir) = self.wal_dir.parent() {
let metadata_dir = store_dir.join("metadata");
let meta_path = metadata_dir.join(env_id);
if meta_path.exists() {
match fs::read_to_string(&meta_path) {
Ok(content) => {
if let Ok(mut meta) =
serde_json::from_str::<serde_json::Value>(&content)
{
meta["state"] =
serde_json::Value::String(target_state.clone());
if let Ok(updated) = serde_json::to_string_pretty(&meta) {
if let Err(e) = fs::write(&meta_path, updated) {
warn!("WAL rollback: failed to reset state for {env_id}: {e}");
let Some(new_state) = parse_env_state(target_state) else {
warn!("WAL rollback: unknown target state '{target_state}' for {env_id}");
continue;
};
let Some(store_dir) = self.wal_dir.parent() else {
continue;
};
let Some(root_dir) = store_dir.parent() else {
continue;
};
let meta_path = store_dir.join("metadata").join(env_id);
if !meta_path.exists() {
continue;
}
let content = match fs::read_to_string(&meta_path) {
Ok(c) => c,
Err(e) => {
warn!("WAL rollback: failed to read metadata for {env_id}: {e}");
continue;
}
};
let mut meta: EnvMetadata = match serde_json::from_str(&content) {
Ok(m) => m,
Err(e) => {
warn!("WAL rollback: failed to parse metadata for {env_id}: {e}");
continue;
}
};
meta.state = new_state;
meta.updated_at = chrono::Utc::now().to_rfc3339();
meta.checksum = None;
let layout = StoreLayout::new(root_dir);
let meta_store = MetadataStore::new(layout);
if let Err(e) = meta_store.put(&meta) {
warn!("WAL rollback: failed to persist metadata for {env_id}: {e}");
} else {
debug!("WAL rollback: reset {env_id} state to {target_state}");
}
}
}
}
Err(e) => {
warn!(
"WAL rollback: failed to read metadata for {env_id}: {e}"
);
}
}
}
}
}
}
}
}
fn entry_path(&self, op_id: &str) -> PathBuf {

View file

@ -15,7 +15,7 @@ RestartSec=2
ProtectSystem=strict
ProtectHome=read-only
ReadWritePaths=%h/.local/share/karapace
PrivateTmp=true
PrivateTmp=false
NoNewPrivileges=true
[Install]

199
docs/getting-started.md Normal file
View file

@ -0,0 +1,199 @@
# Getting Started
This tutorial walks through the first commands you typically use with Karapace.
It is written as a hands-on sequence:
1. Create a `karapace.toml` manifest.
2. Pin the base image reference.
3. Build an environment (produces an `env_id`).
4. Inspect the environment and run commands inside it.
5. Check filesystem drift, then save and restore changes with snapshots.
For full command flags and exit codes, see [cli-reference.md](cli-reference.md).
## Prerequisites
- A Linux host.
- Runtime prerequisites must be available on your machine (user namespaces, overlay tooling, etc.).
You can validate prerequisites and store health with:
```bash
karapace doctor
```
If you are building the CLI from source, the binary is `karapace` (crate `karapace-cli`).
## Choose a store location
Karapace keeps all persistent data in a *store directory*.
- Default store path: `~/.local/share/karapace`
- Override per-command with `--store <path>`
In this tutorial, we use a disposable store directory so you can experiment safely:
```bash
STORE="$(mktemp -d /tmp/karapace-store.XXXXXX)"
```
## 1) Create a manifest (`karapace new`)
Create a new `karapace.toml` in an empty project directory:
```bash
mkdir -p my-project
cd my-project
karapace --store "$STORE" new demo --template minimal
```
What this does:
- Writes `./karapace.toml` in the current directory.
- If your terminal is interactive (TTY), the command may prompt for optional fields:
- Packages (space-separated)
- A workspace mount
- Runtime backend (`namespace`, `oci`, `mock`)
- Network isolation
What to expect:
- On success, it prints that `karapace.toml` was written.
- If `./karapace.toml` already exists:
- With `--force`, it overwrites.
- Without `--force`, it prompts on a TTY; otherwise it fails.
## 2) Pin the base image (`karapace pin`)
Reproducible workflows rely on a pinned base image reference.
Check whether the manifest is already pinned:
```bash
karapace --store "$STORE" pin --check karapace.toml
```
What to expect:
- On a fresh `minimal` template, `pin --check` typically fails with an error indicating `base.image` is not pinned.
Pin the base image in-place:
```bash
karapace --store "$STORE" pin karapace.toml
```
Then re-check:
```bash
karapace --store "$STORE" pin --check karapace.toml
```
What this does:
- Resolves the `base.image` value to an explicit `http(s)://...` URL.
- Rewrites the manifest file atomically.
## 3) Build an environment (`karapace build`)
Build an environment from the manifest:
```bash
karapace --store "$STORE" build --require-pinned-image karapace.toml
```
What this does:
- Resolves and prepares the base image.
- Builds the environment filesystem.
- Writes `karapace.lock` next to the manifest.
- Produces a deterministic `env_id` (a 64-character hex string). The first 12 characters are the `short_id`.
What to expect:
- The first build for a base image may download and extract a root filesystem.
- On success, output includes the `env_id`.
## 4) Discover and inspect environments (`list`, `inspect`)
List environments in the store:
```bash
karapace --store "$STORE" list
```
Inspect a specific environment:
```bash
karapace --store "$STORE" inspect <env_id>
```
What to expect:
- `list` shows `SHORT_ID`, `NAME`, `STATE`, and `ENV_ID`.
- After a build, the state is typically `built`.
## 5) Run a command inside the environment (`exec`)
Run a non-interactive command inside an environment:
```bash
karapace --store "$STORE" exec <env_id> -- sh -lc "echo hello"
```
What this does:
- Transitions the environment to `Running` for the duration of the command.
- Streams stdout/stderr back to your terminal.
- Returns to `Built` when the command finishes.
## 6) Check filesystem drift (`diff`)
If you write to the environment, those changes live in the writable overlay.
Show changes in the overlay:
```bash
karapace --store "$STORE" diff <env_id>
```
What to expect:
- If you created or modified files via `exec`, `diff` reports added/modified/removed paths.
## 7) Save changes as a snapshot (`commit`) and restore them (`snapshots`, `restore`)
Create a snapshot from the current overlay:
```bash
karapace --store "$STORE" commit <env_id>
```
List snapshots:
```bash
karapace --store "$STORE" snapshots <env_id>
```
Restore from a snapshot:
```bash
karapace --store "$STORE" restore <env_id> <restore_hash>
```
What to expect:
- `commit` returns a snapshot identifier.
- `snapshots` lists snapshots and includes a `restore_hash` value used with `restore`.
- After `restore`, the overlay directory is replaced with the snapshot content.
## Next steps
- Interactive sessions: `karapace enter <env_id>`
- Stop a running session from another terminal: `karapace stop <env_id>`
- State management: `karapace freeze`, `karapace archive`
- Store maintenance: `karapace verify-store`, `karapace gc`, `karapace destroy`
For details and flags, see [cli-reference.md](cli-reference.md).

Binary file not shown.

After

Width:  |  Height:  |  Size: 55 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 170 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 182 KiB