feat: karapace-runtime — namespace/OCI/mock backends, sandbox, host integration

- RuntimeBackend trait: resolve, build, enter, exec, destroy, status
- Namespace backend: unshare + fuse-overlayfs + chroot (unprivileged)
- OCI backend: crun/runc/youki support
- Mock backend: deterministic test backend with configurable resolution
- Image downloading from images.linuxcontainers.org with blake3 verification
- Sandbox script generation with POSIX shell-quote injection prevention
- Host integration: Wayland, X11, PipeWire, PulseAudio, D-Bus, GPU, audio, SSH agent
- Desktop app export as .desktop files on the host
- SecurityPolicy: mount whitelist, device policy, env var allow/deny, resource limits
- Prerequisite detection with distro-specific install instructions
- OSC 777 terminal markers for container-aware terminals
This commit is contained in:
Marco Allegretti 2026-02-22 18:36:46 +01:00
parent 4de311ebc7
commit 8493831222
14 changed files with 5330 additions and 0 deletions

View file

@ -0,0 +1,21 @@
# Crate manifest for karapace-runtime, the execution layer of Karapace.
[package]
name = "karapace-runtime"
description = "Container runtime backends, image management, sandbox, and host integration for Karapace"
# Version, edition, license, and repository are inherited from the workspace root.
version.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true

# Lint configuration is shared across the workspace.
[lints]
workspace = true

[dependencies]
serde.workspace = true
serde_json.workspace = true
thiserror.workspace = true
# blake3: content digests for base-image integrity verification
blake3.workspace = true
libc.workspace = true
tracing.workspace = true
tempfile.workspace = true
# Sibling crates: manifest schema types and the content-addressed store
karapace-schema = { path = "../karapace-schema" }
karapace-store = { path = "../karapace-store" }

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,82 @@
use crate::RuntimeError;
use karapace_schema::{NormalizedManifest, ResolutionResult};
use serde::{Deserialize, Serialize};
/// Everything a backend needs to build, enter, exec into, or tear down
/// one environment. Serializable so it can be persisted/passed around.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct RuntimeSpec {
    /// Environment identifier (also used for status lookups).
    pub env_id: String,
    /// Root filesystem location for this environment (consumed by the
    /// backend implementations).
    pub root_path: String,
    /// Writable overlay layer location — presumably the upper dir of the
    /// overlay mount; confirm against the namespace backend.
    pub overlay_path: String,
    /// Root of the karapace store (image cache lives beneath it).
    pub store_root: String,
    /// Normalized manifest describing packages, mounts, and hardware needs.
    pub manifest: NormalizedManifest,
}
/// Snapshot of an environment's run state as reported by a backend.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct RuntimeStatus {
    /// Environment this status refers to.
    pub env_id: String,
    /// Whether the environment is currently running.
    pub running: bool,
    /// Process id of the environment's main process, when known.
    pub pid: Option<u32>,
}
/// Pluggable execution backend (namespace, OCI, or mock).
///
/// `Send + Sync` so a boxed backend can be shared across threads.
pub trait RuntimeBackend: Send + Sync {
    /// Short backend identifier (also used in the default `exec` error).
    fn name(&self) -> &str;
    /// Whether this backend can run on the current system.
    fn available(&self) -> bool;
    /// Resolve dependencies: download/identify the base image and query the
    /// package manager for exact versions of each requested package.
    /// Returns a ResolutionResult with content digest and pinned versions.
    fn resolve(&self, spec: &RuntimeSpec) -> Result<ResolutionResult, RuntimeError>;
    /// Materialize the environment described by `spec`.
    fn build(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError>;
    /// Enter the environment (interactive use — see backend impls).
    fn enter(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError>;
    /// Run `_command` inside the environment and capture its output.
    /// Default implementation: backends without exec support report
    /// `ExecFailed` naming themselves.
    fn exec(
        &self,
        _spec: &RuntimeSpec,
        _command: &[String],
    ) -> Result<std::process::Output, RuntimeError> {
        Err(RuntimeError::ExecFailed(format!(
            "exec not supported by {} backend",
            self.name()
        )))
    }
    /// Tear down the environment's runtime state.
    fn destroy(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError>;
    /// Report whether environment `env_id` is running (and its pid).
    fn status(&self, env_id: &str) -> Result<RuntimeStatus, RuntimeError>;
}
/// Look up a runtime backend implementation by its configured name.
///
/// Recognized names are "namespace", "oci", and "mock". Any other name
/// yields `RuntimeError::BackendUnavailable` carrying the offending value.
pub fn select_backend(
    name: &str,
    store_root: &str,
) -> Result<Box<dyn RuntimeBackend>, RuntimeError> {
    let backend: Box<dyn RuntimeBackend> = match name {
        "namespace" => Box::new(crate::namespace::NamespaceBackend::with_store_root(
            store_root,
        )),
        "oci" => Box::new(crate::oci::OciBackend::with_store_root(store_root)),
        "mock" => Box::new(crate::mock::MockBackend::new()),
        unknown => return Err(RuntimeError::BackendUnavailable(unknown.to_owned())),
    };
    Ok(backend)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Every shipped backend name must be selectable (construction only;
    // availability on the host is a separate check).
    #[test]
    fn select_valid_backends() {
        assert!(select_backend("namespace", "/tmp/test-store").is_ok());
        assert!(select_backend("oci", "/tmp/test-store").is_ok());
        assert!(select_backend("mock", "/tmp/test-store").is_ok());
    }

    // Unknown names surface BackendUnavailable rather than panicking.
    #[test]
    fn select_invalid_backend_fails() {
        assert!(select_backend("nonexistent", "/tmp/test-store").is_err());
    }
}

View file

@ -0,0 +1,232 @@
use crate::RuntimeError;
use std::path::{Path, PathBuf};
/// Result of exporting a container app to the host: where the .desktop
/// file was written and the command it will execute.
pub struct ExportedApp {
    /// App name exactly as passed to the export call.
    pub name: String,
    /// Path of the generated .desktop file on the host.
    pub desktop_file: PathBuf,
    /// The Exec= command line written into the entry.
    pub exec_command: String,
}
/// Directory where exported .desktop files are written.
///
/// Follows the XDG Base Directory specification: `$XDG_DATA_HOME/applications`
/// when XDG_DATA_HOME is set to a non-empty value, otherwise the conventional
/// fallback `$HOME/.local/share/applications` (which is also XDG's default
/// for XDG_DATA_HOME), so existing behavior is preserved.
///
/// # Errors
/// Returns `RuntimeError::ExecFailed` when neither variable provides a
/// usable destination.
fn default_desktop_dir() -> Result<PathBuf, RuntimeError> {
    if let Ok(xdg) = std::env::var("XDG_DATA_HOME") {
        // Per the spec an empty value must be treated as unset.
        if !xdg.is_empty() {
            return Ok(PathBuf::from(xdg).join("applications"));
        }
    }
    if let Ok(home) = std::env::var("HOME") {
        Ok(PathBuf::from(home).join(".local/share/applications"))
    } else {
        Err(RuntimeError::ExecFailed(
            "HOME environment variable not set".to_owned(),
        ))
    }
}
/// File name for an exported app: `karapace-<short-env-id>-<app>.desktop`,
/// where the short id is at most the first 12 characters of `env_id`.
fn desktop_file_name(env_id: &str, app_name: &str) -> String {
    let cut = env_id.len().min(12);
    format!("karapace-{}-{app_name}.desktop", &env_id[..cut])
}
/// Common file-name prefix of every .desktop entry exported from one
/// environment: `karapace-<short-env-id>-`.
fn desktop_prefix(env_id: &str) -> String {
    let cut = env_id.len().min(12);
    format!("karapace-{}-", &env_id[..cut])
}
/// Create (or overwrite) a .desktop launcher in `desktop_dir` that runs
/// `binary_path` inside the environment via `karapace_bin enter`.
///
/// The entry carries X-Karapace-EnvId / X-Karapace-Store keys so the
/// exporter can later find and remove files belonging to an environment.
fn write_desktop_entry(
    desktop_dir: &Path,
    env_id: &str,
    app_name: &str,
    binary_path: &str,
    karapace_bin: &str,
    store_path: &str,
) -> Result<ExportedApp, RuntimeError> {
    // First 12 chars of the env id keep names short; callers pass digests,
    // so this stays unambiguous in practice.
    let short_id = &env_id[..12.min(env_id.len())];
    std::fs::create_dir_all(desktop_dir)?;
    let desktop_id = desktop_file_name(env_id, app_name);
    let desktop_path = desktop_dir.join(&desktop_id);
    // NOTE(review): no shell quoting is applied here; a binary_path or
    // store_path containing spaces would break this Exec line — confirm
    // inputs are validated upstream.
    let exec_cmd = format!("{karapace_bin} --store {store_path} enter {short_id} -- {binary_path}");
    // Icon name falls back to the app name; the host icon theme resolves it.
    let icon = app_name;
    let contents = format!(
        "[Desktop Entry]\n\
         Type=Application\n\
         Name={app_name} (Karapace {short_id})\n\
         Exec={exec_cmd}\n\
         Icon={icon}\n\
         Terminal=false\n\
         Categories=Karapace;\n\
         X-Karapace-EnvId={env_id}\n\
         X-Karapace-Store={store_path}\n\
         Comment=Launched inside Karapace environment {short_id}\n"
    );
    std::fs::write(&desktop_path, &contents)?;
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        // Mark the launcher executable; some desktop environments require
        // this to treat the entry as trusted. Best-effort: failure ignored.
        let _ = std::fs::set_permissions(&desktop_path, std::fs::Permissions::from_mode(0o755));
    }
    Ok(ExportedApp {
        name: app_name.to_owned(),
        desktop_file: desktop_path,
        exec_command: exec_cmd,
    })
}
/// Export `app_name` from environment `env_id` as a .desktop entry in the
/// default host applications directory. Thin wrapper over
/// `write_desktop_entry`.
pub fn export_app(
    env_id: &str,
    app_name: &str,
    binary_path: &str,
    karapace_bin: &str,
    store_path: &str,
) -> Result<ExportedApp, RuntimeError> {
    let dir = default_desktop_dir()?;
    write_desktop_entry(&dir, env_id, app_name, binary_path, karapace_bin, store_path)
}
/// Remove the previously exported .desktop entry for `app_name` (if any)
/// from the default applications directory.
pub fn unexport_app(env_id: &str, app_name: &str) -> Result<(), RuntimeError> {
    remove_desktop_entry(&default_desktop_dir()?, env_id, app_name)
}
/// Delete the .desktop file for `app_name` in `desktop_dir`.
///
/// Removal is idempotent: a missing file is not an error. Instead of the
/// racy exists()-then-remove sequence (TOCTOU: another process could
/// delete the file between the two calls), a single `remove_file` is
/// issued and a `NotFound` error is treated as success.
fn remove_desktop_entry(
    desktop_dir: &Path,
    env_id: &str,
    app_name: &str,
) -> Result<(), RuntimeError> {
    let desktop_path = desktop_dir.join(desktop_file_name(env_id, app_name));
    match std::fs::remove_file(&desktop_path) {
        Ok(()) => Ok(()),
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
        // Other I/O failures propagate via RuntimeError's From<io::Error>.
        Err(e) => Err(e.into()),
    }
}
/// Remove every .desktop entry exported from `env_id`, returning the
/// names of the files that were deleted.
pub fn unexport_all(env_id: &str) -> Result<Vec<String>, RuntimeError> {
    remove_all_entries(&default_desktop_dir()?, env_id)
}
/// Delete every exported .desktop file belonging to `env_id` inside
/// `desktop_dir` and report the removed file names. A missing directory
/// means there is nothing to remove.
fn remove_all_entries(desktop_dir: &Path, env_id: &str) -> Result<Vec<String>, RuntimeError> {
    let mut removed = Vec::new();
    if !desktop_dir.exists() {
        return Ok(removed);
    }
    let prefix = desktop_prefix(env_id);
    for entry in std::fs::read_dir(desktop_dir)? {
        let entry = entry?;
        let file_name = entry.file_name();
        let name = file_name.to_string_lossy();
        if name.starts_with(&prefix) && name.ends_with(".desktop") {
            std::fs::remove_file(entry.path())?;
            removed.push(name.into_owned());
        }
    }
    Ok(removed)
}
/// App names currently exported from `env_id` in the default
/// applications directory.
pub fn list_exported(env_id: &str) -> Result<Vec<String>, RuntimeError> {
    list_entries(&default_desktop_dir()?, env_id)
}
/// App names exported from `env_id` in `desktop_dir`, derived by
/// stripping the karapace prefix and the .desktop suffix from each
/// matching file name.
fn list_entries(desktop_dir: &Path, env_id: &str) -> Result<Vec<String>, RuntimeError> {
    let mut apps = Vec::new();
    if !desktop_dir.exists() {
        return Ok(apps);
    }
    let prefix = desktop_prefix(env_id);
    for entry in std::fs::read_dir(desktop_dir)? {
        let entry = entry?;
        let file_name = entry.file_name();
        let name = file_name.to_string_lossy();
        if name.starts_with(&prefix) && name.ends_with(".desktop") {
            let app = name
                .strip_prefix(&prefix)
                .and_then(|s| s.strip_suffix(".desktop"))
                .unwrap_or(&name)
                .to_owned();
            apps.push(app);
        }
    }
    Ok(apps)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Fresh applications directory inside a tempdir. The TempDir guard is
    // returned so the directory outlives the test body.
    fn test_desktop_dir() -> (tempfile::TempDir, PathBuf) {
        let dir = tempfile::tempdir().unwrap();
        let apps = dir.path().join(".local/share/applications");
        (dir, apps)
    }

    // Long hex-like id; only its first 12 characters appear in file names.
    const TEST_ENV_ID: &str = "abc123def456789012345678901234567890123456789012345678901234";

    #[test]
    fn export_unexport_roundtrip() {
        let (_dir, apps) = test_desktop_dir();
        let result = write_desktop_entry(
            &apps,
            TEST_ENV_ID,
            "test-app",
            "/usr/bin/test-app",
            "/usr/bin/karapace",
            "/tmp/store",
        )
        .unwrap();
        assert!(result.desktop_file.exists());
        let contents = std::fs::read_to_string(&result.desktop_file).unwrap();
        assert!(contents.contains("X-Karapace-EnvId="));
        assert!(contents.contains("test-app"));
        // Listing recovers the bare app name from the file name.
        let found = list_entries(&apps, TEST_ENV_ID).unwrap();
        assert_eq!(found, vec!["test-app"]);
        remove_desktop_entry(&apps, TEST_ENV_ID, "test-app").unwrap();
        assert!(!result.desktop_file.exists());
    }

    #[test]
    fn unexport_all_cleans_up() {
        let (_dir, apps) = test_desktop_dir();
        write_desktop_entry(
            &apps,
            TEST_ENV_ID,
            "app1",
            "/usr/bin/app1",
            "/usr/bin/karapace",
            "/tmp/store",
        )
        .unwrap();
        write_desktop_entry(
            &apps,
            TEST_ENV_ID,
            "app2",
            "/usr/bin/app2",
            "/usr/bin/karapace",
            "/tmp/store",
        )
        .unwrap();
        // Both entries share the env prefix, so both are removed.
        let removed = remove_all_entries(&apps, TEST_ENV_ID).unwrap();
        assert_eq!(removed.len(), 2);
        let found = list_entries(&apps, TEST_ENV_ID).unwrap();
        assert!(found.is_empty());
    }
}

View file

@ -0,0 +1,267 @@
use crate::sandbox::BindMount;
use karapace_schema::NormalizedManifest;
use std::path::{Path, PathBuf};
/// Host resources to project into a sandbox: bind mounts for sockets,
/// devices, and directories, plus environment variables to set inside.
pub struct HostIntegration {
    /// Host paths to bind-mount into the container.
    pub bind_mounts: Vec<BindMount>,
    /// (name, value) environment variables to propagate.
    pub env_vars: Vec<(String, String)>,
}
/// Compute the bind mounts and environment variables that project host
/// facilities (display, audio, D-Bus, GPU, fonts/themes, manifest-declared
/// mounts) into an environment described by `manifest`.
///
/// Everything is best-effort: a resource is only added when the relevant
/// env var is set and/or the host path exists, so the result depends on
/// the calling process's environment.
#[allow(clippy::too_many_lines)]
pub fn compute_host_integration(manifest: &NormalizedManifest) -> HostIntegration {
    let mut bind_mounts = Vec::new();
    let mut env_vars = Vec::new();
    // Wayland display
    if let Ok(wayland) = std::env::var("WAYLAND_DISPLAY") {
        env_vars.push(("WAYLAND_DISPLAY".to_owned(), wayland));
    }
    // X11 display
    if let Ok(display) = std::env::var("DISPLAY") {
        env_vars.push(("DISPLAY".to_owned(), display));
        if Path::new("/tmp/.X11-unix").exists() {
            bind_mounts.push(BindMount {
                source: PathBuf::from("/tmp/.X11-unix"),
                target: PathBuf::from("/tmp/.X11-unix"),
                read_only: true,
            });
        }
        // Xauthority (only propagated when the cookie file actually exists)
        if let Ok(xauth) = std::env::var("XAUTHORITY") {
            if Path::new(&xauth).exists() {
                bind_mounts.push(BindMount {
                    source: PathBuf::from(&xauth),
                    target: PathBuf::from(&xauth),
                    read_only: true,
                });
                env_vars.push(("XAUTHORITY".to_owned(), xauth));
            }
        }
    }
    // XDG_RUNTIME_DIR sockets
    if let Ok(xdg_run) = std::env::var("XDG_RUNTIME_DIR") {
        let xdg_path = PathBuf::from(&xdg_run);
        env_vars.push(("XDG_RUNTIME_DIR".to_owned(), xdg_run.clone()));
        // PipeWire socket
        let pipewire = xdg_path.join("pipewire-0");
        if pipewire.exists() {
            bind_mounts.push(BindMount {
                source: pipewire.clone(),
                target: pipewire,
                read_only: false,
            });
        }
        // PulseAudio socket
        let pulse = xdg_path.join("pulse/native");
        if pulse.exists() {
            bind_mounts.push(BindMount {
                source: pulse.clone(),
                target: pulse,
                read_only: false,
            });
        }
        // D-Bus session socket
        let dbus = xdg_path.join("bus");
        if dbus.exists() {
            bind_mounts.push(BindMount {
                source: dbus.clone(),
                target: dbus,
                read_only: false,
            });
            env_vars.push((
                "DBUS_SESSION_BUS_ADDRESS".to_owned(),
                format!("unix:path={xdg_run}/bus"),
            ));
        }
        // Wayland socket
        // NOTE(review): the socket name is hardcoded as "wayland-0", but
        // WAYLAND_DISPLAY (propagated above) may name a different socket —
        // confirm the two cannot diverge in supported setups.
        let wayland_sock = xdg_path.join("wayland-0");
        if wayland_sock.exists() {
            bind_mounts.push(BindMount {
                source: wayland_sock.clone(),
                target: wayland_sock,
                read_only: false,
            });
        }
    }
    // GPU passthrough
    if manifest.hardware_gpu {
        // DRI render nodes
        if Path::new("/dev/dri").exists() {
            bind_mounts.push(BindMount {
                source: PathBuf::from("/dev/dri"),
                target: PathBuf::from("/dev/dri"),
                read_only: false,
            });
        }
        // Nvidia devices
        for dev in &[
            "/dev/nvidia0",
            "/dev/nvidiactl",
            "/dev/nvidia-modeset",
            "/dev/nvidia-uvm",
        ] {
            if Path::new(dev).exists() {
                bind_mounts.push(BindMount {
                    source: PathBuf::from(dev),
                    target: PathBuf::from(dev),
                    read_only: false,
                });
            }
        }
    }
    // Audio passthrough
    if manifest.hardware_audio && Path::new("/dev/snd").exists() {
        bind_mounts.push(BindMount {
            source: PathBuf::from("/dev/snd"),
            target: PathBuf::from("/dev/snd"),
            read_only: false,
        });
    }
    // Manifest-declared mounts (host side may use ~/ or ./ shorthand);
    // these are always mounted read-write.
    for mount in &manifest.mounts {
        let host_path = expand_path(&mount.host_path);
        bind_mounts.push(BindMount {
            source: host_path,
            target: PathBuf::from(&mount.container_path),
            read_only: false,
        });
    }
    // Standard env vars to propagate (safe, non-secret variables only).
    // Security-sensitive vars like SSH_AUTH_SOCK and GPG_AGENT_INFO are
    // excluded here — they are in SecurityPolicy.denied_env_vars.
    // Users who need SSH agent forwarding should declare an explicit mount.
    for key in &[
        "TERM", "LANG", "LANGUAGE", "LC_ALL", "SHELL", "EDITOR", "VISUAL",
    ] {
        if let Ok(val) = std::env::var(key) {
            // Entries added earlier in this function take precedence;
            // skip duplicates.
            if !env_vars.iter().any(|(k, _)| k == *key) {
                env_vars.push((key.to_string(), val));
            }
        }
    }
    // Font config and themes (read-only so the container cannot alter the host)
    for dir in &["/usr/share/fonts", "/usr/share/icons", "/usr/share/themes"] {
        if Path::new(dir).exists() {
            bind_mounts.push(BindMount {
                source: PathBuf::from(dir),
                target: PathBuf::from(dir),
                read_only: true,
            });
        }
    }
    HostIntegration {
        bind_mounts,
        env_vars,
    }
}
/// Expand a leading `~/` (using HOME) or `./`/`.` (using the current
/// directory) in a manifest-declared host path. Anything else — and any
/// shorthand whose expansion source is unavailable — passes through
/// unchanged.
fn expand_path(path: &str) -> PathBuf {
    if let (Some(rest), Ok(home)) = (path.strip_prefix("~/"), std::env::var("HOME")) {
        return PathBuf::from(home).join(rest);
    }
    let dot_relative = path.starts_with("./") || path == ".";
    if dot_relative {
        if let Ok(cwd) = std::env::current_dir() {
            return cwd.join(path.strip_prefix("./").unwrap_or(path));
        }
    }
    PathBuf::from(path)
}
#[cfg(test)]
mod tests {
    use super::*;
    use karapace_schema::parse_manifest_str;

    // GPU device mounts appear only when the manifest asks for them and,
    // because the computation probes the host, only when /dev/dri exists.
    #[test]
    fn host_integration_includes_gpu_when_requested() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[hardware]
gpu = true
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let hi = compute_host_integration(&manifest);
        let has_dri = hi
            .bind_mounts
            .iter()
            .any(|m| m.source.as_path() == Path::new("/dev/dri"));
        // Only assert if the device exists on this system
        if Path::new("/dev/dri").exists() {
            assert!(has_dri);
        }
    }

    #[test]
    fn host_integration_excludes_gpu_when_not_requested() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[hardware]
gpu = false
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let hi = compute_host_integration(&manifest);
        let has_dri = hi
            .bind_mounts
            .iter()
            .any(|m| m.source.as_path() == Path::new("/dev/dri"));
        // gpu = false must never produce a /dev/dri mount, regardless of host.
        assert!(!has_dri);
    }

    #[test]
    fn manifest_mounts_included() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
workspace = "/tmp/test-src:/workspace"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let hi = compute_host_integration(&manifest);
        assert!(hi
            .bind_mounts
            .iter()
            .any(|m| m.target.as_path() == Path::new("/workspace")));
    }

    #[test]
    fn expand_tilde_path() {
        let expanded = expand_path("~/projects");
        // Conditional: in a HOME-less environment the raw path comes back.
        if let Ok(home) = std::env::var("HOME") {
            assert_eq!(expanded, PathBuf::from(home).join("projects"));
        }
    }
}

View file

@ -0,0 +1,715 @@
use crate::RuntimeError;
use std::path::{Path, PathBuf};
use std::process::Command;
const LXC_IMAGE_BASE: &str = "https://images.linuxcontainers.org/images";
/// Where a base image comes from: a known distro served by the LXC image
/// server, or a user-supplied tarball URL.
#[derive(Debug, Clone)]
pub enum ImageSource {
    /// openSUSE; `variant` is "tumbleweed" or a Leap version like "15.6".
    OpenSuse { variant: String },
    /// Ubuntu release codename, e.g. "noble".
    Ubuntu { codename: String },
    /// Debian release codename, e.g. "bookworm".
    Debian { codename: String },
    /// Fedora release number, e.g. "41".
    Fedora { version: String },
    /// Arch Linux (rolling; resolved as the "current" variant).
    Arch,
    /// Direct URL to a rootfs tarball.
    Custom { url: String },
}
/// A user-supplied image name resolved to a concrete source plus the
/// cache key and display name derived from it.
#[derive(Debug, Clone)]
pub struct ResolvedImage {
    /// Concrete source to download from.
    pub source: ImageSource,
    /// Stable directory name used under the image cache.
    pub cache_key: String,
    /// Human-readable name for progress/log messages.
    pub display_name: String,
}
/// Map a user-supplied image alias (or URL) to a concrete image source,
/// a stable cache key, and a human-readable display name.
///
/// Matching is whitespace-trimmed and case-insensitive. Unknown names
/// that are not http(s) URLs produce `RuntimeError::ImageNotFound`
/// listing every supported alias.
#[allow(clippy::too_many_lines)]
pub fn resolve_image(name: &str) -> Result<ResolvedImage, RuntimeError> {
    let name = name.trim().to_lowercase();
    // Small constructors keep the alias table below readable.
    let suse = |v: &str| ImageSource::OpenSuse {
        variant: v.to_owned(),
    };
    let ubuntu = |c: &str| ImageSource::Ubuntu {
        codename: c.to_owned(),
    };
    let debian = |c: &str| ImageSource::Debian {
        codename: c.to_owned(),
    };
    let fedora = |v: &str| ImageSource::Fedora {
        version: v.to_owned(),
    };
    let (source, cache_key, display_name) = match name.as_str() {
        "rolling" | "opensuse" | "opensuse/tumbleweed" | "tumbleweed" => (
            suse("tumbleweed"),
            "opensuse-tumbleweed".to_owned(),
            "openSUSE Tumbleweed".to_owned(),
        ),
        "opensuse/leap" | "leap" => (
            suse("15.6"),
            "opensuse-leap-15.6".to_owned(),
            "openSUSE Leap 15.6".to_owned(),
        ),
        "ubuntu" | "ubuntu/24.04" | "ubuntu/noble" => (
            ubuntu("noble"),
            "ubuntu-noble".to_owned(),
            "Ubuntu 24.04 (Noble)".to_owned(),
        ),
        "ubuntu/22.04" | "ubuntu/jammy" => (
            ubuntu("jammy"),
            "ubuntu-jammy".to_owned(),
            "Ubuntu 22.04 (Jammy)".to_owned(),
        ),
        "ubuntu/20.04" | "ubuntu/focal" => (
            ubuntu("focal"),
            "ubuntu-focal".to_owned(),
            "Ubuntu 20.04 (Focal)".to_owned(),
        ),
        "ubuntu/24.10" | "ubuntu/oracular" => (
            ubuntu("oracular"),
            "ubuntu-oracular".to_owned(),
            "Ubuntu 24.10 (Oracular)".to_owned(),
        ),
        "debian" | "debian/bookworm" => (
            debian("bookworm"),
            "debian-bookworm".to_owned(),
            "Debian Bookworm".to_owned(),
        ),
        "debian/trixie" => (
            debian("trixie"),
            "debian-trixie".to_owned(),
            "Debian Trixie".to_owned(),
        ),
        "debian/sid" => (
            debian("sid"),
            "debian-sid".to_owned(),
            "Debian Sid".to_owned(),
        ),
        "fedora" | "fedora/41" => (fedora("41"), "fedora-41".to_owned(), "Fedora 41".to_owned()),
        "fedora/40" => (fedora("40"), "fedora-40".to_owned(), "Fedora 40".to_owned()),
        "fedora/42" => (fedora("42"), "fedora-42".to_owned(), "Fedora 42".to_owned()),
        "arch" | "archlinux" => (
            ImageSource::Arch,
            "archlinux".to_owned(),
            "Arch Linux".to_owned(),
        ),
        // URLs become Custom sources keyed by their blake3 hash.
        other if other.starts_with("http://") || other.starts_with("https://") => (
            ImageSource::Custom {
                url: other.to_owned(),
            },
            format!("custom-{}", blake3::hash(other.as_bytes()).to_hex()),
            format!("Custom ({other})"),
        ),
        other => {
            return Err(RuntimeError::ImageNotFound(format!(
                "unknown image '{other}'. Supported: rolling, opensuse/tumbleweed, opensuse/leap, \
                 ubuntu, ubuntu/24.04, ubuntu/22.04, ubuntu/20.04, \
                 debian, debian/bookworm, debian/trixie, debian/sid, \
                 fedora, fedora/40, fedora/41, fedora/42, \
                 arch, archlinux, or a URL"
            )));
        }
    };
    Ok(ResolvedImage {
        source,
        cache_key,
        display_name,
    })
}
/// Index URL for a distro/variant on the LXC image server (amd64 arch,
/// "default" profile); always ends with a trailing slash.
fn lxc_rootfs_url(distro: &str, variant: &str) -> String {
    [LXC_IMAGE_BASE, distro, variant, "amd64", "default", ""].join("/")
}
/// Fetch the directory index at `index_url` (via external `curl`) and
/// return the lexically-latest build-date entry, with "%3A" decoded
/// back to ":".
///
/// # Errors
/// `ExecFailed` when curl cannot be spawned, the fetch fails, or the
/// index contains no build-date entries.
fn fetch_latest_build(index_url: &str) -> Result<String, RuntimeError> {
    let output = Command::new("curl")
        .args(["-fsSL", "--max-time", "30", index_url])
        .output()
        .map_err(|e| RuntimeError::ExecFailed(format!("curl failed: {e}")))?;
    if !output.status.success() {
        return Err(RuntimeError::ExecFailed(format!(
            "failed to fetch image index from {index_url}: {}",
            String::from_utf8_lossy(&output.stderr)
        )));
    }
    let body = String::from_utf8_lossy(&output.stdout);
    // LXC image server uses build dates like "20260220_04:20/" or URL-encoded "20260220_04%3A20/"
    let mut builds: Vec<String> = body
        .lines()
        .filter_map(|line| {
            // Pull the target of each href="..." attribute out of the index HTML.
            let href = line.split("href=\"").nth(1)?;
            let raw = href.split('"').next()?;
            let name = raw.trim_end_matches('/');
            // Decode %3A -> : so encoded and plain entries compare equally.
            let decoded = name.replace("%3A", ":");
            // Build dates start with digits (e.g. "20260220_04:20")
            if decoded.starts_with(|c: char| c.is_ascii_digit()) && decoded.len() >= 8 {
                Some(decoded)
            } else {
                None
            }
        })
        .collect();
    // YYYYMMDD_HH:MM sorts lexicographically in chronological order,
    // so the last entry after sorting is the newest build.
    builds.sort();
    builds
        .last()
        .cloned()
        .ok_or_else(|| RuntimeError::ExecFailed(format!("no builds found at {index_url}")))
}
/// Percent-encode the colon in an LXC build-date path segment
/// (e.g. "20260220_04:20" -> "20260220_04%3A20") for URL construction.
fn url_encode_build(build: &str) -> String {
    let mut encoded = String::with_capacity(build.len() + 2);
    for ch in build.chars() {
        if ch == ':' {
            encoded.push_str("%3A");
        } else {
            encoded.push(ch);
        }
    }
    encoded
}
/// Resolve the newest build under the index `base_idx` and return the
/// full URL of its rootfs tarball (colon re-encoded for the URL path).
fn build_download_url(base_idx: &str) -> Result<String, RuntimeError> {
    let newest = url_encode_build(&fetch_latest_build(base_idx)?);
    Ok(format!("{base_idx}{newest}/rootfs.tar.xz"))
}
/// Compute the concrete download URL for an image source.
///
/// Distro sources query the LXC image index for their latest build;
/// custom sources are returned verbatim.
///
/// # Errors
/// Propagates index-fetch failures from `build_download_url`.
fn download_url(source: &ImageSource) -> Result<String, RuntimeError> {
    let idx = match source {
        // The previous special case for variant == "tumbleweed" was dead
        // code: both branches produced lxc_rootfs_url("opensuse", variant).
        ImageSource::OpenSuse { variant } => lxc_rootfs_url("opensuse", variant),
        ImageSource::Ubuntu { codename } => lxc_rootfs_url("ubuntu", codename),
        ImageSource::Debian { codename } => lxc_rootfs_url("debian", codename),
        ImageSource::Fedora { version } => lxc_rootfs_url("fedora", version),
        ImageSource::Arch => lxc_rootfs_url("archlinux", "current"),
        // Custom URLs need no index lookup.
        ImageSource::Custom { url } => return Ok(url.clone()),
    };
    build_download_url(&idx)
}
/// On-disk cache of extracted base images under `<store_root>/images`.
pub struct ImageCache {
    // One subdirectory per cache key, each holding a "rootfs" dir and a
    // "rootfs.blake3" digest file.
    cache_dir: PathBuf,
}
impl ImageCache {
    /// Cache rooted at `<store_root>/images`.
    pub fn new(store_root: &Path) -> Self {
        Self {
            cache_dir: store_root.join("images"),
        }
    }

    /// Path of the extracted rootfs for `cache_key` (may not exist yet).
    pub fn rootfs_path(&self, cache_key: &str) -> PathBuf {
        self.cache_dir.join(cache_key).join("rootfs")
    }

    /// A rootfs counts as cached once its /etc directory exists — a cheap
    /// proxy for "extraction completed".
    pub fn is_cached(&self, cache_key: &str) -> bool {
        self.rootfs_path(cache_key).join("etc").exists()
    }

    /// Download and extract the image unless already cached, reporting
    /// progress via `progress`. Returns the rootfs path.
    ///
    /// Relies on external `curl` and `tar`; on failure the partial cache
    /// entry is removed so a retry starts clean.
    pub fn ensure_image(
        &self,
        resolved: &ResolvedImage,
        progress: &dyn Fn(&str),
    ) -> Result<PathBuf, RuntimeError> {
        let rootfs = self.rootfs_path(&resolved.cache_key);
        if self.is_cached(&resolved.cache_key) {
            progress(&format!("using cached image: {}", resolved.display_name));
            return Ok(rootfs);
        }
        std::fs::create_dir_all(&rootfs)?;
        progress(&format!(
            "resolving image URL for {}...",
            resolved.display_name
        ));
        let url = download_url(&resolved.source)?;
        let tarball = self
            .cache_dir
            .join(&resolved.cache_key)
            .join("rootfs.tar.xz");
        progress(&format!("downloading {url}..."));
        let status = Command::new("curl")
            .args([
                "-fSL",
                "--progress-bar",
                "--max-time",
                "600",
                "-o",
                &tarball.to_string_lossy(),
                &url,
            ])
            .status()
            .map_err(|e| RuntimeError::ExecFailed(format!("curl download failed: {e}")))?;
        if !status.success() {
            // Drop the partial entry so the next attempt re-downloads.
            let _ = std::fs::remove_dir_all(self.cache_dir.join(&resolved.cache_key));
            return Err(RuntimeError::ExecFailed(format!(
                "failed to download image from {url}"
            )));
        }
        progress("extracting rootfs...");
        // --no-same-owner/--no-same-permissions: extraction runs unprivileged;
        // dev/* is excluded because device nodes cannot be created here.
        let status = Command::new("tar")
            .args([
                "xf",
                &tarball.to_string_lossy(),
                "-C",
                &rootfs.to_string_lossy(),
                "--no-same-owner",
                "--no-same-permissions",
                "--exclude=dev/*",
            ])
            .status()
            .map_err(|e| RuntimeError::ExecFailed(format!("tar extract failed: {e}")))?;
        if !status.success() {
            let _ = force_remove(&self.cache_dir.join(&resolved.cache_key));
            return Err(RuntimeError::ExecFailed(
                "failed to extract rootfs tarball".to_owned(),
            ));
        }
        // Ensure all extracted files are user-readable and directories are user-writable.
        // LXC rootfs tarballs contain setuid binaries and root-owned restrictive permissions.
        let _ = Command::new("chmod")
            .args(["-R", "u+rwX", &rootfs.to_string_lossy()])
            .status();
        let _ = std::fs::remove_file(&tarball);
        // Compute and store the content digest for future integrity verification.
        // NOTE(review): the tarball was deleted just above, so this always
        // takes compute_image_digest's directory-manifest path, never the
        // tarball-hash path — confirm that is intended.
        progress("computing image digest...");
        let digest = compute_image_digest(&rootfs)?;
        let digest_file = self
            .cache_dir
            .join(&resolved.cache_key)
            .join("rootfs.blake3");
        std::fs::write(&digest_file, &digest)?;
        progress(&format!("image {} ready", resolved.display_name));
        Ok(rootfs)
    }

    /// Verify the integrity of a cached image by recomputing its digest
    /// and comparing it to the stored value. Returns an error if the image
    /// has been corrupted or tampered with.
    pub fn verify_image(&self, cache_key: &str) -> Result<(), RuntimeError> {
        let rootfs = self.rootfs_path(cache_key);
        let digest_file = self.cache_dir.join(cache_key).join("rootfs.blake3");
        if !digest_file.exists() {
            // No stored digest (pre-verification image); compute and store one now
            let digest = compute_image_digest(&rootfs)?;
            std::fs::write(&digest_file, &digest)?;
            return Ok(());
        }
        let stored = std::fs::read_to_string(&digest_file)
            .map_err(|e| RuntimeError::ExecFailed(format!("failed to read digest file: {e}")))?;
        let current = compute_image_digest(&rootfs)?;
        if stored.trim() != current.trim() {
            return Err(RuntimeError::ExecFailed(format!(
                "image integrity check failed for {cache_key}: stored digest {stored} != computed {current}"
            )));
        }
        Ok(())
    }
}
/// Content digest (blake3, hex) of a rootfs directory.
///
/// If the original `rootfs.tar.xz` still sits next to the rootfs it is
/// hashed byte-for-byte; otherwise a sorted manifest of relative paths
/// and file sizes serves as a cheap, deterministic fingerprint that does
/// not read every file's contents.
pub fn compute_image_digest(rootfs: &Path) -> Result<String, RuntimeError> {
    let tarball = rootfs
        .parent()
        .map(|p| p.join("rootfs.tar.xz"))
        .filter(|p| p.exists());
    if let Some(tb) = tarball {
        let data = std::fs::read(&tb)
            .map_err(|e| RuntimeError::ExecFailed(format!("failed to read tarball: {e}")))?;
        return Ok(blake3::hash(&data).to_hex().to_string());
    }
    // Fallback: hash a deterministic (sorted) file listing.
    let mut entries = Vec::new();
    collect_file_entries(rootfs, rootfs, &mut entries)?;
    entries.sort();
    let mut hasher = blake3::Hasher::new();
    for record in &entries {
        hasher.update(record.as_bytes());
    }
    Ok(hasher.finalize().to_hex().to_string())
}
/// Recursively append "path:size" records for files and "path/" records
/// for directories under `dir`, with paths made relative to `base`.
/// Unreadable directories are skipped silently (best-effort listing).
fn collect_file_entries(
    base: &Path,
    dir: &Path,
    entries: &mut Vec<String>,
) -> Result<(), RuntimeError> {
    let Ok(listing) = std::fs::read_dir(dir) else {
        return Ok(());
    };
    for item in listing {
        let item = item?;
        let kind = item.file_type()?;
        let full = item.path();
        let rel = full.strip_prefix(base).unwrap_or(&full).to_string_lossy();
        if kind.is_dir() {
            entries.push(format!("{rel}/"));
            collect_file_entries(base, &full, entries)?;
        } else if kind.is_file() {
            // Unreadable metadata degrades to size 0 rather than failing.
            let size = item.metadata().map_or(0, |m| m.len());
            entries.push(format!("{rel}:{size}"));
        }
    }
    Ok(())
}
/// Argv for querying the installed versions of `packages` inside the
/// container under the given package manager. Unknown managers yield an
/// empty Vec.
pub fn query_versions_command(pkg_manager: &str, packages: &[String]) -> Vec<String> {
    let base: &[&str] = match pkg_manager {
        // dpkg-query outputs name\tversion for each installed package
        "apt" => &["dpkg-query", "-W", "-f", "${Package}\\t${Version}\\n"],
        // Both dnf- and zypper-managed images carry rpm for queries.
        "dnf" | "zypper" => &["rpm", "-q", "--qf", "%{NAME}\\t%{VERSION}-%{RELEASE}\\n"],
        // pacman -Q outputs "name version" per line
        "pacman" => &["pacman", "-Q"],
        _ => return Vec::new(),
    };
    base.iter()
        .map(|s| (*s).to_owned())
        .chain(packages.iter().cloned())
        .collect()
}
/// Parse version-query output into (name, version) pairs.
///
/// pacman separates fields with a space, the dpkg/rpm formats with a tab;
/// blank lines and lines without the separator are skipped.
pub fn parse_version_output(pkg_manager: &str, output: &str) -> Vec<(String, String)> {
    let sep = if pkg_manager == "pacman" { ' ' } else { '\t' };
    output
        .lines()
        .map(str::trim)
        .filter(|line| !line.is_empty())
        .filter_map(|line| {
            line.split_once(sep)
                .map(|(name, version)| (name.to_owned(), version.to_owned()))
        })
        .collect()
}
/// Recursively delete `path`, first loosening user permissions (via
/// external chmod, best-effort) so read-only trees extracted from image
/// tarballs can actually be removed. A missing path is a no-op.
pub fn force_remove(path: &Path) -> Result<(), RuntimeError> {
    if !path.exists() {
        return Ok(());
    }
    let _ = Command::new("chmod")
        .args(["-R", "u+rwX", &path.to_string_lossy()])
        .status();
    std::fs::remove_dir_all(path)?;
    Ok(())
}
/// Identify the package manager shipped in `rootfs` by probing for its
/// binary under usr/bin. Probe order (apt, dnf, zypper, pacman) is
/// significant and matches the original if/else chain.
pub fn detect_package_manager(rootfs: &Path) -> Option<&'static str> {
    const CANDIDATES: &[(&str, &[&str])] = &[
        ("apt", &["usr/bin/apt-get", "usr/bin/apt"]),
        ("dnf", &["usr/bin/dnf", "usr/bin/dnf5"]),
        ("zypper", &["usr/bin/zypper"]),
        ("pacman", &["usr/bin/pacman"]),
    ];
    CANDIDATES
        .iter()
        .find(|(_, probes)| probes.iter().any(|p| rootfs.join(p).exists()))
        .map(|(name, _)| *name)
}
/// Non-interactive install argv for `packages` under the given package
/// manager. Weak/recommended dependencies are suppressed on every
/// manager that supports it. Returns an empty Vec when there is nothing
/// to install or the manager is unknown.
pub fn install_packages_command(pkg_manager: &str, packages: &[String]) -> Vec<String> {
    if packages.is_empty() {
        return Vec::new();
    }
    let base: &[&str] = match pkg_manager {
        "apt" => &["apt-get", "install", "-y", "--no-install-recommends"],
        "dnf" => &["dnf", "install", "-y", "--setopt=install_weak_deps=False"],
        "zypper" => &["zypper", "--non-interactive", "install", "--no-recommends"],
        "pacman" => &["pacman", "-S", "--noconfirm", "--needed"],
        _ => return Vec::new(),
    };
    base.iter()
        .map(|s| (*s).to_owned())
        .chain(packages.iter().cloned())
        .collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve_known_images() {
assert!(resolve_image("rolling").is_ok());
assert!(resolve_image("ubuntu/24.04").is_ok());
assert!(resolve_image("debian/bookworm").is_ok());
assert!(resolve_image("fedora/41").is_ok());
assert!(resolve_image("archlinux").is_ok());
}
#[test]
fn resolve_unknown_image_fails() {
assert!(resolve_image("not-a-distro").is_err());
}
#[test]
fn resolve_custom_url() {
let r = resolve_image("https://example.com/rootfs.tar.xz").unwrap();
assert!(r.cache_key.starts_with("custom-"));
}
#[test]
fn install_commands_correct() {
let pkgs = vec!["git".to_owned(), "cmake".to_owned()];
let cmd = install_packages_command("apt", &pkgs);
assert_eq!(cmd[0], "apt-get");
assert!(cmd.contains(&"git".to_owned()));
let cmd = install_packages_command("zypper", &pkgs);
assert_eq!(cmd[0], "zypper");
assert!(cmd.contains(&"--non-interactive".to_owned()));
let cmd = install_packages_command("pacman", &pkgs);
assert_eq!(cmd[0], "pacman");
}
#[test]
fn detect_no_pkg_manager_on_empty_dir() {
let dir = tempfile::tempdir().unwrap();
assert!(detect_package_manager(dir.path()).is_none());
}
#[test]
fn parse_apt_version_output() {
let output = "git\t1:2.43.0-1ubuntu7\nclang\t1:18.1.3-1\n";
let versions = parse_version_output("apt", output);
assert_eq!(versions.len(), 2);
assert_eq!(
versions[0],
("git".to_owned(), "1:2.43.0-1ubuntu7".to_owned())
);
assert_eq!(versions[1], ("clang".to_owned(), "1:18.1.3-1".to_owned()));
}
#[test]
fn parse_rpm_version_output() {
let output = "git\t2.44.0-1.fc41\ncmake\t3.28.3-1.fc41\n";
let versions = parse_version_output("zypper", output);
assert_eq!(versions.len(), 2);
assert_eq!(versions[0].0, "git");
assert_eq!(versions[0].1, "2.44.0-1.fc41");
}
#[test]
fn parse_pacman_version_output() {
let output = "git 2.44.0-1\ncmake 3.28.3-1\n";
let versions = parse_version_output("pacman", output);
assert_eq!(versions.len(), 2);
assert_eq!(versions[0], ("git".to_owned(), "2.44.0-1".to_owned()));
}
#[test]
fn parse_empty_version_output() {
let versions = parse_version_output("apt", "");
assert!(versions.is_empty());
let versions = parse_version_output("apt", "\n\n");
assert!(versions.is_empty());
}
#[test]
fn query_versions_commands_generated() {
let pkgs = vec!["git".to_owned()];
let cmd = query_versions_command("apt", &pkgs);
assert_eq!(cmd[0], "dpkg-query");
let cmd = query_versions_command("zypper", &pkgs);
assert_eq!(cmd[0], "rpm");
let cmd = query_versions_command("dnf", &pkgs);
assert_eq!(cmd[0], "rpm");
let cmd = query_versions_command("pacman", &pkgs);
assert_eq!(cmd[0], "pacman");
let cmd = query_versions_command("unknown", &pkgs);
assert!(cmd.is_empty());
}
#[test]
fn compute_digest_of_test_rootfs() {
let dir = tempfile::tempdir().unwrap();
let rootfs = dir.path().join("rootfs");
std::fs::create_dir_all(rootfs.join("etc")).unwrap();
std::fs::write(rootfs.join("etc/hostname"), "test").unwrap();
std::fs::create_dir_all(rootfs.join("usr/bin")).unwrap();
std::fs::write(rootfs.join("usr/bin/hello"), "#!/bin/sh\necho hi").unwrap();
let digest = compute_image_digest(&rootfs).unwrap();
assert_eq!(digest.len(), 64);
// Same content = same digest (determinism)
let digest2 = compute_image_digest(&rootfs).unwrap();
assert_eq!(digest, digest2);
}
#[test]
fn detect_apt_package_manager() {
let dir = tempfile::tempdir().unwrap();
std::fs::create_dir_all(dir.path().join("usr/bin")).unwrap();
std::fs::write(dir.path().join("usr/bin/apt-get"), "").unwrap();
assert_eq!(detect_package_manager(dir.path()), Some("apt"));
}
#[test]
fn detect_zypper_package_manager() {
let dir = tempfile::tempdir().unwrap();
std::fs::create_dir_all(dir.path().join("usr/bin")).unwrap();
std::fs::write(dir.path().join("usr/bin/zypper"), "").unwrap();
assert_eq!(detect_package_manager(dir.path()), Some("zypper"));
}
#[test]
fn detect_pacman_package_manager() {
    // A rootfs containing usr/bin/pacman is classified as pacman-based.
    let tmp = tempfile::tempdir().unwrap();
    let bin = tmp.path().join("usr/bin");
    std::fs::create_dir_all(&bin).unwrap();
    std::fs::write(bin.join("pacman"), "").unwrap();
    assert_eq!(detect_package_manager(tmp.path()), Some("pacman"));
}
#[test]
fn resolve_all_image_aliases() {
    // Every alias documented for users must map to a concrete image.
    let aliases = [
        "rolling",
        "opensuse",
        "opensuse/tumbleweed",
        "tumbleweed",
        "opensuse/leap",
        "leap",
        "ubuntu",
        "ubuntu/24.04",
        "ubuntu/noble",
        "ubuntu/22.04",
        "ubuntu/jammy",
        "ubuntu/20.04",
        "ubuntu/focal",
        "ubuntu/24.10",
        "ubuntu/oracular",
        "debian",
        "debian/bookworm",
        "debian/trixie",
        "debian/sid",
        "fedora",
        "fedora/40",
        "fedora/41",
        "fedora/42",
        "arch",
        "archlinux",
    ];
    for alias in aliases {
        assert!(resolve_image(alias).is_ok(), "failed to resolve alias: {alias}");
    }
}
#[test]
fn install_empty_packages_returns_empty() {
    // No requested packages -> no install command should be generated.
    assert!(install_packages_command("apt", &[]).is_empty());
}
}

View file

@ -0,0 +1,46 @@
//! Runtime backends and sandbox infrastructure for Karapace environments.
//!
//! This crate implements the execution layer: pluggable `RuntimeBackend` trait with
//! namespace (user-namespace + fuse-overlayfs) and OCI (runc) backends, sandbox
//! setup script generation, host integration (GPU, audio, X11/Wayland passthrough),
//! base image resolution, prerequisite checking, and security policy enforcement.

// Execution-layer submodules.
pub mod backend; // `RuntimeBackend` trait, `RuntimeSpec`, backend selection
pub mod export; // desktop app export to the host
pub mod host; // host integration (Wayland/X11, audio, GPU, SSH agent, ...)
pub mod image; // base image resolution, download cache, digesting
pub mod mock; // deterministic in-memory backend for tests
pub mod namespace; // unprivileged user-namespace + fuse-overlayfs backend
pub mod oci; // OCI backend driving crun/runc/youki
pub mod prereq; // host prerequisite detection with install hints
pub mod sandbox; // overlay mounting and container setup scripts
pub mod security; // `SecurityPolicy` enforcement
pub mod terminal; // OSC 777 markers for container-aware terminals

// Most-used items re-exported at the crate root for ergonomic imports.
pub use backend::{select_backend, RuntimeBackend, RuntimeSpec, RuntimeStatus};
pub use prereq::{check_namespace_prereqs, check_oci_prereqs, format_missing, MissingPrereq};
pub use security::SecurityPolicy;

use thiserror::Error;
/// Errors produced by runtime backends, sandbox setup, and image handling.
///
/// The `#[error(...)]` strings double as user-facing messages, so they are
/// phrased for CLI output.
#[derive(Debug, Error)]
pub enum RuntimeError {
    /// Wrapped filesystem or process I/O failure.
    #[error("runtime I/O error: {0}")]
    Io(#[from] std::io::Error),
    /// The requested backend cannot run on this host (missing tools or
    /// kernel support); see the `prereq` module for diagnostics.
    #[error("backend '{0}' is not available on this system")]
    BackendUnavailable(String),
    /// Operation required a running environment, but it is stopped.
    #[error("environment '{0}' is not running")]
    NotRunning(String),
    /// Operation required a stopped environment, but it is running.
    #[error("environment '{0}' is already running")]
    AlreadyRunning(String),
    /// A `SecurityPolicy` rule rejected the requested configuration.
    #[error("security policy violation: {0}")]
    PolicyViolation(String),
    /// A bind mount was rejected by the mount whitelist.
    #[error("mount not allowed by policy: {0}")]
    MountDenied(String),
    /// A device passthrough was rejected by the device policy.
    #[error("device access not allowed: {0}")]
    DeviceDenied(String),
    /// A command or setup step inside/around the container failed.
    #[error("runtime execution failed: {0}")]
    ExecFailed(String),
    /// The requested base image alias/name could not be resolved.
    #[error("image not found: {0}")]
    ImageNotFound(String),
}

View file

@ -0,0 +1,255 @@
use crate::backend::{RuntimeBackend, RuntimeSpec, RuntimeStatus};
use crate::RuntimeError;
use karapace_schema::{ResolutionResult, ResolvedPackage};
use std::collections::HashMap;
use std::sync::Mutex;
/// Deterministic test backend: tracks environment state in memory and
/// writes a small fake filesystem instead of running real containers.
pub struct MockBackend {
    // env_id -> running flag (`false` = built but not entered).
    state: Mutex<HashMap<String, bool>>,
}
impl Default for MockBackend {
fn default() -> Self {
Self {
state: Mutex::new(HashMap::new()),
}
}
}
impl MockBackend {
    /// Creates a fresh mock backend; equivalent to `Self::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
impl RuntimeBackend for MockBackend {
    /// Stable identifier for this backend.
    fn name(&self) -> &'static str {
        "mock"
    }

    /// The mock backend has no host requirements, so it is always usable.
    fn available(&self) -> bool {
        true
    }

    /// Deterministic, network-free resolution: the image digest is a blake3
    /// hash of the image name, and every requested package resolves to the
    /// fixed version "0.0.0-mock", so repeated runs are byte-identical.
    fn resolve(&self, spec: &RuntimeSpec) -> Result<ResolutionResult, RuntimeError> {
        // Mock resolution: deterministic digest from image name,
        // packages get version "0.0.0-mock" for deterministic identity.
        let base_image_digest =
            blake3::hash(format!("mock-image:{}", spec.manifest.base_image).as_bytes())
                .to_hex()
                .to_string();
        let resolved_packages = spec
            .manifest
            .system_packages
            .iter()
            .map(|name| ResolvedPackage {
                name: name.clone(),
                version: "0.0.0-mock".to_owned(),
            })
            .collect();
        Ok(ResolutionResult {
            base_image_digest,
            resolved_packages,
        })
    }

    /// Records the environment as built (not running) and materializes a
    /// small fake filesystem under the overlay's `upper` dir.
    fn build(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let mut state = self
            .state
            .lock()
            .map_err(|e| RuntimeError::ExecFailed(format!("mutex poisoned: {e}")))?;
        // `false` = built but not yet entered.
        state.insert(spec.env_id.clone(), false);
        let root = std::path::Path::new(&spec.root_path);
        if !root.exists() {
            std::fs::create_dir_all(root)?;
        }
        let overlay = std::path::Path::new(&spec.overlay_path);
        if !overlay.exists() {
            std::fs::create_dir_all(overlay)?;
        }
        // Create upper dir with mock filesystem content so engine tests
        // exercise the real layer capture path (pack_layer on upper dir).
        let upper = overlay.join("upper");
        std::fs::create_dir_all(&upper)?;
        std::fs::write(
            upper.join(".karapace-mock"),
            format!("mock-env:{}", spec.env_id),
        )?;
        // One marker file per requested package so layer contents vary
        // with the manifest.
        for pkg in &spec.manifest.system_packages {
            std::fs::write(
                upper.join(format!(".pkg-{pkg}")),
                format!("{pkg}@0.0.0-mock"),
            )?;
        }
        Ok(())
    }

    /// Flips the environment to "running"; a second enter while running
    /// is rejected with `AlreadyRunning`.
    fn enter(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let mut state = self
            .state
            .lock()
            .map_err(|e| RuntimeError::ExecFailed(format!("mutex poisoned: {e}")))?;
        if state.get(&spec.env_id) == Some(&true) {
            return Err(RuntimeError::AlreadyRunning(spec.env_id.clone()));
        }
        state.insert(spec.env_id.clone(), true);
        Ok(())
    }

    /// Echoes the command back as stdout with a "mock-exec:" prefix and a
    /// genuine success `ExitStatus`.
    fn exec(
        &self,
        _spec: &RuntimeSpec,
        command: &[String],
    ) -> Result<std::process::Output, RuntimeError> {
        let stdout = format!("mock-exec: {}\n", command.join(" "));
        // Create a real success ExitStatus portably: `ExitStatus` has no
        // public constructor, so run `true` (PATH first, then /bin/true).
        let success_status = std::process::Command::new("true")
            .status()
            .unwrap_or_else(|_| {
                std::process::Command::new("/bin/true")
                    .status()
                    .expect("cannot execute /bin/true")
            });
        Ok(std::process::Output {
            status: success_status,
            stdout: stdout.into_bytes(),
            stderr: Vec::new(),
        })
    }

    /// Forgets the environment and removes its overlay directory.
    fn destroy(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let mut state = self
            .state
            .lock()
            .map_err(|e| RuntimeError::ExecFailed(format!("mutex poisoned: {e}")))?;
        state.remove(&spec.env_id);
        let overlay = std::path::Path::new(&spec.overlay_path);
        if overlay.exists() {
            std::fs::remove_dir_all(overlay)?;
        }
        Ok(())
    }

    /// Reports the tracked running flag; a fixed fake PID (99999) is
    /// returned while "running" so callers can display something.
    fn status(&self, env_id: &str) -> Result<RuntimeStatus, RuntimeError> {
        let state = self
            .state
            .lock()
            .map_err(|e| RuntimeError::ExecFailed(format!("mutex poisoned: {e}")))?;
        let running = state.get(env_id).copied().unwrap_or(false);
        Ok(RuntimeStatus {
            env_id: env_id.to_owned(),
            running,
            pid: if running { Some(99999) } else { None },
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use karapace_schema::parse_manifest_str;

    /// Builds a minimal spec (rolling base, no packages) rooted in `dir`.
    fn test_spec(dir: &std::path::Path) -> RuntimeSpec {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        RuntimeSpec {
            env_id: "mock-test".to_owned(),
            root_path: dir.join("root").to_string_lossy().to_string(),
            overlay_path: dir.join("overlay").to_string_lossy().to_string(),
            store_root: dir.to_string_lossy().to_string(),
            manifest,
        }
    }

    #[test]
    fn mock_resolve_determinism() {
        // Resolving the same spec twice must yield identical digests and
        // identical package lists (name + version).
        let dir = tempfile::tempdir().unwrap();
        let backend = MockBackend::new();
        let spec = test_spec(dir.path());
        let r1 = backend.resolve(&spec).unwrap();
        let r2 = backend.resolve(&spec).unwrap();
        assert_eq!(r1.base_image_digest, r2.base_image_digest);
        assert_eq!(r1.resolved_packages.len(), r2.resolved_packages.len());
        for (a, b) in r1.resolved_packages.iter().zip(r2.resolved_packages.iter()) {
            assert_eq!(a.name, b.name);
            assert_eq!(a.version, b.version);
        }
    }

    #[test]
    fn mock_resolve_with_packages() {
        // Every requested package resolves to the fixed mock version.
        let dir = tempfile::tempdir().unwrap();
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[system]
packages = ["git", "clang", "cmake"]
[runtime]
backend = "mock"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let spec = RuntimeSpec {
            env_id: "mock-pkg-test".to_owned(),
            root_path: dir.path().join("root").to_string_lossy().to_string(),
            overlay_path: dir.path().join("overlay").to_string_lossy().to_string(),
            store_root: dir.path().to_string_lossy().to_string(),
            manifest,
        };
        let backend = MockBackend::new();
        let result = backend.resolve(&spec).unwrap();
        assert_eq!(result.resolved_packages.len(), 3);
        assert!(result
            .resolved_packages
            .iter()
            .all(|p| p.version == "0.0.0-mock"));
        assert!(result.resolved_packages.iter().any(|p| p.name == "git"));
        assert!(!result.base_image_digest.is_empty());
    }

    #[test]
    fn mock_lifecycle() {
        // build -> stopped; enter -> running (double enter rejected);
        // destroy -> stopped again.
        let dir = tempfile::tempdir().unwrap();
        let backend = MockBackend::new();
        let spec = test_spec(dir.path());
        backend.build(&spec).unwrap();
        let status = backend.status(&spec.env_id).unwrap();
        assert!(!status.running);
        backend.enter(&spec).unwrap();
        let status = backend.status(&spec.env_id).unwrap();
        assert!(status.running);
        assert_eq!(status.pid, Some(99999));
        assert!(backend.enter(&spec).is_err());
        backend.destroy(&spec).unwrap();
        let status = backend.status(&spec.env_id).unwrap();
        assert!(!status.running);
    }
}

View file

@ -0,0 +1,377 @@
use crate::backend::{RuntimeBackend, RuntimeSpec, RuntimeStatus};
use crate::host::compute_host_integration;
use crate::image::{
compute_image_digest, detect_package_manager, force_remove, install_packages_command,
parse_version_output, query_versions_command, resolve_image, ImageCache,
};
use crate::sandbox::{
enter_interactive, exec_in_container, install_packages_in_container, mount_overlay,
setup_container_rootfs, unmount_overlay, SandboxConfig,
};
use crate::terminal;
use crate::RuntimeError;
use karapace_schema::{ResolutionResult, ResolvedPackage};
use std::path::{Path, PathBuf};
/// Unprivileged backend using user namespaces (`unshare`) plus
/// fuse-overlayfs for writable layers; no root required.
pub struct NamespaceBackend {
    // Base directory holding cached images and per-environment state.
    store_root: PathBuf,
}
impl Default for NamespaceBackend {
    /// Roots the backend at the default store location
    /// (`$HOME/.local/share/karapace`, or `/tmp/karapace` without `HOME`).
    fn default() -> Self {
        let store_root = default_store_root();
        Self { store_root }
    }
}
impl NamespaceBackend {
    /// Creates a backend rooted at the default store location.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a backend with an explicit store root (used by tests).
    pub fn with_store_root(store_root: impl Into<PathBuf>) -> Self {
        Self {
            store_root: store_root.into(),
        }
    }

    /// Per-environment state directory: `<store_root>/env/<env_id>`.
    fn env_dir(&self, env_id: &str) -> PathBuf {
        self.store_root.join("env").join(env_id)
    }
}
impl RuntimeBackend for NamespaceBackend {
    /// Stable identifier for this backend.
    fn name(&self) -> &'static str {
        "namespace"
    }

    /// Probes whether unprivileged user namespaces actually work by
    /// running `true` inside `unshare --user`.
    fn available(&self) -> bool {
        // Check that user namespaces work
        let output = std::process::Command::new("unshare")
            .args(["--user", "--map-root-user", "--fork", "true"])
            .output();
        matches!(output, Ok(o) if o.status.success())
    }

    /// Resolves the manifest to concrete artifacts: downloads/caches the
    /// base image, digests it, and — when packages are requested —
    /// installs them in a throwaway overlay to pin exact versions.
    fn resolve(&self, spec: &RuntimeSpec) -> Result<ResolutionResult, RuntimeError> {
        let progress = |msg: &str| {
            eprintln!("[karapace] {msg}");
        };
        // Download/cache the base image
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.ensure_image(&resolved, &progress)?;
        // Compute content digest of the base image
        let base_image_digest = compute_image_digest(&rootfs)?;
        // If there are packages to resolve, set up a temporary overlay
        // and install+query to get exact versions
        let resolved_packages = if spec.manifest.system_packages.is_empty() {
            Vec::new()
        } else {
            let tmp_dir = tempfile::tempdir()
                .map_err(|e| RuntimeError::ExecFailed(format!("failed to create temp dir: {e}")))?;
            let tmp_env = tmp_dir.path().join("resolve-env");
            std::fs::create_dir_all(&tmp_env)?;
            let mut sandbox = SandboxConfig::new(rootfs.clone(), "resolve-tmp", &tmp_env);
            sandbox.isolate_network = false; // need network for package resolution
            mount_overlay(&sandbox)?;
            setup_container_rootfs(&sandbox)?;
            // Run resolution inside an inner closure so cleanup always runs,
            // even if detect/install/query fails.
            let resolve_inner = || -> Result<Vec<(String, String)>, RuntimeError> {
                let pkg_mgr = detect_package_manager(&sandbox.overlay_merged)
                    .or_else(|| detect_package_manager(&rootfs))
                    .ok_or_else(|| {
                        RuntimeError::ExecFailed(
                            "no supported package manager found in the image".to_owned(),
                        )
                    })?;
                let install_cmd = install_packages_command(pkg_mgr, &spec.manifest.system_packages);
                install_packages_in_container(&sandbox, &install_cmd)?;
                let query_cmd = query_versions_command(pkg_mgr, &spec.manifest.system_packages);
                let output = exec_in_container(&sandbox, &query_cmd)?;
                let stdout = String::from_utf8_lossy(&output.stdout);
                Ok(parse_version_output(pkg_mgr, &stdout))
            };
            let result = resolve_inner();
            // Always cleanup: unmount overlay and remove temp directory
            let _ = unmount_overlay(&sandbox);
            let _ = std::fs::remove_dir_all(&tmp_env);
            let versions = result?;
            // Map back to ResolvedPackage, falling back to "unresolved" if query failed
            spec.manifest
                .system_packages
                .iter()
                .map(|name| {
                    let version = versions
                        .iter()
                        .find(|(n, _)| n == name)
                        .map_or_else(|| "unresolved".to_owned(), |(_, v)| v.clone());
                    ResolvedPackage {
                        name: name.clone(),
                        version,
                    }
                })
                .collect()
        };
        Ok(ResolutionResult {
            base_image_digest,
            resolved_packages,
        })
    }

    /// Builds the environment: ensures the base image, mounts the overlay,
    /// prepares the rootfs, installs packages, then unmounts and drops a
    /// `.built` marker used by `enter`/`exec` as a gate.
    fn build(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let env_dir = self.env_dir(&spec.env_id);
        std::fs::create_dir_all(&env_dir)?;
        let progress = |msg: &str| {
            eprintln!("[karapace] {msg}");
        };
        // Resolve and download the base image
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.ensure_image(&resolved, &progress)?;
        // Set up overlay filesystem
        let mut sandbox = SandboxConfig::new(rootfs.clone(), &spec.env_id, &env_dir);
        sandbox.isolate_network = spec.manifest.network_isolation;
        mount_overlay(&sandbox)?;
        // Set up container rootfs (create dirs, user, etc.)
        setup_container_rootfs(&sandbox)?;
        // Install system packages if any
        if !spec.manifest.system_packages.is_empty() {
            let pkg_mgr = detect_package_manager(&sandbox.overlay_merged)
                .or_else(|| detect_package_manager(&rootfs))
                .ok_or_else(|| {
                    RuntimeError::ExecFailed(
                        "no supported package manager found in the image. \
                         Supported: apt, dnf, zypper, pacman"
                            .to_owned(),
                    )
                })?;
            progress(&format!(
                "installing {} packages via {pkg_mgr}...",
                spec.manifest.system_packages.len()
            ));
            let install_cmd = install_packages_command(pkg_mgr, &spec.manifest.system_packages);
            install_packages_in_container(&sandbox, &install_cmd)?;
            progress("packages installed");
        }
        // Unmount overlay after build (will be re-mounted on enter)
        unmount_overlay(&sandbox)?;
        // Write state marker
        std::fs::write(env_dir.join(".built"), "1")?;
        progress(&format!(
            "environment {} built successfully ({} base)",
            &spec.env_id[..12.min(spec.env_id.len())],
            resolved.display_name
        ));
        Ok(())
    }

    /// Enters the environment interactively: mounts the overlay, applies
    /// host integration (Wayland, audio, GPU, ...), emits terminal markers,
    /// runs the shell, and always unmounts/cleans up afterwards.
    fn enter(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let env_dir = self.env_dir(&spec.env_id);
        if !env_dir.join(".built").exists() {
            return Err(RuntimeError::ExecFailed(format!(
                "environment {} has not been built yet. Run 'karapace build' first.",
                &spec.env_id[..12.min(spec.env_id.len())]
            )));
        }
        // Resolve image to get rootfs path
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.rootfs_path(&resolved.cache_key);
        // `etc` doubles as a cheap integrity probe for the cached rootfs.
        if !rootfs.join("etc").exists() {
            return Err(RuntimeError::ExecFailed(
                "base image rootfs is missing or corrupted. Run 'karapace rebuild'.".to_owned(),
            ));
        }
        // Create sandbox config
        let mut sandbox = SandboxConfig::new(rootfs, &spec.env_id, &env_dir);
        sandbox.isolate_network = spec.manifest.network_isolation;
        sandbox.hostname = format!("karapace-{}", &spec.env_id[..12.min(spec.env_id.len())]);
        // Compute host integration (Wayland, PipeWire, GPU, etc.)
        let host = compute_host_integration(&spec.manifest);
        sandbox.bind_mounts.extend(host.bind_mounts);
        sandbox.env_vars.extend(host.env_vars);
        // Mount overlay
        mount_overlay(&sandbox)?;
        // Set up rootfs
        setup_container_rootfs(&sandbox)?;
        // Mark as running (stores this process's PID; see `status`)
        std::fs::write(env_dir.join(".running"), format!("{}", std::process::id()))?;
        // Emit terminal markers (OSC 777 for container-aware terminals)
        terminal::emit_container_push(&spec.env_id, &sandbox.hostname);
        terminal::print_container_banner(
            &spec.env_id,
            &spec.manifest.base_image,
            &sandbox.hostname,
        );
        // Enter the container interactively
        let exit_code = enter_interactive(&sandbox);
        // Cleanup runs regardless of how the shell exited.
        terminal::emit_container_pop();
        terminal::print_container_exit(&spec.env_id);
        let _ = std::fs::remove_file(env_dir.join(".running"));
        let _ = unmount_overlay(&sandbox);
        match exit_code {
            Ok(0) => Ok(()),
            Ok(code) => Err(RuntimeError::ExecFailed(format!(
                "container shell exited with code {code}"
            ))),
            Err(e) => Err(e),
        }
    }

    /// Runs a single command inside the environment, mounting and
    /// unmounting the overlay around the call.
    fn exec(
        &self,
        spec: &RuntimeSpec,
        command: &[String],
    ) -> Result<std::process::Output, RuntimeError> {
        let env_dir = self.env_dir(&spec.env_id);
        if !env_dir.join(".built").exists() {
            return Err(RuntimeError::ExecFailed(format!(
                "environment {} has not been built yet. Run 'karapace build' first.",
                &spec.env_id[..12.min(spec.env_id.len())]
            )));
        }
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.rootfs_path(&resolved.cache_key);
        let mut sandbox = SandboxConfig::new(rootfs, &spec.env_id, &env_dir);
        sandbox.isolate_network = spec.manifest.network_isolation;
        let host = compute_host_integration(&spec.manifest);
        sandbox.bind_mounts.extend(host.bind_mounts);
        sandbox.env_vars.extend(host.env_vars);
        mount_overlay(&sandbox)?;
        setup_container_rootfs(&sandbox)?;
        let output = exec_in_container(&sandbox, command);
        // Unmount even when the command failed; preserve the command's result.
        let _ = unmount_overlay(&sandbox);
        output
    }

    /// Removes all on-disk state for the environment, unmounting any
    /// lingering overlay first (best-effort).
    fn destroy(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let env_dir = self.env_dir(&spec.env_id);
        // Unmount overlay if mounted; the rootfs path is irrelevant here.
        let sandbox_config =
            SandboxConfig::new(PathBuf::from("/nonexistent"), &spec.env_id, &env_dir);
        let _ = unmount_overlay(&sandbox_config);
        // Remove environment directory (force_remove handles restrictive permissions)
        force_remove(&env_dir)?;
        Ok(())
    }

    /// Reports running state from the `.running` PID file, validating that
    /// the recorded process is still alive (stale files are removed).
    fn status(&self, env_id: &str) -> Result<RuntimeStatus, RuntimeError> {
        let env_dir = self.env_dir(env_id);
        let running_file = env_dir.join(".running");
        if running_file.exists() {
            let pid_str = std::fs::read_to_string(&running_file).unwrap_or_default();
            let pid = pid_str.trim().parse::<u32>().ok();
            if pid.is_none() && !pid_str.trim().is_empty() {
                tracing::warn!(
                    "corrupt .running file for {}: could not parse PID from '{}'",
                    &env_id[..12.min(env_id.len())],
                    pid_str.trim()
                );
            }
            // Check if process is actually alive via /proc
            if let Some(p) = pid {
                let alive = Path::new(&format!("/proc/{p}")).exists();
                if !alive {
                    // Stale marker from a crashed/killed session: clean it up.
                    let _ = std::fs::remove_file(&running_file);
                    return Ok(RuntimeStatus {
                        env_id: env_id.to_owned(),
                        running: false,
                        pid: None,
                    });
                }
                return Ok(RuntimeStatus {
                    env_id: env_id.to_owned(),
                    running: true,
                    pid: Some(p),
                });
            }
        }
        Ok(RuntimeStatus {
            env_id: env_id.to_owned(),
            running: false,
            pid: None,
        })
    }
}
/// Default on-disk store root for environments and cached images.
///
/// Prefers `$HOME/.local/share/karapace` (XDG data-dir convention) and
/// falls back to `/tmp/karapace` when `HOME` is unset *or empty* so the
/// runtime still works in stripped-down environments (e.g. CI containers).
/// An empty `HOME` previously produced the relative path
/// `.local/share/karapace`, silently scattering state under the current
/// working directory.
fn default_store_root() -> PathBuf {
    match std::env::var("HOME") {
        Ok(home) if !home.is_empty() => PathBuf::from(home).join(".local/share/karapace"),
        _ => PathBuf::from("/tmp/karapace"),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn namespace_backend_available() {
        // Smoke test: the availability probe must not panic. Whether it
        // returns true depends on the host kernel configuration.
        let probe = NamespaceBackend::new().available();
        let _ = probe;
    }
    #[test]
    fn status_reports_not_running_for_nonexistent() {
        // An env id that was never built must report as stopped.
        let store = tempfile::tempdir().unwrap();
        let backend = NamespaceBackend::with_store_root(store.path());
        assert!(!backend.status("nonexistent").unwrap().running);
    }
}

View file

@ -0,0 +1,510 @@
use crate::backend::{RuntimeBackend, RuntimeSpec, RuntimeStatus};
use crate::host::compute_host_integration;
use crate::image::{
compute_image_digest, detect_package_manager, force_remove, install_packages_command,
parse_version_output, query_versions_command, resolve_image, ImageCache,
};
use crate::sandbox::{
exec_in_container, install_packages_in_container, mount_overlay, setup_container_rootfs,
unmount_overlay, SandboxConfig,
};
use crate::terminal;
use crate::RuntimeError;
use karapace_schema::{ResolutionResult, ResolvedPackage};
use std::path::{Path, PathBuf};
use std::process::Command;
/// Backend driving a standard OCI runtime binary (crun, runc, or youki).
pub struct OciBackend {
    // Base directory holding cached images and per-environment state.
    store_root: PathBuf,
}
impl Default for OciBackend {
    /// Roots the backend at the default store location
    /// (`$HOME/.local/share/karapace`, or `/tmp/karapace` without `HOME`).
    fn default() -> Self {
        let store_root = default_store_root();
        Self { store_root }
    }
}
impl OciBackend {
    /// Creates a backend rooted at the default store location.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a backend with an explicit store root (used by tests).
    pub fn with_store_root(store_root: impl Into<PathBuf>) -> Self {
        Self {
            store_root: store_root.into(),
        }
    }

    /// Finds the first working OCI runtime binary, probing `crun`, `runc`,
    /// and `youki` in that order via `--version`.
    fn find_runtime() -> Option<String> {
        for candidate in &["crun", "runc", "youki"] {
            if let Ok(output) = Command::new(candidate).arg("--version").output() {
                if output.status.success() {
                    return Some(candidate.to_string());
                }
            }
        }
        None
    }

    /// Per-environment state directory: `<store_root>/env/<env_id>`.
    fn env_dir(&self, env_id: &str) -> PathBuf {
        self.store_root.join("env").join(env_id)
    }

    /// Renders an OCI runtime `config.json` for the given sandbox.
    ///
    /// All interpolated strings (env var names/values, username, hostname,
    /// home dir, bind-mount paths) are passed through `json_escape`. The
    /// previous implementation escaped only double quotes in env *values*,
    /// so a backslash, newline, or quote in any other interpolated string
    /// produced invalid JSON that the runtime would reject (or worse,
    /// misparse). For ordinary inputs the output is unchanged.
    fn generate_oci_spec(config: &SandboxConfig, spec: &RuntimeSpec) -> String {
        /// Minimal JSON string escaper (quotes, backslashes, control chars).
        fn json_escape(s: &str) -> String {
            let mut out = String::with_capacity(s.len());
            for c in s.chars() {
                match c {
                    '"' => out.push_str("\\\""),
                    '\\' => out.push_str("\\\\"),
                    '\n' => out.push_str("\\n"),
                    '\r' => out.push_str("\\r"),
                    '\t' => out.push_str("\\t"),
                    c if (c as u32) < 0x20 => {
                        out.push_str(&format!("\\u{:04x}", c as u32));
                    }
                    c => out.push(c),
                }
            }
            out
        }
        let uid = config.uid;
        let gid = config.gid;
        let home = json_escape(&config.home_dir.display().to_string());
        let hostname = json_escape(&config.hostname);
        let mut env_arr = Vec::new();
        env_arr.push(format!("\"HOME={home}\""));
        env_arr.push(format!("\"USER={}\"", json_escape(&config.username)));
        env_arr.push(format!("\"HOSTNAME={hostname}\""));
        env_arr.push("\"TERM=xterm-256color\"".to_owned());
        env_arr.push("\"KARAPACE_ENV=1\"".to_owned());
        for (k, v) in &config.env_vars {
            env_arr.push(format!("\"{}={}\"", json_escape(k), json_escape(v)));
        }
        let mut mounts = Vec::new();
        // Standard mounts
        mounts.push(r#"{"destination":"/proc","type":"proc","source":"proc"}"#.to_owned());
        mounts.push(
            r#"{"destination":"/dev","type":"tmpfs","source":"tmpfs","options":["nosuid","strictatime","mode=755","size=65536k"]}"#
                .to_owned(),
        );
        mounts.push(
            r#"{"destination":"/dev/pts","type":"devpts","source":"devpts","options":["nosuid","noexec","newinstance","ptmxmode=0666","mode=0620"]}"#
                .to_owned(),
        );
        mounts.push(
            r#"{"destination":"/dev/shm","type":"tmpfs","source":"shm","options":["nosuid","noexec","nodev","mode=1777","size=65536k"]}"#
                .to_owned(),
        );
        mounts.push(
            r#"{"destination":"/sys","type":"sysfs","source":"sysfs","options":["nosuid","noexec","nodev","ro"]}"#
                .to_owned(),
        );
        // Home bind mount (read-write)
        mounts.push(format!(
            r#"{{"destination":"{home}","type":"bind","source":"{home}","options":["rbind","rw"]}}"#
        ));
        // resolv.conf so DNS works inside the container
        mounts.push(
            r#"{"destination":"/etc/resolv.conf","type":"bind","source":"/etc/resolv.conf","options":["bind","ro"]}"#
                .to_owned(),
        );
        // Custom bind mounts from host integration / manifest
        for bm in &config.bind_mounts {
            let opts = if bm.read_only {
                "\"rbind\",\"ro\""
            } else {
                "\"rbind\",\"rw\""
            };
            mounts.push(format!(
                r#"{{"destination":"{}","type":"bind","source":"{}","options":[{}]}}"#,
                json_escape(&bm.target.display().to_string()),
                json_escape(&bm.source.display().to_string()),
                opts
            ));
        }
        let mounts_json = mounts.join(",");
        let env_json = env_arr.join(",");
        // Only add a network namespace when the manifest asks for isolation.
        let network_ns = if spec.manifest.network_isolation {
            r#",{"type":"network"}"#
        } else {
            ""
        };
        format!(
            r#"{{
  "ociVersion": "1.0.2",
  "process": {{
    "terminal": true,
    "user": {{ "uid": {uid}, "gid": {gid} }},
    "args": ["/bin/bash", "-l"],
    "env": [{env_json}],
    "cwd": "{home}"
  }},
  "root": {{
    "path": "rootfs",
    "readonly": false
  }},
  "hostname": "{hostname}",
  "mounts": [{mounts_json}],
  "linux": {{
    "namespaces": [
      {{"type":"pid"}},
      {{"type":"mount"}},
      {{"type":"ipc"}},
      {{"type":"uts"}}
      {network_ns}
    ],
    "uidMappings": [{{ "containerID": 0, "hostID": {uid}, "size": 1 }}],
    "gidMappings": [{{ "containerID": 0, "hostID": {gid}, "size": 1 }}]
  }}
}}"#
        )
    }
}
impl RuntimeBackend for OciBackend {
    /// Stable identifier for this backend.
    fn name(&self) -> &'static str {
        "oci"
    }

    /// Usable whenever at least one OCI runtime (crun/runc/youki) responds
    /// to `--version`.
    fn available(&self) -> bool {
        Self::find_runtime().is_some()
    }

    /// Resolves the manifest: ensures and digests the base image; when
    /// packages are requested, installs them into a throwaway overlay and
    /// queries the package manager for exact versions.
    fn resolve(&self, spec: &RuntimeSpec) -> Result<ResolutionResult, RuntimeError> {
        let progress = |msg: &str| {
            eprintln!("[karapace/oci] {msg}");
        };
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.ensure_image(&resolved, &progress)?;
        let base_image_digest = compute_image_digest(&rootfs)?;
        let resolved_packages = if spec.manifest.system_packages.is_empty() {
            Vec::new()
        } else {
            let tmp_dir = tempfile::tempdir()
                .map_err(|e| RuntimeError::ExecFailed(format!("failed to create temp dir: {e}")))?;
            let tmp_env = tmp_dir.path().join("resolve-env");
            std::fs::create_dir_all(&tmp_env)?;
            let mut sandbox = SandboxConfig::new(rootfs.clone(), "resolve-tmp", &tmp_env);
            // Package resolution needs network access.
            sandbox.isolate_network = false;
            mount_overlay(&sandbox)?;
            setup_container_rootfs(&sandbox)?;
            // Run resolution inside an inner closure so cleanup always runs,
            // even if detect/install/query fails.
            let resolve_inner = || -> Result<Vec<(String, String)>, RuntimeError> {
                let pkg_mgr = detect_package_manager(&sandbox.overlay_merged)
                    .or_else(|| detect_package_manager(&rootfs))
                    .ok_or_else(|| {
                        RuntimeError::ExecFailed(
                            "no supported package manager found in the image".to_owned(),
                        )
                    })?;
                let install_cmd = install_packages_command(pkg_mgr, &spec.manifest.system_packages);
                install_packages_in_container(&sandbox, &install_cmd)?;
                let query_cmd = query_versions_command(pkg_mgr, &spec.manifest.system_packages);
                let output = exec_in_container(&sandbox, &query_cmd)?;
                let stdout = String::from_utf8_lossy(&output.stdout);
                Ok(parse_version_output(pkg_mgr, &stdout))
            };
            let result = resolve_inner();
            // Always cleanup: unmount overlay and remove temp directory
            let _ = unmount_overlay(&sandbox);
            let _ = std::fs::remove_dir_all(&tmp_env);
            let versions = result?;
            // Fall back to "unresolved" for packages the query missed.
            spec.manifest
                .system_packages
                .iter()
                .map(|name| {
                    let version = versions
                        .iter()
                        .find(|(n, _)| n == name)
                        .map_or_else(|| "unresolved".to_owned(), |(_, v)| v.clone());
                    ResolvedPackage {
                        name: name.clone(),
                        version,
                    }
                })
                .collect()
        };
        Ok(ResolutionResult {
            base_image_digest,
            resolved_packages,
        })
    }

    /// Builds the environment: prepares the overlay rootfs, installs
    /// packages, symlinks the merged rootfs into the OCI bundle directory,
    /// and drops a `.built` marker used as a gate by `enter`/`exec`.
    fn build(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let env_dir = self.env_dir(&spec.env_id);
        std::fs::create_dir_all(&env_dir)?;
        let progress = |msg: &str| {
            eprintln!("[karapace/oci] {msg}");
        };
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.ensure_image(&resolved, &progress)?;
        let mut sandbox = SandboxConfig::new(rootfs.clone(), &spec.env_id, &env_dir);
        sandbox.isolate_network = spec.manifest.network_isolation;
        mount_overlay(&sandbox)?;
        setup_container_rootfs(&sandbox)?;
        if !spec.manifest.system_packages.is_empty() {
            let pkg_mgr = detect_package_manager(&sandbox.overlay_merged)
                .or_else(|| detect_package_manager(&rootfs))
                .ok_or_else(|| {
                    RuntimeError::ExecFailed(
                        "no supported package manager found in the image".to_owned(),
                    )
                })?;
            progress(&format!(
                "installing {} packages via {pkg_mgr}...",
                spec.manifest.system_packages.len()
            ));
            let install_cmd = install_packages_command(pkg_mgr, &spec.manifest.system_packages);
            install_packages_in_container(&sandbox, &install_cmd)?;
            progress("packages installed");
        }
        unmount_overlay(&sandbox)?;
        // Generate OCI bundle config.json
        let bundle_dir = env_dir.join("bundle");
        std::fs::create_dir_all(&bundle_dir)?;
        // Symlink rootfs into bundle
        let bundle_rootfs = bundle_dir.join("rootfs");
        if !bundle_rootfs.exists() {
            #[cfg(unix)]
            std::os::unix::fs::symlink(&sandbox.overlay_merged, &bundle_rootfs)?;
        }
        std::fs::write(env_dir.join(".built"), "1")?;
        progress(&format!(
            "environment {} built (OCI, {} base)",
            &spec.env_id[..12.min(spec.env_id.len())],
            resolved.display_name
        ));
        Ok(())
    }

    /// Enters the environment interactively via `<runtime> run`: mounts the
    /// overlay, writes the bundle `config.json`, runs the container with
    /// inherited stdio, then always unmounts and force-deletes the
    /// container state.
    fn enter(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let runtime = Self::find_runtime().ok_or_else(|| {
            RuntimeError::BackendUnavailable("no OCI runtime found (crun/runc/youki)".to_owned())
        })?;
        let env_dir = self.env_dir(&spec.env_id);
        if !env_dir.join(".built").exists() {
            return Err(RuntimeError::ExecFailed(format!(
                "environment {} has not been built",
                &spec.env_id[..12.min(spec.env_id.len())]
            )));
        }
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.rootfs_path(&resolved.cache_key);
        let mut sandbox = SandboxConfig::new(rootfs, &spec.env_id, &env_dir);
        sandbox.isolate_network = spec.manifest.network_isolation;
        let host = compute_host_integration(&spec.manifest);
        sandbox.bind_mounts.extend(host.bind_mounts);
        sandbox.env_vars.extend(host.env_vars);
        mount_overlay(&sandbox)?;
        setup_container_rootfs(&sandbox)?;
        // Write OCI config.json into the bundle directory
        let bundle_dir = env_dir.join("bundle");
        std::fs::create_dir_all(&bundle_dir)?;
        let bundle_rootfs = bundle_dir.join("rootfs");
        if !bundle_rootfs.exists() {
            #[cfg(unix)]
            std::os::unix::fs::symlink(&sandbox.overlay_merged, &bundle_rootfs)?;
        }
        let oci_config = Self::generate_oci_spec(&sandbox, spec);
        std::fs::write(bundle_dir.join("config.json"), &oci_config)?;
        let container_id = format!("karapace-{}", &spec.env_id[..12.min(spec.env_id.len())]);
        // Mark as running (stores this process's PID; see `status`)
        std::fs::write(env_dir.join(".running"), format!("{}", std::process::id()))?;
        // Emit terminal markers (OSC 777 for container-aware terminals)
        terminal::emit_container_push(&spec.env_id, &sandbox.hostname);
        terminal::print_container_banner(
            &spec.env_id,
            &spec.manifest.base_image,
            &sandbox.hostname,
        );
        let status = Command::new(&runtime)
            .args([
                "run",
                "--bundle",
                &bundle_dir.to_string_lossy(),
                &container_id,
            ])
            .stdin(std::process::Stdio::inherit())
            .stdout(std::process::Stdio::inherit())
            .stderr(std::process::Stdio::inherit())
            .status()
            .map_err(|e| RuntimeError::ExecFailed(format!("{runtime} run failed: {e}")))?;
        // Cleanup runs regardless of how the container exited.
        terminal::emit_container_pop();
        terminal::print_container_exit(&spec.env_id);
        let _ = std::fs::remove_file(env_dir.join(".running"));
        let _ = unmount_overlay(&sandbox);
        // Clean up OCI container state
        let _ = Command::new(&runtime)
            .args(["delete", "--force", &container_id])
            .stdout(std::process::Stdio::null())
            .stderr(std::process::Stdio::null())
            .status();
        if status.success() {
            Ok(())
        } else {
            Err(RuntimeError::ExecFailed(format!(
                "OCI runtime exited with code {}",
                status.code().unwrap_or(1)
            )))
        }
    }

    /// Runs a single command inside the environment, mounting and
    /// unmounting the overlay around the call.
    fn exec(
        &self,
        spec: &RuntimeSpec,
        command: &[String],
    ) -> Result<std::process::Output, RuntimeError> {
        let env_dir = self.env_dir(&spec.env_id);
        if !env_dir.join(".built").exists() {
            return Err(RuntimeError::ExecFailed(format!(
                "environment {} has not been built yet",
                &spec.env_id[..12.min(spec.env_id.len())]
            )));
        }
        let resolved = resolve_image(&spec.manifest.base_image)?;
        let image_cache = ImageCache::new(&self.store_root);
        let rootfs = image_cache.rootfs_path(&resolved.cache_key);
        let mut sandbox = SandboxConfig::new(rootfs, &spec.env_id, &env_dir);
        sandbox.isolate_network = spec.manifest.network_isolation;
        let host = compute_host_integration(&spec.manifest);
        sandbox.bind_mounts.extend(host.bind_mounts);
        sandbox.env_vars.extend(host.env_vars);
        mount_overlay(&sandbox)?;
        setup_container_rootfs(&sandbox)?;
        let output = exec_in_container(&sandbox, command);
        // Unmount even when the command failed; preserve the command's result.
        let _ = unmount_overlay(&sandbox);
        output
    }

    /// Removes all on-disk state, after best-effort unmount and a forced
    /// `delete` of any lingering OCI container.
    fn destroy(&self, spec: &RuntimeSpec) -> Result<(), RuntimeError> {
        let env_dir = self.env_dir(&spec.env_id);
        // The rootfs path is irrelevant for unmounting.
        let sandbox = SandboxConfig::new(PathBuf::from("/nonexistent"), &spec.env_id, &env_dir);
        let _ = unmount_overlay(&sandbox);
        // Clean up any lingering OCI containers
        if let Some(runtime) = Self::find_runtime() {
            let container_id = format!("karapace-{}", &spec.env_id[..12.min(spec.env_id.len())]);
            let _ = Command::new(&runtime)
                .args(["delete", "--force", &container_id])
                .stdout(std::process::Stdio::null())
                .stderr(std::process::Stdio::null())
                .status();
        }
        force_remove(&env_dir)?;
        Ok(())
    }

    /// Reports running state from the `.running` PID file, validating that
    /// the recorded process is still alive (stale files are removed).
    fn status(&self, env_id: &str) -> Result<RuntimeStatus, RuntimeError> {
        let env_dir = self.env_dir(env_id);
        let running_file = env_dir.join(".running");
        if running_file.exists() {
            let pid_str = std::fs::read_to_string(&running_file).unwrap_or_default();
            let pid = pid_str.trim().parse::<u32>().ok();
            if pid.is_none() && !pid_str.trim().is_empty() {
                tracing::warn!(
                    "corrupt .running file for {}: could not parse PID from '{}'",
                    &env_id[..12.min(env_id.len())],
                    pid_str.trim()
                );
            }
            if let Some(p) = pid {
                if Path::new(&format!("/proc/{p}")).exists() {
                    return Ok(RuntimeStatus {
                        env_id: env_id.to_owned(),
                        running: true,
                        pid: Some(p),
                    });
                }
                // Stale marker from a crashed/killed session: clean it up.
                let _ = std::fs::remove_file(&running_file);
            }
        }
        Ok(RuntimeStatus {
            env_id: env_id.to_owned(),
            running: false,
            pid: None,
        })
    }
}
/// Default on-disk store root for environments and cached images.
///
/// Prefers `$HOME/.local/share/karapace` (XDG data-dir convention) and
/// falls back to `/tmp/karapace` when `HOME` is unset *or empty* so the
/// runtime still works in stripped-down environments (e.g. CI containers).
/// An empty `HOME` previously produced the relative path
/// `.local/share/karapace`, silently scattering state under the current
/// working directory.
fn default_store_root() -> PathBuf {
    match std::env::var("HOME") {
        Ok(home) if !home.is_empty() => PathBuf::from(home).join(".local/share/karapace"),
        _ => PathBuf::from("/tmp/karapace"),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn oci_env_dir_layout() {
        // Environment state lives under <store_root>/env/<env_id>.
        let store = tempfile::tempdir().unwrap();
        let backend = OciBackend::with_store_root(store.path());
        let expected = store.path().join("env").join("abc123");
        assert_eq!(backend.env_dir("abc123"), expected);
    }
    #[test]
    fn oci_status_reports_not_running() {
        // An env id that was never built must report as stopped.
        let store = tempfile::tempdir().unwrap();
        let backend = OciBackend::with_store_root(store.path());
        assert!(!backend.status("oci-test").unwrap().running);
    }
    #[test]
    fn oci_availability_check() {
        // Smoke test: probing for crun/runc/youki must not panic.
        let _ = OciBackend::new().available();
    }
}

View file

@ -0,0 +1,151 @@
use std::fmt;
use std::process::Command;
/// A missing prerequisite with actionable install instructions.
#[derive(Debug)]
pub struct MissingPrereq {
    // Name of the missing tool or kernel feature (e.g. "curl").
    pub name: &'static str,
    // What Karapace needs it for; shown to the user.
    pub purpose: &'static str,
    // Distro-specific install command(s) or enablement hint.
    pub install_hint: &'static str,
}
impl fmt::Display for MissingPrereq {
    /// Render as a single indented bullet line for prerequisite reports.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            name,
            purpose,
            install_hint,
        } = self;
        write!(f, " - {name}: {purpose} (install: {install_hint})")
    }
}
/// Check whether `name` resolves to an executable on the caller's `PATH`.
///
/// Searches `PATH` directly instead of shelling out to `which`: `which` is
/// not guaranteed to exist on minimal systems, and a missing `which` would
/// make *every* prerequisite look absent — exactly the failure mode a
/// prerequisite checker must not have. Also avoids one process spawn per
/// probe.
fn command_exists(name: &str) -> bool {
    use std::os::unix::fs::PermissionsExt;
    std::env::var_os("PATH")
        .map(|paths| {
            std::env::split_paths(&paths).any(|dir| {
                let candidate = dir.join(name);
                std::fs::metadata(&candidate)
                    // Regular file with any execute bit (user/group/other) set.
                    .map(|m| m.is_file() && m.permissions().mode() & 0o111 != 0)
                    .unwrap_or(false)
            })
        })
        .unwrap_or(false)
}
/// Probe unprivileged user namespaces by actually creating one: run `true`
/// under `unshare --user`. Any failure — missing binary, kernel config, or
/// sysctl restriction — is reported as "not working".
fn user_namespaces_work() -> bool {
    let probe = Command::new("unshare")
        .args(["--user", "--map-root-user", "--fork", "true"])
        .output();
    matches!(probe, Ok(out) if out.status.success())
}
/// Check all prerequisites for the namespace backend.
/// Returns a list of missing items. Empty list means all prerequisites are met.
pub fn check_namespace_prereqs() -> Vec<MissingPrereq> {
    let mut missing = Vec::new();
    // `unshare` must exist AND user namespaces must actually be usable;
    // only one of the two findings is reported, most specific first.
    if command_exists("unshare") {
        if !user_namespaces_work() {
            missing.push(MissingPrereq {
                name: "user namespaces",
                purpose: "unprivileged container isolation",
                install_hint:
                    "enable CONFIG_USER_NS=y in kernel, or: sysctl kernel.unprivileged_userns_clone=1",
            });
        }
    } else {
        missing.push(MissingPrereq {
            name: "unshare",
            purpose: "user namespace isolation",
            install_hint: "part of util-linux (usually pre-installed)",
        });
    }
    if !command_exists("fuse-overlayfs") {
        missing.push(MissingPrereq {
            name: "fuse-overlayfs",
            purpose: "overlay filesystem for writable container layers",
            install_hint: "zypper install fuse-overlayfs | apt install fuse-overlayfs | dnf install fuse-overlayfs | pacman -S fuse-overlayfs",
        });
    }
    if !command_exists("curl") {
        missing.push(MissingPrereq {
            name: "curl",
            purpose: "downloading container images",
            install_hint:
                "zypper install curl | apt install curl | dnf install curl | pacman -S curl",
        });
    }
    missing
}
/// Check prerequisites for the OCI backend.
pub fn check_oci_prereqs() -> Vec<MissingPrereq> {
let mut missing = Vec::new();
let has_runtime = command_exists("crun") || command_exists("runc") || command_exists("youki");
if !has_runtime {
missing.push(MissingPrereq {
name: "OCI runtime",
purpose: "OCI container execution",
install_hint: "install one of: crun, runc, or youki",
});
}
if !command_exists("curl") {
missing.push(MissingPrereq {
name: "curl",
purpose: "downloading container images",
install_hint:
"zypper install curl | apt install curl | dnf install curl | pacman -S curl",
});
}
missing
}
/// Format a list of missing prerequisites into a user-friendly error message.
pub fn format_missing(missing: &[MissingPrereq]) -> String {
    use std::fmt::Write as _;
    let mut msg = String::from("missing prerequisites:\n");
    missing.iter().for_each(|item| {
        // writeln! into a String cannot fail; the Result is discarded.
        let _ = writeln!(msg, "{item}");
    });
    msg.push_str("\nKarapace requires these tools to create container environments.");
    msg
}
#[cfg(test)]
mod tests {
    use super::*;
    // Display output must carry name, purpose, and the install hint.
    #[test]
    fn missing_prereq_display() {
        let m = MissingPrereq {
            name: "curl",
            purpose: "downloading images",
            install_hint: "apt install curl",
        };
        let s = format!("{m}");
        assert!(s.contains("curl"));
        assert!(s.contains("downloading images"));
        assert!(s.contains("apt install curl"));
    }
    // The aggregate report lists every missing item under a fixed header.
    #[test]
    fn format_missing_produces_readable_output() {
        let items = vec![
            MissingPrereq {
                name: "curl",
                purpose: "downloads",
                install_hint: "apt install curl",
            },
            MissingPrereq {
                name: "fuse-overlayfs",
                purpose: "overlay",
                install_hint: "apt install fuse-overlayfs",
            },
        ];
        let output = format_missing(&items);
        assert!(output.contains("missing prerequisites:"));
        assert!(output.contains("curl"));
        assert!(output.contains("fuse-overlayfs"));
    }
}

View file

@ -0,0 +1,563 @@
use crate::RuntimeError;
use std::fmt::Write as _;
use std::path::{Path, PathBuf};
use std::process::Command;
/// Shell-escape a string for safe interpolation into shell scripts.
///
/// Wraps the value in single quotes; an embedded single quote is emitted
/// as `'\''` (close quote, backslash-escaped quote, reopen quote), the
/// standard POSIX idiom. Inside single quotes no other character is
/// special, so this neutralizes `$`, backticks, newlines, etc.
fn shell_quote(s: &str) -> String {
    let mut quoted = String::with_capacity(s.len() + 2);
    quoted.push('\'');
    for ch in s.chars() {
        match ch {
            '\'' => quoted.push_str("'\\''"),
            other => quoted.push(other),
        }
    }
    quoted.push('\'');
    quoted
}
/// Shell-escape a Path for safe interpolation (lossy UTF-8 conversion).
fn shell_quote_path(p: &Path) -> String {
    let text = p.to_string_lossy();
    shell_quote(text.as_ref())
}
/// A host path bind-mounted into the container.
#[derive(Debug, Clone)]
pub struct BindMount {
    // Path on the host to bind from.
    pub source: PathBuf,
    // Path inside the container; absolute targets are re-rooted under the
    // overlay's merged directory when the setup script is generated.
    pub target: PathBuf,
    // Remount read-only after binding.
    pub read_only: bool,
}
/// Everything needed to assemble and enter one sandboxed environment.
#[derive(Debug, Clone)]
pub struct SandboxConfig {
    // Read-only base root filesystem (used as the overlay lowerdir).
    pub rootfs: PathBuf,
    // Symlink to the rootfs, created under the env dir as "lower".
    pub overlay_lower: PathBuf,
    // Writable upper layer for fuse-overlayfs.
    pub overlay_upper: PathBuf,
    // fuse-overlayfs work directory; must be clean at mount time.
    pub overlay_work: PathBuf,
    // Mountpoint of the merged overlay view; becomes the chroot target.
    pub overlay_merged: PathBuf,
    // Container hostname: "karapace-" + short env id.
    pub hostname: String,
    // Extra host paths to bind-mount into the container.
    pub bind_mounts: Vec<BindMount>,
    // Extra environment variables exported inside the container.
    pub env_vars: Vec<(String, String)>,
    // When true, the container gets its own network namespace (--net).
    pub isolate_network: bool,
    // Host uid, mirrored into the container's /etc/passwd.
    pub uid: u32,
    // Host gid, mirrored into the container's /etc/group.
    pub gid: u32,
    // Username inside the container (host $USER, or "user" fallback).
    pub username: String,
    // Home directory path, bind-mounted from the host into the container.
    pub home_dir: PathBuf,
}
/// Safe wrapper around libc::getuid().
///
/// Returns the real (not effective) uid of the calling process.
#[allow(unsafe_code)]
fn current_uid() -> u32 {
    // SAFETY: getuid() is always safe — no arguments, no side effects, cannot fail.
    unsafe { libc::getuid() }
}
/// Safe wrapper around libc::getgid().
///
/// Returns the real (not effective) gid of the calling process.
#[allow(unsafe_code)]
fn current_gid() -> u32 {
    // SAFETY: getgid() is always safe — no arguments, no side effects, cannot fail.
    unsafe { libc::getgid() }
}
impl SandboxConfig {
    /// Build a sandbox configuration rooted at `env_dir`, deriving the
    /// container identity (uid/gid, username, home) from the calling user.
    /// Bind mounts and extra env vars start empty; network isolation is off.
    pub fn new(rootfs: PathBuf, env_id: &str, env_dir: &Path) -> Self {
        let username = std::env::var("USER").unwrap_or_else(|_| "user".to_owned());
        let home_dir = std::env::var("HOME")
            .map(PathBuf::from)
            .unwrap_or_else(|_| PathBuf::from(format!("/home/{username}")));
        // Hostname carries only a short id prefix to stay readable.
        let short_id = &env_id[..12.min(env_id.len())];
        Self {
            rootfs,
            overlay_lower: env_dir.join("lower"),
            overlay_upper: env_dir.join("upper"),
            overlay_work: env_dir.join("work"),
            overlay_merged: env_dir.join("merged"),
            hostname: format!("karapace-{short_id}"),
            bind_mounts: Vec::new(),
            env_vars: Vec::new(),
            isolate_network: false,
            uid: current_uid(),
            gid: current_gid(),
            username,
            home_dir,
        }
    }
}
/// Mount the writable overlay (rootfs as lowerdir, upper/work under the
/// env dir) at `overlay_merged` using fuse-overlayfs.
///
/// Idempotent with respect to a previously failed run: any stale mount is
/// unmounted and the work dir recreated before mounting.
///
/// # Errors
/// Returns `RuntimeError::ExecFailed` when fuse-overlayfs cannot be
/// spawned or exits non-zero; `Io` (via `?`) on directory creation.
pub fn mount_overlay(config: &SandboxConfig) -> Result<(), RuntimeError> {
    // Unmount any lingering overlay from a previous failed run; failure
    // here is non-fatal (nothing may be mounted at all).
    let _ = unmount_overlay(config);
    // Clean stale work dir (fuse-overlayfs requires a clean workdir)
    if config.overlay_work.exists() {
        let _ = std::fs::remove_dir_all(&config.overlay_work);
    }
    for dir in [
        &config.overlay_upper,
        &config.overlay_work,
        &config.overlay_merged,
    ] {
        std::fs::create_dir_all(dir)?;
    }
    // Create a symlink to rootfs as lower dir if needed.
    // NOTE(review): the mount command below uses config.rootfs directly as
    // lowerdir, so this symlink is not consumed here — presumably other
    // code relies on the "lower" entry existing; confirm before removing.
    if !config.overlay_lower.exists() {
        #[cfg(unix)]
        std::os::unix::fs::symlink(&config.rootfs, &config.overlay_lower)?;
    }
    let status = Command::new("fuse-overlayfs")
        .args([
            "-o",
            &format!(
                "lowerdir={},upperdir={},workdir={}",
                config.rootfs.display(),
                config.overlay_upper.display(),
                config.overlay_work.display()
            ),
            &config.overlay_merged.to_string_lossy(),
        ])
        .status()
        .map_err(|e| {
            RuntimeError::ExecFailed(format!(
                "fuse-overlayfs not found or failed to start: {e}. Install with: sudo zypper install fuse-overlayfs"
            ))
        })?;
    if !status.success() {
        return Err(RuntimeError::ExecFailed(
            "fuse-overlayfs mount failed".to_owned(),
        ));
    }
    Ok(())
}
/// Check if a path is currently a mount point by inspecting /proc/mounts.
///
/// The path is canonicalized first because /proc/mounts records resolved
/// paths; when canonicalization fails (e.g. the path does not exist) the
/// raw path string is used instead. Any read failure yields `false`.
fn is_mounted(path: &Path) -> bool {
    let target = std::fs::canonicalize(path)
        .map(|p| p.to_string_lossy().into_owned())
        .unwrap_or_else(|_| path.to_string_lossy().into_owned());
    std::fs::read_to_string("/proc/mounts")
        .map(|mounts| {
            mounts
                .lines()
                // Second whitespace-separated field is the mountpoint.
                .filter_map(|line| line.split_whitespace().nth(1))
                .any(|mountpoint| mountpoint == target)
        })
        .unwrap_or(false)
}
/// Unmount the merged overlay, best effort. A missing or not-mounted
/// mountpoint is a no-op; unmount failures are swallowed (callers treat
/// teardown as best effort).
pub fn unmount_overlay(config: &SandboxConfig) -> Result<(), RuntimeError> {
    let merged = &config.overlay_merged;
    // Nothing to do when the mountpoint is absent or not actually mounted;
    // this also avoids spurious fusermount errors.
    if !merged.exists() || !is_mounted(merged) {
        return Ok(());
    }
    // Try fusermount3 first, then fall back to legacy fusermount.
    for tool in ["fusermount3", "fusermount"] {
        let _ = Command::new(tool)
            .args(["-u", &merged.to_string_lossy()])
            .stdout(std::process::Stdio::null())
            .stderr(std::process::Stdio::null())
            .status();
        if !is_mounted(merged) {
            break;
        }
    }
    Ok(())
}
/// Populate the merged overlay with the directory skeleton and /etc files
/// a functional container needs, returning the merged root path.
///
/// Creates proc/sys/dev/tmp/run/etc/var, the per-uid XDG runtime dir, the
/// user's home, /etc/hostname, a host-copied /etc/resolv.conf, and
/// passwd/group entries for the calling user.
///
/// # Errors
/// Propagates I/O errors from directory creation and passwd/group writes;
/// hostname/resolv.conf writes are best effort.
pub fn setup_container_rootfs(config: &SandboxConfig) -> Result<PathBuf, RuntimeError> {
    let merged = &config.overlay_merged;
    // Essential directories inside the container
    for subdir in [
        "proc", "sys", "dev", "dev/pts", "dev/shm", "tmp", "run", "run/user", "etc", "var",
        "var/tmp",
    ] {
        std::fs::create_dir_all(merged.join(subdir))?;
    }
    // Create run/user/<uid> for XDG_RUNTIME_DIR
    let user_run = merged.join(format!("run/user/{}", config.uid));
    std::fs::create_dir_all(&user_run)?;
    // Create home directory (home_dir is absolute; strip "/" to re-root it
    // under the merged overlay)
    let container_home = merged.join(
        config
            .home_dir
            .strip_prefix("/")
            .unwrap_or(&config.home_dir),
    );
    std::fs::create_dir_all(&container_home)?;
    // Write /etc/hostname (best effort)
    let _ = std::fs::write(merged.join("etc/hostname"), &config.hostname);
    // Ensure /etc/resolv.conf exists (copy from host for DNS)
    if !merged.join("etc/resolv.conf").exists() && Path::new("/etc/resolv.conf").exists() {
        let _ = std::fs::copy("/etc/resolv.conf", merged.join("etc/resolv.conf"));
    }
    // Ensure user exists in /etc/passwd
    ensure_user_in_container(config, merged)?;
    Ok(merged.clone())
}
/// Ensure the calling user (and a root entry) exist in the container's
/// /etc/passwd and /etc/group, appending entries only when absent.
///
/// Duplicate detection is line-anchored (`startswith("name:")` per line)
/// rather than a raw substring search: a bare `contains("user:")` could
/// false-positive on gecos fields or on another name that merely ends with
/// the same text, and `contains("root:")` could match inside a comment or
/// gecos field.
///
/// # Errors
/// Propagates I/O errors from writing the passwd/group files; missing
/// files are treated as empty.
fn ensure_user_in_container(config: &SandboxConfig, merged: &Path) -> Result<(), RuntimeError> {
    // --- /etc/passwd ---
    let passwd_path = merged.join("etc/passwd");
    let existing = std::fs::read_to_string(&passwd_path).unwrap_or_default();
    let user_entry = format!(
        "{}:x:{}:{}::/{}:/bin/bash\n",
        config.username,
        config.uid,
        config.gid,
        config
            .home_dir
            .strip_prefix("/")
            .unwrap_or(&config.home_dir)
            .display()
    );
    let user_prefix = format!("{}:", config.username);
    if !existing.lines().any(|l| l.starts_with(&user_prefix)) {
        let mut content = existing;
        if !content.lines().any(|l| l.starts_with("root:")) {
            content.push_str("root:x:0:0:root:/root:/bin/bash\n");
        }
        // Guard against an image whose passwd file lacks a trailing
        // newline — appending directly would fuse two entries into one.
        if !content.is_empty() && !content.ends_with('\n') {
            content.push('\n');
        }
        content.push_str(&user_entry);
        std::fs::write(&passwd_path, content)?;
    }
    // --- /etc/group ---
    let group_path = merged.join("etc/group");
    let existing_groups = std::fs::read_to_string(&group_path).unwrap_or_default();
    let group_entry = format!("{}:x:{}:\n", config.username, config.gid);
    if !existing_groups.lines().any(|l| l.starts_with(&user_prefix)) {
        let mut content = existing_groups;
        if !content.lines().any(|l| l.starts_with("root:")) {
            content.push_str("root:x:0:\n");
        }
        if !content.is_empty() && !content.ends_with('\n') {
            content.push('\n');
        }
        content.push_str(&group_entry);
        std::fs::write(&group_path, content)?;
    }
    Ok(())
}
/// Assemble the `unshare` invocation: fresh user, mount, and PID
/// namespaces with the caller mapped to root inside; a network namespace
/// is added only when the config requests isolation.
fn build_unshare_command(config: &SandboxConfig) -> Command {
    let mut cmd = Command::new("unshare");
    let mut flags = vec!["--user", "--map-root-user", "--mount", "--pid", "--fork"];
    if config.isolate_network {
        flags.push("--net");
    }
    cmd.args(&flags);
    cmd
}
/// Generate the POSIX shell script that performs all mounts inside the new
/// namespaces and then chroots into the merged overlay.
///
/// Every mount step is suffixed with `|| true` so a missing feature on the
/// host degrades gracefully instead of aborting the whole setup.
///
/// CONTRACT: the returned string ends with `exec chroot <merged> /bin/sh -c '`
/// — an *opened* single quote. The caller must append the inner command,
/// escaped for a single-quoted shell context, and then the closing quote.
fn build_setup_script(config: &SandboxConfig) -> String {
    let merged = &config.overlay_merged;
    // Quoted merged path, interpolated into every mount target below.
    let qm = shell_quote_path(merged);
    let mut script = String::new();
    // Mount /proc
    let _ = writeln!(script, "mount -t proc proc {qm}/proc 2>/dev/null || true");
    // Bind mount /sys (read-only)
    let _ = writeln!(script, "mount --rbind /sys {qm}/sys 2>/dev/null && mount --make-rslave {qm}/sys 2>/dev/null || true");
    // Bind mount /dev
    let _ = writeln!(script, "mount --rbind /dev {qm}/dev 2>/dev/null && mount --make-rslave {qm}/dev 2>/dev/null || true");
    // Bind mount home directory (home_dir is absolute; re-root under merged)
    let container_home = merged.join(
        config
            .home_dir
            .strip_prefix("/")
            .unwrap_or(&config.home_dir),
    );
    let _ = writeln!(
        script,
        "mount --bind {} {} 2>/dev/null || true",
        shell_quote_path(&config.home_dir),
        shell_quote_path(&container_home)
    );
    // Bind mount /etc/resolv.conf for DNS resolution
    let _ = writeln!(script, "touch {qm}/etc/resolv.conf 2>/dev/null; mount --bind /etc/resolv.conf {qm}/etc/resolv.conf 2>/dev/null || true");
    // Bind mount /tmp
    let _ = writeln!(script, "mount --bind /tmp {qm}/tmp 2>/dev/null || true");
    // Bind mounts from config (user-supplied paths — must be quoted)
    for bm in &config.bind_mounts {
        // Absolute targets are re-rooted under merged; relative targets are
        // resolved against merged directly.
        let target = if bm.target.is_absolute() {
            merged.join(bm.target.strip_prefix("/").unwrap_or(&bm.target))
        } else {
            merged.join(&bm.target)
        };
        let qt = shell_quote_path(&target);
        let qs = shell_quote_path(&bm.source);
        let _ = writeln!(
            script,
            "mkdir -p {qt} 2>/dev/null; mount --bind {qs} {qt} 2>/dev/null || true"
        );
        if bm.read_only {
            // Read-only requires a second remount; bind mounts ignore `ro`
            // on the initial mount.
            let _ = writeln!(script, "mount -o remount,ro,bind {qt} 2>/dev/null || true");
        }
    }
    // Bind mount XDG_RUNTIME_DIR sockets (Wayland, PipeWire, D-Bus)
    if let Ok(xdg_run) = std::env::var("XDG_RUNTIME_DIR") {
        let container_run = merged.join(format!("run/user/{}", config.uid));
        for socket in &["wayland-0", "pipewire-0", "pulse/native", "bus"] {
            let src = PathBuf::from(&xdg_run).join(socket);
            if src.exists() {
                let dst = container_run.join(socket);
                let qs = shell_quote_path(&src);
                let qd = shell_quote_path(&dst);
                if let Some(parent) = dst.parent() {
                    let _ = writeln!(
                        script,
                        "mkdir -p {} 2>/dev/null || true",
                        shell_quote_path(parent)
                    );
                }
                // For sockets, touch the target first (bind mounts need an
                // existing mount target of the right kind)
                if src.is_file() || !src.is_dir() {
                    let _ = writeln!(script, "touch {qd} 2>/dev/null || true");
                }
                let _ = writeln!(script, "mount --bind {qs} {qd} 2>/dev/null || true");
            }
        }
    }
    // Bind mount X11 socket if present
    if Path::new("/tmp/.X11-unix").exists() {
        let _ = writeln!(
            script,
            "mount --bind /tmp/.X11-unix {qm}/tmp/.X11-unix 2>/dev/null || true"
        );
    }
    // Chroot and exec — deliberately leaves a single quote OPEN; see the
    // function-level contract above.
    let _ = write!(script, "exec chroot {qm} /bin/sh -c '");
    script
}
/// Enter the sandbox interactively: run the setup script inside fresh
/// namespaces, chroot into the merged overlay, and exec a login shell with
/// stdio inherited from the caller.
///
/// Returns the shell's exit code (1 when it terminated without one, e.g.
/// killed by a signal).
///
/// # Errors
/// `RuntimeError::ExecFailed` when the `unshare` process cannot be spawned.
pub fn enter_interactive(config: &SandboxConfig) -> Result<i32, RuntimeError> {
    let merged = &config.overlay_merged;
    let mut setup = build_setup_script(config);
    // Build environment variable exports (values shell-quoted, keys validated)
    let mut env_exports = String::new();
    for (key, val) in &config.env_vars {
        // Reject keys that are empty, start with a digit, or contain
        // anything outside [A-Za-z0-9_] — they cannot form a valid export.
        if key.is_empty()
            || key.as_bytes()[0].is_ascii_digit()
            || !key.bytes().all(|b| b.is_ascii_alphanumeric() || b == b'_')
        {
            continue;
        }
        let _ = write!(env_exports, "export {}={}; ", key, shell_quote(val));
    }
    // Set standard env vars (all values shell-quoted)
    let _ = write!(
        env_exports,
        "export HOME={}; ",
        shell_quote_path(&config.home_dir)
    );
    let _ = write!(
        env_exports,
        "export USER={}; ",
        shell_quote(&config.username)
    );
    let _ = write!(
        env_exports,
        "export HOSTNAME={}; ",
        shell_quote(&config.hostname)
    );
    if let Ok(xdg) = std::env::var("XDG_RUNTIME_DIR") {
        let _ = write!(
            env_exports,
            "export XDG_RUNTIME_DIR={}; ",
            shell_quote(&xdg)
        );
    }
    if let Ok(display) = std::env::var("DISPLAY") {
        let _ = write!(env_exports, "export DISPLAY={}; ", shell_quote(&display));
    }
    if let Ok(wayland) = std::env::var("WAYLAND_DISPLAY") {
        let _ = write!(
            env_exports,
            "export WAYLAND_DISPLAY={}; ",
            shell_quote(&wayland)
        );
    }
    // ${TERM:-...} is intentionally unquoted so the *inner* shell expands it.
    env_exports.push_str("export TERM=${TERM:-xterm-256color}; ");
    let _ = write!(
        env_exports,
        "export KARAPACE_ENV=1; export KARAPACE_HOSTNAME={}; ",
        shell_quote(&config.hostname)
    );
    // Determine shell: prefer bash when the image ships it.
    let shell = if merged.join("bin/bash").exists() || merged.join("usr/bin/bash").exists() {
        "/bin/bash"
    } else {
        "/bin/sh"
    };
    // `build_setup_script` ends with `/bin/sh -c '` — an OPEN single quote.
    // Everything appended here is therefore parsed by the outer shell
    // inside a single-quoted context. The shell_quote()d values above
    // contain single quotes which would close that context and re-expose
    // the text to word splitting and expansion (e.g. a value with a space
    // would truncate the inner command). Escape the whole inner script for
    // the single-quoted context, then close the quote.
    let inner = format!("{env_exports}cd ~; exec {shell} -l");
    setup.push_str(&inner.replace('\'', "'\\''"));
    setup.push('\'');
    let mut cmd = build_unshare_command(config);
    cmd.arg("/bin/sh").arg("-c").arg(&setup);
    // Pass through stdin/stdout/stderr for interactive use
    cmd.stdin(std::process::Stdio::inherit());
    cmd.stdout(std::process::Stdio::inherit());
    cmd.stderr(std::process::Stdio::inherit());
    let status = cmd
        .status()
        .map_err(|e| RuntimeError::ExecFailed(format!("failed to enter sandbox: {e}")))?;
    Ok(status.code().unwrap_or(1))
}
/// Run `command` inside the container non-interactively, capturing its
/// output. Each argv element is individually shell-quoted for the inner
/// shell; a minimal environment (validated user vars + HOME/USER +
/// KARAPACE_ENV) is exported first.
///
/// # Errors
/// `RuntimeError::ExecFailed` when the `unshare` process cannot be spawned.
pub fn exec_in_container(
    config: &SandboxConfig,
    command: &[String],
) -> Result<std::process::Output, RuntimeError> {
    let mut setup = build_setup_script(config);
    // Environment (all values shell-quoted, keys validated)
    let mut env_exports = String::new();
    for (key, val) in &config.env_vars {
        // Reject keys that are empty, start with a digit, or contain
        // anything outside [A-Za-z0-9_] — they cannot form a valid export.
        if key.is_empty()
            || key.as_bytes()[0].is_ascii_digit()
            || !key.bytes().all(|b| b.is_ascii_alphanumeric() || b == b'_')
        {
            continue;
        }
        let _ = write!(env_exports, "export {}={}; ", key, shell_quote(val));
    }
    let _ = write!(
        env_exports,
        "export HOME={}; ",
        shell_quote_path(&config.home_dir)
    );
    let _ = write!(
        env_exports,
        "export USER={}; ",
        shell_quote(&config.username)
    );
    env_exports.push_str("export KARAPACE_ENV=1; ");
    let escaped_cmd: Vec<String> = command.iter().map(|a| shell_quote(a)).collect();
    // `build_setup_script` ends with an OPEN single quote (`/bin/sh -c '`);
    // the appended text is parsed by the outer shell inside that
    // single-quoted context, where the quotes produced by shell_quote would
    // terminate the quoting and break or weaken it. Escape the inner script
    // for the single-quoted context, then close the quote.
    let inner = format!("{env_exports}{}", escaped_cmd.join(" "));
    setup.push_str(&inner.replace('\'', "'\\''"));
    setup.push('\'');
    let mut cmd = build_unshare_command(config);
    cmd.arg("/bin/sh").arg("-c").arg(&setup);
    cmd.output()
        .map_err(|e| RuntimeError::ExecFailed(format!("exec in container failed: {e}")))
}
/// Run the manifest's package-install command inside the container,
/// turning a non-zero exit into an error that carries both output streams.
/// An empty command means "nothing to install" and succeeds immediately.
pub fn install_packages_in_container(
    config: &SandboxConfig,
    install_cmd: &[String],
) -> Result<(), RuntimeError> {
    if install_cmd.is_empty() {
        return Ok(());
    }
    let output = exec_in_container(config, install_cmd)?;
    if output.status.success() {
        return Ok(());
    }
    let stdout = String::from_utf8_lossy(&output.stdout);
    let stderr = String::from_utf8_lossy(&output.stderr);
    Err(RuntimeError::ExecFailed(format!(
        "package installation failed:\nstdout: {stdout}\nstderr: {stderr}"
    )))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Fresh configs inherit the caller's identity, get a karapace-prefixed
    // hostname, and keep the network shared by default.
    #[test]
    fn sandbox_config_defaults() {
        let dir = tempfile::tempdir().unwrap();
        let rootfs = dir.path().join("rootfs");
        std::fs::create_dir_all(&rootfs).unwrap();
        let config = SandboxConfig::new(rootfs, "abc123def456", dir.path());
        assert!(config.hostname.starts_with("karapace-"));
        assert!(!config.isolate_network);
    }
    #[test]
    fn shell_quote_escapes_single_quotes() {
        assert_eq!(shell_quote("hello"), "'hello'");
        assert_eq!(shell_quote("it's"), "'it'\\''s'");
        assert_eq!(shell_quote(""), "''");
    }
    // shell_quote must neutralize the classic injection vectors: command
    // substitution, backticks, and embedded newlines.
    #[test]
    fn shell_quote_prevents_injection() {
        // Command substitution is safely wrapped in single quotes
        let malicious = "$(rm -rf /)";
        let quoted = shell_quote(malicious);
        assert_eq!(quoted, "'$(rm -rf /)'");
        assert!(quoted.starts_with('\'') && quoted.ends_with('\''));
        // Backtick injection is also safely quoted
        let backtick = "`whoami`";
        let quoted = shell_quote(backtick);
        assert_eq!(quoted, "'`whoami`'");
        // Newline injection
        let newline = "value\n; rm -rf /";
        let quoted = shell_quote(newline);
        assert!(quoted.starts_with('\'') && quoted.ends_with('\''));
    }
    #[test]
    fn shell_quote_path_handles_spaces() {
        let p = PathBuf::from("/home/user/my project/dir");
        let quoted = shell_quote_path(&p);
        assert_eq!(quoted, "'/home/user/my project/dir'");
    }
    // The generated setup script must contain the core mount steps and the
    // final chroot.
    #[test]
    fn build_setup_script_contains_essential_mounts() {
        let dir = tempfile::tempdir().unwrap();
        let rootfs = dir.path().join("rootfs");
        std::fs::create_dir_all(&rootfs).unwrap();
        let config = SandboxConfig::new(rootfs, "abc123def456", dir.path());
        let script = build_setup_script(&config);
        assert!(script.contains("mount -t proc"));
        assert!(script.contains("mount --rbind /sys"));
        assert!(script.contains("mount --rbind /dev"));
        assert!(script.contains("chroot"));
    }
    #[test]
    fn is_mounted_returns_false_for_regular_dir() {
        let dir = tempfile::tempdir().unwrap();
        assert!(!is_mounted(dir.path()));
    }
    #[test]
    fn unmount_overlay_noop_on_non_mounted() {
        let dir = tempfile::tempdir().unwrap();
        let rootfs = dir.path().join("rootfs");
        std::fs::create_dir_all(&rootfs).unwrap();
        let config = SandboxConfig::new(rootfs, "abc123def456", dir.path());
        // Create the merged dir but don't mount anything
        std::fs::create_dir_all(&config.overlay_merged).unwrap();
        // Should not error — just returns Ok because nothing is mounted
        assert!(unmount_overlay(&config).is_ok());
    }
}

View file

@ -0,0 +1,388 @@
use crate::RuntimeError;
use karapace_schema::NormalizedManifest;
use serde::{Deserialize, Serialize};
/// Resolve `.` and `..` components in an absolute path without touching
/// the filesystem.
///
/// This is critical for security: `std::fs::canonicalize()` cannot be used
/// because the path may not exist yet, and the check must be deterministic.
/// `..` at the root is a no-op (matching kernel path resolution); empty
/// and `.` segments are dropped.
fn canonicalize_logical(path: &str) -> String {
    let mut stack: Vec<&str> = Vec::new();
    for segment in path.split('/') {
        match segment {
            "" | "." => continue,
            ".." => {
                stack.pop();
            }
            name => stack.push(name),
        }
    }
    let mut resolved = String::from("/");
    resolved.push_str(&stack.join("/"));
    resolved
}
/// Declarative security policy applied to an environment before launch.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SecurityPolicy {
    // Absolute path prefixes under which host mounts are permitted.
    pub allowed_mount_prefixes: Vec<String>,
    // Device paths the container may access (e.g. /dev/dri, /dev/snd).
    pub allowed_devices: Vec<String>,
    // Whether the container may share the host network.
    pub allow_network: bool,
    // Whether GPU access is permitted.
    pub allow_gpu: bool,
    // Whether audio device access is permitted.
    pub allow_audio: bool,
    // Host environment variables eligible for forwarding.
    pub allowed_env_vars: Vec<String>,
    // Variables that must never be forwarded (secrets, agent sockets).
    pub denied_env_vars: Vec<String>,
    // Upper bound on requested CPU shares (None = no limit).
    pub max_cpu_shares: Option<u64>,
    // Upper bound on requested memory in MB (None = no limit).
    pub max_memory_mb: Option<u64>,
}
impl Default for SecurityPolicy {
    /// Conservative baseline: mounts restricted to /home and /tmp, no
    /// devices, no network/GPU/audio, a small env whitelist, and secret-
    /// bearing variables explicitly denied. No resource caps.
    fn default() -> Self {
        let allowed_env_vars = [
            "TERM",
            "LANG",
            "HOME",
            "USER",
            "PATH",
            "SHELL",
            "XDG_RUNTIME_DIR",
        ]
        .iter()
        .map(|s| (*s).to_owned())
        .collect();
        let denied_env_vars = [
            "SSH_AUTH_SOCK",
            "GPG_AGENT_INFO",
            "AWS_SECRET_ACCESS_KEY",
            "DOCKER_HOST",
        ]
        .iter()
        .map(|s| (*s).to_owned())
        .collect();
        Self {
            allowed_mount_prefixes: vec!["/home".to_owned(), "/tmp".to_owned()],
            allowed_devices: Vec::new(),
            allow_network: false,
            allow_gpu: false,
            allow_audio: false,
            allowed_env_vars,
            denied_env_vars,
            max_cpu_shares: None,
            max_memory_mb: None,
        }
    }
}
impl SecurityPolicy {
    /// Derive a policy from a manifest: hardware and network access the
    /// manifest declares are granted (with matching device paths);
    /// everything else stays at the conservative default.
    pub fn from_manifest(manifest: &NormalizedManifest) -> Self {
        let mut allowed_devices = Vec::new();
        if manifest.hardware_gpu {
            allowed_devices.push("/dev/dri".to_owned());
        }
        if manifest.hardware_audio {
            allowed_devices.push("/dev/snd".to_owned());
        }
        Self {
            allow_gpu: manifest.hardware_gpu,
            allow_audio: manifest.hardware_audio,
            allow_network: !manifest.network_isolation,
            allowed_devices,
            max_cpu_shares: manifest.cpu_shares,
            max_memory_mb: manifest.memory_limit_mb,
            ..Self::default()
        }
    }
    /// Validate that every absolute host mount resolves (logically, with
    /// `.`/`..` removed) to a path under one of the allowed prefixes.
    /// Relative host paths are not checked here.
    ///
    /// # Errors
    /// `RuntimeError::MountDenied` naming the offending mount.
    pub fn validate_mounts(&self, manifest: &NormalizedManifest) -> Result<(), RuntimeError> {
        for mount in &manifest.mounts {
            let host = &mount.host_path;
            if !host.starts_with('/') {
                continue;
            }
            let canonical = canonicalize_logical(host);
            // Component-wise prefix match: "/home" must allow "/home" and
            // "/home/x" but NOT "/homestead" — a plain starts_with() on the
            // raw string accepts the latter and defeats the whitelist.
            let allowed = self.allowed_mount_prefixes.iter().any(|prefix| {
                let prefix = prefix.trim_end_matches('/');
                canonical == prefix || canonical.starts_with(&format!("{prefix}/"))
            });
            if !allowed {
                return Err(RuntimeError::MountDenied(format!(
                    "mount '{host}' (resolved: {canonical}) is not under any allowed prefix: {:?}",
                    self.allowed_mount_prefixes
                )));
            }
        }
        Ok(())
    }
    /// Reject hardware requests (GPU, audio) the policy does not allow.
    ///
    /// # Errors
    /// `RuntimeError::DeviceDenied` for the first denied device class.
    pub fn validate_devices(&self, manifest: &NormalizedManifest) -> Result<(), RuntimeError> {
        if manifest.hardware_gpu && !self.allow_gpu {
            return Err(RuntimeError::DeviceDenied(
                "GPU access requested but not allowed by policy".to_owned(),
            ));
        }
        if manifest.hardware_audio && !self.allow_audio {
            return Err(RuntimeError::DeviceDenied(
                "audio access requested but not allowed by policy".to_owned(),
            ));
        }
        Ok(())
    }
    /// Collect host environment variables to forward: those on the allow
    /// list, not on the deny list, and actually set in the host env.
    pub fn filter_env_vars(&self) -> Vec<(String, String)> {
        let mut result = Vec::new();
        for key in &self.allowed_env_vars {
            // Deny list wins even over an explicit allow entry.
            if self.denied_env_vars.contains(key) {
                continue;
            }
            if let Ok(val) = std::env::var(key) {
                result.push((key.clone(), val));
            }
        }
        result
    }
    /// Reject resource requests exceeding the policy caps; unset caps or
    /// unset requests pass.
    ///
    /// # Errors
    /// `RuntimeError::PolicyViolation` naming the exceeded limit.
    pub fn validate_resource_limits(
        &self,
        manifest: &NormalizedManifest,
    ) -> Result<(), RuntimeError> {
        if let (Some(req), Some(max)) = (manifest.cpu_shares, self.max_cpu_shares) {
            if req > max {
                return Err(RuntimeError::PolicyViolation(format!(
                    "requested CPU shares {req} exceeds policy max {max}"
                )));
            }
        }
        if let (Some(req), Some(max)) = (manifest.memory_limit_mb, self.max_memory_mb) {
            if req > max {
                return Err(RuntimeError::PolicyViolation(format!(
                    "requested memory {req}MB exceeds policy max {max}MB"
                )));
            }
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use karapace_schema::parse_manifest_str;
    // Default policy must refuse hardware the manifest asks for.
    #[test]
    fn default_policy_denies_gpu() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[hardware]
gpu = true
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        assert!(policy.validate_devices(&manifest).is_err());
    }
    // A policy derived from the manifest grants exactly what was declared.
    #[test]
    fn manifest_derived_policy_allows_declared_hardware() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[hardware]
gpu = true
audio = true
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::from_manifest(&manifest);
        assert!(policy.validate_devices(&manifest).is_ok());
        assert!(policy.allow_gpu);
        assert!(policy.allow_audio);
        assert!(policy.allowed_devices.contains(&"/dev/dri".to_owned()));
    }
    #[test]
    fn absolute_mounts_checked_against_whitelist() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
bad = "/etc/shadow:/secrets"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        assert!(policy.validate_mounts(&manifest).is_err());
    }
    // Denied variables never survive filtering, even if also allowed.
    #[test]
    fn denied_env_vars_are_filtered() {
        let policy = SecurityPolicy::default();
        assert!(policy.denied_env_vars.contains(&"SSH_AUTH_SOCK".to_owned()));
        assert!(policy
            .denied_env_vars
            .contains(&"AWS_SECRET_ACCESS_KEY".to_owned()));
        let filtered = policy.filter_env_vars();
        assert!(filtered
            .iter()
            .all(|(k, _)| !policy.denied_env_vars.contains(k)));
    }
    // Caps pass when unset, fail once the request exceeds the new maximum.
    #[test]
    fn resource_limits_enforced() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[runtime]
backend = "namespace"
[runtime.resource_limits]
cpu_shares = 2048
memory_limit_mb = 8192
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let mut policy = SecurityPolicy::from_manifest(&manifest);
        assert!(policy.validate_resource_limits(&manifest).is_ok());
        policy.max_cpu_shares = Some(1024);
        assert!(policy.validate_resource_limits(&manifest).is_err());
    }
    // Relative host paths bypass the whitelist check by design.
    #[test]
    fn relative_mounts_always_allowed() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
workspace = "./:/workspace"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        assert!(policy.validate_mounts(&manifest).is_ok());
    }
    #[test]
    fn path_traversal_via_dotdot_is_rejected() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
bad = "/../etc/shadow:/secrets"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        assert!(
            policy.validate_mounts(&manifest).is_err(),
            "path traversal via /../ must be rejected"
        );
    }
    #[test]
    fn path_traversal_via_allowed_prefix_breakout_is_rejected() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
bad = "/home/../etc/passwd:/data"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        // /home/../etc/passwd canonicalizes to /etc/passwd, which is NOT under /home
        assert!(
            policy.validate_mounts(&manifest).is_err(),
            "/home/../etc/passwd must be rejected (resolves to /etc/passwd)"
        );
    }
    #[test]
    fn root_path_is_rejected() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
bad = "/:/rootfs"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        assert!(
            policy.validate_mounts(&manifest).is_err(),
            "mounting / must be rejected"
        );
    }
    #[test]
    fn etc_shadow_is_rejected() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
bad = "/etc/shadow:/shadow"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        assert!(
            policy.validate_mounts(&manifest).is_err(),
            "/etc/shadow must be rejected"
        );
    }
    #[test]
    fn proc_is_rejected() {
        let manifest = parse_manifest_str(
            r#"
manifest_version = 1
[base]
image = "rolling"
[mounts]
bad = "/proc/self/root:/escape"
"#,
        )
        .unwrap()
        .normalize()
        .unwrap();
        let policy = SecurityPolicy::default();
        assert!(
            policy.validate_mounts(&manifest).is_err(),
            "/proc must be rejected"
        );
    }
}

View file

@ -0,0 +1,54 @@
use std::io::Write;
// OSC 777 escape-sequence framing for container-aware terminals:
// ESC ] 777 ; <payload> ESC \  (ST terminator).
const OSC_START: &str = "\x1b]777;";
const OSC_END: &str = "\x1b\\";
/// Emit the OSC 777 "container;push" marker on stderr so container-aware
/// terminals can tag the session. No-op when stderr is not a terminal.
pub fn emit_container_push(env_id: &str, hostname: &str) {
    if !is_interactive_terminal() {
        return;
    }
    let marker = format!("{OSC_START}container;push;{hostname};karapace;{env_id}{OSC_END}");
    let mut err = std::io::stderr();
    // Best effort: write failures to stderr are ignored.
    let _ = err.write_all(marker.as_bytes());
    let _ = err.flush();
}
/// Emit the OSC 777 "container;pop" marker on stderr, undoing a previous
/// push. No-op when stderr is not a terminal.
pub fn emit_container_pop() {
    if !is_interactive_terminal() {
        return;
    }
    let marker = format!("{OSC_START}container;pop;;{OSC_END}");
    let mut err = std::io::stderr();
    let _ = err.write_all(marker.as_bytes());
    let _ = err.flush();
}
/// Print a colored "entering environment" banner to stderr (interactive
/// terminals only), using a 12-character id prefix.
pub fn print_container_banner(env_id: &str, image: &str, hostname: &str) {
    if !is_interactive_terminal() {
        return;
    }
    let short_id = &env_id[..12.min(env_id.len())];
    eprintln!(
        "\x1b[1;36m[karapace]\x1b[0m entering \x1b[1m{image}\x1b[0m ({short_id}) as \x1b[1m{hostname}\x1b[0m"
    );
}
/// Print a colored "exited environment" notice to stderr (interactive
/// terminals only), using a 12-character id prefix.
pub fn print_container_exit(env_id: &str) {
    if !is_interactive_terminal() {
        return;
    }
    let short_id = &env_id[..12.min(env_id.len())];
    eprintln!("\x1b[1;36m[karapace]\x1b[0m exited environment {short_id}");
}
/// True when stderr is attached to a terminal; gates all marker/banner output.
#[allow(unsafe_code)]
fn is_interactive_terminal() -> bool {
    // SAFETY: isatty() is always safe — checks if fd is a terminal, no side effects.
    unsafe { libc::isatty(libc::STDERR_FILENO) != 0 }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: marker emission must be safe in any environment (output
    // is suppressed entirely when stderr is not a TTY).
    #[test]
    fn osc_markers_dont_panic() {
        // Just ensure these don't crash; output depends on terminal
        emit_container_push("abc123def456", "karapace-abc123def456");
        emit_container_pop();
    }
}