diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a80beeb3e..b72ff6b94 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -8,7 +8,26 @@ env: CARGO_TERM_COLOR: always jobs: + build-windows: + name: Windows Build + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: 1.71.0 + target: x86_64-pc-windows-gnu + override: true + - name: Install Cross + uses: brndnmtthws/rust-action-cargo-binstall@v1 + with: + packages: cross + - name: Build SPFS + run: | + make debug-spfs PLATFORM=windows + build-and-test: + name: Linux Build and Test runs-on: ubuntu-latest timeout-minutes: 45 container: diff --git a/Cargo.lock b/Cargo.lock index d38d37ca8..8addccc34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -244,12 +244,6 @@ dependencies = [ "mime", ] -[[package]] -name = "az" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" - [[package]] name = "backtrace" version = "0.3.66" @@ -1184,16 +1178,6 @@ dependencies = [ "regex", ] -[[package]] -name = "gmp-mpfr-sys" -version = "1.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cadbfa1c6e195c872db93b584e63b751f2fc86f59af8d0870d529118e8f1045" -dependencies = [ - "libc", - "winapi 0.3.9", -] - [[package]] name = "h2" version = "0.3.13" @@ -1822,6 +1806,28 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-format" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" +dependencies = [ + "arrayvec 0.7.2", + "itoa 1.0.6", + "num-bigint", +] + [[package]] name = "num-integer" version = "0.1.45" @@ -2631,17 +2637,6 @@ dependencies = [ "syn 1.0.98", ] -[[package]] -name = "rug" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55313a5bab6820d1439c0266db37f8084e50618d35d16d81f692eee14d8b01b9" -dependencies = [ - "az", - "gmp-mpfr-sys", - "libc", -] - [[package]] name = "rust-ini" version = "0.18.0" @@ -3123,6 +3118,7 @@ dependencies = [ "uuid", "walkdir", "whoami", + "windows", ] [[package]] @@ -3216,6 +3212,7 @@ dependencies = [ "futures", "hyper", "itertools", + "libc", "nix 0.26.2", "number_prefix", "procfs", @@ -3230,6 +3227,7 @@ dependencies = [ "tracing", "unix_mode", "url", + "windows", ] [[package]] @@ -3854,10 +3852,11 @@ dependencies = [ "dyn-clone", "futures", "itertools", + "num-bigint", + "num-format", "once_cell", "priority-queue", "rstest", - "rug", "sentry", "serde_json", "signal-hook", @@ -4870,6 +4869,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-sys" version = "0.36.1" diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 000000000..7d73426cc --- /dev/null +++ 
b/Cross.toml @@ -0,0 +1,2 @@ +[target.x86_64-pc-windows-gnu] +pre-build = ["apt-get install -y protobuf-compiler"] diff --git a/Makefile b/Makefile index 6f232a015..51bb7039b 100644 --- a/Makefile +++ b/Makefile @@ -6,12 +6,24 @@ CARGO_TARGET_DIR := $(shell \ then (grep target-dir .cargo/config.toml || echo target) | sed -sE 's|.*"(.*)".*|\1|'; \ else echo target; \ fi) +CARGO ?= cargo + +spfs_packages = spfs,spfs-cli-main,spfs-cli-clean,spfs-cli-enter,spfs-cli-join,spfs-cli-render + +export PLATFORM ?= unix +ifeq ($(PLATFORM),windows) +CARGO_ARGS += --target x86_64-pc-windows-gnu +# swap cargo for cross when building for other platforms +CARGO = cross +else +spfs_packages := $(spfs_packages),spfs-cli-fuse,spfs-cli-monitor +endif comma := , cargo_features_arg = $(if $(FEATURES),--features $(FEATURES)) cargo_packages_arg := $(if $(CRATES),-p=$(CRATES)) cargo_packages_arg := $(subst $(comma), -p=,$(cargo_packages_arg)) - +cargo_packages_arg := $(if $(cargo_packages_arg),$(cargo_packages_arg),--workspace) # Create a file called "config.mak" to configure variables. -include config.mak @@ -41,37 +53,35 @@ clean: packages.clean .PHONY: lint lint: FEATURES?=server,spfs/server lint: - cargo +nightly fmt --check - cargo clippy --tests $(cargo_features_arg) $(cargo_packages_arg) -- -Dwarnings + $(CARGO) +nightly fmt --check + $(CARGO) clippy --tests $(cargo_features_arg) $(cargo_packages_arg) -- -Dwarnings env RUSTDOCFLAGS="-Dwarnings" cargo doc --no-deps $(cargo_features_arg) $(cargo_packages_arg) .PHONY: format format: - cargo +nightly fmt + $(CARGO) +nightly fmt .PHONY: build build: debug debug: cd $(SOURCE_ROOT) - cargo build --workspace $(cargo_features_arg) + $(CARGO) build $(cargo_packages_arg) $(cargo_features_arg) $(CARGO_ARGS) debug-spfs: - cd $(SOURCE_ROOT) - cargo build -p spfs -p spfs-cli-fuse -p spfs-cli-main -p spfs-cli-clean -p spfs-cli-enter -p spfs-cli-join -p spfs-cli-monitor -p spfs-cli-render $(cargo_features_arg) + $(MAKE) debug CRATES=$(spfs_packages) release: cd $(SOURCE_ROOT) - cargo build --workspace --release $(cargo_features_arg) + $(CARGO) build --release $(cargo_packages_arg) $(cargo_features_arg) $(CARGO_ARGS) release-spfs: - cd $(SOURCE_ROOT) - cargo build --release -p spfs -p spfs-cli-fuse -p spfs-cli-main -p spfs-cli-clean -p spfs-cli-enter -p spfs-cli-join -p spfs-cli-monitor -p spfs-cli-render $(cargo_features_arg) + $(MAKE) release CRATES=$(spfs_packages) .PHONY: test test: FEATURES?=server,spfs/server test: - spfs run - -- cargo test --workspace $(cargo_features_arg) $(cargo_packages_arg) + spfs run - -- cargo test $(cargo_features_arg) $(cargo_packages_arg) .PHONY: converters converters: diff --git a/crates/spfs-cli/cmd-enter/src/cmd_enter.rs b/crates/spfs-cli/cmd-enter/src/cmd_enter.rs index 0e1528c38..e822f4eb5 100644 --- a/crates/spfs-cli/cmd-enter/src/cmd_enter.rs +++ b/crates/spfs-cli/cmd-enter/src/cmd_enter.rs @@ -122,6 +122,7 @@ impl CmdEnter { } } + #[cfg(unix)] pub async fn setup_runtime( &mut self, config: &spfs::Config, @@ -215,6 +216,14 @@ impl CmdEnter { } } + #[cfg(windows)] + pub async fn setup_runtime( + &mut self, + config: &spfs::Config, + ) -> Result> { + todo!() + } + async fn load_runtime(&self, config: &spfs::Config) -> Result { let repo = match &self.runtime_storage { Some(address) => spfs::open_repository(address).await?, diff --git a/crates/spfs-cli/cmd-fuse/Cargo.toml b/crates/spfs-cli/cmd-fuse/Cargo.toml index dd2f40843..c95a33fc7 100644 --- a/crates/spfs-cli/cmd-fuse/Cargo.toml +++ b/crates/spfs-cli/cmd-fuse/Cargo.toml @@ -21,12 
+21,14 @@ sentry = ["spfs-cli-common/sentry"] anyhow = { workspace = true } clap = { workspace = true } dashmap = { workspace = true } -fuser = { workspace = true } spfs-vfs = { path = "../../spfs-vfs" } nix = { workspace = true, features = ["process"] } libc = "0.2" -spfs = { path = "../../spfs" } +spfs = { path = "../../spfs", features = ["fuse-backend"] } spfs-cli-common = { path = "../common" } tokio = { version = "1.20", features = ["rt", "rt-multi-thread"] } tracing = { workspace = true } url = "2.2" + +[target.'cfg(unix)'.dependencies] +fuser = { workspace = true } diff --git a/crates/spfs-cli/cmd-join/src/cmd_join.rs b/crates/spfs-cli/cmd-join/src/cmd_join.rs index 5162b5163..200899f7a 100644 --- a/crates/spfs-cli/cmd-join/src/cmd_join.rs +++ b/crates/spfs-cli/cmd-join/src/cmd_join.rs @@ -54,7 +54,7 @@ impl CmdJoin { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build() - .map_err(|err| Error::process_spawn_error("new_current_thread()".into(), err, None))?; + .map_err(|err| Error::process_spawn_error("new_current_thread()", err, None))?; let spfs_runtime = rt.block_on(async { let storage = config.get_runtime_storage().await?; @@ -136,7 +136,7 @@ impl CmdJoin { tracing::debug!("{:?}", proc); Ok(proc .status() - .map_err(|err| Error::process_spawn_error("exec_runtime_command".into(), err, None))? + .map_err(|err| Error::process_spawn_error("exec_runtime_command", err, None))? .code() .unwrap_or(1)) } diff --git a/crates/spfs-cli/cmd-monitor/src/cmd_monitor.rs b/crates/spfs-cli/cmd-monitor/src/cmd_monitor.rs index bc5d74c2f..95803440c 100644 --- a/crates/spfs-cli/cmd-monitor/src/cmd_monitor.rs +++ b/crates/spfs-cli/cmd-monitor/src/cmd_monitor.rs @@ -139,11 +139,11 @@ impl CmdMonitor { pub async fn run_async(&mut self) -> Result { let mut interrupt = signal(SignalKind::interrupt()) - .map_err(|err| Error::process_spawn_error("signal()".into(), err, None))?; + .map_err(|err| Error::process_spawn_error("signal()", err, None))?; let mut quit = signal(SignalKind::quit()) - .map_err(|err| Error::process_spawn_error("signal()".into(), err, None))?; + .map_err(|err| Error::process_spawn_error("signal()", err, None))?; let mut terminate = signal(SignalKind::terminate()) - .map_err(|err| Error::process_spawn_error("signal()".into(), err, None))?; + .map_err(|err| Error::process_spawn_error("signal()", err, None))?; let repo = spfs::open_repository(&self.runtime_storage).await?; let storage = spfs::runtime::Storage::new(repo); diff --git a/crates/spfs-cli/common/src/args.rs b/crates/spfs-cli/common/src/args.rs index e1dfd541a..6537fb813 100644 --- a/crates/spfs-cli/common/src/args.rs +++ b/crates/spfs-cli/common/src/args.rs @@ -306,7 +306,7 @@ pub struct Logging { #[clap(long, global = true, env = "SPFS_LOG_FILE")] pub log_file: Option, - /// Enables logging to syslog (for background processes) + /// Enables logging to syslog (for background processes, unix only) #[clap(skip)] pub syslog: bool, @@ -363,6 +363,7 @@ impl Logging { let env_filter = move || tracing_subscriber::filter::EnvFilter::from(config.clone()); let fmt_layer = || tracing_subscriber::fmt::layer().with_target(self.show_target()); + #[cfg(unix)] let syslog_layer = self.syslog.then(|| { let identity = std::ffi::CStr::from_bytes_with_nul(b"spfs\0") .expect("identity value is valid CStr"); @@ -374,6 +375,8 @@ impl Logging { let layer = configure_timestamp!(layer, self.timestamp).with_filter(env_filter()); without_sentry_target!(layer) }); + #[cfg(windows)] + let syslog_layer = false.then(fmt_layer); let 
stderr_layer = { let layer = fmt_layer().with_writer(std::io::stderr); diff --git a/crates/spfs-cli/main/Cargo.toml b/crates/spfs-cli/main/Cargo.toml index 16b3a1216..5c9338143 100644 --- a/crates/spfs-cli/main/Cargo.toml +++ b/crates/spfs-cli/main/Cargo.toml @@ -22,9 +22,9 @@ colored = "2.0" futures = { workspace = true } hyper = { version = "0.14.16", optional = true } itertools = "0.10.3" +libc = { workspace = true } nix = { workspace = true } number_prefix = "*" # we hope to match versions with indicatif -procfs = { workspace = true } relative-path = "1.3" serde_json = { workspace = true } spfs = { path = "../../spfs" } @@ -36,3 +36,14 @@ tonic = { version = "0.8", optional = true } tracing = { workspace = true } unix_mode = "0.1.3" url = { version = "2.2", optional = true } + +[target.'cfg(unix)'.dependencies] +procfs = { workspace = true } + +[target.'cfg(windows)'.dependencies.windows] +version = "0.48" +features = [ + "Win32_Foundation", + "Win32_System_SystemInformation", + "Win32_System_Threading", +] diff --git a/crates/spfs-cli/main/src/bin.rs b/crates/spfs-cli/main/src/bin.rs index d0fe23a6a..d3b1783cd 100644 --- a/crates/spfs-cli/main/src/bin.rs +++ b/crates/spfs-cli/main/src/bin.rs @@ -155,38 +155,27 @@ async fn run_external_subcommand(args: Vec<String>) -> Result<i32> { Some(cmd) => cmd, None => { let mut p = std::env::current_exe() - .map_err(|err| Error::process_spawn_error("current_exe()".into(), err, None))?; + .map_err(|err| Error::process_spawn_error("current_exe()", err, None))?; p.set_file_name(&command); p } }; - let command_cstr = match std::ffi::CString::new(cmd_path.to_string_lossy().to_string()) { - Ok(s) => s, - Err(_) => { - tracing::error!("Invalid subcommand, not a valid string"); - return Ok(1); - } + + let cmd = spfs::bootstrap::Command { + executable: cmd_path.into(), + args: args.into_iter().skip(1).map(Into::into).collect(), + vars: Vec::new(), }; - let mut args_cstr = Vec::with_capacity(args.len()); - args_cstr.push(command_cstr.clone()); - for arg in args.iter().skip(1) { - args_cstr.push(match std::ffi::CString::new(arg.clone()) { - Ok(s) => s, - Err(_) => { - tracing::error!("Invalid argument, not a valid string"); - return Ok(1); - } - }) - } - if let Err(err) = nix::unistd::execvp(command_cstr.as_c_str(), args_cstr.as_slice()) { - match err { - nix::errno::Errno::ENOENT => { + + match cmd.exec() { + Ok(o) => match o {}, + Err(err) => match err.raw_os_error() { + Some(libc::ENOENT) => { tracing::error!("{command} not found in PATH, was it properly installed?") } _ => tracing::error!("subcommand failed: {err:?}"), - } - return Ok(1); + }, } - Ok(0) + Ok(1) } } diff --git a/crates/spfs-cli/main/src/cmd_runtime_prune.rs b/crates/spfs-cli/main/src/cmd_runtime_prune.rs index 4f41de4fd..778eeb3b5 100644 --- a/crates/spfs-cli/main/src/cmd_runtime_prune.rs +++ b/crates/spfs-cli/main/src/cmd_runtime_prune.rs @@ -7,6 +7,8 @@ use chrono::{Duration, Utc}; use clap::Args; use tokio_stream::StreamExt; +use super::cmd_runtime_remove::is_monitor_running; + /// Find and remove runtimes from the repository based on a pruning strategy #[derive(Debug, Args)] pub struct CmdRuntimePrune { @@ -51,6 +53,7 @@ impl CmdRuntimePrune { let default_author = spfs::runtime::Author::default(); + #[cfg(unix)] let boot_time = match procfs::Uptime::new() { Ok(uptime) => { Utc::now() @@ -63,6 +66,12 @@ impl CmdRuntimePrune { return Ok(1); } }; + #[cfg(windows)] + let boot_time = Utc::now() + - Duration::milliseconds(unsafe { + // Safety: this is a raw system API, but seems infallible nonetheless +
windows::Win32::System::SystemInformation::GetTickCount64() as i64 + }); let mut runtimes = runtime_storage.iter_runtimes().await; while let Some(runtime) = runtimes.next().await { @@ -129,23 +138,3 @@ impl CmdRuntimePrune { Ok(0) } } - -fn is_monitor_running(rt: &spfs::runtime::Runtime) -> bool { - if let Some(pid) = rt.status.monitor { - // we are blatantly ignoring the fact that this pid might - // have been reused and is not the monitor anymore. Given - // that there will always be a race condition to this effect - // even if we did try to check the command line args for this - // process. So we stick on the extra conservative side - is_process_running(pid) - } else { - false - } -} - -fn is_process_running(pid: u32) -> bool { - // sending a null signal to the pid just allows us to check - // if the process actually exists without affecting it - let pid = nix::unistd::Pid::from_raw(pid as i32); - nix::sys::signal::kill(pid, None).is_ok() -} diff --git a/crates/spfs-cli/main/src/cmd_runtime_remove.rs b/crates/spfs-cli/main/src/cmd_runtime_remove.rs index 937744f5f..85e2dc288 100644 --- a/crates/spfs-cli/main/src/cmd_runtime_remove.rs +++ b/crates/spfs-cli/main/src/cmd_runtime_remove.rs @@ -86,7 +86,7 @@ impl CmdRuntimeRemove { } } -fn is_monitor_running(rt: &spfs::runtime::Runtime) -> bool { +pub(crate) fn is_monitor_running(rt: &spfs::runtime::Runtime) -> bool { if let Some(pid) = rt.status.monitor { // we are blatantly ignoring the fact that this pid might // have been reused and is not the monitor anymore. Given @@ -99,9 +99,23 @@ fn is_monitor_running(rt: &spfs::runtime::Runtime) -> bool { } } +#[cfg(unix)] fn is_process_running(pid: u32) -> bool { // sending a null signal to the pid just allows us to check // if the process actually exists without affecting it let pid = nix::unistd::Pid::from_raw(pid as i32); nix::sys::signal::kill(pid, None).is_ok() } + +#[cfg(windows)] +fn is_process_running(pid: u32) -> bool { + // PROCESS_SYNCHRONIZE seems like the most limited access we can request, + // which simply allows us to wait on the PID + let access = windows::Win32::System::Threading::PROCESS_SYNCHRONIZE; + let result = unsafe { windows::Win32::System::Threading::OpenProcess(access, false, pid) }; + let Ok(handle) = result else { + return false; + }; + let _ = unsafe { windows::Win32::Foundation::CloseHandle(handle) }; + true +} diff --git a/crates/spfs-cli/main/src/cmd_write.rs b/crates/spfs-cli/main/src/cmd_write.rs index 3fac35e19..df2140103 100644 --- a/crates/spfs-cli/main/src/cmd_write.rs +++ b/crates/spfs-cli/main/src/cmd_write.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // https://github.com/imageworks/spk +#[cfg(unix)] use std::os::unix::prelude::PermissionsExt; use std::path::PathBuf; @@ -38,12 +39,15 @@ impl CmdWrite { let handle = tokio::fs::File::open(&file) .await .map_err(|err| Error::RuntimeWriteError(file.clone(), err))?; + #[cfg(unix)] let mode = handle .metadata() .await .map_err(|err| Error::RuntimeWriteError(file.clone(), err))? 
.permissions() .mode(); + #[cfg(windows)] + let mode = 0o644; Box::pin(tokio::io::BufReader::new(handle).with_permissions(mode)) } None => Box::pin(tokio::io::BufReader::new(tokio::io::stdin())), diff --git a/crates/spfs-vfs/Cargo.toml b/crates/spfs-vfs/Cargo.toml index 5a33c5bee..e3df54219 100644 --- a/crates/spfs-vfs/Cargo.toml +++ b/crates/spfs-vfs/Cargo.toml @@ -5,8 +5,8 @@ name = "spfs-vfs" version = "0.34.6" [features] -default = ["fuser/abi-7-22"] -fuse-backend = ["spfs/fuse-backend"] +default = ["fuse-backend-abi-7-22"] +fuse-backend = ["spfs/fuse-backend", "dep:fuser"] fuse-backend-abi-7-22 = ["fuse-backend", "fuser/abi-7-22"] fuse-backend-abi-7-25 = ["fuse-backend-abi-7-22", "fuser/abi-7-25"] fuse-backend-abi-7-28 = ["fuse-backend-abi-7-25", "fuser/abi-7-28"] @@ -18,10 +18,12 @@ fuse-backend-rhel-7-9 = ["fuse-backend-abi-7-31"] anyhow = { workspace = true } clap = { workspace = true } dashmap = { workspace = true } -fuser = { workspace = true } nix = { workspace = true, features = ["process"] } libc = "0.2" spfs = { path = "../spfs" } tokio = { version = "1.20", features = ["rt", "rt-multi-thread"] } tracing = { workspace = true } url = "2.2" + +[target.'cfg(unix)'.dependencies] +fuser = { workspace = true, optional = true } \ No newline at end of file diff --git a/crates/spfs-vfs/src/lib.rs b/crates/spfs-vfs/src/lib.rs index 71b9685bd..67e5b135c 100644 --- a/crates/spfs-vfs/src/lib.rs +++ b/crates/spfs-vfs/src/lib.rs @@ -10,6 +10,8 @@ #![deny(unsafe_op_in_unsafe_fn)] #![warn(clippy::fn_params_excessive_bools)] +#[cfg(unix)] mod fuse; +#[cfg(unix)] pub use fuse::{Config, Session}; diff --git a/crates/spfs/Cargo.toml b/crates/spfs/Cargo.toml index 2fffcb925..f3822bc2f 100644 --- a/crates/spfs/Cargo.toml +++ b/crates/spfs/Cargo.toml @@ -5,7 +5,7 @@ name = "spfs" version = "0.34.6" [features] -default = ["fuse-backend"] +default = [] # If enabled, will create the "local" repository in a subdirectory # of the standard storage root, named "ci/pipeline_${CI_PIPELINE_ID}". 
@@ -21,7 +21,6 @@ async-trait = "0.1.52" async-recursion = "1.0" async-stream = "0.3" cached = { workspace = true } -caps = "0.5.3" chrono = { workspace = true } close-err = "1.0" colored = "2.0" @@ -29,7 +28,6 @@ config = { workspace = true } dashmap = { workspace = true } data-encoding = "2.3" dirs = { workspace = true } -fuser = { workspace = true, optional = true } faccess = "0.2.3" futures = { workspace = true } gitignore = "1.0" @@ -42,7 +40,6 @@ nix = { workspace = true } nonempty = "0.8.1" once_cell = { workspace = true } pin-project-lite = { workspace = true } -procfs = { workspace = true } progress_bar_derive_macro = { workspace = true } prost = "0.11" rand = "0.8.5" @@ -81,6 +78,15 @@ walkdir = "2.3" whoami = { workspace = true } thiserror = { workspace = true } + +[target.'cfg(unix)'.dependencies] +fuser = { workspace = true, optional = true } +procfs = { workspace = true } +caps = "0.5.3" + +[target.'cfg(windows)'.dependencies.windows] +version = "0.48" +features = ["Win32_Storage_FileSystem", "Win32_Foundation"] + [build-dependencies] protobuf-src = { version = "1.0.5", optional = true } # protoc @ 3.19.3 tonic-build = "0.8" diff --git a/crates/spfs/src/bootstrap.rs b/crates/spfs/src/bootstrap.rs index 8253f666b..47a6e5717 100644 --- a/crates/spfs/src/bootstrap.rs +++ b/crates/spfs/src/bootstrap.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // https://github.com/imageworks/spk use std::ffi::{CString, OsStr, OsString}; -use std::os::unix::prelude::OsStringExt; use std::path::{Path, PathBuf}; use super::resolve::{which, which_spfs}; @@ -56,8 +55,10 @@ impl Command { /// Upon success, this function will never return. Upon /// error, the current process' environment will have been updated /// to that of this command, and caution should be taken. - #[cfg(target_os = "linux")] + #[cfg(unix)] pub fn exec(self) -> Result<std::convert::Infallible> { + use std::os::unix::prelude::OsStringExt; + tracing::debug!("{self:#?}"); // ensure that all components of this command are utilized let Self { @@ -77,6 +78,46 @@ impl Command { } nix::unistd::execv(&argv[0], argv.as_slice()).map_err(crate::Error::from) } + + /// Execute this command, replacing the current program. + /// + /// Upon success, this function will never return. Upon + /// error, the current process' environment will have been updated + /// to that of this command, and caution should be taken. + #[cfg(windows)] + pub fn exec(self) -> Result<std::convert::Infallible> { + use std::os::windows::prelude::OsStrExt; + + tracing::debug!("{self:#?}"); + // ensure that all components of this command are utilized + let Self { + executable, + args, + vars, + } = self; + let exe: Vec<_> = executable.encode_wide().collect(); + let mut argv = Vec::with_capacity(args.len() + 1); + argv.push(exe[0] as *const u16); + let args = args + .into_iter() + .map(|a| a.encode_wide().collect::<Vec<u16>>()) + .collect::<Vec<_>>(); + argv.extend(args.iter().map(|a| &a[0] as *const u16)); + for (name, value) in vars { + // set the environment to be inherited by the new process + std::env::set_var(name, value); + } + unsafe { + // Safety: this is a low-level operating system call but we + // trust that source OsStrings will be valid for this call + libc::wexecv(argv[0], &argv[0] as *const *const u16); + } + Err(Error::process_spawn_error( + "exec'd runtime process", + std::io::Error::last_os_error(), + None, + )) + } } /// Construct a bootstrap command.
diff --git a/crates/spfs/src/bootstrap_test.rs b/crates/spfs/src/bootstrap_test.rs index 49ea2cbc4..1567aaaba 100644 --- a/crates/spfs/src/bootstrap_test.rs +++ b/crates/spfs/src/bootstrap_test.rs @@ -149,6 +149,7 @@ async fn test_shell_initialization_no_startup_scripts(shell: &str, tmpdir: tempf assert_eq!(out.stdout, "\n".as_bytes()); } +#[cfg(unix)] #[rstest(shell, case("bash"), case("tcsh"))] #[tokio::test] #[serial_test::serial] // env manipulation must be reliable @@ -170,6 +171,7 @@ async fn test_find_alternate_bash(shell: &str, tmpdir: tempfile::TempDir) { std::env::set_var("SHELL", original_shell); } +#[cfg(unix)] fn make_exe(path: &std::path::Path) { use std::os::unix::fs::PermissionsExt; let file = std::fs::File::create(path).unwrap(); diff --git a/crates/spfs/src/clean.rs b/crates/spfs/src/clean.rs index e803e787c..8c8ee2a2c 100644 --- a/crates/spfs/src/clean.rs +++ b/crates/spfs/src/clean.rs @@ -5,6 +5,7 @@ use std::collections::{HashMap, HashSet}; use std::fmt::Write; use std::future::ready; +#[cfg(unix)] use std::os::linux::fs::MetadataExt; use chrono::{DateTime, Duration, Local, Utc}; @@ -668,7 +669,10 @@ where let mtime = meta.modified().map_err(|err| { Error::StorageReadError("modified time on proxy file", path.clone(), err) })?; + #[cfg(unix)] let has_hardlinks = meta.st_nlink() > 1; + #[cfg(windows)] + let has_hardlinks = todo!(); let is_old_enough = DateTime::::from(mtime) < self.must_be_older_than; if has_hardlinks || !is_old_enough { Ok(None) diff --git a/crates/spfs/src/env.rs b/crates/spfs/src/env.rs index 41a72df01..cebec0c08 100644 --- a/crates/spfs/src/env.rs +++ b/crates/spfs/src/env.rs @@ -432,7 +432,7 @@ where cmd.arg("none"); cmd.arg(SPFS_DIR); match cmd.status().await { - Err(err) => Err(Error::process_spawn_error("mount".to_owned(), err, None)), + Err(err) => Err(Error::process_spawn_error("mount", err, None)), Ok(status) => match status.code() { Some(0) => Ok(()), _ => Err("Failed to mount overlayfs".into()), @@ -480,7 +480,7 @@ where cmd.stdout(std::process::Stdio::null()); tracing::debug!("{cmd:?}"); match cmd.status() { - Err(err) => return Err(Error::process_spawn_error("mount".to_owned(), err, None)), + Err(err) => return Err(Error::process_spawn_error("mount", err, None)), Ok(status) if status.code() == Some(0) => {} Ok(status) => { return Err(Error::String(format!( @@ -636,8 +636,8 @@ where self.unmount_env_fuse(rt, lazy).await?; match rt.config.mount_backend { - runtime::MountBackend::FuseOnly => { - // a fuse-only runtime cannot be unmounted this way + runtime::MountBackend::FuseOnly | runtime::MountBackend::WinFsp => { + // a vfs-only runtime cannot be unmounted this way // and should already be handled by a previous call to // unmount_env_fuse return Ok(()); @@ -667,7 +667,9 @@ where let mount_path = match rt.config.mount_backend { runtime::MountBackend::OverlayFsWithFuse => rt.config.lower_dir.as_path(), runtime::MountBackend::FuseOnly => std::path::Path::new(SPFS_DIR), - runtime::MountBackend::OverlayFsWithRenders => return Ok(()), + runtime::MountBackend::OverlayFsWithRenders | runtime::MountBackend::WinFsp => { + return Ok(()) + } }; tracing::debug!(%lazy, "unmounting existing fuse env @ {mount_path:?}..."); diff --git a/crates/spfs/src/env_win.rs b/crates/spfs/src/env_win.rs new file mode 100644 index 000000000..cc3b9fa0f --- /dev/null +++ b/crates/spfs/src/env_win.rs @@ -0,0 +1,20 @@ +use crate::{runtime, Result}; + +/// Manages the configuration of an spfs runtime environment. 
+/// +/// Specifically things like privilege escalation, mount namespace, +/// filesystem mounts, etc. +#[derive(Default)] +pub struct RuntimeConfigurator; + +impl RuntimeConfigurator { + /// Make this configurator for an existing runtime. + pub fn current_runtime(self, _rt: &runtime::Runtime) -> Result { + todo!() + } + + /// Move this process into the namespace of an existing runtime + pub fn join_runtime(self, _rt: &runtime::Runtime) -> Result { + todo!() + } +} diff --git a/crates/spfs/src/error.rs b/crates/spfs/src/error.rs index a778d824b..84f3c88e2 100644 --- a/crates/spfs/src/error.rs +++ b/crates/spfs/src/error.rs @@ -12,6 +12,7 @@ use crate::encoding; pub enum Error { #[error("{0}")] String(String), + #[cfg(unix)] #[error(transparent)] Nix(#[from] nix::Error), #[error("[ERRNO {1}] {0}")] @@ -30,6 +31,7 @@ pub enum Error { InvalidDateTime(#[from] chrono::ParseError), #[error("Invalid path {0}")] InvalidPath(std::path::PathBuf, #[source] io::Error), + #[cfg(unix)] #[error(transparent)] Caps(#[from] caps::errors::CapsError), #[error(transparent)] @@ -102,7 +104,7 @@ pub enum Error { #[error("Command, arguments or environment contained a nul byte, this is not supported")] CommandHasNul(#[source] std::ffi::NulError), - #[cfg(target_os = "linux")] + #[cfg(unix)] #[error("OverlayFS kernel module does not appear to be installed")] OverlayFSNotInstalled, @@ -120,6 +122,7 @@ impl Error { Error::Errno(msg, errno) } + #[cfg(unix)] pub fn wrap_nix<E: Into<String>>(err: nix::Error, prefix: E) -> Error { let err = Self::from(err); err.wrap(prefix) @@ -152,17 +155,21 @@ impl Error { Error::StorageReadError(_, _, err) => handle_io_error(err), Error::StorageWriteError(_, _, err) => handle_io_error(err), Error::Errno(_, errno) => Some(*errno), + #[cfg(unix)] Error::Nix(err) => Some(*err as i32), _ => None, } } /// Create an `Error::ProcessSpawnError` with context. - pub fn process_spawn_error( - process_description: String, + pub fn process_spawn_error<S>( + process_description: S, err: std::io::Error, current_dir: Option<std::path::PathBuf>, - ) -> Error { + ) -> Error + where + S: std::fmt::Display + Into<String>, + { // A common problem with launching a sub-process is that the specified // current working directory doesn't exist.
match (err.kind(), current_dir) { @@ -177,7 +184,7 @@ impl Error { } _ => {} } - Error::ProcessSpawnError(process_description, err) + Error::ProcessSpawnError(process_description.into(), err) } } diff --git a/crates/spfs/src/lib.rs b/crates/spfs/src/lib.rs index 6448a57cc..b50b705fa 100644 --- a/crates/spfs/src/lib.rs +++ b/crates/spfs/src/lib.rs @@ -12,16 +12,18 @@ pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[cfg(test)] pub mod fixtures; -mod bootstrap; +pub mod bootstrap; pub mod check; pub mod clean; pub mod commit; pub mod config; mod diff; +#[cfg_attr(windows, path = "./env_win.rs")] pub mod env; mod error; pub mod graph; pub mod io; +#[cfg_attr(windows, path = "./monitor_win.rs")] pub mod monitor; pub mod prelude; pub mod proto; diff --git a/crates/spfs/src/monitor.rs b/crates/spfs/src/monitor.rs index 2bd10c444..cbceb587b 100644 --- a/crates/spfs/src/monitor.rs +++ b/crates/spfs/src/monitor.rs @@ -34,6 +34,10 @@ enum PidEvent { /// The monitor command will spawn but immediately fail /// if there is already a monitor registered to this runtime pub fn spawn_monitor_for_runtime(rt: &runtime::Runtime) -> Result { + if rt.config.mount_backend.is_winfsp() { + todo!("No monitor implementation for winfsp mounts"); + } + let exe = match super::resolve::which_spfs("monitor") { None => return Err(Error::MissingBinary("spfs")), Some(exe) => exe, @@ -58,6 +62,7 @@ pub fn spawn_monitor_for_runtime(rt: &runtime::Runtime) -> Result Result Result { + todo!() +} + +/// When provided an active runtime, wait until all contained processes exit +/// +/// This is a privileged operation that may fail with a permission +/// issue if the calling process is not root or CAP_NET_ADMIN +pub async fn wait_for_empty_runtime(_rt: &runtime::Runtime) -> Result<()> { + todo!() +} + +/// Identify the mount namespace of the provided process id. +/// +/// Return None if the pid is not found. +pub async fn identify_mount_namespace_of_process(_pid: u32) -> Result> { + todo!() +} + +/// Return an inventory of all known pids and their mount namespaces. +pub async fn find_processes_and_mount_namespaces() -> Result>> { + todo!() +} diff --git a/crates/spfs/src/resolve.rs b/crates/spfs/src/resolve.rs index ce4b1be3e..fa49f8371 100644 --- a/crates/spfs/src/resolve.rs +++ b/crates/spfs/src/resolve.rs @@ -48,7 +48,7 @@ async fn render_via_subcommand(spec: tracking::EnvSpec) -> Result { if let Ok(render_result) = @@ -63,7 +63,7 @@ async fn render_via_subcommand(spec: tracking::EnvSpec) -> Result Err(Error::process_spawn_error( - "spfs-render".to_owned(), + "spfs-render", std::io::Error::new( std::io::ErrorKind::Other, "process exited with non-zero status", @@ -237,6 +237,7 @@ pub(crate) async fn resolve_overlay_dirs( // Determine if layers need to be combined to stay within the length limits // of mount args. + #[cfg(unix)] loop { let mut overlay_dirs = Vec::with_capacity(manifests.len()); for manifest in &manifests { diff --git a/crates/spfs/src/runtime/mod.rs b/crates/spfs/src/runtime/mod.rs index 243627cb7..366d182df 100644 --- a/crates/spfs/src/runtime/mod.rs +++ b/crates/spfs/src/runtime/mod.rs @@ -4,11 +4,15 @@ //! 
Handles the setup and initialization of runtime environments +#[cfg(unix)] pub mod overlayfs; mod startup_csh; mod startup_sh; mod storage; +#[cfg(windows)] +pub mod winfsp; +#[cfg(unix)] pub use overlayfs::is_removed_entry; pub use storage::{ makedirs_with_perms, @@ -22,3 +26,5 @@ pub use storage::{ Storage, STARTUP_FILES_LOCATION, }; +#[cfg(windows)] +pub use winfsp::is_removed_entry; diff --git a/crates/spfs/src/runtime/overlayfs.rs b/crates/spfs/src/runtime/overlayfs.rs index 209b2db06..0ba0c131c 100644 --- a/crates/spfs/src/runtime/overlayfs.rs +++ b/crates/spfs/src/runtime/overlayfs.rs @@ -39,7 +39,7 @@ fn query_overlayfs_available_options() -> Result> { let output = std::process::Command::new("/sbin/modinfo") .arg("overlay") .output() - .map_err(|err| Error::process_spawn_error("/sbin/modinfo".into(), err, None))?; + .map_err(|err| Error::process_spawn_error("/sbin/modinfo", err, None))?; if output.status.code().unwrap_or(1) != 0 { return Err(Error::OverlayFSNotInstalled); diff --git a/crates/spfs/src/runtime/storage.rs b/crates/spfs/src/runtime/storage.rs index 40137d771..7aacc8b08 100644 --- a/crates/spfs/src/runtime/storage.rs +++ b/crates/spfs/src/runtime/storage.rs @@ -5,7 +5,10 @@ //! Definition and persistent storage of runtimes. use std::collections::HashSet; +#[cfg(unix)] use std::os::unix::fs::{MetadataExt, PermissionsExt}; +#[cfg(windows)] +use std::os::windows::fs::MetadataExt; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::sync::Arc; @@ -205,13 +208,17 @@ pub enum MountBackend { /// Renders each layer to a folder on disk, before mounting /// the whole stack as lower directories in overlayfs. Edits /// are stored in the overlayfs upper directory. - #[default] + #[cfg_attr(unix, default)] OverlayFsWithRenders, // Mounts a since fuse filesystem as the lower directory to // overlayfs, using the overlayfs upper directory for edits OverlayFsWithFuse, // Mounts a fuse filesystem directly FuseOnly, + /// Leverages the win file system protocol system to present + /// dynamic file system entries to runtime processes + #[cfg_attr(windows, default)] + WinFsp, } impl MountBackend { @@ -227,6 +234,10 @@ impl MountBackend { matches!(self, Self::FuseOnly) } + pub fn is_winfsp(&self) -> bool { + matches!(self, Self::WinFsp) + } + /// Reports whether this mount backend requires that all /// data be synced to the local repository before being executed pub fn requires_localization(&self) -> bool { @@ -234,6 +245,7 @@ impl MountBackend { Self::OverlayFsWithRenders => true, Self::OverlayFsWithFuse => false, Self::FuseOnly => false, + Self::WinFsp => false, } } } @@ -435,14 +447,23 @@ impl Runtime { /// Return true if the upper dir of this runtime has changes. 
pub fn is_dirty(&self) -> bool { - match std::fs::metadata(&self.config.upper_dir) { - Ok(meta) => meta.size() != 0, - Err(err) => { - // Treating other error types as dirty is not strictly - // accurate, but it is not worth the trouble of needing - // to return an error from this function - !matches!(err.kind(), std::io::ErrorKind::NotFound) + match self.config.mount_backend { + MountBackend::OverlayFsWithFuse | MountBackend::OverlayFsWithRenders => { + match std::fs::metadata(&self.config.upper_dir) { + #[cfg(unix)] + Ok(meta) => meta.size() != 0, + #[cfg(windows)] + Ok(meta) => meta.file_size() != 0, + Err(err) => { + // Treating other error types as dirty is not strictly + // accurate, but it is not worth the trouble of needing + // to return an error from this function + !matches!(err.kind(), std::io::ErrorKind::NotFound) + } + } } + MountBackend::FuseOnly => false, + MountBackend::WinFsp => todo!(), } } @@ -726,6 +747,7 @@ fn runtime_tag( /// Recursively create the given directory with the appropriate permissions. pub fn makedirs_with_perms>(dirname: P, perms: u32) -> Result<()> { let dirname = dirname.as_ref(); + #[cfg(unix)] let perms = std::fs::Permissions::from_mode(perms); let mut path = PathBuf::from("/"); for component in dirname.components() { @@ -755,6 +777,7 @@ pub fn makedirs_with_perms>(dirname: P, perms: u32) -> Result<()> } // not fatal, so it's worth allowing things to continue // even though it could cause permission issues later on + #[cfg(unix)] let _ = std::fs::set_permissions(&path, perms.clone()); } } diff --git a/crates/spfs/src/runtime/winfsp.rs b/crates/spfs/src/runtime/winfsp.rs new file mode 100644 index 000000000..848f93113 --- /dev/null +++ b/crates/spfs/src/runtime/winfsp.rs @@ -0,0 +1,9 @@ +// Copyright (c) Sony Pictures Imageworks, et al. +// SPDX-License-Identifier: Apache-2.0 +// https://github.com/imageworks/spk + +pub fn is_removed_entry(meta: &std::fs::Metadata) -> bool { + // WinFSP does not have a working directory that stores whiteout files (yet) + // so this function always returns false + false +} diff --git a/crates/spfs/src/status.rs b/crates/spfs/src/status.rs index 35ef15bd4..dbdc6ed25 100644 --- a/crates/spfs/src/status.rs +++ b/crates/spfs/src/status.rs @@ -2,11 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // https://github.com/imageworks/spk +#[cfg_attr(unix, path = "./status_unix.rs")] +#[cfg_attr(windows, path = "./status_win.rs")] +mod os; + +pub use os::*; + use super::config::get_config; -use super::resolve::{resolve_and_render_overlay_dirs, RenderResult}; -use crate::storage::fs::RenderSummary; use crate::storage::FromConfig; -use crate::{bootstrap, env, runtime, tracking, Error, Result}; +use crate::{runtime, tracking, Error, Result}; static SPFS_RUNTIME: &str = "SPFS_RUNTIME"; @@ -29,53 +33,6 @@ pub async fn make_active_runtime_editable() -> Result<()> { remount_runtime(&rt).await } -/// Remount the given runtime as configured. -pub async fn remount_runtime(rt: &runtime::Runtime) -> Result<()> { - let command = bootstrap::build_spfs_remount_command(rt)?; - // Not using `tokio::process` here because it relies on `SIGCHLD` to know - // when the process is done, which can be unreliable if something else - // is trapping signals, like the tarpaulin code coverage tool. - let mut cmd = std::process::Command::new(command.executable); - cmd.args(command.args); - tracing::debug!("{:?}", cmd); - let res = tokio::task::spawn_blocking(move || cmd.status()) - .await? 
- .map_err(|err| Error::process_spawn_error("spfs-enter --remount".to_owned(), err, None))?; - if res.code() != Some(0) { - Err(Error::String(format!( - "Failed to re-mount runtime filesystem: spfs-enter --remount failed with code {:?}", - res.code() - ))) - } else { - Ok(()) - } -} - -/// Exit the given runtime as configured, this should only ever be called with the active runtime -pub async fn exit_runtime(rt: &runtime::Runtime) -> Result<()> { - let command = bootstrap::build_spfs_exit_command(rt)?; - // Not using `tokio::process` here because it relies on `SIGCHLD` to know - // when the process is done, which can be unreliable if something else - // is trapping signals, like the tarpaulin code coverage tool. - let mut cmd = std::process::Command::new(command.executable); - cmd.args(command.args); - cmd.stderr(std::process::Stdio::piped()); - tracing::debug!("{:?}", cmd); - let res = tokio::task::spawn_blocking(move || cmd.output()) - .await? - .map_err(|err| Error::process_spawn_error("spfs-enter --exit".to_owned(), err, None))?; - if res.status.code() != Some(0) { - let out = String::from_utf8_lossy(&res.stderr); - Err(Error::String(format!( - "Failed to tear-down runtime filesystem: spfs-enter --exit failed with code {:?}: {}", - res.status.code(), - out.trim() - ))) - } else { - Ok(()) - } -} - /// Get the repository that is being used as the backing storage for the given /// runtime. /// @@ -124,128 +81,3 @@ pub async fn active_runtime() -> Result { let storage = config.get_runtime_storage().await?; storage.read_runtime(name).await } - -/// Reinitialize the current spfs runtime as rt (in case of runtime config changes). -/// -/// This function will run blocking IO on the current thread. Although this is not ideal, -/// the mount namespacing operated per-thread and so restricts our ability to move execution. -pub async fn reinitialize_runtime(rt: &mut runtime::Runtime) -> Result { - let render_result = match rt.config.mount_backend { - runtime::MountBackend::OverlayFsWithRenders => { - resolve_and_render_overlay_dirs(rt, false).await? - } - runtime::MountBackend::OverlayFsWithFuse | runtime::MountBackend::FuseOnly => { - // fuse uses the lowerdir that's defined in the runtime - // config, which is implicitly added to all overlay mounts - Default::default() - } - }; - - let in_namespace = env::RuntimeConfigurator::default().current_runtime(rt)?; - - tracing::debug!("computing runtime manifest"); - let manifest = compute_runtime_manifest(rt).await?; - in_namespace.ensure_mounts_already_exist().await?; - const LAZY: bool = true; // because we are about to re-mount over it - let with_root = in_namespace.become_root()?; - with_root.unmount_env(rt, LAZY).await?; - match rt.config.mount_backend { - runtime::MountBackend::OverlayFsWithRenders => { - with_root - .mount_env_overlayfs(rt, &render_result.paths_rendered) - .await?; - with_root.mask_files(&rt.config, manifest).await?; - } - #[cfg(feature = "fuse-backend")] - runtime::MountBackend::OverlayFsWithFuse => { - // Switch to using a different lower_dir otherwise if we use the - // same one as the previous runtime when it lazy unmounts it will - // unmount our active lower_dir. 
- rt.rotate_lower_dir().await?; - rt.save_state_to_storage().await?; - - with_root.mount_fuse_lower_dir(rt).await?; - with_root - .mount_env_overlayfs(rt, &render_result.paths_rendered) - .await?; - } - #[cfg(feature = "fuse-backend")] - runtime::MountBackend::FuseOnly => { - with_root.mount_env_fuse(rt).await?; - } - #[allow(unreachable_patterns)] - _ => { - return Err(Error::String(format!( - "This binary was not compiled with support for {}", - rt.config.mount_backend - ))) - } - } - with_root.become_original_user()?; - Ok(render_result.render_summary) -} - -/// Initialize the current runtime as rt. -/// -/// This function will run blocking IO on the current thread. Although this is not ideal, -/// the mount namespacing operated per-thread and so restricts our ability to move execution. -pub async fn initialize_runtime(rt: &mut runtime::Runtime) -> Result { - let render_result = match rt.config.mount_backend { - runtime::MountBackend::OverlayFsWithRenders => { - resolve_and_render_overlay_dirs( - rt, - // skip saving the runtime in this step because we will save it after - // learning the mount namespace below - true, - ) - .await? - } - runtime::MountBackend::OverlayFsWithFuse | runtime::MountBackend::FuseOnly => { - // fuse uses the lowerdir that's defined in the runtime - // config, which is implicitly added to all overlay mounts - RenderResult::default() - } - }; - tracing::debug!("computing runtime manifest"); - let manifest = compute_runtime_manifest(rt).await?; - - let in_namespace = env::RuntimeConfigurator::default().enter_mount_namespace()?; - rt.config.mount_namespace = Some(in_namespace.mount_namespace().to_path_buf()); - rt.save_state_to_storage().await?; - - let with_root = in_namespace.become_root()?; - with_root.privatize_existing_mounts().await?; - with_root.ensure_mount_targets_exist(&rt.config)?; - match rt.config.mount_backend { - runtime::MountBackend::OverlayFsWithRenders => { - with_root.mount_runtime(&rt.config)?; - with_root.setup_runtime(rt).await?; - with_root - .mount_env_overlayfs(rt, &render_result.paths_rendered) - .await?; - with_root.mask_files(&rt.config, manifest).await?; - } - #[cfg(feature = "fuse-backend")] - runtime::MountBackend::OverlayFsWithFuse => { - with_root.mount_runtime(&rt.config)?; - with_root.setup_runtime(rt).await?; - with_root.mount_fuse_lower_dir(rt).await?; - with_root - .mount_env_overlayfs(rt, &render_result.paths_rendered) - .await?; - } - #[cfg(feature = "fuse-backend")] - runtime::MountBackend::FuseOnly => { - with_root.mount_env_fuse(rt).await?; - } - #[allow(unreachable_patterns)] - _ => { - return Err(Error::String(format!( - "This binary was not compiled with support for {}", - rt.config.mount_backend - ))) - } - } - with_root.become_original_user()?; - Ok(render_result.render_summary) -} diff --git a/crates/spfs/src/status_unix.rs b/crates/spfs/src/status_unix.rs new file mode 100644 index 000000000..2c1a1c68a --- /dev/null +++ b/crates/spfs/src/status_unix.rs @@ -0,0 +1,183 @@ +// Copyright (c) Sony Pictures Imageworks, et al. +// SPDX-License-Identifier: Apache-2.0 +// https://github.com/imageworks/spk + +use crate::resolve::{resolve_and_render_overlay_dirs, RenderResult}; +use crate::storage::fs::RenderSummary; +use crate::{bootstrap, env, runtime, Error, Result}; + +/// Remount the given runtime as configured. 
+pub async fn remount_runtime(rt: &runtime::Runtime) -> Result<()> { + let command = bootstrap::build_spfs_remount_command(rt)?; + // Not using `tokio::process` here because it relies on `SIGCHLD` to know + // when the process is done, which can be unreliable if something else + // is trapping signals, like the tarpaulin code coverage tool. + let mut cmd = std::process::Command::new(command.executable); + cmd.args(command.args); + tracing::debug!("{:?}", cmd); + let res = tokio::task::spawn_blocking(move || cmd.status()) + .await? + .map_err(|err| Error::process_spawn_error("spfs-enter --remount", err, None))?; + if res.code() != Some(0) { + Err(Error::String(format!( + "Failed to re-mount runtime filesystem: spfs-enter --remount failed with code {:?}", + res.code() + ))) + } else { + Ok(()) + } +} + +/// Exit the given runtime as configured, this should only ever be called with the active runtime +pub async fn exit_runtime(rt: &runtime::Runtime) -> Result<()> { + let command = bootstrap::build_spfs_exit_command(rt)?; + // Not using `tokio::process` here because it relies on `SIGCHLD` to know + // when the process is done, which can be unreliable if something else + // is trapping signals, like the tarpaulin code coverage tool. + let mut cmd = std::process::Command::new(command.executable); + cmd.args(command.args); + cmd.stderr(std::process::Stdio::piped()); + tracing::debug!("{:?}", cmd); + let res = tokio::task::spawn_blocking(move || cmd.output()) + .await? + .map_err(|err| Error::process_spawn_error("spfs-enter --exit", err, None))?; + if res.status.code() != Some(0) { + let out = String::from_utf8_lossy(&res.stderr); + Err(Error::String(format!( + "Failed to tear-down runtime filesystem: spfs-enter --exit failed with code {:?}: {}", + res.status.code(), + out.trim() + ))) + } else { + Ok(()) + } +} + +/// Reinitialize the current spfs runtime as rt (in case of runtime config changes). +/// +/// This function will run blocking IO on the current thread. Although this is not ideal, +/// the mount namespacing operated per-thread and so restricts our ability to move execution. +pub async fn reinitialize_runtime(rt: &mut runtime::Runtime) -> Result { + let render_result = match rt.config.mount_backend { + runtime::MountBackend::OverlayFsWithRenders => { + resolve_and_render_overlay_dirs(rt, false).await? + } + runtime::MountBackend::OverlayFsWithFuse + | runtime::MountBackend::FuseOnly + | runtime::MountBackend::WinFsp => { + // fuse uses the lowerdir that's defined in the runtime + // config, which is implicitly added to all overlay mounts + Default::default() + } + }; + + let in_namespace = env::RuntimeConfigurator::default().current_runtime(rt)?; + + tracing::debug!("computing runtime manifest"); + let manifest = super::compute_runtime_manifest(rt).await?; + in_namespace.ensure_mounts_already_exist().await?; + const LAZY: bool = true; // because we are about to re-mount over it + let with_root = in_namespace.become_root()?; + with_root.unmount_env(rt, LAZY).await?; + match rt.config.mount_backend { + runtime::MountBackend::OverlayFsWithRenders => { + with_root + .mount_env_overlayfs(rt, &render_result.paths_rendered) + .await?; + with_root.mask_files(&rt.config, manifest).await?; + } + #[cfg(feature = "fuse-backend")] + runtime::MountBackend::OverlayFsWithFuse => { + // Switch to using a different lower_dir otherwise if we use the + // same one as the previous runtime when it lazy unmounts it will + // unmount our active lower_dir. 
+ rt.rotate_lower_dir().await?; + rt.save_state_to_storage().await?; + + with_root.mount_fuse_lower_dir(rt).await?; + with_root + .mount_env_overlayfs(rt, &render_result.paths_rendered) + .await?; + } + #[cfg(feature = "fuse-backend")] + runtime::MountBackend::FuseOnly => { + with_root.mount_env_fuse(rt).await?; + } + #[allow(unreachable_patterns)] + _ => { + return Err(Error::String(format!( + "This binary was not compiled with support for {}", + rt.config.mount_backend + ))) + } + } + with_root.become_original_user()?; + Ok(render_result.render_summary) +} + +/// Initialize the current runtime as rt. +/// +/// This function will run blocking IO on the current thread. Although this is not ideal, +/// the mount namespacing operated per-thread and so restricts our ability to move execution. +pub async fn initialize_runtime(rt: &mut runtime::Runtime) -> Result { + let render_result = match rt.config.mount_backend { + runtime::MountBackend::OverlayFsWithRenders => { + resolve_and_render_overlay_dirs( + rt, + // skip saving the runtime in this step because we will save it after + // learning the mount namespace below + true, + ) + .await? + } + runtime::MountBackend::OverlayFsWithFuse + | runtime::MountBackend::FuseOnly + | runtime::MountBackend::WinFsp => { + // fuse uses the lowerdir that's defined in the runtime + // config, which is implicitly added to all overlay mounts + RenderResult::default() + } + }; + tracing::debug!("computing runtime manifest"); + let manifest = super::compute_runtime_manifest(rt).await?; + + let in_namespace = env::RuntimeConfigurator::default().enter_mount_namespace()?; + rt.config.mount_namespace = Some(in_namespace.mount_namespace().to_path_buf()); + rt.save_state_to_storage().await?; + + let with_root = in_namespace.become_root()?; + with_root.privatize_existing_mounts().await?; + with_root.ensure_mount_targets_exist(&rt.config)?; + match rt.config.mount_backend { + runtime::MountBackend::OverlayFsWithRenders => { + with_root.mount_runtime(&rt.config)?; + with_root.setup_runtime(rt).await?; + with_root + .mount_env_overlayfs(rt, &render_result.paths_rendered) + .await?; + with_root.mask_files(&rt.config, manifest).await?; + } + #[cfg(feature = "fuse-backend")] + runtime::MountBackend::OverlayFsWithFuse => { + with_root.mount_runtime(&rt.config)?; + with_root.setup_runtime(rt).await?; + with_root.mount_fuse_lower_dir(rt).await?; + with_root + .mount_env_overlayfs(rt, &render_result.paths_rendered) + .await?; + } + #[cfg(feature = "fuse-backend")] + runtime::MountBackend::FuseOnly => { + with_root.mount_env_fuse(rt).await?; + } + #[allow(unreachable_patterns)] + _ => { + return Err(Error::String(format!( + "This binary was not compiled with support for {}", + rt.config.mount_backend + ))) + } + } + with_root.become_original_user()?; + Ok(render_result.render_summary) +} diff --git a/crates/spfs/src/status_win.rs b/crates/spfs/src/status_win.rs new file mode 100644 index 000000000..80bca36b9 --- /dev/null +++ b/crates/spfs/src/status_win.rs @@ -0,0 +1,32 @@ +// Copyright (c) Sony Pictures Imageworks, et al. +// SPDX-License-Identifier: Apache-2.0 +// https://github.com/imageworks/spk + +use crate::storage::fs::RenderSummary; +use crate::{runtime, Result}; + +/// Remount the given runtime as configured. 
+pub async fn remount_runtime(_rt: &runtime::Runtime) -> Result<()> { + todo!() +} + +/// Exit the given runtime as configured, this should only ever be called with the active runtime +pub async fn exit_runtime(_rt: &runtime::Runtime) -> Result<()> { + todo!() +} + +/// Reinitialize the current spfs runtime as rt (in case of runtime config changes). +/// +/// This function will run blocking IO on the current thread. Although this is not ideal, +/// the mount namespacing operated per-thread and so restricts our ability to move execution. +pub async fn reinitialize_runtime(_rt: &mut runtime::Runtime) -> Result { + todo!() +} + +/// Initialize the current runtime as rt. +/// +/// This function will run blocking IO on the current thread. Although this is not ideal, +/// the mount namespacing operated per-thread and so restricts our ability to move execution. +pub async fn initialize_runtime(_rt: &mut runtime::Runtime) -> Result { + todo!() +} diff --git a/crates/spfs/src/storage/fs/database.rs b/crates/spfs/src/storage/fs/database.rs index cc587cf5f..0888f898d 100644 --- a/crates/spfs/src/storage/fs/database.rs +++ b/crates/spfs/src/storage/fs/database.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // https://github.com/imageworks/spk +#[cfg(unix)] use std::os::unix::fs::PermissionsExt; use std::pin::Pin; @@ -118,14 +119,17 @@ impl graph::Database for super::FSRepository { err, )); } - let perms = std::fs::Permissions::from_mode(self.objects.file_permissions); - if let Err(err) = tokio::fs::set_permissions(&working_file, perms).await { - let _ = tokio::fs::remove_file(&working_file).await; - return Err(Error::StorageWriteError( - "set permissions on object file", - working_file, - err, - )); + #[cfg(unix)] + { + let perms = std::fs::Permissions::from_mode(self.objects.file_permissions); + if let Err(err) = tokio::fs::set_permissions(&working_file, perms).await { + let _ = tokio::fs::remove_file(&working_file).await; + return Err(Error::StorageWriteError( + "set permissions on object file", + working_file, + err, + )); + } } self.objects.ensure_base_dir(&filepath)?; match tokio::fs::rename(&working_file, &filepath).await { @@ -145,6 +149,7 @@ impl graph::Database for super::FSRepository { let filepath = self.objects.build_digest_path(&digest); // this might fail but we don't consider that fatal just yet + #[cfg(unix)] let _ = tokio::fs::set_permissions(&filepath, std::fs::Permissions::from_mode(0o777)).await; if let Err(err) = tokio::fs::remove_file(&filepath).await { @@ -169,6 +174,7 @@ impl graph::Database for super::FSRepository { let filepath = self.objects.build_digest_path(&digest); // this might fail but we don't consider that fatal just yet + #[cfg(unix)] let _ = tokio::fs::set_permissions(&filepath, std::fs::Permissions::from_mode(0o777)).await; let metadata = tokio::fs::symlink_metadata(&filepath) diff --git a/crates/spfs/src/storage/fs/hash_store.rs b/crates/spfs/src/storage/fs/hash_store.rs index f62b1cbe9..e0c6c781c 100644 --- a/crates/spfs/src/storage/fs/hash_store.rs +++ b/crates/spfs/src/storage/fs/hash_store.rs @@ -3,6 +3,7 @@ // https://github.com/imageworks/spk use std::io::ErrorKind; +#[cfg(unix)] use std::os::unix::fs::PermissionsExt; use std::path::{Path, PathBuf}; use std::pin::Pin; @@ -331,6 +332,7 @@ impl FSHashStore { // won't change. For example, writing a payload and then hard linking // to that payload in a render that expects the file to have a certain // permissions. 
+ #[cfg(unix)] if created_new_file { if let Err(err) = tokio::fs::set_permissions( &path, diff --git a/crates/spfs/src/storage/fs/mod.rs b/crates/spfs/src/storage/fs/mod.rs index 5ce07e30b..ad8323810 100644 --- a/crates/spfs/src/storage/fs/mod.rs +++ b/crates/spfs/src/storage/fs/mod.rs @@ -8,6 +8,7 @@ mod database; mod hash_store; mod payloads; mod render_summary; +#[cfg_attr(windows, path = "./renderer_win.rs")] mod renderer; mod repository; mod tag; diff --git a/crates/spfs/src/storage/fs/renderer_win.rs b/crates/spfs/src/storage/fs/renderer_win.rs new file mode 100644 index 000000000..8ba8dbbbe --- /dev/null +++ b/crates/spfs/src/storage/fs/renderer_win.rs @@ -0,0 +1,447 @@ +// Copyright (c) Sony Pictures Imageworks, et al. +// SPDX-License-Identifier: Apache-2.0 +// https://github.com/imageworks/spk + +use std::path::{Path, PathBuf}; +use std::pin::Pin; +use std::sync::Arc; + +use async_stream::try_stream; +use chrono::{DateTime, Utc}; +use futures::{Stream, TryFutureExt, TryStreamExt}; +use tokio::sync::Semaphore; + +use super::{FSRepository, RenderReporter, SilentRenderReporter}; +use crate::encoding::{self, Encodable}; +use crate::runtime::makedirs_with_perms; +use crate::storage::prelude::*; +use crate::storage::LocalRepository; +use crate::{graph, tracking, Error, Result}; + +#[cfg(test)] +#[path = "./renderer_test.rs"] +mod renderer_test; + +/// The default limit for concurrent blobs when rendering manifests to disk. +/// See: [`Renderer::with_max_concurrent_blobs`] +pub const DEFAULT_MAX_CONCURRENT_BLOBS: usize = 100; + +/// The default limit for concurrent branches when rendering manifests to disk. +/// See: [`Renderer::with_max_concurrent_branches`] +pub const DEFAULT_MAX_CONCURRENT_BRANCHES: usize = 5; + +#[derive(Debug, Copy, Clone, strum::EnumString, strum::EnumVariantNames)] +pub enum RenderType { + HardLink, + HardLinkNoProxy, + Copy, +} + +impl FSRepository { + fn get_render_storage(&self) -> Result<&super::FSHashStore> { + match &self.renders { + Some(render_store) => Ok(&render_store.renders), + None => Err(Error::NoRenderStorage(self.address())), + } + } + + pub async fn has_rendered_manifest(&self, digest: encoding::Digest) -> bool { + let renders = match &self.renders { + Some(render_store) => &render_store.renders, + None => return false, + }; + let rendered_dir = renders.build_digest_path(&digest); + was_render_completed(rendered_dir) + } + + pub fn iter_rendered_manifests<'db>( + &'db self, + ) -> Pin<Box<dyn Stream<Item = Result<encoding::Digest>> + Send + Sync + 'db>> { + Box::pin(try_stream! { + let renders = self.get_render_storage()?; + for await digest in renders.iter() { + yield digest?; + } + }) + } + + /// Return the path that the manifest would be rendered to. + pub fn manifest_render_path(&self, manifest: &graph::Manifest) -> Result<PathBuf> { + Ok(self + .get_render_storage()? + .build_digest_path(&manifest.digest()?)) + } + + pub fn proxy_path(&self) -> Option<&std::path::Path> { + self.renders + .as_ref() + .map(|render_store| render_store.proxy.root()) + } + + /// Remove the identified render from this storage.
+ pub async fn remove_rendered_manifest(&self, digest: crate::encoding::Digest) -> Result<()> { + let renders = match &self.renders { + Some(render_store) => &render_store.renders, + None => return Ok(()), + }; + let rendered_dirpath = renders.build_digest_path(&digest); + let workdir = renders.workdir(); + makedirs_with_perms(&workdir, renders.directory_permissions)?; + Self::remove_dir_atomically(&rendered_dirpath, &workdir).await + } + + pub(crate) async fn remove_dir_atomically(dirpath: &Path, workdir: &Path) -> Result<()> { + let uuid = uuid::Uuid::new_v4().to_string(); + let working_dirpath = workdir.join(uuid); + if let Err(err) = tokio::fs::rename(&dirpath, &working_dirpath).await { + return match err.kind() { + std::io::ErrorKind::NotFound => Ok(()), + _ => Err(crate::Error::StorageWriteError( + "rename on render before removal", + working_dirpath, + err, + )), + }; + } + + unmark_render_completed(&dirpath).await?; + open_perms_and_remove_all(&working_dirpath).await + } + + /// Returns true if the render was actually removed + pub async fn remove_rendered_manifest_if_older_than( + &self, + older_than: DateTime, + digest: encoding::Digest, + ) -> Result { + let renders = match &self.renders { + Some(render_store) => &render_store.renders, + None => return Ok(false), + }; + let rendered_dirpath = renders.build_digest_path(&digest); + + let metadata = match tokio::fs::symlink_metadata(&rendered_dirpath).await { + Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false), + Err(err) => { + return Err(Error::StorageReadError( + "symlink_metadata on rendered dir path", + rendered_dirpath.clone(), + err, + )) + } + Ok(metadata) => metadata, + }; + + let mtime = metadata.modified().map_err(|err| { + Error::StorageReadError( + "modified on symlink metadata of rendered dir path", + rendered_dirpath.clone(), + err, + ) + })?; + + if DateTime::::from(mtime) >= older_than { + return Ok(false); + } + + self.remove_rendered_manifest(digest).await?; + Ok(true) + } +} + +/// A semaphore for limiting the concurrency of blob renders. +struct BlobSemaphore(Arc); + +/// A newtype to represent holding the permit specifically for the blob semaphore. +struct BlobSemaphorePermit<'a>(tokio::sync::SemaphorePermit<'a>); + +impl BlobSemaphore { + /// Acquires a permit from the blob semaphore. + /// + /// Wrapper around [`tokio::sync::Semaphore::acquire`]. 
+ async fn acquire(&self) -> BlobSemaphorePermit<'_> { + BlobSemaphorePermit( + self.0 + .acquire() + .await + .expect("semaphore should remain open"), + ) + } +} + +/// Renders manifest data to a directory on disk +pub struct Renderer<'repo, Repo, Reporter: RenderReporter = SilentRenderReporter> { + repo: &'repo Repo, + reporter: Arc, + blob_semaphore: BlobSemaphore, + max_concurrent_branches: usize, +} + +impl<'repo, Repo> Renderer<'repo, Repo, SilentRenderReporter> { + pub fn new(repo: &'repo Repo) -> Self { + Self { + repo, + reporter: Arc::new(SilentRenderReporter), + blob_semaphore: BlobSemaphore(Arc::new(Semaphore::new(DEFAULT_MAX_CONCURRENT_BLOBS))), + max_concurrent_branches: DEFAULT_MAX_CONCURRENT_BRANCHES, + } + } +} + +impl<'repo, Repo, Reporter> Renderer<'repo, Repo, Reporter> +where + Repo: Repository + LocalRepository, + Reporter: RenderReporter, +{ + /// Report progress to the given instance, replacing any existing one + pub fn with_reporter(self, reporter: T) -> Renderer<'repo, Repo, R> + where + T: Into>, + R: RenderReporter, + { + Renderer { + repo: self.repo, + reporter: reporter.into(), + blob_semaphore: self.blob_semaphore, + max_concurrent_branches: self.max_concurrent_branches, + } + } + + /// Set how many blobs should be processed at once. + pub fn with_max_concurrent_blobs(mut self, max_concurrent_blobs: usize) -> Self { + self.blob_semaphore = BlobSemaphore(Arc::new(Semaphore::new(max_concurrent_blobs))); + self + } + + /// Set how many branches should be processed at once. + /// + /// Each tree that is processed can have any number of subtrees. This number + /// limits the number of subtrees that can be processed at once for any given tree. This + /// means that the number compounds exponentially based on the depth of the manifest + /// being rendered. Eg: a limit of 2 allows two directories to be processed in the root + /// simultaneously and a further 2 within each of those two for a total of 4 branches, and so + /// on. When rendering extremely deep trees, a smaller, conservative number is better + /// to avoid open file limits. + pub fn with_max_concurrent_branches(mut self, max_concurrent_branches: usize) -> Self { + self.max_concurrent_branches = max_concurrent_branches; + self + } + + /// Render all layers in the given env to the render storage of the underlying + /// repository, returning the paths to all relevant layers in the appropriate order. 
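// Illustrative usage of the Renderer builder methods above (values are made up,
// and this helper is assumed to live in this module so that Renderer, Repository
// and LocalRepository are already in scope via the imports at the top of the file).
// A branch limit of 2 allows roughly 2^depth directories in flight for a manifest
// of the given depth, so deep trees call for small, conservative values.
fn conservative_renderer<R>(repo: &R) -> Renderer<'_, R>
where
    R: Repository + LocalRepository,
{
    Renderer::new(repo)
        .with_max_concurrent_blobs(50)
        .with_max_concurrent_branches(2)
}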
+ pub async fn render( + &self, + stack: I, + render_type: Option, + ) -> Result> + where + I: Iterator + Send, + D: AsRef + Send, + { + let layers = crate::resolve::resolve_stack_to_layers_with_repo(stack, self.repo).await?; + let mut futures = futures::stream::FuturesOrdered::new(); + for layer in layers { + let fut = self + .repo + .read_manifest(layer.manifest) + .and_then( + |manifest| async move { self.render_manifest(&manifest, render_type).await }, + ); + futures.push_back(fut); + } + futures.try_collect().await + } + + /// Recreate the full structure of a stored environment on disk + pub async fn render_into_directory, P: AsRef>( + &self, + env_spec: E, + target_dir: P, + render_type: RenderType, + ) -> Result<()> { + let env_spec = env_spec.into(); + let mut stack = Vec::new(); + for target in env_spec.iter() { + let target = target.to_string(); + let obj = self.repo.read_ref(target.as_str()).await?; + stack.push(obj.digest()?); + } + let layers = + crate::resolve::resolve_stack_to_layers_with_repo(stack.iter(), self.repo).await?; + let mut manifests = Vec::with_capacity(layers.len()); + for layer in layers { + manifests.push(self.repo.read_manifest(layer.manifest).await?); + } + let mut manifest = tracking::Manifest::default(); + for next in manifests.into_iter() { + manifest.update(&next.to_tracking_manifest()); + } + let manifest = graph::Manifest::from(&manifest); + self.render_manifest_into_dir(&manifest, target_dir, render_type) + .await + } + + /// Render a manifest into the renders area of the underlying repository, + /// returning the absolute local path of the directory. + pub async fn render_manifest( + &self, + manifest: &graph::Manifest, + render_type: Option, + ) -> Result { + let render_store = self.repo.render_store()?; + let rendered_dirpath = render_store.renders.build_digest_path(&manifest.digest()?); + if was_render_completed(&rendered_dirpath) { + tracing::trace!(path = ?rendered_dirpath, "render already completed"); + return Ok(rendered_dirpath); + } + tracing::trace!(path = ?rendered_dirpath, "rendering manifest..."); + + let uuid = uuid::Uuid::new_v4().to_string(); + let working_dir = render_store.renders.workdir().join(uuid); + makedirs_with_perms(&working_dir, 0o777)?; + + self.render_manifest_into_dir( + manifest, + &working_dir, + render_type.unwrap_or(RenderType::HardLink), + ) + .await?; + + render_store.renders.ensure_base_dir(&rendered_dirpath)?; + match tokio::fs::rename(&working_dir, &rendered_dirpath).await { + Ok(_) => (), + Err(err) => match err.raw_os_error() { + // XXX: Replace with ErrorKind::DirectoryNotEmpty when + // stabilized. + Some(libc::EEXIST) | Some(libc::ENOTEMPTY) => { + // The rename failed because the destination + // `rendered_dirpath` exists and is a non-empty directory. + // Assume we lost a race with some other process rendering + // the same manifest. Treat this as a success, but clean up + // the working directory left behind. + if let Err(err) = open_perms_and_remove_all(&working_dir).await { + tracing::warn!(path=?working_dir, "failed to clean up working directory: {:?}", err); + } + } + _ => { + return Err(Error::StorageWriteError( + "rename on render", + rendered_dirpath, + err, + )) + } + }, + } + + mark_render_completed(&rendered_dirpath).await?; + Ok(rendered_dirpath) + } + + /// Recreate the full structure of a stored manifest on disk. + pub async fn render_manifest_into_dir
<P>
( + &self, + _manifest: &graph::Manifest, + _target_dir: P, + _render_type: RenderType, + ) -> Result<()> + where + P: AsRef, + { + todo!() + } +} + +/// Walks down a filesystem tree, opening permissions on each file before removing +/// the entire tree. +/// +/// This process handles the case when a folder may include files +/// that need to be removed but on which the user doesn't have enough permissions. +/// It does assume that the current user owns the file, as it may not be possible to +/// change permissions before removal otherwise. +#[async_recursion::async_recursion] +pub async fn open_perms_and_remove_all(root: &Path) -> Result<()> { + let mut read_dir = tokio::fs::read_dir(&root) + .await + .map_err(|err| Error::StorageReadError("read_dir on root", root.to_owned(), err))?; + // TODO: parallelize this with async + while let Some(entry) = read_dir + .next_entry() + .await + .map_err(|err| Error::StorageReadError("next_entry on root dir", root.to_owned(), err))? + { + let entry_path = root.join(entry.file_name()); + let file_type = entry.file_type().await.map_err(|err| { + Error::StorageReadError("file_type on root entry", root.to_owned(), err) + })?; + if file_type.is_symlink() || file_type.is_file() { + tokio::fs::remove_file(&entry_path).await.map_err(|err| { + Error::StorageWriteError("remove_file on render entry", entry_path.clone(), err) + })?; + } + if file_type.is_dir() { + open_perms_and_remove_all(&entry_path).await?; + } + } + tokio::fs::remove_dir(&root).await.map_err(|err| { + Error::StorageWriteError("remove_dir on render root", root.to_owned(), err) + })?; + Ok(()) +} + +fn was_render_completed>(render_path: P) -> bool { + let mut name = render_path + .as_ref() + .file_name() + .expect("must have a file name") + .to_os_string(); + name.push(".completed"); + let marker_path = render_path.as_ref().with_file_name(name); + marker_path.exists() +} + +/// panics if the given path does not have a directory name +async fn mark_render_completed>(render_path: P) -> Result<()> { + let mut name = render_path + .as_ref() + .file_name() + .expect("must have a file name") + .to_os_string(); + name.push(".completed"); + let marker_path = render_path.as_ref().with_file_name(name); + // create if it doesn't exist but don't fail if it already exists (no exclusive open) + tokio::fs::OpenOptions::new() + .create(true) + .write(true) + .open(&marker_path) + .await + .map_err(|err| { + Error::StorageWriteError( + "open render completed marker path for write", + marker_path, + err, + ) + })?; + Ok(()) +} + +async fn unmark_render_completed>(render_path: P) -> Result<()> { + let mut name = render_path + .as_ref() + .file_name() + .expect("must have a file name") + .to_os_string(); + name.push(".completed"); + let marker_path = render_path.as_ref().with_file_name(name); + if let Err(err) = tokio::fs::remove_file(&marker_path).await { + match err.kind() { + std::io::ErrorKind::NotFound => Ok(()), + _ => Err(Error::StorageWriteError( + "remove file on render completed marker", + marker_path, + err, + )), + } + } else { + Ok(()) + } +} diff --git a/crates/spfs/src/storage/fs/repository.rs b/crates/spfs/src/storage/fs/repository.rs index 9f7a9a692..8c61f5adf 100644 --- a/crates/spfs/src/storage/fs/repository.rs +++ b/crates/spfs/src/storage/fs/repository.rs @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // https://github.com/imageworks/spk +#[cfg(unix)] use std::fs::Permissions; use std::io::Write; +#[cfg(unix)] use std::os::unix::prelude::PermissionsExt; use std::path::{Path, PathBuf}; 
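The render-completion scheme in the new renderer_win.rs above rests entirely on a sibling marker file: a render directory for a digest is considered complete only once a file named after it with a ".completed" suffix exists next to it. A minimal sketch of how that marker path is derived, mirroring the was_render_completed / mark_render_completed helpers (a paraphrase for illustration, not a verbatim extract):

use std::path::{Path, PathBuf};

// Derive the sibling marker used to flag a completed render,
// e.g. /renders/ABC123 -> /renders/ABC123.completed
fn completed_marker_path(render_path: &Path) -> Option<PathBuf> {
    let mut name = render_path.file_name()?.to_os_string();
    name.push(".completed");
    Some(render_path.with_file_name(name))
}

fn was_completed(render_path: &Path) -> bool {
    completed_marker_path(render_path)
        .map(|marker| marker.exists())
        .unwrap_or(false)
}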
@@ -369,19 +371,22 @@ fn write_version_file>(root: P, version: &semver::Version) -> Res err, ) })?; - // This file can be read only. It will be replaced by a new file - // if the contents need to be changed. But for interop with older - // versions of spfs that need to write to it, enable write. - temp_version_file - .as_file() - .set_permissions(Permissions::from_mode(0o666)) - .map_err(|err| { - Error::StorageWriteError( - "set_permissions on version file temp file", - temp_version_file.path().to_owned(), - err, - ) - })?; + #[cfg(unix)] + { + // This file can be read only. It will be replaced by a new file + // if the contents need to be changed. But for interop with older + // versions of spfs that need to write to it, enable write. + temp_version_file + .as_file() + .set_permissions(Permissions::from_mode(0o666)) + .map_err(|err| { + Error::StorageWriteError( + "set_permissions on version file temp file", + temp_version_file.path().to_owned(), + err, + ) + })?; + } temp_version_file .write_all(version.to_string().as_bytes()) .map_err(|err| { diff --git a/crates/spfs/src/storage/fs/tag.rs b/crates/spfs/src/storage/fs/tag.rs index 22d908fe0..b0397a2b4 100644 --- a/crates/spfs/src/storage/fs/tag.rs +++ b/crates/spfs/src/storage/fs/tag.rs @@ -5,6 +5,7 @@ use std::convert::TryInto; use std::ffi::OsStr; use std::mem::size_of; +#[cfg(unix)] use std::os::unix::fs::PermissionsExt; use std::path::{Path, PathBuf}; use std::pin::Pin; @@ -385,9 +386,12 @@ async fn write_tags_to_path(filepath: &PathBuf, tags: &[tracking::Tag]) -> Resul )); } - let perms = std::fs::Permissions::from_mode(0o666); - if let Err(err) = tokio::fs::set_permissions(&filepath, perms).await { - tracing::warn!(?err, ?filepath, "Failed to set tag permissions"); + #[cfg(unix)] + { + let perms = std::fs::Permissions::from_mode(0o666); + if let Err(err) = tokio::fs::set_permissions(&filepath, perms).await { + tracing::warn!(?err, ?filepath, "Failed to set tag permissions"); + } } Ok(()) } diff --git a/crates/spfs/src/tracking/manifest.rs b/crates/spfs/src/tracking/manifest.rs index a15e9258d..89deabe51 100644 --- a/crates/spfs/src/tracking/manifest.rs +++ b/crates/spfs/src/tracking/manifest.rs @@ -3,8 +3,10 @@ // https://github.com/imageworks/spk use std::collections::{HashMap, HashSet}; +#[cfg(unix)] use std::os::unix::fs::MetadataExt; -use std::os::unix::prelude::FileTypeExt; +#[cfg(windows)] +use std::os::windows::fs::MetadataExt; use std::pin::Pin; use std::sync::Arc; @@ -181,7 +183,7 @@ where T: Default, { /// Add a new directory entry to this manifest - pub fn mkdir>(&mut self, path: P) -> Result<&mut Entry> { + pub fn mkdir>(&mut self, path: P) -> MkResult<&mut Entry> { let entry = Entry::default(); self.mknod(path, entry) } @@ -191,11 +193,11 @@ where /// Entries that do not exist are created with a reasonable default /// file mode, but can and should be replaced by a new entry in the /// case where this is not desired. 
- pub fn mkdirs>(&mut self, path: P) -> Result<&mut Entry> { + pub fn mkdirs>(&mut self, path: P) -> MkResult<&mut Entry> { const TRIM_PAT: &[char] = &['/', '.']; let path = path.as_ref().trim_start_matches(TRIM_PAT); if path.is_empty() { - return Err(nix::errno::Errno::EEXIST.into()); + return Err(MkError::AlreadyExists(path.into())); } let path = RelativePathBuf::from(path).normalize(); let mut entry = &mut self.root; @@ -208,7 +210,7 @@ where } entry = entries.get_mut(step).unwrap(); if !entry.kind.is_tree() { - return Err(nix::errno::Errno::ENOTDIR.into()); + return Err(MkError::NotADirectory(path)); } } // do not expect any other components after normalizing @@ -219,7 +221,7 @@ where } /// Make a new file entry in this manifest - pub fn mkfile<'m>(&'m mut self, path: &str) -> Result<&'m mut Entry> { + pub fn mkfile<'m>(&'m mut self, path: &str) -> MkResult<&'m mut Entry> { let entry = Entry { kind: EntryKind::Blob, ..Default::default() @@ -229,13 +231,17 @@ where } impl Manifest { - pub fn mknod>(&mut self, path: P, new_entry: Entry) -> Result<&mut Entry> { + pub fn mknod>( + &mut self, + path: P, + new_entry: Entry, + ) -> MkResult<&mut Entry> { use relative_path::Component; const TRIM_PAT: &[char] = &['/', '.']; let path = path.as_ref().trim_start_matches(TRIM_PAT); if path.is_empty() { - return Err(nix::errno::Errno::EEXIST.into()); + return Err(MkError::AlreadyExists(path.into())); } let path = RelativePathBuf::from(path).normalize(); let mut entry = &mut self.root; @@ -244,12 +250,10 @@ impl Manifest { for step in components { match step { Component::Normal(step) => match entry.entries.get_mut(step) { - None => { - return Err(nix::errno::Errno::ENOENT.into()); - } + None => return Err(MkError::NotFound(path)), Some(e) => { if !e.kind.is_tree() { - return Err(nix::errno::Errno::ENOTDIR.into()); + return Err(MkError::NotADirectory(path)); } entry = e; } @@ -259,16 +263,30 @@ impl Manifest { } } match last { - None => Err(nix::errno::Errno::ENOENT.into()), + None => Err(MkError::NotFound(path)), Some(Component::Normal(step)) => { entry.entries.insert(step.to_string(), new_entry); Ok(entry.entries.get_mut(step).unwrap()) } - _ => Err(nix::errno::Errno::EIO.into()), + _ => Err(MkError::InvalidFilename(path)), } } } +#[derive(Debug, thiserror::Error)] +pub enum MkError { + #[error("Entry already exists in manifest {0}")] + AlreadyExists(RelativePathBuf), + #[error("Invalid filename for manifest entry {0}")] + InvalidFilename(RelativePathBuf), + #[error("Manifest entry is not a parent {0}")] + NotADirectory(RelativePathBuf), + #[error("Entry does not exist {0}")] + NotFound(RelativePathBuf), +} + +pub type MkResult = std::result::Result; + /// Walks all entries in a manifest depth-first pub struct ManifestWalker<'m, T = ()> { prefix: RelativePathBuf, @@ -569,20 +587,20 @@ where let stat_result = match tokio::fs::symlink_metadata(&path).await { Ok(r) => r, Err(lstat_err) if lstat_err.kind() == std::io::ErrorKind::NotFound => { - // Heuristic: if lstat fails with ENOENT, but `dir_entry` exists, // then the directory entry exists but it might be a whiteout file. // Assume so if `dir_entry` says it is a character device. - match dir_entry.file_type().await { - Ok(ft) if ft.is_char_device() => { + match dir_entry.metadata().await { + Ok(meta) if crate::runtime::is_removed_entry(&meta) => { // XXX: mode and size? 
entry.kind = EntryKind::Mask; entry.object = encoding::NULL_DIGEST.into(); self.reporter.computed_entry(&entry); return Ok(entry); } - Ok(_) => { + Ok(meta) => { return Err(Error::String(format!( - "Unexpected non-char device file: {}", + "Unexpected directory file type {:?}: {}", + meta.file_type(), path.as_ref().display() ))) } @@ -604,8 +622,18 @@ where } }; - entry.mode = stat_result.mode(); - entry.size = stat_result.size(); + #[cfg(unix)] + { + entry.mode = stat_result.mode(); + entry.size = stat_result.size(); + } + #[cfg(windows)] + { + entry.size = stat_result.file_size(); + // use the same default posix permissions as git uses + // for files created on windows + entry.mode = 0o644; + } let file_type = stat_result.file_type(); if file_type.is_symlink() { diff --git a/crates/spfs/src/tracking/mod.rs b/crates/spfs/src/tracking/mod.rs index 6ceffe605..375f3424e 100644 --- a/crates/spfs/src/tracking/mod.rs +++ b/crates/spfs/src/tracking/mod.rs @@ -8,7 +8,7 @@ pub mod blob_reader; mod diff; mod entry; mod env; -mod manifest; +pub mod manifest; mod object; mod tag; diff --git a/crates/spk-build/src/build/binary.rs b/crates/spk-build/src/build/binary.rs index 2fd21c149..01b1dfeb4 100644 --- a/crates/spk-build/src/build/binary.rs +++ b/crates/spk-build/src/build/binary.rs @@ -729,7 +729,7 @@ where .status() .map_err(|err| { Error::ProcessSpawnError(spfs::Error::process_spawn_error( - "build script".to_owned(), + "build script", err, Some(source_dir.to_owned()), )) diff --git a/crates/spk-build/src/error.rs b/crates/spk-build/src/error.rs index aed67cf69..08263afa8 100644 --- a/crates/spk-build/src/error.rs +++ b/crates/spk-build/src/error.rs @@ -43,6 +43,8 @@ pub enum Error { #[error(transparent)] SPFS(#[from] spfs::Error), #[error(transparent)] + BuildManifest(#[from] spfs::tracking::manifest::MkError), + #[error(transparent)] SpkExecError(#[from] spk_exec::Error), #[error(transparent)] SpkIdentError(#[from] spk_schema::ident::Error), diff --git a/crates/spk-cli/cmd-test/src/test/tester.rs b/crates/spk-cli/cmd-test/src/test/tester.rs index 3c4a54b5b..fa1ed8cd5 100644 --- a/crates/spk-cli/cmd-test/src/test/tester.rs +++ b/crates/spk-cli/cmd-test/src/test/tester.rs @@ -61,7 +61,7 @@ pub trait Tester: Send { .status() .map_err(|err| { Error::ProcessSpawnError(spfs::Error::process_spawn_error( - "bash".to_owned(), + "bash", err, Some(source_dir.to_owned()), )) diff --git a/crates/spk-schema/src/source_spec.rs b/crates/spk-schema/src/source_spec.rs index cb11e198c..6e4ae1453 100644 --- a/crates/spk-schema/src/source_spec.rs +++ b/crates/spk-schema/src/source_spec.rs @@ -130,7 +130,7 @@ impl LocalSource { .status() .map_err(|err| { Error::ProcessSpawnError(spfs::Error::process_spawn_error( - "rsync".to_owned(), + "rsync", err, Some(dirname.to_owned()), )) @@ -196,7 +196,7 @@ impl GitSource { .status() .map_err(|err| { Error::ProcessSpawnError(spfs::Error::process_spawn_error( - "git".to_owned(), + "git", err, Some(dirname.to_owned()), )) @@ -243,7 +243,7 @@ impl TarSource { .status() .map_err(|err| { Error::ProcessSpawnError(spfs::Error::process_spawn_error( - "wget".to_owned(), + "wget", err, Some(tmpdir.path().to_owned()), )) @@ -273,7 +273,7 @@ impl TarSource { .status() .map_err(|err| { Error::ProcessSpawnError(spfs::Error::process_spawn_error( - "tar".to_owned(), + "tar", err, Some(dirname.to_owned()), )) @@ -327,7 +327,7 @@ impl ScriptSource { tracing::debug!("running sources script"); let mut child = bash.spawn().map_err(|err| { Error::ProcessSpawnError(spfs::Error::process_spawn_error( 
- "bash".to_owned(), + "bash", err, Some(dirname.to_owned()), )) diff --git a/crates/spk-solve/Cargo.toml b/crates/spk-solve/Cargo.toml index ad9ea66e4..54c3ba15b 100644 --- a/crates/spk-solve/Cargo.toml +++ b/crates/spk-solve/Cargo.toml @@ -30,7 +30,8 @@ futures = { workspace = true } itertools = "0.10" once_cell = { workspace = true } priority-queue = "1.2" -rug = "1.17.0" +num-bigint = "0.4.3" +num-format = { version = "0.4.4", features = ["with-num-bigint"] } serde_json = { workspace = true } sentry = { workspace = true, optional = true } signal-hook = "0.3" diff --git a/crates/spk-solve/src/search_space.rs b/crates/spk-solve/src/search_space.rs index aa989abf6..b452f3555 100644 --- a/crates/spk-solve/src/search_space.rs +++ b/crates/spk-solve/src/search_space.rs @@ -5,7 +5,7 @@ use std::collections::HashMap; use std::sync::Arc; -use rug::Integer; +use num_bigint::BigUint; use spk_schema::ident::PkgRequest; use spk_schema::name::PkgNameBuf; use spk_schema::{BuildIdent, Deprecate, Package, Spec, VersionIdent}; @@ -197,14 +197,15 @@ fn show_total_stats( verbosity: u8, ) { let mut calc: String = "1".to_string(); - let mut total: Integer = Integer::new() + 1; - let mut total_builds = Integer::new(); - let mut avg_branches = Integer::new(); - let mut previous_num_nodes: Integer = Integer::new() + 1; + let mut total = BigUint::from(1u32); + let mut total_builds = BigUint::default(); + let mut avg_branches = BigUint::default(); + let mut previous_num_nodes = BigUint::from(1u32); for pkg in packages { let name = pkg.name(); if let Some(number) = counters.get(name) { + let number = *number; total_builds += number; avg_branches += number; @@ -226,16 +227,8 @@ fn show_total_stats( } let num_digits = total.to_string().len(); - let display_total: String = if num_digits > DIGITS_LIMIT { - format!("{total:5.4e}").replace('e', " x 10^") - } else { - // TODO: it would be nice to format these large numbers using - // something like the num-format crate, but the bignum's - // (rug::Integer) used to hold the large numbers in these - // calculations would need to support or be wrapped to - // implement num-format's required traits. - total.to_string() - }; + use num_format::ToFormattedString; + let display_total = total.to_formatted_string(&num_format::Locale::en); tracing::info!("Total number of nodes in unconstrained decision tree: {display_total}"); if num_digits > DIGITS_LIMIT && verbosity > SHOW_FULL_DIGITS_LEVEL { tracing::info!("The full number of nodes is: {total}");