From 118d6a5a1d92953074aa5137b4119a9d32a36745 Mon Sep 17 00:00:00 2001 From: Hatter Jiang Date: Fri, 30 Dec 2022 20:55:14 +0800 Subject: [PATCH] feat: clone from https://github.com/miquels/webdav-server-rs --- Cargo.toml | 78 +++ README.md | 60 +- TODO.md | 19 + debian/changelog | 25 + debian/compat | 1 + debian/control | 12 + debian/copyright | 2 + debian/docs | 1 + debian/examples | 1 + debian/install | 1 + debian/rules | 19 + debian/source/format | 1 + debian/source/options | 5 + debian/webdav-server-rs.webdav-server.init | 19 + debian/webdav-server-rs.webdav-server.service | 12 + examples/nginx-proxy.conf | 63 ++ fs_quota/.gitignore | 7 + fs_quota/Cargo.toml | 30 + fs_quota/README.md | 36 + fs_quota/README.tpl | 11 + fs_quota/build.rs | 68 ++ fs_quota/examples/fs_quota.rs | 11 + fs_quota/src/Makefile | 26 + fs_quota/src/generic_os.rs | 15 + fs_quota/src/lib.rs | 282 ++++++++ fs_quota/src/linux.rs | 101 +++ fs_quota/src/quota-linux.c | 44 ++ fs_quota/src/quota-nfs.c | 185 +++++ fs_quota/src/quota_nfs.rs | 92 +++ fs_quota/src/rquota.x | 139 ++++ pam/Cargo.toml | 32 + pam/README.md | 49 ++ pam/README.tpl | 11 + pam/TODO.md | 11 + pam/build.rs | 6 + pam/src/bin/main.rs | 92 +++ pam/src/lib.rs | 64 ++ pam/src/pam.c | 82 +++ pam/src/pam.rs | 97 +++ pam/src/pamclient.rs | 265 ++++++++ pam/src/pamserver.rs | 169 +++++ rustfmt.toml | 12 + src/auth.rs | 156 +++++ src/cache.rs | 185 +++++ src/config.rs | 339 ++++++++++ src/main.rs | 635 ++++++++++++++++++ src/rootfs.rs | 112 +++ src/router.rs | 262 ++++++++ src/suid.rs | 304 +++++++++ src/tls.rs | 58 ++ src/unixuser.rs | 135 ++++ src/userfs.rs | 125 ++++ webdav-server.toml | 154 +++++ 53 files changed, 4720 insertions(+), 1 deletion(-) create mode 100644 Cargo.toml create mode 100644 TODO.md create mode 100644 debian/changelog create mode 100644 debian/compat create mode 100644 debian/control create mode 100644 debian/copyright create mode 100644 debian/docs create mode 100644 debian/examples create mode 100644 debian/install create mode 100755 debian/rules create mode 100644 debian/source/format create mode 100644 debian/source/options create mode 100644 debian/webdav-server-rs.webdav-server.init create mode 100644 debian/webdav-server-rs.webdav-server.service create mode 100644 examples/nginx-proxy.conf create mode 100644 fs_quota/.gitignore create mode 100644 fs_quota/Cargo.toml create mode 100644 fs_quota/README.md create mode 100644 fs_quota/README.tpl create mode 100644 fs_quota/build.rs create mode 100644 fs_quota/examples/fs_quota.rs create mode 100644 fs_quota/src/Makefile create mode 100644 fs_quota/src/generic_os.rs create mode 100644 fs_quota/src/lib.rs create mode 100644 fs_quota/src/linux.rs create mode 100644 fs_quota/src/quota-linux.c create mode 100644 fs_quota/src/quota-nfs.c create mode 100644 fs_quota/src/quota_nfs.rs create mode 100644 fs_quota/src/rquota.x create mode 100644 pam/Cargo.toml create mode 100644 pam/README.md create mode 100644 pam/README.tpl create mode 100644 pam/TODO.md create mode 100644 pam/build.rs create mode 100644 pam/src/bin/main.rs create mode 100644 pam/src/lib.rs create mode 100644 pam/src/pam.c create mode 100644 pam/src/pam.rs create mode 100644 pam/src/pamclient.rs create mode 100644 pam/src/pamserver.rs create mode 100644 rustfmt.toml create mode 100644 src/auth.rs create mode 100644 src/cache.rs create mode 100644 src/config.rs create mode 100644 src/main.rs create mode 100644 src/rootfs.rs create mode 100644 src/router.rs create mode 100644 src/suid.rs create mode 100644 src/tls.rs create mode 
100644 src/unixuser.rs
 create mode 100644 src/userfs.rs
 create mode 100644 webdav-server.toml
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..8a3fd57
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,78 @@
+[package]
+name = "webdav-server"
+
+# When releasing to crates.io:
+# - Update html_root_url in src/main.rs
+# - Update CHANGELOG.md.
+# - Create git tag webdav-server-0.x.y
+version = "0.4.0"
+
+description = "webdav/http server with support for user accounts"
+readme = "README.md"
+documentation = "https://docs.rs/webdav-server"
+repository = "https://github.com/miquels/webdav-server-rs"
+homepage = "https://github.com/miquels/webdav-server-rs"
+authors = ["Miquel van Smoorenburg "]
+edition = "2018"
+license = "Apache-2.0"
+categories = ["filesystem"]
+
+[workspace]
+
+[features]
+# By default, the "pam" and "quota" features are enabled.
+#
+# Some systems do not have pam (like OpenBSD), so to compile this
+# package without pam but with quota use:
+#
+# cargo build --release --no-default-features --features=quota
+#
+default = [ "pam", "quota" ]
+
+# dependencies for the feature.
+pam = [ "pam-sandboxed" ]
+quota = [ "fs-quota" ]
+
+# Include debug info in release builds.
+[profile.release]
+debug = true
+
+# Build dependencies in optimized mode, even for debug builds.
+[profile.dev.package."*"]
+opt-level = 3
+
+# Build build scripts and proc-macros without optimization in debug builds.
+[profile.dev.build-override]
+opt-level = 0
+
+[dependencies]
+clap = "2.33.3"
+enum_from_str = "0.1.0"
+enum_from_str_derive = "0.1.0"
+env_logger = "0.8.3"
+fs-quota = { path = "fs_quota", version = "0.1.0", optional = true }
+futures = "0.3.15"
+handlebars = "3.5.5"
+headers = "0.3.4"
+http = "0.2.4"
+hyper = { version = "0.14.7", features = [ "http1", "http2", "server", "stream", "runtime" ] }
+lazy_static = "1.4.0"
+libc = "0.2.94"
+log = "0.4.14"
+nix = "0.21.0"
+pam-sandboxed = { path = "pam", version = "0.2.0", optional = true }
+percent-encoding = "2.1.0"
+regex = "1.5.4"
+rustls-pemfile = "1.0.0"
+serde = { version = "1.0.125", features = ["derive"] }
+serde_json = "1.0.64"
+socket2 = "0.4.0"
+time = "0.1.42"
+tls-listener = { version = "0.5.1", features = [ "hyper-h1", "hyper-h2", "rustls" ] }
+tokio = { version = "1.5.0", features = ["full"] }
+tokio-rustls = "0.23.4"
+toml = "0.5.8"
+url = "2.2.2"
+webdav-handler = { path = "../webdav-handler-rs", version = "=0.2.0" }
+#webdav-handler = "0.2.0"
+pwhash = "1.0.0"
diff --git a/README.md b/README.md
index 4df6a60..89d11c5 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,60 @@
-# webdav-server-rs
+# WEBDAV-SERVER
+
+An implementation of a webdav server with support for user accounts,
+and switching uid/gid to those users' accounts. That last feature
+is Linux-only, since the server is threaded and no other OSes have
+support for thread-local credentials.
+
+Uses PAM authentication and local unix accounts.
+
+This server does not implement logging. For now, it is assumed that
+most users of this software want to put an NGINX or Apache reverse-proxy
+in front of it anyway, and that frontend can implement TLS, logging,
+enforcing a maximum number of connections, and timeouts.
+
+This crate uses futures 0.3 and async/await, so the minimum Rust
+compiler version is 1.39.
+
+## Features.
+
+- RFC4918: webdav, full support
+- RFC4331: webdav quota support (linux quota, NFS quota, statfs)
+- locking support (fake locking, enough for macOS and Windows clients)
+- can be case insensitive for Windows clients
+- files starting with a dot get the HIDDEN attribute on Windows
+- optimizations for macOS (spotlight indexing disabled, thumbnail previews
+  disabled, some light directory caching for `._` files)
+- partial put support
+- tested with Windows, macOS, Linux clients
+
+## Building.
+
+By default the server builds with **pam** and **quota** support. If your
+OS does not support one of these features, use cargo command line options
+to disable all features and enable only the ones your OS supports.
+
+For example, to build on OpenBSD, which does not have pam:
+
+```
+cargo build --release --no-default-features --features=quota
+```
+
+## Configuration.
+
+See the [example webdav-server.toml file](webdav-server.toml).
+
+There is also an [example nginx proxy](examples/nginx-proxy.conf) configuration.
+
+## Notes.
+
+The built-in PAM client will add the client IP address to PAM requests.
+If the client IP address is localhost (127/8 or ::1) then the content of
+the X-Forwarded-For header is used instead (if present) to allow for
+the aforementioned frontend proxies.
+
+## Copyright and License.
+
+ * © 2018, 2019 XS4ALL Internet bv
+ * © 2018, 2019 Miquel van Smoorenburg
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 0000000..c73881d
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,19 @@
+
+## TODO list
+
+- define list of "public dirs" where files/dirs are created 644/755,
+  and 600/700 everywhere else?
+- blacklist accounts?
+- document the PAM webdav config
+- document nginx front-proxy config
+
+# DONE:
+
+- GET on directory -> directory listing
+- configuration file
+- systemd unit file
+- configure path/prefix for accounts
+- GET in / -- if file exists, do not authenticate. index support.
+- rootfs: add serving files for GET
+- parse index.tmpl file
+
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..b5d561a
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,25 @@
+webdav-server-rs (0.4) unreleased; urgency=low
+
+  * update to 0.4 release.
+ + -- Miquel van Smoorenburg (XS4ALL) Wed, 12 May 2021 16:49:34 +0200 + +webdav-server-rs (0.3) unreleased; urgency=low + + * update to webdav-handler-rs 0.2.0-alpha.1 + + -- Miquel van Smoorenburg (XS4ALL) Thu, 21 Nov 2019 23:44:03 +0100 + +webdav-server-rs (0.2) buster; urgency=low + + * update to webdav-handler-rs 0.1.2 + * update to modern(erder) Rust + + -- Miquel van Smoorenburg (XS4ALL) Tue, 12 Nov 2019 15:41:53 +0100 + +webdav-server-rs (0.1) wheezy jessie stretch; urgency=low + + * initial release + + -- Miquel van Smoorenburg (XS4ALL) Sun, 17 Mar 2019 23:20:52 +0100 + diff --git a/debian/compat b/debian/compat new file mode 100644 index 0000000..48082f7 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +12 diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..76772b6 --- /dev/null +++ b/debian/control @@ -0,0 +1,12 @@ +Source: webdav-server-rs +Section: net +Priority: extra +Maintainer: Miquel van Smoorenburg +Uploaders: XS4ALL Unixbeheer , Miquel van Smoorenburg (XS4ALL) +Build-Depends: debhelper (>= 12), gcc, libc-dev, libc-dev-bin, libpam0g-dev +Standards-Version: 3.9.1 + +Package: webdav-server-rs +Architecture: any +Depends: ${shlibs:Depends}, systemd +Description: Webdav server diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 0000000..483d3f1 --- /dev/null +++ b/debian/copyright @@ -0,0 +1,2 @@ +Copyright 2018 - Miquel van Smoorenburg +Copyright 2019 - Miquel van Smoorenburg, XS4ALL Internet bv diff --git a/debian/docs b/debian/docs new file mode 100644 index 0000000..b43bf86 --- /dev/null +++ b/debian/docs @@ -0,0 +1 @@ +README.md diff --git a/debian/examples b/debian/examples new file mode 100644 index 0000000..60ee220 --- /dev/null +++ b/debian/examples @@ -0,0 +1 @@ +webdav-server.toml diff --git a/debian/install b/debian/install new file mode 100644 index 0000000..c5c9bce --- /dev/null +++ b/debian/install @@ -0,0 +1 @@ +target/release/webdav-server usr/sbin diff --git a/debian/rules b/debian/rules new file mode 100755 index 0000000..559710e --- /dev/null +++ b/debian/rules @@ -0,0 +1,19 @@ +#! /usr/bin/make -f + +%: + dh $@ + +override_dh_auto_build: + @if ! command -v cargo >/dev/null; then \ + echo "Rust is not installed ("cargo" command not found)" >&2; \ + exit 1; fi + cargo build --release + +override_dh_auto_clean: + +override_dh_installsystemd: + +override_dh_installinit: + dh_installinit --name webdav-server + dh_installsystemd --name webdav-server + diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 0000000..89ae9db --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +3.0 (native) diff --git a/debian/source/options b/debian/source/options new file mode 100644 index 0000000..1245eee --- /dev/null +++ b/debian/source/options @@ -0,0 +1,5 @@ +--tar-ignore +--tar-ignore=target +--tar-ignore=TODO.md +--tar-ignore=*/src/*.toml +--tar-ignore=*test*.toml diff --git a/debian/webdav-server-rs.webdav-server.init b/debian/webdav-server-rs.webdav-server.init new file mode 100644 index 0000000..d2d0660 --- /dev/null +++ b/debian/webdav-server-rs.webdav-server.init @@ -0,0 +1,19 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: webdav-server +# Required-Start: $network $remote_fs $local_fs +# Required-Stop: $network $remote_fs $local_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Stop/start webdav-server +### END INIT INFO + +set -e + +test -x /usr/sbin/webdav-server || exit 0 + +. 
/lib/lsb/init-functions + +# If systemd is installed, we never actually get here. +exit 1 + diff --git a/debian/webdav-server-rs.webdav-server.service b/debian/webdav-server-rs.webdav-server.service new file mode 100644 index 0000000..8ec4819 --- /dev/null +++ b/debian/webdav-server-rs.webdav-server.service @@ -0,0 +1,12 @@ +[Unit] +Description=Webdav server +After=network.target +ConditionPathExists=/etc/webdav-server.toml + +[Service] +ExecStart=/usr/sbin/webdav-server +KillMode=process + +[Install] +WantedBy=multi-user.target + diff --git a/examples/nginx-proxy.conf b/examples/nginx-proxy.conf new file mode 100644 index 0000000..1108ff6 --- /dev/null +++ b/examples/nginx-proxy.conf @@ -0,0 +1,63 @@ +# +# Sample configuration for NGINX. Redirects http to https, and proxies +# https to localhost:4918. +# +# Replace SERVERNAME with the name of your server. +# +# On Debian, this config file can be put in /etc/nginx/sites-available/. +# + +# Upstream server definition. +upstream webdav-rs { + server 127.0.0.1:4918; + keepalive 100; + keepalive_requests 100000; + keepalive_timeout 120s; +} + +# Listener on port 80 that redirects to https. +server { + listen *:80; + listen [::]:80 ; + return 301 https://$host$request_uri; + autoindex off; + server_name SERVERNAME.example.com; + access_log /var/log/nginx/SERVERNAME.access.log; + error_log /var/log/nginx/SERVERNAME.error.log; +} + +# The actual proxy on port 443. +server { + listen *:443 ssl http2; + listen [::]:443 ssl http2 ; + + server_name SERVERNAME.example.com; + + ssl_certificate /etc/letsencrypt/SERVERNAME/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/SERVERNAME/privkey.pem; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 5m; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH; + ssl_prefer_server_ciphers on; + + access_log /var/log/nginx/SERVERNAME.access.log; + error_log /var/log/nginx/SERVERNAME-dev.error.log; + + location / { + proxy_buffering off; + client_max_body_size 0; + proxy_pass http://webdav-rs; + proxy_read_timeout 120s; + proxy_connect_timeout 90s; + proxy_send_timeout 90s; + proxy_redirect off; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Ssl on; + proxy_set_header Connection ""; + proxy_pass_header Date; + proxy_pass_header Server; + } +} diff --git a/fs_quota/.gitignore b/fs_quota/.gitignore new file mode 100644 index 0000000..d22b527 --- /dev/null +++ b/fs_quota/.gitignore @@ -0,0 +1,7 @@ + +/target/ +**/*.rs.bk +Cargo.lock + +src/rquota.h +src/rquota_xdr.c diff --git a/fs_quota/Cargo.toml b/fs_quota/Cargo.toml new file mode 100644 index 0000000..b538e87 --- /dev/null +++ b/fs_quota/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "fs-quota" + +# When releasing to crates.io: +# - Update html_root_url in src/lib.rs +# - Update CHANGELOG.md. 
+# - Run: cargo readme > README.md +# - Create git tag fs-quota-0.x.y +version = "0.1.0" + +readme = "README.md" +documentation = "https://docs.rs/fs-quota" +repository = "https://github.com/miquels/webdav-server-rs" +homepage = "https://github.com/miquels/webdav-server-rs/tree/master/fs_quota" +authors = ["Miquel van Smoorenburg "] +edition = "2018" +license = "Apache-2.0" +keywords = ["quota"] +categories = ["filesystem"] + +[features] +nfs = [] +default = ["nfs"] + +[build-dependencies] +cc = "1.0.66" + +[dependencies] +libc = "0.2.82" +log = "0.4.13" diff --git a/fs_quota/README.md b/fs_quota/README.md new file mode 100644 index 0000000..e0f4f49 --- /dev/null +++ b/fs_quota/README.md @@ -0,0 +1,36 @@ + +# fs-quota + +Get filesystem disk space used and available for a unix user. + +This crate has support for: + +- the Linux quota system +- NFS quotas (via SUNRPC). +- `libc::vfsstat` lookups (like `df`). + +Both the `quota` systemcall and `vfsstat` systemcall are different +on every system. That functionality is only implemented on Linux +right now. NFS quota support _should_ work everywhere. + +NFS quota support can be left out by disabling the `nfs` feature. + +Example application: +```rust +use fs_quota::*; + +fn main() { + let args: Vec = std::env::args().collect(); + if args.len() < 2 { + println!("usage: fs_quota "); + return; + } + println!("{:#?}", FsQuota::check(&args[1], None)); +} +``` + +### Copyright and License. + + * © 2018, 2019 XS4ALL Internet bv + * © 2018, 2019 Miquel van Smoorenburg + * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) diff --git a/fs_quota/README.tpl b/fs_quota/README.tpl new file mode 100644 index 0000000..d3d22cd --- /dev/null +++ b/fs_quota/README.tpl @@ -0,0 +1,11 @@ + +# {{crate}} + +{{readme}} + +### Copyright and License. + + * © 2018, 2019 XS4ALL Internet bv + * © 2018, 2019 Miquel van Smoorenburg + * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + diff --git a/fs_quota/build.rs b/fs_quota/build.rs new file mode 100644 index 0000000..a948da9 --- /dev/null +++ b/fs_quota/build.rs @@ -0,0 +1,68 @@ +extern crate cc; + +use std::fs::File; +use std::io::prelude::*; +use std::path::Path; +use std::process::Command; + +fn run_rpcgen() { + let res = Command::new("rpcgen") + .arg("-c") + .arg("src/rquota.x") + .output() + .expect("failed to run rpcgen"); + let csrc = String::from_utf8_lossy(&res.stdout); + let mut f = File::create("src/rquota_xdr.c").expect("src/rquota_xdr.c"); + f.write_all( + csrc.replace("/usr/include/rpcsvc/rquota.h", "./rquota.h") + .replace("src/rquota.h", "./rquota.h") + .as_bytes(), + ) + .unwrap(); + + let res = Command::new("rpcgen") + .arg("-h") + .arg("src/rquota.x") + .output() + .expect("failed to run rpcgen"); + let hdr = String::from_utf8_lossy(&res.stdout); + let mut f = File::create("src/rquota.h").expect("src/rquota.h"); + f.write_all(hdr.as_bytes()).unwrap(); +} + +fn main() { + #[cfg(feature = "nfs")] + run_rpcgen(); + + let mut builder = cc::Build::new(); + + #[cfg(target_os = "linux")] + builder.file("src/quota-linux.c"); + + #[cfg(feature = "nfs")] + { + if Path::new("/usr/include/tirpc").exists() { + // Fedora does not include RPC support in glibc anymore, so use tirpc instead. 
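+ // (glibc itself no longer ships the SunRPC headers/implementation, so when the
+ // libtirpc headers are present they are added to the include path here; the same
+ // existence check further down decides whether to link libtirpc or librpcsvc.)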
+ builder.include("/usr/include/tirpc"); + } + builder.file("src/quota-nfs.c").file("src/rquota_xdr.c"); + } + builder + .flag_if_supported("-Wno-unused-variable") + .compile("fs_quota"); + + if Path::new("/usr/include/tirpc").exists() { + println!("cargo:rustc-link-lib=tirpc"); + } else { + println!("cargo:rustc-link-lib=rpcsvc"); + } + + #[cfg(target_os = "linux")] + println!("cargo:rerun-if-changed=src/quota-linux.c"); + + #[cfg(feature = "nfs")] + { + println!("cargo:rerun-if-changed=src/rquota.x"); + println!("cargo:rerun-if-changed=src/quota-nfs.c"); + } +} diff --git a/fs_quota/examples/fs_quota.rs b/fs_quota/examples/fs_quota.rs new file mode 100644 index 0000000..2df12e5 --- /dev/null +++ b/fs_quota/examples/fs_quota.rs @@ -0,0 +1,11 @@ +extern crate fs_quota; +use fs_quota::*; + +fn main() { + let args: Vec = std::env::args().collect(); + if args.len() < 2 { + println!("usage: fs_quota "); + return; + } + println!("{:#?}", FsQuota::check(&args[1], None)); +} diff --git a/fs_quota/src/Makefile b/fs_quota/src/Makefile new file mode 100644 index 0000000..df432ef --- /dev/null +++ b/fs_quota/src/Makefile @@ -0,0 +1,26 @@ + +RPCGEN = rpcgen +CFLAGS = -Wall + +rquota.a: quota-nfs.o rquota_xdr.o + rm -f rquota.a + ar r rquota.a quota-nfs.o rquota_xdr.o + +rquota_xdr.c: rquota.h + (echo '#include '; \ + $(RPCGEN) -c rquota.x | \ + sed -e 's/IXDR_PUT/(void)IXDR_PUT/g' \ + -e 's,/usr/include/rpcsvc/rquota.h,rquota.h,' \ + -e 's/^static char rcsid.*//' ) > rquota_xdr.c + +rquota.h: Makefile rquota.x + $(RPCGEN) -h rquota.x > rquota.h + +rquota_xdr.o: rquota_xdr.c + cc -c rquota_xdr.c + +quota-nfs.o: rquota.h + +clean: + rm -f *.a *.o rquota.h rquota_clnt.c rquota_svc.c rquota_xdr.c + diff --git a/fs_quota/src/generic_os.rs b/fs_quota/src/generic_os.rs new file mode 100644 index 0000000..3a7e9f1 --- /dev/null +++ b/fs_quota/src/generic_os.rs @@ -0,0 +1,15 @@ +// +// No-op implementations. +// +use std::io; +use std::path::Path; + +use crate::{FqError, FsQuota, Mtab}; + +pub(crate) fn get_quota(_device: impl AsRef, _uid: u32) -> Result { + Err(FqError::NoQuota) +} + +pub(crate) fn read_mtab() -> io::Result> { + Ok(Vec::new()) +} diff --git a/fs_quota/src/lib.rs b/fs_quota/src/lib.rs new file mode 100644 index 0000000..fc4508f --- /dev/null +++ b/fs_quota/src/lib.rs @@ -0,0 +1,282 @@ +#![doc(html_root_url = "https://docs.rs/fs-quota/0.1.0")] +//! Get filesystem disk space used and available for a unix user. +//! +//! This crate has support for: +//! +//! - the Linux quota system +//! - NFS quotas (via SUNRPC). +//! - `libc::vfsstat` lookups (like `df`). +//! +//! Both the `quota` systemcall and `vfsstat` systemcall are different +//! on every system. That functionality is only implemented on Linux +//! right now. NFS quota support _should_ work everywhere. +//! +//! NFS quota support can be left out by disabling the `nfs` feature. +//! +//! Example application: +//! ```no_run +//! use fs_quota::*; +//! +//! fn main() { +//! let args: Vec = std::env::args().collect(); +//! if args.len() < 2 { +//! println!("usage: fs_quota "); +//! return; +//! } +//! println!("{:#?}", FsQuota::check(&args[1], None)); +//! } +//! ``` +#[macro_use] +extern crate log; +extern crate libc; + +use std::ffi::{CStr, CString, OsStr}; +use std::io; +use std::os::raw::c_char; +use std::os::unix::ffi::OsStrExt; +use std::os::unix::fs::MetadataExt; +use std::path::{Path, PathBuf}; + +#[cfg(feature = "nfs")] +mod quota_nfs; + +// Linux specific code lives in linux.rs. 
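+// On other targets the no-op stubs from generic_os.rs are compiled in instead, so a
+// local quota lookup there simply returns FqError::NoQuota (NFS quotas are still
+// handled by quota_nfs when the "nfs" feature is enabled).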
+#[cfg(target_os = "linux")] +mod linux; +#[cfg(target_os = "linux")] +use linux::{get_quota, read_mtab}; + +// Unsupported OS. +#[cfg(not(target_os = "linux"))] +mod generic_os; +#[cfg(not(target_os = "linux"))] +use generic_os::{get_quota, read_mtab}; + +#[derive(Debug, PartialEq)] +pub(crate) enum FsType { + LinuxExt, + LinuxXfs, + Nfs, + Other, +} + +// return filesystem major type. +fn fstype(tp: &str) -> FsType { + match tp { + "ext2" | "ext3" | "ext4" => FsType::LinuxExt, + "xfs" => FsType::LinuxXfs, + "nfs" | "nfs4" => FsType::Nfs, + _ => FsType::Other, + } +} + +/// quota / vfsstat lookup result. +#[derive(Debug)] +pub struct FsQuota { + /// number of bytes used. + pub bytes_used: u64, + /// maximum number of bytes (available - used). + pub bytes_limit: Option, + /// number of files (inodes) in use. + pub files_used: u64, + /// maximum number of files (available - used). + pub files_limit: Option, +} + +/// Error result. +#[derive(Debug)] +pub enum FqError { + /// Permission denied. + PermissionDenied, + /// Filesystem does not have quotas enabled. + NoQuota, + /// An I/O error occured. + IoError(io::Error), + /// Some other error. + Other, +} + +impl FsQuota { + /// Get the filesystem quota for a `uid` on the filesystem where `path` is on. + /// + /// If `uid` is `None`, get it for the current real user-id. + pub fn user(path: impl AsRef, uid: Option) -> Result { + let id = uid.unwrap_or(unsafe { libc::getuid() as u32 }); + let entry = get_mtab_entry(path)?; + + #[cfg(feature = "nfs")] + { + let fst = fstype(&entry.fstype); + if fst == FsType::Nfs { + return quota_nfs::get_quota(&entry, id); + } + } + + get_quota(&entry.device, id) + } + + /// Get used and available disk space of the filesystem indicated by `path`. + /// + /// This is not really a quota call; it simply calls `libc::vfsstat` (`df`). + pub fn system(path: impl AsRef) -> Result { + // Call libc::vfsstat(). It's POSIX so should be supported everywhere. + let cpath = CString::new(path.as_ref().as_os_str().as_bytes())?; + let mut vfs = unsafe { std::mem::zeroed::() }; + let rc = unsafe { libc::statvfs(cpath.as_ptr(), &mut vfs) }; + if rc != 0 { + return Err(FqError::IoError(io::Error::last_os_error())); + } + Ok(FsQuota { + bytes_used: ((vfs.f_blocks - vfs.f_bfree) * vfs.f_frsize) as u64, + bytes_limit: Some(((vfs.f_blocks - (vfs.f_bfree - vfs.f_bavail)) * vfs.f_frsize) as u64), + files_used: (vfs.f_files - vfs.f_ffree) as u64, + files_limit: Some((vfs.f_files - (vfs.f_ffree - vfs.f_favail)) as u64), + }) + } + + /// Lookup used and available disk space for a `uid`. First check user's quota, + /// if quotas are not enabled check the filesystem disk space usage. + /// + /// This is the equivalent of + /// + /// ```no_run + /// # let path = "/"; + /// # let uid = None; + /// # use fs_quota::*; + /// FsQuota::user(path, uid) + /// .or_else(|e| if e == FqError::NoQuota { FsQuota::system(path) } else { Err(e) }) + /// # ; + /// ``` + /// + pub fn check(path: impl AsRef, uid: Option) -> Result { + let path = path.as_ref(); + FsQuota::user(path, uid).or_else(|e| { + if e == FqError::NoQuota { + FsQuota::system(path) + } else { + Err(e) + } + }) + } +} + +// The libc realpath() function. 
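+// Canonicalizes the path via realpath(3): the malloc()ed C string returned by libc is
+// copied into an owned PathBuf and then free()d, so nothing leaks across the FFI
+// boundary. get_mtab_entry() below uses this to disambiguate overlapping mount entries.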
+fn realpath>(path: P) -> io::Result { + let cpath = CString::new(path.as_ref().as_os_str().as_bytes())?; + let nullptr: *mut c_char = std::ptr::null_mut(); + unsafe { + let r = libc::realpath(cpath.as_ptr(), nullptr); + if r == nullptr { + Err(io::Error::last_os_error()) + } else { + let osstr = OsStr::from_bytes(CStr::from_ptr(r).to_bytes()); + let p = PathBuf::from(osstr); + libc::free(r as *mut libc::c_void); + Ok(p) + } + } +} + +#[derive(Debug, Clone)] +pub(crate) struct Mtab { + host: Option, + device: String, + directory: String, + fstype: String, +} + +// find an entry in the mtab. +fn get_mtab_entry(path: impl AsRef) -> Result { + let path = path.as_ref(); + let meta = std::fs::symlink_metadata(path)?; + + // get all eligible entries. + let ents = read_mtab()? + .into_iter() + .filter(|e| fstype(&e.fstype) != FsType::Other) + .filter(|e| { + match std::fs::metadata(&e.directory) { + Ok(ref m) => m.dev() == meta.dev(), + Err(_) => false, + } + }) + .collect::>(); + + // 0 matches, error. 1 match, fine. >1 match, need to look closer. + let entry = match ents.len() { + 0 => return Err(FqError::NoQuota), + 1 => ents[0].clone(), + _ => { + // multiple matching entries.. happens on NFS. + + // get "realpath" of the path that was passed in. + let rp = match realpath(path) { + Ok(p) => p, + Err(e) => return Err(e.into()), + }; + + // realpath the remaining entries as well.. + let mut v = Vec::new(); + for mut e in ents.into_iter() { + match realpath(&e.directory) { + Ok(p) => { + let c = String::from_utf8_lossy(p.as_os_str().as_bytes()); + e.directory = c.to_string(); + v.push(e); + }, + Err(_) => {}, + } + } + if v.len() == 0 { + return Err(FqError::NoQuota); + } + + // find longest match. + v.sort_by_key(|e| e.directory.clone()); + v.reverse(); + match v.iter().position(|ref x| rp.starts_with(&x.directory)) { + Some(p) => v[p].clone(), + None => { + return Err(FqError::NoQuota); + }, + } + }, + }; + Ok(entry) +} + +impl From for FqError { + fn from(e: io::Error) -> Self { + FqError::IoError(e) + } +} + +impl From for FqError { + fn from(e: std::ffi::NulError) -> Self { + FqError::IoError(e.into()) + } +} + +fn to_num(e: &FqError) -> u32 { + match e { + &FqError::PermissionDenied => 1, + &FqError::NoQuota => 2, + &FqError::IoError(_) => 3, + &FqError::Other => 4, + } +} + +impl PartialEq for FqError { + fn eq(&self, other: &Self) -> bool { + match self { + &FqError::IoError(ref e) => { + if let &FqError::IoError(ref o) = other { + e.kind() == o.kind() + } else { + false + } + }, + e => to_num(e) == to_num(other), + } + } +} diff --git a/fs_quota/src/linux.rs b/fs_quota/src/linux.rs new file mode 100644 index 0000000..355a211 --- /dev/null +++ b/fs_quota/src/linux.rs @@ -0,0 +1,101 @@ +// +// Linux specific systemcalls for quota. +// +use std::ffi::CString; +use std::fs::File; +use std::io; +use std::io::prelude::*; +use std::io::BufReader; +use std::os::raw::{c_char, c_int}; +use std::os::unix::ffi::OsStrExt; +use std::path::Path; + +use crate::{FqError, FsQuota, Mtab}; + +// The actual implementation is done in C, and imported here. +extern "C" { + fn fs_quota_linux( + device: *const c_char, + id: c_int, + do_group: c_int, + bytes_used: *mut u64, + bytes_limit: *mut u64, + files_used: *mut u64, + files_limit: *mut u64, + ) -> c_int; +} + +// wrapper for the C functions. 
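+// fs_quota_linux() (see quota-linux.c) fills in the four counters for this uid on the
+// given block device. Limits reported back as 0xffffffffffffffff are mapped to None
+// ("no limit"); a return code of 1 means quotas are not enabled on that filesystem,
+// and any other nonzero code is converted into the last OS error.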
+pub(crate) fn get_quota(device: impl AsRef, uid: u32) -> Result { + let id = uid as c_int; + let device = device.as_ref(); + + let mut bytes_used = 0u64; + let mut bytes_limit = 0u64; + let mut files_used = 0u64; + let mut files_limit = 0u64; + + let path = CString::new(device.as_os_str().as_bytes())?; + let rc = unsafe { + fs_quota_linux( + path.as_ptr(), + id, + 0, + &mut bytes_used as *mut u64, + &mut bytes_limit as *mut u64, + &mut files_used as *mut u64, + &mut files_limit as *mut u64, + ) + }; + + // Error mapping. + match rc { + 0 => { + let m = |v| if v == 0xffffffffffffffff { None } else { Some(v) }; + Ok(FsQuota { + bytes_used: bytes_used, + bytes_limit: m(bytes_limit), + files_used: files_used, + files_limit: m(files_limit), + }) + }, + 1 => Err(FqError::NoQuota), + _ => Err(FqError::IoError(io::Error::last_os_error())), + } +} + +// read /etc/mtab. +pub(crate) fn read_mtab() -> io::Result> { + let f = File::open("/etc/mtab")?; + let reader = BufReader::new(f); + let mut result = Vec::new(); + for l in reader.lines() { + let l2 = l?; + let line = l2.trim(); + if line.len() == 0 || line.starts_with("#") { + continue; + } + let words = line.split_whitespace().collect::>(); + if words.len() < 3 { + continue; + } + let (host, device) = if words[2].starts_with("nfs") { + if !words[0].contains(":") { + continue; + } + let mut s = words[0].splitn(2, ':'); + let host = s.next().unwrap(); + let path = s.next().unwrap(); + (Some(host.to_string()), path) + } else { + (None, words[2]) + }; + result.push(Mtab { + host: host, + device: device.to_string(), + directory: words[1].to_string(), + fstype: words[2].to_string(), + }); + } + Ok(result) +} diff --git a/fs_quota/src/quota-linux.c b/fs_quota/src/quota-linux.c new file mode 100644 index 0000000..5a89a6c --- /dev/null +++ b/fs_quota/src/quota-linux.c @@ -0,0 +1,44 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_STRUCT_DQBLK_CURSPACE +# define dqb_curblocks dqb_curspace +#endif + +int fs_quota_linux(char *path, int id, int do_group, + uint64_t *bytes_value_r, uint64_t *bytes_limit_r, + uint64_t *count_value_r, uint64_t *count_limit_r) +{ + int type = do_group ? 
GRPQUOTA : USRQUOTA; + + struct dqblk dqblk; + if (quotactl(QCMD(Q_GETQUOTA, type), path, id, (caddr_t)&dqblk) < 0) { + if (errno == ESRCH || errno == ENOENT) { + return 1; + } + return -1; + } + +#if _LINUX_QUOTA_VERSION == 1 + *bytes_value_r = dqblk.dqb_curblocks * 1024; +#else + *bytes_value_r = dqblk.dqb_curspace; +#endif + *bytes_limit_r = dqblk.dqb_bsoftlimit * 1024; + if (*bytes_limit_r == 0) { + *bytes_limit_r = dqblk.dqb_bhardlimit * 1024; + } + *count_value_r = dqblk.dqb_curinodes; + *count_limit_r = dqblk.dqb_isoftlimit; + if (*count_limit_r == 0) { + *count_limit_r = dqblk.dqb_ihardlimit; + } + return 0; +} + diff --git a/fs_quota/src/quota-nfs.c b/fs_quota/src/quota-nfs.c new file mode 100644 index 0000000..d57d72d --- /dev/null +++ b/fs_quota/src/quota-nfs.c @@ -0,0 +1,185 @@ +#include +#include +#include +#include +#include +#include + +#include "./rquota.h" +#define RQUOTA_GETQUOTA_TIMEOUT_SECS 2 + +static const uint32_t unlimited32 = 0xffffffff; +static const uint64_t unlimited64 = 0xffffffffffffffff; + +#define E_CLNT_CALL 0x00100000 +#define E_CLNT_CREATE 0x00000001 +#define E_NOQUOTA 0x00000002 +#define E_PERM 0x00000003 +#define E_UNKNOWN 0x0000f000 + +static void +rquota_get_result(const rquota *rq, + uint64_t *bytes_used_r, uint64_t *bytes_limit_r, + uint64_t *files_used_r, uint64_t *files_limit_r) +{ + *bytes_used_r = (uint64_t)rq->rq_curblocks * + (uint64_t)rq->rq_bsize; + *bytes_limit_r = unlimited64; + if (rq->rq_bsoftlimit != 0 && rq->rq_bsoftlimit != unlimited32) { + *bytes_limit_r = (uint64_t)rq->rq_bsoftlimit * + (uint64_t)rq->rq_bsize; + } else if (rq->rq_bhardlimit != unlimited32) { + *bytes_limit_r = (uint64_t)rq->rq_bhardlimit * + (uint64_t)rq->rq_bsize; + } + + *files_used_r = rq->rq_curfiles; + *files_limit_r = unlimited64; + if (rq->rq_fsoftlimit != 0 && rq->rq_fsoftlimit != unlimited32) + *files_limit_r = rq->rq_fsoftlimit; + else if (rq->rq_fhardlimit != unlimited32) + *files_limit_r = rq->rq_fhardlimit; +} + +int fs_quota_nfs_user(char *host, char *path, int uid, + uint64_t *bytes_used_r, uint64_t *bytes_limit_r, + uint64_t *files_used_r, uint64_t *files_limit_r) +{ + struct getquota_rslt result; + struct getquota_args args; + struct timeval timeout; + enum clnt_stat call_status; + CLIENT *cl; + + /* clnt_create() polls for a while to establish a connection */ + cl = clnt_create(host, RQUOTAPROG, RQUOTAVERS, "udp"); + if (cl == NULL) { + return E_CLNT_CREATE; + } + + /* Establish some RPC credentials */ + auth_destroy(cl->cl_auth); + cl->cl_auth = authunix_create_default(); + + /* make the rquota call on the remote host */ + args.gqa_pathp = path; + args.gqa_uid = uid; + + timeout.tv_sec = RQUOTA_GETQUOTA_TIMEOUT_SECS; + timeout.tv_usec = 0; + call_status = clnt_call(cl, RQUOTAPROC_GETQUOTA, + (xdrproc_t)xdr_getquota_args, (char *)&args, + (xdrproc_t)xdr_getquota_rslt, (char *)&result, + timeout); + + /* the result has been deserialized, let the client go */ + auth_destroy(cl->cl_auth); + clnt_destroy(cl); + + if (call_status != RPC_SUCCESS) { + return E_CLNT_CALL | call_status; + } + + switch (result.status) { + case Q_OK: { + rquota_get_result(&result.getquota_rslt_u.gqr_rquota, + bytes_used_r, bytes_limit_r, + files_used_r, files_limit_r); + return 0; + } + case Q_NOQUOTA: + return E_NOQUOTA; + case Q_EPERM: + return E_PERM; + default: + return E_UNKNOWN; + } +} + +int fs_quota_nfs_ext(char *host, char *path, int id, int do_group, + uint64_t *bytes_used_r, uint64_t *bytes_limit_r, + uint64_t *files_used_r, uint64_t *files_limit_r) +{ +#if 
defined(EXT_RQUOTAVERS) && defined(GRPQUOTA) + struct getquota_rslt result; + ext_getquota_args args; + struct timeval timeout; + enum clnt_stat call_status; + CLIENT *cl; + + /* clnt_create() polls for a while to establish a connection */ + cl = clnt_create(host, RQUOTAPROG, EXT_RQUOTAVERS, "udp"); + if (cl == NULL) { + return E_CLNT_CREATE; + } + + /* Establish some RPC credentials */ + auth_destroy(cl->cl_auth); + cl->cl_auth = authunix_create_default(); + + /* make the rquota call on the remote host */ + args.gqa_pathp = path; + args.gqa_id = id; + args.gqa_type = do_group ? GRPQUOTA : USRQUOTA; + timeout.tv_sec = RQUOTA_GETQUOTA_TIMEOUT_SECS; + timeout.tv_usec = 0; + + call_status = clnt_call(cl, RQUOTAPROC_GETQUOTA, + (xdrproc_t)xdr_ext_getquota_args, (char *)&args, + (xdrproc_t)xdr_getquota_rslt, (char *)&result, + timeout); + + /* the result has been deserialized, let the client go */ + auth_destroy(cl->cl_auth); + clnt_destroy(cl); + + if (call_status != RPC_SUCCESS) { + return E_CLNT_CALL | call_status; + } + + switch (result.status) { + case Q_OK: { + rquota_get_result(&result.getquota_rslt_u.gqr_rquota, + bytes_used_r, bytes_limit_r, + files_used_r, files_limit_r); + return 0; + } + case Q_NOQUOTA: + return E_NOQUOTA; + case Q_EPERM: + return E_PERM; + default: + return E_UNKNOWN; + } + + return 0; +#else + (void)host; (void)path; (void)id; (void)do_group; + (void)bytes_used_r; (void)bytes_limit_r; + (void)files_used_r; (void)files_limit_r; + return E_NOQUOTA; +#endif +} + +int fs_quota_nfs(char *host, char *path, char *nfsvers, int id, int do_group, + uint64_t *bytes_used_r, uint64_t *bytes_limit_r, + uint64_t *files_used_r, uint64_t *files_limit_r) +{ + /* For NFSv4, we send the filesystem path without initial /. Server + prepends proper NFS pseudoroot automatically and uses this for + detection of NFSv4 mounts. */ + if (strcmp(nfsvers, "nfs4") == 0) { + while (*path == '/') + path++; + } + + if (do_group) + return fs_quota_nfs_ext(host, path, id, 1, + bytes_used_r, bytes_limit_r, + files_used_r, files_limit_r); + else + return fs_quota_nfs_user(host, path, id, + bytes_used_r, bytes_limit_r, + files_used_r, files_limit_r); +} + diff --git a/fs_quota/src/quota_nfs.rs b/fs_quota/src/quota_nfs.rs new file mode 100644 index 0000000..e0bed0b --- /dev/null +++ b/fs_quota/src/quota_nfs.rs @@ -0,0 +1,92 @@ +use std::ffi::{CStr, CString}; +use std::os::raw::{c_char, c_int}; + +use crate::{FqError, FsQuota, Mtab}; + +extern "C" { + fn fs_quota_nfs( + host: *const c_char, + path: *const c_char, + nfsvers: *const c_char, + id: c_int, + do_group: c_int, + bytes_used: *mut u64, + bytes_limit: *mut u64, + files_used: *mut u64, + files_limit: *mut u64, + ) -> c_int; +} + +mod ffi { + use super::*; + extern "C" { + pub(crate) fn clnt_sperrno(e: c_int) -> *const c_char; + } +} + +// The rpcsvc clnt_sperrno function. 
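+// clnt_sperrno() returns a pointer to a static message owned by the RPC library, so
+// the string is only borrowed here (never freed); it is used below purely for the
+// debug!() logging of failed clnt_call() requests.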
+fn clnt_sperrno(e: c_int) -> &'static str { + unsafe { + let msg = ffi::clnt_sperrno(e); + std::str::from_utf8(CStr::from_ptr(msg).to_bytes()).unwrap() + } +} + +pub(crate) fn get_quota(entry: &Mtab, uid: u32) -> Result { + let host = CString::new(entry.host.as_ref().unwrap().as_bytes())?; + let path = CString::new(entry.device.as_bytes())?; + let fstype = CString::new(entry.fstype.as_bytes())?; + + let mut bytes_used = 0u64; + let mut bytes_limit = 0u64; + let mut files_used = 0u64; + let mut files_limit = 0u64; + + let rc = unsafe { + fs_quota_nfs( + host.as_ptr(), + path.as_ptr(), + fstype.as_ptr(), + uid as c_int, + 0, + &mut bytes_used as *mut u64, + &mut bytes_limit as *mut u64, + &mut files_used as *mut u64, + &mut files_limit as *mut u64, + ) + }; + + // Error mapping. + match rc { + 0 => {}, + 0x00000001 => { + debug!("nfs: clnt_create error"); + return Err(FqError::Other); + }, + 0x00000002 => { + return Err(FqError::NoQuota); + }, + 0x00000003 => { + debug!("nfs: permission denied"); + return Err(FqError::PermissionDenied); + }, + c @ 0x00100000..=0x001fffff => { + let e = c & 0x000fffff; + debug!("nfs: clnt_call error: {}", clnt_sperrno(e)); + return Err(FqError::Other); + }, + e => { + debug!("nfs: unknown error {}", e); + return Err(FqError::Other); + }, + } + + let m = |v| if v == 0xffffffffffffffff { None } else { Some(v) }; + let res = FsQuota { + bytes_used: bytes_used, + bytes_limit: m(bytes_limit), + files_used: files_used, + files_limit: m(files_limit), + }; + return Ok(res); +} diff --git a/fs_quota/src/rquota.x b/fs_quota/src/rquota.x new file mode 100644 index 0000000..3cd5c10 --- /dev/null +++ b/fs_quota/src/rquota.x @@ -0,0 +1,139 @@ +/* @(#)rquota.x 2.1 88/08/01 4.0 RPCSRC */ +/* @(#)rquota.x 1.2 87/09/20 Copyr 1987 Sun Micro */ + +/* + * Remote quota protocol + * Requires unix authentication + */ + +const RQ_PATHLEN = 1024; + +struct sq_dqblk { + unsigned int rq_bhardlimit; /* absolute limit on disk blks alloc */ + unsigned int rq_bsoftlimit; /* preferred limit on disk blks */ + unsigned int rq_curblocks; /* current block count */ + unsigned int rq_fhardlimit; /* absolute limit on allocated files */ + unsigned int rq_fsoftlimit; /* preferred file limit */ + unsigned int rq_curfiles; /* current # allocated files */ + unsigned int rq_btimeleft; /* time left for excessive disk use */ + unsigned int rq_ftimeleft; /* time left for excessive files */ +}; + +struct getquota_args { + string gqa_pathp; /* path to filesystem of interest */ + int gqa_uid; /* Inquire about quota for uid */ +}; + +struct setquota_args { + int sqa_qcmd; + string sqa_pathp; /* path to filesystem of interest */ + int sqa_id; /* Set quota for uid */ + sq_dqblk sqa_dqblk; +}; + +struct ext_getquota_args { + string gqa_pathp; /* path to filesystem of interest */ + int gqa_type; /* Type of quota info is needed about */ + int gqa_id; /* Inquire about quota for id */ +}; + +struct ext_setquota_args { + int sqa_qcmd; + string sqa_pathp; /* path to filesystem of interest */ + int sqa_id; /* Set quota for id */ + int sqa_type; /* Type of quota to set */ + sq_dqblk sqa_dqblk; +}; + +/* + * remote quota structure + */ +struct rquota { + int rq_bsize; /* block size for block counts */ + bool rq_active; /* indicates whether quota is active */ + unsigned int rq_bhardlimit; /* absolute limit on disk blks alloc */ + unsigned int rq_bsoftlimit; /* preferred limit on disk blks */ + unsigned int rq_curblocks; /* current block count */ + unsigned int rq_fhardlimit; /* absolute limit on allocated files */ + 
unsigned int rq_fsoftlimit; /* preferred file limit */ + unsigned int rq_curfiles; /* current # allocated files */ + unsigned int rq_btimeleft; /* time left for excessive disk use */ + unsigned int rq_ftimeleft; /* time left for excessive files */ +}; + +enum qr_status { + Q_OK = 1, /* quota returned */ + Q_NOQUOTA = 2, /* noquota for uid */ + Q_EPERM = 3 /* no permission to access quota */ +}; + +union getquota_rslt switch (qr_status status) { +case Q_OK: + rquota gqr_rquota; /* valid if status == Q_OK */ +case Q_NOQUOTA: + void; +case Q_EPERM: + void; +}; + +union setquota_rslt switch (qr_status status) { +case Q_OK: + rquota sqr_rquota; /* valid if status == Q_OK */ +case Q_NOQUOTA: + void; +case Q_EPERM: + void; +}; + +program RQUOTAPROG { + version RQUOTAVERS { + /* + * Get all quotas + */ + getquota_rslt + RQUOTAPROC_GETQUOTA(getquota_args) = 1; + + /* + * Get active quotas only + */ + getquota_rslt + RQUOTAPROC_GETACTIVEQUOTA(getquota_args) = 2; + + /* + * Set all quotas + */ + setquota_rslt + RQUOTAPROC_SETQUOTA(setquota_args) = 3; + + /* + * Get active quotas only + */ + setquota_rslt + RQUOTAPROC_SETACTIVEQUOTA(setquota_args) = 4; + } = 1; + version EXT_RQUOTAVERS { + /* + * Get all quotas + */ + getquota_rslt + RQUOTAPROC_GETQUOTA(ext_getquota_args) = 1; + + /* + * Get active quotas only + */ + getquota_rslt + RQUOTAPROC_GETACTIVEQUOTA(ext_getquota_args) = 2; + + /* + * Set all quotas + */ + setquota_rslt + RQUOTAPROC_SETQUOTA(ext_setquota_args) = 3; + + /* + * Set active quotas only + */ + setquota_rslt + RQUOTAPROC_SETACTIVEQUOTA(ext_setquota_args) = 4; + } = 2; +} = 100011; diff --git a/pam/Cargo.toml b/pam/Cargo.toml new file mode 100644 index 0000000..d0f6d3a --- /dev/null +++ b/pam/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "pam-sandboxed" + +# When releasing to crates.io: +# - Update html_root_url in src/lib.rs +# - Update CHANGELOG.md. +# - Run: cargo readme > README.md +# - Create git tag pam-sandboxed-0.x.y +version = "0.2.0" + +readme = "README.md" +documentation = "https://docs.rs/pam-sandboxed" +repository = "https://github.com/miquels/webdav-server-rs" +homepage = "https://github.com/miquels/webdav-server-rs/tree/master/pam" +authors = ["Miquel van Smoorenburg "] +edition = "2018" +license = "Apache-2.0" +categories = ["authentication"] + +[build-dependencies] +cc = "1.0.66" + +[dependencies] +bincode = "1.3.1" +env_logger = "0.8.2" +futures = "0.3.12" +libc = "0.2.82" +log = "0.4.13" +serde = "1.0.120" +serde_derive = "1.0.120" +threadpool = "1.8.1" +tokio = { version = "1.0.2", features = ["io-util", "net", "rt"] } diff --git a/pam/README.md b/pam/README.md new file mode 100644 index 0000000..e0092bf --- /dev/null +++ b/pam/README.md @@ -0,0 +1,49 @@ + +# pam-sandboxed + +### PAM authentication with the pam library running in a separate process. + +The PAM client in this crate creates a future that resolves with the +PAM authentication result. + +### HOW. + +When initialized, the code fork()s and sets up a pipe-based communications +channel between the parent (pam-client) and the child (pam-server). All +the Pam work is then done on a threadpool in the child process. + +### WHY. + +Reasons for doing this instead of just calling libpam directly: + +- Debian still comes with pam 1.8, which when calling setuid helpers + will first close all filedescriptors up to the rlimit. if + If that limit is high (millions) then it takes a looong while. + `RLIMIT_NOFILE` is reset to a reasonably low number in the child process. 
+- You might want to run the pam modules as a different user than + the main process +- There is code in libpam that might call setreuid(), and that is an + absolute non-starter in threaded code. +- Also, if you're mucking around with per-thread uid credentials on Linux by + calling the setresuid syscall directly, the pthread library code that + handles setuid() gets confused. + +### EXAMPLE. +```rust +// call this once. +let mut pam = PamAuth::new(None).expect("failed to initialized PAM"); + +// now use `pam` as a handle to authenticate. +let fut = pam.auth("other", "user", "pass", None) + .then(|res| { + println!("pam auth result: {:?}", res); + res + }); +tokio::spawn(fut.map_err(|_| ())); +``` + +### Copyright and License. + + * © 2018, 2019 XS4ALL Internet bv + * © 2018, 2019 Miquel van Smoorenburg + * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) diff --git a/pam/README.tpl b/pam/README.tpl new file mode 100644 index 0000000..d3d22cd --- /dev/null +++ b/pam/README.tpl @@ -0,0 +1,11 @@ + +# {{crate}} + +{{readme}} + +### Copyright and License. + + * © 2018, 2019 XS4ALL Internet bv + * © 2018, 2019 Miquel van Smoorenburg + * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + diff --git a/pam/TODO.md b/pam/TODO.md new file mode 100644 index 0000000..4a118a7 --- /dev/null +++ b/pam/TODO.md @@ -0,0 +1,11 @@ + +## PAM crate TODO items + +- check all panics. If the server-side panics - that's OK, the client-side + will just return errors. + +- client side, when server has gone away: + - panic ? + - start returning errors ? + - try to restart the server ? + diff --git a/pam/build.rs b/pam/build.rs new file mode 100644 index 0000000..d018e13 --- /dev/null +++ b/pam/build.rs @@ -0,0 +1,6 @@ +extern crate cc; + +fn main() { + println!("cargo:rustc-link-lib=pam"); + cc::Build::new().file("src/pam.c").compile("rpam"); // outputs `librpam.a` +} diff --git a/pam/src/bin/main.rs b/pam/src/bin/main.rs new file mode 100644 index 0000000..a5ce8d3 --- /dev/null +++ b/pam/src/bin/main.rs @@ -0,0 +1,92 @@ +use std::io::{self, Write}; + +use env_logger; +use pam_sandboxed::PamAuth; + +fn prompt(s: &str) -> io::Result { + print!("{}", s); + io::stdout().flush()?; + let mut input = String::new(); + io::stdin().read_line(&mut input)?; + Ok(input.trim().to_string()) +} + +fn main() -> Result<(), Box> { + env_logger::init(); + let name = prompt("What's your login? ")?; + let pass = prompt("What's your password? ")?; + + let mut pamauth = PamAuth::new(None)?; + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_io() + .build()?; + + rt.block_on(async move { + match pamauth.auth("other", &name, &pass, None).await { + Ok(res) => println!("pam.auth returned Ok({:?})", res), + Err(e) => println!("pam.auth returned error: {}", e), + } + Ok(()) + }) +} + +// I've put the tests here in bin/main.rs instead of in lib.rs, because "cargo test" +// for the library links the tests without -lpam, so it fails. The price we pay +// for that is a dynamic test-mode setting in the library, instead of compile-time. 
+#[cfg(test)] +mod tests { + use pam_sandboxed::{test_mode, PamAuth, PamError}; + use tokio; + + const TEST_STR: &str = "xyzzy-test-test"; + + #[test] + fn test_auth() { + test_mode(true); + + let mut pam = PamAuth::new(None).unwrap(); + let mut rt = tokio::runtime::Runtime::new().unwrap(); + + let res = rt.block_on(async { + let mut pam2 = pam.clone(); + + if let Err(e) = pam.auth(TEST_STR, "test", "foo", Some(TEST_STR)).await { + eprintln!("auth(test) failed: {:?}", e); + return Err(e); + } + + if let Ok(_) = pam2.auth(TEST_STR, "unknown", "bar", Some(TEST_STR)).await { + eprintln!("auth(unknown) succeeded, should have failed"); + return Err(PamError::unknown()); + } + + Ok(()) + }); + assert!(res.is_ok()); + } + + #[test] + fn test_many() { + test_mode(true); + + let pam = PamAuth::new(None).unwrap(); + let mut rt = tokio::runtime::Runtime::new().unwrap(); + + let mut handles = Vec::new(); + rt.block_on(async move { + for i in 1u32..=1000 { + let mut pam = pam.clone(); + let handle = tokio::spawn(async move { + if let Err(e) = pam.auth(TEST_STR, "test", "bar", Some(TEST_STR)).await { + panic!("auth(test) failed at iteration {}: {:?}", i, e); + } + }); + handles.push(handle); + } + for handle in handles.drain(..) { + let _ = handle.await; + } + }); + } +} diff --git a/pam/src/lib.rs b/pam/src/lib.rs new file mode 100644 index 0000000..5ba01e0 --- /dev/null +++ b/pam/src/lib.rs @@ -0,0 +1,64 @@ +#![doc(html_root_url = "https://docs.rs/pam-sandboxed/0.2.0")] +//! ## PAM authentication with the pam library running in a separate process. +//! +//! The PAM client in this crate creates a future that resolves with the +//! PAM authentication result. +//! +//! ## HOW. +//! +//! When initialized, the code fork()s and sets up a pipe-based communications +//! channel between the parent (pam-client) and the child (pam-server). All +//! the Pam work is then done on a threadpool in the child process. +//! +//! ## WHY. +//! +//! Reasons for doing this instead of just calling libpam directly: +//! +//! - Debian still comes with pam 1.8, which when calling setuid helpers +//! will first close all filedescriptors up to the rlimit. if +//! If that limit is high (millions) then it takes a looong while. +//! `RLIMIT_NOFILE` is reset to a reasonably low number in the child process. +//! - You might want to run the pam modules as a different user than +//! the main process +//! - There is code in libpam that might call setreuid(), and that is an +//! absolute non-starter in threaded code. +//! - Also, if you're mucking around with per-thread uid credentials on Linux by +//! calling the setresuid syscall directly, the pthread library code that +//! handles setuid() gets confused. +//! +//! ## EXAMPLE. +//! ``` +//! use pam_sandboxed::PamAuth; +//! +//! fn main() { +//! // call this once, early. +//! let mut pam = PamAuth::new(None).expect("failed to initialized PAM"); +//! +//! let mut rt = tokio::runtime::Runtime::new().expect("failed to initialize tokio runtime"); +//! rt.block_on(async move { +//! let res = pam.auth("other", "user", "pass", None).await; +//! println!("pam auth result: {:?}", res); +//! }); +//! } +//! ``` +#[macro_use] +extern crate log; +#[macro_use] +extern crate serde_derive; + +mod pam; +mod pamclient; +mod pamserver; + +use std::sync::atomic::Ordering; + +pub use crate::pam::PamError; +pub use crate::pamclient::PamAuth; + +// See bin/main.rs, mod tests. 
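+// When test mode is switched on, pam_auth() in pam.rs never calls into libpam at all:
+// it accepts the user "test" and rejects any other name, so the tests can exercise
+// the fork/IPC plumbing without a working PAM configuration.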
+#[doc(hidden)] +pub fn test_mode(enabled: bool) { + use crate::pam::TEST_MODE; + let getal = if enabled { 1 } else { 0 }; + TEST_MODE.store(getal, Ordering::SeqCst); +} diff --git a/pam/src/pam.c b/pam/src/pam.c new file mode 100644 index 0000000..ae88586 --- /dev/null +++ b/pam/src/pam.c @@ -0,0 +1,82 @@ +#include +#include +#include +#include + +struct creds { + char *user; + char *password; +}; + +static void add_reply(struct pam_response **reply, int count, char *txt) +{ + *reply = realloc(*reply, (count + 1) * sizeof(struct pam_response)); + (*reply)[count].resp_retcode = 0; + (*reply)[count].resp = strdup(txt ? txt: ""); +} + +static int c_pam_conv(int num_msg, const struct pam_message **msg, + struct pam_response **resp, void *appdata) +{ + struct pam_response *reply = NULL; + struct creds *creds = (struct creds *)appdata; + int replies = 0; + + int count; + for (count = 0; count < num_msg; count++) { + switch (msg[count]->msg_style) { + case PAM_PROMPT_ECHO_ON: + add_reply(&reply, replies++, creds->user); + break; + case PAM_PROMPT_ECHO_OFF: + add_reply(&reply, replies++, creds->password); + break; + case PAM_TEXT_INFO: + break; + case PAM_ERROR_MSG: + default: + if (reply != NULL) + free(reply); + return PAM_CONV_ERR; + } + } + *resp = reply; + return PAM_SUCCESS; +} + +int c_pam_auth(char *service, char *user, char *pass, char *remip) +{ + struct creds creds = { + user, + pass, + }; + struct pam_conv conv = { + c_pam_conv, + &creds, + }; + + pam_handle_t *pamh = NULL; + int ret = pam_start(service, user, &conv, &pamh); + if (ret != PAM_SUCCESS) + return ret; + if (ret == PAM_SUCCESS && remip && remip[0]) + ret = pam_set_item(pamh, PAM_RHOST, remip); + if (ret == PAM_SUCCESS) + ret = pam_authenticate(pamh, 0); + pam_end(pamh, 0); + + return ret; +} + +void c_pam_lower_rlimits() +{ + struct rlimit rlim; + if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) { + rlim_t l = rlim.rlim_cur; + if (l > 256) + l = 256; + rlim.rlim_cur = l; + rlim.rlim_max = l; + setrlimit(RLIMIT_NOFILE, &rlim); + } +} diff --git a/pam/src/pam.rs b/pam/src/pam.rs new file mode 100644 index 0000000..34937c8 --- /dev/null +++ b/pam/src/pam.rs @@ -0,0 +1,97 @@ +use std::error::Error; +use std::ffi::{CStr, CString}; +use std::os::raw::{c_char, c_int, c_void}; +use std::sync::atomic::{AtomicUsize, Ordering}; + +extern "C" { + fn c_pam_auth( + service: *const c_char, + user: *const c_char, + pass: *const c_char, + remip: *const c_char, + ) -> c_int; + fn _c_pam_return_value(index: c_int) -> c_int; + fn pam_strerror(pamh: *const c_void, errnum: c_int) -> *const c_char; + fn c_pam_lower_rlimits(); +} + +pub(crate) const ERR_NUL_BYTE: i32 = 414243; +pub(crate) const ERR_SEND_TO_SERVER: i32 = 414244; +pub(crate) const ERR_RECV_FROM_SERVER: i32 = 414245; + +pub(crate) static TEST_MODE: AtomicUsize = AtomicUsize::new(0); + +/// Error returned if authentication fails. +/// +/// It's best not to try to interpret this, and handle all errors +/// as "authentication failed". 
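+///
+/// Internally it wraps the raw PAM status code, plus a few crate-private codes for
+/// failures in the client/server IPC (ERR_NUL_BYTE, ERR_SEND_TO_SERVER,
+/// ERR_RECV_FROM_SERVER); the Display impl below maps them to readable messages.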
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PamError(pub(crate) i32); + +impl PamError { + #[doc(hidden)] + pub fn unknown() -> PamError { + PamError(13) + } +} + +impl std::fmt::Display for PamError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self.0 { + ERR_NUL_BYTE => write!(f, "embedded 0 byte in string"), + ERR_SEND_TO_SERVER => write!(f, "error sending request to server"), + ERR_RECV_FROM_SERVER => write!(f, "error receiving response from server"), + _ => { + let errnum = self.0 as c_int; + let nullptr: *const c_void = std::ptr::null(); + let errstr = unsafe { CStr::from_ptr(pam_strerror(nullptr, errnum)).to_string_lossy() }; + f.write_str(&format!("PAM error: {}", errstr)) + }, + } + } +} + +impl Error for PamError { + fn description(&self) -> &str { + "PAM authentication error" + } + + fn source(&self) -> Option<&(dyn Error + 'static)> { + None + } +} + +impl From for PamError { + fn from(_e: std::ffi::NulError) -> Self { + PamError(ERR_NUL_BYTE) + } +} + +pub(crate) fn pam_auth(service: &str, user: &str, pass: &str, remip: &str) -> Result<(), PamError> { + if TEST_MODE.load(Ordering::SeqCst) > 0 { + return if user == "test" { Ok(()) } else { Err(PamError(1)) }; + } + + let c_service = CString::new(service)?; + let c_user = CString::new(user)?; + let c_pass = CString::new(pass)?; + let c_remip = CString::new(remip)?; + let ret = unsafe { + c_pam_auth( + c_service.as_ptr(), + c_user.as_ptr(), + c_pass.as_ptr(), + c_remip.as_ptr(), + ) + }; + match ret { + 0 => Ok(()), + errnum => Err(PamError(errnum)), + } +} + +pub(crate) fn pam_lower_rlimits() { + unsafe { + c_pam_lower_rlimits(); + } +} diff --git a/pam/src/pamclient.rs b/pam/src/pamclient.rs new file mode 100644 index 0000000..31c4deb --- /dev/null +++ b/pam/src/pamclient.rs @@ -0,0 +1,265 @@ +// Client part, that is, the part that runs in the local process. +// +// All the futures based code lives here. +// +use std::cell::RefCell; +use std::collections::HashMap; +use std::io; +use std::os::unix::net::UnixStream as StdUnixStream; +use std::sync::{Arc, Mutex, Once}; + +use futures::channel::{mpsc, oneshot}; +use futures::join; +use futures::{sink::SinkExt, stream::StreamExt}; + +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::net::unix::ReadHalf as UnixReadHalf; +use tokio::net::unix::WriteHalf as UnixWriteHalf; +use tokio::net::UnixStream; + +use crate::pam::{PamError, ERR_RECV_FROM_SERVER, ERR_SEND_TO_SERVER}; +use crate::pamserver::{PamResponse, PamServer}; + +// Request to be sent to the server process. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct PamRequest { + pub id: u64, + pub user: String, + pub pass: String, + pub service: String, + pub remip: Option, +} + +// sent over request channel to PamAuthTask. +struct PamRequest1 { + req: PamRequest, + resp_chan: oneshot::Sender>, +} + +/// Pam authenticator. +#[derive(Clone)] +pub struct PamAuth { + inner: Arc, +} + +struct PamAuthInner { + once: Once, + serversock: RefCell>, + req_chan: RefCell>>, +} + +// Mutation of PamAuthInner only happens once, +// protected by atomic Once, so this is safe. +unsafe impl Sync for PamAuthInner {} +unsafe impl Send for PamAuthInner {} + +impl PamAuth { + /// Create a new PAM authenticator. This will start a new PAM server process + /// in the background, and it will contain a new PAM coordination task that + /// will be lazily spawned the first time auth() is called. 
+ /// + /// Note that it is important to call this very early in main(), before any + /// threads or runtimes have started. + /// + /// ```no_run + /// use pam_sandboxed::PamAuth; + /// + /// fn main() -> Result<(), Box> { + /// // get pam authentication handle. + /// let mut pam = PamAuth::new(None)?; + /// + /// // now start tokio runtime and use handle. + /// let mut rt = tokio::runtime::Runtime::new()?; + /// rt.block_on(async move { + /// let res = pam.auth("other", "user", "pass", None).await; + /// println!("pam auth result: {:?}", res); + /// }); + /// Ok(()) + /// } + /// ``` + /// + pub fn new(num_threads: Option) -> Result { + // spawn the server process. + let serversock = PamServer::start(num_threads)?; + + let inner = PamAuthInner { + once: Once::new(), + req_chan: RefCell::new(None), + serversock: RefCell::new(Some(serversock)), + }; + Ok(PamAuth { + inner: Arc::new(inner), + }) + } + + /// Authenticate via pam and return the result. + /// + /// - `service`: PAM service to use - usually "other". + /// - `username`: account username + /// - `password`: account password + /// - `remoteip`: if this is a networking service, the remote IP address of the client. + pub async fn auth( + &mut self, + service: &str, + username: &str, + password: &str, + remoteip: Option<&str>, + ) -> Result<(), PamError> + { + // If we haven't started the background task yet, do it now. + // That also initializes req_chan. + let inner = &self.inner; + inner.once.call_once(|| { + // These should not ever panic on unwrap(). + let serversock = inner.serversock.borrow_mut().take().unwrap(); + inner + .req_chan + .replace(Some(PamAuthTask::start(serversock).unwrap())); + }); + + // create request to be sent to the server. + let req = PamRequest { + id: 0, + user: username.to_string(), + pass: password.to_string(), + service: service.to_string(), + remip: remoteip.map(|s| s.to_string()), + }; + + // add a one-shot channel for the response. + let (tx, rx) = oneshot::channel::>(); + + // put it all together and send it. + let req1 = PamRequest1 { + req: req, + resp_chan: tx, + }; + let mut authtask_chan = inner.req_chan.borrow().as_ref().unwrap().clone(); + authtask_chan + .send(req1) + .await + .map_err(|_| PamError(ERR_SEND_TO_SERVER))?; + + // wait for the response. + match rx.await { + Ok(res) => res, + Err(_) => Err(PamError(ERR_RECV_FROM_SERVER)), + } + } +} + +// Shared data for the PamAuthTask tasks. +struct PamAuthTask { + // clients waiting for a response. + waiters: Mutex>>>, +} + +impl PamAuthTask { + // Start the server process. Then return a handle to send requests on. + fn start(serversock: StdUnixStream) -> io::Result> { + let mut serversock = UnixStream::from_std(serversock)?; + + // create a request channel. + let (req_tx, req_rx) = mpsc::channel::(0); + + // shared state between request and response task. + let this = PamAuthTask { + waiters: Mutex::new(HashMap::new()), + }; + + debug!("PamAuthTask: spawning task on runtime"); + tokio::spawn(async move { + // split serversock into send/receive halves. + let (srx, stx) = serversock.split(); + + join!(this.handle_request(req_rx, stx), this.handle_response(srx)); + }); + + Ok(req_tx) + } + + async fn handle_request(&self, mut req_rx: mpsc::Receiver, mut stx: UnixWriteHalf<'_>) { + let mut id: u64 = 0; + loop { + // receive next request. + let PamRequest1 { mut req, resp_chan } = match req_rx.next().await { + Some(r1) => r1, + None => { + // PamAuth handle was dropped. Ask server to exit. 
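+                    // (The wire format is a 2-byte big-endian length prefix
+                    // followed by a bincode-encoded PamRequest; a zero length,
+                    // which is what is sent here, is the shutdown signal that
+                    // PamServer::serve() checks for.)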
+ let data = [0u8; 2]; + let _ = stx.write_all(&data).await; + return; + }, + }; + + // store the response channel. + req.id = id; + id += 1; + { + let mut waiters = self.waiters.lock().unwrap(); + waiters.insert(req.id, resp_chan); + } + + // serialize data and send. + let mut data: Vec = match bincode::serialize(&req) { + Ok(data) => data, + Err(e) => { + // this panic can never happen at runtime. + panic!("PamClient: serializing data: {:?}", e); + }, + }; + if data.len() > 65533 { + // this panic can never happen at runtime. + panic!("PamClient: serialized data > 65533 bytes"); + } + let l1 = ((data.len() >> 8) & 0xff) as u8; + let l2 = (data.len() & 0xff) as u8; + data.insert(0, l1); + data.insert(1, l2); + if let Err(e) = stx.write_all(&data).await { + // this can happen if the server has gone away. + // in which case, handle_response() will exit as well. + error!("PamClient: FATAL: writing data to server: {:?}", e); + return; + } + } + } + + async fn handle_response(&self, mut srx: UnixReadHalf<'_>) { + loop { + // read size header. + let mut buf = [0u8; 2]; + if let Err(_) = srx.read_exact(&mut buf).await { + error!("PamClient: FATAL: short read, server gone away?!"); + return; + } + let sz = ((buf[0] as usize) << 8) + (buf[1] as usize); + + // read response data. + let mut data = Vec::with_capacity(sz); + data.resize(sz, 0u8); + if let Err(_) = srx.read_exact(&mut data[..]).await { + error!("PamClient: FATAL: short read, server gone away?!"); + return; + } + + // deserialize. + let resp: PamResponse = match bincode::deserialize(&data[..]) { + Ok(req) => req, + Err(_) => { + // this panic can never happen at runtime. + panic!("PamCLient: error deserializing response"); + }, + }; + + // and send response to waiting requester. + let resp_chan = { + let mut waiters = self.waiters.lock().unwrap(); + waiters.remove(&resp.id) + }; + if let Some(resp_chan) = resp_chan { + let _ = resp_chan.send(resp.result); + } + } + } +} diff --git a/pam/src/pamserver.rs b/pam/src/pamserver.rs new file mode 100644 index 0000000..9b31bbe --- /dev/null +++ b/pam/src/pamserver.rs @@ -0,0 +1,169 @@ +// Server part - the code here is fork()ed off and lives in its own +// process. We communicate with it through a unix stream socket. +// +// This is all old-fashioned blocking and thread-based code. +// +use std::io::{self, Read, Write}; +use std::os::unix::io::AsRawFd; +use std::os::unix::net::UnixStream as StdUnixStream; +use std::sync::{Arc, Mutex}; + +use bincode::{deserialize, serialize}; +use libc; + +use crate::pam::{pam_auth, pam_lower_rlimits, PamError}; +use crate::pamclient::PamRequest; + +// Response back from the server process. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct PamResponse { + pub id: u64, + pub result: Result<(), PamError>, +} + +// server side. +pub(crate) struct PamServer { + rx_socket: StdUnixStream, + tx_socket: Arc>, +} + +impl PamServer { + // fork and start the server, return the stream socket for communication. + pub(crate) fn start(num_threads: Option) -> Result { + // Create a unix socketpair for communication. + let (sock1, sock2) = StdUnixStream::pair()?; + let sock3 = sock2.try_clone()?; + + let handle = std::thread::spawn(move || { + // fork server. + let pid = unsafe { libc::fork() }; + if pid < 0 { + return Err(io::Error::last_os_error()); + } + if pid == 0 { + // first, close all filedescriptors (well, all..) 
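+                // (fds 0, 1 and 2 are kept for stdio; the two socketpair fds
+                // are skipped explicitly below, and everything else inherited
+                // from the parent is closed so the PAM child does not hang on
+                // to listening sockets or other stray handles.)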
+ for fdno in 3..8192 { + if fdno != sock2.as_raw_fd() && fdno != sock3.as_raw_fd() { + unsafe { + libc::close(fdno); + } + } + } + let mut server = PamServer { + rx_socket: sock2, + tx_socket: Arc::new(Mutex::new(sock3)), + }; + pam_lower_rlimits(); + trace!("PamServer: child: starting server"); + server.serve(num_threads.unwrap_or(8)); + drop(server); + std::process::exit(0); + } + Ok(()) + }); + handle.join().unwrap()?; + + trace!("PamServer: parent: started server"); + Ok(sock1) + } + + // serve requests. + fn serve(&mut self, num_threads: usize) { + // create a threadpool, then serve connections via the threadpool. + let pool = threadpool::ThreadPool::new(num_threads); + + // process incoming connections. + loop { + // read length. + let mut buf = [0u8; 2]; + let res = self.rx_socket.read_exact(&mut buf); + if let Err(e) = res { + if e.kind() == std::io::ErrorKind::UnexpectedEof { + // parent probably exited - not an error. + trace!("PamServer::serve: EOF reached on input"); + break; + } + panic!("PamServer::serve: read socket: {}", e); + } + let sz = ((buf[0] as usize) << 8) + (buf[1] as usize); + if sz == 0 { + // size 0 packet indicates client wants to shut us down. + trace!("PamServer::serve: EOF packet on input"); + break; + } + + // read request data. + let mut data = Vec::with_capacity(sz); + data.resize(sz, 0u8); + let res = self.rx_socket.read_exact(&mut data); + if let Err(e) = res { + panic!("PamServer::serve: read socket: {}", e); + } + let req: PamRequest = match deserialize(&data[..]) { + Ok(req) => req, + Err(_) => panic!("PamServer::serve: error deserializing request"), + }; + trace!( + "PamServer::serve: read request {:?} active threads: {} queued {}", + req, + pool.active_count(), + pool.queued_count() + ); + + // run request on pool. + let sock = self.tx_socket.clone(); + pool.execute(move || { + if let Err(e) = pam_process(req, sock) { + panic!("PamServer::pam_process: error: {}", e); + } + }); + let mut i = 0; + while pool.queued_count() > 2 * pool.max_count() { + if i == 399 { + debug!( + "PamServer::serve: pool busy! active {}, max {}, queued: {}", + pool.active_count(), + pool.max_count(), + pool.queued_count() + ); + } + i += 1; + i = i % 400; + std::thread::sleep(std::time::Duration::from_millis(5)); + } + } + + pool.join(); + trace!("PamServer::serve: exit."); + std::process::exit(0); + } +} + +// Process one request. This is run on the threadpool. +fn pam_process(req: PamRequest, sock: Arc>) -> Result<(), io::Error> { + trace!("PamServer::pam_process: starting with request {:?}", req); + + // authenticate. + let remip = req.remip.as_ref().map(|s| s.as_str()).unwrap_or(""); + let res = PamResponse { + id: req.id, + result: pam_auth(&req.service, &req.user, &req.pass, remip), + }; + + // and send back result. + trace!("PamServer::pam_process: returning response {:?}", res); + let mut response: Vec = serialize(&res) + .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("error serializing response: {}", e)))?; + let l1 = ((response.len() >> 8) & 0xff) as u8; + let l2 = (response.len() & 0xff) as u8; + response.insert(0, l1); + response.insert(1, l2); + + match sock.lock().unwrap().write_all(&response) { + Err(e) => { + debug!("PamServer::pam_process: writing to response socket: {}", e); + Err(e) + }, + Ok(..) 
=> Ok(()), + } +} diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..2eedefd --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,12 @@ +unstable_features = true + +edition = "2018" +binop_separator = "Back" +blank_lines_upper_bound = 3 +enum_discrim_align_threshold = 20 +force_multiline_blocks = true +match_block_trailing_comma = true +max_width = 110 +struct_field_align_threshold = 20 +where_single_line = true +wrap_comments = false diff --git a/src/auth.rs b/src/auth.rs new file mode 100644 index 0000000..0099f7e --- /dev/null +++ b/src/auth.rs @@ -0,0 +1,156 @@ +use std::io; +use std::net::SocketAddr; +use std::sync::Arc; + +use crate::config::{AuthType, Config, Location}; + +use headers::{authorization::Basic, Authorization, HeaderMapExt}; +use http::status::StatusCode; + +type HttpRequest = http::Request; + +#[derive(Clone)] +pub struct Auth { + config: Arc, + #[cfg(feature = "pam")] + pam_auth: pam_sandboxed::PamAuth, +} + +impl Auth { + pub fn new(config: Arc) -> io::Result { + // initialize pam. + #[cfg(feature = "pam")] + let pam_auth = { + // set cache timeouts. + if let Some(timeout) = config.pam.cache_timeout { + crate::cache::cached::set_pamcache_timeout(timeout); + } + pam_sandboxed::PamAuth::new(config.pam.threads.clone())? + }; + + Ok(Auth { + #[cfg(feature = "pam")] + pam_auth, + config, + }) + } + + // authenticate user. + pub async fn auth<'a>( + &'a self, + req: &'a HttpRequest, + location: &Location, + _remote_ip: SocketAddr, + ) -> Result + { + // we must have a login/pass + let basic = match req.headers().typed_get::>() { + Some(Authorization(basic)) => basic, + _ => return Err(StatusCode::UNAUTHORIZED), + }; + let user = basic.username(); + let pass = basic.password(); + + // match the auth type. + let auth_type = location + .accounts + .auth_type + .as_ref() + .or(self.config.accounts.auth_type.as_ref()); + match auth_type { + #[cfg(feature = "pam")] + Some(&AuthType::Pam) => self.auth_pam(req, user, pass, _remote_ip).await, + Some(&AuthType::HtPasswd(ref ht)) => self.auth_htpasswd(user, pass, ht.as_str()).await, + None => { + debug!("need authentication, but auth-type is not set"); + Err(StatusCode::UNAUTHORIZED) + }, + } + } + + // authenticate user using PAM. + #[cfg(feature = "pam")] + async fn auth_pam<'a>( + &'a self, + req: &'a HttpRequest, + user: &'a str, + pass: &'a str, + remote_ip: SocketAddr, + ) -> Result + { + // stringify the remote IP address. + let ip = remote_ip.ip(); + let ip_string = if ip.is_loopback() { + // if it's loopback, take the value from the x-forwarded-for + // header, if present. + req.headers() + .get("x-forwarded-for") + .and_then(|s| s.to_str().ok()) + .and_then(|s| s.split(',').next()) + .map(|s| s.trim().to_owned()) + } else { + Some(match ip { + std::net::IpAddr::V4(ip) => ip.to_string(), + std::net::IpAddr::V6(ip) => ip.to_string(), + }) + }; + let ip_ref = ip_string.as_ref().map(|s| s.as_str()); + + // authenticate. + let service = self.config.pam.service.as_str(); + let pam_auth = self.pam_auth.clone(); + match crate::cache::cached::pam_auth(pam_auth, service, user, pass, ip_ref).await { + Ok(_) => Ok(user.to_string()), + Err(_) => { + debug!( + "auth_pam({}): authentication for {} ({:?}) failed", + service, user, ip_ref + ); + Err(StatusCode::UNAUTHORIZED) + }, + } + } + + // authenticate user using htpasswd. + async fn auth_htpasswd<'a>( + &'a self, + user: &'a str, + pass: &'a str, + section: &'a str, + ) -> Result + { + // Get the htpasswd.WHATEVER section from the config file. 
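+        // `section` is the part after "htpasswd." in the configured auth-type,
+        // so auth-type = "htpasswd.users" looks up the [htpasswd.users] table
+        // here (see deserialize_authtype in config.rs; the name "users" is
+        // only an example).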
+ let file = match self.config.htpasswd.get(section) { + Some(section) => section.htpasswd.as_str(), + None => return Err(StatusCode::UNAUTHORIZED), + }; + + // Read the file and split it into a bunch of lines. + tokio::task::block_in_place(move || { + let data = match std::fs::read_to_string(file) { + Ok(data) => data, + Err(e) => { + debug!("{}: {}", file, e); + return Err(StatusCode::UNAUTHORIZED); + }, + }; + let lines = data + .split('\n') + .map(|s| s.trim()) + .filter(|s| !s.starts_with("#") && !s.is_empty()); + + // Check each line for a match. + for line in lines { + let mut fields = line.split(':'); + if let (Some(htuser), Some(htpass)) = (fields.next(), fields.next()) { + if htuser == user && pwhash::unix::verify(pass, htpass) { + return Ok(user.to_string()); + } + } + } + + debug!("auth_htpasswd: authentication for {} failed", user); + Err(StatusCode::UNAUTHORIZED) + }) + } +} diff --git a/src/cache.rs b/src/cache.rs new file mode 100644 index 0000000..9cde518 --- /dev/null +++ b/src/cache.rs @@ -0,0 +1,185 @@ +use std::borrow::Borrow; +use std::cmp::Eq; +use std::collections::vec_deque::VecDeque; +use std::collections::HashMap; +use std::hash::Hash; +use std::option::Option; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; + +#[allow(dead_code)] +pub struct Cache { + intern: Mutex>, +} + +struct Intern { + maxsize: usize, + maxage: Duration, + map: HashMap>, + fifo: VecDeque<(Instant, K)>, +} + +impl Cache { + pub fn new() -> Cache { + let i = Intern { + maxsize: 0, + maxage: Duration::new(0, 0), + map: HashMap::new(), + fifo: VecDeque::new(), + }; + Cache { + intern: Mutex::new(i), + } + } + + #[allow(dead_code)] + pub fn maxsize(self, maxsize: usize) -> Self { + self.intern.lock().unwrap().maxsize = maxsize; + self + } + + #[allow(dead_code)] + pub fn maxage(self, maxage: Duration) -> Self { + self.intern.lock().unwrap().maxage = maxage; + self + } + + fn expire(&self, m: &mut Intern) { + let mut n = m.fifo.len(); + if m.maxsize > 0 && n >= m.maxsize { + n = m.maxsize; + } + if m.maxage.as_secs() > 0 || m.maxage.subsec_nanos() > 0 { + let now = Instant::now(); + while n > 0 { + let &(t, _) = m.fifo.get(n - 1).unwrap(); + if now.duration_since(t) <= m.maxage { + break; + } + n -= 1; + } + } + for x in n..m.fifo.len() { + let &(_, ref key) = m.fifo.get(x).unwrap(); + m.map.remove(&key); + } + m.fifo.truncate(n); + } + + pub fn insert(&self, key: K, val: V) -> Arc { + let mut m = self.intern.lock().unwrap(); + self.expire(&mut *m); + let av = Arc::new(val); + let ac = av.clone(); + m.map.insert(key.clone(), av); + m.fifo.push_front((Instant::now(), key)); + ac + } + + // see https://doc.rust-lang.org/book/first-edition/borrow-and-asref.html + pub fn get(&self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq, + { + let mut m = self.intern.lock().unwrap(); + self.expire(&mut *m); + if let Some(v) = m.map.get(key) { + return Some(v.clone()); + } + None + } +} + +pub(crate) mod cached { + // + // Cached versions of Unix account lookup and Pam auth. + // + use std::io; + use std::sync::{Arc, Mutex}; + use std::time::Duration; + + use crate::cache; + use crate::unixuser::{self, User}; + use lazy_static::lazy_static; + + struct Timeouts { + pwcache: Duration, + pamcache: Duration, + } + + lazy_static! 
{ + static ref TIMEOUTS: Mutex = Mutex::new(Timeouts { + pwcache: Duration::new(120, 0), + pamcache: Duration::new(120, 0), + }); + static ref PWCACHE: cache::Cache = new_pwcache(); + static ref PAMCACHE: cache::Cache = new_pamcache(); + } + + fn new_pwcache() -> cache::Cache { + let timeouts = TIMEOUTS.lock().unwrap(); + cache::Cache::new().maxage(timeouts.pwcache) + } + + fn new_pamcache() -> cache::Cache { + let timeouts = TIMEOUTS.lock().unwrap(); + cache::Cache::new().maxage(timeouts.pamcache) + } + + pub(crate) fn set_pwcache_timeout(secs: usize) { + let mut timeouts = TIMEOUTS.lock().unwrap(); + timeouts.pwcache = Duration::new(secs as u64, 0); + } + + #[cfg(feature = "pam")] + pub(crate) fn set_pamcache_timeout(secs: usize) { + let mut timeouts = TIMEOUTS.lock().unwrap(); + timeouts.pamcache = Duration::new(secs as u64, 0); + } + + #[cfg(feature = "pam")] + pub async fn pam_auth<'a>( + pam_auth: pam_sandboxed::PamAuth, + service: &'a str, + user: &'a str, + pass: &'a str, + remip: Option<&'a str>, + ) -> Result<(), pam_sandboxed::PamError> + { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + + let mut s = DefaultHasher::new(); + service.hash(&mut s); + user.hash(&mut s); + pass.hash(&mut s); + remip.as_ref().hash(&mut s); + let key = s.finish(); + + if let Some(cache_user) = PAMCACHE.get(&key) { + if user == cache_user.as_str() { + return Ok(()); + } + } + + let mut pam_auth = pam_auth; + match pam_auth.auth(&service, &user, &pass, remip).await { + Err(e) => Err(e), + Ok(()) => { + PAMCACHE.insert(key, user.to_owned()); + Ok(()) + }, + } + } + + pub async fn unixuser(username: &str, with_groups: bool) -> Result, io::Error> { + if let Some(pwd) = PWCACHE.get(username) { + return Ok(pwd); + } + match User::by_name_async(username, with_groups).await { + Err(e) => Err(e), + Ok(pwd) => Ok(PWCACHE.insert(username.to_owned(), pwd)), + } + } +} diff --git a/src/config.rs b/src/config.rs new file mode 100644 index 0000000..1412117 --- /dev/null +++ b/src/config.rs @@ -0,0 +1,339 @@ +use std::collections::HashMap; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::path::Path; +use std::process::exit; +use std::{fs, io}; + +use enum_from_str::ParseEnumVariantError; +use enum_from_str_derive::FromStr; +use serde::{Deserialize, Deserializer}; +use toml; +use webdav_handler::DavMethodSet; + +use crate::router::Router; + +#[derive(Deserialize, Debug)] +pub struct Config { + pub server: Server, + #[serde(default)] + pub accounts: Accounts, + #[serde(default)] + pub pam: Pam, + #[serde(default)] + pub htpasswd: HashMap, + #[serde(default)] + pub unix: Unix, + #[serde(default)] + pub location: Vec, + #[serde(skip)] + pub router: Router, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct Server { + #[serde(default)] + pub listen: OneOrManyAddr, + #[serde(default)] + pub tls_listen: OneOrManyAddr, + #[serde(default)] + pub tls_key: Option, + #[serde(default)] + pub tls_cert: Option, + //#[serde(deserialize_with = "deserialize_user", default)] + pub uid: Option, + //#[serde(deserialize_with = "deserialize_group", default)] + pub gid: Option, + #[serde(default)] + pub identification: Option, + #[serde(default)] + pub cors: bool, +} + +#[derive(Deserialize, Debug, Clone, Default)] +pub struct Accounts { + #[serde(rename = "auth-type", deserialize_with = "deserialize_authtype", default)] + pub auth_type: Option, + #[serde(rename = "acct-type", deserialize_with = "deserialize_opt_enum", default)] + pub acct_type: Option, + #[serde(default)] + pub realm: Option, +} 
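+
+// A rough sketch of how the Accounts section above and the Pam section below
+// map onto webdav-server.toml (field names follow the serde attributes; the
+// values are invented for illustration):
+//
+//   [accounts]
+//   auth-type = "pam"            # or e.g. "htpasswd.users"
+//   acct-type = "unix"
+//   realm = "Webdav Server"
+//
+//   [pam]
+//   service = "webdav"
+//   cache-timeout = 120
+//   threads = 8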
+ +#[derive(Deserialize, Debug, Clone, Default)] +pub struct Pam { + pub service: String, + #[serde(rename = "cache-timeout")] + pub cache_timeout: Option, + pub threads: Option, +} + +#[derive(Deserialize, Debug, Clone, Default)] +pub struct HtPasswd { + pub htpasswd: String, +} + +#[derive(Deserialize, Debug, Clone, Default)] +pub struct Unix { + #[serde(rename = "cache-timeout")] + pub cache_timeout: Option, + #[serde(rename = "min-uid", default)] + pub min_uid: Option, + #[serde(rename = "supplementary-groups", default)] + pub aux_groups: bool, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct Location { + #[serde(default)] + pub route: Vec, + #[serde(deserialize_with = "deserialize_methodset", default)] + pub methods: Option, + #[serde(deserialize_with = "deserialize_opt_enum", default)] + pub auth: Option, + #[serde(default, flatten)] + pub accounts: Accounts, + #[serde(deserialize_with = "deserialize_enum")] + pub handler: Handler, + #[serde(default)] + pub setuid: bool, + pub directory: String, + #[serde(default, alias = "hide-symlinks")] + pub hide_symlinks: Option, + #[serde(default)] + pub indexfile: Option, + #[serde(default)] + pub autoindex: bool, + #[serde( + rename = "case-insensitive", + deserialize_with = "deserialize_opt_enum", + default + )] + pub case_insensitive: Option, + #[serde(deserialize_with = "deserialize_opt_enum", default)] + pub on_notfound: Option, +} + +#[derive(FromStr, Debug, Clone, Copy)] +pub enum Handler { + #[from_str = "virtroot"] + Virtroot, + #[from_str = "filesystem"] + Filesystem, +} + +#[derive(FromStr, Debug, Clone, Copy)] +pub enum Auth { + #[from_str = "false"] + False, + #[from_str = "true"] + True, + #[from_str = "opportunistic"] + Opportunistic, + #[from_str = "write"] + Write, +} + +#[derive(Debug, Clone)] +pub enum AuthType { + #[cfg(feature = "pam")] + Pam, + HtPasswd(String), +} + +#[derive(FromStr, Debug, Clone, Copy)] +pub enum AcctType { + #[from_str = "unix"] + Unix, +} + +#[derive(FromStr, Debug, Clone, Copy)] +pub enum CaseInsensitive { + #[from_str = "true"] + True, + #[from_str = "ms"] + Ms, + #[from_str = "false"] + False, +} + +#[derive(FromStr, Debug, Clone, Copy)] +pub enum OnNotfound { + #[from_str = "continue"] + Continue, + #[from_str = "return"] + Return, +} + +#[derive(Deserialize, Debug, Clone)] +#[serde(untagged)] +pub enum OneOrManyAddr { + One(SocketAddr), + Many(Vec), +} + +impl OneOrManyAddr { + pub fn is_empty(&self) -> bool { + match self { + OneOrManyAddr::One(_) => false, + OneOrManyAddr::Many(v) => v.is_empty(), + } + } +} + +impl Default for OneOrManyAddr { + fn default() -> Self { + OneOrManyAddr::Many(Vec::new()) + } +} + +impl ToSocketAddrs for OneOrManyAddr { + type Iter = std::vec::IntoIter; + fn to_socket_addrs(&self) -> io::Result> { + let i = match self { + OneOrManyAddr::Many(ref v) => v.to_owned(), + OneOrManyAddr::One(ref s) => vec![*s], + }; + Ok(i.into_iter()) + } +} + +// keep this here for now, we might implement a enum{(u32, String} later for +// usernames and groupnames. 
+#[allow(unused)] +pub fn deserialize_user<'de, D>(deserializer: D) -> Result, D::Error> +where D: Deserializer<'de> { + let s = String::deserialize(deserializer)?; + s.parse::() + .map(|v| Some(v)) + .map_err(serde::de::Error::custom) +} + +#[allow(unused)] +pub fn deserialize_group<'de, D>(deserializer: D) -> Result, D::Error> +where D: Deserializer<'de> { + let s = String::deserialize(deserializer)?; + s.parse::() + .map(|v| Some(v)) + .map_err(serde::de::Error::custom) +} + +pub fn deserialize_methodset<'de, D>(deserializer: D) -> Result, D::Error> +where D: Deserializer<'de> { + let m = Vec::::deserialize(deserializer)?; + DavMethodSet::from_vec(m) + .map(|v| Some(v)) + .map_err(serde::de::Error::custom) +} + +pub fn deserialize_authtype<'de, D>(deserializer: D) -> Result, D::Error> +where D: Deserializer<'de> { + let s = String::deserialize(deserializer)?; + if s.starts_with("htpasswd.") { + return Ok(Some(AuthType::HtPasswd(s[9..].to_string()))); + } + #[cfg(feature = "pam")] + if &s == "pam" { + return Ok(Some(AuthType::Pam)); + } + if s == "" { + return Ok(None); + } + Err(serde::de::Error::custom("unknown auth-type")) +} + +pub fn deserialize_opt_enum<'de, D, E>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + E: std::str::FromStr, + E::Err: std::fmt::Display, +{ + String::deserialize(deserializer)? + .as_str() + .parse::() + .map(|e| Some(e)) + .map_err(serde::de::Error::custom) +} + +pub fn deserialize_enum<'de, D, E>(deserializer: D) -> Result +where + D: Deserializer<'de>, + E: std::str::FromStr, + E::Err: std::fmt::Display, +{ + String::deserialize(deserializer)? + .as_str() + .parse::() + .map_err(serde::de::Error::custom) +} + +// Read the TOML config into a config::Config struct. +pub fn read(toml_file: impl AsRef) -> io::Result { + let buffer = fs::read_to_string(&toml_file)?; + + // initial parse. 
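+    // (This only validates TOML syntax and field types; route compilation and
+    // cross-field checks happen afterwards in build_routes() and check().)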
+ let config: Config = match toml::from_str(&buffer) { + Ok(v) => Ok(v), + Err(e) => Err(io::Error::new(io::ErrorKind::InvalidData, e.to_string())), + }?; + + Ok(config) +} + +pub fn build_routes(cfg: &str, config: &mut Config) -> io::Result<()> { + let mut builder = Router::builder(); + for (idx, location) in config.location.iter().enumerate() { + for r in &location.route { + if let Err(e) = builder.add(r, location.methods.clone(), idx) { + let msg = format!("{}: [[location]][{}]: route {}: {}", cfg, idx, r, e); + return Err(io::Error::new(io::ErrorKind::InvalidData, msg)); + } + } + } + config.router = builder.build(); + Ok(()) +} + +pub fn check(cfg: &str, config: &Config) { + #[cfg(feature = "pam")] + if let Some(AuthType::Pam) = config.accounts.auth_type { + if config.pam.service == "" { + eprintln!("{}: missing section [pam]", cfg); + exit(1); + } + } + + if config.server.listen.is_empty() && config.server.tls_listen.is_empty() { + eprintln!("{}: [server]: at least one of listen or tls_listen must be set", cfg); + exit(1); + } + if !config.server.tls_listen.is_empty() { + if config.server.tls_cert.is_none() { + eprintln!("{}: [server]: tls_cert not set", cfg); + exit(1); + } + if config.server.tls_key.is_none() { + eprintln!("{}: [server]: tls_key not set", cfg); + exit(1); + } + } + + for (idx, location) in config.location.iter().enumerate() { + if location.setuid { + if !crate::suid::has_thread_switch_ugid() { + eprintln!( + "{}: [[location]][{}]: setuid: uid switching not supported on this OS", + cfg, idx + ); + exit(1); + } + if config.server.uid.is_none() || config.server.gid.is_none() { + eprintln!("{}: [server]: missing uid and/or gid", cfg); + exit(1); + } + if config.accounts.acct_type.is_none() && location.accounts.acct_type.is_none() { + eprintln!("{}: [[location]][{}]: setuid: no acct-type set", cfg, idx); + exit(1); + } + } + } +} diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 0000000..c2c57a3 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,635 @@ +#![doc(html_root_url = "https://docs.rs/webdav-server/0.4.0")] +//! # `webdav-server` is a webdav server that handles user-accounts. +//! +//! This is a webdav server that allows access to a users home directory, +//! just like an ancient FTP server would (remember those?). +//! +//! This is an application. There is no API documentation here. +//! If you want to build your _own_ webdav server, use the `webdav-handler` crate. +//! +//! See the [GitHub repository](https://github.com/miquels/webdav-server-rs/) +//! for documentation on how to run the server. +//! 
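+//! A typical invocation, using the command line options defined in `main()`
+//! below (the configuration path shown is the built-in default):
+//!
+//! ```text
+//! webdav-server -c /etc/webdav-server.toml -D
+//! ```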
+ +#[macro_use] +extern crate log; + +mod auth; +mod cache; +mod config; +mod rootfs; +#[doc(hidden)] +pub mod router; +mod suid; +mod tls; +mod unixuser; +mod userfs; + +use std::convert::TryFrom; +use std::io; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::os::unix::io::{FromRawFd, AsRawFd}; +use std::process::exit; +use std::sync::Arc; + +use clap::clap_app; +use headers::{authorization::Basic, Authorization, HeaderMapExt}; +use http::status::StatusCode; +use hyper::{ + self, + server::conn::{AddrIncoming, AddrStream}, + service::{make_service_fn, service_fn}, +}; +use tls_listener::TlsListener; +use tokio_rustls::server::TlsStream; +use webdav_handler::{davpath::DavPath, DavConfig, DavHandler, DavMethod, DavMethodSet}; +use webdav_handler::{fakels::FakeLs, fs::DavFileSystem, ls::DavLockSystem}; + +use crate::config::{AcctType, Auth, CaseInsensitive, Handler, Location, OnNotfound}; +use crate::rootfs::RootFs; +use crate::router::MatchedRoute; +use crate::suid::proc_switch_ugid; +use crate::tls::tls_acceptor; +use crate::userfs::UserFs; + +static PROGNAME: &'static str = "webdav-server"; + +// Contains "state" and a handle to the config. +#[derive(Clone)] +struct Server { + dh: DavHandler, + auth: auth::Auth, + config: Arc, +} + +type HttpResult = Result, io::Error>; +type HttpRequest = http::Request; + +// Server implementation. +impl Server { + // Constructor. + pub fn new(config: Arc, auth: auth::Auth) -> Self { + // mostly empty handler. + let ls = FakeLs::new() as Box; + let dh = DavHandler::builder().locksystem(ls).build_handler(); + + Server { dh, auth, config } + } + + // check user account. + async fn acct<'a>( + &'a self, + location: &Location, + auth_user: Option<&'a String>, + user_param: Option<&'a str>, + ) -> Result>, StatusCode> + { + // Get username - if any. + let user = match auth_user.map(|u| u.as_str()).or(user_param) { + Some(u) => u, + None => return Ok(None), + }; + + // If account is not set, fine. + let acct_type = location + .accounts + .acct_type + .as_ref() + .or(self.config.accounts.acct_type.as_ref()); + match acct_type { + Some(&AcctType::Unix) => {}, + None => return Ok(None), + }; + + // check if user exists. + let pwd = match cache::cached::unixuser(user, self.config.unix.aux_groups).await { + Ok(pwd) => pwd, + Err(_) => { + debug!("acct: unix: user {} not found", user); + return Err(StatusCode::UNAUTHORIZED); + }, + }; + + // check minimum uid + if let Some(min_uid) = self.config.unix.min_uid { + if pwd.uid < min_uid { + debug!("acct: {}: uid {} too low (<{})", pwd.name, pwd.uid, min_uid); + return Err(StatusCode::FORBIDDEN); + } + } + Ok(Some(pwd)) + } + + // return a new response::Builder with the Server and CORS header set. + fn response_builder(&self) -> http::response::Builder { + let mut builder = hyper::Response::builder(); + self.set_headers(builder.headers_mut().unwrap()); + builder + } + + // Set Server: webdav-server-rs header, and CORS. + fn set_headers(&self, headers: &mut http::HeaderMap) { + let id = self + .config + .server + .identification + .as_ref() + .map(|s| s.as_str()) + .unwrap_or("webdav-server-rs"); + if id != "" { + headers.insert("server", id.parse().unwrap()); + } + if self.config.server.cors { + headers.insert("Access-Control-Allow-Origin", "*".parse().unwrap()); + headers.insert("Access-Control-Allow-Methods", "GET,HEAD,OPTIONS,PROPFIND".parse().unwrap()); + headers.insert("Access-Control-Allow-Headers", "DNT,Depth,Range".parse().unwrap()); + } + } + + // handle a request. 
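+    //
+    // Routes are tried in the order the [[location]] sections appear in the
+    // configuration; a location whose on_notfound is "continue" lets a
+    // NOT_FOUND result fall through to the next matching location, while any
+    // other outcome is final.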
+ async fn route(&self, req: HttpRequest, remote_ip: SocketAddr) -> HttpResult { + // Get the URI path. + let davpath = match DavPath::from_uri(req.uri()) { + Ok(p) => p, + Err(_) => return self.error(StatusCode::BAD_REQUEST).await, + }; + let path = davpath.as_bytes(); + + // Get the method. + let method = match DavMethod::try_from(req.method()) { + Ok(m) => m, + Err(_) => return self.error(http::StatusCode::METHOD_NOT_ALLOWED).await, + }; + + // Request is stored here. + let mut reqdata = Some(req); + let mut got_match = false; + + // Match routes to one or more locations. + for route in self + .config + .router + .matches(path, method, &["user", "path"]) + .drain(..) + { + got_match = true; + + // Take the request from the option. + let req = reqdata.take().unwrap(); + + // if we might continue, store a clone of the request for the next round. + let location = &self.config.location[*route.data]; + if let Some(OnNotfound::Continue) = location.on_notfound { + reqdata.get_or_insert(clone_httpreq(&req)); + } + + // handle request. + let res = self + .handle(req, method, path, route, location, remote_ip.clone()) + .await?; + + // no on_notfound? then this is final. + if reqdata.is_none() || res.status() != StatusCode::NOT_FOUND { + return Ok(res); + } + } + + if !got_match { + debug!("route: no matching route for {:?}", davpath); + } + + self.error(StatusCode::NOT_FOUND).await + } + + // handle a request. + async fn handle<'a, 't: 'a, 'p: 'a>( + &'a self, + req: HttpRequest, + method: DavMethod, + path: &'a [u8], + route: MatchedRoute<'t, 'p, usize>, + location: &'a Location, + remote_ip: SocketAddr, + ) -> HttpResult + { + // See if we matched a :user parameter + // If so, it must be valid UTF-8, or we return NOT_FOUND. + let user_param = match route.params[0].as_ref() { + Some(p) => { + match p.as_str() { + Some(p) => Some(p), + None => { + debug!("handle: invalid utf-8 in :user part of path"); + return self.error(StatusCode::NOT_FOUND).await; + }, + } + }, + None => None, + }; + + // Do authentication if needed. + let auth_hdr = req.headers().typed_get::>(); + let do_auth = match location.auth { + Some(Auth::True) => true, + Some(Auth::Write) => !DavMethodSet::WEBDAV_RO.contains(method) || auth_hdr.is_some(), + Some(Auth::False) => false, + Some(Auth::Opportunistic) | None => auth_hdr.is_some(), + }; + let auth_user = if do_auth { + let user = match self.auth.auth(&req, location, remote_ip).await { + Ok(user) => user, + Err(status) => return self.auth_error(status, location).await, + }; + // if there was a :user in the route, return error if it does not match. + if user_param.map(|u| u != &user).unwrap_or(false) { + debug!("handle: auth user and :user mismatch"); + return self.auth_error(StatusCode::UNAUTHORIZED, location).await; + } + Some(user) + } else { + None + }; + + // Now see if we want to do a account lookup, for uid/gid/homedir. + let pwd = match self.acct(location, auth_user.as_ref(), user_param).await { + Ok(pwd) => pwd, + Err(status) => return self.auth_error(status, location).await, + }; + + // Expand "~" in the directory. + let dir = match expand_directory(location.directory.as_str(), pwd.as_ref()) { + Ok(d) => d, + Err(_) => return self.error(StatusCode::NOT_FOUND).await, + }; + + // If :path matched, we can calculate the prefix. + // If it didn't, the entire path _is_ the prefix. 
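+        // (For example, with a route like "/users/:user(/*path)" and a request
+        // for "/users/mike/docs", the :path match starts right after
+        // "/users/mike/", so the prefix passed to strip_prefix() below becomes
+        // "/users/mike". The route and names are purely illustrative.)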
+ let prefix = match route.params[1].as_ref() { + Some(p) => { + let mut start = p.start(); + if start > 0 { + start -= 1; + } + &path[..start] + }, + None => path, + }; + let prefix = match std::str::from_utf8(prefix) { + Ok(p) => p.to_string(), + Err(_) => { + debug!("handle: prefix is non-UTF8"); + return self.error(StatusCode::NOT_FOUND).await; + }, + }; + + // Get User-Agent for user-agent specific modes. + let user_agent = req + .headers() + .get("user-agent") + .and_then(|s| s.to_str().ok()) + .unwrap_or(""); + + // Case insensitivity wanted? + let case_insensitive = match location.case_insensitive { + Some(CaseInsensitive::True) => true, + Some(CaseInsensitive::Ms) => user_agent.contains("Microsoft"), + Some(CaseInsensitive::False) | None => false, + }; + + // macOS optimizations? + let macos = user_agent.contains("WebDAVFS/") && user_agent.contains("Darwin"); + + // Get the filesystem. + let auth_ugid = if location.setuid { + pwd.as_ref().map(|p| (p.uid, p.gid, p.groups.as_slice())) + } else { + None + }; + let fs = match location.handler { + Handler::Virtroot => { + let auth_user = auth_user.as_ref().map(String::to_owned); + RootFs::new(dir, auth_user, auth_ugid) as Box + }, + Handler::Filesystem => { + UserFs::new(dir, auth_ugid, true, case_insensitive, macos) as Box + }, + }; + + // Build a handler. + let methods = location + .methods + .unwrap_or(DavMethodSet::from_vec(vec!["GET", "HEAD"]).unwrap()); + let hide_symlinks = location.hide_symlinks.clone().unwrap_or(true); + + let mut config = DavConfig::new() + .filesystem(fs) + .strip_prefix(prefix) + .methods(methods) + .hide_symlinks(hide_symlinks) + .autoindex(location.autoindex); + if let Some(auth_user) = auth_user { + config = config.principal(auth_user); + } + if let Some(indexfile) = location.indexfile.clone() { + config = config.indexfile(indexfile); + } + + // All set. + self.run_davhandler(config, req).await + } + + async fn build_error(&self, code: StatusCode, location: Option<&Location>) -> HttpResult { + let msg = format!( + "{} {}\n", + code.as_u16(), + code.canonical_reason().unwrap_or("") + ); + let mut response = self + .response_builder() + .status(code) + .header("Content-Type", "text/xml"); + if code == StatusCode::UNAUTHORIZED { + let realm = location.and_then(|location| location.accounts.realm.as_ref()); + let realm = realm.or(self.config.accounts.realm.as_ref()); + let realm = realm.map(|s| s.as_str()).unwrap_or("Webdav Server"); + response = response.header("WWW-Authenticate", format!("Basic realm=\"{}\"", realm).as_str()); + } + Ok(response.body(msg.into()).unwrap()) + } + + async fn auth_error(&self, code: StatusCode, location: &Location) -> HttpResult { + self.build_error(code, Some(location)).await + } + + async fn error(&self, code: StatusCode) -> HttpResult { + self.build_error(code, None).await + } + + // Call the davhandler, then add headers to the response. + async fn run_davhandler(&self, config: DavConfig, req: HttpRequest) -> HttpResult { + let resp = self.dh.handle_with(config, req).await; + let (mut parts, body) = resp.into_parts(); + self.set_headers(&mut parts.headers); + Ok(http::Response::from_parts(parts, body)) + } +} + +fn main() -> Result<(), Box> { + // command line option processing. 
+ let matches = clap_app!(webdav_server => + (version: "0.3") + (@arg CFG: -c --config +takes_value "configuration file (/etc/webdav-server.toml)") + (@arg PORT: -p --port +takes_value "listen to this port on localhost only") + (@arg DBG: -D --debug "enable debug level logging") + ) + .get_matches(); + + if matches.is_present("DBG") { + use env_logger::Env; + let level = "webdav_server=debug,webdav_handler=debug"; + env_logger::Builder::from_env(Env::default().default_filter_or(level)).init(); + } else { + env_logger::init(); + } + + let port = matches.value_of("PORT"); + let cfg = matches.value_of("CFG").unwrap_or("/etc/webdav-server.toml"); + + // read config. + let mut config = match config::read(cfg.clone()) { + Err(e) => { + eprintln!("{}: {}: {}", PROGNAME, cfg, e); + exit(1); + }, + Ok(c) => c, + }; + config::check(cfg.clone(), &config); + + // build routes. + if let Err(e) = config::build_routes(cfg.clone(), &mut config) { + eprintln!("{}: {}: {}", PROGNAME, cfg, e); + exit(1); + } + + if let Some(port) = port { + let localhosts = vec![ + ("127.0.0.1:".to_string() + port).parse::().unwrap(), + ("[::]:".to_string() + port).parse::().unwrap(), + ]; + config.server.listen = config::OneOrManyAddr::Many(localhosts); + } + let config = Arc::new(config); + + // set cache timeouts. + if let Some(timeout) = config.unix.cache_timeout { + cache::cached::set_pwcache_timeout(timeout); + } + + // resolve addresses. + let addrs = config.server.listen.clone().to_socket_addrs().unwrap_or_else(|e| { + eprintln!("{}: {}: [server] listen: {:?}", PROGNAME, cfg, e); + exit(1); + }); + let tls_addrs = config.server.tls_listen.clone().to_socket_addrs().unwrap_or_else(|e| { + eprintln!("{}: {}: [server] listen: {:?}", PROGNAME, cfg, e); + exit(1); + }); + + // initialize auth early. + let auth = auth::Auth::new(config.clone())?; + + // start tokio runtime and initialize the rest from within the runtime. + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_io() + .enable_time() + .build()?; + + rt.block_on(async move { + // build servers (one for each listen address). + let dav_server = Server::new(config.clone(), auth); + let mut servers = Vec::new(); + let mut tls_servers = Vec::new(); + + // Plaintext servers. + for sockaddr in addrs { + let listener = match make_listener(sockaddr) { + Ok(l) => l, + Err(e) => { + eprintln!("{}: listener on {:?}: {}", PROGNAME, &sockaddr, e); + exit(1); + }, + }; + let dav_server = dav_server.clone(); + let make_service = make_service_fn(move |socket: &AddrStream| { + let dav_server = dav_server.clone(); + let remote_addr = socket.remote_addr(); + async move { + let func = move |req| { + let dav_server = dav_server.clone(); + async move { dav_server.route(req, remote_addr).await } + }; + Ok::<_, hyper::Error>(service_fn(func)) + } + }); + let incoming = AddrIncoming::from_listener(listener)?; + let server = hyper::Server::builder(incoming); + println!("Listening on http://{:?}", sockaddr); + + servers.push(async move { + if let Err(e) = server.serve(make_service).await { + eprintln!("{}: server error: {}", PROGNAME, e); + exit(1); + } + }); + } + + // TLS servers. 
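+        // (config::check() has already verified that tls_cert and tls_key are
+        // present whenever tls_listen is non-empty, so tls_acceptor() below
+        // can count on both being set.)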
+ if tls_addrs.len() > 0 { + let tls_acceptor = tls_acceptor(&config.server)?; + + for sockaddr in tls_addrs { + let tls_acceptor = tls_acceptor.clone(); + let listener = make_listener(sockaddr).unwrap_or_else(|e| { + eprintln!("{}: listener on {:?}: {}", PROGNAME, &sockaddr, e); + exit(1); + }); + let dav_server = dav_server.clone(); + let make_service = make_service_fn(move |stream: &TlsStream| { + let dav_server = dav_server.clone(); + let remote_addr = stream.get_ref().0.remote_addr(); + async move { + let func = move |req| { + let dav_server = dav_server.clone(); + async move { dav_server.route(req, remote_addr).await } + }; + Ok::<_, hyper::Error>(service_fn(func)) + } + }); + + // Since the server can exit when there's an error on the TlsStream, + // we run it in a loop. Every time the loop is entered we dup() the + // listening fd and create a new TcpListener. This way, we should + // not lose any pending connections during a restart. + let master_listen_fd = listener.as_raw_fd(); + std::mem::forget(listener); + + println!("Listening on https://{:?}", sockaddr); + tls_servers.push(async move { + loop { + // reuse the incoming socket after the server exits. + let listen_fd = match nix::unistd::dup(master_listen_fd) { + Ok(fd) => fd, + Err(e) => { + eprintln!("{}: server error: dup: {}", PROGNAME, e); + break; + } + }; + // SAFETY: listen_fd is unique (we just dup'ed it). + let std_listen = unsafe { std::net::TcpListener::from_raw_fd(listen_fd) }; + let listener = match tokio::net::TcpListener::from_std(std_listen) { + Ok(l) => l, + Err(e) => { + eprintln!("{}: server error: new TcpListener: {}", PROGNAME, e); + break; + } + }; + let a_incoming = match AddrIncoming::from_listener(listener) { + Ok(a) => a, + Err(e) => { + eprintln!("{}: server error: new AddrIncoming: {}", PROGNAME, e); + break; + } + }; + let incoming = TlsListener::new(tls_acceptor.clone(), a_incoming); + let server = hyper::Server::builder(incoming); + if let Err(e) = server.serve(make_service.clone()).await { + eprintln!("{}: server error: {} (retrying)", PROGNAME, e); + } + } + }); + } + } + + // drop privs. + match (&config.server.uid, &config.server.gid) { + (&Some(uid), &Some(gid)) => { + if !suid::have_suid_privs() { + eprintln!( + "{}: insufficent priviliges to switch uid/gid (not root).", + PROGNAME + ); + exit(1); + } + let keep_privs = config.location.iter().any(|l| l.setuid); + proc_switch_ugid(uid, gid, keep_privs); + }, + _ => {}, + } + + // spawn all servers, and wait for them to finish. + let mut tasks = Vec::new(); + for server in servers.drain(..) { + tasks.push(tokio::spawn(server)); + } + for server in tls_servers.drain(..) { + tasks.push(tokio::spawn(server)); + } + for task in tasks.drain(..) { + let _ = task.await; + } + + Ok::<_, Box>(()) + }) +} + +// Clones a http request with an empty body. +fn clone_httpreq(req: &HttpRequest) -> HttpRequest { + let mut builder = http::Request::builder() + .method(req.method().clone()) + .uri(req.uri().clone()) + .version(req.version().clone()); + for (name, value) in req.headers().iter() { + builder = builder.header(name, value); + } + builder.body(hyper::Body::empty()).unwrap() +} + +fn expand_directory(dir: &str, pwd: Option<&Arc>) -> Result { + // If it doesn't start with "~", skip. + if !dir.starts_with("~") { + return Ok(dir.to_string()); + } + // ~whatever doesn't work. 
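+    // (So "~" and "~/music" are expanded against the authenticated account's
+    // home directory below, while something like "~joe" is rejected; the
+    // directory names are only examples.)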
+ if dir.len() > 1 && !dir.starts_with("~/") { + debug!("expand_directory: rejecting {}", dir); + return Err(StatusCode::NOT_FOUND); + } + // must have a directory, and that dir must be UTF-8. + let pwd = match pwd { + Some(pwd) => pwd, + None => { + debug!("expand_directory: cannot expand {}: no account", dir); + return Err(StatusCode::NOT_FOUND); + }, + }; + let homedir = pwd.dir.to_str().ok_or(StatusCode::NOT_FOUND)?; + Ok(format!("{}/{}", homedir, &dir[1..])) +} + +// Make a new TcpListener, and if it's a V6 listener, set the +// V6_V6ONLY socket option on it. +fn make_listener(addr: SocketAddr) -> io::Result { + use socket2::{Domain, SockAddr, Socket, Type, Protocol}; + let s = Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))?; + if addr.is_ipv6() { + s.set_only_v6(true)?; + } + s.set_nonblocking(true)?; + s.set_nodelay(true)?; + s.set_reuse_address(true)?; + let addr: SockAddr = addr.into(); + s.bind(&addr)?; + s.listen(128)?; + let listener: std::net::TcpListener = s.into(); + tokio::net::TcpListener::from_std(listener) +} diff --git a/src/rootfs.rs b/src/rootfs.rs new file mode 100644 index 0000000..13ced0a --- /dev/null +++ b/src/rootfs.rs @@ -0,0 +1,112 @@ +// +// Virtual Root filesystem for PROPFIND. +// +// Shows "/" and "/user". +// +use std; +use std::path::Path; + +use futures::future::{self, FutureExt}; +use webdav_handler::davpath::DavPath; +use webdav_handler::fs::*; + +use crate::userfs::UserFs; + +#[derive(Clone)] +pub struct RootFs { + user: String, + fs: UserFs, +} + +impl RootFs { + pub fn new
<P>
(dir: P, user: Option, creds: Option<(u32, u32, &[u32])>) -> Box + where P: AsRef + Clone { + Box::new(RootFs { + user: user.unwrap_or("".to_string()), + fs: *UserFs::new(dir, creds, false, false, true), + }) + } +} + +impl DavFileSystem for RootFs { + // Only allow "/" or "/user", for both return the metadata of the UserFs root. + fn metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture> { + async move { + let b = path.as_bytes(); + if b != b"/" && &b[1..] != self.user.as_bytes() { + return Err(FsError::NotFound); + } + let path = DavPath::new("/").unwrap(); + self.fs.metadata(&path).await + } + .boxed() + } + + // Only return one entry: "user". + fn read_dir<'a>( + &'a self, + path: &'a DavPath, + _meta: ReadDirMeta, + ) -> FsFuture>> + { + Box::pin(async move { + let mut v = Vec::new(); + if self.user != "" { + v.push(RootFsDirEntry { + name: self.user.clone(), + meta: self.fs.metadata(path).await, + }); + } + let strm = futures::stream::iter(RootFsReadDir { + iterator: v.into_iter(), + }); + Ok(Box::pin(strm) as FsStream>) + }) + } + + // cannot open any files. + fn open(&self, _path: &DavPath, _options: OpenOptions) -> FsFuture> { + Box::pin(future::ready(Err(FsError::NotImplemented))) + } + + // forward quota. + fn get_quota(&self) -> FsFuture<(u64, Option)> { + self.fs.get_quota() + } +} + +#[derive(Debug)] +struct RootFsReadDir { + iterator: std::vec::IntoIter, +} + +impl Iterator for RootFsReadDir { + type Item = Box; + + fn next(&mut self) -> Option> { + match self.iterator.next() { + None => return None, + Some(entry) => Some(Box::new(entry)), + } + } +} + +#[derive(Debug)] +struct RootFsDirEntry { + meta: FsResult>, + name: String, +} + +impl DavDirEntry for RootFsDirEntry { + fn metadata(&self) -> FsFuture> { + Box::pin(future::ready(self.meta.clone())) + } + + fn name(&self) -> Vec { + self.name.as_bytes().to_vec() + } + + fn is_dir(&self) -> FsFuture { + Box::pin(future::ready(Ok(true))) + } +} diff --git a/src/router.rs b/src/router.rs new file mode 100644 index 0000000..7cce9b3 --- /dev/null +++ b/src/router.rs @@ -0,0 +1,262 @@ +//! +//! Simple and stupid HTTP router. +//! +use std::default::Default; +use std::fmt::Debug; + +use lazy_static::lazy_static; +use regex::bytes::{Match, Regex, RegexSet}; +use webdav_handler::{DavMethod, DavMethodSet}; + +// internal representation of a route. +#[derive(Debug)] +struct Route { + regex: Regex, + methods: Option, + data: T, +} + +/// A matched route. +#[derive(Debug)] +pub struct MatchedRoute<'t, 'p, T: Debug> { + pub methods: Option, + pub params: Vec>>, + pub data: &'t T, +} + +/// A parameter on a matched route. +pub struct Param<'p>(Match<'p>); + +impl Debug for Param<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("Param") + .field("start", &self.0.start()) + .field("end", &self.0.end()) + .field("as_str", &std::str::from_utf8(self.0.as_bytes()).ok()) + .finish() + } +} + +impl<'p> Param<'p> { + /// Returns the starting byte offset of the match in the path. + #[inline] + pub fn start(&self) -> usize { + self.0.start() + } + + /// Returns the ending byte offset of the match in the path. + #[inline] + pub fn end(&self) -> usize { + self.0.end() + } + + /// Returns the matched part of the path. + #[inline] + pub fn as_bytes(&self) -> &'p [u8] { + self.0.as_bytes() + } + + /// Returns the matched part of the path as a &str, if it is valid utf-8. 
+ #[inline] + pub fn as_str(&self) -> Option<&'p str> { + std::str::from_utf8(self.0.as_bytes()).ok() + } +} + +pub struct Builder { + routes: Vec>, +} + +impl Builder { + /// Add a route. + /// + /// Routes are matched in the order they were added. + /// + /// If a route starts with '^', it's assumed that it is a regular + /// expression. Parameters are included as "named capture groups". + /// + /// Otherwise, it's a route-expression, with just the normal :params + /// and *splat param, and parts between parentheses are optional. + /// + /// Example: + /// + /// - /api/get/:id + /// - /files/*path + /// - /users(/) + /// - /users(/*path) + /// + pub fn add( + &mut self, + route: impl AsRef, + methods: Option, + data: T, + ) -> Result<&mut Self, regex::Error> + { + let route = route.as_ref(); + // Might be a regexp + if route.starts_with("^") { + return self.add_re(route, methods, data); + } + // Ignore it if it does not start with / + if !route.starts_with("/") { + return Ok(self); + } + + // First, replace special characters "()*" with unicode chars + // from the private-use area, so that we can then regex-escape + // the entire string. + let re_route = route + .chars() + .map(|c| { + match c { + '*' => '\u{e001}', + '(' => '\u{e002}', + ')' => '\u{e003}', + '\u{e001}' => ' ', + '\u{e002}' => ' ', + '\u{e003}' => ' ', + c => c, + } + }) + .collect::(); + let re_route = regex::escape(&re_route); + + // Translate route expression into regexp. + // We do a simple transformation: + // :ident -> (?P[^/]*) + // *ident -> (?P.*) + // (text) -> (?:text|) + lazy_static! { + static ref COLON: Regex = Regex::new(":([a-zA-Z0-9]+)").unwrap(); + static ref SPLAT: Regex = Regex::new("\u{e001}([a-zA-Z0-9]+)").unwrap(); + static ref MAYBE: Regex = Regex::new("\u{e002}([^\u{e002}]*)\u{e003}").unwrap(); + }; + let mut re_route = re_route.into_bytes(); + re_route = COLON.replace_all(&re_route, &b"(?P<$1>[^/]*)"[..]).to_vec(); + re_route = SPLAT.replace_all(&re_route, &b"(?P<$1>.*)"[..]).to_vec(); + re_route = MAYBE.replace_all(&re_route, &b"($1)?"[..]).to_vec(); + + // finalize regex. + let re_route = "^".to_string() + &String::from_utf8(re_route).unwrap() + "$"; + + self.add_re(&re_route, methods, data) + } + + // add route as regular expression. + fn add_re(&mut self, s: &str, methods: Option, data: T) -> Result<&mut Self, regex::Error> { + // Set flags: enable ". matches everything", disable strict unicode. + // We known 's' starts with "^", add it after that. + let s2 = format!("^(?s){}", &s[1..]); + let regex = Regex::new(&s2)?; + self.routes.push(Route { regex, methods, data }); + Ok(self) + } + + /// Combine all the routes and compile them into an internal RegexSet. + pub fn build(&mut self) -> Router { + let set = RegexSet::new(self.routes.iter().map(|r| r.regex.as_str())).unwrap(); + Router { + routes: std::mem::replace(&mut self.routes, Vec::new()), + set, + } + } +} + +/// Dead simple HTTP router. +#[derive(Debug)] +pub struct Router { + set: RegexSet, + routes: Vec>, +} + +impl Default for Router { + fn default() -> Router { + Router { + set: RegexSet::new(&[] as &[&str]).unwrap(), + routes: Vec::new(), + } + } +} + +impl Router { + /// Return a builder. + pub fn builder() -> Builder { + Builder { routes: Vec::new() } + } + + /// See if the path matches a route in the set. + /// + /// The names of the parameters you want to be returned need to be passed in as an array. 
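+    ///
+    /// A small sketch of a call and its result, mirroring the tests at the
+    /// bottom of this file (router, route and parameter values are only an
+    /// illustration):
+    ///
+    /// ```ignore
+    /// let hits = router.matches(b"/users/mike", DavMethod::Get, &["user", "path"]);
+    /// // hits[0].params[0] holds the "mike" match, hits[0].params[1] is None.
+    /// ```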
+ pub fn matches<'a>( + &self, + path: &'a [u8], + method: DavMethod, + param_names: &[&str], + ) -> Vec> + { + let mut matched = Vec::new(); + for idx in self.set.matches(path) { + let route = &self.routes[idx]; + if route.methods.map(|m| m.contains(method)).unwrap_or(true) { + let mut params = Vec::new(); + if let Some(caps) = route.regex.captures(path) { + for name in param_names { + params.push(caps.name(name).map(|p| Param(p))); + } + } else { + for _ in param_names { + params.push(None); + } + } + matched.push(MatchedRoute { + methods: route.methods, + params, + data: &route.data, + }); + } + } + matched + } +} + +#[cfg(test)] +mod tests { + use super::*; + use webdav_handler::DavMethod; + + fn test_match(rtr: &Router, p: &[u8], user: &str, path: &str) { + let x = rtr.matches(p, DavMethod::Get, &["user", "path"]); + assert!(x.len() > 0); + let x = &x[0]; + if user != "" { + assert!(x.params[0] + .as_ref() + .map(|b| b.as_bytes() == user.as_bytes()) + .unwrap_or(false)); + } + if path != "" { + assert!(x.params[1] + .as_ref() + .map(|b| b.as_bytes() == path.as_bytes()) + .unwrap_or(false)); + } + } + + #[test] + fn test_router() -> Result<(), Box> { + let rtr = Router::::builder() + .add("/", None, 1)? + .add("/users(/:user)", None, 2)? + .add("/files/*path", None, 3)? + .add("/files(/*path)", None, 4)? + .build(); + + test_match(&rtr, b"/", "", ""); + test_match(&rtr, b"/users", "", ""); + test_match(&rtr, b"/users/", "", ""); + test_match(&rtr, b"/users/mike", "mike", ""); + test_match(&rtr, b"/files/foo/bar", "", "foo/bar"); + test_match(&rtr, b"/files", "", ""); + Ok(()) + } +} diff --git a/src/suid.rs b/src/suid.rs new file mode 100644 index 0000000..592a336 --- /dev/null +++ b/src/suid.rs @@ -0,0 +1,304 @@ +use std::io; +use std::sync::atomic::{AtomicBool, Ordering}; + +static THREAD_SWITCH_UGID_USED: AtomicBool = AtomicBool::new(false); + +#[cfg(all(target_os = "linux"))] +mod setuid { + // On x86, the default SYS_setresuid is 16 bits. We need to + // import the 32-bit variant. + #[cfg(target_arch = "x86")] + mod uid32 { + pub use libc::SYS_getgroups32 as SYS_getgroups; + pub use libc::SYS_setgroups32 as SYS_setgroups; + pub use libc::SYS_setresgid32 as SYS_setresgid; + pub use libc::SYS_setresuid32 as SYS_setresuid; + } + #[cfg(not(target_arch = "x86"))] + mod uid32 { + pub use libc::{SYS_getgroups, SYS_setgroups, SYS_setresgid, SYS_setresuid}; + } + use self::uid32::*; + use std::cell::RefCell; + use std::convert::TryInto; + use std::io; + use std::sync::atomic::Ordering; + const ID_NONE: libc::uid_t = 0xffffffff; + + // current credentials of this thread. + struct UgidState { + ruid: u32, + euid: u32, + rgid: u32, + egid: u32, + groups: Vec, + } + + impl UgidState { + fn new() -> UgidState { + super::THREAD_SWITCH_UGID_USED.store(true, Ordering::Release); + UgidState { + ruid: unsafe { libc::getuid() } as u32, + euid: unsafe { libc::geteuid() } as u32, + rgid: unsafe { libc::getgid() } as u32, + egid: unsafe { libc::getegid() } as u32, + groups: getgroups().expect("UgidState::new"), + } + } + } + + fn getgroups() -> io::Result> { + // get number of groups. + let size = unsafe { + libc::syscall( + SYS_getgroups, + 0 as libc::c_int, + std::ptr::null_mut::(), + ) + }; + if size < 0 { + return Err(oserr(size, "getgroups(0, NULL)")); + } + + // get groups. + let mut groups = Vec::::with_capacity(size as usize); + groups.resize(size as usize, 0); + let res = unsafe { libc::syscall(SYS_getgroups, size as libc::c_int, groups.as_mut_ptr() as *mut _) }; + + // sanity check. 
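+        // (The group count was queried separately above; if it changed in the
+        // meantime, or the second syscall failed, bail out rather than return
+        // a partially filled list.)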
+ if res != size { + if res < 0 { + return Err(oserr(res, format!("getgroups({}, buffer)", size))); + } + return Err(io::Error::new( + io::ErrorKind::Other, + format!("getgroups({}, buffer): returned {}", size, res), + )); + } + + Ok(groups) + } + + fn oserr(code: libc::c_long, msg: impl AsRef) -> io::Error { + let msg = msg.as_ref(); + let err = io::Error::from_raw_os_error(code.try_into().unwrap()); + io::Error::new(err.kind(), format!("{}: {}", msg, err)) + } + + // thread-local seteuid. + fn seteuid(uid: u32) -> io::Result<()> { + let res = unsafe { libc::syscall(SYS_setresuid, ID_NONE, uid, ID_NONE) }; + if res < 0 { + return Err(oserr(res, format!("seteuid({})", uid))); + } + Ok(()) + } + + // thread-local setegid. + fn setegid(gid: u32) -> io::Result<()> { + let res = unsafe { libc::syscall(SYS_setresgid, ID_NONE, gid, ID_NONE) }; + if res < 0 { + return Err(oserr(res, format!("setegid({})", gid))); + } + Ok(()) + } + + // thread-local setgroups. + fn setgroups(gids: &[u32]) -> io::Result<()> { + let size = gids.len() as libc::c_int; + let res = unsafe { libc::syscall(SYS_setgroups, size, gids.as_ptr() as *const libc::gid_t) }; + if res < 0 { + return Err(oserr(res, format!("setgroups({}, {:?}", size, gids))); + } + Ok(()) + } + + // credential state is thread-local. + thread_local!(static CURRENT_UGID: RefCell = RefCell::new(UgidState::new())); + + /// Switch thread credentials. + pub(super) fn thread_switch_ugid(newuid: u32, newgid: u32, newgroups: &[u32]) -> (u32, u32, Vec) { + CURRENT_UGID.with(|current_ugid| { + let mut cur = current_ugid.borrow_mut(); + let (olduid, oldgid, oldgroups) = (cur.euid, cur.egid, cur.groups.clone()); + let groups_changed = newgroups != cur.groups.as_slice(); + + // Check if anything changed. + if newuid != cur.euid || newgid != cur.egid || groups_changed { + // See if we have to switch to root privs first. + if cur.euid != 0 && (newuid != cur.ruid || newgid != cur.rgid || groups_changed) { + // Must first switch to root. + if let Err(e) = seteuid(0) { + panic!("{}", e); + } + cur.euid = 0; + } + + if newgid != cur.egid { + // Change gid. + if let Err(e) = setegid(newgid) { + panic!("{}", e); + } + cur.egid = newgid; + } + if groups_changed { + // Change groups. + if let Err(e) = setgroups(newgroups) { + panic!("{}", e); + } + cur.groups.truncate(0); + cur.groups.extend_from_slice(newgroups); + } + if newuid != cur.euid { + // Change uid. + if let Err(e) = seteuid(newuid) { + panic!("{}", e); + } + cur.euid = newuid; + } + } + (olduid, oldgid, oldgroups) + }) + } + + // Yep.. + pub fn has_thread_switch_ugid() -> bool { + true + } +} + +#[cfg(not(target_os = "linux"))] +mod setuid { + // Not implemented, as it looks like only Linux has support for + // per-thread uid/gid switching. + // + // DO NOT implement this through libc::setuid, as that will + // switch the uids of all threads. + // + /// Switch thread credentials. Not implemented! + pub(super) fn thread_switch_ugid(_newuid: u32, _newgid: u32, _newgroups: &[u32]) -> (u32, u32, Vec) { + unimplemented!(); + } + + // Nope. 
+
+pub use self::setuid::has_thread_switch_ugid;
+use self::setuid::thread_switch_ugid;
+
+#[derive(Clone, Debug)]
+struct UgidCreds {
+    pub uid: u32,
+    pub gid: u32,
+    pub groups: Vec<u32>,
+}
+
+pub struct UgidSwitch {
+    target_creds: Option<UgidCreds>,
+}
+
+pub struct UgidSwitchGuard {
+    base_creds: Option<UgidCreds>,
+}
+
+impl UgidSwitch {
+    pub fn new(creds: Option<(u32, u32, &[u32])>) -> UgidSwitch {
+        let target_creds = match creds {
+            Some((uid, gid, groups)) => {
+                Some(UgidCreds {
+                    uid,
+                    gid,
+                    groups: groups.into(),
+                })
+            },
+            None => None,
+        };
+        UgidSwitch { target_creds }
+    }
+
+    #[allow(dead_code)]
+    pub fn run<F, R>(&self, func: F) -> R
+    where F: FnOnce() -> R {
+        let _guard = self.guard();
+        func()
+    }
+
+    pub fn guard(&self) -> UgidSwitchGuard {
+        match &self.target_creds {
+            &None => UgidSwitchGuard { base_creds: None },
+            &Some(ref creds) => {
+                let (uid, gid, groups) = thread_switch_ugid(creds.uid, creds.gid, &creds.groups);
+                UgidSwitchGuard {
+                    base_creds: Some(UgidCreds { uid, gid, groups }),
+                }
+            },
+        }
+    }
+}
+
+impl Drop for UgidSwitchGuard {
+    fn drop(&mut self) {
+        if let Some(ref creds) = self.base_creds {
+            thread_switch_ugid(creds.uid, creds.gid, &creds.groups);
+        }
+    }
+}
+
+/// Switch process credentials. Keeps the saved-uid as root, so that
+/// we can switch to other ids later on.
+pub fn proc_switch_ugid(uid: u32, gid: u32, keep_privs: bool) {
+    if THREAD_SWITCH_UGID_USED.load(Ordering::Acquire) {
+        panic!("proc_switch_ugid: called after thread_switch_ugid() has been used");
+    }
+
+    fn last_os_error() -> io::Error {
+        io::Error::last_os_error()
+    }
+
+    unsafe {
+        // first get full root privs (real, effective, and saved uids)
+        if libc::setuid(0) != 0 {
+            panic!("libc::setuid(0): {:?}", last_os_error());
+        }
+
+        // set real uid, and keep effective uid at 0.
+        #[cfg(not(any(target_os = "openbsd", target_os = "freebsd")))]
+        if libc::setreuid(uid, 0) != 0 {
+            panic!("libc::setreuid({}, 0): {:?}", uid, last_os_error());
+        }
+        #[cfg(any(target_os = "openbsd", target_os = "freebsd"))]
+        if libc::setresuid(uid, 0, 0) != 0 {
+            panic!("libc::setresuid({}, 0, 0): {:?}", uid, last_os_error());
+        }
+
+        // set group id.
+        if libc::setgid(gid) != 0 {
+            panic!("libc::setgid({}): {:?}", gid, last_os_error());
+        }
+
+        // remove _all_ auxiliary groups.
+        if libc::setgroups(0, std::ptr::null::<libc::gid_t>()) != 0 {
+            panic!("setgroups[]: {:?}", last_os_error());
+        }
+
+        if keep_privs {
+            // finally set effective uid. saved uid is still 0.
+            if libc::seteuid(uid) != 0 {
+                panic!("libc::seteuid({}): {:?}", uid, last_os_error());
+            }
+        } else {
+            // drop all privs.
+            if libc::setuid(uid) != 0 {
+                panic!("libc::setuid({}): {:?}", uid, last_os_error());
+            }
+        }
+    }
+}
+
+/// Do we have sufficient privs to switch uids?
+pub fn have_suid_privs() -> bool {
+    unsafe { libc::geteuid() == 0 }
+}
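For illustration only (not part of the patch): the intended pattern is to call proc_switch_ugid() once at startup when dropping process-wide privileges, and to wrap individual blocking operations in a UgidSwitch guard when per-request impersonation is wanted. The function name and file path below are made up.

    // Illustrative only: run one blocking operation with the target user's
    // credentials on the current thread; the guard restores them on drop.
    fn read_as_user(uid: u32, gid: u32, groups: &[u32]) -> std::io::Result<Vec<u8>> {
        let switch = UgidSwitch::new(Some((uid, gid, groups)));
        switch.run(|| std::fs::read("/home/example/file.txt"))
    }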
diff --git a/src/tls.rs b/src/tls.rs
new file mode 100644
index 0000000..09e291b
--- /dev/null
+++ b/src/tls.rs
@@ -0,0 +1,58 @@
+use std::fs::File;
+use std::io::{self, ErrorKind};
+use std::sync::Arc;
+
+use tokio_rustls::rustls::{Certificate, PrivateKey, ServerConfig};
+use tokio_rustls::TlsAcceptor;
+use rustls_pemfile as pemfile;
+
+use crate::config::Server;
+
+pub fn tls_acceptor(cfg: &Server) -> io::Result<TlsAcceptor> {
+
+    // Private key.
+    let pkey_fn = cfg.tls_key.as_ref().ok_or_else(|| {
+        io::Error::new(io::ErrorKind::NotFound, "config: server: tls_key not set")
+    })?;
+    let pkey_file = File::open(pkey_fn).map_err(|e| {
+        io::Error::new(e.kind(), format!("{}: {}", pkey_fn, e))
+    })?;
+    let mut pkey_file = io::BufReader::new(pkey_file);
+    let pkey = match pemfile::read_one(&mut pkey_file) {
+        Ok(Some(pemfile::Item::RSAKey(pkey))) => PrivateKey(pkey),
+        Ok(Some(pemfile::Item::PKCS8Key(pkey))) => PrivateKey(pkey),
+        Ok(Some(pemfile::Item::ECKey(pkey))) => PrivateKey(pkey),
+        Ok(Some(_)) => return Err(io::Error::new(io::ErrorKind::InvalidData, format!("{}: unknown private key format", pkey_fn))),
+        Ok(None) => return Err(io::Error::new(io::ErrorKind::InvalidData, format!("{}: expected one private key", pkey_fn))),
+        Err(_) => return Err(io::Error::new(io::ErrorKind::InvalidData, format!("{}: invalid data", pkey_fn))),
+    };
+
+    // Certificate.
+    let cert_fn = cfg.tls_cert.as_ref().ok_or_else(|| {
+        io::Error::new(io::ErrorKind::NotFound, "config: server: tls_cert not set")
+    })?;
+    let cert_file = File::open(cert_fn).map_err(|e| {
+        io::Error::new(e.kind(), format!("{}: {}", cert_fn, e))
+    })?;
+    let mut cert_file = io::BufReader::new(cert_file);
+    let certs = pemfile::certs(&mut cert_file).map_err(|_| {
+        io::Error::new(io::ErrorKind::InvalidData, format!("{}: invalid data", cert_fn))
+    })?;
+    let certs = certs
+        .into_iter()
+        .map(|cert| Certificate(cert.into()))
+        .collect();
+
+    let config = Arc::new(
+        ServerConfig::builder()
+            .with_safe_defaults()
+            .with_no_client_auth()
+            .with_single_cert(certs, pkey)
+            .map_err(|e| {
+                io::Error::new(ErrorKind::InvalidData, format!("{}/{}: {}", pkey_fn, cert_fn, e))
+            })?
+    ).into();
+
+    Ok(config)
+}
+
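For illustration only (not part of the patch): a sketch of how the acceptor might be used on an accepted connection; the handle() function and the hand-off comment are assumptions.

    // Illustrative only: perform the TLS handshake on an accepted TCP stream.
    async fn handle(acceptor: tokio_rustls::TlsAcceptor, sock: tokio::net::TcpStream) -> std::io::Result<()> {
        let tls_stream = acceptor.accept(sock).await?;
        // ... hand tls_stream to hyper / the webdav handler here ...
        drop(tls_stream);
        Ok(())
    }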
diff --git a/src/unixuser.rs b/src/unixuser.rs
new file mode 100644
index 0000000..c9d3fa7
--- /dev/null
+++ b/src/unixuser.rs
@@ -0,0 +1,135 @@
+use std;
+use std::ffi::{CStr, OsStr};
+use std::io;
+use std::os::unix::ffi::OsStrExt;
+use std::path::{Path, PathBuf};
+
+use tokio::task::block_in_place;
+
+#[derive(Debug)]
+pub struct User {
+    pub name: String,
+    pub passwd: String,
+    pub gecos: String,
+    pub uid: u32,
+    pub gid: u32,
+    pub groups: Vec<u32>,
+    pub dir: PathBuf,
+    pub shell: PathBuf,
+}
+
+unsafe fn cptr_to_osstr<'a>(c: *const libc::c_char) -> &'a OsStr {
+    let bytes = CStr::from_ptr(c).to_bytes();
+    OsStr::from_bytes(&bytes)
+}
+
+unsafe fn cptr_to_path<'a>(c: *const libc::c_char) -> &'a Path {
+    Path::new(cptr_to_osstr(c))
+}
+
+unsafe fn to_user(pwd: &libc::passwd) -> User {
+    // turn into (unsafe!) rust slices
+    let cs_name = CStr::from_ptr(pwd.pw_name);
+    let cs_passwd = CStr::from_ptr(pwd.pw_passwd);
+    let cs_gecos = CStr::from_ptr(pwd.pw_gecos);
+    let cs_dir = cptr_to_path(pwd.pw_dir);
+    let cs_shell = cptr_to_path(pwd.pw_shell);
+
+    // then turn the slices into safe owned values.
+    User {
+        name: cs_name.to_string_lossy().into_owned(),
+        passwd: cs_passwd.to_string_lossy().into_owned(),
+        gecos: cs_gecos.to_string_lossy().into_owned(),
+        dir: cs_dir.to_path_buf(),
+        shell: cs_shell.to_path_buf(),
+        uid: pwd.pw_uid,
+        gid: pwd.pw_gid,
+        groups: Vec::new(),
+    }
+}
+
+impl User {
+    pub fn by_name(name: &str, with_groups: bool) -> Result<User, io::Error> {
+        let mut buf = [0u8; 1024];
+        let mut pwd: libc::passwd = unsafe { std::mem::zeroed() };
+        let mut result: *mut libc::passwd = std::ptr::null_mut();
+
+        let cname = match std::ffi::CString::new(name) {
+            Ok(un) => un,
+            Err(_) => return Err(io::Error::from_raw_os_error(libc::ENOENT)),
+        };
+        let ret = unsafe {
+            libc::getpwnam_r(
+                cname.as_ptr(),
+                &mut pwd as *mut _,
+                buf.as_mut_ptr() as *mut _,
+                buf.len() as libc::size_t,
+                &mut result as *mut _,
+            )
+        };
+
+        if ret != 0 {
+            return Err(io::Error::from_raw_os_error(ret));
+        }
+        if result.is_null() {
+            return Err(io::Error::from_raw_os_error(libc::ENOENT));
+        }
+        let mut user = unsafe { to_user(&pwd) };
+
+        if with_groups {
+            let mut ngroups = (buf.len() / std::mem::size_of::<libc::gid_t>()) as libc::c_int;
+            let ret = unsafe {
+                libc::getgrouplist(
+                    cname.as_ptr(),
+                    user.gid as libc::gid_t,
+                    buf.as_mut_ptr() as *mut _,
+                    &mut ngroups as *mut _,
+                )
+            };
+            if ret >= 0 && ngroups > 0 {
+                let mut groups_vec = Vec::with_capacity(ngroups as usize);
+                let groups = unsafe {
+                    std::slice::from_raw_parts(buf.as_ptr() as *const libc::gid_t, ngroups as usize)
+                };
+                //
+                // Only supplementary or auxiliary groups, filter out primary.
+                //
+                groups_vec.extend(groups.iter().map(|&g| g as u32).filter(|&g| g != user.gid));
+                user.groups = groups_vec;
+            }
+        }
+
+        Ok(user)
+    }
+
+    /*
+    pub fn by_uid(uid: u32) -> Result<User, io::Error> {
+        let mut buf = [0; 1024];
+        let mut pwd: libc::passwd = unsafe { std::mem::zeroed() };
+        let mut result: *mut libc::passwd = std::ptr::null_mut();
+
+        let ret = unsafe {
+            getpwuid_r(
+                uid,
+                &mut pwd as *mut _,
+                buf.as_mut_ptr(),
+                buf.len() as libc::size_t,
+                &mut result as *mut _,
+            )
+        };
+        if ret == 0 {
+            if result.is_null() {
+                return Err(io::Error::from_raw_os_error(libc::ENOENT));
+            }
+            let p = unsafe { to_user(&pwd) };
+            Ok(p)
+        } else {
+            Err(io::Error::from_raw_os_error(ret))
+        }
+    }
+    */
+
+    pub async fn by_name_async(name: &str, with_groups: bool) -> Result<User, io::Error> {
+        block_in_place(move || User::by_name(name, with_groups))
+    }
+}
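For illustration only (not part of the patch): a sketch of an account lookup with supplementary groups during authentication; the lookup() function and the "mike" username are placeholders.

    // Illustrative only: resolve an account (with supplementary groups) without
    // blocking the async executor.
    async fn lookup() -> std::io::Result<()> {
        let user = User::by_name_async("mike", true).await?;
        println!("{}: uid {} gid {} home {:?}", user.name, user.uid, user.gid, user.dir);
        Ok(())
    }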
diff --git a/src/userfs.rs b/src/userfs.rs
new file mode 100644
index 0000000..ecd6f36
--- /dev/null
+++ b/src/userfs.rs
@@ -0,0 +1,125 @@
+use std::any::Any;
+use std::path::{Path, PathBuf};
+
+use webdav_handler::davpath::DavPath;
+use webdav_handler::fs::*;
+use webdav_handler::localfs::LocalFs;
+
+use crate::suid::UgidSwitch;
+
+#[derive(Clone)]
+pub struct UserFs {
+    pub fs: LocalFs,
+    basedir: PathBuf,
+    uid: u32,
+}
+
+impl UserFs {
+    pub fn new(
+        dir: impl AsRef<Path>,
+        target_creds: Option<(u32, u32, &[u32])>,
+        public: bool,
+        case_insensitive: bool,
+        macos: bool,
+    ) -> Box<UserFs>
+    {
+        // uid is used for quota() calls.
+        let uid = target_creds.as_ref().map(|ugid| ugid.0).unwrap_or(0);
+
+        // set up the LocalFs hooks for uid switching.
+        let switch = UgidSwitch::new(target_creds.clone());
+        let blocking_guard = Box::new(move || Box::new(switch.guard()) as Box<dyn Any>);
+
+        Box::new(UserFs {
+            basedir: dir.as_ref().to_path_buf(),
+            fs: *LocalFs::new_with_fs_access_guard(
+                dir,
+                public,
+                case_insensitive,
+                macos,
+                Some(blocking_guard),
+            ),
+            uid,
+        })
+    }
+}
+
+impl DavFileSystem for UserFs {
+    fn metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
+        self.fs.metadata(path)
+    }
+
+    fn symlink_metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
+        self.fs.symlink_metadata(path)
+    }
+
+    fn read_dir<'a>(
+        &'a self,
+        path: &'a DavPath,
+        meta: ReadDirMeta,
+    ) -> FsFuture<FsStream<Box<dyn DavDirEntry>>>
+    {
+        self.fs.read_dir(path, meta)
+    }
+
+    fn open<'a>(&'a self, path: &'a DavPath, options: OpenOptions) -> FsFuture<Box<dyn DavFile>> {
+        self.fs.open(path, options)
+    }
+
+    fn create_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
+        self.fs.create_dir(path)
+    }
+
+    fn remove_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
+        self.fs.remove_dir(path)
+    }
+
+    fn remove_file<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
+        self.fs.remove_file(path)
+    }
+
+    fn rename<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
+        self.fs.rename(from, to)
+    }
+
+    fn copy<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
+        self.fs.copy(from, to)
+    }
+
+    #[cfg(feature = "quota")]
+    fn get_quota<'a>(&'a self) -> FsFuture<(u64, Option<u64>)> {
+        use crate::cache;
+        use fs_quota::*;
+        use futures::future::FutureExt;
+        use std::time::Duration;
+
+        lazy_static::lazy_static! {
+            static ref QCACHE: cache::Cache<PathBuf, FsQuota> = cache::Cache::new().maxage(Duration::new(30, 0));
+        }
+
+        async move {
+            let mut key = self.basedir.clone();
+            key.push(&self.uid.to_string());
+            let r = match QCACHE.get(&key) {
+                Some(r) => {
+                    debug!("get_quota for {:?}: from cache", key);
+                    r
+                },
+                None => {
+                    let path = self.basedir.clone();
+                    let uid = self.uid;
+                    let r = self
+                        .fs
+                        .blocking(move || {
+                            FsQuota::check(&path, Some(uid)).map_err(|_| FsError::GeneralFailure)
+                        })
+                        .await?;
+                    debug!("get_quota for {:?}: insert to cache", key);
+                    QCACHE.insert(key, r)
+                },
+            };
+            Ok((r.bytes_used, r.bytes_limit))
+        }
+        .boxed()
+    }
+}
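For illustration only (not part of the patch): a sketch tying unixuser::User and UserFs together; the user_filesystem() name and the flag values are assumptions.

    // Illustrative only: serve the user's home directory, switching to their
    // uid/gid/groups around every blocking filesystem call.
    fn user_filesystem(user: &crate::unixuser::User) -> Box<UserFs> {
        UserFs::new(
            &user.dir,
            Some((user.uid, user.gid, &user.groups[..])),
            false, // public
            false, // case_insensitive
            true,  // macos quirks
        )
    }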
diff --git a/webdav-server.toml b/webdav-server.toml
new file mode 100644
index 0000000..a92ecb2
--- /dev/null
+++ b/webdav-server.toml
@@ -0,0 +1,154 @@
+#
+# Webdav server settings.
+#
+[server]
+  # Port(s) to listen on.
+  listen = [ "0.0.0.0:4918", "[::]:4918" ]
+
+  # Tls config.
+  # tls_listen = [ "0.0.0.0:443", "[::]:443" ]
+  # tls_cert = "/etc/ssl/certs/example.com-chained.crt"
+  # tls_key = "/etc/ssl/private/example.com.key"
+
+  # Unix uid/gid to run under (when not running setuid as user).
+  # Optional - if not set, will not change uid.
+  uid = 33
+  gid = 33
+  # Server: header to send (default: "webdav-server-rs")
+  identification = "webdav-server-rs"
+
+#
+# User settings.
+#
+# These are defaults. The same settings can be applied
+# on the [[location]] level.
+#
+[accounts]
+  # how to authenticate: pam, htaccess.NAME (default: unset).
+  auth-type = "pam"
+  # what account "database" to use (default: unset).
+  acct-type = "unix"
+  # realm to use with basic authentication (default: "Webdav Server").
+  realm = "Webdav Server"
+
+#
+# PAM authentication settings.
+#
+[pam]
+  # PAM service to use.
+  service = "other"
+  # Cache timeout (secs). 0 disables the cache (default: 120).
+  cache-timeout = 120
+  # Number of threads to use for the PAM service threadpool (default: 8).
+  threads = 8
+
+#
+# Htpasswd authentication settings.
+#
+[htpasswd.example]
+  # htpasswd file.
+  htpasswd = "/etc/htpasswd.example"
+
+#
+# Unix account settings.
+#
+[unix]
+  # Cache timeout (secs). 0 disables the cache (default: 120).
+  cache-timeout = 120
+  # Accounts with a user-id lower than this value cannot login (default: 0).
+  min-uid = 1000
+
+#
+# Below follow a number of locations. Each location definition starts with
+# [[location]] (literally). For every request, the "path" and "methods"
+# settings of each location are checked in the same order as they appear
+# in this file. The first one that matches is used.
+#
+
+##
+## Example location. Lists all settings.
+##
+[[location]]
+  # Matching route(s) to the resource.
+  #
+  # As is the convention with many http routers, you can use :PARAMETER
+  # (path element match) or *PARAMETER (path wildcard match) in the
+  # path definition. Everything between parentheses is optional.
+  #
+  # Currently 2 parameters can be used:
+  #
+  # - "path" is the part of the path to map to the filesystem
+  # - "user" matches the currently authenticated user.
+  #
+  # A couple of examples:
+  #
+  # - For authenticated webdav sessions: [ "/:user/*path" ] or [ "/:user(/*path)" ]
+  # - For serving / but no dirs below it: [ "/(:path)" ]
+  # - For serving / and everything below it: [ "/*path" ]
+  #
+  # Note that only "path" is applied to the "directory" setting to
+  # form a path on the filesystem. So route = [ "/loca/tion/*path" ] and
+  # directory = "/var/www/html" will serve the content of /var/www/html
+  # at the http path /loca/tion/.
+  #
+  # If there is no route defined, the location is ignored.
+  route = [ "/*path" ]
+
+  # Allowed methods (default: all).
+  #
+  # List of individual methods, or one of:
+  #
+  # http-ro:   GET, HEAD
+  # http-rw:   GET, HEAD, PUT
+  # webdav-ro: GET, HEAD, OPTIONS, PROPFIND
+  # webdav-rw: GET, HEAD, OPTIONS, PROPFIND, PUT, PATCH, PROPPATCH,
+  #            MKCOL, COPY, MOVE, DELETE, LOCK, UNLOCK
+  methods = [ "webdav-ro" ]
+
+  # Authenticate? true, false, opportunistic, write (default: opportunistic).
+  #
+  # "opportunistic" means "if you send an Authorization: header, we'll check it".
+  # "write" means "authentication is only required for methods in webdav-rw
+  # that are not in webdav-ro".
+  auth = "false"
+
+  # Type of handler: filesystem, virtroot. Mandatory.
+  #
+  # The filesystem handler is what you would expect.
+  #
+  # The virtroot handler is a special handler for PROPFIND requests on
+  # authenticated sessions, i.e. where we have a username. It is useful
+  # when you have your webdav clients all under, say, /:user/*path.
+  # In that case, normally a PROPFIND of "/" would return either NOT_FOUND
+  # or the contents of the directory of some [[location]]. If the handler
+  # is set to "virtroot", a PROPFIND will list exactly one subdirectory,
+  # with the name of the authenticated user.
+  #
+  handler = "filesystem"
+
+  # What to do on 404 Not Found: continue, return (default: return).
+  on_notfound = "return"
+
+  # Change UID/GID to that of the authenticated user: true, false (default: false).
+  setuid = false
+
+  # Directory to serve. Mandatory.
+  #
+  # You can use "~" to indicate "homedirectory of authenticated user".
+  #
+  directory = "/var/www/html"
+
+  # Index file to serve when you GET a directory (if it exists) (default: none).
+  #indexfile = "index.html"
+
+  # Serve HTML directory indexes: true, false (default: false).
+  autoindex = false
+
+  # webdav PROPFIND: hide symbolic links: true, false (default: true).
+  hide-symlinks = true
+
+  # case insensitive lookups: true, false, ms (default: false).
+ # "ms" means "for Microsoft clients". + case-insensitive = "false" + +# Another location definition could follow. +#[[location]] +